repo (string, 2-99 chars) | file (string, 13-225 chars) | code (string, 0-18.3M chars) | file_length (int64, 0-18.3M) | avg_line_length (float64, 0-1.36M) | max_line_length (int64, 0-4.26M) | extension_type (1 class: py)
---|---|---|---|---|---|---|
DMASTE | DMASTE-main/BMRC/utils.py | # coding: UTF-8
# @Author: Shaowei Chen, Contact: [email protected]
# @Date: 2021-5-4
import torch
from torch.nn import functional as F
import logging
def normalize_size(tensor):
if len(tensor.size()) == 3:
tensor = tensor.contiguous().view(-1, tensor.size(2))
elif len(tensor.size()) == 2:
tensor = tensor.contiguous().view(-1)
return tensor
def calculate_entity_loss(pred_start, pred_end, gold_start, gold_end):
pred_start = normalize_size(pred_start)
pred_end = normalize_size(pred_end)
gold_start = normalize_size(gold_start)
gold_end = normalize_size(gold_end)
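    # Class 1 (boundary token) is weighted 3x to offset the start/end label imbalance;
    # positions labeled -1 (query tokens and padding) are ignored by the loss.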
weight = torch.tensor([1, 3]).float().cuda()
    loss_start = F.cross_entropy(pred_start, gold_start.long(), reduction='sum', weight=weight, ignore_index=-1)
    loss_end = F.cross_entropy(pred_end, gold_end.long(), reduction='sum', weight=weight, ignore_index=-1)
return 0.5 * loss_start + 0.5 * loss_end
def calculate_domain_loss(pred_domain, gold_domain):
    return F.cross_entropy(normalize_size(pred_domain), normalize_size(gold_domain).long(), reduction='sum', ignore_index=-1)
def calculate_sentiment_domain_loss(pred_domain, gold_domain):
    return F.cross_entropy(pred_domain, gold_domain.long(), reduction='sum', ignore_index=-1)
def calculate_sentiment_loss(pred_sentiment, gold_sentiment):
    return F.cross_entropy(pred_sentiment, gold_sentiment.long(), reduction='sum', ignore_index=-1)
def get_logger(filename, verbosity=1, name=None):
level_dict = {0: logging.DEBUG, 1: logging.INFO, 2: logging.WARNING}
formatter = logging.Formatter(
"[%(asctime)s][%(filename)s][line:%(lineno)d][%(levelname)s] %(message)s"
)
logger = logging.getLogger(name)
logger.setLevel(level_dict[verbosity])
fh = logging.FileHandler(filename, "w")
fh.setFormatter(formatter)
logger.addHandler(fh)
sh = logging.StreamHandler()
sh.setFormatter(formatter)
logger.addHandler(sh)
return logger
def filter_prob(f_asp_prob, f_opi_prob, f_opi_start_index, f_opi_end_index, beta):
filter_start = []
filter_end = []
for idx in range(len(f_opi_prob)):
if f_asp_prob*f_opi_prob[idx]>=beta:
filter_start.append(f_opi_start_index[idx])
filter_end.append(f_opi_end_index[idx])
return filter_start, filter_end
def filter_unpaired(start_prob, end_prob, start, end):
filtered_start = []
filtered_end = []
filtered_prob = []
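    # Encode each position as 1 (start only), 2 (end only) or 3 (start and end coincide),
    # then pair each end with the most recent unmatched start, keeping spans of at most five tokens.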
if len(start)>0 and len(end)>0:
length = start[-1]+1 if start[-1]>=end[-1] else end[-1]+1
temp_seq = [0]*length
for s in start:
temp_seq[s]+=1
for e in end:
temp_seq[e]+=2
last_start = -1
for idx in range(len(temp_seq)):
assert temp_seq[idx]<4
if temp_seq[idx] == 1:
last_start = idx
elif temp_seq[idx] == 2:
if last_start!=-1 and idx-last_start<5:
filtered_start.append(last_start)
filtered_end.append(idx)
prob = start_prob[start.index(last_start)] * end_prob[end.index(idx)]
filtered_prob.append(prob)
last_start = -1
elif temp_seq[idx] == 3:
filtered_start.append(idx)
filtered_end.append(idx)
prob = start_prob[start.index(idx)] * end_prob[end.index(idx)]
filtered_prob.append(prob)
last_start = -1
return filtered_start, filtered_end, filtered_prob | 3,575 | 35.121212 | 128 | py |
DMASTE | DMASTE-main/BMRC/data_utils.py | from torch.utils.data import Dataset
import random
import torch
class Domain:
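    # Integer domain ids; used below to label every sentence token of the unlabeled target-domain data.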
Target = 1
Source = 0
class Unlabeled_Dataset(Dataset):
def __init__(self, path, tokenizer, max_len=256):
self.data = []
self.max_len = max_len
with open(path) as f:
for line in f:
sent = line.split('####')[-1].strip()
words = ['[ia]'] + sent.split()
idx_list1 = random.sample(range(len(words)), 4)
idx_list2 = random.sample(range(1, 6), 4)
sample_words = [words[i: i + j] for i, j in zip(idx_list1, idx_list2)]
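                # Build the five BMRC-style queries (aspect, opinion, aspect->opinion,
                # aspect+opinion->sentiment, opinion->aspect); random word spans stand in
                # for gold terms because the target-domain data is unlabeled.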
query_list = [["What", "aspects", "?"],
["What", "opinions", "?"],
["What", "opinion", "given", "the", "aspect"] + sample_words[0] + ["?"],
["What", "sentiment", "given", "the", "aspect"] + sample_words[1] + ["and", "the", "opinion"] + sample_words[2] + ['?'],
["What", "aspect", "does", "the", "opinion"] + sample_words[3] + ["describe", "?"]]
for query in query_list:
input_token = ['[CLS]'] + query + ['[SEP]'] + words
seg = [0] * (len(query) + 2) + [1] * len(words)
domain_label = [-1] * (len(query) + 2) + [Domain.Target] * len(words)
input_ids = tokenizer.convert_tokens_to_ids([word.lower() if word not in ['[CLS]', '[SEP]'] else word for word in input_token])
self.data.append({'input_ids': input_ids, 'token_type_ids': seg, 'domain_label': domain_label})
def __getitem__(self, i):
self.data[i]['attention_mask'] = [1] * len(self.data[i]['input_ids'])
ret = dict()
for k in self.data[i]:
ret[k] = self.data[i][k][: self.max_len]
pad = 0 if k != 'domain_label' else -1
ret[k] = ret[k] + [pad] * (self.max_len - len(ret[k]))
ret[k] = torch.tensor(ret[k])
# return ret['input_ids'], ret['token_type_ids'], ret['attention_mask'], ret['domain_label']
return ret
def __len__(self):
return len(self.data)
if __name__ == '__main__':
from transformers import BertTokenizer
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
dataset = Unlabeled_Dataset('../amazon/home.txt', tokenizer)
for i in range(10):
print(dataset[i])
print(tokenizer.convert_ids_to_tokens(dataset[i]['input_ids']))
print()
| 2,601 | 45.464286 | 150 | py |
DMASTE | DMASTE-main/BMRC/makeData_dual.py | # @Author: Shaowei Chen, Contact: [email protected]
# @Date: 2021-5-4
import torch
from torch.utils.data import Dataset
from transformers import BertTokenizer
import numpy as np
_tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
class dual_sample(object):
def __init__(self,
original_sample,
text,
forward_querys,
forward_answers,
backward_querys,
backward_answers,
sentiment_querys,
sentiment_answers):
self.original_sample = original_sample
        self.text = text
self.forward_querys = forward_querys
self.forward_answers = forward_answers
self.backward_querys = backward_querys
self.backward_answers = backward_answers
self.sentiment_querys = sentiment_querys
self.sentiment_answers = sentiment_answers
class sample_tokenized(object):
def __init__(self,
original_sample,
forward_querys,
forward_answers,
backward_querys,
backward_answers,
sentiment_querys,
sentiment_answers,
forward_seg,
backward_seg,
sentiment_seg):
self.original_sample = original_sample
self.forward_querys = forward_querys
self.forward_answers = forward_answers
self.backward_querys = backward_querys
self.backward_answers = backward_answers
self.sentiment_querys = sentiment_querys
self.sentiment_answers = sentiment_answers
self.forward_seg = forward_seg
self.backward_seg = backward_seg
self.sentiment_seg = sentiment_seg
class OriginalDataset(Dataset):
def __init__(self, pre_data):
self._forward_asp_query = pre_data['_forward_asp_query']
self._forward_opi_query = pre_data['_forward_opi_query'] # [max_aspect_num, max_opinion_query_length]
self._forward_asp_answer_start = pre_data['_forward_asp_answer_start']
self._forward_asp_answer_end = pre_data['_forward_asp_answer_end']
self._forward_opi_answer_start = pre_data['_forward_opi_answer_start']
self._forward_opi_answer_end = pre_data['_forward_opi_answer_end']
self._forward_asp_query_mask = pre_data['_forward_asp_query_mask'] # [max_aspect_num, max_opinion_query_length]
self._forward_opi_query_mask = pre_data['_forward_opi_query_mask'] # [max_aspect_num, max_opinion_query_length]
self._forward_asp_query_seg = pre_data['_forward_asp_query_seg'] # [max_aspect_num, max_opinion_query_length]
self._forward_opi_query_seg = pre_data['_forward_opi_query_seg'] # [max_aspect_num, max_opinion_query_length]
self._backward_asp_query = pre_data['_backward_asp_query']
self._backward_opi_query = pre_data['_backward_opi_query'] # [max_aspect_num, max_opinion_query_length]
self._backward_asp_answer_start = pre_data['_backward_asp_answer_start']
self._backward_asp_answer_end = pre_data['_backward_asp_answer_end']
self._backward_opi_answer_start = pre_data['_backward_opi_answer_start']
self._backward_opi_answer_end = pre_data['_backward_opi_answer_end']
self._backward_asp_query_mask = pre_data[
'_backward_asp_query_mask'] # [max_aspect_num, max_opinion_query_length]
self._backward_opi_query_mask = pre_data[
'_backward_opi_query_mask'] # [max_aspect_num, max_opinion_query_length]
self._backward_asp_query_seg = pre_data['_backward_asp_query_seg'] # [max_aspect_num, max_opinion_query_length]
self._backward_opi_query_seg = pre_data['_backward_opi_query_seg'] # [max_aspect_num, max_opinion_query_length]
self._sentiment_query = pre_data['_sentiment_query'] # [max_aspect_num, max_sentiment_query_length]
self._sentiment_answer = pre_data['_sentiment_answer']
self._sentiment_query_mask = pre_data['_sentiment_query_mask'] # [max_aspect_num, max_sentiment_query_length]
self._sentiment_query_seg = pre_data['_sentiment_query_seg'] # [max_aspect_num, max_sentiment_query_length]
self._aspect_num = pre_data['_aspect_num']
self._opinion_num = pre_data['_opinion_num']
def pre_processing(sample_list, max_len):
_forward_asp_query = []
_forward_opi_query = []
_forward_asp_answer_start = []
_forward_asp_answer_end = []
_forward_opi_answer_start = []
_forward_opi_answer_end = []
_forward_asp_query_mask = []
_forward_opi_query_mask = []
_forward_asp_query_seg = []
_forward_opi_query_seg = []
_backward_asp_query = []
_backward_opi_query = []
_backward_asp_answer_start = []
_backward_asp_answer_end = []
_backward_opi_answer_start = []
_backward_opi_answer_end = []
_backward_asp_query_mask = []
_backward_opi_query_mask = []
_backward_asp_query_seg = []
_backward_opi_query_seg = []
_sentiment_query = []
_sentiment_answer = []
_sentiment_query_mask = []
_sentiment_query_seg = []
_aspect_num = []
_opinion_num = []
for instance in sample_list:
f_query_list = instance.forward_querys
f_answer_list = instance.forward_answers
f_query_seg_list = instance.forward_seg
b_query_list = instance.backward_querys
b_answer_list = instance.backward_answers
b_query_seg_list = instance.backward_seg
s_query_list = instance.sentiment_querys
s_answer_list = instance.sentiment_answers
s_query_seg_list = instance.sentiment_seg
# _aspect_num: 1/2/3/...
_aspect_num.append(int(len(f_query_list) - 1))
_opinion_num.append(int(len(b_query_list) - 1))
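        # Forward direction: one aspect-extraction query (index 0) followed by one opinion
        # query per gold aspect; the backward direction mirrors this with the opinion query first.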
# Forward
# Aspect
# query
assert len(f_query_list[0]) == len(f_answer_list[0][0]) == len(f_answer_list[0][1])
f_asp_pad_num = max_len['mfor_asp_len'] - len(f_query_list[0])
_forward_asp_query.append(_tokenizer.convert_tokens_to_ids(
[word.lower() if word not in ['[CLS]', '[SEP]'] else word for word in f_query_list[0]]))
_forward_asp_query[-1].extend([0] * f_asp_pad_num)
# query_mask
_forward_asp_query_mask.append([1 for i in range(len(f_query_list[0]))])
_forward_asp_query_mask[-1].extend([0] * f_asp_pad_num)
# answer
_forward_asp_answer_start.append(f_answer_list[0][0])
_forward_asp_answer_start[-1].extend([-1] * f_asp_pad_num)
_forward_asp_answer_end.append(f_answer_list[0][1])
_forward_asp_answer_end[-1].extend([-1] * f_asp_pad_num)
# seg
_forward_asp_query_seg.append(f_query_seg_list[0])
_forward_asp_query_seg[-1].extend([1] * f_asp_pad_num)
# Opinion
single_opinion_query = []
single_opinion_query_mask = []
single_opinion_query_seg = []
single_opinion_answer_start = []
single_opinion_answer_end = []
for i in range(1, len(f_query_list)):
assert len(f_query_list[i]) == len(f_answer_list[i][0]) == len(f_answer_list[i][1])
pad_num = max_len['mfor_opi_len'] - len(f_query_list[i])
# query
single_opinion_query.append(_tokenizer.convert_tokens_to_ids(
[word.lower() if word not in ['[CLS]', '[SEP]'] else word for word in f_query_list[i]]))
single_opinion_query[-1].extend([0] * pad_num)
# query_mask
single_opinion_query_mask.append([1 for i in range(len(f_query_list[i]))])
single_opinion_query_mask[-1].extend([0] * pad_num)
# query_seg
single_opinion_query_seg.append(f_query_seg_list[i])
single_opinion_query_seg[-1].extend([1] * pad_num)
# answer
single_opinion_answer_start.append(f_answer_list[i][0])
single_opinion_answer_start[-1].extend([-1] * pad_num)
single_opinion_answer_end.append(f_answer_list[i][1])
single_opinion_answer_end[-1].extend([-1] * pad_num)
# PAD: max_aspect_num
_forward_opi_query.append(single_opinion_query)
_forward_opi_query[-1].extend([[0 for i in range(max_len['mfor_opi_len'])]] * (max_len['max_aspect_num'] - _aspect_num[-1]))
_forward_opi_query_mask.append(single_opinion_query_mask)
_forward_opi_query_mask[-1].extend([[0 for i in range(max_len['mfor_opi_len'])]] * (max_len['max_aspect_num'] - _aspect_num[-1]))
_forward_opi_query_seg.append(single_opinion_query_seg)
_forward_opi_query_seg[-1].extend([[0 for i in range(max_len['mfor_opi_len'])]] * (max_len['max_aspect_num'] - _aspect_num[-1]))
_forward_opi_answer_start.append(single_opinion_answer_start)
_forward_opi_answer_start[-1].extend([[-1 for i in range(max_len['mfor_opi_len'])]] * (max_len['max_aspect_num'] - _aspect_num[-1]))
_forward_opi_answer_end.append(single_opinion_answer_end)
_forward_opi_answer_end[-1].extend([[-1 for i in range(max_len['mfor_opi_len'])]] * (max_len['max_aspect_num'] - _aspect_num[-1]))
# Backward
# opinion
# query
assert len(b_query_list[0]) == len(b_answer_list[0][0]) == len(b_answer_list[0][1])
b_opi_pad_num = max_len['mback_opi_len'] - len(b_query_list[0])
_backward_opi_query.append(_tokenizer.convert_tokens_to_ids(
[word.lower() if word not in ['[CLS]', '[SEP]'] else word for word in b_query_list[0]]))
_backward_opi_query[-1].extend([0] * b_opi_pad_num)
# mask
_backward_opi_query_mask.append([1 for i in range(len(b_query_list[0]))])
_backward_opi_query_mask[-1].extend([0] * b_opi_pad_num)
# answer
_backward_opi_answer_start.append(b_answer_list[0][0])
_backward_opi_answer_start[-1].extend([-1] * b_opi_pad_num)
_backward_opi_answer_end.append(b_answer_list[0][1])
_backward_opi_answer_end[-1].extend([-1] * b_opi_pad_num)
# seg
_backward_opi_query_seg.append(b_query_seg_list[0])
_backward_opi_query_seg[-1].extend([1] * b_opi_pad_num)
# Aspect
single_aspect_query = []
single_aspect_query_mask = []
single_aspect_query_seg = []
single_aspect_answer_start = []
single_aspect_answer_end = []
for i in range(1, len(b_query_list)):
assert len(b_query_list[i]) == len(b_answer_list[i][0]) == len(b_answer_list[i][1])
pad_num = max_len['mback_asp_len'] - len(b_query_list[i])
# query
single_aspect_query.append(_tokenizer.convert_tokens_to_ids(
[word.lower() if word not in ['[CLS]', '[SEP]'] else word for word in b_query_list[i]]))
single_aspect_query[-1].extend([0] * pad_num)
# query_mask
single_aspect_query_mask.append([1 for i in range(len(b_query_list[i]))])
single_aspect_query_mask[-1].extend([0] * pad_num)
# query_seg
single_aspect_query_seg.append(b_query_seg_list[i])
single_aspect_query_seg[-1].extend([1] * pad_num)
# answer
single_aspect_answer_start.append(b_answer_list[i][0])
single_aspect_answer_start[-1].extend([-1] * pad_num)
single_aspect_answer_end.append(b_answer_list[i][1])
single_aspect_answer_end[-1].extend([-1] * pad_num)
# PAD: max_opinion_num
_backward_asp_query.append(single_aspect_query)
_backward_asp_query[-1].extend([[0 for i in range(max_len['mback_asp_len'])]] * (max_len['max_opinion_num'] - _opinion_num[-1]))
_backward_asp_query_mask.append(single_aspect_query_mask)
_backward_asp_query_mask[-1].extend([[0 for i in range(max_len['mback_asp_len'])]] * (max_len['max_opinion_num'] - _opinion_num[-1]))
_backward_asp_query_seg.append(single_aspect_query_seg)
_backward_asp_query_seg[-1].extend([[0 for i in range(max_len['mback_asp_len'])]] * (max_len['max_opinion_num'] - _opinion_num[-1]))
_backward_asp_answer_start.append(single_aspect_answer_start)
_backward_asp_answer_start[-1].extend([[-1 for i in range(max_len['mback_asp_len'])]] * (max_len['max_opinion_num'] - _opinion_num[-1]))
_backward_asp_answer_end.append(single_aspect_answer_end)
_backward_asp_answer_end[-1].extend([[-1 for i in range(max_len['mback_asp_len'])]] * (max_len['max_opinion_num'] - _opinion_num[-1]))
# Sentiment
single_sentiment_query = []
single_sentiment_query_mask = []
single_sentiment_query_seg = []
single_sentiment_answer = []
for j in range(len(s_query_list)):
sent_pad_num = max_len['max_sent_len'] - len(s_query_list[j])
single_sentiment_query.append(_tokenizer.convert_tokens_to_ids(
[word.lower() if word not in ['[CLS]', '[SEP]'] else word for word in s_query_list[j]]))
single_sentiment_query[-1].extend([0] * sent_pad_num)
single_sentiment_query_mask.append([1 for i in range(len(s_query_list[j]))])
single_sentiment_query_mask[-1].extend([0] * sent_pad_num)
# query_seg
single_sentiment_query_seg.append(s_query_seg_list[j])
single_sentiment_query_seg[-1].extend([1] * sent_pad_num)
single_sentiment_answer.append(s_answer_list[j])
_sentiment_query.append(single_sentiment_query)
_sentiment_query[-1].extend([[0 for i in range(max_len['max_sent_len'])]] * (max_len['max_aspect_num'] - _aspect_num[-1]))
_sentiment_query_mask.append(single_sentiment_query_mask)
_sentiment_query_mask[-1].extend([[0 for i in range(max_len['max_sent_len'])]] * (max_len['max_aspect_num'] - _aspect_num[-1]))
_sentiment_query_seg.append(single_sentiment_query_seg)
_sentiment_query_seg[-1].extend([[0 for i in range(max_len['max_sent_len'])]] * (max_len['max_aspect_num'] - _aspect_num[-1]))
_sentiment_answer.append(single_sentiment_answer)
_sentiment_answer[-1].extend([-1] * (max_len['max_aspect_num'] - _aspect_num[-1]))
def truncate(dataset, max_length):
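        # Truncate each (possibly nested) sequence to max_length while preserving its final element.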
for i in range(len(dataset)):
ins = dataset[i]
if isinstance(ins[0], list):
new_ins = []
for t in ins:
assert not isinstance(t[0], list)
new_ins.append(t[: max_length - 1] + [t[-1]])
dataset[i] = new_ins
else:
dataset[i] = ins[: max_length - 1] + [ins[-1]]
trunc_objects = [
(max_len['mfor_asp_len'], [_forward_asp_query, _forward_asp_answer_start, _forward_asp_answer_end,
_forward_asp_query_mask, _forward_asp_query_seg]),
(max_len['mfor_opi_len'], [ _forward_opi_query,_forward_opi_answer_start, _forward_opi_answer_end,
_forward_opi_query_mask,_forward_opi_query_seg]),
(max_len['mback_asp_len'],[ _backward_asp_query,_backward_asp_answer_start, _backward_asp_answer_end,
_backward_asp_query_mask, _backward_asp_query_seg]),
(max_len['mback_opi_len'],[_backward_opi_query,_backward_opi_answer_start, _backward_opi_answer_end,
_backward_opi_query_mask, _backward_opi_query_seg]),
(max_len['max_sent_len'], [_sentiment_query, _sentiment_query_mask, _sentiment_query_seg]),
(max_len['max_aspect_num'],[_sentiment_answer]),
]
for k, vs in trunc_objects:
for v in vs:
truncate(v, k)
if isinstance(v[0][0], list):
len_list = [len(x) for xx in v for x in xx]
print(k, sum(len_list) / len(len_list))
for l in len_list:
assert l == len_list[0], len_list
else:
len_list = [len(x) for x in v]
print(k, sum(len_list) / len(len_list))
for l in len_list:
assert l == len_list[0], len_list
result = {"_forward_asp_query":_forward_asp_query, "_forward_opi_query":_forward_opi_query,
"_forward_asp_answer_start":_forward_asp_answer_start, "_forward_asp_answer_end":_forward_asp_answer_end,
"_forward_opi_answer_start":_forward_opi_answer_start, "_forward_opi_answer_end":_forward_opi_answer_end,
"_forward_asp_query_mask":_forward_asp_query_mask, "_forward_opi_query_mask":_forward_opi_query_mask,
"_forward_asp_query_seg":_forward_asp_query_seg, "_forward_opi_query_seg":_forward_opi_query_seg,
"_backward_asp_query":_backward_asp_query, "_backward_opi_query":_backward_opi_query,
"_backward_asp_answer_start":_backward_asp_answer_start, "_backward_asp_answer_end":_backward_asp_answer_end,
"_backward_opi_answer_start":_backward_opi_answer_start, "_backward_opi_answer_end":_backward_opi_answer_end,
"_backward_asp_query_mask":_backward_asp_query_mask, "_backward_opi_query_mask":_backward_opi_query_mask,
"_backward_asp_query_seg":_backward_asp_query_seg, "_backward_opi_query_seg":_backward_opi_query_seg,
"_sentiment_query":_sentiment_query, "_sentiment_answer":_sentiment_answer, "_sentiment_query_mask":_sentiment_query_mask,
"_sentiment_query_seg":_sentiment_query_seg, "_aspect_num":_aspect_num, "_opinion_num":_opinion_num}
return OriginalDataset(result)
def tokenized_data(data):
max_forward_asp_query_length = 0
max_forward_opi_query_length = 0
max_backward_asp_query_length = 0
max_backward_opi_query_length = 0
max_sentiment_query_length = 0
max_aspect_num = 0
max_opinion_num = 0
tokenized_sample_list = []
for sample in data:
forward_querys = []
forward_answers = []
backward_querys = []
backward_answers = []
sentiment_querys = []
sentiment_answers = []
forward_querys_seg = []
backward_querys_seg = []
sentiment_querys_seg = []
if int(len(sample.forward_querys) - 1) > max_aspect_num:
max_aspect_num = int(len(sample.forward_querys) - 1)
if int(len(sample.backward_querys) - 1) > max_opinion_num:
max_opinion_num = int(len(sample.backward_querys) - 1)
for idx in range(len(sample.forward_querys)):
temp_query = sample.forward_querys[idx]
temp_text = sample.text
temp_answer = sample.forward_answers[idx]
temp_query_to = ['[CLS]'] + temp_query + ['[SEP]'] + temp_text
temp_query_seg = [0] * (len(temp_query) + 2) + [1] * len(temp_text)
temp_answer[0] = [-1] * (len(temp_query) + 2) + temp_answer[0]
temp_answer[1] = [-1] * (len(temp_query) + 2) + temp_answer[1]
assert len(temp_answer[0]) == len(temp_answer[1]) == len(temp_query_to) == len(temp_query_seg)
if idx == 0:
if len(temp_query_to) > max_forward_asp_query_length:
max_forward_asp_query_length = len(temp_query_to)
else:
if len(temp_query_to) > max_forward_opi_query_length:
max_forward_opi_query_length = len(temp_query_to)
forward_querys.append(temp_query_to)
forward_answers.append(temp_answer)
forward_querys_seg.append(temp_query_seg)
for idx in range(len(sample.backward_querys)):
temp_query = sample.backward_querys[idx]
temp_text = sample.text
temp_answer = sample.backward_answers[idx]
temp_query_to = ['[CLS]'] + temp_query + ['[SEP]'] + temp_text
temp_query_seg = [0] * (len(temp_query) + 2) + [1] * len(temp_text)
temp_answer[0] = [-1] * (len(temp_query) + 2) + temp_answer[0]
temp_answer[1] = [-1] * (len(temp_query) + 2) + temp_answer[1]
assert len(temp_answer[0]) == len(temp_answer[1]) == len(temp_query_to) == len(temp_query_seg)
if idx == 0:
if len(temp_query_to) > max_backward_opi_query_length:
max_backward_opi_query_length = len(temp_query_to)
else:
if len(temp_query_to) > max_backward_asp_query_length:
max_backward_asp_query_length = len(temp_query_to)
backward_querys.append(temp_query_to)
backward_answers.append(temp_answer)
backward_querys_seg.append(temp_query_seg)
for idx in range(len(sample.sentiment_querys)):
temp_query = sample.sentiment_querys[idx]
temp_text = sample.text
temp_answer = sample.sentiment_answers[idx]
temp_query_to = ['[CLS]'] + temp_query + ['[SEP]'] + temp_text
temp_query_seg = [0] * (len(temp_query) + 2) + [1] * len(temp_text)
assert len(temp_query_to) == len(temp_query_seg)
if len(temp_query_to) > max_sentiment_query_length:
max_sentiment_query_length = len(temp_query_to)
sentiment_querys.append(temp_query_to)
sentiment_answers.append(temp_answer)
sentiment_querys_seg.append(temp_query_seg)
temp_sample = sample_tokenized(sample.original_sample, forward_querys, forward_answers, backward_querys,
backward_answers, sentiment_querys, sentiment_answers, forward_querys_seg,
backward_querys_seg, sentiment_querys_seg)
tokenized_sample_list.append(temp_sample)
max_len = 256
return tokenized_sample_list, {'mfor_asp_len': min(max_len, max_forward_asp_query_length),
'mfor_opi_len': min(max_len, max_forward_opi_query_length),
'mback_asp_len': min(max_len, max_backward_asp_query_length),
'mback_opi_len': min(max_len, max_backward_opi_query_length),
'max_sent_len': min(max_len, max_sentiment_query_length),
'max_aspect_num': max_aspect_num,
'max_opinion_num': max_opinion_num}
if __name__ == '__main__':
sources = ['electronics', 'beauty', 'fashion', 'home', '14res', '15res', '16res', '14lap', 'all']
targets = ['book', 'grocery', 'office', 'pet', 'toy']
for dataset_name in sources + targets:
output_path = './data/preprocess/' + dataset_name + '.pt'
if dataset_name in sources:
train_data = torch.load("./data/preprocess/" + dataset_name + "_train_dual.pt")
dev_data = torch.load("./data/preprocess/" + dataset_name + "_dev_dual.pt")
test_data = torch.load("./data/preprocess/" + dataset_name + "_test_dual.pt")
train_tokenized, train_max_len = tokenized_data(train_data)
dev_tokenized, dev_max_len = tokenized_data(dev_data)
test_tokenized, test_max_len = tokenized_data(test_data)
print('preprocessing_data')
train_preprocess = pre_processing(train_tokenized, train_max_len)
dev_preprocess = pre_processing(dev_tokenized, dev_max_len)
test_preprocess = pre_processing(test_tokenized, test_max_len)
print('save_data')
torch.save({'train': train_preprocess, 'dev': dev_preprocess, 'test': test_preprocess}, output_path)
else:
test_data = torch.load("./data/preprocess/" + dataset_name + "_test_dual.pt")
test_tokenized, test_max_len = tokenized_data(test_data)
print('preprocessing_data')
test_preprocess = pre_processing(test_tokenized, test_max_len)
dev_data = torch.load("./data/preprocess/" + dataset_name + "_dev_dual.pt")
dev_tokenized, dev_max_len = tokenized_data(dev_data)
dev_preprocess = pre_processing(dev_tokenized, dev_max_len)
print('save_data')
torch.save({'train': None, 'dev': dev_preprocess, 'test': test_preprocess}, output_path)
| 24,242 | 50.037895 | 144 | py |
DMASTE | DMASTE-main/BMRC/Model.py | # coding: UTF-8
# @Author: Shaowei Chen, Contact: [email protected]
# @Date: 2021-5-4
from transformers import BertTokenizer, BertModel, BertConfig
import torch.nn as nn
class BERTModel(nn.Module):
def __init__(self, args):
hidden_size = args.hidden_size
super(BERTModel, self).__init__()
        # BERT model
# if args.bert_model_type == 'bert-base-uncased':
self._bert = BertModel.from_pretrained(args.bert_model_type)
self._tokenizer = BertTokenizer.from_pretrained(args.bert_model_type)
print('Bertbase model loaded')
# else:
# raise KeyError('Config.args.bert_model_type should be bert-based-uncased. ')
self.classifier_start = nn.Linear(hidden_size, 2)
self.classifier_end = nn.Linear(hidden_size, 2)
self._classifier_sentiment = nn.Linear(hidden_size, 3)
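        # Two token-level binary classifiers predict span starts/ends; a 3-way classifier
        # over the [CLS] state predicts sentiment (see forward below).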
def forward(self, query_tensor, query_mask, query_seg, step):
hidden_states = self._bert(query_tensor, attention_mask=query_mask, token_type_ids=query_seg)[0]
if step == 0: # predict entity
out_scores_start = self.classifier_start(hidden_states)
out_scores_end = self.classifier_end(hidden_states)
return out_scores_start, out_scores_end
else: # predict sentiment
cls_hidden_states = hidden_states[:, 0, :]
cls_hidden_scores = self._classifier_sentiment(cls_hidden_states)
return cls_hidden_scores
| 1,477 | 35.04878 | 104 | py |
DMASTE | DMASTE-main/BMRC/makeData_standard.py | # @Author: Shaowei Chen, Contact: [email protected]
# @Date: 2021-5-4
import torch
import pickle
from dataProcess import get_text
def make_standard(home_path, dataset_name, dataset_type):
# read triple
f = open(home_path + dataset_name + "/" + dataset_type + ".txt", "r", encoding="utf-8")
text_lines = f.readlines()
f.close()
# get text
_, _, _, triple_data = get_text(text_lines)
standard_list = []
for triplet in triple_data:
aspect_temp = []
opinion_temp = []
pair_temp = []
triplet_temp = []
asp_pol_temp = []
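        # Collect deduplicated gold aspects, opinions, aspect-opinion pairs,
        # aspect-polarity tuples and full triplets as the evaluation standard.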
for temp_t in triplet:
triplet_temp.append([temp_t[0][0], temp_t[0][-1], temp_t[1][0], temp_t[1][-1], temp_t[2]])
ap = [temp_t[0][0], temp_t[0][-1], temp_t[2]]
if ap not in asp_pol_temp:
asp_pol_temp.append(ap)
a = [temp_t[0][0], temp_t[0][-1]]
if a not in aspect_temp:
aspect_temp.append(a)
o = [temp_t[1][0], temp_t[1][-1]]
if o not in opinion_temp:
opinion_temp.append(o)
p = [temp_t[0][0], temp_t[0][-1], temp_t[1][0], temp_t[1][-1]]
if p not in pair_temp:
pair_temp.append(p)
standard_list.append({'asp_target': aspect_temp, 'opi_target': opinion_temp, 'asp_opi_target': pair_temp,
'asp_pol_target': asp_pol_temp, 'triplet': triplet_temp})
return standard_list
if __name__ == '__main__':
home_path = "../ia-dataset/"
sources = ['electronics', 'beauty', 'fashion', 'home', '14res', '15res', '16res', '14lap', 'all']
targets = ['book', 'grocery', 'office', 'pet', 'toy']
for dataset_name in sources + targets:
output_path = "./data/preprocess/" + dataset_name + "_standard.pt"
dev_standard = make_standard(home_path, dataset_name, 'dev')
test_standard = make_standard(home_path, dataset_name, 'test')
torch.save({'dev': dev_standard, 'test': test_standard}, output_path)
# else:
# test_standard = make_standard(home_path, dataset_name, 'test')
# torch.save({'dev': None, 'test': test_standard}, output_path)
| 2,219 | 35.393443 | 113 | py |
DMASTE | DMASTE-main/BMRC/scripts/cross-domain/dann/run.py | import os
import sys
import time
import random
import threading
source_list = ['electronics', 'home', 'beauty', 'fashion', 'all']
target_list = ['book', 'grocery', 'pet', 'toy']
class Param:
def __init__(self, model_name, source, target, ad_steps):
self.model_name = model_name
self.source = source
self.target = target
self.ad_steps = ad_steps
class myThread(threading.Thread):
def __init__(self, threadID):
threading.Thread.__init__(self)
self.threadID = threadID
def run(self):
os.system(f'bash scripts/cross-domain/dann/sub/{self.threadID}.sh')
print(f'bash scripts/cross-domain/dann/sub/{self.threadID}.sh')
def main():
param_list = []
for source in source_list:
for target in target_list:
for model_name in range(5):
for ad_steps in [1, 5, 10, 20, 30, 50]:
param = Param(model_name=model_name, source=source, target=target, ad_steps=ad_steps)
param_list.append(param)
num_params = len(param_list)
random.seed(0)
param_list = random.sample(param_list, num_params)
num_batch = int(sys.argv[1])
num_device = 8
batch_size = num_params // num_batch
os.system('rm -r ./scripts/cross-domain/dann/sub')
os.makedirs('./scripts/cross-domain/dann/sub', exist_ok=True)
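    # Split the shuffled hyperparameter grid round-robin into num_batch sub-scripts;
    # each generated command pins its run to a GPU via (index % num_device).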
for i, p in enumerate(param_list):
f = open(f'./scripts/cross-domain/dann/sub/{i % num_batch}.sh', 'a')
f.write(f'bash scripts/cross-domain/dann/maste.sh {p.source} {p.target} {p.ad_steps} {p.model_name} {i % num_device}\n')
f.close()
thread_list = []
worker = int(sys.argv[2])
for i in range(num_device):
thread = myThread(i + num_device * worker)
thread.start()
thread_list.append(thread)
time.sleep(2)
for t in thread_list:
t.join()
main() | 1,911 | 31.965517 | 128 | py |
DMASTE | DMASTE-main/BMRC/scripts/cross-domain/dann/run_xu.py | import os
import sys
import time
import random
import threading
source_list = ['14res', '15res', '16res', '14lap', '14lap', '14lap']
target_list = ['14lap', '14lap', '14lap', '14res', '15res', '16res']
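# source_list and target_list are paired element-wise (see zip below), one transfer direction per pair.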
class Param:
def __init__(self, model_name, source, target, ad_steps):
self.model_name = model_name
self.source = source
self.target = target
self.ad_steps = ad_steps
class myThread(threading.Thread):
def __init__(self, threadID):
threading.Thread.__init__(self)
self.threadID = threadID
def run(self):
os.system(f'bash scripts/cross-domain/dann/sub/{self.threadID}.sh')
print(f'bash scripts/cross-domain/dann/sub/{self.threadID}.sh')
def main():
param_list = []
for source, target in zip(source_list, target_list):
for model_name in range(5):
for ad_steps in [1, 10, 30, 50, 100, 500, 700, 800, 1000, 1500]:
param = Param(model_name=model_name, source=source, target=target, ad_steps=ad_steps)
param_list.append(param)
num_params = len(param_list)
random.seed(0)
param_list = random.sample(param_list, num_params)
num_batch = int(sys.argv[1])
num_device = 8
batch_size = num_params // num_batch
os.system('rm -r ./scripts/cross-domain/dann/sub')
os.makedirs('./scripts/cross-domain/dann/sub', exist_ok=True)
for i, p in enumerate(param_list):
f = open(f'./scripts/cross-domain/dann/sub/{i % num_batch}.sh', 'a')
f.write(f'bash scripts/cross-domain/dann/maste.sh {p.source} {p.target} {p.ad_steps} {p.model_name} {i % num_device}\n')
f.close()
thread_list = []
worker = int(sys.argv[2])
for i in range(num_device):
thread = myThread(i + num_device * worker)
thread.start()
thread_list.append(thread)
time.sleep(2)
for t in thread_list:
t.join()
main() | 1,935 | 32.964912 | 128 | py |
DMASTE | DMASTE-main/Generative-ABSA/main.py | import argparse
import os
import logging
import time
import pickle
from tqdm import tqdm
import torch
from torch.utils.data import DataLoader
import pytorch_lightning as pl
from pytorch_lightning import seed_everything
from transformers import AdamW, T5ForConditionalGeneration, T5Tokenizer
from transformers import get_linear_schedule_with_warmup
from data_utils import ABSADataset
from data_utils import write_results_to_log, read_line_examples_from_file
from eval_utils import compute_scores
from convert_to_triplets import convert
logger = logging.getLogger(__name__)
def init_args():
parser = argparse.ArgumentParser()
# basic settings
parser.add_argument("--task", default='uabsa', type=str, required=True,
help="The name of the task, selected from: [uabsa, aste, tasd, aope]")
parser.add_argument("--dataset", default='rest14', type=str, required=True,
help="The name of the dataset, selected from: [laptop14, rest14, rest15, rest16]")
parser.add_argument('--source', type=str)
parser.add_argument('--target', type=str)
parser.add_argument("--model_name_or_path", default='t5-base', type=str,
help="Path to pre-trained model or shortcut name")
parser.add_argument("--paradigm", default='annotation', type=str, required=True,
help="The way to construct target sentence, selected from: [annotation, extraction]")
parser.add_argument('--model_name', type=str)
parser.add_argument("--do_train", action='store_true', help="Whether to run training.")
parser.add_argument("--do_eval", action='store_true', help="Whether to run eval on the dev/test set.")
parser.add_argument("--do_direct_eval", action='store_true',
help="Whether to run direct eval on the dev/test set.")
# Other parameters
parser.add_argument("--max_seq_length", default=128, type=int)
parser.add_argument("--n_gpu", default=0)
parser.add_argument("--train_batch_size", default=16, type=int,
help="Batch size per GPU/CPU for training.")
parser.add_argument("--eval_batch_size", default=16, type=int,
help="Batch size per GPU/CPU for evaluation.")
parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument("--learning_rate", default=3e-4, type=float)
parser.add_argument("--num_train_epochs", default=20, type=int,
help="Total number of training epochs to perform.")
parser.add_argument('--seed', type=int, default=42, help="random seed for initialization")
# parser.add_argument('--all_output_dir', type=str)
parser.add_argument('--model_dir', type=str)
parser.add_argument('--log_dir', type=str)
parser.add_argument('--tmp_dir', type=str)
# training details
parser.add_argument("--weight_decay", default=0.0, type=float)
parser.add_argument("--adam_epsilon", default=1e-8, type=float)
parser.add_argument("--warmup_steps", default=0.0, type=float)
args = parser.parse_args()
# set up output dir which looks like './aste/rest14/extraction/'
# if not os.path.exists(os.path.join(args.all_output_dir, 'model')):
# os.mkdir(os.path.join(args.all_output_dir, 'model'))
# task_dir = f"{args.all_output_dir}/temp/"
# if not os.path.exists(task_dir):
# os.mkdir(task_dir)
# os.makedirs(task_dir, exist_ok=True)
# args.output_dir = task_dir
os.makedirs(os.path.join(args.log_dir, args.model_name), exist_ok=True)
os.makedirs(os.path.join(args.model_dir, args.model_name), exist_ok=True)
os.makedirs(os.path.join(args.tmp_dir, args.model_name), exist_ok=True)
return args
def get_dataset(tokenizer, data_dir, domain, type_path, args):
return ABSADataset(tokenizer=tokenizer, data_dir=data_dir, domain=domain, data_type=type_path,
paradigm=args.paradigm, task=args.task, max_len=args.max_seq_length)
class T5FineTuner(pl.LightningModule):
def __init__(self, hparams):
super(T5FineTuner, self).__init__()
self.hparams = hparams
self.model = T5ForConditionalGeneration.from_pretrained(hparams.model_name_or_path)
self.tokenizer = T5Tokenizer.from_pretrained(hparams.model_name_or_path)
def is_logger(self):
return True
def forward(self, input_ids, attention_mask=None, decoder_input_ids=None,
decoder_attention_mask=None, labels=None):
return self.model(
input_ids,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
labels=labels,
)
def _step(self, batch):
lm_labels = batch["target_ids"]
lm_labels[lm_labels[:, :] == self.tokenizer.pad_token_id] = -100
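        # Pad positions are set to -100 so the cross-entropy loss inside T5 ignores them.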
outputs = self(
input_ids=batch["source_ids"],
attention_mask=batch["source_mask"],
labels=lm_labels,
decoder_attention_mask=batch['target_mask']
)
loss = outputs[0]
return loss
def training_step(self, batch, batch_idx):
loss = self._step(batch)
tensorboard_logs = {"train_loss": loss}
return {"loss": loss, "log": tensorboard_logs}
def training_epoch_end(self, outputs):
avg_train_loss = torch.stack([x["loss"] for x in outputs]).mean()
tensorboard_logs = {"avg_train_loss": avg_train_loss}
return {"avg_train_loss": avg_train_loss, "log": tensorboard_logs, 'progress_bar': tensorboard_logs}
def validation_step(self, batch, batch_idx):
loss = self._step(batch)
return {"val_loss": loss}
def validation_epoch_end(self, outputs):
avg_loss = torch.stack([x["val_loss"] for x in outputs]).mean()
tensorboard_logs = {"val_loss": avg_loss}
return {"avg_val_loss": avg_loss, "log": tensorboard_logs, 'progress_bar': tensorboard_logs}
def configure_optimizers(self):
'''Prepare optimizer and schedule (linear warmup and decay)'''
model = self.model
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": self.hparams.weight_decay,
},
{
"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
"weight_decay": 0.0,
},
]
optimizer = AdamW(optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon)
self.opt = optimizer
return [optimizer]
def optimizer_step(self, epoch, batch_idx, optimizer, optimizer_idx, second_order_closure=None):
if self.trainer.use_tpu:
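            # `xm` is expected to be torch_xla.core.xla_model (TPU-only branch); it is not imported in this file.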
xm.optimizer_step(optimizer)
else:
optimizer.step()
optimizer.zero_grad()
self.lr_scheduler.step()
def get_tqdm_dict(self):
tqdm_dict = {"loss": "{:.4f}".format(self.trainer.avg_loss), "lr": self.lr_scheduler.get_last_lr()[-1]}
return tqdm_dict
def train_dataloader(self):
train_dataset = get_dataset(tokenizer=self.tokenizer, data_dir=self.hparams.dataset, domain=self.hparams.source,
type_path="train", args=self.hparams)
dataloader = DataLoader(train_dataset, batch_size=self.hparams.train_batch_size, drop_last=True, shuffle=True, num_workers=4)
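        # Total optimization steps for the linear warmup/decay schedule:
        # batches per epoch, divided by gradient accumulation, times the number of epochs.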
t_total = (
(len(dataloader.dataset) // (self.hparams.train_batch_size * max(1, len(self.hparams.n_gpu))))
// self.hparams.gradient_accumulation_steps
* float(self.hparams.num_train_epochs)
)
scheduler = get_linear_schedule_with_warmup(
self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=t_total
)
self.lr_scheduler = scheduler
return dataloader
def val_dataloader(self):
val_dataset = get_dataset(tokenizer=self.tokenizer, data_dir=self.hparams.dataset, domain=self.hparams.source,
type_path="dev", args=self.hparams)
return DataLoader(val_dataset, batch_size=self.hparams.eval_batch_size, num_workers=4)
class LoggingCallback(pl.Callback):
def on_validation_end(self, trainer, pl_module):
logger.info("***** Validation results *****")
if pl_module.is_logger():
metrics = trainer.callback_metrics
# Log results
for key in sorted(metrics):
if key not in ["log", "progress_bar"]:
logger.info("{} = {}\n".format(key, str(metrics[key])))
def on_test_end(self, trainer, pl_module):
logger.info("***** Test results *****")
if pl_module.is_logger():
metrics = trainer.callback_metrics
# Log and save results to file
output_test_results_file = os.path.join(pl_module.hparams.output_dir, "test_results.txt")
with open(output_test_results_file, "w") as writer:
for key in sorted(metrics):
if key not in ["log", "progress_bar"]:
logger.info("{} = {}\n".format(key, str(metrics[key])))
writer.write("{} = {}\n".format(key, str(metrics[key])))
def evaluate(data_loader, model, paradigm, task, sents):
"""
Compute scores given the predictions and gold labels
"""
device = torch.device(f'cuda:{args.n_gpu}')
model.model.to(device)
model.model.eval()
outputs, targets = [], []
for batch in tqdm(data_loader):
# need to push the data to device
outs = model.model.generate(input_ids=batch['source_ids'].to(device),
attention_mask=batch['source_mask'].to(device),
max_length=128)
dec = [tokenizer.decode(ids, skip_special_tokens=True) for ids in outs]
target = [tokenizer.decode(ids, skip_special_tokens=True) for ids in batch["target_ids"]]
outputs.extend(dec)
targets.extend(target)
raw_scores, fixed_scores, all_labels, all_preds, all_preds_fixed = compute_scores(outputs, targets, sents, paradigm, task)
return raw_scores, fixed_scores, all_preds_fixed, targets
# initialization
args = init_args()
print("\n", "="*30, f"NEW EXP: {args.task.upper()} on {args.dataset}", "="*30, "\n")
seed_everything(args.seed)
tokenizer = T5Tokenizer.from_pretrained(args.model_name_or_path)
# show one sample to check the sanity of the code and the expected output
print(f"Here is an example (from dev set) under `{args.paradigm}` paradigm:")
dataset = ABSADataset(tokenizer=tokenizer, data_dir=args.dataset, domain=args.source, data_type='dev',
paradigm=args.paradigm, task=args.task, max_len=args.max_seq_length)
data_sample = dataset[2] # a random data sample
print('Input :', tokenizer.decode(data_sample['source_ids'], skip_special_tokens=True))
print('Output:', tokenizer.decode(data_sample['target_ids'], skip_special_tokens=True))
# training process
if args.do_train:
print("\n****** Conduct Training ******")
model = T5FineTuner(args)
checkpoint_callback = pl.callbacks.ModelCheckpoint(
filepath=args.tmp_dir, prefix="ckt", monitor='val_loss', mode='min', save_top_k=3
)
# prepare for trainer
train_params = dict(
default_root_dir=os.path.join(args.tmp_dir, args.model_name),
accumulate_grad_batches=args.gradient_accumulation_steps,
gpus=args.n_gpu,
gradient_clip_val=1.0,
#amp_level='O1',
max_epochs=args.num_train_epochs,
checkpoint_callback=checkpoint_callback,
callbacks=[LoggingCallback()],
)
trainer = pl.Trainer(**train_params)
trainer.fit(model)
torch.save(model.model, os.path.join(args.model_dir, args.model_name + '.pt'))
# save the final model
# model.model.save_pretrained(args.output_dir)
print("Finish training and saving the model!")
if args.do_eval:
print("\n****** Conduct Evaluating ******")
# model = T5FineTuner(args)
dev_results, test_results = {}, {}
best_f1, best_checkpoint, best_epoch = -999999.0, None, None
all_checkpoints, all_epochs = [], []
# retrieve all the saved checkpoints for model selection
# saved_model_dir = args.output_dir
# for f in os.listdir(saved_model_dir):
# file_name = os.path.join(saved_model_dir, f)
# if 'cktepoch' in file_name:
# all_checkpoints.append(file_name)
# conduct some selection (or not)
# print(f"We will perform validation on the following checkpoints: {all_checkpoints}")
# load dev and test datasets
# dev_dataset = ABSADataset(tokenizer, data_dir=args.dataset, domain=args.source, data_type='dev',
# paradigm=args.paradigm, task=args.task, max_len=args.max_seq_length)
# dev_loader = DataLoader(dev_dataset, batch_size=32, num_workers=4)
# dev_sents, dev_labels = read_line_examples_from_file(f'{args.dataset}/{args.source}/dev.txt')
test_sents, test_labels = read_line_examples_from_file(f'{args.dataset}/{args.target}/test.txt')
test_dataset = ABSADataset(tokenizer, data_dir=args.dataset, domain=args.target, data_type='test',
paradigm=args.paradigm, task=args.task, max_len=args.max_seq_length)
test_loader = DataLoader(test_dataset, batch_size=32, num_workers=4)
print('model_dir', os.path.join(args.model_dir, args.model_name + '.pt'))
model = T5FineTuner(args)
model.model = torch.load(os.path.join(args.model_dir, args.model_name + '.pt'))
# dev_raw, dev_fixed, _ = evaluate(dev_loader, model, args.paradigm, args.task, dev_sents)
test_raw, test_fixed, test_pred, test_targets = evaluate(test_loader, model, args.paradigm, args.task, test_sents)
# parsed_pred = convert(test_sents, test_pred, test_labels)
os.makedirs(args.log_dir, exist_ok=True)
log_file_path = os.path.join(args.log_dir, args.model_name, 'metric.txt')
local_time = time.asctime(time.localtime(time.time()))
exp_settings = f"{args.task} on {args.source}2{args.target} under {args.paradigm}; Train bs={args.train_batch_size}, num_epochs = {args.num_train_epochs}"
exp_results = f"Raw TEST F1 = {test_raw['f1']:.4f}, Fixed F1 = {test_fixed['f1']:.4f}"
log_str = f'============================================================\n'
log_str += f"{local_time}\n{exp_settings}\n{exp_results}\n\n"
with open(log_file_path, "w") as f:
f.write(log_str)
with open(os.path.join(args.log_dir, args.model_name, 'pred.txt'), 'w') as f:
for p, tgt in zip(test_pred, test_targets):
f.write(str({'pred': p, 'target': tgt}) + '\n')
# evaluation process
if args.do_direct_eval:
print("\n****** Conduct Evaluating with the last state ******")
# model = T5FineTuner(args)
# print("Reload the model")
# model.model.from_pretrained(args.output_dir)
sents, _ = read_line_examples_from_file(f'{args.dataset}/{args.target}/test.txt')
print()
test_dataset = ABSADataset(tokenizer, data_dir=args.dataset, domain=args.target, data_type='test',
paradigm=args.paradigm, task=args.task, max_len=args.max_seq_length)
test_loader = DataLoader(test_dataset, batch_size=32, num_workers=4)
# print(test_loader.device)
    raw_scores, fixed_scores, _, _ = evaluate(test_loader, model, args.paradigm, args.task, sents)
# print(scores)
# write to file
    os.makedirs(args.log_dir, exist_ok=True)
    log_file_path = os.path.join(args.log_dir, f"{args.task}-{args.dataset}.txt")
local_time = time.asctime(time.localtime(time.time()))
exp_settings = f"{args.task} on {args.dataset} under {args.paradigm}; Train bs={args.train_batch_size}, num_epochs = {args.num_train_epochs}"
exp_results = f"Raw F1 = {raw_scores['f1']:.4f}, Fixed F1 = {fixed_scores['f1']:.4f}"
log_str = f'============================================================\n'
log_str += f"{local_time}\n{exp_settings}\n{exp_results}\n\n"
with open(log_file_path, "a+") as f:
f.write(log_str)
| 16,407 | 42.638298 | 158 | py |
DMASTE | DMASTE-main/Generative-ABSA/convert_to_triplets.py | def idx2term(sent, triplets):
words = sent.split()
ret = []
for a, o, s in triplets:
a_term = words[a[0]: a[-1] + 1]
o_term = words[o[0]: o[-1] + 1]
ret.append((' '.join(a_term), ' '.join(o_term), s))
return ret
def convert(examples, all_preds, golden):
ret = []
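    # Map each predicted (aspect, opinion, sentiment) term triple back to word-index spans
    # by locating the first occurrence of each term string in the sentence.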
for sent, pred_triplets in zip(examples, all_preds):
sent = ' '.join(sent)
ret_triplets = []
for a, o, s in pred_triplets:
a_start = sent.find(a)
o_start = sent.find(o)
if a_start != -1 and o_start != -1:
a_span = [len(sent[: a_start].split()), len(sent[: a_start].split()) + len(a.split()) - 1]
o_span = [len(sent[: o_start].split()), len(sent[: o_start].split()) + len(o.split()) - 1]
a_span = [a_span[0]] if a_span[0] == a_span[1] else a_span
o_span = [o_span[0]] if o_span[0] == o_span[1] else o_span
ret_triplets.append((a_span, o_span, s[:3].upper()))
ret.append(ret_triplets)
tp = 0
pred_num = 0
golden_num = 0
for pred, gold in zip(ret, golden):
pred = [str(x) for x in pred]
gold = [str(x) for x in gold]
correct = set(pred) & set(gold)
tp += len(correct)
pred_num += len(pred)
golden_num += len(gold)
p = tp / pred_num if pred_num != 0 else 0
r = tp / golden_num if golden_num != 0 else 0
f1 = 2 * p * r / (p + r) if p + r != 0 else 0
print(f'p: {p}, r: {r}, f1: {f1}')
| 1,537 | 39.473684 | 106 | py |
DMASTE | DMASTE-main/Generative-ABSA/data_utils.py | # This file contains all data loading and transformation functions
import time
from torch.utils.data import Dataset
senttag2word = {'POS': 'positive', 'NEG': 'negative', 'NEU': 'neutral'}
def read_line_examples_from_file(data_path):
"""
Read data from file, each line is: sent####labels
Return List[List[word]], List[Tuple]
"""
sents, labels = [], []
with open(data_path, 'r', encoding='UTF-8') as fp:
words, labels = [], []
for line in fp:
line = line.strip()
if line != '':
words, tuples = line.split('####')
sents.append(words.split())
labels.append(eval(tuples))
print(f"Total examples = {len(sents)}")
return sents, labels
def get_annotated_uabsa_targets(sents, labels):
annotated_targets = []
num_sents = len(sents)
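    # Wrap each aspect span in place as [aspect|sentiment] to form the annotation-style target sequence.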
for i in range(num_sents):
tuples = labels[i]
if tuples != []:
# tup: ([3, 4], POS)
for tup in tuples:
ap, sent = tup[0], tup[1]
if len(ap) == 1:
sents[i][ap[0]] = f"[{sents[i][ap[0]]}|{senttag2word[sent]}]"
else:
sents[i][ap[0]] = f"[{sents[i][ap[0]]}"
sents[i][ap[-1]] = f"{sents[i][ap[-1]]}|{senttag2word[sent]}]"
annotated_targets.append(sents[i])
return annotated_targets
def get_annotated_aope_targets(sents, labels):
annotated_targets = []
num_sents = len(sents)
for i in range(num_sents):
tuples = labels[i]
# tup: ([3, 4], [2])
for tup in tuples:
ap, op = tup[0], tup[1]
opt = [sents[i][j] for j in op]
# multiple OT for one AP
if '[' in sents[i][ap[0]]:
if len(ap) == 1:
sents[i][ap[0]] = f"{sents[i][ap[0]][:-1]}, {' '.join(opt)}]"
else:
sents[i][ap[-1]] = f"{sents[i][ap[-1]][:-1]}, {' '.join(opt)}]"
else:
annotation = f"{' '.join(opt)}"
if len(ap) == 1:
sents[i][ap[0]] = f"[{sents[i][ap[0]]}|{annotation}]"
else:
sents[i][ap[0]] = f"[{sents[i][ap[0]]}"
sents[i][ap[-1]] = f"{sents[i][ap[-1]]}|{annotation}]"
annotated_targets.append(sents[i])
return annotated_targets
def get_annotated_aste_targets(sents, labels):
annotated_targets = []
num_sents = len(sents)
for i in range(num_sents):
tuples = labels[i]
# tup: ([2], [5], 'NEG')
for tup in tuples:
ap, op, sent = tup[0], tup[1], tup[2]
op = [sents[i][j] for j in op]
# multiple OT for one AP
if '[' in sents[i][ap[0]]:
# print(i)
if len(ap) == 1:
sents[i][ap[0]] = f"{sents[i][ap[0]][:-1]}, {' '.join(op)}]"
else:
sents[i][ap[-1]] = f"{sents[i][ap[-1]][:-1]}, {' '.join(op)}]"
else:
annotation = f"{senttag2word[sent]}|{' '.join(op)}"
if len(ap) == 1:
sents[i][ap[0]] = f"[{sents[i][ap[0]]}|{annotation}]"
else:
sents[i][ap[0]] = f"[{sents[i][ap[0]]}"
sents[i][ap[-1]] = f"{sents[i][ap[-1]]}|{annotation}]"
annotated_targets.append(sents[i])
return annotated_targets
def get_annotated_tasd_targets(sents, labels):
targets = []
num_sents = len(sents)
sents_str = [' '.join(s) for s in sents]
for i in range(num_sents):
s_str = sents_str[i]
at_dict = {}
for triplet in labels[i]:
at, ac, polarity = triplet[0], triplet[1], triplet[2]
if at in at_dict:
at_dict[at][0].append(ac)
else:
at_dict[at] = [[ac], polarity]
for at, ac_pol in at_dict.items():
if len(ac_pol[0]) == 1:
annotated_at = f"[{at}|{ac_pol[0][0]}|{ac_pol[1]}]"
else:
annotated_at = f"[{at}|{', '.join(ac_pol[0])}|{ac_pol[1]}]"
if at != 'NULL':
# print('at:', at, 'replaced_at:', annotated_at)
s_str = s_str.replace(at, annotated_at)
else:
s_str += f" {annotated_at}"
targets.append(s_str)
return targets
def get_extraction_uabsa_targets(sents, labels):
targets = []
for i, label in enumerate(labels):
if label == []:
targets.append('None')
else:
all_tri = []
for tri in label:
if len(tri[0]) == 1:
a = sents[i][tri[0][0]]
else:
start_idx, end_idx = tri[0][0], tri[0][-1]
a = ' '.join(sents[i][start_idx:end_idx+1])
c = senttag2word[tri[1]]
all_tri.append((a, c))
label_strs = ['('+', '.join(l)+')' for l in all_tri]
targets.append('; '.join(label_strs))
return targets
def get_extraction_aope_targets(sents, labels):
targets = []
for i, label in enumerate(labels):
all_tri = []
for tri in label:
if len(tri[0]) == 1:
a = sents[i][tri[0][0]]
else:
start_idx, end_idx = tri[0][0], tri[0][-1]
a = ' '.join(sents[i][start_idx:end_idx+1])
if len(tri[1]) == 1:
b = sents[i][tri[1][0]]
else:
start_idx, end_idx = tri[1][0], tri[1][-1]
b = ' '.join(sents[i][start_idx:end_idx+1])
all_tri.append((a, b))
label_strs = ['('+', '.join(l)+')' for l in all_tri]
targets.append('; '.join(label_strs))
return targets
def get_extraction_tasd_targets(sents, labels):
targets = []
for label in labels:
label_strs = ['('+', '.join(l)+')' for l in label]
target = '; '.join(label_strs)
targets.append(target)
return targets
def get_extraction_aste_targets(sents, labels):
targets = []
for i, label in enumerate(labels):
all_tri = []
for tri in label:
if len(tri[0]) == 1:
a = sents[i][tri[0][0]]
else:
start_idx, end_idx = tri[0][0], tri[0][-1]
a = ' '.join(sents[i][start_idx:end_idx+1])
if len(tri[1]) == 1:
b = sents[i][tri[1][0]]
else:
start_idx, end_idx = tri[1][0], tri[1][-1]
b = ' '.join(sents[i][start_idx:end_idx+1])
c = senttag2word[tri[2]]
all_tri.append((a, b, c))
label_strs = ['('+', '.join(l)+')' for l in all_tri]
targets.append('; '.join(label_strs))
return targets
def get_transformed_io(data_path, paradigm, task):
"""
The main function to transform the Input & Output according to
the specified paradigm and task
"""
sents, labels = read_line_examples_from_file(data_path)
# the input is just the raw sentence
inputs = [s.copy() for s in sents]
# Get target according to the paradigm
# annotate the sents (with label info) as targets
if paradigm == 'annotation':
if task == 'uabsa':
targets = get_annotated_uabsa_targets(sents, labels)
elif task == 'aste':
targets = get_annotated_aste_targets(sents, labels)
elif task == 'tasd':
targets = get_annotated_tasd_targets(sents, labels)
elif task == 'aope':
targets = get_annotated_aope_targets(sents, labels)
else:
raise NotImplementedError
# directly treat label infor as the target
elif paradigm == 'extraction':
if task == 'uabsa':
targets = get_extraction_uabsa_targets(sents, labels)
elif task == 'aste':
targets = get_extraction_aste_targets(sents, labels)
elif task == 'tasd':
targets = get_extraction_tasd_targets(sents, labels)
elif task == 'aope':
targets = get_extraction_aope_targets(sents, labels)
else:
raise NotImplementedError
else:
print('Unsupported paradigm!')
raise NotImplementedError
return inputs, targets
class ABSADataset(Dataset):
def __init__(self, tokenizer, data_dir, domain, data_type, paradigm, task, max_len=128):
# 'data/aste/rest16/train.txt'
self.data_path = f'{data_dir}/{domain}/{data_type}.txt'
self.paradigm = paradigm
self.task = task
self.max_len = max_len
self.tokenizer = tokenizer
self.inputs = []
self.targets = []
self._build_examples()
def __len__(self):
return len(self.inputs)
def __getitem__(self, index):
source_ids = self.inputs[index]["input_ids"].squeeze()
target_ids = self.targets[index]["input_ids"].squeeze()
src_mask = self.inputs[index]["attention_mask"].squeeze() # might need to squeeze
target_mask = self.targets[index]["attention_mask"].squeeze() # might need to squeeze
return {"source_ids": source_ids, "source_mask": src_mask,
"target_ids": target_ids, "target_mask": target_mask}
def _build_examples(self):
inputs, targets = get_transformed_io(self.data_path, self.paradigm, self.task)
for i in range(len(inputs)):
input = ' '.join(inputs[i])
if self.paradigm == 'annotation':
if self.task != 'tasd':
target = ' '.join(targets[i])
else:
target = targets[i]
else:
target = targets[i]
tokenized_input = self.tokenizer.batch_encode_plus(
[input], max_length=self.max_len, pad_to_max_length=True, truncation=True,
return_tensors="pt",
)
tokenized_target = self.tokenizer.batch_encode_plus(
[target], max_length=self.max_len, pad_to_max_length=True, truncation=True,
return_tensors="pt"
)
self.inputs.append(tokenized_input)
self.targets.append(tokenized_target)
def write_results_to_log(log_file_path, best_test_result, args, dev_results, test_results, global_steps):
"""
Record dev and test results to log file
"""
local_time = time.asctime(time.localtime(time.time()))
exp_settings = "Exp setting: {0} on {1} under {2} | {3:.4f} | ".format(
args.task, args.dataset, args.paradigm, best_test_result
)
train_settings = "Train setting: bs={0}, lr={1}, num_epochs={2}".format(
args.train_batch_size, args.learning_rate, args.num_train_epochs
)
results_str = "\n* Results *: Dev / Test \n"
metric_names = ['f1', 'precision', 'recall']
for gstep in global_steps:
results_str += f"Step-{gstep}:\n"
for name in metric_names:
name_step = f'{name}_{gstep}'
results_str += f"{name:<8}: {dev_results[name_step]:.4f} / {test_results[name_step]:.4f}"
results_str += ' '*5
results_str += '\n'
log_str = f"{local_time}\n{exp_settings}\n{train_settings}\n{results_str}\n\n"
with open(log_file_path, "a+") as f:
f.write(log_str) | 11,367 | 34.304348 | 105 | py |
DMASTE | DMASTE-main/Generative-ABSA/error_analysis.py | import pickle as pk
def get_result(source, target):
sentences = f'data/aste/{target}/test.txt'
in_domain = f'log/results-aste-{target}.pickle'
cross_domain = f'log/results-aste-{source}_2_{target}.pickle'
lines = []
with open(sentences) as f:
for line in f:
lines.append(line.strip())
return pk.load(open(in_domain, 'rb')), pk.load(open(cross_domain, 'rb')), lines
def analyse(labels, in_domains, cross_domains, sentences):
assert len(labels) == len(in_domains) == len(cross_domains)
for i in range(len(labels)):
in_d = set(in_domains[i])
cross_d = set(cross_domains[i])
if len(in_d) != len(cross_d) or len(in_d) != len(in_d & cross_d):
print(i, sentences[i])
print('in domain: ', in_d)
print('cross domain:', cross_d)
print('label', labels[i])
print()
def main():
source = 'res14'
target = 'lap14'
in_domain, cross_domain, sentences = get_result(source, target)
print(in_domain.keys())
label = in_domain['labels']
pred_cross = cross_domain['preds_fixed']
pred_in = in_domain['preds_fixed']
analyse(label, pred_in, pred_cross, sentences)
if __name__ == '__main__':
main() | 1,251 | 30.3 | 83 | py |
DMASTE | DMASTE-main/Generative-ABSA/eval_utils.py | # This file contains the evaluation functions
import re
import editdistance
sentiment_word_list = ['positive', 'negative', 'neutral']
aspect_cate_list = ['location general',
'food prices',
'food quality',
'ambience general',
'service general',
'restaurant prices',
'drinks prices',
'restaurant miscellaneous',
'drinks quality',
'drinks style_options',
'restaurant general',
'food style_options']
def extract_spans_extraction(task, seq):
extractions = []
if task == 'uabsa' and seq.lower() == 'none':
return []
else:
if task in ['uabsa', 'aope']:
all_pt = seq.split('; ')
for pt in all_pt:
pt = pt[1:-1]
try:
a, b = pt.split(', ')
except ValueError:
a, b = '', ''
extractions.append((a, b))
elif task in ['tasd', 'aste']:
all_pt = seq.split('; ')
for pt in all_pt:
pt = pt[1:-1]
try:
a, b, c = pt.split(', ')
except ValueError:
a, b, c = '', '', ''
extractions.append((a, b, c))
return extractions
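# Illustrative sketch, not part of the original file: how an extraction-style output
# sequence is parsed back into (aspect, opinion, sentiment) triplets for ASTE.
def _demo_extract_spans_extraction():
    seq = '(battery life, long, positive); (screen, dim, negative)'
    # returns [('battery life', 'long', 'positive'), ('screen', 'dim', 'negative')]
    return extract_spans_extraction('aste', seq)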
def extract_spans_annotation(task, seq):
if task in ['aste', 'tasd']:
extracted_spans = extract_triplets(seq)
elif task in ['aope', 'uabsa']:
extracted_spans = extract_pairs(seq)
return extracted_spans
def extract_pairs(seq):
    aps = re.findall(r'\[.*?\]', seq)
aps = [ap[1:-1] for ap in aps]
pairs = []
for ap in aps:
        # the bracketed span may not split cleanly into 'aspect|opinions', so guard the unpack
try:
at, ots = ap.split('|')
except ValueError:
at, ots = '', ''
if ',' in ots: # multiple ots
for ot in ots.split(', '):
pairs.append((at, ot))
else:
pairs.append((at, ots))
return pairs
def extract_triplets(seq):
    aps = re.findall(r'\[.*?\]', seq)
aps = [ap[1:-1] for ap in aps]
triplets = []
for ap in aps:
try:
a, b, c = ap.split('|')
except ValueError:
a, b, c = '', '', ''
# for ASTE
if b in sentiment_word_list:
if ',' in c:
for op in c.split(', '):
triplets.append((a, b, op))
else:
triplets.append((a, b, c))
# for TASD
else:
if ',' in b:
for ac in b.split(', '):
triplets.append((a, ac, c))
else:
triplets.append((a, b, c))
return triplets
def recover_terms_with_editdistance(original_term, sent):
words = original_term.split(' ')
new_words = []
for word in words:
edit_dis = []
for token in sent:
edit_dis.append(editdistance.eval(word, token))
smallest_idx = edit_dis.index(min(edit_dis))
new_words.append(sent[smallest_idx])
new_term = ' '.join(new_words)
return new_term
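# Illustrative sketch, not part of the original file: a generated term that drifted from
# the source sentence is repaired token-by-token with the closest word by edit distance.
def _demo_recover_terms_with_editdistance():
    sent = ['the', 'battery', 'life', 'is', 'great']
    # 'batery life' is recovered as 'battery life'
    return recover_terms_with_editdistance('batery life', sent)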
def fix_preds_uabsa(all_pairs, sents):
all_new_pairs = []
for i, pairs in enumerate(all_pairs):
new_pairs = []
if pairs == []:
all_new_pairs.append(pairs)
else:
for pair in pairs:
# AT not in the original sentence
if pair[0] not in ' '.join(sents[i]):
# print('Issue')
new_at = recover_terms_with_editdistance(pair[0], sents[i])
else:
new_at = pair[0]
if pair[1] not in sentiment_word_list:
new_sentiment = recover_terms_with_editdistance(pair[1], sentiment_word_list)
else:
new_sentiment = pair[1]
new_pairs.append((new_at, new_sentiment))
# print(pair, '>>>>>', word_and_sentiment)
# print(all_target_pairs[i])
all_new_pairs.append(new_pairs)
return all_new_pairs
def fix_preds_aope(all_pairs, sents):
all_new_pairs = []
for i, pairs in enumerate(all_pairs):
new_pairs = []
if pairs == []:
all_new_pairs.append(pairs)
else:
for pair in pairs:
#print(pair)
# AT not in the original sentence
if pair[0] not in ' '.join(sents[i]):
# print('Issue')
new_at = recover_terms_with_editdistance(pair[0], sents[i])
else:
new_at = pair[0]
# OT not in the original sentence
ots = pair[1].split(', ')
new_ot_list = []
for ot in ots:
if ot not in ' '.join(sents[i]):
# print('Issue')
new_ot_list.append(recover_terms_with_editdistance(ot, sents[i]))
else:
new_ot_list.append(ot)
new_ot = ', '.join(new_ot_list)
new_pairs.append((new_at, new_ot))
# print(pair, '>>>>>', word_and_sentiment)
# print(all_target_pairs[i])
all_new_pairs.append(new_pairs)
return all_new_pairs
# for ASTE
def fix_preds_aste(all_pairs, sents):
all_new_pairs = []
for i, pairs in enumerate(all_pairs):
new_pairs = []
if pairs == []:
all_new_pairs.append(pairs)
else:
for pair in pairs:
#two formats have different orders
p0, p1, p2 = pair
# for annotation-type
if p1 in sentiment_word_list:
at, ott, ac = p0, p2, p1
io_format = 'annotation'
# for extraction type
elif p2 in sentiment_word_list:
at, ott, ac = p0, p1, p2
io_format = 'extraction'
#print(pair)
# AT not in the original sentence
if at not in ' '.join(sents[i]):
# print('Issue')
new_at = recover_terms_with_editdistance(at, sents[i])
else:
new_at = at
if ac not in sentiment_word_list:
new_sentiment = recover_terms_with_editdistance(ac, sentiment_word_list)
else:
new_sentiment = ac
# OT not in the original sentence
ots = ott.split(', ')
new_ot_list = []
for ot in ots:
if ot not in ' '.join(sents[i]):
# print('Issue')
new_ot_list.append(recover_terms_with_editdistance(ot, sents[i]))
else:
new_ot_list.append(ot)
new_ot = ', '.join(new_ot_list)
if io_format == 'extraction':
new_pairs.append((new_at, new_ot, new_sentiment))
else:
new_pairs.append((new_at, new_sentiment, new_ot))
# print(pair, '>>>>>', word_and_sentiment)
# print(all_target_pairs[i])
all_new_pairs.append(new_pairs)
return all_new_pairs
def fix_preds_tasd(all_pairs, sents):
all_new_pairs = []
for i, pairs in enumerate(all_pairs):
new_pairs = []
if pairs == []:
all_new_pairs.append(pairs)
else:
for pair in pairs:
#print(pair)
# AT not in the original sentence
                sents_and_null = ' '.join(sents[i]) + ' NULL'
if pair[0] not in sents_and_null:
# print('Issue')
new_at = recover_terms_with_editdistance(pair[0], sents[i])
else:
new_at = pair[0]
# AC not in the list
acs = pair[1].split(', ')
new_ac_list = []
for ac in acs:
if ac not in aspect_cate_list:
new_ac_list.append(recover_terms_with_editdistance(ac, aspect_cate_list))
else:
new_ac_list.append(ac)
new_ac = ', '.join(new_ac_list)
if pair[2] not in sentiment_word_list:
new_sentiment = recover_terms_with_editdistance(pair[2], sentiment_word_list)
else:
new_sentiment = pair[2]
new_pairs.append((new_at, new_ac, new_sentiment))
# print(pair, '>>>>>', word_and_sentiment)
# print(all_target_pairs[i])
all_new_pairs.append(new_pairs)
return all_new_pairs
def fix_pred_with_editdistance(all_predictions, sents, task):
if task == 'uabsa':
fixed_preds = fix_preds_uabsa(all_predictions, sents)
elif task == 'aope':
fixed_preds = fix_preds_aope(all_predictions, sents)
elif task == 'aste':
fixed_preds = fix_preds_aste(all_predictions, sents)
elif task == 'tasd':
fixed_preds = fix_preds_tasd(all_predictions, sents)
else:
print("*** Unimplemented Error ***")
fixed_preds = all_predictions
return fixed_preds
def compute_f1_scores(pred_pt, gold_pt):
"""
Function to compute F1 scores with pred and gold pairs/triplets
The input needs to be already processed
"""
    # number of true positives, gold-standard tuples, and predicted tuples
n_tp, n_gold, n_pred = 0, 0, 0
for i in range(len(pred_pt)):
n_gold += len(gold_pt[i])
n_pred += len(pred_pt[i])
for t in pred_pt[i]:
if t in gold_pt[i]:
n_tp += 1
precision = float(n_tp) / float(n_pred) if n_pred != 0 else 0
recall = float(n_tp) / float(n_gold) if n_gold != 0 else 0
f1 = 2 * precision * recall / (precision + recall) if precision != 0 or recall != 0 else 0
scores = {'precision': precision, 'recall': recall, 'f1': f1}
return scores
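# Illustrative sketch, not part of the original file: micro-averaged F1 over two sentences
# (n_tp=2, n_pred=3, n_gold=2 -> precision=2/3, recall=1.0, f1=0.8).
def _demo_compute_f1_scores():
    pred = [[('food', 'great', 'positive')],
            [('service', 'slow', 'negative'), ('menu', 'rich', 'positive')]]
    gold = [[('food', 'great', 'positive')],
            [('service', 'slow', 'negative')]]
    return compute_f1_scores(pred, gold)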
def compute_scores(pred_seqs, gold_seqs, sents, io_format, task):
"""
compute metrics for multiple tasks
"""
assert len(pred_seqs) == len(gold_seqs)
num_samples = len(gold_seqs)
all_labels, all_predictions = [], []
for i in range(num_samples):
if io_format == 'annotation':
gold_list = extract_spans_annotation(task, gold_seqs[i])
pred_list = extract_spans_annotation(task, pred_seqs[i])
elif io_format == 'extraction':
gold_list = extract_spans_extraction(task, gold_seqs[i])
pred_list = extract_spans_extraction(task, pred_seqs[i])
all_labels.append(gold_list)
all_predictions.append(pred_list)
print("\nResults of raw output")
raw_scores = compute_f1_scores(all_predictions, all_labels)
print(raw_scores)
# fix the issues due to generation
all_predictions_fixed = fix_pred_with_editdistance(all_predictions, sents, task)
print("\nResults of fixed output")
fixed_scores = compute_f1_scores(all_predictions_fixed, all_labels)
print(fixed_scores)
return raw_scores, fixed_scores, all_labels, all_predictions, all_predictions_fixed | 11,361 | 31.462857 | 97 | py |
DMASTE | DMASTE-main/GTS/code/NNModel/main.py | #coding utf-8
import json, os
import random
import argparse
import numpy
import torch
import torch.nn.functional as F
from tqdm import trange
import numpy as np
from data import load_data_instances, DataIterator
from model import MultiInferRNNModel, MultiInferCNNModel
import utils
def train(args):
# load double embedding
word2index = json.load(open(args.prefix + 'doubleembedding/word_idx.json'))
general_embedding = numpy.load(args.prefix +'doubleembedding/gen.vec.npy')
general_embedding = torch.from_numpy(general_embedding)
domain_embedding = numpy.load(args.prefix +'doubleembedding/'+args.dataset+'_emb.vec.npy')
domain_embedding = torch.from_numpy(domain_embedding)
# load dataset
train_sentence_packs = json.load(open(args.prefix + args.dataset + '/train.json'))
random.shuffle(train_sentence_packs)
dev_sentence_packs = json.load(open(args.prefix + args.dataset + '/dev.json'))
instances_train = load_data_instances(train_sentence_packs, word2index, args)
instances_dev = load_data_instances(dev_sentence_packs, word2index, args)
random.shuffle(instances_train)
trainset = DataIterator(instances_train, args)
devset = DataIterator(instances_dev, args)
if not os.path.exists(args.model_dir):
os.makedirs(args.model_dir)
# build model
if args.model == 'bilstm':
model = MultiInferRNNModel(general_embedding, domain_embedding, args).to(args.device)
elif args.model == 'cnn':
model = MultiInferCNNModel(general_embedding, domain_embedding, args).to(args.device)
parameters = list(model.parameters())
parameters = filter(lambda x: x.requires_grad, parameters)
optimizer = torch.optim.Adam(parameters, lr=args.lr)
# training
best_joint_f1 = 0
best_joint_epoch = 0
for i in range(args.epochs):
print('Epoch:{}'.format(i))
for j in trange(trainset.batch_count):
_, sentence_tokens, lengths, masks, aspect_tags, _, tags = trainset.get_batch(j)
predictions = model(sentence_tokens, lengths, masks)
loss = 0.
tags_flatten = tags[:, :lengths[0], :lengths[0]].reshape([-1])
for k in range(len(predictions)):
prediction_flatten = predictions[k].reshape([-1, predictions[k].shape[3]])
loss = loss + F.cross_entropy(prediction_flatten, tags_flatten, ignore_index=-1)
optimizer.zero_grad()
loss.backward()
optimizer.step()
joint_precision, joint_recall, joint_f1 = eval(model, devset, args)
if joint_f1 > best_joint_f1:
model_path = args.model_dir + args.model + args.task + '.pt'
torch.save(model, model_path)
best_joint_f1 = joint_f1
best_joint_epoch = i
print('best epoch: {}\tbest dev {} f1: {:.5f}\n\n'.format(best_joint_epoch, args.task, best_joint_f1))
def eval(model, dataset, args):
model.eval()
with torch.no_grad():
predictions=[]
labels=[]
all_ids = []
all_lengths = []
for i in range(dataset.batch_count):
sentence_ids, sentence_tokens, lengths, mask, aspect_tags, _, tags = dataset.get_batch(i)
prediction = model.forward(sentence_tokens,lengths, mask)
prediction = prediction[-1]
prediction = torch.argmax(prediction, dim=3)
prediction_padded = torch.zeros(prediction.shape[0], args.max_sequence_len, args.max_sequence_len)
prediction_padded[:, :prediction.shape[1], :prediction.shape[1]] = prediction
predictions.append(prediction_padded)
all_ids.extend(sentence_ids)
labels.append(tags)
all_lengths.append(lengths)
predictions = torch.cat(predictions,dim=0).cpu().tolist()
labels = torch.cat(labels,dim=0).cpu().tolist()
all_lengths = torch.cat(all_lengths, dim=0).cpu().tolist()
precision, recall, f1 = utils.score_uniontags(args, predictions, labels, all_lengths, ignore_index=-1)
aspect_results = utils.score_aspect(predictions, labels, all_lengths, ignore_index=-1)
opinion_results = utils.score_opinion(predictions, labels, all_lengths, ignore_index=-1)
print('Aspect term\tP:{:.5f}\tR:{:.5f}\tF1:{:.5f}'.format(aspect_results[0], aspect_results[1], aspect_results[2]))
print('Opinion term\tP:{:.5f}\tR:{:.5f}\tF1:{:.5f}'.format(opinion_results[0], opinion_results[1], opinion_results[2]))
print(args.task+'\tP:{:.5f}\tR:{:.5f}\tF1:{:.5f}\n'.format(precision, recall, f1))
model.train()
return precision, recall, f1
def test(args):
print("Evaluation on testset:")
model_path = args.model_dir + args.model + args.task + '.pt'
model = torch.load(model_path).to(args.device)
model.eval()
word2index = json.load(open(args.prefix + 'doubleembedding/word_idx.json'))
sentence_packs = json.load(open(args.prefix + args.dataset + '/test.json'))
instances = load_data_instances(sentence_packs, word2index, args)
testset = DataIterator(instances, args)
eval(model, testset, args)
if __name__=='__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--prefix', type=str, default="../../data/",
help='dataset and embedding path prefix')
parser.add_argument('--model_dir', type=str, default="savemodel/",
help='model path prefix')
parser.add_argument('--task', type=str, default="pair", choices=["pair", "triplet"],
help='option: pair, triplet')
parser.add_argument('--mode', type=str, default="train", choices=["train", "test"],
help='option: train, test')
parser.add_argument('--model', type=str, default="bilstm", choices=["bilstm", "cnn"],
help='option: bilstm, cnn')
parser.add_argument('--dataset', type=str, default="res14",
help='dataset')
parser.add_argument('--max_sequence_len', type=int, default=100,
help='max length of a sentence')
parser.add_argument('--device', type=str, default="cuda",
help='gpu or cpu')
parser.add_argument('--lstm_dim', type=int, default=50,
help='dimension of lstm cell')
parser.add_argument('--cnn_dim', type=int, default=256,
help='dimension of cnn')
parser.add_argument('--nhops', type=int, default=0,
help='inference times')
parser.add_argument('--lr', type=float, default=0.001,
help='learning rate')
parser.add_argument('--batch_size', type=int, default=32,
                        help='batch size')
parser.add_argument('--epochs', type=int, default=600,
help='training epoch number')
parser.add_argument('--class_num', type=int, default=4,
help='label number')
args = parser.parse_args()
if args.task == 'triplet':
args.class_num = 6
if args.mode == 'train':
train(args)
test(args)
else:
test(args)
| 7,166 | 39.954286 | 127 | py |
DMASTE | DMASTE-main/GTS/code/NNModel/attention_module.py | import copy
import math
import torch
import torch.nn.functional as F
def attention(query, key, value, mask=None, dropout=None):
"Compute 'Scaled Dot Product Attention'"
d_k = query.size(-1)
scores = torch.matmul(query, key.transpose(-2, -1)) \
/ math.sqrt(d_k)
if mask is not None:
scores = scores.masked_fill(mask == 0, -1e9)
p_attn = F.softmax(scores, dim = -1)
if dropout is not None:
p_attn = dropout(p_attn)
return torch.matmul(p_attn, value), p_attn
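# Illustrative sketch, not part of the original file: scaled dot-product attention on toy
# tensors shaped (batch, heads, seq_len, d_k).
def _demo_attention():
    q = torch.randn(2, 1, 4, 8)
    k = torch.randn(2, 1, 4, 8)
    v = torch.randn(2, 1, 4, 8)
    out, p_attn = attention(q, k, v)  # out: (2, 1, 4, 8), p_attn: (2, 1, 4, 4)
    return out.shape, p_attn.shape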
def clones(module, N):
"Produce N identical layers."
return torch.nn.ModuleList([copy.deepcopy(module) for _ in range(N)])
class MultiHeadedAttention(torch.nn.Module):
def __init__(self, h, d_model, dropout=0.1):
"Take in model size and number of heads."
super(MultiHeadedAttention, self).__init__()
assert d_model % h == 0
# We assume d_v always equals d_k
self.d_k = d_model // h
self.h = h
self.linears = clones(torch.nn.Linear(d_model, d_model), 4)
self.attn = None
self.dropout = torch.nn.Dropout(p=dropout)
def forward(self, query, key, value, mask=None):
"Implements Figure 2"
if mask is not None:
# Same mask applied to all h heads.
mask = mask.unsqueeze(1)
nbatches = query.size(0)
# 1) Do all the linear projections in batch from d_model => h x d_k
query, key, value = \
[l(x).view(nbatches, -1, self.h, self.d_k).transpose(1, 2)
for l, x in zip(self.linears, (query, key, value))]
# 2) Apply attention on all the projected vectors in batch.
x, self.attn = attention(query, key, value, mask=mask,
dropout=self.dropout)
# 3) "Concat" using a view and apply a final linear.
x = x.transpose(1, 2).contiguous() \
.view(nbatches, -1, self.h * self.d_k)
return self.linears[-1](x)
class SelfAttention(torch.nn.Module):
def __init__(self, args):
super(SelfAttention,self).__init__()
self.args = args
self.linear_q = torch.nn.Linear(args.lstm_dim * 2, args.lstm_dim * 2)
# self.linear_k = torch.nn.Linear(configs.BILSTM_DIM * 2, configs.BILSTM_DIM * 2)
# self.linear_v = torch.nn.Linear(configs.BILSTM_DIM * 2, configs.BILSTM_DIM * 2)
# self.w_query = torch.nn.Linear(configs.BILSTM_DIM * 2, 50)
# self.w_value = torch.nn.Linear(configs.BILSTM_DIM * 2, 50)
self.w_query = torch.nn.Linear(args.cnn_dim, 50)
self.w_value = torch.nn.Linear(args.cnn_dim, 50)
self.v = torch.nn.Linear(50, 1, bias=False)
def forward(self, query, value, mask):
# attention_states = self.linear_q(query)
# attention_states_T = self.linear_k(values)
attention_states = query
attention_states_T = value
attention_states_T = attention_states_T.permute([0, 2, 1])
weights=torch.bmm(attention_states, attention_states_T)
        weights = weights.masked_fill(mask.unsqueeze(1).expand_as(weights)==0, float("-inf")) # mask out the padded columns beyond each sentence length
attention = F.softmax(weights,dim=2)
# value=self.linear_v(states)
merged=torch.bmm(attention, value)
merged=merged * mask.unsqueeze(2).float().expand_as(merged)
return merged
def forward_perceptron(self, query, value, mask):
attention_states = query
attention_states = self.w_query(attention_states)
attention_states = attention_states.unsqueeze(2).expand(-1,-1,attention_states.shape[1], -1)
attention_states_T = value
attention_states_T = self.w_value(attention_states_T)
attention_states_T = attention_states_T.unsqueeze(2).expand(-1,-1,attention_states_T.shape[1], -1)
attention_states_T = attention_states_T.permute([0, 2, 1, 3])
weights = torch.tanh(attention_states+attention_states_T)
weights = self.v(weights).squeeze(3)
        weights = weights.masked_fill(mask.unsqueeze(1).expand_as(weights)==0, float("-inf")) # mask out the padded columns beyond each sentence length
attention = F.softmax(weights,dim=2)
merged = torch.bmm(attention, value)
merged = merged * mask.unsqueeze(2).float().expand_as(merged)
return merged
| 4,281 | 38.648148 | 112 | py |
DMASTE | DMASTE-main/GTS/code/NNModel/utils.py | import multiprocessing
import pickle
import numpy as np
import sklearn
id2sentiment = {3: 'neg', 4: 'neu', 5: 'pos'}  # inverse of sentiment2id used when building the tag grid
def get_aspects(tags, length, ignore_index=-1):
spans = []
start = -1
for i in range(length):
if tags[i][i] == ignore_index: continue
elif tags[i][i] == 1:
if start == -1:
start = i
elif tags[i][i] != 1:
if start != -1:
spans.append([start, i - 1])
start = -1
if start != -1:
spans.append([start, length-1])
return spans
def get_opinions(tags, length, ignore_index=-1):
spans = []
start = -1
for i in range(length):
if tags[i][i] == ignore_index: continue
elif tags[i][i] == 2:
if start == -1:
start = i
elif tags[i][i] != 2:
if start != -1:
spans.append([start, i - 1])
start = -1
if start != -1:
spans.append([start, length-1])
return spans
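# Illustrative sketch, not part of the original file: aspect and opinion spans are read
# off the diagonal of the tag table (1 = aspect token, 2 = opinion token, 0 = neither).
def _demo_diagonal_decoding():
    tags = [[1, 0, 0, 0],
            [0, 1, 0, 0],
            [0, 0, 0, 0],
            [0, 0, 0, 2]]
    # returns ([[0, 1]], [[3, 3]])
    return get_aspects(tags, 4), get_opinions(tags, 4)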
def score_aspect(predicted, golden, lengths, ignore_index=-1):
assert len(predicted) == len(golden)
golden_set = set()
predict_set = set()
for i in range(len(golden)):
golden_spans = get_aspects(golden[i], lengths[i], ignore_index)
for l, r in golden_spans:
golden_set.add('-'.join([str(i), str(l), str(r)]))
predict_spans = get_aspects(predicted[i], lengths[i], ignore_index)
for l, r in predict_spans:
predict_set.add('-'.join([str(i), str(l), str(r)]))
correct_num = len(golden_set & predict_set)
precision = correct_num / len(predict_set) if len(predict_set) > 0 else 0
recall = correct_num / len(golden_set) if len(golden_set) > 0 else 0
f1 = 2 * precision * recall / (precision + recall) if (precision + recall) > 0 else 0
return precision, recall, f1
def score_opinion(predicted, golden, lengths, ignore_index=-1):
assert len(predicted) == len(golden)
golden_set = set()
predict_set = set()
for i in range(len(golden)):
golden_spans = get_opinions(golden[i], lengths[i], ignore_index)
for l, r in golden_spans:
golden_set.add('-'.join([str(i), str(l), str(r)]))
predict_spans = get_opinions(predicted[i], lengths[i], ignore_index)
for l, r in predict_spans:
predict_set.add('-'.join([str(i), str(l), str(r)]))
correct_num = len(golden_set & predict_set)
precision = correct_num / len(predict_set) if len(predict_set) > 0 else 0
recall = correct_num / len(golden_set) if len(golden_set) > 0 else 0
f1 = 2 * precision * recall / (precision + recall) if (precision + recall) > 0 else 0
return precision, recall, f1
def find_pair(tags, aspect_spans, opinion_spans):
pairs = []
for al, ar in aspect_spans:
for pl, pr in opinion_spans:
flag = False
for i in range(al, ar+1):
for j in range(pl, pr+1):
if tags[i][j] == 3 or tags[j][i] == 3:
flag = True
break
if flag: break
if flag:
pairs.append([al, ar, pl, pr])
return pairs
def find_triplet(tags, aspect_spans, opinion_spans):
triplets = []
for al, ar in aspect_spans:
for pl, pr in opinion_spans:
tag_num = [0]*6
for i in range(al, ar+1):
for j in range(pl, pr+1):
if al < pl:
tag_num[int(tags[i][j])] += 1
else:
tag_num[int(tags[j][i])] += 1
if sum(tag_num[3:]) == 0: continue
sentiment = -1
if tag_num[5] >= tag_num[4] and tag_num[5] >= tag_num[3]:
sentiment = 5
elif tag_num[4] >= tag_num[3] and tag_num[4] >= tag_num[5]:
sentiment = 4
elif tag_num[3] >= tag_num[5] and tag_num[3] >= tag_num[4]:
sentiment = 3
if sentiment == -1:
print('wrong!!!!!!!!!!!!!!!!!!!!')
input()
triplets.append([al, ar, pl, pr, sentiment])
return triplets
def score_uniontags(args, predicted, golden, lengths, ignore_index=-1):
assert len(predicted) == len(golden)
golden_set = set()
predicted_set = set()
for i in range(len(golden)):
golden_aspect_spans = get_aspects(golden[i], lengths[i], ignore_index)
golden_opinion_spans = get_opinions(golden[i], lengths[i], ignore_index)
if args.task == 'pair':
golden_tuple = find_pair(golden[i], golden_aspect_spans, golden_opinion_spans)
elif args.task == 'triplet':
golden_tuple = find_triplet(golden[i], golden_aspect_spans, golden_opinion_spans)
for pair in golden_tuple:
golden_set.add(str(i) + '-'+ '-'.join(map(str, pair)))
predicted_aspect_spans = get_aspects(predicted[i], lengths[i], ignore_index)
predicted_opinion_spans = get_opinions(predicted[i], lengths[i], ignore_index)
if args.task == 'pair':
predicted_tuple = find_pair(predicted[i], predicted_aspect_spans, predicted_opinion_spans)
elif args.task == 'triplet':
predicted_tuple = find_triplet(predicted[i], predicted_aspect_spans, predicted_opinion_spans)
for pair in predicted_tuple:
predicted_set.add(str(i) + '-' + '-'.join(map(str, pair)))
correct_num = len(golden_set & predicted_set)
precision = correct_num / len(predicted_set) if len(predicted_set) > 0 else 0
recall = correct_num / len(golden_set) if len(golden_set) > 0 else 0
f1 = 2 * precision * recall / (precision + recall) if (precision + recall) > 0 else 0
return precision, recall, f1
| 5,783 | 37.052632 | 105 | py |
DMASTE | DMASTE-main/GTS/code/NNModel/model.py | import torch
import torch.nn
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
import torch.nn.functional as F
from attention_module import MultiHeadedAttention, SelfAttention
class MultiInferRNNModel(torch.nn.Module):
def __init__(self, gen_emb, domain_emb, args):
'''double embedding + lstm encoder + dot self attention'''
super(MultiInferRNNModel, self).__init__()
self.args = args
self.gen_embedding = torch.nn.Embedding(gen_emb.shape[0], gen_emb.shape[1])
self.gen_embedding.weight.data.copy_(gen_emb)
self.gen_embedding.weight.requires_grad = False
self.domain_embedding = torch.nn.Embedding(domain_emb.shape[0], domain_emb.shape[1])
self.domain_embedding.weight.data.copy_(domain_emb)
self.domain_embedding.weight.requires_grad = False
self.dropout1 = torch.nn.Dropout(0.5)
self.dropout2 = torch.nn.Dropout(0)
self.bilstm = torch.nn.LSTM(300+100, args.lstm_dim,
num_layers=1, batch_first=True, bidirectional=True)
self.attention_layer = SelfAttention(args)
self.feature_linear = torch.nn.Linear(args.lstm_dim*4 + args.class_num*3, args.lstm_dim*4)
self.cls_linear = torch.nn.Linear(args.lstm_dim*4, args.class_num)
def _get_embedding(self, sentence_tokens, mask):
gen_embed = self.gen_embedding(sentence_tokens)
domain_embed = self.domain_embedding(sentence_tokens)
embedding = torch.cat([gen_embed, domain_embed], dim=2)
embedding = self.dropout1(embedding)
embedding = embedding * mask.unsqueeze(2).float().expand_as(embedding)
return embedding
def _lstm_feature(self, embedding, lengths):
embedding = pack_padded_sequence(embedding, lengths, batch_first=True)
context, _ = self.bilstm(embedding)
context, _ = pad_packed_sequence(context, batch_first=True)
return context
def _cls_logits(self, features):
# features = self.dropout2(features)
tags = self.cls_linear(features)
return tags
def multi_hops(self, features, lengths, mask, k):
'''generate mask'''
max_length = features.shape[1]
mask = mask[:, :max_length]
mask_a = mask.unsqueeze(1).expand([-1, max_length, -1])
mask_b = mask.unsqueeze(2).expand([-1, -1, max_length])
mask = mask_a * mask_b
mask = torch.triu(mask).unsqueeze(3).expand([-1, -1, -1, self.args.class_num])
'''save all logits'''
logits_list = []
logits = self._cls_logits(features)
logits_list.append(logits)
for i in range(k):
#probs = torch.softmax(logits, dim=3)
probs = logits
logits = probs * mask
logits_a = torch.max(logits, dim=1)[0]
logits_b = torch.max(logits, dim=2)[0]
logits = torch.cat([logits_a.unsqueeze(3), logits_b.unsqueeze(3)], dim=3)
logits = torch.max(logits, dim=3)[0]
logits = logits.unsqueeze(2).expand([-1,-1, max_length, -1])
logits_T = logits.transpose(1, 2)
logits = torch.cat([logits, logits_T], dim=3)
new_features = torch.cat([features, logits, probs], dim=3)
features = self.feature_linear(new_features)
logits = self._cls_logits(features)
logits_list.append(logits)
return logits_list
def forward(self, sentence_tokens, lengths, mask):
embedding = self._get_embedding(sentence_tokens, mask)
lstm_feature = self._lstm_feature(embedding, lengths)
# self attention
lstm_feature_attention = self.attention_layer(lstm_feature, lstm_feature, mask[:,:lengths[0]])
#lstm_feature_attention = self.attention_layer.forward_perceptron(lstm_feature, lstm_feature, mask[:, :lengths[0]])
lstm_feature = lstm_feature + lstm_feature_attention
lstm_feature = lstm_feature.unsqueeze(2).expand([-1,-1, lengths[0], -1])
lstm_feature_T = lstm_feature.transpose(1, 2)
features = torch.cat([lstm_feature, lstm_feature_T], dim=3)
logits = self.multi_hops(features, lengths, mask, self.args.nhops)
return [logits[-1]]
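# Illustrative sketch, not part of the original file: constructing the RNN variant with
# random embeddings; the Namespace fields mirror the options exposed in main.py
# (lstm_dim, cnn_dim, class_num=6 for the triplet task, nhops).
def _demo_build_rnn_model():
    from argparse import Namespace
    args = Namespace(lstm_dim=50, cnn_dim=256, class_num=6, nhops=1)
    gen_emb = torch.randn(100, 300)     # general-purpose embeddings (300-d)
    domain_emb = torch.randn(100, 100)  # domain-specific embeddings (100-d)
    return MultiInferRNNModel(gen_emb, domain_emb, args)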
class MultiInferCNNModel(torch.nn.Module):
def __init__(self, gen_emb, domain_emb, args):
super(MultiInferCNNModel, self).__init__()
self.args = args
self.gen_embedding = torch.nn.Embedding(gen_emb.shape[0], gen_emb.shape[1])
self.gen_embedding.weight.data.copy_(gen_emb)
self.gen_embedding.weight.requires_grad = False
self.domain_embedding = torch.nn.Embedding(domain_emb.shape[0], domain_emb.shape[1])
self.domain_embedding.weight.data.copy_(domain_emb)
self.domain_embedding.weight.requires_grad = False
self.attention_layer = SelfAttention(args)
self.conv1 = torch.nn.Conv1d(gen_emb.shape[1] + domain_emb.shape[1], 128, 5, padding=2)
self.conv2 = torch.nn.Conv1d(gen_emb.shape[1] + domain_emb.shape[1], 128, 3, padding=1)
self.dropout = torch.nn.Dropout(0.5)
self.conv3 = torch.nn.Conv1d(256, 256, 5, padding=2)
self.conv4 = torch.nn.Conv1d(256, 256, 5, padding=2)
self.conv5 = torch.nn.Conv1d(256, 256, 5, padding=2)
self.feature_linear = torch.nn.Linear(args.cnn_dim*2 + args.class_num*3, args.cnn_dim*2)
self.cls_linear = torch.nn.Linear(256*2, args.class_num)
def multi_hops(self, features, lengths, mask, k):
        '''generate matrix mask'''
max_length = features.shape[1]
mask = mask[:, :max_length]
mask_a = mask.unsqueeze(1).expand([-1, max_length, -1])
mask_b = mask.unsqueeze(2).expand([-1, -1, max_length])
mask = mask_a * mask_b
mask = torch.triu(mask).unsqueeze(3).expand([-1, -1, -1, self.args.class_num])
'''save all logits'''
logits_list = []
logits = self.cls_linear(features)
logits_list.append(logits)
for i in range(k):
#probs = torch.softmax(logits, dim=3)
probs = logits
logits = probs * mask
logits_a = torch.max(logits, dim=1)[0]
logits_b = torch.max(logits, dim=2)[0]
logits = torch.cat([logits_a.unsqueeze(3), logits_b.unsqueeze(3)], dim=3)
logits = torch.max(logits, dim=3)[0]
logits = logits.unsqueeze(2).expand([-1,-1, max_length, -1])
logits_T = logits.transpose(1, 2)
logits = torch.cat([logits, logits_T], dim=3)
new_features = torch.cat([features, logits, probs], dim=3)
features = self.feature_linear(new_features)
logits = self.cls_linear(features)
logits_list.append(logits)
return logits_list
def forward(self, x, x_len, x_mask):
x_emb = torch.cat((self.gen_embedding(x), self.domain_embedding(x)), dim=2)
x_emb = self.dropout(x_emb).transpose(1, 2)
x_conv = torch.nn.functional.relu(torch.cat((self.conv1(x_emb), self.conv2(x_emb)), dim=1))
x_conv = self.dropout(x_conv)
x_conv = torch.nn.functional.relu(self.conv3(x_conv))
x_conv = self.dropout(x_conv)
x_conv = torch.nn.functional.relu(self.conv4(x_conv))
x_conv = self.dropout(x_conv)
x_conv = torch.nn.functional.relu(self.conv5(x_conv))
x_conv = x_conv.transpose(1, 2)
x_conv = x_conv[:, :x_len[0], :]
feature_attention = self.attention_layer.forward_perceptron(x_conv, x_conv, x_mask[:, :x_len[0]])
x_conv = x_conv + feature_attention
x_conv = x_conv.unsqueeze(2).expand([-1, -1, x_len[0], -1])
x_conv_T = x_conv.transpose(1, 2)
features = torch.cat([x_conv, x_conv_T], dim=3)
logits = self.multi_hops(features, x_len, x_mask, self.args.nhops)
return [logits[-1]]
| 7,886 | 41.632432 | 123 | py |
DMASTE | DMASTE-main/GTS/code/NNModel/data.py | import math
import torch
sentiment2id = {'negative': 3, 'neutral': 4, 'positive': 5}
def get_spans(tags):
'''for BIO tag'''
tags = tags.strip().split()
length = len(tags)
spans = []
start = -1
for i in range(length):
if tags[i].endswith('B'):
if start != -1:
spans.append([start, i - 1])
start = i
elif tags[i].endswith('O'):
if start != -1:
spans.append([start, i - 1])
start = -1
if start != -1:
spans.append([start, length - 1])
return spans
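# Illustrative sketch, not part of the original file: BIO tag strings of the form
# "word\B word\I word\O ..." are reduced to [start, end] word-index spans.
def _demo_get_spans():
    # tokens tagged B I O O B -> [[0, 1], [4, 4]]
    return get_spans(r'the\B battery\I is\O not\O bad\B')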
class Instance(object):
def __init__(self, sentence_pack, word2index, args):
self.id = sentence_pack['id']
self.sentence = sentence_pack['sentence']
self.sentence_tokens = torch.zeros(args.max_sequence_len).long()
'''generate sentence tokens'''
words = self.sentence.split()
self.length = len(words)
for i, w in enumerate(words):
# word = w.lower()
word = w
if word in word2index:
self.sentence_tokens[i] = word2index[word]
else:
self.sentence_tokens[i] = word2index['<unk>']
self.aspect_tags = torch.zeros(args.max_sequence_len).long()
self.opinion_tags = torch.zeros(args.max_sequence_len).long()
self.aspect_tags[self.length:] = -1
self.opinion_tags[self.length:] = -1
self.tags = torch.zeros(args.max_sequence_len, args.max_sequence_len).long()
self.tags[:, :] = -1
for i in range(self.length):
for j in range(i, self.length):
self.tags[i][j] = 0
for pair in sentence_pack['triples']:
aspect = pair['target_tags']
opinion = pair['opinion_tags']
aspect_span = get_spans(aspect)
opinion_span = get_spans(opinion)
for l, r in aspect_span:
for i in range(l, r+1):
self.aspect_tags[i] = 1 if i == l else 2
self.tags[i][i] = 1
if i > l: self.tags[i-1][i] = 1
for j in range(i, r+1):
self.tags[i][j] = 1
for l, r in opinion_span:
for i in range(l, r+1):
self.opinion_tags[i] = 1 if i == l else 2
self.tags[i][i] = 2
if i > l: self.tags[i-1][i] = 2
for j in range(i, r+1):
self.tags[i][j] = 2
for al, ar in aspect_span:
for pl, pr in opinion_span:
for i in range(al, ar+1):
for j in range(pl, pr+1):
if args.task == 'pair':
if i > j: self.tags[j][i] = 3
else: self.tags[i][j] = 3
elif args.task == 'triplet':
if i > j: self.tags[j][i] = sentiment2id[pair['sentiment']]
else: self.tags[i][j] = sentiment2id[pair['sentiment']]
'''generate mask of the sentence'''
self.mask = torch.zeros(args.max_sequence_len)
self.mask[:self.length] = 1
def load_data_instances(sentence_packs, word2index, args):
instances = list()
for sentence_pack in sentence_packs:
instances.append(Instance(sentence_pack, word2index, args))
return instances
class DataIterator(object):
def __init__(self, instances, args):
self.instances = instances
self.args = args
self.batch_count = math.ceil(len(instances)/args.batch_size)
def get_batch(self, index):
sentence_ids = []
sentence_tokens = []
lengths = []
masks = []
aspect_tags = []
opinion_tags = []
tags = []
for i in range(index * self.args.batch_size,
min((index + 1) * self.args.batch_size, len(self.instances))):
sentence_ids.append(self.instances[i].id)
sentence_tokens.append(self.instances[i].sentence_tokens)
lengths.append(self.instances[i].length)
masks.append(self.instances[i].mask)
aspect_tags.append(self.instances[i].aspect_tags)
opinion_tags.append(self.instances[i].opinion_tags)
tags.append(self.instances[i].tags)
indexes = list(range(len(sentence_tokens)))
indexes = sorted(indexes, key=lambda x: lengths[x], reverse=True)
sentence_ids = [sentence_ids[i] for i in indexes]
sentence_tokens = torch.stack(sentence_tokens).to(self.args.device)[indexes]
lengths = torch.tensor(lengths).to(self.args.device)[indexes]
masks = torch.stack(masks).to(self.args.device)[indexes]
aspect_tags = torch.stack(aspect_tags).to(self.args.device)[indexes]
opinion_tags = torch.stack(opinion_tags).to(self.args.device)[indexes]
tags = torch.stack(tags).to(self.args.device)[indexes]
return sentence_ids, sentence_tokens, lengths, masks, aspect_tags, opinion_tags, tags
| 5,123 | 36.955556 | 93 | py |
DMASTE | DMASTE-main/GTS/code/BertModel/main.py | #coding utf-8
import json, os
import random
import argparse
import torch
import torch.nn.functional as F
from tqdm import trange
from data import load_data_instances, DataIterator
from model import MultiInferBert
import utils
def train(args):
# load dataset
train_sentence_packs = json.load(open(args.prefix + args.source + '/train.json'))
random.shuffle(train_sentence_packs)
dev_sentence_packs = json.load(open(args.prefix + args.source + '/dev.json'))
instances_train = load_data_instances(train_sentence_packs, args)
instances_dev = load_data_instances(dev_sentence_packs, args)
random.shuffle(instances_train)
trainset = DataIterator(instances_train, args)
devset = DataIterator(instances_dev, args)
if not os.path.exists(args.model_dir):
os.makedirs(args.model_dir)
model = MultiInferBert(args).to(args.device)
optimizer = torch.optim.Adam([
{'params': model.bert.parameters(), 'lr': 5e-5},
{'params': model.cls_linear.parameters()}
], lr=5e-5)
best_joint_f1 = 0
best_joint_epoch = 0
for i in range(args.epochs):
print('Epoch:{}'.format(i))
for j in trange(trainset.batch_count):
_, tokens, lengths, masks, _, _, aspect_tags, tags = trainset.get_batch(j)
preds = model(tokens, masks)
preds_flatten = preds.reshape([-1, preds.shape[3]])
tags_flatten = tags.reshape([-1])
loss = F.cross_entropy(preds_flatten, tags_flatten, ignore_index=-1)
optimizer.zero_grad()
loss.backward()
optimizer.step()
joint_precision, joint_recall, joint_f1 = eval(model, devset, args)
if joint_f1 > best_joint_f1:
model_path = args.model_dir + 'bert' + args.task + '.pt'
torch.save(model, model_path)
best_joint_f1 = joint_f1
best_joint_epoch = i
print('best epoch: {}\tbest dev {} f1: {:.5f}\n\n'.format(best_joint_epoch, args.task, best_joint_f1))
def eval(model, dataset, args):
model.eval()
with torch.no_grad():
all_ids = []
all_preds = []
all_labels = []
all_lengths = []
all_sens_lengths = []
all_token_ranges = []
for i in range(dataset.batch_count):
sentence_ids, tokens, lengths, masks, sens_lens, token_ranges, aspect_tags, tags = dataset.get_batch(i)
preds = model(tokens, masks)
preds = torch.argmax(preds, dim=3)
all_preds.append(preds)
all_labels.append(tags)
all_lengths.append(lengths)
all_sens_lengths.extend(sens_lens)
all_token_ranges.extend(token_ranges)
all_ids.extend(sentence_ids)
all_preds = torch.cat(all_preds, dim=0).cpu().tolist()
all_labels = torch.cat(all_labels, dim=0).cpu().tolist()
all_lengths = torch.cat(all_lengths, dim=0).cpu().tolist()
metric = utils.Metric(args, all_preds, all_labels, all_lengths, all_sens_lengths, all_token_ranges, ignore_index=-1)
precision, recall, f1 = metric.score_uniontags()
aspect_results = metric.score_aspect()
opinion_results = metric.score_opinion()
print('Aspect term\tP:{:.5f}\tR:{:.5f}\tF1:{:.5f}'.format(aspect_results[0], aspect_results[1],
aspect_results[2]))
print('Opinion term\tP:{:.5f}\tR:{:.5f}\tF1:{:.5f}'.format(opinion_results[0], opinion_results[1],
opinion_results[2]))
print(args.task + '\tP:{:.5f}\tR:{:.5f}\tF1:{:.5f}\n'.format(precision, recall, f1))
model.train()
return precision, recall, f1
def test(args):
print("Evaluation on testset:")
model_path = args.model_dir + 'bert' + args.task + '.pt'
model = torch.load(model_path).to(args.device)
model.eval()
sentence_packs = json.load(open(args.prefix + args.target + '/test.json'))
instances = load_data_instances(sentence_packs, args)
testset = DataIterator(instances, args)
eval(model, testset, args)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--prefix', type=str, default="../../data/",
help='dataset and embedding path prefix')
parser.add_argument('--model_dir', type=str, default="savemodel/",
help='model path prefix')
parser.add_argument('--task', type=str, default="pair", choices=["pair", "triplet"],
help='option: pair, triplet')
parser.add_argument('--mode', type=str, default="train", choices=["train", "test"],
help='option: train, test')
parser.add_argument('--source', type=str)
parser.add_argument('--target', type=str)
parser.add_argument('--max_sequence_len', type=int, default=100,
help='max length of a sentence')
parser.add_argument('--device', type=str, default="cuda",
help='gpu or cpu')
parser.add_argument('--bert_model_path', type=str,
default="bert-base-uncased",
help='pretrained bert model path')
parser.add_argument('--bert_tokenizer_path', type=str,
default="bert-base-uncased",
help='pretrained bert tokenizer path')
parser.add_argument('--bert_feature_dim', type=int, default=768,
help='dimension of pretrained bert feature')
parser.add_argument('--nhops', type=int, default=1,
help='inference times')
parser.add_argument('--batch_size', type=int, default=32,
                        help='batch size')
parser.add_argument('--epochs', type=int, default=100,
help='training epoch number')
parser.add_argument('--class_num', type=int, default=4,
help='label number')
args = parser.parse_args()
if args.task == 'triplet':
args.class_num = 6
if args.mode == 'train':
train(args)
# test(args)
else:
test(args)
| 6,176 | 37.60625 | 124 | py |
DMASTE | DMASTE-main/GTS/code/BertModel/utils.py | import multiprocessing
import pickle
import numpy as np
import sklearn
id2sentiment = {3: 'neg', 4: 'neu', 5: 'pos'}  # inverse of sentiment2id used when building the tag grid
def get_aspects(tags, length, ignore_index=-1):
spans = []
start = -1
for i in range(length):
if tags[i][i] == ignore_index: continue
elif tags[i][i] == 1:
if start == -1:
start = i
elif tags[i][i] != 1:
if start != -1:
spans.append([start, i - 1])
start = -1
if start != -1:
spans.append([start, length-1])
return spans
def get_opinions(tags, length, ignore_index=-1):
spans = []
start = -1
for i in range(length):
if tags[i][i] == ignore_index: continue
elif tags[i][i] == 2:
if start == -1:
start = i
elif tags[i][i] != 2:
if start != -1:
spans.append([start, i - 1])
start = -1
if start != -1:
spans.append([start, length-1])
return spans
class Metric():
def __init__(self, args, predictions, goldens, bert_lengths, sen_lengths, tokens_ranges, ignore_index=-1):
self.args = args
self.predictions = predictions
self.goldens = goldens
self.bert_lengths = bert_lengths
self.sen_lengths = sen_lengths
self.tokens_ranges = tokens_ranges
        self.ignore_index = ignore_index
self.data_num = len(self.predictions)
def get_spans(self, tags, length, token_range, type):
spans = []
start = -1
for i in range(length):
l, r = token_range[i]
if tags[l][l] == self.ignore_index:
continue
elif tags[l][l] == type:
if start == -1:
start = i
elif tags[l][l] != type:
if start != -1:
spans.append([start, i - 1])
start = -1
if start != -1:
spans.append([start, length - 1])
return spans
def find_pair(self, tags, aspect_spans, opinion_spans, token_ranges):
pairs = []
for al, ar in aspect_spans:
for pl, pr in opinion_spans:
tag_num = [0] * 4
for i in range(al, ar + 1):
for j in range(pl, pr + 1):
a_start = token_ranges[i][0]
o_start = token_ranges[j][0]
if al < pl:
tag_num[int(tags[a_start][o_start])] += 1
else:
tag_num[int(tags[o_start][a_start])] += 1
if tag_num[3] == 0: continue
sentiment = -1
pairs.append([al, ar, pl, pr, sentiment])
return pairs
def find_triplet(self, tags, aspect_spans, opinion_spans, token_ranges):
triplets = []
for al, ar in aspect_spans:
for pl, pr in opinion_spans:
tag_num = [0] * 6
for i in range(al, ar + 1):
for j in range(pl, pr + 1):
a_start = token_ranges[i][0]
o_start = token_ranges[j][0]
if al < pl:
tag_num[int(tags[a_start][o_start])] += 1
else:
tag_num[int(tags[o_start][a_start])] += 1
# if tags[i][j] != -1:
# tag_num[int(tags[i][j])] += 1
# if tags[j][i] != -1:
# tag_num[int(tags[j][i])] += 1
if sum(tag_num[3:]) == 0: continue
sentiment = -1
if tag_num[5] >= tag_num[4] and tag_num[5] >= tag_num[3]:
sentiment = 5
elif tag_num[4] >= tag_num[3] and tag_num[4] >= tag_num[5]:
sentiment = 4
elif tag_num[3] >= tag_num[5] and tag_num[3] >= tag_num[4]:
sentiment = 3
if sentiment == -1:
print('wrong!!!!!!!!!!!!!!!!!!!!')
input()
triplets.append([al, ar, pl, pr, sentiment])
return triplets
def score_aspect(self):
assert len(self.predictions) == len(self.goldens)
golden_set = set()
predicted_set = set()
for i in range(self.data_num):
golden_aspect_spans = self.get_spans(self.goldens[i], self.sen_lengths[i], self.tokens_ranges[i], 1)
for spans in golden_aspect_spans:
golden_set.add(str(i) + '-' + '-'.join(map(str, spans)))
predicted_aspect_spans = self.get_spans(self.predictions[i], self.sen_lengths[i], self.tokens_ranges[i], 1)
for spans in predicted_aspect_spans:
predicted_set.add(str(i) + '-' + '-'.join(map(str, spans)))
correct_num = len(golden_set & predicted_set)
precision = correct_num / len(predicted_set) if len(predicted_set) > 0 else 0
recall = correct_num / len(golden_set) if len(golden_set) > 0 else 0
f1 = 2 * precision * recall / (precision + recall) if (precision + recall) > 0 else 0
return precision, recall, f1
def score_opinion(self):
assert len(self.predictions) == len(self.goldens)
golden_set = set()
predicted_set = set()
for i in range(self.data_num):
golden_opinion_spans = self.get_spans(self.goldens[i], self.sen_lengths[i], self.tokens_ranges[i], 2)
for spans in golden_opinion_spans:
golden_set.add(str(i) + '-' + '-'.join(map(str, spans)))
predicted_opinion_spans = self.get_spans(self.predictions[i], self.sen_lengths[i], self.tokens_ranges[i], 2)
for spans in predicted_opinion_spans:
predicted_set.add(str(i) + '-' + '-'.join(map(str, spans)))
correct_num = len(golden_set & predicted_set)
precision = correct_num / len(predicted_set) if len(predicted_set) > 0 else 0
recall = correct_num / len(golden_set) if len(golden_set) > 0 else 0
f1 = 2 * precision * recall / (precision + recall) if (precision + recall) > 0 else 0
return precision, recall, f1
def score_uniontags(self):
assert len(self.predictions) == len(self.goldens)
golden_set = set()
predicted_set = set()
for i in range(self.data_num):
golden_aspect_spans = self.get_spans(self.goldens[i], self.sen_lengths[i], self.tokens_ranges[i], 1)
golden_opinion_spans = self.get_spans(self.goldens[i], self.sen_lengths[i], self.tokens_ranges[i], 2)
if self.args.task == 'pair':
golden_tuples = self.find_pair(self.goldens[i], golden_aspect_spans, golden_opinion_spans, self.tokens_ranges[i])
elif self.args.task == 'triplet':
golden_tuples = self.find_triplet(self.goldens[i], golden_aspect_spans, golden_opinion_spans, self.tokens_ranges[i])
for pair in golden_tuples:
golden_set.add(str(i) + '-' + '-'.join(map(str, pair)))
predicted_aspect_spans = self.get_spans(self.predictions[i], self.sen_lengths[i], self.tokens_ranges[i], 1)
predicted_opinion_spans = self.get_spans(self.predictions[i], self.sen_lengths[i], self.tokens_ranges[i], 2)
if self.args.task == 'pair':
predicted_tuples = self.find_pair(self.predictions[i], predicted_aspect_spans, predicted_opinion_spans, self.tokens_ranges[i])
elif self.args.task == 'triplet':
predicted_tuples = self.find_triplet(self.predictions[i], predicted_aspect_spans, predicted_opinion_spans, self.tokens_ranges[i])
for pair in predicted_tuples:
predicted_set.add(str(i) + '-' + '-'.join(map(str, pair)))
correct_num = len(golden_set & predicted_set)
precision = correct_num / len(predicted_set) if len(predicted_set) > 0 else 0
recall = correct_num / len(golden_set) if len(golden_set) > 0 else 0
f1 = 2 * precision * recall / (precision + recall) if (precision + recall) > 0 else 0
return precision, recall, f1 | 8,201 | 43.096774 | 145 | py |
DMASTE | DMASTE-main/GTS/code/BertModel/model.py | import torch
import torch.nn
from transformers import BertModel, BertTokenizer
class MultiInferBert(torch.nn.Module):
def __init__(self, args):
super(MultiInferBert, self).__init__()
self.args = args
self.bert = BertModel.from_pretrained(args.bert_model_path)
self.tokenizer = BertTokenizer.from_pretrained(args.bert_tokenizer_path)
self.cls_linear = torch.nn.Linear(args.bert_feature_dim*2, args.class_num)
self.feature_linear = torch.nn.Linear(args.bert_feature_dim*2 + args.class_num*3, args.bert_feature_dim*2)
self.dropout_output = torch.nn.Dropout(0.1)
def multi_hops(self, features, mask, k):
'''generate mask'''
max_length = features.shape[1]
mask = mask[:, :max_length]
mask_a = mask.unsqueeze(1).expand([-1, max_length, -1])
mask_b = mask.unsqueeze(2).expand([-1, -1, max_length])
mask = mask_a * mask_b
mask = torch.triu(mask).unsqueeze(3).expand([-1, -1, -1, self.args.class_num])
'''save all logits'''
logits_list = []
logits = self.cls_linear(features)
logits_list.append(logits)
for i in range(k):
#probs = torch.softmax(logits, dim=3)
probs = logits
logits = probs * mask
logits_a = torch.max(logits, dim=1)[0]
logits_b = torch.max(logits, dim=2)[0]
logits = torch.cat([logits_a.unsqueeze(3), logits_b.unsqueeze(3)], dim=3)
logits = torch.max(logits, dim=3)[0]
logits = logits.unsqueeze(2).expand([-1,-1, max_length, -1])
logits_T = logits.transpose(1, 2)
logits = torch.cat([logits, logits_T], dim=3)
new_features = torch.cat([features, logits, probs], dim=3)
features = self.feature_linear(new_features)
logits = self.cls_linear(features)
logits_list.append(logits)
return logits_list
def forward(self, tokens, masks):
output = self.bert(tokens, masks)
bert_feature = output.last_hidden_state
bert_feature = self.dropout_output(bert_feature)
bert_feature = bert_feature.unsqueeze(2).expand([-1, -1, self.args.max_sequence_len, -1])
bert_feature_T = bert_feature.transpose(1, 2)
features = torch.cat([bert_feature, bert_feature_T], dim=3)
logits = self.multi_hops(features, masks, self.args.nhops)
return logits[-1]
| 2,451 | 37.3125 | 114 | py |
DMASTE | DMASTE-main/GTS/code/BertModel/data.py | import math
import torch
import numpy as np
sentiment2id = {'negative': 3, 'neutral': 4, 'positive': 5}
from transformers import BertTokenizer
def get_spans(tags):
'''for BIO tag'''
tags = tags.strip().split()
length = len(tags)
spans = []
start = -1
for i in range(length):
if tags[i].endswith('B'):
if start != -1:
spans.append([start, i - 1])
start = i
elif tags[i].endswith('O'):
if start != -1:
spans.append([start, i - 1])
start = -1
if start != -1:
spans.append([start, length - 1])
return spans
def get_evaluate_spans(tags, length, token_range):
'''for BIO tag'''
spans = []
start = -1
for i in range(length):
l, r = token_range[i]
if tags[l] == -1:
continue
elif tags[l] == 1:
if start != -1:
spans.append([start, i - 1])
start = i
elif tags[l] == 0:
if start != -1:
spans.append([start, i - 1])
start = -1
if start != -1:
spans.append([start, length - 1])
return spans
class Instance(object):
def __init__(self, tokenizer, sentence_pack, args):
        self.id = sentence_pack['id'] if 'id' in sentence_pack else 0  # quote the key: the bare builtin id is never a dict key
self.sentence = sentence_pack['sentence']
self.tokens = self.sentence.strip().split()
self.sen_length = len(self.tokens)
self.token_range = []
self.bert_tokens = tokenizer.encode(self.sentence)
self.length = len(self.bert_tokens)
self.bert_tokens_padding = torch.zeros(args.max_sequence_len).long()
self.aspect_tags = torch.zeros(args.max_sequence_len).long()
self.opinion_tags = torch.zeros(args.max_sequence_len).long()
self.tags = torch.zeros(args.max_sequence_len, args.max_sequence_len).long()
self.mask = torch.zeros(args.max_sequence_len)
for i in range(self.length):
self.bert_tokens_padding[i] = self.bert_tokens[i]
self.mask[:self.length] = 1
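        # Map each whitespace-delimited word to its [first, last] BPE position; index 0 is
        # [CLS], so the grid labels below are written on the first sub-word of every word
        # and the remaining sub-word rows/columns are masked with -1.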
token_start = 1
        for i, w in enumerate(self.tokens):
token_end = token_start + len(tokenizer.encode(w, add_special_tokens=False))
self.token_range.append([token_start, token_end-1])
token_start = token_end
assert self.length == self.token_range[-1][-1]+2
self.aspect_tags[self.length:] = -1
self.aspect_tags[0] = -1
self.aspect_tags[self.length-1] = -1
self.opinion_tags[self.length:] = -1
self.opinion_tags[0] = -1
self.opinion_tags[self.length - 1] = -1
self.tags[:, :] = -1
for i in range(1, self.length-1):
for j in range(i, self.length-1):
self.tags[i][j] = 0
for triple in sentence_pack['triples']:
aspect = triple['target_tags']
opinion = triple['opinion_tags']
aspect_span = get_spans(aspect)
opinion_span = get_spans(opinion)
'''set tag for aspect'''
for l, r in aspect_span:
start = self.token_range[l][0]
end = self.token_range[r][1]
for i in range(start, end+1):
for j in range(i, end+1):
self.tags[i][j] = 1
for i in range(l, r+1):
set_tag = 1 if i == l else 2
al, ar = self.token_range[i]
self.aspect_tags[al] = set_tag
self.aspect_tags[al+1:ar+1] = -1
'''mask positions of sub words'''
self.tags[al+1:ar+1, :] = -1
self.tags[:, al+1:ar+1] = -1
'''set tag for opinion'''
for l, r in opinion_span:
start = self.token_range[l][0]
end = self.token_range[r][1]
for i in range(start, end+1):
for j in range(i, end+1):
self.tags[i][j] = 2
for i in range(l, r+1):
set_tag = 1 if i == l else 2
pl, pr = self.token_range[i]
self.opinion_tags[pl] = set_tag
self.opinion_tags[pl+1:pr+1] = -1
self.tags[pl+1:pr+1, :] = -1
self.tags[:, pl+1:pr+1] = -1
for al, ar in aspect_span:
for pl, pr in opinion_span:
for i in range(al, ar+1):
for j in range(pl, pr+1):
sal, sar = self.token_range[i]
spl, spr = self.token_range[j]
self.tags[sal:sar+1, spl:spr+1] = -1
if args.task == 'pair':
if i > j:
self.tags[spl][sal] = 3
else:
self.tags[sal][spl] = 3
elif args.task == 'triplet':
if i > j:
self.tags[spl][sal] = sentiment2id[triple['sentiment']]
else:
self.tags[sal][spl] = sentiment2id[triple['sentiment']]
def load_data_instances(sentence_packs, args):
instances = list()
tokenizer = BertTokenizer.from_pretrained(args.bert_tokenizer_path)
for sentence_pack in sentence_packs:
instances.append(Instance(tokenizer, sentence_pack, args))
return instances
class DataIterator(object):
def __init__(self, instances, args):
self.instances = instances
self.args = args
self.batch_count = math.ceil(len(instances)/args.batch_size)
def get_batch(self, index):
sentence_ids = []
sentences = []
sens_lens = []
token_ranges = []
bert_tokens = []
lengths = []
masks = []
aspect_tags = []
opinion_tags = []
tags = []
for i in range(index * self.args.batch_size,
min((index + 1) * self.args.batch_size, len(self.instances))):
sentence_ids.append(self.instances[i].id)
sentences.append(self.instances[i].sentence)
sens_lens.append(self.instances[i].sen_length)
token_ranges.append(self.instances[i].token_range)
bert_tokens.append(self.instances[i].bert_tokens_padding)
lengths.append(self.instances[i].length)
masks.append(self.instances[i].mask)
aspect_tags.append(self.instances[i].aspect_tags)
opinion_tags.append(self.instances[i].opinion_tags)
tags.append(self.instances[i].tags)
bert_tokens = torch.stack(bert_tokens).to(self.args.device)
lengths = torch.tensor(lengths).to(self.args.device)
masks = torch.stack(masks).to(self.args.device)
aspect_tags = torch.stack(aspect_tags).to(self.args.device)
opinion_tags = torch.stack(opinion_tags).to(self.args.device)
tags = torch.stack(tags).to(self.args.device)
return sentence_ids, bert_tokens, lengths, masks, sens_lens, token_ranges, aspect_tags, tags
| 7,269 | 36.864583 | 100 | py |
DMASTE | DMASTE-main/BARTABSA/peng/convert_to_triplets.py | from transformers import AutoTokenizer
import json
import numpy as np
def init_tokenizer():
tokenizer = AutoTokenizer.from_pretrained('facebook/bart-base')
unique_no_split_tokens = tokenizer.unique_no_split_tokens
tokenizer.unique_no_split_tokens = unique_no_split_tokens + ['[ia]']
tokenizer.add_tokens(['[ia]'])
mapping = { # so that the label word can be initialized in a better embedding.
'POS': '<<positive>>',
'NEG': '<<negative>>',
'NEU': '<<neutral>>'
}
cur_num_tokens = tokenizer.vocab_size
cur_num_token = cur_num_tokens
tokens_to_add = sorted(list(mapping.values()), key=lambda x:len(x), reverse=True)
unique_no_split_tokens = tokenizer.unique_no_split_tokens
sorted_add_tokens = sorted(list(tokens_to_add), key=lambda x:len(x), reverse=True)
for tok in sorted_add_tokens:
assert tokenizer.convert_tokens_to_ids([tok])[0]==tokenizer.unk_token_id
tokenizer.unique_no_split_tokens = unique_no_split_tokens + sorted_add_tokens
tokenizer.add_tokens(sorted_add_tokens)
mapping2id = {}
mapping2targetid = []
for key, value in mapping.items():
mapping2targetid.append(key)
return tokenizer
def convert_span_to_idx(tokenizer, sent, spans):
mapping2targetid = []
mapping = { # so that the label word can be initialized in a better embedding.
'POS': '<<positive>>',
'NEG': '<<negative>>',
'NEU': '<<neutral>>'
}
for key, value in mapping.items():
mapping2targetid.append(key)
triplets = []
raw_words = sent.split()
word_bpes = [[tokenizer.bos_token_id]]
for word in raw_words:
bpes = tokenizer.tokenize(word, add_prefix_space=True)
bpes = tokenizer.convert_tokens_to_ids(bpes)
word_bpes.append(bpes)
word_bpes.append([tokenizer.eos_token_id])
lens = list(map(len, word_bpes))
cum_lens = np.cumsum(list(lens)).tolist()
def subword2word(subword_idx, cum_lens):
for i in range(len(cum_lens)):
            if i < len(cum_lens) - 1 and cum_lens[i] <= subword_idx < cum_lens[i + 1]:  # stop before the last index so cum_lens[i + 1] stays in range
return i
return len(cum_lens) - 1
for span in spans:
target_shift = 5 # pos, neg, neu, <sos>, <eos>
new_spans = [subword2word(i - target_shift, cum_lens) for i in span[:4]]
a, o = new_spans[:2], new_spans[2:]
if not 0 <= span[-1] -2 < len(mapping2targetid):
print('invalid span', span)
continue
s = mapping2targetid[span[-1] - 2]
a = [a[0]] if a[0] == a[-1] else a
o = [o[0]] if o[0] == o[-1] else o
triplets.append((a, o, s))
return triplets
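# Note on the decoding above (not part of the original file): predicted spans are BPE-level
# indices offset by target_shift (the three sentiment tokens plus <sos>/<eos>); subword2word
# maps them back to word indices, and single-word aspect/opinion spans are collapsed to one
# index so they can be compared against the gold triplet format.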
def convert(tokenizer, examples, all_pred_spans):
tp = 0
pred_num = 0
gold_num = 0
all_pred_triplets = []
for example, pred_spans in zip(examples, all_pred_spans):
func = lambda triplets: set([' '.join([str(a), str(o), s]) for a, o, s in triplets])
sent, golden_spans = example
golden_triplets = func(golden_spans)
# print('golden triplets', golden_triplets)
# print('pred spans', pred_spans)
origin_pred_triplets = convert_span_to_idx(tokenizer, sent, pred_spans)
pred_triplets = func(origin_pred_triplets)
all_pred_triplets.append(origin_pred_triplets)
# print('pred triplets', pred_triplets)
# print()
# tp += len(pred_triplets & golden_triplets)
# pred_num += len(pred_triplets)
# gold_num += len(golden_triplets)
# precision = tp / pred_num if pred_num != 0 else 0
# recall = tp / gold_num if gold_num != 0 else 0
# f1 = 2 * precision * recall / (precision + recall) if precision + recall != 0 else 0
# print('precision', precision, 'recall', recall, 'f1', f1)
return all_pred_triplets
def main():
all_pred_spans = []
examples = []
tokenizer = init_tokenizer()
with open('../../ia-dataset/fashion/test.txt') as f:
for line in f:
sent, triplets = line.split('####')
triplets = eval(triplets)
examples.append([sent, triplets])
with open('tmp.txt') as f:
for line in f:
all_pred_spans.append(eval(line))
convert(tokenizer, examples, all_pred_spans)
if __name__ == '__main__':
main() | 4,318 | 37.5625 | 92 | py |
DMASTE | DMASTE-main/BARTABSA/peng/__init__.py | 0 | 0 | 0 | py |
|
DMASTE | DMASTE-main/BARTABSA/peng/train.py | import sys
sys.path.append('../')
import os
if 'p' in os.environ:
os.environ['CUDA_VISIBLE_DEVICES'] = os.environ['p']
# os.environ['CUDA_VISIBLE_DEVICES'] = '7'
import warnings
warnings.filterwarnings('ignore')
from data.pipe import BartBPEABSAPipe
from peng.model.bart_absa import BartSeq2SeqModel
from fastNLP import Trainer, Tester
from peng.model.metrics import Seq2SeqSpanMetric
from peng.model.losses import Seq2SeqLoss
from torch import optim
from fastNLP import BucketSampler, GradientClipCallback, cache_results, WarmupCallback
from fastNLP import FitlogCallback
from fastNLP.core.sampler import SortedSampler
from peng.model.generator import SequenceGeneratorModel
from peng.convert_to_triplets import convert
import fitlog
# fitlog.debug()
os.makedirs('logs', exist_ok=True)
fitlog.set_log_dir('logs')
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--source', type=str)
parser.add_argument('--target', type=str)
parser.add_argument('--lr', default=5e-5, type=float)
parser.add_argument('--batch_size', default=16, type=int)
parser.add_argument('--num_beams', default=4, type=int)
parser.add_argument('--opinion_first', action='store_true', default=False)
parser.add_argument('--n_epochs', default=50, type=int)
parser.add_argument('--decoder_type', default='avg_score', type=str, choices=['None', 'avg_score'])
parser.add_argument('--length_penalty', default=1.0, type=float)
parser.add_argument('--bart_name', default='facebook/bart-base', type=str)
parser.add_argument('--save_model_dir', type=str)
parser.add_argument('--model_name', type=str)
parser.add_argument('--use_encoder_mlp', type=int, default=1)
parser.add_argument('--save_model', type=int, default=0)
parser.add_argument('--mode', type=str, choices=['train', 'test'])
parser.add_argument('--log_dir', type=str)
args= parser.parse_args()
lr = args.lr
n_epochs = args.n_epochs
batch_size = args.batch_size
num_beams = args.num_beams
source = args.source
target = args.target
opinion_first = args.opinion_first
length_penalty = args.length_penalty
if isinstance(args.decoder_type, str) and args.decoder_type.lower() == 'none':
args.decoder_type = None
decoder_type = args.decoder_type
bart_name = args.bart_name
use_encoder_mlp = args.use_encoder_mlp
save_model = args.save_model
fitlog.add_hyper(args)
#######hyper
#######hyper
# @cache_results(cache_fn, _refresh=False)
def get_data(dataset_name):
demo=False
cache_fn = f"caches/data_{bart_name}_{dataset_name}_{opinion_first}.pt"
@cache_results(cache_fn, _refresh=False)
def func():
pipe = BartBPEABSAPipe(tokenizer=bart_name, opinion_first=opinion_first)
data_bundle = pipe.process_from_file(f'../data/{dataset_name}', demo=demo)
return data_bundle, pipe.tokenizer, pipe.mapping2id
return func()
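# Note: get_data caches the tokenised DataBundle under caches/ via fastNLP's
# cache_results decorator, keyed by bart_name, dataset name and opinion_first, so
# only the first run per configuration pays the preprocessing cost.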
source_data_bundle, tokenizer, mapping2id = get_data(source)
max_len = 10
max_len_a = 1.5
print("The number of tokens in tokenizer ", len(tokenizer.decoder))
bos_token_id = 0  # id of the special start-of-sequence position in the target vocabulary
eos_token_id = 1  # id of the special end-of-sequence position in the target vocabulary
label_ids = list(mapping2id.values())
model = BartSeq2SeqModel.build_model(bart_name, tokenizer, label_ids=label_ids, decoder_type=decoder_type,
copy_gate=False, use_encoder_mlp=use_encoder_mlp, use_recur_pos=False)
vocab_size = len(tokenizer)
print(vocab_size, model.decoder.decoder.embed_tokens.weight.data.size(0))
model = SequenceGeneratorModel(model, bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
max_length=max_len, max_len_a=max_len_a,num_beams=num_beams, do_sample=False,
repetition_penalty=1, length_penalty=length_penalty, pad_token_id=eos_token_id,
restricter=None)
import torch
if torch.cuda.is_available():
# device = list([i for i in range(torch.cuda.device_count())])
device = 'cuda'
else:
device = 'cpu'
parameters = []
params = {'lr':lr, 'weight_decay':1e-2}
params['params'] = [param for name, param in model.named_parameters() if not ('bart_encoder' in name or 'bart_decoder' in name)]
parameters.append(params)
params = {'lr':lr, 'weight_decay':1e-2}
params['params'] = []
for name, param in model.named_parameters():
if ('bart_encoder' in name or 'bart_decoder' in name) and not ('layernorm' in name or 'layer_norm' in name):
params['params'].append(param)
parameters.append(params)
params = {'lr':lr, 'weight_decay':0}
params['params'] = []
for name, param in model.named_parameters():
if ('bart_encoder' in name or 'bart_decoder' in name) and ('layernorm' in name or 'layer_norm' in name):
params['params'].append(param)
parameters.append(params)
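# The three parameter groups above implement the usual AdamW split: (1) the newly
# added modules outside the pretrained encoder/decoder and (2) the pretrained BART
# weights both use weight decay 1e-2, while (3) BART's layer-norm parameters are
# exempted from weight decay.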
optimizer = optim.AdamW(parameters)
callbacks = []
callbacks.append(GradientClipCallback(clip_value=5, clip_type='value'))
callbacks.append(WarmupCallback(warmup=0.01, schedule='linear'))
callbacks.append(FitlogCallback())
sampler = None
# sampler = ConstTokenNumSampler('src_seq_len', max_token=1000)
sampler = BucketSampler(seq_len_field_name='src_seq_len')
metric = Seq2SeqSpanMetric(eos_token_id, num_labels=len(label_ids), opinion_first=opinion_first)
model_path = None
if save_model:
model_path = 'save_models/'
if args.mode == 'train':
trainer = Trainer(train_data=source_data_bundle.get_dataset('train'), model=model, optimizer=optimizer,
loss=Seq2SeqLoss(),
batch_size=batch_size, sampler=sampler, drop_last=False, update_every=1,
num_workers=2, n_epochs=n_epochs, print_every=1,
dev_data=source_data_bundle.get_dataset('dev'), metrics=metric, metric_key='triple_f',
validate_every=-1, save_path=model_path, use_tqdm=True, device=device,
callbacks=callbacks, check_code_level=0, test_use_tqdm=False,
test_sampler=SortedSampler('src_seq_len'), dev_batch_size=batch_size)
trainer.train(load_best_model=True)
os.makedirs(args.save_model_dir, exist_ok=True)
torch.save(model, f'{args.save_model_dir}/{args.model_name}.pt')
elif args.mode == 'test':
target_data_bundle, _, _ = get_data(target)
model = torch.load(f'{args.save_model_dir}/{args.model_name}.pt')
tester = Tester(data=target_data_bundle.get_dataset('test'), model=model, metrics=metric, batch_size=args.batch_size,
num_workers=2, device=device, use_tqdm=True, callbacks=callbacks)
res = tester.test()
fitlog.add_best_metric(value=res, name='test')
os.makedirs(os.path.join(args.log_dir, args.model_name), exist_ok=True)
log_file = f'{args.log_dir}/{args.model_name}/metric.txt'
with open(log_file, 'w') as f:
import json
f.write(json.dumps(res) + '\n')
pred = metric.get_pred()
examples = []
with open(f'../../ia-dataset/{target}/test.txt') as f:
for line in f:
sent, triplets = line.split('####')
triplets = eval(triplets)
examples.append([sent, triplets])
pred = convert(tokenizer, examples, pred)
with open(f'{args.log_dir}/{args.model_name}/pred.txt', 'w') as f:
for ts in pred:
f.write(str(ts) + '\n')
| 7,183 | 37.623656 | 128 | py |
DMASTE | DMASTE-main/BARTABSA/peng/data/pipe.py | from fastNLP.io import Pipe, DataBundle, Loader
import os
import json
from fastNLP import DataSet, Instance
from transformers import AutoTokenizer
import numpy as np
from itertools import chain
from functools import cmp_to_key
def cmp_aspect(v1, v2):
if v1[0]['from']==v2[0]['from']:
return v1[1]['from'] - v2[1]['from']
return v1[0]['from'] - v2[0]['from']
def cmp_opinion(v1, v2):
if v1[1]['from']==v2[1]['from']:
return v1[0]['from'] - v2[0]['from']
return v1[1]['from'] - v2[1]['from']
class BartBPEABSAPipe(Pipe):
def __init__(self, tokenizer='facebook/bart-base', opinion_first=True):
super(BartBPEABSAPipe, self).__init__()
self.tokenizer = AutoTokenizer.from_pretrained(tokenizer)
unique_no_split_tokens = self.tokenizer.unique_no_split_tokens
self.tokenizer.unique_no_split_tokens = unique_no_split_tokens + ['[ia]']
self.tokenizer.add_tokens(['[ia]'])
self.mapping = { # so that the label word can be initialized in a better embedding.
'POS': '<<positive>>',
'NEG': '<<negative>>',
'NEU': '<<neutral>>'
}
        self.opinion_first = opinion_first  # whether to generate the opinion span before the aspect span
cur_num_tokens = self.tokenizer.vocab_size
self.cur_num_token = cur_num_tokens
tokens_to_add = sorted(list(self.mapping.values()), key=lambda x:len(x), reverse=True)
unique_no_split_tokens = self.tokenizer.unique_no_split_tokens
sorted_add_tokens = sorted(list(tokens_to_add), key=lambda x:len(x), reverse=True)
for tok in sorted_add_tokens:
assert self.tokenizer.convert_tokens_to_ids([tok])[0]==self.tokenizer.unk_token_id
self.tokenizer.unique_no_split_tokens = unique_no_split_tokens + sorted_add_tokens
self.tokenizer.add_tokens(sorted_add_tokens)
self.mapping2id = {}
self.mapping2targetid = {}
for key, value in self.mapping.items():
key_id = self.tokenizer.convert_tokens_to_ids(self.tokenizer.tokenize(value))
assert len(key_id) == 1, value
assert key_id[0] >= cur_num_tokens
self.mapping2id[key] = key_id[0]
self.mapping2targetid[key] = len(self.mapping2targetid)
def process(self, data_bundle: DataBundle) -> DataBundle:
"""
words: List[str]
aspects: [{
'index': int
'from': int
'to': int
'polarity': str
'term': List[str]
}],
opinions: [{
'index': int
'from': int
'to': int
'term': List[str]
}]
        The output is [o_s, o_e, a_s, a_e, c] or [a_s, a_e, o_s, o_e, c].
:param data_bundle:
:return:
"""
        target_shift = len(self.mapping) + 2  # position 0 is sos, position 1 is eos, and the label tokens come right after
def prepare_target(ins):
raw_words = ins['raw_words']
word_bpes = [[self.tokenizer.bos_token_id]]
for word in raw_words:
bpes = self.tokenizer.tokenize(word, add_prefix_space=True)
bpes = self.tokenizer.convert_tokens_to_ids(bpes)
word_bpes.append(bpes)
word_bpes.append([self.tokenizer.eos_token_id])
lens = list(map(len, word_bpes))
cum_lens = np.cumsum(list(lens)).tolist()
            target = [0]  # the special start (sos) position
target_spans = []
_word_bpes = list(chain(*word_bpes))
aspects_opinions = [(a, o) for a, o in zip(ins['aspects'], ins['opinions'])]
if self.opinion_first:
aspects_opinions = sorted(aspects_opinions, key=cmp_to_key(cmp_opinion))
else:
aspects_opinions = sorted(aspects_opinions, key=cmp_to_key(cmp_aspect))
            for aspects, opinions in aspects_opinions:  # predict the start of each BPE span
assert aspects['index'] == opinions['index']
                a_start_bpe = cum_lens[aspects['from']]  # shifted by one position because of the leading sos
                a_end_bpe = cum_lens[aspects['to']-1]  # 'to' is exclusive, so this lands on the start of the last word
                o_start_bpe = cum_lens[opinions['from']]  # shifted by one position because of the leading sos
                o_end_bpe = cum_lens[opinions['to']-1]  # shifted by one position because of the leading sos
                # check that the BPE indices are aligned with the original words
for idx, word in zip((o_start_bpe, o_end_bpe, a_start_bpe, a_end_bpe),
(opinions['term'][0], opinions['term'][-1], aspects['term'][0], aspects['term'][-1])):
assert _word_bpes[idx] == self.tokenizer.convert_tokens_to_ids(self.tokenizer.tokenize(word, add_prefix_space=True)[:1])[0] or \
_word_bpes[idx] == self.tokenizer.convert_tokens_to_ids(self.tokenizer.tokenize(word, add_prefix_space=True)[-1:])[0]
if self.opinion_first:
target_spans.append([o_start_bpe+target_shift, o_end_bpe+target_shift,
a_start_bpe+target_shift, a_end_bpe+target_shift])
else:
target_spans.append([a_start_bpe+target_shift, a_end_bpe+target_shift,
o_start_bpe+target_shift, o_end_bpe+target_shift])
                target_spans[-1].append(self.mapping2targetid[aspects['polarity']]+2)  # +2 because sos and eos occupy the first two target ids
target_spans[-1] = tuple(target_spans[-1])
target.extend(list(chain(*target_spans)))
            target.append(1)  # append 1 for the special eos
return {'tgt_tokens': target, 'target_span': target_spans, 'src_tokens': list(chain(*word_bpes))}
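        # Worked sketch (not from the original file) of what prepare_target builds,
        # assuming the toy sentence "service was great" with aspect "service"
        # (words [0, 1)), opinion "great" (words [2, 3)), polarity POS, and each
        # word mapping to a single BPE piece:
        #   cum_lens     -> [1, 2, 3, 4, 5]
        #   target_shift -> 3 labels + 2 = 5
        #   target_span  -> (1+5, 1+5, 3+5, 3+5, mapping2targetid['POS']+2) = (6, 6, 8, 8, 2)
        #   tgt_tokens   -> [0, 6, 6, 8, 8, 2, 1]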
# if len(data_bundle) == 0:
# return data_bundle
data_bundle.apply_more(prepare_target, use_tqdm=True, tqdm_desc='Pre. tgt.')
data_bundle.set_ignore_type('target_span')
        data_bundle.set_pad_val('tgt_tokens', 1)  # pad with the id used for eos
data_bundle.set_pad_val('src_tokens', self.tokenizer.pad_token_id)
data_bundle.apply_field(lambda x: len(x), field_name='src_tokens', new_field_name='src_seq_len')
data_bundle.apply_field(lambda x: len(x), field_name='tgt_tokens', new_field_name='tgt_seq_len')
data_bundle.set_input('tgt_tokens', 'src_tokens', 'src_seq_len', 'tgt_seq_len')
data_bundle.set_target('tgt_tokens', 'tgt_seq_len', 'target_span')
return data_bundle
def process_from_file(self, paths, demo=False) -> DataBundle:
"""
        :param paths: supported path types are the same as those accepted by the load function of :class:`fastNLP.io.loader.ConllLoader`.
:return: DataBundle
"""
        # load the data
data_bundle = ABSALoader(demo=demo).load(paths)
data_bundle = self.process(data_bundle)
return data_bundle
class ABSALoader(Loader):
def __init__(self, demo=False):
super().__init__()
self.demo = demo
def _load(self, path):
with open(path, 'r', encoding='utf-8') as f:
data = json.load(f)
ds = DataSet()
for ins in data:
tokens = ins['words']
aspects = ins['aspects']
opinions = ins['opinions']
assert len(aspects)==len(opinions)
ins = Instance(raw_words=tokens, aspects=aspects, opinions=opinions)
ds.append(ins)
if self.demo and len(ds)>30:
break
return ds
if __name__ == '__main__':
data_bundle = BartBPEABSAPipe().process_from_file('pengb/16res')
print(data_bundle)
| 7,390 | 40.757062 | 148 | py |
DMASTE | DMASTE-main/BARTABSA/peng/data/__init__.py | 0 | 0 | 0 | py |
|
DMASTE | DMASTE-main/BARTABSA/peng/model/losses.py |
from fastNLP import LossBase
import torch.nn.functional as F
from fastNLP import seq_len_to_mask
class Seq2SeqLoss(LossBase):
def __init__(self):
super().__init__()
def get_loss(self, tgt_tokens, tgt_seq_len, pred):
"""
:param tgt_tokens: bsz x max_len, [sos, tokens, eos]
:param pred: bsz x max_len-1 x vocab_size
:return:
"""
tgt_seq_len = tgt_seq_len - 1
mask = seq_len_to_mask(tgt_seq_len, max_len=tgt_tokens.size(1) - 1).eq(0)
tgt_tokens = tgt_tokens[:, 1:].masked_fill(mask, -100)
loss = F.cross_entropy(target=tgt_tokens, input=pred.transpose(1, 2))
return loss
| 671 | 27 | 81 | py |
DMASTE | DMASTE-main/BARTABSA/peng/model/utils.py | import numpy as np
def get_max_len_max_len_a(data_bundle, max_len=10):
"""
:param data_bundle:
:param max_len:
:return:
"""
max_len_a = -1
for name, ds in data_bundle.iter_datasets():
if name=='train':continue
src_seq_len = np.array(ds.get_field('src_seq_len').content)
tgt_seq_len = np.array(ds.get_field('tgt_seq_len').content)
_len_a = round(max(np.maximum(tgt_seq_len - max_len+2, 0)/src_seq_len), 1)
if _len_a>max_len_a:
max_len_a = _len_a
return max_len, max_len_a
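# Hedged reading of the formula above: max_len_a is chosen so that, on the dev and
# test splits, a generation budget of max_len_a * src_seq_len + max_len is long
# enough for the longest target sequence, with a margin of roughly two positions.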
def get_num_parameters(model):
num_param = 0
for name, param in model.named_parameters():
num_param += np.prod(param.size())
print(f"The number of parameters is {num_param}") | 756 | 26.035714 | 82 | py |
DMASTE | DMASTE-main/BARTABSA/peng/model/bart_absa.py | import torch
from .modeling_bart import BartEncoder, BartDecoder, BartModel
from transformers import BartTokenizer
from fastNLP import seq_len_to_mask
from fastNLP.modules import Seq2SeqEncoder, Seq2SeqDecoder, State
import torch.nn.functional as F
from fastNLP.models import Seq2SeqModel
from torch import nn
import math
class FBartEncoder(Seq2SeqEncoder):
def __init__(self, encoder):
super().__init__()
assert isinstance(encoder, BartEncoder)
self.bart_encoder = encoder
def forward(self, src_tokens, src_seq_len):
mask = seq_len_to_mask(src_seq_len, max_len=src_tokens.size(1))
dict = self.bart_encoder(input_ids=src_tokens, attention_mask=mask, return_dict=True,
output_hidden_states=True)
encoder_outputs = dict.last_hidden_state
hidden_states = dict.hidden_states
return encoder_outputs, mask, hidden_states
class FBartDecoder(Seq2SeqDecoder):
def __init__(self, decoder, pad_token_id, label_ids, use_encoder_mlp=True):
super().__init__()
assert isinstance(decoder, BartDecoder)
self.decoder = decoder
causal_mask = torch.zeros(512, 512).fill_(float('-inf'))
causal_mask = causal_mask.triu(diagonal=1)
self.register_buffer('causal_masks', causal_mask.float())
self.pad_token_id = pad_token_id
self.label_start_id = label_ids[0]
self.label_end_id = label_ids[-1]+1
# 0th position is <s>, 1st position is </s>
mapping = torch.LongTensor([0, 2]+sorted(label_ids, reverse=False))
self.register_buffer('mapping', mapping)
        self.src_start_index = len(mapping)  # target ids at or beyond this index refer back to source tokens
hidden_size = decoder.embed_tokens.weight.size(1)
if use_encoder_mlp:
self.encoder_mlp = nn.Sequential(nn.Linear(hidden_size, hidden_size),
nn.Dropout(0.3),
nn.ReLU(),
nn.Linear(hidden_size, hidden_size))
def forward(self, tokens, state):
# bsz, max_len = tokens.size()
encoder_outputs = state.encoder_output
encoder_pad_mask = state.encoder_mask
first = state.first
# eos is 1
cumsum = tokens.eq(1).flip(dims=[1]).cumsum(dim=-1)
tgt_pad_mask = cumsum.flip(dims=[1]).ne(cumsum[:, -1:])
# mapping to the BART token index
mapping_token_mask = tokens.lt(self.src_start_index) #
mapped_tokens = tokens.masked_fill(tokens.ge(self.src_start_index), 0)
tag_mapped_tokens = self.mapping[mapped_tokens]
src_tokens_index = tokens - self.src_start_index # bsz x num_src_token
src_tokens_index = src_tokens_index.masked_fill(src_tokens_index.lt(0), 0)
src_tokens = state.src_tokens
if first is not None:
src_tokens = src_tokens.gather(index=first, dim=1)
word_mapped_tokens = src_tokens.gather(index=src_tokens_index, dim=1)
tokens = torch.where(mapping_token_mask, tag_mapped_tokens, word_mapped_tokens)
tokens = tokens.masked_fill(tgt_pad_mask, self.pad_token_id)
if self.training:
tokens = tokens[:, :-1]
decoder_pad_mask = tokens.eq(self.pad_token_id)
dict = self.decoder(input_ids=tokens,
encoder_hidden_states=encoder_outputs,
encoder_padding_mask=encoder_pad_mask,
decoder_padding_mask=decoder_pad_mask,
decoder_causal_mask=self.causal_masks[:tokens.size(1), :tokens.size(1)],
return_dict=True)
else:
past_key_values = state.past_key_values
dict = self.decoder(input_ids=tokens,
encoder_hidden_states=encoder_outputs,
encoder_padding_mask=encoder_pad_mask,
decoder_padding_mask=None,
decoder_causal_mask=None,
past_key_values=past_key_values,
use_cache=True,
return_dict=True)
hidden_state = dict.last_hidden_state # bsz x max_len x hidden_size
if not self.training:
state.past_key_values = dict.past_key_values
logits = hidden_state.new_full((hidden_state.size(0), hidden_state.size(1), self.src_start_index+src_tokens.size(-1)),
fill_value=-1e24)
# first get the
eos_scores = F.linear(hidden_state, self.decoder.embed_tokens.weight[2:3]) # bsz x max_len x 1
tag_scores = F.linear(hidden_state, self.decoder.embed_tokens.weight[self.label_start_id:self.label_end_id]) # bsz x max_len x num_class
# bsz x max_word_len x hidden_size
src_outputs = state.encoder_output
if hasattr(self, 'encoder_mlp'):
src_outputs = self.encoder_mlp(src_outputs)
if first is not None:
            mask = first.eq(0)  # bsz x 1 x max_word_len, positions where the mask is 1 are padding
src_outputs = src_outputs.gather(index=first.unsqueeze(2).repeat(1, 1, src_outputs.size(-1)), dim=1)
else:
mask = state.encoder_mask.eq(0)
mask = mask.unsqueeze(1).__or__(src_tokens.eq(2).cumsum(dim=1).ge(1).unsqueeze(1))
word_scores = torch.einsum('blh,bnh->bln', hidden_state, src_outputs) # bsz x max_len x max_word_len
word_scores = word_scores.masked_fill(mask, -1e32)
logits[:, :, 1:2] = eos_scores
logits[:, :, 2:self.src_start_index] = tag_scores
logits[:, :, self.src_start_index:] = word_scores
return logits
def decode(self, tokens, state):
return self(tokens, state)[:, -1]
class CaGFBartDecoder(FBartDecoder):
# Copy and generate,
def __init__(self, decoder, pad_token_id, label_ids, use_encoder_mlp=False):
super().__init__(decoder, pad_token_id, label_ids, use_encoder_mlp=use_encoder_mlp)
def forward(self, tokens, state):
encoder_outputs = state.encoder_output
encoder_pad_mask = state.encoder_mask
first = state.first
cumsum = tokens.eq(1).flip(dims=[1]).cumsum(dim=-1)
tgt_pad_mask = cumsum.flip(dims=[1]).ne(cumsum[:, -1:])
mapping_token_mask = tokens.lt(self.src_start_index)
mapped_tokens = tokens.masked_fill(tokens.ge(self.src_start_index), 0)
tag_mapped_tokens = self.mapping[mapped_tokens]
src_tokens_index = tokens - self.src_start_index # bsz x num_src_token
src_tokens_index = src_tokens_index.masked_fill(src_tokens_index.lt(0), 0)
src_tokens = state.src_tokens
if first is not None:
src_tokens = src_tokens.gather(index=first, dim=1)
word_mapped_tokens = src_tokens.gather(index=src_tokens_index, dim=1)
tokens = torch.where(mapping_token_mask, tag_mapped_tokens, word_mapped_tokens) # bsz x max_len
tokens = tokens.masked_fill(tgt_pad_mask, self.pad_token_id)
if self.training:
tokens = tokens[:, :-1]
            decoder_pad_mask = tokens.eq(self.pad_token_id)  # the decoder expects pad positions to be marked with 1
dict = self.decoder(input_ids=tokens,
encoder_hidden_states=encoder_outputs,
encoder_padding_mask=encoder_pad_mask,
decoder_padding_mask=decoder_pad_mask,
decoder_causal_mask=self.causal_masks[:tokens.size(1), :tokens.size(1)],
return_dict=True)
else:
past_key_values = state.past_key_values
dict = self.decoder(input_ids=tokens,
encoder_hidden_states=encoder_outputs,
encoder_padding_mask=encoder_pad_mask,
decoder_padding_mask=None,
decoder_causal_mask=None,
past_key_values=past_key_values,
use_cache=True,
return_dict=True)
hidden_state = dict.last_hidden_state # bsz x max_len x hidden_size
if not self.training:
state.past_key_values = dict.past_key_values
logits = hidden_state.new_full((hidden_state.size(0), hidden_state.size(1), self.src_start_index+src_tokens.size(-1)),
fill_value=-1e24)
eos_scores = F.linear(hidden_state, self.decoder.embed_tokens.weight[2:3]) # bsz x max_len x 1
tag_scores = F.linear(hidden_state, self.decoder.embed_tokens.weight[self.label_start_id:self.label_end_id]) # bsz x max_len x num_class
# bsz x max_bpe_len x hidden_size
src_outputs = state.encoder_output
if hasattr(self, 'encoder_mlp'):
src_outputs = self.encoder_mlp(src_outputs)
if first is not None:
            mask = first.eq(0)  # bsz x 1 x max_word_len, positions where the mask is 1 are padding
# bsz x max_word_len x hidden_size
src_outputs = src_outputs.gather(index=first.unsqueeze(2).repeat(1, 1, src_outputs.size(-1)), dim=1)
else:
mask = state.encoder_mask.eq(0)
# src_outputs = self.decoder.embed_tokens(src_tokens)
mask = mask.unsqueeze(1)
input_embed = self.decoder.embed_tokens(src_tokens) # bsz x max_word_len x hidden_size
word_scores = torch.einsum('blh,bnh->bln', hidden_state, src_outputs) # bsz x max_len x max_word_len
gen_scores = torch.einsum('blh,bnh->bln', hidden_state, input_embed) # bsz x max_len x max_word_len
word_scores = (gen_scores + word_scores)/2
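        # "Copy and generate": word_scores scores the decoder state against the
        # encoder outputs of the source positions (copy), while gen_scores scores
        # it against the token embeddings of the same source tokens (generate);
        # averaging the two is what gives this decoder its CaG name.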
mask = mask.__or__(src_tokens.eq(2).cumsum(dim=1).ge(1).unsqueeze(1))
word_scores = word_scores.masked_fill(mask, -1e32)
logits[:, :, 1:2] = eos_scores
logits[:, :, 2:self.src_start_index] = tag_scores
logits[:, :, self.src_start_index:] = word_scores
return logits
class BartSeq2SeqModel(Seq2SeqModel):
@classmethod
def build_model(cls, bart_model, tokenizer, label_ids, decoder_type=None, copy_gate=False,
use_encoder_mlp=False, use_recur_pos=False, tag_first=False):
model = BartModel.from_pretrained(bart_model)
num_tokens, _ = model.encoder.embed_tokens.weight.shape
model.resize_token_embeddings(len(tokenizer.unique_no_split_tokens)+num_tokens)
encoder = model.encoder
decoder = model.decoder
if use_recur_pos:
decoder.set_position_embedding(label_ids[0], tag_first)
_tokenizer = BartTokenizer.from_pretrained(bart_model)
for token in tokenizer.unique_no_split_tokens:
if token[:2] == '<<':
index = tokenizer.convert_tokens_to_ids(tokenizer.tokenize(token))
if len(index)>1:
raise RuntimeError(f"{token} wrong split")
else:
index = index[0]
assert index>=num_tokens, (index, num_tokens, token)
indexes = _tokenizer.convert_tokens_to_ids(_tokenizer.tokenize(token[2:-2]))
embed = model.encoder.embed_tokens.weight.data[indexes[0]]
for i in indexes[1:]:
embed += model.decoder.embed_tokens.weight.data[i]
embed /= len(indexes)
model.decoder.embed_tokens.weight.data[index] = embed
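        # The loop above initialises each added <<label>> token embedding with the
        # average of the pretrained BART embeddings of its inner word's BPE pieces
        # (e.g. <<positive>> from "positive"), matching the "better embedding"
        # note in pipe.py.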
encoder = FBartEncoder(encoder)
label_ids = sorted(label_ids)
if decoder_type is None:
assert copy_gate is False
decoder = FBartDecoder(decoder, pad_token_id=tokenizer.pad_token_id, label_ids=label_ids)
elif decoder_type =='avg_score':
decoder = CaGFBartDecoder(decoder, pad_token_id=tokenizer.pad_token_id, label_ids=label_ids,
use_encoder_mlp=use_encoder_mlp)
else:
raise RuntimeError("Unsupported feature.")
return cls(encoder=encoder, decoder=decoder)
def prepare_state(self, src_tokens, src_seq_len=None, first=None, tgt_seq_len=None):
encoder_outputs, encoder_mask, hidden_states = self.encoder(src_tokens, src_seq_len)
src_embed_outputs = hidden_states[0]
state = BartState(encoder_outputs, encoder_mask, src_tokens, first, src_embed_outputs)
# setattr(state, 'tgt_seq_len', tgt_seq_len)
return state
def forward(self, src_tokens, tgt_tokens, src_seq_len, tgt_seq_len, first):
"""
        :param torch.LongTensor src_tokens: source tokens
        :param torch.LongTensor tgt_tokens: target tokens
        :param torch.LongTensor first: indices of the first BPE piece of each word (may be None), bsz x max_word_len
        :param torch.LongTensor src_seq_len: lengths of the source sequences
        :param torch.LongTensor tgt_seq_len: lengths of the target sequences (not used by default)
        :return: {'pred': torch.Tensor}, where pred has shape bsz x max_len x vocab_size
"""
state = self.prepare_state(src_tokens, src_seq_len, first, tgt_seq_len)
decoder_output = self.decoder(tgt_tokens, state)
if isinstance(decoder_output, torch.Tensor):
return {'pred': decoder_output}
elif isinstance(decoder_output, (tuple, list)):
return {'pred': decoder_output[0]}
else:
raise TypeError(f"Unsupported return type from Decoder:{type(self.decoder)}")
class BartState(State):
def __init__(self, encoder_output, encoder_mask, src_tokens, first, src_embed_outputs):
super().__init__(encoder_output, encoder_mask)
self.past_key_values = None
self.src_tokens = src_tokens
self.first = first
self.src_embed_outputs = src_embed_outputs
def reorder_state(self, indices: torch.LongTensor):
super().reorder_state(indices)
self.src_tokens = self._reorder_state(self.src_tokens, indices)
if self.first is not None:
self.first = self._reorder_state(self.first, indices)
self.src_embed_outputs = self._reorder_state(self.src_embed_outputs, indices)
if self.past_key_values is not None:
new = []
for layer in self.past_key_values:
new_layer = {}
for key1 in list(layer.keys()):
new_layer_ = {}
for key2 in list(layer[key1].keys()):
if layer[key1][key2] is not None:
layer[key1][key2] = self._reorder_state(layer[key1][key2], indices)
# print(key1, key2, layer[key1][key2].shape)
new_layer_[key2] = layer[key1][key2]
new_layer[key1] = new_layer_
new.append(new_layer)
self.past_key_values = new | 14,844 | 46.428115 | 145 | py |
DMASTE | DMASTE-main/BARTABSA/peng/model/modeling_bart.py | # coding=utf-8
# Copyright 2020 The Facebook AI Research Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BART model, ported from the fairseq repo."""
import math
import random
import warnings
from typing import Dict, List, Optional, Tuple
import numpy as np
import torch
import torch.nn.functional as F
from torch import Tensor, nn
from torch.nn import CrossEntropyLoss
from transformers.modeling_bart import *
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "BartConfig"
_TOKENIZER_FOR_DOC = "BartTokenizer"
BART_PRETRAINED_MODEL_ARCHIVE_LIST = [
"facebook/bart-base",
"facebook/bart-large",
"facebook/bart-large-mnli",
"facebook/bart-large-cnn",
"facebook/bart-large-xsum",
"facebook/mbart-large-en-ro",
]
# This list is incomplete. See all BART models at https://huggingface.co/models?filter=bart
BART_START_DOCSTRING = r"""
This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic
methods the library implements for all its model (such as downloading or saving, resizing the input embeddings,
pruning heads etc.)
This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__ subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general
usage and behavior.
Parameters:
config (:class:`~transformers.BartConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the configuration.
Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
BART_GENERATION_EXAMPLE = r"""
Summarization example::
>>> from transformers import BartTokenizer, BartForConditionalGeneration, BartConfig
>>> # see ``examples/summarization/bart/run_eval.py`` for a longer example
>>> model = BartForConditionalGeneration.from_pretrained('facebook/bart-large-cnn')
>>> tokenizer = BartTokenizer.from_pretrained('facebook/bart-large-cnn')
>>> ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
>>> inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors='pt')
>>> # Generate Summary
>>> summary_ids = model.generate(inputs['input_ids'], num_beams=4, max_length=5, early_stopping=True)
>>> print([tokenizer.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=False) for g in summary_ids])
"""
BART_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
it.
Indices can be obtained using :class:`~transformers.BartTokenizer`.
See :meth:`transformers.PreTrainedTokenizer.encode` and
:meth:`transformers.PreTrainedTokenizer.__call__` for details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
decoder_input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, target_sequence_length)`, `optional`):
Provide for translation and summarization training. By default, the model will create this tensor by
shifting the :obj:`input_ids` to the right, following the paper.
decoder_attention_mask (:obj:`torch.BoolTensor` of shape :obj:`(batch_size, tgt_seq_len)`, `optional`):
Default behavior: generate a tensor that ignores pad tokens in :obj:`decoder_input_ids`. Causal mask will
also be used by default.
If you want to change padding behavior, you should read :func:`modeling_bart._prepare_decoder_inputs` and
modify to your needs. See diagram 1 in `the paper <https://arxiv.org/abs/1910.13461>`__ for more
information on the default strategy.
encoder_outputs (:obj:`tuple(tuple(torch.FloatTensor)`, `optional`):
Tuple consists of (:obj:`last_hidden_state`, `optional`: :obj:`hidden_states`, `optional`: :obj:`attentions`)
:obj:`last_hidden_state` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`) is a
sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of
the decoder.
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last
``decoder_input_ids`` (those that don't have their past key value states given to this model) of shape
:obj:`(batch_size, 1)` instead of all ``decoder_input_ids`` of shape :obj:`(batch_size, sequence_length)`.
use_cache (:obj:`bool`, `optional`):
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
decoding (see :obj:`past_key_values`).
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
def invert_mask(attention_mask):
"""Turns 1->0, 0->1, False->True, True-> False"""
assert attention_mask.dim() == 2
return attention_mask.eq(0)
def _prepare_bart_decoder_inputs(
config, input_ids, decoder_input_ids=None, decoder_padding_mask=None, causal_mask_dtype=torch.float32
):
"""Prepare masks that ignore padding tokens in the decoder and a causal mask for the decoder if
none are provided. This mimics the default behavior in fairseq. To override it pass in masks.
Note: this is not called during generation
"""
pad_token_id = config.pad_token_id
if decoder_input_ids is None:
decoder_input_ids = shift_tokens_right(input_ids, pad_token_id)
bsz, tgt_len = decoder_input_ids.size()
if decoder_padding_mask is None:
decoder_padding_mask = make_padding_mask(decoder_input_ids, pad_token_id)
else:
decoder_padding_mask = invert_mask(decoder_padding_mask)
if decoder_padding_mask is not None and decoder_padding_mask.shape[1] > 1:
# never mask leading token, even if it is pad
decoder_padding_mask[:, 0] = decoder_padding_mask[:, 1]
tmp = fill_with_neg_inf(torch.zeros(tgt_len, tgt_len))
mask = torch.arange(tmp.size(-1))
tmp.masked_fill_(mask < (mask + 1).view(tmp.size(-1), 1), 0)
causal_mask = tmp.to(dtype=causal_mask_dtype, device=decoder_input_ids.device)
return decoder_input_ids, decoder_padding_mask, causal_mask
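# Illustrative sketch (not part of the original file) of the causal mask built
# above for tgt_len == 4: an upper-triangular matrix with -inf strictly above the
# diagonal and 0 elsewhere, so each decoder position can only attend to itself and
# earlier positions:
#   [[0., -inf, -inf, -inf],
#    [0.,   0., -inf, -inf],
#    [0.,   0.,   0., -inf],
#    [0.,   0.,   0.,   0.]]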
class PretrainedBartModel(PreTrainedModel):
config_class = BartConfig
base_model_prefix = "model"
def _init_weights(self, module):
std = self.config.init_std
if isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=std)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, SinusoidalPositionalEmbedding):
pass
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=std)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
@property
def dummy_inputs(self):
pad_token = self.config.pad_token_id
input_ids = torch.tensor([[0, 6, 10, 4, 2], [0, 8, 12, 2, pad_token]], device=self.device)
dummy_inputs = {
"attention_mask": input_ids.ne(pad_token),
"input_ids": input_ids,
}
return dummy_inputs
def _make_linear_from_emb(emb):
vocab_size, emb_size = emb.weight.shape
lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
lin_layer.weight.data = emb.weight.data
return lin_layer
# Helper Functions, mostly for making masks
def _check_shapes(shape_1, shape2):
if shape_1 != shape2:
raise AssertionError("shape mismatch: {} != {}".format(shape_1, shape2))
def shift_tokens_right(input_ids, pad_token_id):
"""Shift input ids one token to the right, and wrap the last non pad token (usually <eos>)."""
prev_output_tokens = input_ids.clone()
index_of_eos = (input_ids.ne(pad_token_id).sum(dim=1) - 1).unsqueeze(-1)
prev_output_tokens[:, 0] = input_ids.gather(1, index_of_eos).squeeze()
prev_output_tokens[:, 1:] = input_ids[:, :-1]
return prev_output_tokens
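# Illustrative sketch (not part of the original file) of shift_tokens_right,
# assuming pad_token_id == 1:
#   input_ids -> [[0, 31414, 232, 2, 1]]
#   output    -> [[2, 0, 31414, 232, 2]]
# i.e. the last non-pad token (usually </s>) is wrapped around to position 0 and
# every other token moves one step right, which is how decoder_input_ids are
# derived from the labels.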
def make_padding_mask(input_ids, padding_idx=1):
"""True for pad tokens"""
padding_mask = input_ids.eq(padding_idx)
if not padding_mask.any():
padding_mask = None
return padding_mask
# Helper Modules
class EncoderLayer(nn.Module):
def __init__(self, config: BartConfig):
super().__init__()
self.embed_dim = config.d_model
self.self_attn = Attention(self.embed_dim, config.encoder_attention_heads, dropout=config.attention_dropout)
self.normalize_before = config.normalize_before
self.self_attn_layer_norm = LayerNorm(self.embed_dim)
self.dropout = config.dropout
self.activation_fn = ACT2FN[config.activation_function]
self.activation_dropout = config.activation_dropout
self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)
self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)
self.final_layer_norm = LayerNorm(self.embed_dim)
def forward(self, x, encoder_padding_mask, output_attentions=False):
"""
Args:
x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`
encoder_padding_mask (ByteTensor): binary ByteTensor of shape
`(batch, src_len)` where padding elements are indicated by ``1``.
for t_tgt, t_src is excluded (or masked out), =0 means it is
included in attention
Returns:
encoded output of shape `(seq_len, batch, embed_dim)`
"""
residual = x
if self.normalize_before:
x = self.self_attn_layer_norm(x)
x, attn_weights = self.self_attn(
query=x, key=x, key_padding_mask=encoder_padding_mask, output_attentions=output_attentions
)
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
if not self.normalize_before:
x = self.self_attn_layer_norm(x)
residual = x
if self.normalize_before:
x = self.final_layer_norm(x)
x = self.activation_fn(self.fc1(x))
x = F.dropout(x, p=self.activation_dropout, training=self.training)
x = self.fc2(x)
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
if not self.normalize_before:
x = self.final_layer_norm(x)
if torch.isinf(x).any() or torch.isnan(x).any():
clamp_value = torch.finfo(x.dtype).max - 1000
x = torch.clamp(x, min=-clamp_value, max=clamp_value)
return x, attn_weights
class BartEncoder(nn.Module):
"""
Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer
is a :class:`EncoderLayer`.
Args:
config: BartConfig
"""
def __init__(self, config: BartConfig, embed_tokens):
super().__init__()
self.dropout = config.dropout
self.layerdrop = config.encoder_layerdrop
embed_dim = embed_tokens.embedding_dim
self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0
self.padding_idx = embed_tokens.padding_idx
self.max_source_positions = config.max_position_embeddings
self.embed_tokens = embed_tokens
if config.static_position_embeddings:
self.embed_positions = SinusoidalPositionalEmbedding(
config.max_position_embeddings, embed_dim, self.padding_idx
)
else:
self.embed_positions = LearnedPositionalEmbedding(
config.max_position_embeddings,
embed_dim,
self.padding_idx,
config.extra_pos_embeddings,
)
self.layers = nn.ModuleList([EncoderLayer(config) for _ in range(config.encoder_layers)])
self.layernorm_embedding = LayerNorm(embed_dim) if config.normalize_embedding else nn.Identity()
# mbart has one extra layer_norm
self.layer_norm = LayerNorm(config.d_model) if config.add_final_layer_norm else None
def forward(
self, input_ids, attention_mask=None, output_attentions=False, output_hidden_states=False, return_dict=False
):
"""
Args:
input_ids (LongTensor): tokens in the source language of shape
`(batch, src_len)`
attention_mask (torch.LongTensor): indicating which indices are padding tokens.
Returns:
BaseModelOutput or Tuple comprised of:
- **x** (Tensor): the last encoder layer's output of
shape `(src_len, batch, embed_dim)`
- **encoder_states** (tuple(torch.FloatTensor)): all intermediate
hidden states of shape `(src_len, batch, embed_dim)`.
Only populated if *output_hidden_states:* is True.
- **all_attentions** (tuple(torch.FloatTensor)): Attention weights for each layer.
During training might not be of length n_layers because of layer dropout.
"""
# check attention mask and invert
if attention_mask is not None:
attention_mask = invert_mask(attention_mask)
inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
embed_pos = self.embed_positions(input_ids)
x = inputs_embeds + embed_pos
x = self.layernorm_embedding(x)
x = F.dropout(x, p=self.dropout, training=self.training)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
encoder_states = [] if output_hidden_states else None
all_attentions = () if output_attentions else None
for encoder_layer in self.layers:
if output_hidden_states:
encoder_states.append(x)
# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
dropout_probability = random.uniform(0, 1)
if self.training and (dropout_probability < self.layerdrop): # skip the layer
attn = None
else:
x, attn = encoder_layer(x, attention_mask, output_attentions=output_attentions)
if output_attentions:
all_attentions = all_attentions + (attn,)
if self.layer_norm:
x = self.layer_norm(x)
if output_hidden_states:
encoder_states.append(x)
# T x B x C -> B x T x C
encoder_states = tuple(hidden_state.transpose(0, 1) for hidden_state in encoder_states)
# T x B x C -> B x T x C
x = x.transpose(0, 1)
if not return_dict:
return tuple(v for v in [x, encoder_states, all_attentions] if v is not None)
return BaseModelOutput(last_hidden_state=x, hidden_states=encoder_states, attentions=all_attentions)
class DecoderLayer(nn.Module):
def __init__(self, config: BartConfig):
super().__init__()
self.embed_dim = config.d_model
self.self_attn = Attention(
embed_dim=self.embed_dim,
num_heads=config.decoder_attention_heads,
dropout=config.attention_dropout,
)
self.dropout = config.dropout
self.activation_fn = ACT2FN[config.activation_function]
self.activation_dropout = config.activation_dropout
self.normalize_before = config.normalize_before
self.self_attn_layer_norm = LayerNorm(self.embed_dim)
self.encoder_attn = Attention(
self.embed_dim,
config.decoder_attention_heads,
dropout=config.attention_dropout,
encoder_decoder_attention=True,
)
self.encoder_attn_layer_norm = LayerNorm(self.embed_dim)
self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)
self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)
self.final_layer_norm = LayerNorm(self.embed_dim)
def forward(
self,
x,
encoder_hidden_states,
encoder_attn_mask=None,
layer_state=None,
causal_mask=None,
decoder_padding_mask=None,
output_attentions=False,
):
residual = x
if layer_state is None:
layer_state = {}
if self.normalize_before:
x = self.self_attn_layer_norm(x)
# Self Attention
x, self_attn_weights = self.self_attn(
query=x,
key=x,
layer_state=layer_state, # adds keys to layer state
key_padding_mask=decoder_padding_mask,
attn_mask=causal_mask,
output_attentions=output_attentions,
)
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
if not self.normalize_before:
x = self.self_attn_layer_norm(x)
# Cross attention
residual = x
assert self.encoder_attn.cache_key != self.self_attn.cache_key
if self.normalize_before:
x = self.encoder_attn_layer_norm(x)
x, _ = self.encoder_attn(
query=x,
key=encoder_hidden_states,
key_padding_mask=encoder_attn_mask,
layer_state=layer_state, # mutates layer state
)
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
if not self.normalize_before:
x = self.encoder_attn_layer_norm(x)
# Fully Connected
residual = x
if self.normalize_before:
x = self.final_layer_norm(x)
x = self.activation_fn(self.fc1(x))
x = F.dropout(x, p=self.activation_dropout, training=self.training)
x = self.fc2(x)
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
if not self.normalize_before:
x = self.final_layer_norm(x)
return (
x,
self_attn_weights,
layer_state,
) # just self_attn weights for now, following t5, layer_state = cache for decoding
class BartDecoder(nn.Module):
"""
Transformer decoder consisting of *config.decoder_layers* layers. Each layer
is a :class:`DecoderLayer`.
Args:
config: BartConfig
embed_tokens (torch.nn.Embedding): output embedding
"""
def __init__(self, config: BartConfig, embed_tokens: nn.Embedding):
super().__init__()
self.dropout = config.dropout
self.layerdrop = config.decoder_layerdrop
self.do_blenderbot_90_layernorm = config.do_blenderbot_90_layernorm # layernorm variant
self.padding_idx = embed_tokens.padding_idx
self.max_target_positions = config.max_position_embeddings
self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0
self.embed_tokens = embed_tokens
if config.static_position_embeddings:
self.embed_positions = SinusoidalPositionalEmbedding(
config.max_position_embeddings, config.d_model, config.pad_token_id
)
else:
self.embed_positions = LearnedPositionalEmbedding(
config.max_position_embeddings,
config.d_model,
self.padding_idx,
config.extra_pos_embeddings
)
self.layers = nn.ModuleList(
[DecoderLayer(config) for _ in range(config.decoder_layers)]
) # type: List[DecoderLayer]
self.layernorm_embedding = LayerNorm(config.d_model) if config.normalize_embedding else nn.Identity()
self.layer_norm = LayerNorm(config.d_model) if config.add_final_layer_norm else None
self.config = config
def forward(
self,
input_ids,
encoder_hidden_states,
encoder_padding_mask,
decoder_padding_mask,
decoder_causal_mask,
past_key_values=None,
use_cache=False,
output_attentions=False,
output_hidden_states=False,
return_dict=False,
**unused,
):
"""
Includes several features from "Jointly Learning to Align and
Translate with Transformer Models" (Garg et al., EMNLP 2019).
Args:
input_ids (LongTensor): previous decoder outputs of shape
`(batch, tgt_len)`, for teacher forcing
encoder_hidden_states: output from the encoder, used for
encoder-side attention
encoder_padding_mask: for ignoring pad tokens
past_key_values (dict or None): dictionary used for storing state during generation
Returns:
BaseModelOutputWithPast or tuple:
- the decoder's features of shape `(batch, tgt_len, embed_dim)`
- the cache
- hidden states
- attentions
"""
if "decoder_cached_states" in unused:
warnings.warn(
"The `decoder_cached_states` argument is deprecated and will be removed in a future version, use `past_key_values` instead.",
FutureWarning,
)
past_key_values = unused.pop("decoder_cached_states")
if "decoder_past_key_values" in unused:
warnings.warn(
"The `decoder_past_key_values` argument is deprecated and will be removed in a future version, use `past_key_values` instead.",
FutureWarning,
)
past_key_values = unused.pop("decoder_past_key_values")
# check attention mask and invert
if encoder_padding_mask is not None:
encoder_padding_mask = invert_mask(encoder_padding_mask)
# embed positions
positions = self.embed_positions(input_ids, use_cache=use_cache)
if use_cache:
input_ids = input_ids[:, -1:]
positions = positions[:, -1:]
x = self.embed_tokens(input_ids) * self.embed_scale
if self.do_blenderbot_90_layernorm:
x = self.layernorm_embedding(x)
x += positions
else:
x += positions
x = self.layernorm_embedding(x)
x = F.dropout(x, p=self.dropout, training=self.training)
        # Convert to time-first format: (BS, seq_len, model_dim) -> (seq_len, BS, model_dim)
x = x.transpose(0, 1)
encoder_hidden_states = encoder_hidden_states.transpose(0, 1)
# decoder layers
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
next_decoder_cache = []
for idx, decoder_layer in enumerate(self.layers):
# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
if output_hidden_states:
all_hidden_states += (x,)
dropout_probability = random.uniform(0, 1)
if self.training and (dropout_probability < self.layerdrop):
continue
layer_state = past_key_values[idx] if past_key_values is not None else None
x, layer_self_attn, layer_past = decoder_layer(
x,
encoder_hidden_states,
encoder_attn_mask=encoder_padding_mask,
decoder_padding_mask=decoder_padding_mask,
layer_state=layer_state,
causal_mask=decoder_causal_mask,
output_attentions=output_attentions,
)
if use_cache:
next_decoder_cache.append(layer_past.copy())
if output_attentions:
all_self_attns += (layer_self_attn,)
if self.layer_norm: # if config.add_final_layer_norm (mBART)
x = self.layer_norm(x)
# Convert to standard output format: (seq_len, BS, model_dim) -> (BS, seq_len, model_dim)
if output_hidden_states:
all_hidden_states = tuple(hidden_state.transpose(0, 1) for hidden_state in all_hidden_states)
x = x.transpose(0, 1)
encoder_hidden_states = encoder_hidden_states.transpose(0, 1)
next_cache = next_decoder_cache if use_cache else None
if not return_dict:
return tuple(v for v in [x, next_cache, all_hidden_states, all_self_attns] if v is not None)
return BaseModelOutputWithPast(
last_hidden_state=x, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attns
)
def _reorder_buffer(attn_cache, new_order):
for k, input_buffer_k in attn_cache.items():
if input_buffer_k is not None:
attn_cache[k] = input_buffer_k.index_select(0, new_order)
return attn_cache
class Attention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(
self,
embed_dim,
num_heads,
dropout=0.0,
bias=True,
encoder_decoder_attention=False, # otherwise self_attention
):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
self.scaling = self.head_dim ** -0.5
self.encoder_decoder_attention = encoder_decoder_attention
self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.cache_key = "encoder_decoder" if self.encoder_decoder_attention else "self"
def _shape(self, tensor, seq_len, bsz):
return tensor.contiguous().view(seq_len, bsz * self.num_heads, self.head_dim).transpose(0, 1)
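    # _shape folds the head dimension into the batch dimension, turning a
    # (seq_len, bsz, embed_dim) projection into (bsz * num_heads, seq_len,
    # head_dim) so that a single torch.bmm runs all attention heads at once.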
def forward(
self,
query,
key: Optional[Tensor],
key_padding_mask: Optional[Tensor] = None,
layer_state: Optional[Dict[str, Optional[Tensor]]] = None,
attn_mask: Optional[Tensor] = None,
output_attentions=False,
) -> Tuple[Tensor, Optional[Tensor]]:
"""Input shape: Time(SeqLen) x Batch x Channel"""
static_kv: bool = self.encoder_decoder_attention
tgt_len, bsz, embed_dim = query.size()
assert embed_dim == self.embed_dim
assert list(query.size()) == [tgt_len, bsz, embed_dim]
# get here for encoder decoder cause of static_kv
if layer_state is not None: # reuse k,v and encoder_padding_mask
saved_state = layer_state.get(self.cache_key, {})
if "prev_key" in saved_state and static_kv:
# previous time steps are cached - no need to recompute key and value if they are static
key = None
else:
saved_state = None
layer_state = {}
q = self.q_proj(query) * self.scaling
if static_kv:
if key is None:
k = v = None
else:
k = self.k_proj(key)
v = self.v_proj(key)
else:
k = self.k_proj(query)
v = self.v_proj(query)
q = self._shape(q, tgt_len, bsz)
if k is not None:
k = self._shape(k, -1, bsz)
if v is not None:
v = self._shape(v, -1, bsz)
if saved_state is not None:
k, v, key_padding_mask = self._use_saved_state(k, v, saved_state, key_padding_mask, static_kv, bsz)
# Update cache
layer_state[self.cache_key] = {
"prev_key": k.view(bsz, self.num_heads, -1, self.head_dim),
"prev_value": v.view(bsz, self.num_heads, -1, self.head_dim),
"prev_key_padding_mask": key_padding_mask if not static_kv else None,
}
assert k is not None
src_len = k.size(1)
attn_weights = torch.bmm(q, k.transpose(1, 2))
assert attn_weights.size() == (bsz * self.num_heads, tgt_len, src_len)
if attn_mask is not None:
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attn_mask
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
# This is part of a workaround to get around fork/join parallelism not supporting Optional types.
if key_padding_mask is not None and key_padding_mask.dim() == 0:
key_padding_mask = None
assert key_padding_mask is None or key_padding_mask.size()[:2] == (
bsz,
src_len,
)
if key_padding_mask is not None: # don't attend to padding symbols
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
reshaped = key_padding_mask.unsqueeze(1).unsqueeze(2)
attn_weights = attn_weights.masked_fill(reshaped, float("-inf"))
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
attn_weights = F.softmax(attn_weights, dim=-1)
attn_probs = F.dropout(
attn_weights,
p=self.dropout,
training=self.training,
)
assert v is not None
attn_output = torch.bmm(attn_probs, v)
assert attn_output.size() == (bsz * self.num_heads, tgt_len, self.head_dim)
attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
attn_output = self.out_proj(attn_output)
if output_attentions:
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
else:
attn_weights = None
return attn_output, attn_weights
def _use_saved_state(self, k, v, saved_state, key_padding_mask, static_kv, bsz):
# saved states are stored with shape (bsz, num_heads, seq_len, head_dim)
if "prev_key" in saved_state:
_prev_key = saved_state["prev_key"]
assert _prev_key is not None
prev_key = _prev_key.view(bsz * self.num_heads, -1, self.head_dim)
if static_kv:
k = prev_key
else:
assert k is not None
k = torch.cat([prev_key, k], dim=1)
if "prev_value" in saved_state:
_prev_value = saved_state["prev_value"]
assert _prev_value is not None
prev_value = _prev_value.view(bsz * self.num_heads, -1, self.head_dim)
if static_kv:
v = prev_value
else:
assert v is not None
v = torch.cat([prev_value, v], dim=1)
assert k is not None and v is not None
prev_key_padding_mask: Optional[Tensor] = saved_state.get("prev_key_padding_mask", None)
if prev_key_padding_mask is not None:
if static_kv:
new_key_padding_mask = prev_key_padding_mask
else:
new_key_padding_mask = torch.cat([prev_key_padding_mask, key_padding_mask], dim=1)
else:
new_key_padding_mask = key_padding_mask
return k, v, new_key_padding_mask
class BartClassificationHead(nn.Module):
"""Head for sentence-level classification tasks."""
# This can trivially be shared with RobertaClassificationHead
def __init__(
self,
input_dim,
inner_dim,
num_classes,
pooler_dropout,
):
super().__init__()
self.dense = nn.Linear(input_dim, inner_dim)
self.dropout = nn.Dropout(p=pooler_dropout)
self.out_proj = nn.Linear(inner_dim, num_classes)
def forward(self, x):
x = self.dropout(x)
x = self.dense(x)
x = torch.tanh(x)
x = self.dropout(x)
x = self.out_proj(x)
return x
class LearnedPositionalEmbedding(nn.Embedding):
"""
This module learns positional embeddings up to a fixed maximum size.
Padding ids are ignored by either offsetting based on padding_idx
or by setting padding_idx to None and ensuring that the appropriate
position ids are passed to the forward function.
"""
def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: int, offset):
# Bart is set up so that if padding_idx is specified then offset the embedding ids by 2
        # and adjust num_embeddings appropriately. Other models don't have this hack
self.offset = offset
assert padding_idx is not None
num_embeddings += offset
super().__init__(num_embeddings, embedding_dim, padding_idx=padding_idx)
def forward(self, input_ids, use_cache=False):
"""Input is expected to be of size [bsz x seqlen]."""
bsz, seq_len = input_ids.shape[:2]
if use_cache:
positions = input_ids.data.new(1, 1).fill_(seq_len - 1) # called before slicing
else:
            # starts at 0, ends at seq_len - 1
positions = torch.arange(seq_len, dtype=torch.long, device=self.weight.device)
return super().forward(positions + self.offset)
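# Hedged note (not part of the original file): with BART's usual offset of 2,
# position 0 is looked up at embedding row 2, so a model with 1024 positions
# allocates 1026 rows; an input of shape (bsz, 5) therefore queries rows 2..6.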
def LayerNorm(normalized_shape, eps=1e-5, elementwise_affine=True):
if torch.cuda.is_available():
try:
from apex.normalization import FusedLayerNorm
return FusedLayerNorm(normalized_shape, eps, elementwise_affine)
except ImportError:
pass
return torch.nn.LayerNorm(normalized_shape, eps, elementwise_affine)
def fill_with_neg_inf(t):
"""FP16-compatible function that fills a input_ids with -inf."""
return t.float().fill_(float("-inf")).type_as(t)
# Public API
def _get_shape(t):
return getattr(t, "shape", None)
@add_start_docstrings(
"The bare BART Model outputting raw hidden-states without any specific head on top.",
BART_START_DOCSTRING,
)
class BartModel(PretrainedBartModel):
def __init__(self, config: BartConfig):
super().__init__(config)
padding_idx, vocab_size = config.pad_token_id, config.vocab_size
self.shared = nn.Embedding(vocab_size, config.d_model, padding_idx)
self.encoder = BartEncoder(config, self.shared)
self.decoder = BartDecoder(config, self.shared)
self.init_weights()
@add_start_docstrings_to_callable(BART_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint="facebook/bart-large",
output_type=Seq2SeqModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids,
attention_mask=None,
decoder_input_ids=None,
decoder_attention_mask=None,
encoder_outputs: Optional[Tuple] = None,
past_key_values=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
**kwargs,
):
if "decoder_past_key_values" in kwargs:
warnings.warn(
"The `decoder_past_key_values` argument is deprecated and will be removed in a future version, use `past_key_values` instead.",
FutureWarning,
)
past_key_values = kwargs.pop("decoder_past_key_values")
if decoder_input_ids is None:
use_cache = False
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# make masks if user doesn't supply
if not use_cache:
decoder_input_ids, decoder_padding_mask, causal_mask = _prepare_bart_decoder_inputs(
self.config,
input_ids,
decoder_input_ids=decoder_input_ids,
decoder_padding_mask=decoder_attention_mask,
causal_mask_dtype=self.shared.weight.dtype,
)
else:
decoder_padding_mask, causal_mask = None, None
assert decoder_input_ids is not None
if encoder_outputs is None:
encoder_outputs = self.encoder(
input_ids=input_ids,
attention_mask=attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
        # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True
elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
encoder_outputs = BaseModelOutput(
last_hidden_state=encoder_outputs[0],
hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
)
# decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
decoder_outputs = self.decoder(
decoder_input_ids,
encoder_outputs[0],
attention_mask,
decoder_padding_mask,
decoder_causal_mask=causal_mask,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
if not return_dict:
return decoder_outputs + encoder_outputs
return Seq2SeqModelOutput(
last_hidden_state=decoder_outputs.last_hidden_state,
past_key_values=decoder_outputs.past_key_values,
decoder_hidden_states=decoder_outputs.hidden_states,
decoder_attentions=decoder_outputs.attentions,
encoder_last_hidden_state=encoder_outputs.last_hidden_state,
encoder_hidden_states=encoder_outputs.hidden_states,
encoder_attentions=encoder_outputs.attentions,
)
def get_input_embeddings(self):
return self.shared
def set_input_embeddings(self, value):
self.shared = value
self.encoder.embed_tokens = self.shared
self.decoder.embed_tokens = self.shared
def get_output_embeddings(self):
return _make_linear_from_emb(self.shared) # make it on the fly
@add_start_docstrings(
"The BART Model with a language modeling head. Can be used for summarization.", BART_START_DOCSTRING
)
class BartForConditionalGeneration(PretrainedBartModel):
base_model_prefix = "model"
authorized_missing_keys = [r"final_logits_bias", r"encoder\.version", r"decoder\.version"]
def __init__(self, config: BartConfig):
super().__init__(config)
base_model = BartModel(config)
self.model = base_model
self.register_buffer("final_logits_bias", torch.zeros((1, self.model.shared.num_embeddings)))
def resize_token_embeddings(self, new_num_tokens: int) -> nn.Embedding:
old_num_tokens = self.model.shared.num_embeddings
new_embeddings = super().resize_token_embeddings(new_num_tokens)
self.model.shared = new_embeddings
self._resize_final_logits_bias(new_num_tokens, old_num_tokens)
return new_embeddings
def _resize_final_logits_bias(self, new_num_tokens: int, old_num_tokens: int) -> None:
if new_num_tokens <= old_num_tokens:
new_bias = self.final_logits_bias[:, :new_num_tokens]
else:
extra_bias = torch.zeros((1, new_num_tokens - old_num_tokens), device=self.final_logits_bias.device)
new_bias = torch.cat([self.final_logits_bias, extra_bias], dim=1)
self.register_buffer("final_logits_bias", new_bias)
@add_start_docstrings_to_callable(BART_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
@add_end_docstrings(BART_GENERATION_EXAMPLE)
def forward(
self,
input_ids,
attention_mask=None,
decoder_input_ids=None,
decoder_attention_mask=None,
encoder_outputs=None,
past_key_values=None,
labels=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
**unused,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the masked language modeling loss.
Indices should either be in ``[0, ..., config.vocab_size]`` or -100 (see ``input_ids`` docstring).
Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens
with labels in ``[0, ..., config.vocab_size]``.
Returns:
Conditional generation example::
>>> # Mask filling only works for bart-large
>>> from transformers import BartTokenizer, BartForConditionalGeneration
>>> tokenizer = BartTokenizer.from_pretrained('facebook/bart-large')
>>> TXT = "My friends are <mask> but they eat too many carbs."
>>> model = BartForConditionalGeneration.from_pretrained('facebook/bart-large')
>>> input_ids = tokenizer([TXT], return_tensors='pt')['input_ids']
>>> logits = model(input_ids).logits
>>> masked_index = (input_ids[0] == tokenizer.mask_token_id).nonzero().item()
>>> probs = logits[0, masked_index].softmax(dim=0)
>>> values, predictions = probs.topk(5)
>>> tokenizer.decode(predictions).split()
>>> # ['good', 'great', 'all', 'really', 'very']
"""
if "lm_labels" in unused:
warnings.warn(
"The `lm_labels` argument is deprecated and will be removed in a future version, use `labels` instead.",
FutureWarning,
)
labels = unused.pop("lm_labels")
if "decoder_cached_states" in unused:
warnings.warn(
"The `decoder_cached_states` argument is deprecated and will be removed in a future version, use `past_key_values` instead.",
FutureWarning,
)
past_key_values = unused.pop("decoder_cached_states")
if "decoder_past_key_values" in unused:
warnings.warn(
"The `decoder_past_key_values` argument is deprecated and will be removed in a future version, use `past_key_values` instead.",
FutureWarning,
)
past_key_values = unused.pop("decoder_past_key_values")
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if labels is not None:
use_cache = False
if decoder_input_ids is None:
decoder_input_ids = shift_tokens_right(labels, self.config.pad_token_id)
outputs = self.model(
input_ids,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
encoder_outputs=encoder_outputs,
decoder_attention_mask=decoder_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
lm_logits = F.linear(outputs[0], self.model.shared.weight, bias=self.final_logits_bias)
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
# TODO(SS): do we need to ignore pad tokens in labels?
masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (lm_logits,) + outputs[1:]
return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
return Seq2SeqLMOutput(
loss=masked_lm_loss,
logits=lm_logits,
past_key_values=outputs.past_key_values,
decoder_hidden_states=outputs.decoder_hidden_states,
decoder_attentions=outputs.decoder_attentions,
encoder_last_hidden_state=outputs.encoder_last_hidden_state,
encoder_hidden_states=outputs.encoder_hidden_states,
encoder_attentions=outputs.encoder_attentions,
)
def prepare_inputs_for_generation(
self, decoder_input_ids, past, attention_mask, use_cache, encoder_outputs, **kwargs
):
return {
"input_ids": None, # encoder_outputs is defined. input_ids not needed
"encoder_outputs": encoder_outputs,
"past_key_values": past,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"use_cache": use_cache, # change this to avoid caching (presumably for debugging)
}
def adjust_logits_during_generation(self, logits, cur_len, max_length):
if cur_len == 1 and self.config.force_bos_token_to_be_generated:
self._force_token_ids_generation(logits, self.config.bos_token_id)
elif cur_len == max_length - 1 and self.config.eos_token_id is not None:
self._force_token_ids_generation(logits, self.config.eos_token_id)
return logits
def _force_token_ids_generation(self, scores, token_id) -> None:
"""force one of token_ids to be generated by setting prob of all other tokens to 0 (logprob=-float("inf"))"""
scores[:, [x for x in range(self.config.vocab_size) if x != token_id]] = -float("inf")
@staticmethod
def _reorder_cache(past, beam_idx):
reordered_past = []
for layer_past in past:
# get the correct batch idx from decoder layer's batch dim for cross and self-attn
layer_past_new = {
attn_key: _reorder_buffer(attn_cache, beam_idx) for attn_key, attn_cache in layer_past.items()
}
reordered_past.append(layer_past_new)
return reordered_past
def get_encoder(self):
return self.model.encoder
def get_output_embeddings(self):
return _make_linear_from_emb(self.model.shared) # make it on the fly
@add_start_docstrings(
"""Bart model with a sequence classification/head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. """,
BART_START_DOCSTRING,
)
class BartForSequenceClassification(PretrainedBartModel):
def __init__(self, config: BartConfig, **kwargs):
super().__init__(config, **kwargs)
self.model = BartModel(config)
self.classification_head = BartClassificationHead(
config.d_model,
config.d_model,
config.num_labels,
config.classifier_dropout,
)
self.model._init_weights(self.classification_head.dense)
self.model._init_weights(self.classification_head.out_proj)
@add_start_docstrings_to_callable(BART_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint="facebook/bart-large",
output_type=Seq2SeqSequenceClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids,
attention_mask=None,
decoder_input_ids=None,
decoder_attention_mask=None,
encoder_outputs=None,
labels=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the sequence classification/regression loss.
Indices should be in :obj:`[0, ..., config.num_labels - 1]`.
If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if labels is not None:
use_cache = False
outputs = self.model(
input_ids,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
encoder_outputs=encoder_outputs,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
x = outputs[0] # last hidden state
eos_mask = input_ids.eq(self.config.eos_token_id)
if len(torch.unique(eos_mask.sum(1))) > 1:
raise ValueError("All examples must have the same number of <eos> tokens.")
sentence_representation = x[eos_mask, :].view(x.size(0), -1, x.size(-1))[:, -1, :]
logits = self.classification_head(sentence_representation)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[1:]
return ((loss,) + output) if loss is not None else output
return Seq2SeqSequenceClassifierOutput(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
decoder_hidden_states=outputs.decoder_hidden_states,
decoder_attentions=outputs.decoder_attentions,
encoder_last_hidden_state=outputs.encoder_last_hidden_state,
encoder_hidden_states=outputs.encoder_hidden_states,
encoder_attentions=outputs.encoder_attentions,
)
@add_start_docstrings(
"""BART Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layer on top of
the hidden-states output to compute `span start logits` and `span end logits`). """,
BART_START_DOCSTRING,
)
class BartForQuestionAnswering(PretrainedBartModel):
def __init__(self, config):
super().__init__(config)
config.num_labels = 2
self.num_labels = config.num_labels
self.model = BartModel(config)
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
self.model._init_weights(self.qa_outputs)
@add_start_docstrings_to_callable(BART_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint="facebook/bart-large",
output_type=Seq2SeqQuestionAnsweringModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids,
attention_mask=None,
decoder_input_ids=None,
decoder_attention_mask=None,
encoder_outputs=None,
start_positions=None,
end_positions=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for position (index) of the start of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`).
Position outside of the sequence are not taken into account for computing the loss.
end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for position (index) of the end of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`).
Position outside of the sequence are not taken into account for computing the loss.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if start_positions is not None and end_positions is not None:
use_cache = False
outputs = self.model(
input_ids,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
encoder_outputs=encoder_outputs,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1)
end_logits = end_logits.squeeze(-1)
total_loss = None
if start_positions is not None and end_positions is not None:
# If we are on multi-GPU, split add a dimension
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions.clamp_(0, ignored_index)
end_positions.clamp_(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
if not return_dict:
output = (
start_logits,
end_logits,
) + outputs[1:]
return ((total_loss,) + output) if total_loss is not None else output
return Seq2SeqQuestionAnsweringModelOutput(
loss=total_loss,
start_logits=start_logits,
end_logits=end_logits,
past_key_values=outputs.past_key_values,
decoder_hidden_states=outputs.decoder_hidden_states,
decoder_attentions=outputs.decoder_attentions,
encoder_last_hidden_state=outputs.encoder_last_hidden_state,
encoder_hidden_states=outputs.encoder_hidden_states,
encoder_attentions=outputs.encoder_attentions,
)
class SinusoidalPositionalEmbedding(nn.Embedding):
"""This module produces sinusoidal positional embeddings of any length."""
def __init__(self, num_positions, embedding_dim, padding_idx=None):
super().__init__(num_positions, embedding_dim)
if embedding_dim % 2 != 0:
raise NotImplementedError(f"odd embedding_dim {embedding_dim} not supported")
self.weight = self._init_weight(self.weight)
@staticmethod
def _init_weight(out: nn.Parameter):
"""Identical to the XLM create_sinusoidal_embeddings except features are not interleaved.
The cos features are in the 2nd half of the vector. [dim // 2:]
"""
n_pos, dim = out.shape
position_enc = np.array(
[[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)]
)
out[:, 0: dim // 2] = torch.FloatTensor(np.sin(position_enc[:, 0::2])) # This line breaks for odd n_pos
out[:, dim // 2:] = torch.FloatTensor(np.cos(position_enc[:, 1::2]))
out.detach_()
out.requires_grad = False
return out
@torch.no_grad()
def forward(self, input_ids, use_cache=False):
"""Input is expected to be of size [bsz x seqlen]."""
bsz, seq_len = input_ids.shape[:2]
if use_cache:
positions = input_ids.data.new(1, 1).fill_(seq_len - 1) # called before slicing
else:
            # positions start at 0 and end at seq_len - 1
positions = torch.arange(seq_len, dtype=torch.long, device=self.weight.device)
return super().forward(positions)
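# Illustrative check (added, not part of the original file): the sin features occupy the first half of
# each embedding row and the cos features the second half, so position 0 maps to zeros then ones:
# >>> pe = SinusoidalPositionalEmbedding(num_positions=64, embedding_dim=8)
# >>> pe.weight[0]
# tensor([0., 0., 0., 0., 1., 1., 1., 1.])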
| 58,282 | 41.234058 | 213 | py |
DMASTE | DMASTE-main/BARTABSA/peng/model/metrics.py | from fastNLP import MetricBase
from fastNLP.core.metrics import _compute_f_pre_rec
from collections import Counter
class Seq2SeqSpanMetric(MetricBase):
def __init__(self, eos_token_id, num_labels, opinion_first=True):
super(Seq2SeqSpanMetric, self).__init__()
self.eos_token_id = eos_token_id
self.num_labels = num_labels
self.word_start_index = num_labels + 2 # +2, shift for sos and eos
self.all_pred = []
self.pre_pred = []
self.ae_oe_fp = 0
self.ae_oe_tp = 0
self.ae_oe_fn = 0
self.triple_fp = 0
self.triple_tp = 0
self.triple_fn = 0
self.em = 0
self.invalid = 0
self.total = 0
self.ae_sc_fp = 0
self.ae_sc_tp = 0
self.ae_sc_fn = 0
assert opinion_first is False, "Current metric only supports aspect first"
self.opinin_first = opinion_first
def evaluate(self, target_span, pred, tgt_tokens):
self.total += pred.size(0)
pred_eos_index = pred.flip(dims=[1]).eq(self.eos_token_id).cumsum(dim=1).long()
target_eos_index = tgt_tokens.flip(dims=[1]).eq(self.eos_token_id).cumsum(dim=1).long()
pred = pred[:, 1:] # delete </s>
tgt_tokens = tgt_tokens[:, 1:]
pred_seq_len = pred_eos_index.flip(dims=[1]).eq(pred_eos_index[:, -1:]).sum(dim=1) # bsz
pred_seq_len = (pred_seq_len - 2).tolist()
target_seq_len = target_eos_index.flip(dims=[1]).eq(target_eos_index[:, -1:]).sum(dim=1) # bsz
target_seq_len = (target_seq_len - 2).tolist()
pred_spans = []
for i, (ts, ps) in enumerate(zip(target_span, pred.tolist())):
em = 0
ps = ps[:pred_seq_len[i]]
if pred_seq_len[i] == target_seq_len[i]:
em = int(
tgt_tokens[i, :target_seq_len[i]].eq(pred[i, :target_seq_len[i]]).sum().item() == target_seq_len[i])
self.em += em
invalid = 0
pairs = []
cur_pair = []
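            # Added note (not part of the original file): generated ids >= self.word_start_index are
            # shifted span pointers; a smaller id is a sentiment class token that closes a 5-tuple
            # (aspect_start, aspect_end, opinion_start, opinion_end, sentiment_id).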
if len(ps):
for index, j in enumerate(ps):
if j < self.word_start_index:
cur_pair.append(j)
if len(cur_pair) != 5 or cur_pair[0] > cur_pair[1] or cur_pair[2] > cur_pair[3]:
invalid = 1
else:
pairs.append(tuple(cur_pair))
cur_pair = []
else:
cur_pair.append(j)
pred_spans.append(pairs.copy())
self.all_pred.append(pairs.copy())
self.invalid += invalid
oe_ae_target = [tuple(t[:4]) for t in ts]
oe_ae_pred = [p[:4] for p in pairs]
oe_ae_target_counter = Counter(oe_ae_target)
oe_ae_pred_counter = Counter(oe_ae_pred)
tp, fn, fp = _compute_tp_fn_fp(set(list(oe_ae_pred_counter.keys())),
set(list(oe_ae_target_counter.keys())))
self.ae_oe_fn += fn
self.ae_oe_fp += fp
self.ae_oe_tp += tp
            # AESC: aspect extraction + sentiment classification
ae_sc_target = [(t[0], t[1], t[-1]) for t in ts]
ae_sc_pred = [(p[0], p[1], p[-1]) for p in pairs]
asts = set([tuple(t) for t in ae_sc_target])
asps = set(ae_sc_pred)
for p in list(asps): # pairs is a 5-tuple
if p in asts:
asts.remove(p)
self.ae_sc_tp += 1
else:
self.ae_sc_fp += 1
self.ae_sc_fn += len(asts)
ts = set([tuple(t) for t in ts])
ps = set(pairs)
for p in list(ps):
if p in ts:
ts.remove(p)
self.triple_tp += 1
else:
self.triple_fp += 1
self.triple_fn += len(ts)
def get_metric(self, reset=True):
res = {}
f, pre, rec = _compute_f_pre_rec(1, self.triple_tp, self.triple_fn, self.triple_fp)
res['triple_f'] = round(f, 4)*100
res['triple_rec'] = round(rec, 4)*100
res['triple_pre'] = round(pre, 4)*100
f, pre, rec = _compute_f_pre_rec(1, self.ae_oe_tp, self.ae_oe_fn, self.ae_oe_fp)
res['oe_ae_f'] = round(f, 4)*100
res['oe_ae_rec'] = round(rec, 4)*100
res['oe_ae_pre'] = round(pre, 4)*100
f, pre, rec = _compute_f_pre_rec(1, self.ae_sc_tp, self.ae_sc_fn, self.ae_sc_fp)
res["ae_sc_f"] = round(f, 4)*100
res["ae_sc_rec"] = round(rec, 4)*100
res["ae_sc_pre"] = round(pre, 4)*100
res['em'] = round(self.em / self.total, 4)
res['invalid'] = round(self.invalid / self.total, 4)
if reset:
self.ae_oe_fp = 0
self.ae_oe_tp = 0
self.ae_oe_fn = 0
self.triple_fp = 0
self.triple_tp = 0
self.triple_fn = 0
self.em = 0
self.invalid = 0
self.total = 0
self.ae_sc_fp = 0
self.ae_sc_tp = 0
self.ae_sc_fn = 0
self.pre_pred = self.all_pred
self.all_pred = []
return res
def get_pred(self):
return self.pre_pred
def _compute_tp_fn_fp(ps, ts):
ps = ps.copy()
tp = 0
fp = 0
fn = 0
if isinstance(ts, set):
ts = {key: 1 for key in list(ts)}
if isinstance(ps, set):
ps = {key: 1 for key in list(ps)}
for key in ts.keys():
t_num = ts[key]
if key not in ps:
p_num = 0
else:
p_num = ps[key]
tp += min(p_num, t_num)
fp += max(p_num - t_num, 0)
fn += max(t_num - p_num, 0)
if key in ps:
ps.pop(key)
fp += sum(ps.values())
return tp, fn, fp
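# Illustrative sanity check (added, not part of the original file):
# >>> _compute_tp_fn_fp({(1, 2, 0)}, {(1, 2, 0), (3, 4, 2)})
# (1, 1, 0)  # one true positive, one gold item missed (fn), no false positives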
| 5,863 | 34.539394 | 120 | py |
DMASTE | DMASTE-main/BARTABSA/peng/model/__init__.py | 0 | 0 | 0 | py |
|
DMASTE | DMASTE-main/BARTABSA/peng/model/generator.py | r"""Modify from fastNLP"""
import torch
from torch import nn
from fastNLP.models.seq2seq_model import Seq2SeqModel
from fastNLP.modules.decoder.seq2seq_decoder import Seq2SeqDecoder, State
import torch.nn.functional as F
from fastNLP.core.utils import _get_model_device
from functools import partial
class SequenceGeneratorModel(nn.Module):
"""
    Wraps a Seq2SeqModel so that it can be used for generation tasks.
"""
def __init__(self, seq2seq_model: Seq2SeqModel, bos_token_id, eos_token_id=None, max_length=30, max_len_a=0.0,
num_beams=1, do_sample=True,
repetition_penalty=1, length_penalty=1.0, pad_token_id=0,
restricter=None):
"""
        :param Seq2SeqModel seq2seq_model: the sequence-to-sequence model; its decoder is used for generation
        :param int,None bos_token_id: token id that marks the beginning of a sentence
        :param int,None eos_token_id: token id that marks the end of a sentence
        :param int max_length: maximum length of generated sentences; each sentence is decoded for at most max_length + max_len_a*src_len steps
        :param float max_len_a: each sentence is decoded for at most max_length + max_len_a*src_len steps. If non-zero, the State must contain encoder_mask
        :param int num_beams: beam size for beam search
        :param bool do_sample: whether to generate by sampling
        :param float temperature: only meaningful when do_sample is True
        :param int top_k: sample only from the top_k tokens
        :param float top_p: sample only from tokens within the top_p probability mass (nucleus sampling)
        :param float repetition_penalty: how strongly repeated tokens are penalized
        :param float length_penalty: length penalty; values < 1 encourage longer sentences, values > 1 encourage shorter ones
        :param int pad_token_id: once a sentence has finished, subsequent positions are filled with pad_token_id
"""
super().__init__()
self.seq2seq_model = seq2seq_model
self.restricter = restricter
self.generator = SequenceGenerator(seq2seq_model.decoder, max_length=max_length, max_len_a=max_len_a,
num_beams=num_beams,
do_sample=do_sample,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
repetition_penalty=repetition_penalty, length_penalty=length_penalty,
pad_token_id=pad_token_id,
restricter=restricter)
def forward(self, src_tokens, tgt_tokens, src_seq_len=None, tgt_seq_len=None, first=None):
"""
        Pass-through call to seq2seq_model's forward.
:param torch.LongTensor src_tokens: bsz x max_len
:param torch.LongTensor tgt_tokens: bsz x max_len'
:param torch.LongTensor src_seq_len: bsz
:param torch.LongTensor tgt_seq_len: bsz
:return:
"""
return self.seq2seq_model(src_tokens, tgt_tokens, src_seq_len, tgt_seq_len, first)
def predict(self, src_tokens, src_seq_len=None, first=None):
"""
        Given the source input, return the generated output.
:param torch.LongTensor src_tokens: bsz x max_len
:param torch.LongTensor src_seq_len: bsz
:return:
"""
state = self.seq2seq_model.prepare_state(src_tokens, src_seq_len, first)
result = self.generator.generate(state)
return {'pred': result}
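# Illustrative usage sketch (added; `seq2seq_model`, `src_tokens` and `src_seq_len` are assumed to be
# built elsewhere in this repo, and the token ids below are placeholders):
# >>> model = SequenceGeneratorModel(seq2seq_model, bos_token_id=0, eos_token_id=1,
# ...                                max_length=30, num_beams=4, do_sample=False)
# >>> model.predict(src_tokens, src_seq_len)['pred']  # LongTensor of shape (bsz, generated_len)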
r"""
"""
__all__ = [
'SequenceGenerator'
]
class SequenceGenerator:
"""
    Decodes sentences with a given Seq2SeqDecoder.
"""
def __init__(self, decoder: Seq2SeqDecoder, max_length=20, max_len_a=0.0, num_beams=1,
do_sample=False, bos_token_id=None, eos_token_id=None,
repetition_penalty=1, length_penalty=1.0, pad_token_id=0, restricter=None):
"""
        :param Seq2SeqDecoder decoder: the Decoder object
        :param int max_length: maximum length of generated sentences; each sentence is decoded for at most max_length + max_len_a*src_len steps
        :param float max_len_a: each sentence is decoded for at most max_length + max_len_a*src_len steps. If non-zero, the State must contain encoder_mask
        :param int num_beams: beam size for beam search
        :param bool do_sample: whether to generate by sampling
        :param float temperature: only meaningful when do_sample is True
        :param int top_k: sample only from the top_k tokens
        :param float top_p: sample only from tokens within the top_p probability mass (nucleus sampling)
        :param int,None bos_token_id: token id that marks the beginning of a sentence
        :param int,None eos_token_id: token id that marks the end of a sentence
        :param float repetition_penalty: how strongly repeated tokens are penalized
        :param float length_penalty: length penalty; values < 1 encourage longer sentences, values > 1 encourage shorter ones
        :param int pad_token_id: once a sentence has finished, subsequent positions are filled with pad_token_id
"""
self.generate_func = partial(greedy_generate, decoder=decoder, max_length=max_length, max_len_a=max_len_a,
num_beams=num_beams,
bos_token_id=bos_token_id, eos_token_id=eos_token_id,
repetition_penalty=repetition_penalty,
length_penalty=length_penalty, pad_token_id=pad_token_id,
restricter=restricter)
self.do_sample = do_sample
self.max_length = max_length
self.num_beams = num_beams
self.bos_token_id = bos_token_id
self.eos_token_id = eos_token_id
self.repetition_penalty = repetition_penalty
self.length_penalty = length_penalty
self.decoder = decoder
self.pad_token_id = pad_token_id
self.restricter = restricter
self.max_len_a = max_len_a
def set_new_generator(self, max_length=-1, max_len_a=-1, num_beams=-1,
repetition_penalty=-1, length_penalty=-1, restricter=-1):
if max_length == -1:
max_length = self.max_length
if max_len_a == -1:
max_len_a = self.max_len_a
if num_beams == -1:
num_beams = self.num_beams
if repetition_penalty == -1:
repetition_penalty = self.repetition_penalty
if length_penalty == -1:
length_penalty = self.length_penalty
if restricter == -1:
restricter = self.restricter
self.generate_func = partial(greedy_generate, decoder=self.decoder, max_length=max_length, max_len_a=max_len_a,
num_beams=num_beams,
bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id,
repetition_penalty=repetition_penalty,
length_penalty=length_penalty, pad_token_id=self.pad_token_id,
restricter=restricter)
@torch.no_grad()
def generate(self, state, tokens=None):
"""
        :param State state: the State holding the encoder outputs, used together with the matching Decoder
        :param torch.LongTensor,None tokens: batch_size x length, the tokens to start decoding from
        :return: bsz x max_length', the generated token sequences. If eos_token_id is not None, every sequence is guaranteed to end with eos_token_id
"""
return self.generate_func(tokens=tokens, state=state)
@torch.no_grad()
def greedy_generate(decoder, tokens=None, state=None, max_length=20, max_len_a=0.0, num_beams=1,
bos_token_id=None, eos_token_id=None, pad_token_id=0,
repetition_penalty=1, length_penalty=1.0, restricter=None):
"""
    Greedy sentence search.
    :param Decoder decoder: the Decoder object
    :param torch.LongTensor tokens: batch_size x len, the input to decode from; if None, generation starts automatically from bos_token_id
    :param State state: should contain the encoder outputs.
    :param int max_length: maximum length of generated sentences; each sentence is decoded for at most max_length + max_len_a*src_len steps
    :param float max_len_a: each sentence is decoded for at most max_length + max_len_a*src_len steps. If non-zero, the State must contain encoder_mask
    :param int num_beams: beam size used for decoding.
    :param int bos_token_id: if tokens is None, decoding starts from bos_token_id.
    :param int eos_token_id: the end token; if None, decoding always runs to max_length.
    :param int pad_token_id: the pad token id
    :param float repetition_penalty: how strongly repeated tokens are penalized.
    :param float length_penalty: a length-based penalty applied to every token except eos.
:return:
"""
if num_beams == 1:
token_ids = _no_beam_search_generate(decoder, tokens=tokens, state=state, max_length=max_length, max_len_a=max_len_a,
bos_token_id=bos_token_id, eos_token_id=eos_token_id,
repetition_penalty=repetition_penalty, length_penalty=length_penalty,
pad_token_id=pad_token_id, restricter=restricter)
else:
token_ids = _beam_search_generate(decoder, tokens=tokens, state=state, max_length=max_length, max_len_a=max_len_a,
num_beams=num_beams,
bos_token_id=bos_token_id, eos_token_id=eos_token_id, do_sample=False,
repetition_penalty=repetition_penalty, length_penalty=length_penalty,
pad_token_id=pad_token_id, restricter=restricter)
return token_ids
def _no_beam_search_generate(decoder: Seq2SeqDecoder, state, tokens=None, max_length=20, max_len_a=0.0, bos_token_id=None,
eos_token_id=None,
repetition_penalty=1.0, length_penalty=1.0, pad_token_id=0,
restricter=None):
device = _get_model_device(decoder)
if tokens is None:
if bos_token_id is None:
raise RuntimeError("You have to specify either `tokens` or `bos_token_id`.")
batch_size = state.num_samples
if batch_size is None:
raise RuntimeError("Cannot infer the number of samples from `state`.")
tokens = torch.full([batch_size, 1], fill_value=bos_token_id, dtype=torch.long).to(device)
batch_size = tokens.size(0)
if state.num_samples:
assert state.num_samples == batch_size, "The number of samples in `tokens` and `state` should match."
if eos_token_id is None:
_eos_token_id = -1
else:
_eos_token_id = eos_token_id
    scores = decoder.decode(tokens=tokens, state=state) # mainly to update the state
    # need to handle the case where generation finishes at the very first position
# if _eos_token_id!=-1:
# scores[:, _eos_token_id] = -1e12
if restricter is not None:
_, next_tokens = restricter(state, tokens, scores, num_beams=1)
else:
next_tokens = scores.argmax(dim=-1, keepdim=True)
token_ids = torch.cat([tokens, next_tokens], dim=1)
cur_len = token_ids.size(1)
dones = token_ids.new_zeros(batch_size).eq(1).__or__(next_tokens.squeeze(1).eq(eos_token_id))
# tokens = tokens[:, -1:]
if max_len_a!=0:
# (bsz x num_beams, )
if state.encoder_mask is not None:
max_lengths = (state.encoder_mask.sum(dim=1).float()*max_len_a).long() + max_length
else:
max_lengths = tokens.new_full((tokens.size(0), ), fill_value=max_length, dtype=torch.long)
real_max_length = max_lengths.max().item()
else:
real_max_length = max_length
if state.encoder_mask is not None:
max_lengths = state.encoder_mask.new_ones(state.encoder_mask.size(0)).long()*max_length
else:
max_lengths = tokens.new_full((tokens.size(0),), fill_value=max_length, dtype=torch.long)
while cur_len < real_max_length:
scores = decoder.decode(tokens=token_ids, state=state) # batch_size x vocab_size
if repetition_penalty != 1.0:
token_scores = scores.gather(dim=1, index=token_ids)
lt_zero_mask = token_scores.lt(0).float()
ge_zero_mask = lt_zero_mask.eq(0).float()
token_scores = lt_zero_mask * repetition_penalty * token_scores + ge_zero_mask / repetition_penalty * token_scores
scores.scatter_(dim=1, index=token_ids, src=token_scores)
if eos_token_id is not None and length_penalty != 1.0:
token_scores = scores / cur_len ** length_penalty # batch_size x vocab_size
eos_mask = scores.new_ones(scores.size(1))
eos_mask[eos_token_id] = 0
eos_mask = eos_mask.unsqueeze(0).eq(1)
            scores = scores.masked_scatter(eos_mask, token_scores) # i.e., the scores of all tokens except eos are scaled up/down
if restricter is not None:
_, next_tokens = restricter(state, token_ids, scores, 1)
else:
next_tokens = scores.argmax(dim=-1, keepdim=True)
next_tokens = next_tokens.squeeze(-1)
        # if the maximum length for a sequence has been reached, fill in eos directly
if _eos_token_id!=-1:
next_tokens = next_tokens.masked_fill(max_lengths.eq(cur_len+1), _eos_token_id)
        next_tokens = next_tokens.masked_fill(dones, pad_token_id) # pad the samples that have already finished
tokens = next_tokens.unsqueeze(1)
token_ids = torch.cat([token_ids, tokens], dim=-1) # batch_size x max_len
end_mask = next_tokens.eq(_eos_token_id)
dones = dones.__or__(end_mask)
cur_len += 1
if dones.min() == 1:
break
# if eos_token_id is not None:
    # tokens.scatter(index=max_lengths[:, None], dim=1, value=eos_token_id) # set the position at the maximum length to eos
# if cur_len == max_length:
    # token_ids[:, -1].masked_fill_(~dones, eos_token_id) # if EOS still has not appeared at the maximum length, force the last token to be eos
return token_ids
def _beam_search_generate(decoder: Seq2SeqDecoder, tokens=None, state=None, max_length=20, max_len_a=0.0, num_beams=4,
bos_token_id=None, eos_token_id=None, do_sample=True,
repetition_penalty=1.0, length_penalty=None, pad_token_id=0,
restricter=None) -> torch.LongTensor:
assert do_sample is False
    # perform beam search
device = _get_model_device(decoder)
if tokens is None:
if bos_token_id is None:
raise RuntimeError("You have to specify either `tokens` or `bos_token_id`.")
batch_size = state.num_samples
if batch_size is None:
raise RuntimeError("Cannot infer the number of samples from `state`.")
tokens = torch.full([batch_size, 1], fill_value=bos_token_id, dtype=torch.long).to(device)
batch_size = tokens.size(0)
if state.num_samples:
assert state.num_samples == batch_size, "The number of samples in `tokens` and `state` should match."
if eos_token_id is None:
_eos_token_id = -1
else:
_eos_token_id = eos_token_id
    scores = decoder.decode(tokens=tokens, state=state) # the whole sequence generated so far should be passed in here
    # need to handle the case where generation finishes at the very first position
# if _eos_token_id!=-1:
# scores[:, _eos_token_id] = -1e12
vocab_size = scores.size(1)
    assert vocab_size >= num_beams, "num_beams should not be larger than the vocabulary size."
scores = F.log_softmax(scores, dim=-1) # (batch_size, vocab_size)
    # obtain (batch_size, num_beams), (batch_size, num_beams)
    # TODO apply the restriction here; +1 accounts for the case where the output is already eos
if restricter is not None:
_next_scores, _next_tokens = restricter(state, tokens, scores, num_beams+1)
else:
        # tensors of size bsz x (num_beams+1)
_next_scores, _next_tokens = torch.topk(scores, num_beams+1, dim=1, largest=True, sorted=True)
    # reorder according to the indices
indices = torch.arange(batch_size, dtype=torch.long).to(device)
indices = indices.repeat_interleave(num_beams)
state.reorder_state(indices)
tokens = tokens.index_select(dim=0, index=indices) # batch_size * num_beams x length
    # if hasattr(state, 'tgt_seq_len'): # TODO this should probably be removed
# max_lengths = state.tgt_seq_len
# real_max_length = max_lengths.max().item()
if max_len_a!=0:
# (bsz x num_beams, )
if state.encoder_mask is not None:
max_lengths = (state.encoder_mask.sum(dim=1).float()*max_len_a).long() + max_length
else:
max_lengths = tokens.new_full((batch_size*num_beams, ), fill_value=max_length, dtype=torch.long)
real_max_length = max_lengths.max().item()
else:
real_max_length = max_length
if state.encoder_mask is not None:
max_lengths = state.encoder_mask.new_ones(state.encoder_mask.size(0)).long()*max_length
else:
max_lengths = tokens.new_full((batch_size*num_beams,), fill_value=max_length, dtype=torch.long)
hypos = [
BeamHypotheses(num_beams, real_max_length, length_penalty, early_stopping=False) for _ in range(batch_size)
]
    not_eos_mask = _next_tokens.ne(_eos_token_id) # positions with value 1 are not eos
    keep_mask = not_eos_mask.cumsum(dim=1).le(num_beams) # positions with value 1 should be kept
    keep_mask = not_eos_mask.__and__(keep_mask) # positions with value 1 continue to the next search step
    next_tokens = _next_tokens.masked_select(keep_mask).view(batch_size, num_beams) # these are the candidates that actually continue
next_scores = _next_scores.masked_select(keep_mask).view(batch_size, num_beams)
rows, cols = not_eos_mask.eq(0)[:, :num_beams].nonzero(as_tuple=True)
    if len(rows)>0: # this means some sequences ended right at the beginning
for row, col in zip(rows.tolist(), cols.tolist()):
_token = torch.cat([tokens[row*num_beams], _next_tokens[row, col:col+1]], dim=0)
hypos[row].add(_token.clone(), _next_scores[row, col].item())
    # record the generated tokens (batch_size', cur_len)
token_ids = torch.cat([tokens, next_tokens.view(-1, 1)], dim=-1)
dones = [False] * batch_size
beam_scores = next_scores.view(-1) # batch_size * num_beams
    # used to record the length of the tokens generated so far
cur_len = token_ids.size(1)
# 0, num_beams, 2*num_beams, ...
batch_inds_with_numbeams_interval = (torch.arange(batch_size) * num_beams).view(-1, 1).to(token_ids)
while cur_len < real_max_length:
scores = decoder.decode(token_ids, state) # (bsz x num_beams, vocab_size)
if repetition_penalty != 1.0:
token_scores = scores.gather(dim=1, index=token_ids)
lt_zero_mask = token_scores.lt(0).float()
ge_zero_mask = lt_zero_mask.eq(0).float()
token_scores = lt_zero_mask * repetition_penalty * token_scores + ge_zero_mask / repetition_penalty * token_scores
scores.scatter_(dim=1, index=token_ids, src=token_scores)
if _eos_token_id!=-1:
max_len_eos_mask = max_lengths.eq(cur_len+1)
eos_scores = scores[:, _eos_token_id]
            # if the maximum length has been reached, boost the eos score
scores[:, _eos_token_id] = torch.where(max_len_eos_mask, eos_scores+1e32, eos_scores)
scores = F.log_softmax(scores, dim=-1) # (batch_size * num_beams, vocab_size)
_scores = scores + beam_scores[:, None] # (batch_size * num_beams, vocab_size)
_scores = _scores.view(batch_size, -1) # (batch_size, num_beams*vocab_size)
        # TODO apply the restriction here
if restricter is not None:
next_scores, ids = restricter(state, token_ids, _scores, 2 * num_beams)
else:
next_scores, ids = torch.topk(_scores, 2 * num_beams, dim=1, largest=True, sorted=True) # (bsz, 2*num_beams)
from_which_beam = ids // vocab_size # (batch_size, 2*num_beams)
next_tokens = ids % vocab_size # (batch_size, 2*num_beams)
        # next, assemble the results for the next batch.
        # decide which candidates to keep
# next_scores, sorted_inds = next_scores.sort(dim=-1, descending=True)
# next_tokens = next_tokens.gather(dim=1, index=sorted_inds)
# from_which_beam = from_which_beam.gather(dim=1, index=sorted_inds)
        not_eos_mask = next_tokens.ne(_eos_token_id) # positions with value 1 are not eos
        keep_mask = not_eos_mask.cumsum(dim=1).le(num_beams) # positions with value 1 should be kept
        keep_mask = not_eos_mask.__and__(keep_mask) # positions with value 1 continue to the next search step
_next_tokens = next_tokens.masked_select(keep_mask).view(-1, 1)
        _from_which_beam = from_which_beam.masked_select(keep_mask).view(batch_size, num_beams) # which beam each of the kept tokens came from
_next_scores = next_scores.masked_select(keep_mask).view(batch_size, num_beams)
beam_scores = _next_scores.view(-1)
flag = True
if cur_len+1 == real_max_length:
eos_batch_idx = torch.arange(batch_size).to(next_tokens).repeat_interleave(repeats=num_beams, dim=0)
            eos_beam_ind = torch.arange(num_beams).to(token_ids).repeat(batch_size) # these are beam indices
            eos_beam_idx = from_which_beam[:, :num_beams].reshape(-1) # which beam each one was taken from
else:
            # add the sequences within the top num_beams of each batch to the finished set; positions with value 1 should finish
effective_eos_mask = next_tokens[:, :num_beams].eq(_eos_token_id) # batch_size x num_beams
if effective_eos_mask.sum().gt(0):
eos_batch_idx, eos_beam_ind = effective_eos_mask.nonzero(as_tuple=True)
                # because from_which_beam is (batch_size, 2*num_beams), 2*num_beams is needed here
eos_beam_idx = eos_batch_idx * num_beams * 2 + eos_beam_ind
                eos_beam_idx = from_which_beam.view(-1)[eos_beam_idx] # get the actual beam the eos came from
else:
flag = False
if flag:
_token_ids = torch.cat([token_ids, _next_tokens], dim=-1)
for batch_idx, beam_ind, beam_idx in zip(eos_batch_idx.tolist(), eos_beam_ind.tolist(),
eos_beam_idx.tolist()):
if not dones[batch_idx]:
score = next_scores[batch_idx, beam_ind].item()
                    # an eos still needs to be appended at the end later
if _eos_token_id!=-1:
hypos[batch_idx].add(_token_ids[batch_idx * num_beams + beam_idx, :cur_len].clone(), score)
else:
hypos[batch_idx].add(_token_ids[batch_idx * num_beams + beam_idx].clone(), score)
        # update the state and reorganize token_ids
        reorder_inds = (batch_inds_with_numbeams_interval + _from_which_beam).view(-1) # flatten to 1-D
state.reorder_state(reorder_inds)
        # reorganize token_ids
token_ids = torch.cat([token_ids.index_select(index=reorder_inds, dim=0), _next_tokens], dim=-1)
for batch_idx in range(batch_size):
dones[batch_idx] = dones[batch_idx] or hypos[batch_idx].is_done(next_scores[batch_idx, 0].item()) or \
max_lengths[batch_idx*num_beams]==cur_len+1
cur_len += 1
if all(dones):
break
# select the best hypotheses
tgt_len = token_ids.new_zeros(batch_size)
best = []
for i, hypotheses in enumerate(hypos):
best_hyp = max(hypotheses.hyp, key=lambda x: x[0])[1]
        # add the eos back for the best hypothesis (it was stored without the trailing eos above)
if _eos_token_id!=-1:
best_hyp = torch.cat([best_hyp, best_hyp.new_ones(1)*_eos_token_id])
tgt_len[i] = len(best_hyp)
best.append(best_hyp)
# generate target batch
decoded = token_ids.new_zeros(batch_size, tgt_len.max().item()).fill_(pad_token_id)
for i, hypo in enumerate(best):
decoded[i, :tgt_len[i]] = hypo
return decoded
class BeamHypotheses(object):
def __init__(self, num_beams, max_length, length_penalty, early_stopping):
"""
Initialize n-best list of hypotheses.
"""
self.max_length = max_length - 1 # ignoring bos_token
self.length_penalty = length_penalty
self.early_stopping = early_stopping
self.num_beams = num_beams
self.hyp = []
self.worst_score = 1e9
def __len__(self):
"""
Number of hypotheses in the list.
"""
return len(self.hyp)
def add(self, hyp, sum_logprobs):
"""
Add a new hypothesis to the list.
"""
score = sum_logprobs / len(hyp) ** self.length_penalty
if len(self) < self.num_beams or score > self.worst_score:
self.hyp.append((score, hyp))
if len(self) > self.num_beams:
sorted_scores = sorted([(s, idx) for idx, (s, _) in enumerate(self.hyp)])
del self.hyp[sorted_scores[0][1]]
self.worst_score = sorted_scores[1][0]
else:
self.worst_score = min(score, self.worst_score)
def is_done(self, best_sum_logprobs):
"""
If there are enough hypotheses and that none of the hypotheses being generated
can become better than the worst one in the heap, then we are done with this sentence.
"""
if len(self) < self.num_beams:
return False
elif self.early_stopping:
return True
else:
return self.worst_score >= best_sum_logprobs / self.max_length ** self.length_penalty
| 23,989 | 44.435606 | 126 | py |
DMASTE | DMASTE-main/BARTABSA/data/process.py | import os
import json
def convert_triples(triples, words):
aspects = []
opinions = []
for i, triple in enumerate(triples):
a, o, s = triple
aspect = {'index': i, 'from': a[0], 'to': a[-1] + 1, 'polarity': s, 'term': words[a[0]: a[-1] + 1]}
opinion = {'index': i, 'from': o[0], 'to': o[-1] + 1, 'term': words[o[0]: o[-1] + 1]}
aspects.append(aspect)
opinions.append(opinion)
return aspects, opinions
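# Illustrative example (added, not part of the original file); a triple is
# (aspect token indices, opinion token indices, sentiment), mirroring the ####-separated dataset lines:
# >>> convert_triples([([1], [3], 'POS')], ['the', 'pizza', 'was', 'great'])
# ([{'index': 0, 'from': 1, 'to': 2, 'polarity': 'POS', 'term': ['pizza']}],
#  [{'index': 0, 'from': 3, 'to': 4, 'term': ['great']}])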
def convert(input_file, output_file):
dataset = []
with open(input_file) as f:
for line in f:
ins = {}
sent, triples = line.split('####')
ins['raw_words'] = sent
ins['words'] = sent.split(' ')
triples = eval(triples)
ins['aspects'], ins['opinions'] = convert_triples(triples, ins['words'])
dataset.append(ins)
with open(output_file, 'w') as f:
json.dump(dataset, f)
def main():
root = '../../ia-dataset'
for domain in os.listdir(root):
domain_dir = f'{root}/{domain}'
if '.' in domain:
continue
for mode_file in os.listdir(domain_dir):
mode = mode_file.split('.')[0]
file_name = f'{domain_dir}/{mode_file}'
os.makedirs(f'{domain}', exist_ok=True)
convert(file_name, f'./{domain}/{mode}.json')
if 'train.json' not in os.listdir(domain):
os.system('cp {}/test.json {}/train.json'.format(domain, domain))
main() | 1,503 | 32.422222 | 107 | py |
DMASTE | DMASTE-main/mySpanASTE/main.py | import os
import random
import argparse
import torch
from transformers import BertTokenizer, BertModel
from torch.utils.data import DataLoader
from torch.optim import AdamW
from tqdm import tqdm
from transformers.optimization import get_linear_schedule_with_warmup
from torch.utils.tensorboard import SummaryWriter
import random, os
import numpy as np
from utils.collate import collate_fn
from utils.data_utils import ABSADataset, ABSAProcessor, convert_pad_tensor_to_list, convert_predictions_to_triples
from models.span_aste import SpanModel
from utils.metric import Metric
parser = argparse.ArgumentParser()
parser.add_argument("--data_dir", type=str, default='../dataset',
help="the dataset for train")
parser.add_argument("--unlabeled_data_dir", type=str, default='../amazon')
parser.add_argument("--source", type=str)
parser.add_argument("--target", type=str)
parser.add_argument("--model_dir", type=str, default="save_models",
help="the model.pkl save path")
parser.add_argument('--log_dir', type=str, default='log')
parser.add_argument('--model_name', type=str, default='model')
parser.add_argument("--batch_size", type=int, default=8, help="number of batch_size")
parser.add_argument("--encoder_lr", type=float, default=5e-5, help="learning rate of adam")
parser.add_argument('--cls_lr', type=float, default=1e-3)
parser.add_argument("--mode", type=str, choices=['train', 'test'])
parser.add_argument("--n_epochs", type=int, default=10)
parser.add_argument('--reduction', type=str, default='sum', choices=['mean', 'sum'])
parser.add_argument('--seed', type=int)
args = parser.parse_args()
device = "cuda" if torch.cuda.is_available() else "cpu"
print(args)
def set_seed(seed: int):
"""
Helper function for reproducible behavior to set the seed in ``random``, ``numpy``, ``torch`` and/or ``tf``
(if installed).
Args:
seed (:obj:`int`): The seed to set.
"""
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
if args.seed is not None:
print('set seed', args.seed)
set_seed(args.seed)
def get_dataset(dataset, mode, tokenizer):
data_dir = os.path.join(args.data_dir, dataset)
processor = ABSAProcessor(tokenizer)
examples = processor.get_examples(data_dir, mode)
features = processor.convert_examples_to_features(examples)
dataset = ABSADataset(features)
return examples, dataset
def evaluate(dataloader, model, examples):
model.eval()
all_predictions = []
metric = Metric()
for batch_i, batch in enumerate(dataloader):
input_dict = dict()
for k in ['input_ids', 'attention_mask', 'spans', 'span_labels', 'span_mask', 'relation_labels', 'seq_length']:
input_dict[k] = batch[k].to(device)
output = model(**input_dict)
batch_example = examples[batch_i * args.batch_size: (batch_i + 1) * args.batch_size]
all_predictions.extend(metric.compute(batch_example, output, batch))
model.train()
return metric.get_metric(), all_predictions
def test(test_dataloader, model, test_examples, mode):
metric, predictions = evaluate(test_dataloader, model, test_examples)
print('test metric', metric)
os.makedirs(os.path.join(args.log_dir, args.model_name), exist_ok=True)
metric_file = os.path.join(args.log_dir, args.model_name, 'metric.txt')
with open(metric_file, 'w') as f:
f.write(str(metric) + '\n')
predict_file = os.path.join(args.log_dir, args.model_name, 'pred.txt')
with open(predict_file, 'w') as f:
for p in predictions:
f.write(str(p) + '\n')
def main():
metric_file = os.path.join(args.log_dir, args.model_name, 'metric.txt')
if os.path.exists(metric_file):
print('------------------------------ file exists, return ---------------------------')
return
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
tokenizer.add_special_tokens({'additional_special_tokens': ['<null-aspect>']})
tb = SummaryWriter('tb_' + args.log_dir)
if args.mode == 'train':
os.makedirs(args.model_dir, exist_ok=True)
_, train_dataset = get_dataset(args.source, 'train.txt', tokenizer)
dev_examples, dev_dataset = get_dataset(args.source, 'dev.txt', tokenizer)
train_dataloader = DataLoader(train_dataset, batch_size=args.batch_size, collate_fn=collate_fn, shuffle=True)
dev_dataloader = DataLoader(dev_dataset, batch_size=args.batch_size, collate_fn=collate_fn, shuffle=False)
print('num train data', len(train_dataset), 'num dev data', len(dev_dataset))
bert = BertModel.from_pretrained('bert-base-uncased')
bert.resize_token_embeddings(len(tokenizer))
model = SpanModel(bert).to(device)
optimizer = AdamW([{'params': model.encoder.parameters(), 'lr': args.encoder_lr, 'weight_decay': 1e-2},
{'params': list(set(model.parameters()) - set(model.encoder.parameters())), 'lr': args.cls_lr, 'weight_decay': 0}])
scheduler = get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=int(args.n_epochs * len(train_dataloader) * 0.1),
num_training_steps=args.n_epochs * len(train_dataloader))
total_steps = args.n_epochs * len(train_dataloader)
best_metric = None
num_steps = 0
with tqdm(total=len(train_dataloader)) as pbar:
for epoch in range(args.n_epochs):
model.train()
pbar.reset()
for batch in train_dataloader:
num_steps += 1
input_dict = dict()
for k in ['input_ids', 'attention_mask', 'spans', 'span_labels', 'span_mask', 'relation_labels', 'seq_length']:
input_dict[k] = batch[k].to(device)
output = model(**input_dict)
loss = output['loss']
if num_steps % int(total_steps / 300) == 0:
tb.add_scalar('loss', loss.item(), global_step=num_steps)
tb.add_scalar('ner loss', output['ner_loss'].item(), global_step=num_steps)
tb.add_scalar('relation loss', output['relation_loss'].item(), global_step=num_steps)
optimizer.zero_grad()
loss.backward()
optimizer.step()
scheduler.step()
pbar.update(1)
pbar.set_postfix(epoch=f'{epoch + 1}/{args.n_epochs}', loss=loss.item(), best_f1=f"{round(best_metric['triplet']['f1'] * 100, 2)}" if best_metric is not None else 'none')
metric, _ = evaluate(dev_dataloader, model, dev_examples)
for name in metric:
for k in metric[name]:
tb.add_scalar(f'{name}_{k}', metric[name][k], global_step=num_steps)
if best_metric is None or best_metric['triplet']['f1'] < metric['triplet']['f1']:
best_metric = metric
torch.save(model, os.path.join(args.model_dir, args.model_name + '.pt'))
tb.add_hparams(hparam_dict=vars(args), metric_dict=best_metric['triplet'])
# torch.save(model, os.path.join(args.model_dir, args.model_name + '.pt'))
else:
model = torch.load(os.path.join(args.model_dir, args.model_name + '.pt'))
test_examples, test_dataset = get_dataset(args.target, 'test.txt', tokenizer)
print('num test data', len(test_dataset))
test_dataloader = DataLoader(test_dataset, batch_size=args.batch_size, collate_fn=collate_fn)
test(test_dataloader, model, test_examples, 'test')
# dev_examples, dev_dataset = get_dataset(args.target, 'dev.txt', tokenizer)
# print('num dev data', len(dev_dataset))
# dev_dataloader = DataLoader(dev_dataset, batch_size=args.batch_size, collate_fn=collate_fn)
# test(dev_dataloader, model, dev_examples, 'dev')
os.makedirs(args.log_dir, exist_ok=True)
param_file = os.path.join(args.log_dir, args.model_name + '_params.txt')
with open(param_file, 'w') as f:
f.write(str(args) + '\n')
if __name__ == '__main__':
main()
| 8,336 | 46.369318 | 190 | py |
DMASTE | DMASTE-main/mySpanASTE/DANN_main.py | import os
import random
import argparse
import torch
from transformers import BertTokenizer, BertModel
from torch.utils.data import DataLoader
from torch.optim import AdamW
from tqdm import tqdm
from transformers.optimization import get_linear_schedule_with_warmup
from torch.utils.tensorboard import SummaryWriter
import random, os
import numpy as np
from utils.collate import collate_fn
from utils.collate_unlabeled import collate_fn_target
from utils.data_utils_unlabeled import UnlabeledDataset, UnlabeledProcessor
from utils.data_utils import ABSADataset, ABSAProcessor, convert_pad_tensor_to_list, convert_predictions_to_triples
from models.DANN_span_aste import SpanModel
from utils.metric import Metric
class Domain:
Source = 0
Target = 1
parser = argparse.ArgumentParser()
parser.add_argument("--data_dir", type=str, default='../dataset',
help="the dataset for train")
parser.add_argument("--unlabeled_data_dir", type=str, default='../amazon')
parser.add_argument("--source", type=str)
parser.add_argument("--target", type=str)
parser.add_argument("--model_dir", type=str, default="save_models",
help="the model.pkl save path")
parser.add_argument('--log_dir', type=str, default='log')
parser.add_argument('--model_name', type=str, default='model')
parser.add_argument("--batch_size", type=int, default=8, help="number of batch_size")
parser.add_argument("--encoder_lr", type=float, default=5e-5, help="learning rate of adam")
parser.add_argument('--cls_lr', type=float, default=1e-3)
parser.add_argument("--mode", type=str, choices=['train', 'test'])
parser.add_argument("--n_epochs", type=int, default=10)
parser.add_argument('--reduction', type=str, default='sum', choices=['mean', 'sum'])
parser.add_argument('--seed', type=int)
parser.add_argument('--ad_steps', type=int)
args = parser.parse_args()
device = "cuda" if torch.cuda.is_available() else "cpu"
print(args)
def set_seed(seed: int):
"""
Helper function for reproducible behavior to set the seed in ``random``, ``numpy``, ``torch`` and/or ``tf``
(if installed).
Args:
seed (:obj:`int`): The seed to set.
"""
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
if args.seed is not None:
print('set seed', args.seed)
set_seed(args.seed)
def get_dataset(dataset, mode, tokenizer):
data_dir = os.path.join(args.data_dir, dataset)
processor = ABSAProcessor(tokenizer)
examples = processor.get_examples(data_dir, mode)
features = processor.convert_examples_to_features(examples)
dataset = ABSADataset(features)
return examples, dataset
def get_unlabeled_dataset(dataset, tokenizer):
processor = UnlabeledProcessor(tokenizer)
examples = processor.get_examples(args.unlabeled_data_dir, dataset + '.txt')
features = processor.convert_examples_to_features(examples)
dataset = UnlabeledDataset(features)
return dataset
def evaluate(dataloader, model, examples):
model.eval()
all_predictions = []
metric = Metric()
for batch_i, batch in enumerate(dataloader):
input_dict = dict()
for k in ['input_ids', 'attention_mask', 'spans', 'span_labels', 'span_mask', 'relation_labels', 'seq_length']:
input_dict[k] = batch[k].to(device)
output = model(**input_dict)
batch_example = examples[batch_i * args.batch_size: (batch_i + 1) * args.batch_size]
all_predictions.extend(metric.compute(batch_example, output, batch))
model.train()
return metric.get_metric(), all_predictions
def test(test_dataloader, model, test_examples, mode):
metric, predictions = evaluate(test_dataloader, model, test_examples)
print('test metric', metric)
os.makedirs(os.path.join(args.log_dir, args.model_name), exist_ok=True)
metric_file = os.path.join(args.log_dir, args.model_name, f'{mode}_metric.txt')
with open(metric_file, 'w') as f:
f.write(str(metric) + '\n')
predict_file = os.path.join(args.log_dir, args.model_name, f'{mode}_pred.txt')
with open(predict_file, 'w') as f:
for p in predictions:
f.write(str(p) + '\n')
def main():
metric_file = os.path.join(args.log_dir, args.model_name, 'test_metric.txt')
if os.path.exists(metric_file):
print('------------------------------ file exists, return ---------------------------')
return
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
tokenizer.add_special_tokens({'additional_special_tokens': ['<null-aspect>']})
tb = SummaryWriter('tb_' + args.log_dir)
if args.mode == 'train':
os.makedirs(args.model_dir, exist_ok=True)
_, train_dataset = get_dataset(args.source, 'train.txt', tokenizer)
dev_examples, dev_dataset = get_dataset(args.source, 'dev.txt', tokenizer)
unlabeled_dataset = get_unlabeled_dataset(args.target, tokenizer)
train_dataloader = DataLoader(train_dataset, batch_size=args.batch_size, collate_fn=collate_fn, shuffle=True)
dev_dataloader = DataLoader(dev_dataset, batch_size=args.batch_size, collate_fn=collate_fn, shuffle=False)
unlabeled_dataloader = DataLoader(unlabeled_dataset, batch_size=args.batch_size, collate_fn=collate_fn_target, shuffle=True)
print('num train data', len(train_dataset), 'num dev data', len(dev_dataset), 'num unlabeled data', len(unlabeled_dataset))
bert = BertModel.from_pretrained('bert-base-uncased')
bert.resize_token_embeddings(len(tokenizer))
model = SpanModel(bert).to(device)
optimizer = AdamW([{'params': model.encoder.parameters(), 'lr': args.encoder_lr, 'weight_decay': 1e-2},
{'params': list(set(model.parameters()) - set(model.encoder.parameters())), 'lr': args.cls_lr, 'weight_decay': 0}])
scheduler = get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=int(args.n_epochs * len(train_dataloader) * 0.1),
num_training_steps=args.n_epochs * len(train_dataloader))
total_steps = args.n_epochs * len(train_dataloader)
best_metric = None
num_steps = 0
it = iter(unlabeled_dataloader)
with tqdm(total=len(train_dataloader)) as pbar:
for epoch in range(args.n_epochs):
model.train()
pbar.reset()
for batch in train_dataloader:
num_steps += 1
p = num_steps / total_steps
alpha = 2. / (1. + np.exp(-10 * p)) - 1
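                    # Added note (not part of the original file): this is the standard DANN gradient-reversal
                    # schedule; alpha ramps smoothly from 0 towards 1 over training,
                    # e.g. p=0 -> alpha=0.0, p=0.5 -> alpha~0.99, p=1 -> alpha~1.0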
input_dict = dict()
for k in ['input_ids', 'attention_mask', 'spans', 'span_labels', 'span_mask', 'relation_labels', 'seq_length']:
input_dict[k] = batch[k].to(device)
input_dict['domain'] = Domain.Source
input_dict['alpha'] = alpha
output = model(**input_dict)
loss = output['loss']
if num_steps % int(total_steps / 300) == 0:
tb.add_scalar('loss', loss.item(), global_step=num_steps)
tb.add_scalar('ner loss', output['ner_loss'].item(), global_step=num_steps)
tb.add_scalar('relation loss', output['relation_loss'].item(), global_step=num_steps)
domain_loss = torch.tensor([0.]).cuda()
if num_steps % args.ad_steps == 0:
domain_loss = output['domain_loss']
try:
                            unlabeled = next(it)
except StopIteration:
it = iter(unlabeled_dataloader)
                            unlabeled = next(it)
input_dict = dict()
for k in ['input_ids', 'attention_mask', 'spans', 'span_mask', 'seq_length']:
input_dict[k] = unlabeled[k].to(device)
input_dict['domain'] = Domain.Target
input_dict['alpha'] = alpha
target_output = model(**input_dict)
domain_loss += target_output['domain_loss']
loss += domain_loss
optimizer.zero_grad()
loss.backward()
optimizer.step()
scheduler.step()
pbar.update(1)
pbar.set_postfix(epoch=f'{epoch + 1}/{args.n_epochs}', loss=output['loss'].item(), domain_loss=domain_loss.item(), best_f1=f"{round(best_metric['triplet']['f1'] * 100, 2)}" if best_metric is not None else 'none')
metric, _ = evaluate(dev_dataloader, model, dev_examples)
for name in metric:
for k in metric[name]:
tb.add_scalar(f'{name}_{k}', metric[name][k], global_step=num_steps)
if best_metric is None or best_metric['triplet']['f1'] < metric['triplet']['f1']:
best_metric = metric
torch.save(model, os.path.join(args.model_dir, args.model_name + '.pt'))
tb.add_hparams(hparam_dict=vars(args), metric_dict=best_metric['triplet'])
# torch.save(model, os.path.join(args.model_dir, args.model_name + '.pt'))
else:
model = torch.load(os.path.join(args.model_dir, args.model_name + '.pt'))
test_examples, test_dataset = get_dataset(args.target, 'test.txt', tokenizer)
print('num test data', len(test_dataset))
test_dataloader = DataLoader(test_dataset, batch_size=args.batch_size, collate_fn=collate_fn)
test(test_dataloader, model, test_examples, 'test')
dev_examples, dev_dataset = get_dataset(args.target, 'dev.txt', tokenizer)
print('num dev data', len(dev_dataset))
dev_dataloader = DataLoader(dev_dataset, batch_size=args.batch_size, collate_fn=collate_fn)
test(dev_dataloader, model, dev_examples, 'dev')
os.makedirs(args.log_dir, exist_ok=True)
param_file = os.path.join(args.log_dir, args.model_name + '_params.txt')
with open(param_file, 'w') as f:
f.write(str(args) + '\n')
if __name__ == '__main__':
main()
| 10,335 | 47.754717 | 232 | py |
DMASTE | DMASTE-main/mySpanASTE/models/relation.py | from os import read
import torch
import math
from utils.data_utils import RelationLabel, SpanLabel
from utils.index_select import batched_index_select
from models.feedForward import FeedForward
def bucket_values(
distances: torch.Tensor, num_identity_buckets: int = 4, num_total_buckets: int = 10
) -> torch.Tensor:
"""
    Places the given values (designed for distances) into `num_total_buckets` semi-logscale
buckets, with `num_identity_buckets` of these capturing single values.
The default settings will bucket values into the following buckets:
[0, 1, 2, 3, 4, 5-7, 8-15, 16-31, 32-63, 64+].
# Parameters
distances : `torch.Tensor`, required.
A Tensor of any size, to be bucketed.
num_identity_buckets: `int`, optional (default = `4`).
The number of identity buckets (those only holding a single value).
num_total_buckets : `int`, (default = `10`)
The total number of buckets to bucket values into.
# Returns
`torch.Tensor`
A tensor of the same shape as the input, containing the indices of the buckets
the values were placed in.
"""
# Chunk the values into semi-logscale buckets using .floor().
# This is a semi-logscale bucketing because we divide by log(2) after taking the log.
# We do this to make the buckets more granular in the initial range, where we expect
# most values to fall. We then add (num_identity_buckets - 1) because we want these indices
# to start _after_ the fixed number of buckets which we specified would only hold single values.
logspace_index = (distances.float().log() / math.log(2)).floor().long() + (
num_identity_buckets - 1
)
# create a mask for values which will go into single number buckets (i.e not a range).
use_identity_mask = (distances <= num_identity_buckets).long()
use_buckets_mask = 1 + (-1 * use_identity_mask)
# Use the original values if they are less than num_identity_buckets, otherwise
# use the logspace indices.
combined_index = use_identity_mask * distances + use_buckets_mask * logspace_index
# Clamp to put anything > num_total_buckets into the final bucket.
return combined_index.clamp(0, num_total_buckets - 1)
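# Illustrative sanity check (not used by the model): with the default arguments,
#   bucket_values(torch.tensor([1, 4, 5, 7, 8, 20, 100]))
# yields tensor([1, 4, 5, 5, 6, 7, 9]) -- values <= 4 keep their own bucket,
# 5-7 share bucket 5, 8-15 bucket 6, 16-31 bucket 7, 32-63 bucket 8, and
# anything >= 64 is clamped into the final bucket 9.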
class RelationModel(torch.nn.Module):
def __init__(self, pair_embed_dim, spans_per_word=0.5, distance_embed_dim=128, hidden_dim=150, num_layers=2, activation=torch.nn.ReLU(), dropout=0.4, n_labels=4):
super(RelationModel, self).__init__()
self.pair_embed_dim = pair_embed_dim
self.n_labels = n_labels
self.spans_per_word = spans_per_word
self.distance_embedding = torch.nn.Embedding(512, embedding_dim=distance_embed_dim)
torch.nn.init.xavier_normal_(self.distance_embedding.weight)
self.ffnn = FeedForward(input_dim=pair_embed_dim + distance_embed_dim, hidden_dim=hidden_dim, num_layers=num_layers, activation=activation, dropout=dropout)
self.classifier = torch.nn.Linear(in_features=hidden_dim, out_features=n_labels)
torch.nn.init.xavier_normal_(self.classifier.weight)
self._loss = torch.nn.CrossEntropyLoss(reduction='sum')
def forward(
self, # type: ignore
spans,
ner_scores,
span_embeddings,
span_mask,
seq_length,
relation_labels = None
):
pruned_a = self._prune_spans(ner_scores[..., SpanLabel.ASPECT], span_mask, seq_length)
pruned_o = self._prune_spans(ner_scores[..., SpanLabel.OPINION], span_mask, seq_length)
spans_a = batched_index_select(spans, pruned_a['indices'])
spans_o = batched_index_select(spans, pruned_o['indices'])
relation_scores, relation_mask, relation_embeddings = self.predict_relation(spans, pruned_a['indices'], pruned_a['mask'], pruned_o['indices'], pruned_o['mask'], span_embeddings)
pruned_relation_labels = None
loss = torch.tensor(0, dtype=torch.float).to(spans_a.device)
if relation_labels is not None:
pruned_relation_labels = self.get_pruned_gold_relations(relation_labels, pruned_a, pruned_o)
flatten_relation_scores = relation_scores.reshape([-1, self.n_labels])
flatten_labels = pruned_relation_labels.view(-1)
flatten_score_mask = relation_mask.unsqueeze(-1).expand_as(relation_scores).view(flatten_relation_scores.shape)
flatten_relation_scores = flatten_relation_scores[flatten_score_mask]
flatten_labels = flatten_labels[relation_mask.view(-1)]
loss = self._loss(input=flatten_relation_scores.reshape([-1, self.n_labels]), target=flatten_labels)
return {'relation_scores': torch.softmax(relation_scores, dim=-1),
'relation_mask': relation_mask,
'relation_embeddings': relation_embeddings,
'pruned_relation_labels': pruned_relation_labels,
'loss': loss,
'pruned_a': pruned_a,
'pruned_o': pruned_o,
'spans_a': spans_a,
'spans_a_mask': pruned_a['mask'],
'spans_o': spans_o,
'spans_o_mask': pruned_o['mask']}
def get_pruned_gold_relations(self, relation_labels, pruned_a, pruned_o):
indices_a = pruned_a['indices']
indices_o = pruned_o['indices']
new_relation_labels = []
for i in range(relation_labels.shape[0]):
entry = relation_labels[i]
width = indices_a[i].shape[0]
assert indices_a[i].shape[0] == indices_o[i].shape[0]
idx_a = indices_a[i].unsqueeze(-1).expand([width, width])
idx_o = indices_o[i].unsqueeze(0).expand([width, width])
# print(entry.shape, idx_a.shape, idx_o.shape)
labels = entry[idx_a.reshape(-1), idx_o.reshape(-1)]
new_relation_labels.append(labels.reshape(width, width))
new_relation_labels = torch.stack(new_relation_labels, dim=0)
return new_relation_labels
def predict_relation(self, spans, a_indices, a_mask, o_indices, o_mask, span_embeddings):
bsz, seq_a = a_indices.shape
_, seq_o = o_indices.shape
mask = a_mask.unsqueeze(-1) * o_mask.unsqueeze(1)
# print('mask', mask.shape)
new_shape = (bsz, seq_a, seq_o)
a_indices = a_indices.unsqueeze(2).expand(new_shape)
o_indices = o_indices.unsqueeze(1).expand(new_shape)
a_embeddings = batched_index_select(span_embeddings, a_indices)
o_embeddings = batched_index_select(span_embeddings, o_indices)
spans_a = batched_index_select(spans, a_indices)
spans_o = batched_index_select(spans, o_indices)
dis1 = spans_a[..., 0] - spans_o[..., 1]
dis2 = spans_a[..., 1] - spans_o[..., 0]
dis, _ = torch.min(torch.cat([torch.absolute(dis1).unsqueeze(-1), torch.absolute(dis2).unsqueeze(-1)], dim=-1), dim=-1)
dis = bucket_values(dis)
distance_embeddings = self.distance_embedding(dis)
pair_embeddings = torch.cat([a_embeddings, o_embeddings, distance_embeddings], dim=-1)
pair_scores = self.classifier(self.ffnn(pair_embeddings))
return pair_scores, mask, pair_embeddings
def _prune_spans(self, scores, mask, seq_length):
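        # Keep the top ceil(spans_per_word * sentence_length) candidate spans per
        # sentence, ranked by the given NER score column (aspect or opinion);
        # padded spans are first pushed down to -1e20 so they are never selected.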
num_spans_to_keep = torch.ceil(
seq_length.float() * self.spans_per_word
).long()
num_spans = scores.shape[1]
num_items_to_keep = torch.clamp(num_spans_to_keep, max=num_spans).to(scores.device)
max_items_to_keep = max(num_items_to_keep.max().item(), 1)
scores = torch.where(mask.bool(), scores, torch.zeros_like(scores) + -1e20)
_, top_indices = scores.topk(max_items_to_keep, dim=1)
top_indices_mask = torch.arange(start=0, end=max_items_to_keep).to(scores.device).reshape([1, -1]).expand_as(top_indices)
top_indices_mask = top_indices_mask < num_items_to_keep.reshape(-1, 1)
return {'indices': top_indices, 'mask': top_indices_mask} | 8,024 | 51.796053 | 186 | py |
DMASTE | DMASTE-main/mySpanASTE/models/feedForward.py | import torch
class FeedForward(torch.nn.Module):
def __init__(self, input_dim, hidden_dim, num_layers, activation, dropout):
super(FeedForward, self).__init__()
hidden_dims = [hidden_dim] * num_layers # type: ignore
activations = [activation] * num_layers # type: ignore
dropout = [dropout] * num_layers # type: ignore
self._activations = torch.nn.ModuleList(activations)
input_dims = [input_dim] + hidden_dims[:-1]
linear_layers = []
for layer_input_dim, layer_output_dim in zip(input_dims, hidden_dims):
a = torch.nn.Linear(layer_input_dim, layer_output_dim)
torch.nn.init.xavier_normal_(a.weight)
linear_layers.append(a)
self._linear_layers = torch.nn.ModuleList(linear_layers)
dropout_layers = [torch.nn.Dropout(p=value) for value in dropout]
self._dropout = torch.nn.ModuleList(dropout_layers)
self._output_dim = hidden_dims[-1]
self.input_dim = input_dim
def get_output_dim(self):
return self._output_dim
def get_input_dim(self):
return self.input_dim
def forward(self, inputs: torch.Tensor) -> torch.Tensor:
output = inputs
for layer, activation, dropout in zip(
self._linear_layers, self._activations, self._dropout
):
output = dropout(activation(layer(output)))
return output | 1,421 | 39.628571 | 79 | py |
DMASTE | DMASTE-main/mySpanASTE/models/DANN_span_aste.py | import torch
from torch.nn import functional as F
from utils.index_select import batched_index_select
from models.ner import NERModel
from models.relation import RelationModel
from models.functions import ReverseLayerF
class SpanModel(torch.nn.Module):
def __init__(self, encoder, width_embedding_dim=20, max_width=512, spans_per_word=0.5):
super(SpanModel, self).__init__()
self.encoder = encoder
self.max_width = max_width
self.width_embedding = torch.nn.Embedding(max_width, width_embedding_dim)
torch.nn.init.xavier_normal_(self.width_embedding.weight)
self.span_embed_dim = 768 * 2 + width_embedding_dim
self.ner = NERModel(span_embed_dim=self.span_embed_dim)
self.relation = RelationModel(pair_embed_dim=self.span_embed_dim * 2, spans_per_word=spans_per_word)
self.domain_cls = torch.nn.Linear(768, 2)
def forward(self, input_ids, attention_mask, spans, span_mask, seq_length, span_labels=None, relation_labels=None, alpha=None, domain=None):
text_embeddings = self.encoder(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state
span_embeddings = self.text_to_span_embeds(text_embeddings, spans)
ner_output = self.ner(span_embeddings, span_mask, span_labels)
relation_output = self.relation(spans, ner_output['ner_scores'], span_embeddings, span_mask, seq_length=seq_length, relation_labels=relation_labels)
loss = ner_output['loss'] + relation_output['loss']
num_spans = span_mask.sum()
num_relations = relation_output['relation_mask'].sum()
domain_loss = torch.tensor([0.]).cuda()
if domain is not None:
reverse_embed = ReverseLayerF.apply(text_embeddings, alpha)
domain_scores = self.domain_cls(reverse_embed)
domain_label = torch.where(attention_mask.bool(), torch.zeros_like(attention_mask).long() + domain, torch.zeros_like(attention_mask).long() -1 )
# reverse_rel_embed = ReverseLayerF.apply(relation_output['relation_embeddings'], alpha)
# rel_domain_scores = self.relation_domain_cls(reverse_rel_embed)
# zero = torch.zeros_like(relation_output['relation_mask'])
# rel_domain_label = torch.where(relation_output['relation_mask'].bool(), zero.long() + domain, zero.long() - 1)
domain_loss = F.cross_entropy(domain_scores.view(-1, 2), domain_label.view(-1).long(), reduction='sum', ignore_index=-1)
# rel_domain_loss = F.cross_entropy(rel_domain_scores.view(-1, 2), rel_domain_label.view(-1).long(), reduction='sum', ignore_index=-1)
return {'loss': loss,
'ner_loss': ner_output['loss'] / (num_spans + num_relations),
'relation_loss': relation_output['loss'] / (num_spans + num_relations),
'ner_output': ner_output,
'relation_output': relation_output,
'domain_loss': domain_loss}
def text_to_span_embeds(self, text_embeddings, spans):
# batch index select
span_starts, span_ends = [index.squeeze(-1) for index in spans.split(1, dim=-1)]
start_embeddings = batched_index_select(text_embeddings, span_starts)
end_embeddings = batched_index_select(text_embeddings, span_ends)
width = span_ends - span_starts
width_embedding = self.width_embedding(width)
span_embedding = torch.cat([start_embeddings, end_embeddings, width_embedding], dim=-1)
return span_embedding
| 3,605 | 59.1 | 157 | py |
DMASTE | DMASTE-main/mySpanASTE/models/ner.py | import torch
from torch.nn.modules import dropout
import torch.nn.functional as F
from utils.data_utils import SpanLabel
from models.feedForward import FeedForward
class NERModel(torch.nn.Module):
def __init__(self, span_embed_dim, hidden_dim=150, num_layers=2, activation=torch.nn.ReLU(), dropout=0.4, n_labels=3):
super(NERModel, self).__init__()
self.span_embed_dim = span_embed_dim
self.n_labels = n_labels
self.ffnn = FeedForward(input_dim=span_embed_dim, hidden_dim=hidden_dim, num_layers=num_layers, activation=activation, dropout=dropout)
self.classifier = torch.nn.Linear(in_features=hidden_dim, out_features=n_labels)
torch.nn.init.xavier_normal_(self.classifier.weight)
self._loss = torch.nn.CrossEntropyLoss(reduction='sum')
def forward(self, span_embeddings, span_mask, span_labels=None):
# shape: bsz, span_length, n_labels
ner_scores = self.classifier(self.ffnn(span_embeddings))
masked_scores = torch.zeros_like(span_mask, dtype=torch.long) + 1e20
ner_scores[..., SpanLabel.INVALID] = torch.where(span_mask.bool(), ner_scores[..., SpanLabel.INVALID], masked_scores)
softmax_ner_scores = ner_scores.softmax(dim=-1)
output_dict = dict()
output_dict.update(ner_scores=softmax_ner_scores)
output_dict.update(opinion_scores=ner_scores.softmax(dim=-1)[..., SpanLabel.OPINION])
output_dict.update(target_scores=ner_scores.softmax(dim=-1)[..., SpanLabel.ASPECT])
loss = torch.tensor(0,dtype=torch.float).to(span_mask.device)
if span_labels is not None:
# test
# predicts = torch.argmax(softmax_ner_scores, dim=-1)
# from sklearn.metrics import precision_score, recall_score, f1_score
# valid_mask = span_labels != SpanLabel.INVALID
# predicts = predicts[valid_mask]
# new_labels = span_labels[valid_mask]
# p, r = precision_score(new_labels.cpu().tolist(), predicts.cpu().tolist(), average='macro'), recall_score(new_labels.cpu().tolist(), predicts.cpu().tolist(), average='macro')
# f1 = f1_score(new_labels.cpu().tolist(), predicts.cpu().tolist(), average='macro')
# print(f'ner p: {p}, r: {r}, f1: {f1}')
# end
ner_scores_flat = ner_scores.view(
-1, self.n_labels
)
ner_labels_flat = span_labels.view(-1)
mask_flat = span_mask.view(-1).bool()
loss = self._loss(ner_scores_flat[mask_flat], ner_labels_flat[mask_flat])
output_dict["loss"] = loss
return output_dict
| 2,651 | 48.111111 | 188 | py |
DMASTE | DMASTE-main/mySpanASTE/models/functions.py | from torch.autograd import Function
class ReverseLayerF(Function):
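    # Gradient reversal layer (as in DANN): the forward pass is the identity, while
    # the backward pass flips the sign of the incoming gradient and scales it by alpha.
    # Typical use elsewhere in this repo: ReverseLayerF.apply(features, alpha).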
@staticmethod
def forward(ctx, x, alpha):
ctx.alpha = alpha
return x.view_as(x)
@staticmethod
def backward(ctx, grad_output):
output = grad_output.neg() * ctx.alpha
return output, None | 305 | 18.125 | 46 | py |
DMASTE | DMASTE-main/mySpanASTE/models/span_aste.py | import torch
from utils.index_select import batched_index_select
from models.ner import NERModel
from models.relation import RelationModel
class SpanModel(torch.nn.Module):
def __init__(self, encoder, width_embedding_dim=20, max_width=512, spans_per_word=0.5):
super(SpanModel, self).__init__()
self.encoder = encoder
self.max_width = max_width
self.width_embedding = torch.nn.Embedding(max_width, width_embedding_dim)
torch.nn.init.xavier_normal_(self.width_embedding.weight)
self.span_embed_dim = 768 * 2 + width_embedding_dim
self.ner = NERModel(span_embed_dim=self.span_embed_dim)
self.relation = RelationModel(pair_embed_dim=self.span_embed_dim * 2, spans_per_word=spans_per_word)
def forward(self, input_ids, attention_mask, spans, span_mask, seq_length, span_labels=None, relation_labels=None):
text_embeddings = self.encoder(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state
span_embeddings = self.text_to_span_embeds(text_embeddings, spans)
ner_output = self.ner(span_embeddings, span_mask, span_labels)
relation_output = self.relation(spans, ner_output['ner_scores'], span_embeddings, span_mask, seq_length=seq_length, relation_labels=relation_labels)
loss = ner_output['loss'] + relation_output['loss']
num_spans = span_mask.sum()
num_relations = relation_output['relation_mask'].sum()
return {'loss': loss,
'ner_loss': ner_output['loss'] / (num_spans + num_relations),
'relation_loss': relation_output['loss'] / (num_spans + num_relations),
'ner_output': ner_output,
'relation_output': relation_output}
def text_to_span_embeds(self, text_embeddings, spans):
# batch index select
span_starts, span_ends = [index.squeeze(-1) for index in spans.split(1, dim=-1)]
start_embeddings = batched_index_select(text_embeddings, span_starts)
end_embeddings = batched_index_select(text_embeddings, span_ends)
width = span_ends - span_starts
width_embedding = self.width_embedding(width)
span_embedding = torch.cat([start_embeddings, end_embeddings, width_embedding], dim=-1)
return span_embedding
| 2,370 | 52.886364 | 157 | py |
DMASTE | DMASTE-main/mySpanASTE/scripts/cross-domain/dann/run.py | import os
import sys
import time
import random
import threading
source_list = ['electronics', 'home', 'beauty', 'fashion', 'all']
target_list = ['book', 'grocery', 'pet', 'toy']
class Param:
def __init__(self, model_name, source, target, ad_steps):
self.model_name = model_name
self.source = source
self.target = target
self.ad_steps = ad_steps
class myThread(threading.Thread):
def __init__(self, threadID):
threading.Thread.__init__(self)
self.threadID = threadID
def run(self):
os.system(f'bash scripts/cross-domain/dann/sub/{self.threadID}.sh')
print(f'bash scripts/cross-domain/dann/sub/{self.threadID}.sh')
def main():
param_list = []
for source in source_list:
for target in target_list:
for model_name in range(5):
for ad_steps in [30, 50, 100, 300]:
param = Param(model_name=model_name, source=source, target=target, ad_steps=ad_steps)
param_list.append(param)
num_params = len(param_list)
random.seed(0)
param_list = random.sample(param_list, num_params)
num_batch = int(sys.argv[1])
num_device = 8
batch_size = num_params // num_batch
os.system('rm -r ./scripts/cross-domain/dann/sub')
os.makedirs('./scripts/cross-domain/dann/sub', exist_ok=True)
for i, p in enumerate(param_list):
f = open(f'./scripts/cross-domain/dann/sub/{i % num_batch}.sh', 'a')
f.write(f'bash scripts/cross-domain/dann/maste.sh {p.source} {p.target} {p.ad_steps} {p.model_name} {i % num_device}\n')
f.close()
thread_list = []
worker = int(sys.argv[2])
for i in range(num_device):
thread = myThread(i + num_device * worker)
thread.start()
thread_list.append(thread)
time.sleep(2)
for t in thread_list:
t.join()
main() | 1,907 | 31.896552 | 128 | py |
DMASTE | DMASTE-main/mySpanASTE/scripts/cross-domain/dann/run_eq.py | import os
import sys
import time
import random
import threading
source_list = ['electronics', 'home', 'beauty', 'fashion', 'all']
target_list = ['book', 'grocery', 'pet', 'toy']
class Param:
def __init__(self, model_name, source, target, ad_steps):
self.model_name = model_name
self.source = source
self.target = target
self.ad_steps = ad_steps
class myThread(threading.Thread):
def __init__(self, threadID):
threading.Thread.__init__(self)
self.threadID = threadID
def run(self):
os.system(f'bash scripts/cross-domain/dann/sub/{self.threadID}.sh')
print(f'bash scripts/cross-domain/dann/sub/{self.threadID}.sh')
def main():
param_list = []
for source in source_list:
for target in target_list:
for model_name in range(5):
for ad_steps in [30, 50, 100, 300]:
param = Param(model_name=model_name, source=source, target=target, ad_steps=ad_steps)
param_list.append(param)
num_params = len(param_list)
random.seed(0)
param_list = random.sample(param_list, num_params)
num_batch = int(sys.argv[1])
num_device = 8
batch_size = num_params // num_batch
os.system('rm -r ./scripts/cross-domain/dann/sub')
os.makedirs('./scripts/cross-domain/dann/sub', exist_ok=True)
for i, p in enumerate(param_list):
f = open(f'./scripts/cross-domain/dann/sub/{i % num_batch}.sh', 'a')
f.write(f'bash scripts/cross-domain/dann/eq-maste.sh {p.source} {p.target} {p.ad_steps} {p.model_name} {i % num_device}\n')
f.close()
thread_list = []
worker = int(sys.argv[2])
for i in range(num_device):
thread = myThread(i + num_device * worker)
thread.start()
thread_list.append(thread)
time.sleep(2)
for t in thread_list:
t.join()
main() | 1,910 | 31.948276 | 131 | py |
DMASTE | DMASTE-main/mySpanASTE/scripts/cross-domain/dann/run_xu.py | import os
import sys
import time
import random
import threading
source_list = ['14res', '15res', '16res', '14lap', '14lap', '14lap']
target_list = ['14lap', '14lap', '14lap', '14res', '15res', '16res']
class Param:
def __init__(self, model_name, source, target, ad_steps):
self.model_name = model_name
self.source = source
self.target = target
self.ad_steps = ad_steps
class myThread(threading.Thread):
def __init__(self, threadID):
threading.Thread.__init__(self)
self.threadID = threadID
def run(self):
os.system(f'bash scripts/cross-domain/dann/sub/{self.threadID}.sh')
print(f'bash scripts/cross-domain/dann/sub/{self.threadID}.sh')
def main():
param_list = []
for source, target in zip(source_list, target_list):
for model_name in range(5):
for ad_steps in [1, 3, 5, 7, 10, 15, 20, 30, 50, 100]:
param = Param(model_name=model_name, source=source, target=target, ad_steps=ad_steps)
param_list.append(param)
num_params = len(param_list)
random.seed(0)
param_list = random.sample(param_list, num_params)
num_batch = int(sys.argv[1])
num_device = 8
batch_size = num_params // num_batch
os.system('rm -r ./scripts/cross-domain/dann/sub')
os.makedirs('./scripts/cross-domain/dann/sub', exist_ok=True)
for i, p in enumerate(param_list):
f = open(f'./scripts/cross-domain/dann/sub/{i % num_batch}.sh', 'a')
f.write(f'bash scripts/cross-domain/dann/maste.sh {p.source} {p.target} {p.ad_steps} {p.model_name} {i % num_device}\n')
f.close()
thread_list = []
worker = int(sys.argv[2])
for i in range(num_device):
thread = myThread(i + num_device * worker)
thread.start()
thread_list.append(thread)
time.sleep(2)
for t in thread_list:
t.join()
main() | 1,926 | 32.224138 | 128 | py |
DMASTE | DMASTE-main/mySpanASTE/utils/data_utils_unlabeled.py | import os
from enum import IntEnum
from torch.utils.data import Dataset
class DomainLabel(IntEnum):
Source = 0
Target = 1
class UnlabeledDataset(Dataset):
def __init__(self, features):
self.features = features
def __getitem__(self, index):
return self.features[index]
def __len__(self):
return len(self.features)
class UnlabeledFeature:
def __init__(self, input_ids, spans, token_range, seq_length) -> None:
self.input_ids = input_ids
self.spans = spans
self.seq_length = seq_length
self.token_range = token_range
class UnlabeledProcessor:
def __init__(self, tokenizer, min_span_width=1, max_span_width=10, max_seq_length=512):
self.tokenizer = tokenizer
self.null_aspect_id = self.tokenizer.convert_tokens_to_ids(['[ia]'])
self.min_span_width = min_span_width
self.max_span_width = max_span_width
self.max_seq_length = max_seq_length
def get_examples(self, data_dir, mode):
file_name = os.path.join(data_dir, mode)
lines = []
with open(file_name) as f:
counter = 0
for line in f:
lines.append('[ia] ' + line.split(' #### ')[-1])
return lines
def convert_examples_to_features(self, examples):
features = []
for sent in examples:
input_ids, token_range = self._tokenize(sent)
seq_length = len(sent.split())
spans = self._enumerate_spans(token_range)
features.append(UnlabeledFeature(input_ids=input_ids,
spans=spans,
seq_length=seq_length,
token_range=token_range))
return features
def _enumerate_spans(self, token_range):
word_length = len(token_range)
spans = [(1, 1)]
for i in range(word_length):
for j in range(self.min_span_width - 1, self.max_span_width):
if i + j < word_length:
start = token_range[i][0]
end = token_range[i + j][1]
spans.append((start, end))
return spans
def _tokenize(self, sentence):
words = sentence.split()
input_ids = [self.tokenizer.cls_token_id]
token_range = []
start_ids = 1
for word in words:
word_ids = self.tokenizer.convert_tokens_to_ids(self.tokenizer.tokenize(word))
if len(input_ids) + len(word_ids) + 1 > self.max_seq_length:
break
input_ids.extend(word_ids)
token_range.append([start_ids, start_ids + len(word_ids) - 1])
start_ids += len(word_ids)
input_ids.append(self.tokenizer.sep_token_id)
return input_ids, token_range
if __name__ == '__main__':
from transformers import BertTokenizer
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
    tokenizer.add_special_tokens({'additional_special_tokens': ['[ia]']})
processor = UnlabeledProcessor(tokenizer=tokenizer)
root = '../../../dataset/amazon'
for domain in os.listdir(root):
        examples = processor.get_examples(root, domain)
features = processor.convert_examples_to_features(examples)
for example, feature in zip(examples, features):
print(example)
print(tokenizer.convert_ids_to_tokens(feature.input_ids))
print(feature.token_range)
print()
| 3,572 | 33.68932 | 92 | py |
DMASTE | DMASTE-main/mySpanASTE/utils/collate_unlabeled.py | import torch
from utils.data_utils import RelationLabel
from utils.data_utils_unlabeled import DomainLabel
def collate_fn_target(data):
"""批处理,填充同一batch中句子最大的长度"""
def pad_and_tensor(data, pad_value=0):
max_len = max([len(x) for x in data])
new_data = []
mask = []
for x in data:
tmp_data = torch.tensor(x)
size = tmp_data.shape
pad_data = torch.zeros((max_len - size[0], *size[1:]))
new_data.append(torch.cat([tmp_data, pad_data], dim=0))
mask.append(torch.cat([torch.ones_like(tmp_data), torch.zeros_like(pad_data)], dim=0))
return torch.stack(new_data, dim=0).to(torch.long), torch.stack(mask, dim=0).to(torch.long)
input_ids = [f.input_ids for f in data]
bsz = len(data)
input_ids, attention_mask = pad_and_tensor(input_ids)
spans = [f.spans for f in data]
spans, span_mask = pad_and_tensor(spans)
span_mask = span_mask[...,0]
seq_length = [f.seq_length for f in data]
seq_length = torch.tensor(seq_length).to(torch.long)
token_range = [f.token_range for f in data]
token_range, token_range_mask = pad_and_tensor(token_range)
token_range_mask = token_range_mask[..., 0]
batch = {'input_ids': input_ids,
'attention_mask': attention_mask,
'spans': spans,
'span_mask': span_mask,
'seq_length': seq_length,
'token_range': token_range,
'token_range_mask': token_range_mask}
return batch
| 1,535 | 36.463415 | 99 | py |
DMASTE | DMASTE-main/mySpanASTE/utils/data_utils.py | import os
from enum import IntEnum
from pydantic import BaseModel
from typing import List
from torch.utils.data import Dataset
import torch
class SpanLabel(IntEnum):
INVALID = 0
ASPECT = 1
OPINION = 2
class RelationLabel(IntEnum):
INVALID = 0
POS = 1
NEG = 2
NEU = 3
class ABSADataset(Dataset):
def __init__(self, features):
self.features = features
def __getitem__(self, index):
return self.features[index]
def __len__(self):
return len(self.features)
class SentimentTriple(BaseModel):
aspects: List
opinions: List
triples: List
@classmethod
def from_sentiment_triple(cls, triples, token_range):
"""read from sentiment triple"""
sentiment_map = {'POS': RelationLabel.POS, 'NEG': RelationLabel.NEG, 'NEU': RelationLabel.NEU}
aspects, opinions, new_triples = [], [], []
for a, o, s in triples:
new_a, new_o = None, None
if a[1] < len(token_range):
if -1 in a:
new_a = (1, 1)
else:
new_a = (token_range[a[0]][0], token_range[a[1]][1])
aspects.append(new_a)
if o[1] < len(token_range):
assert -1 not in o
new_o = (token_range[o[0]][0], token_range[o[1]][1])
opinions.append(new_o)
if new_a is not None and new_o is not None:
new_triples.append((new_a, new_o, sentiment_map[s]))
return cls(
aspects=aspects,
opinions=opinions,
triples=new_triples,
)
class ABSAFeature:
def __init__(self, input_ids, spans, span_labels, triples, token_range, seq_length) -> None:
self.input_ids = input_ids
self.spans = spans
self.span_labels = span_labels
# self.relation_labels = relation_labels
self.seq_length = seq_length
self.token_range = token_range
self.triples = triples
class ABSAProcessor:
def __init__(self, tokenizer, min_span_width=1, max_span_width=10, max_seq_length=512):
self.tokenizer = tokenizer
self.null_aspect_id = self.tokenizer.convert_tokens_to_ids('<null-aspect>')
self.min_span_width = min_span_width
self.max_span_width = max_span_width
self.max_seq_length = max_seq_length
def get_features(self, data_dir, mode):
examples = self.get_examples(data_dir, mode)
features = self.convert_examples_to_features(examples)
return features
def get_examples(self, data_dir, mode):
file_name = os.path.join(data_dir, mode)
instances = []
lines = []
with open(file_name) as f:
lines = f.readlines()
lines = [x.split('####') for x in lines]
for line in lines:
sentence, triples, = line[:2]
triples = eval(triples)
new_triples = []
for t in triples:
a, o, s = t
a = [a[0], a[-1]]
o = [o[0], o[-1]]
assert len(a) == 2 and len(o) == 2 and s in ('POS', 'NEG', 'NEU')
assert a[0] <= a[1]
assert o[0] <= o[1]
new_triples.append((a, o, s))
instances.append((sentence, new_triples))
return instances
def convert_examples_to_features(self, examples):
features = []
for sent, triples in examples:
input_ids, token_range = self._tokenize(sent)
seq_length = len(sent.split())
triples = SentimentTriple.from_sentiment_triple(triples, token_range)
spans = self._enumerate_spans(token_range)
span_labels = [SpanLabel.INVALID] * len(spans)
for a in triples.aspects:
# print(a)
if a[-1] - a[0] > self.max_span_width:
continue
idx = spans.index(a)
span_labels[idx] = SpanLabel.ASPECT
for o in triples.opinions:
if o[-1] - o[0] > self.max_span_width:
continue
idx = spans.index(o)
span_labels[idx] = SpanLabel.OPINION
# for a, o, s in triples.triples:
# idx_a, idx_o = spans.index(a), spans.index(o)
# relation_labels[idx_a][idx_o] = s
features.append(ABSAFeature(input_ids=input_ids,
spans=spans,
span_labels=span_labels,
triples = triples.triples,
# relation_labels=relation_labels,
seq_length=seq_length,
token_range=token_range))
return features
def _enumerate_spans(self, token_range):
word_length = len(token_range)
spans = [(1, 1)]
for i in range(word_length):
for j in range(self.min_span_width - 1, self.max_span_width):
if i + j < word_length:
start = token_range[i][0]
end = token_range[i + j][1]
spans.append((start, end))
return spans
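    # Illustrative example of what _tokenize returns (assuming WordPiece splits
    # "playing" into "play ##ing"): for the sentence "I love playing",
    # input_ids = [CLS, <null-aspect>, I, love, play, ##ing, SEP] and
    # token_range = [[2, 2], [3, 3], [4, 5]], i.e. token_range[k] holds the
    # first/last sub-token positions of word k.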
def _tokenize(self, sentence):
words = sentence.split()
input_ids = [self.tokenizer.cls_token_id, self.null_aspect_id]
token_range = []
start_ids = 2
for word in words:
word_ids = self.tokenizer.convert_tokens_to_ids(self.tokenizer.tokenize(word))
if len(input_ids) + len(word_ids) + 1 > self.max_seq_length:
break
input_ids.extend(word_ids)
token_range.append([start_ids, start_ids + len(word_ids) - 1])
start_ids += len(word_ids)
input_ids.append(self.tokenizer.sep_token_id)
return input_ids, token_range
def convert_predictions_to_triples(spans_a, spans_o, relation_labels, token_range):
# relation_idx = [i for i, label in enumerate(relations_labels) if label != RelationLabel.INVALID]
# relations_labels = [relations_labels[i] for i in relation_idx]
relation_indices = [(i, j) for i in range(len(relation_labels)) for j in range(len(relation_labels)) if relation_labels[i][j] != RelationLabel.INVALID]
# print('relation indices', relation_indices)
def subword_span2_word_span(subword_span, token_range):
if 1 in subword_span:
return [-1, -1]
start, end = -1, -1
for i, ran in enumerate(token_range):
if ran[0] <= subword_span[0] <= ran[1]:
assert start == -1
start = i
if ran[0] <= subword_span[1] <= ran[1]:
assert end == -1
end = i
return [start, end]
triples = []
int2sentiment = {RelationLabel.POS: 'POS', RelationLabel.NEG: 'NEG', RelationLabel.NEU: 'NEU'}
for i, (a_idx, o_idx) in enumerate(relation_indices):
# assert span_labels[a_idx] == SpanLabel.ASPECT, span_labels[a_idx]
# assert span_labels[o_idx] == SpanLabel.OPINION, span_labels[o_idx]
a_subword_span, o_subword_span = spans_a[a_idx], spans_o[o_idx]
a_word_span = subword_span2_word_span(a_subword_span, token_range)
o_word_span = subword_span2_word_span(o_subword_span, token_range)
# print('idx', a_idx, o_idx)
triples.append((a_word_span, o_word_span, int2sentiment[relation_labels[a_idx][o_idx]]))
return triples
def convert_pad_tensor_to_list(batch_data, mask):
assert len(mask.shape) == 2
batch_data = batch_data.detach().cpu().tolist()
len_list = torch.sum(mask, dim=-1).detach().cpu().tolist()
ret = []
for length, data in zip(len_list, batch_data):
ret.append(data[: length])
return ret
if __name__ == '__main__':
from transformers import BertTokenizer
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
processor = ABSAProcessor(tokenizer=tokenizer)
root = '../../../dataset/del/CDASTE-Data'
for domain in os.listdir(root):
if '.' in domain:
continue
examples = processor.get_examples(f'../../../dataset/del/CDASTE-Data/{domain}', 'train.txt')
features = processor.convert_examples_to_features(examples)
for example, feature in zip(examples, features):
            triples1 = example[1]
            # print(domain, example)
            # Rebuild the dense relation-label matrix from the gold triples, since
            # ABSAFeature only stores the triples themselves.
            relation_labels = [[RelationLabel.INVALID] * len(feature.spans) for _ in feature.spans]
            for a, o, s in feature.triples:
                if a in feature.spans and o in feature.spans:
                    relation_labels[feature.spans.index(a)][feature.spans.index(o)] = s
            triples2 = convert_predictions_to_triples(feature.spans, feature.spans, relation_labels, feature.token_range)
assert len(feature.input_ids) == feature.token_range[-1][1] + 2
if str(sorted(triples1)) != str(sorted(triples2)):
print(example, len(feature.token_range))
print(triples2)
print()
| 8,811 | 38.339286 | 155 | py |
DMASTE | DMASTE-main/mySpanASTE/utils/collate.py | import torch
from utils.data_utils import RelationLabel
def collate_fn(data):
"""批处理,填充同一batch中句子最大的长度"""
def pad_and_tensor(data, pad_value=0):
max_len = max([len(x) for x in data])
new_data = []
mask = []
for x in data:
tmp_data = torch.tensor(x)
size = tmp_data.shape
pad_data = torch.zeros((max_len - size[0], *size[1:]))
new_data.append(torch.cat([tmp_data, pad_data], dim=0))
mask.append(torch.cat([torch.ones_like(tmp_data), torch.zeros_like(pad_data)], dim=0))
return torch.stack(new_data, dim=0).to(torch.long), torch.stack(mask, dim=0).to(torch.long)
input_ids = [f.input_ids for f in data]
input_ids, attention_mask = pad_and_tensor(input_ids)
spans = [f.spans for f in data]
max_span_length = max([len(x) for x in spans])
triples = [f.triples for f in data]
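    # Build one (max_spans x max_spans) relation-label matrix per instance:
    # entry [i, j] holds the sentiment linking candidate aspect span i to
    # candidate opinion span j, and RelationLabel.INVALID everywhere else.
    # Triples whose spans were dropped by the span-width limit are skipped
    # via the try/except below.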
relation_labels = []
relation_mask = []
for i, ins_triple in enumerate(triples):
labels = torch.zeros([max_span_length, max_span_length], dtype=torch.long) + RelationLabel.INVALID
for triple in ins_triple:
a, o, s = triple
try:
a_idx, o_idx = spans[i].index(a), spans[i].index(o)
labels[a_idx, o_idx] = s
except:
pass
mask = torch.zeros([max_span_length, max_span_length], dtype=torch.long)
mask[: len(spans[i]), : len(spans[i])] = 1
relation_labels.append(labels)
relation_mask.append(mask)
relation_labels = torch.stack(relation_labels, dim=0)
relation_mask = torch.stack(relation_mask, dim=0)
spans, _ = pad_and_tensor(spans)
span_labels = [f.span_labels for f in data]
span_labels, span_mask = pad_and_tensor(span_labels)
seq_length = [f.seq_length for f in data]
seq_length = torch.tensor(seq_length).to(torch.long)
token_range = [f.token_range for f in data]
token_range, token_range_mask = pad_and_tensor(token_range)
token_range_mask = token_range_mask[..., 0]
batch = {'input_ids': input_ids,
'attention_mask': attention_mask,
'spans': spans,
'span_labels': span_labels,
'span_mask': span_mask,
'relation_labels': relation_labels,
'relation_mask': relation_mask,
'seq_length': seq_length,
'token_range': token_range,
'token_range_mask': token_range_mask}
return batch
| 2,502 | 39.370968 | 106 | py |
DMASTE | DMASTE-main/mySpanASTE/utils/__init__.py | 0 | 0 | 0 | py |
|
DMASTE | DMASTE-main/mySpanASTE/utils/index_select.py | import torch
def batched_index_select(target, indices):
"""
target : `torch.Tensor`, required.
A 3 dimensional tensor of shape (batch_size, sequence_length, embedding_size).
This is the tensor to be indexed.
indices : `torch.LongTensor`
A tensor of shape (batch_size, ...), where each element is an index into the
`sequence_length` dimension of the `target` tensor.
"""
# Shape: (batch_size * sequence_length, embedding_size)
flattened_target = target.view(-1, target.size(-1))
flattened_indices = flatten_and_batch_shift_indices(indices, target.size(1))
# Shape: (batch_size * d_1 * ... * d_n, embedding_size)
flattened_selected = flattened_target.index_select(0, flattened_indices)
selected_shape = list(indices.size()) + [target.size(-1)]
# Shape: (batch_size, d_1, ..., d_n, embedding_size)
selected_targets = flattened_selected.view(*selected_shape)
return selected_targets
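# Illustrative usage (shapes only): with token embeddings of shape
# (batch_size=2, seq_len=7, dim=768) and span-start indices of shape (2, 5),
# batched_index_select(embeddings, starts) returns a (2, 5, 768) tensor holding
# the embedding at each selected position.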
def flatten_and_batch_shift_indices(indices, sequence_length):
if torch.max(indices) >= sequence_length or torch.min(indices) < 0:
        raise ValueError(
            f"All elements in indices should be in range (0, {sequence_length - 1})"
        )
offsets = torch.arange(start=0, end=indices.size(0), dtype=torch.long).to(indices.device) * sequence_length
for _ in range(len(indices.size()) - 1):
offsets = offsets.unsqueeze(1)
# Shape: (batch_size, d_1, ..., d_n)
offset_indices = indices + offsets
# Shape: (batch_size * d_1 * ... * d_n)
offset_indices = offset_indices.view(-1)
return offset_indices | 1,622 | 42.864865 | 111 | py |
DMASTE | DMASTE-main/mySpanASTE/utils/metric.py | import torch
from utils.data_utils import convert_pad_tensor_to_list, convert_predictions_to_triples, SpanLabel, RelationLabel
from sklearn.metrics import precision_score, recall_score, f1_score
def convert_relations_to_list(relations, mask):
ret = []
for i in range(relations.shape[0]):
r, m = relations[i], mask[i]
width = torch.sum(m, dim=0)
height = torch.sum(m, dim=1)
assert torch.sum(torch.eq(width, height)) == width.shape[0]
ret.append(r[: width[0], :width[0]].detach().tolist())
return ret
class Metric:
def __init__(self):
self.triplet = {'pred': 0, 'golden': 0, 'tp': 0}
self.ner = {'p': 0, 'r': 0, 'f1': 0}
self.relation = {'p': 0, 'r': 0, 'f1': 0}
self.aspect = {'pred': 0, 'golden': 0, 'tp': 0}
self.opinion = {'pred': 0, 'golden': 0, 'tp': 0}
self.pos_relation = {'pred': 0, 'golden': 0, 'tp': 0}
self.neg_relation = {'pred': 0, 'golden': 0, 'tp': 0}
self.neu_relation = {'pred': 0, 'golden': 0, 'tp': 0}
        self.inv_relation = {'pred': 0, 'golden': 0, 'tp': 0}
self.num_ins = 0
def get_metric(self):
ret = dict()
mean_metric = {'ner': self.ner, 'relation': self.relation}
for type_ in mean_metric:
type_metric = dict()
for metric_name in ['p', 'r', 'f1']:
type_metric[metric_name] = mean_metric[type_][metric_name] / self.num_ins
ret[type_] = type_metric
        num_metric = {'triplet': self.triplet, 'aspect': self.aspect, 'opinion': self.opinion, 'pos_rel': self.pos_relation,
                      'neg_rel': self.neg_relation, 'neu_rel': self.neu_relation, 'inv_rel': self.inv_relation}
for type_ in num_metric:
num = num_metric[type_]
tp, golden, pred = num['tp'], num['golden'], num['pred']
p = tp / pred if pred != 0 else 0
r = tp / golden if golden != 0 else 0
f1 = 2 * p * r / (p + r) if (p + r) != 0 else 0
ret[type_] = {'p': p, 'r': r, 'f1': f1}
return ret
def get_span_labels(self, batch, output):
span_labels = batch['span_labels']
span_mask = batch['span_mask']
span_labels = convert_pad_tensor_to_list(span_labels, span_mask)
span_predictions = output['ner_output']['ner_scores']
span_predictions = torch.argmax(span_predictions, dim=-1)
span_predictions = convert_pad_tensor_to_list(span_predictions, span_mask)
return span_labels, span_predictions
def cal_num(self, ins_pred, ins_label, ins_type, metric):
golden = set([i for i, x in enumerate(ins_label) if x == ins_type])
pred = set([i for i, x in enumerate(ins_pred) if x == ins_type])
tp = golden & pred
ins_metric = {'golden': len(golden), 'pred': len(pred), 'tp': len(tp)}
for k in ins_metric:
metric[k] += ins_metric[k]
def cal_span_metric(self, span_labels, span_predictions):
for ins_label, ins_pred in zip(span_labels, span_predictions):
assert len(ins_label) == len(ins_pred)
self.num_ins += 1
self.ner['p'] += precision_score(ins_label, ins_pred, average='macro', zero_division=1)
self.ner['r'] += recall_score(ins_label, ins_pred, average='macro', zero_division=1)
self.ner['f1'] += f1_score(ins_label, ins_pred, average='macro', zero_division=1)
self.cal_num(ins_pred, ins_label, SpanLabel.ASPECT, self.aspect)
self.cal_num(ins_pred, ins_label, SpanLabel.OPINION, self.opinion)
def cal_relation_metric(self, output):
relation_labels = output['relation_output']['pruned_relation_labels']
relation_mask = output['relation_output']['relation_mask']
relation_predictions = output['relation_output']['relation_scores']
relation_predictions = torch.argmax(relation_predictions, dim=-1)
assert relation_labels.shape == relation_predictions.shape
relation_labels = convert_relations_to_list(relation_labels, relation_mask)
relation_predictions = convert_relations_to_list(relation_predictions, relation_mask)
for ins_label, ins_pred in zip(relation_labels, relation_predictions):
ins_label = [x for row in ins_label for x in row]
ins_pred = [x for row in ins_pred for x in row]
assert len(ins_label) == len(ins_pred)
self.relation['p'] += precision_score(ins_label, ins_pred, average='macro', zero_division=1)
self.relation['r'] += recall_score(ins_label, ins_pred, average='macro', zero_division=1)
self.relation['f1'] += f1_score(ins_label, ins_pred, average='macro', zero_division=1)
self.cal_num(ins_pred, ins_label, RelationLabel.NEG, self.neg_relation)
self.cal_num(ins_pred, ins_label, RelationLabel.NEU, self.neu_relation)
self.cal_num(ins_pred, ins_label, RelationLabel.POS, self.pos_relation)
            self.cal_num(ins_pred, ins_label, RelationLabel.INVALID, self.inv_relation)
def compute(self, examples, output, batch):
# ner
span_labels, span_predictions = self.get_span_labels(batch, output)
self.cal_span_metric(span_labels, span_predictions)
# relation
self.cal_relation_metric(output)
# triples
spans_a = output['relation_output']['spans_a']
spans_a_mask = output['relation_output']['spans_a_mask']
spans_a = convert_pad_tensor_to_list(spans_a, spans_a_mask)
spans_o = output['relation_output']['spans_o']
spans_o_mask = output['relation_output']['spans_o_mask']
spans_o = convert_pad_tensor_to_list(spans_o, spans_o_mask)
relation_scores = output['relation_output']['relation_scores']
relation_mask = output['relation_output']['relation_mask']
predict_relations = torch.argmax(relation_scores, dim=-1)
# print('relation', predict_relations.shape, batch['relation_labels'].shape)
predict_relations = convert_relations_to_list(predict_relations, relation_mask)
# print(predict_relations)
token_range, token_range_mask = batch['token_range'], batch['token_range_mask']
token_range = convert_pad_tensor_to_list(token_range, token_range_mask)
predict_triples = []
for i in range(len(examples)):
triples1 = examples[i][1]
triples2 = convert_predictions_to_triples(spans_a=spans_a[i], spans_o=spans_o[i], relation_labels=predict_relations[i], token_range=token_range[i])
predict_triples.append(triples2)
self.triplet['pred'] += len(triples2)
self.triplet['golden'] += len(triples1)
for t1 in triples1:
for t2 in triples2:
if str(t1) == str(t2):
self.triplet['tp'] += 1
return predict_triples
| 6,970 | 51.022388 | 159 | py |
GPOMCP | GPOMCP-master/logscan.py | import sys
import math
threshold_string = "threshold = "
reward_string = "Obtained an accum reward of: "
if len(sys.argv) != 4:
print "3 Arguments required: input_file and output_file and avg_out_file"
exit(0)
in_filename = sys.argv[1]
out_filename = sys.argv[2]
avg_out_filename = sys.argv[3]
in_file = open(in_filename, 'r')
out_file = open(out_filename, 'w')
avg_out_file = open(avg_out_filename, 'w')
t = "N/A"
r = "N/A"
s = dict()
c = dict()
data = []
for line in in_file.readlines():
if line.find(threshold_string) >= 0:
t = line[len(threshold_string):].strip()
if line.find(reward_string) >= 0:
r = line[len(reward_string):].strip()
# out_file.write(t + " " + r + "\n")
data.append((t, r))
if t not in s:
c[t] = 1
s[t] = float(r)
else:
c[t] += 1
s[t] += float(r)
total_squared_difs = dict()
for (t, r) in data:
if t not in total_squared_difs:
total_squared_difs[t] = (float(r) - float(s[t]) / c[t]) ** 2
else:
total_squared_difs[t] += (float(r) - float(s[t]) / c[t]) ** 2
std_dev = dict()
for t in c:
std_dev[t] = math.sqrt(float(total_squared_difs[t]) / c[t])
for (t, r) in data:
out_file.write(t + " " + r + " " + str(std_dev[t]) + "\n")
for t in s:
avg_out_file.write(t + " " + str(float(s[t]) / c[t]) + " " +
str(std_dev[t]) + "\n")
in_file.close()
out_file.close()
avg_out_file.close()
| 1,478 | 24.5 | 77 | py |
GPOMCP | GPOMCP-master/datscan.py | import sys
import math
threshold_string = "threshold = "
reward_string = "Obtained an accum reward of: "
if len(sys.argv) != 5:
print "4 Arguments required: input_file1 "\
"input_file2 and output_file and avg_out_file"
exit(0)
in_filename1 = sys.argv[1]
in_filename2 = sys.argv[2]
out_filename = sys.argv[3]
avg_out_filename = sys.argv[4]
in_file1 = open(in_filename1, 'r')
in_file2 = open(in_filename2, 'r')
out_file = open(out_filename, 'w')
avg_out_file = open(avg_out_filename, 'w')
t = "N/A"
r = "N/A"
s = dict()
c = dict()
data = []
for line in in_file1.readlines():
line_data = line.split()
assert(len(line_data) == 3)
t = line_data[0].strip()
r = line_data[1] .strip()
data.append((t, r))
if t not in s:
c[t] = 1
s[t] = float(r)
else:
c[t] += 1
s[t] += float(r)
for line in in_file2.readlines():
line_data = line.split()
assert(len(line_data) == 3)
t = line_data[0].strip()
r = line_data[1] .strip()
data.append((t, r))
if t not in s:
c[t] = 1
s[t] = float(r)
else:
c[t] += 1
s[t] += float(r)
total_squared_difs = dict()
for (t, r) in data:
if t not in total_squared_difs:
total_squared_difs[t] = (float(r) - float(s[t]) / c[t]) ** 2
else:
total_squared_difs[t] += (float(r) - float(s[t]) / c[t]) ** 2
std_dev = dict()
for t in c:
std_dev[t] = math.sqrt(float(total_squared_difs[t]) / c[t])
for (t, r) in data:
out_file.write(t + " " + r + " " + str(std_dev[t]) + "\n")
for t in s:
avg_out_file.write(t + " " + str(float(s[t]) / c[t]) + " " +
str(std_dev[t]) + "\n")
in_file1.close()
in_file2.close()
out_file.close()
avg_out_file.close()
| 1,752 | 22.689189 | 69 | py |
GPOMCP | GPOMCP-master/Examples/Hallway/hallcp.py | from shutil import copyfile
import os
for i in range(1,10):
os.system("mkdir "+"./hallway"+str(i))
os.system("python genHallway.py "+"maph"+str(i)+".txt "+"hallway"+str(i)+".POMDP")
os.system("mv maph"+str(i)+ ".txt ./hallway"+str(i))
os.system("mv hallway"+str(i)+ ".POMDP ./hallway"+str(i))
| 298 | 32.222222 | 83 | py |
GPOMCP | GPOMCP-master/Examples/Hallway/genHallway.py | # Generates a POMDP file with a Hallway instance from a given maze map in a text file.
# Usage: python genHallway.py input_map.txt output_file [discount_factor]
# Hallway: a classic POMDP robot navigation benchmark. We have a maze with walls, traps and goals.
# The robot can move forward or turn left and right and it can only sense walls around itself (it cannot sense
# traps). The goal is to find the target as fast as possible (i.e. actions have zero reward until
# target is reached, after which a large number is recieved, discounted by the number of steps).
# The maze is given as a text file, each row representing a row of a maze,
# individual cells in a row separated by spaces. See the following example:
# 1 1 1 1 1 1
# 1 0 + + 0 1
# 1 1 + + 1 1
# 1 0 0 0 0 1
# 1 1 g x 1 1
# 1 1 1 1 1 1
# 1s are walls, 0s are empty cells, R are reloading places (this is from a different application domain,
# we probably don't need this), g is a goal state, x is a trap which we have to avoid
# (state with a self-loop), + is a possible starting location. The robot starts in a starting location
# chosen uniformly at random, looking southwards.
# FOR THE SCRIPT TO FUNCTION, THE MAZE HAS TO BE SURROUNDED BY WALLS
# NW corner of the maze is the coordinate origin, coordinates increase with
# each step to the east and to the south.
# The output is given in the standard POMDP file format (http://www.pomdp.org/code/pomdp-file-spec.html)
__author__ = 'mchmelik, edited by pnovotny'
import sys
# Test for optional input (discount factor), otherwise take default.
if len(sys.argv)>3:
discount = sys.argv[3]
else:
discount="0.95"
# How does the orientation change when turning around.
left = {'N':'W','W':'S','S':'E','E':'N'}
right = {'N':'E','E':'S','S':'W','W':'N'}
# Coordinate change after moving forward with a given orientation.
changeMap = {'N':[-1,0],'S':[1,0],'E':[0,1],'W':[0,-1]}
# Is there a wall in front of the robot?
def isWall(i,j,direction):
if(direction.__eq__('N')):
return (maze[i-1][j] == '1')
if(direction.__eq__('E')):
return (maze[i][j+1] == '1')
if(direction.__eq__('W')):
return (maze[i][j-1] == '1')
if(direction.__eq__('S')):
return (maze[i+1][j] == '1')
def isTrap(i,j,direction):
if(direction.__eq__('N')):
return (maze[i-1][j] == 'x')
if(direction.__eq__('E')):
return (maze[i][j+1] == 'x')
if(direction.__eq__('W')):
return (maze[i][j-1] == 'x')
if(direction.__eq__('S')):
return (maze[i+1][j] == 'x')
# Retrieve a new orientation after a turn is made.
def turnLeft(orient):
return left[orient]
def turnRight(orient):
return right[orient]
# Produce observation for the current state. The only information about the environment
# the robot gets is, for each direction (forward,left,right,behind), whether there is a wall next to the robot
# in that direction. Hence, an observation can be characterized by a word from
# {F,f}.{L,l}.{R,r}.{B,b}, where a capital letter indicates that there _is_ a wall.
def getObs(i,j,orientation):
if(orientation.__eq__('N')):
return ['F' if isWall(i,j,'N') else 'f', 'L' if isWall(i,j,'W') else 'l', 'R' if isWall(i,j,'E') else 'r', 'B' if isWall(i,j,'S') else 'b']
if(orientation.__eq__('E')):
return ['F' if isWall(i,j,'E') else 'f', 'L' if isWall(i,j,'N') else 'l', 'R' if isWall(i,j,'S') else 'r', 'B' if isWall(i,j,'W') else 'b']
if(orientation.__eq__('S')):
return ['F' if isWall(i,j,'S') else 'f', 'L' if isWall(i,j,'E') else 'l', 'R' if isWall(i,j,'W') else 'r', 'B' if isWall(i,j,'N') else 'b']
if(orientation.__eq__('W')):
return ['F' if isWall(i,j,'W') else 'f', 'L' if isWall(i,j,'S') else 'l', 'R' if isWall(i,j,'N') else 'r', 'B' if isWall(i,j,'E') else 'b']
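# For example, a robot standing in an east-west corridor (walls to the north and
# south, free cells to the east and west) and facing north observes the word
# "FlrB": wall in Front, no wall to the Left or Right, wall Behind.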
# print(sys.argv[1])
# print(sys.argv[2])
fIn = open(sys.argv[1], 'r');
fOut = open(sys.argv[2], 'w');
# fIn = open("hallway2.txt", 'r');
# fOut = open("hal.txt", 'w');
maze = [];
for line in fIn.readlines():
maze.append(line.split());
width = len(maze[0]);
height = len(maze);
actions = ["f", "l", "r"]
states = ["absorb", "goal", "goal2"]
stateToObsMap = {"absorb":"Oabs", "goal":"Ogoal", "goal2":"Ogoal2"}
observations = {"Oabs", "Ogoal", "Ogoal2"}
reloads = set()
transitions = ""
observationEntries = ""
# Add self-loops under all actions for goal and trap states. We have two states for goal, one entered upon
# reaching goal and the other, with a self-loop, immediately after that.
for action in {'f','l','r'}:
transitions += "T: %s : absorb : absorb 1.0 \n" %action
transitions += "T: %s : goal : goal2 1.0 \n" %action
transitions += "T: %s : goal2 : goal2 1.0 \n" %action
initialStates = set()
# For each possible robot state, generate outgoing transitions and the corresponding observation.
# State names have a format sixjxo, where [i,j] are coordinates and o is an orientation.
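# (For example, s3x4xE is the cell in maze row 3, column 4, with the robot facing east.)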
for i in range(0, height):
for j in range(0, width):
element = maze[i][j]
for orient in {'N','E','S','W'}:
# Standard states, where there are all movement actions enabled.
if element in {"0","+","R","x"}:
# print("i: "+str(i)+" j: " + str(j) + " orient: " + orient)
# print(''.join(getObs(i,j,orient)))
# Forward
# Move forward
if isWall(i,j,orient):
target = "absorb"
transitions += "T: f : s%dx%dx%s : %s 1.0 \n" % (i,j,orient,target)
elif isTrap(i,j,orient):
target = "s%dx%dx%s" % (i+changeMap[orient][0],j+changeMap[orient][1],'N')
transitions += "T: f : s%dx%dx%s : %s 0.25 \n" % (i,j,orient,target)
target = "s%dx%dx%s" % (i+changeMap[orient][0],j+changeMap[orient][1],'S')
transitions += "T: f : s%dx%dx%s : %s 0.25 \n" % (i,j,orient,target)
target = "s%dx%dx%s" % (i+changeMap[orient][0],j+changeMap[orient][1],'W')
transitions += "T: f : s%dx%dx%s : %s 0.25 \n" % (i,j,orient,target)
target = "s%dx%dx%s" % (i+changeMap[orient][0],j+changeMap[orient][1],'E')
transitions += "T: f : s%dx%dx%s : %s 0.25 \n" % (i,j,orient,target)
else:
target = "s%dx%dx%s" % (i+changeMap[orient][0],j+changeMap[orient][1],orient);
transitions += "T: f : s%dx%dx%s : %s 1.0 \n" % (i,j,orient,target);
#target = "s%dx%dx%s" % (i+changeMap[orient][0],j+changeMap[orient][1],orient) if not isWall(i,j,orient) else "absorb";
#target2 = "s%dx%dx%s" % (i,j,orient);
#transitions += "T: f : s%dx%dx%s : %s 1.0 \n" % (i,j,orient,target)
# transitions += "T: f : s%dx%dx%s : %s 0.02 \n" % (i,j,orient,target2)
# transitions += "TRANSITIONS: [s%dx%dx%s,F,{%s}] #\n" % (i,j,orient,target);
# Left
# Turn left
target = "s%dx%dx%s" % (i,j,left[orient]);
transitions += "T: l : s%dx%dx%s : %s 1.0 \n" % (i,j,orient,target)
# transitions += "T: l : s%dx%dx%s : %s 0.02 \n" % (i,j,orient,target2)
# transitions += "TRANSITIONS: [s%dx%dx%s,L,{%s}] #\n" % (i,j,orient,target);
# Or not turn at all
# Right
# Turn right
target = "s%dx%dx%s" % (i,j,right[orient]);
transitions += "T: r : s%dx%dx%s : %s 1.0\n" % (i,j,orient,target)
# transitions += "T: r : s%dx%dx%s : %s 0.02\n" % (i,j,orient,target2)
# transitions += "TRANSITIONS: [s%dx%dx%s,R,{%s}] #\n" % (i,j,orient,target);
# Or not turn at all
#obs = ''.join(getObs(i,j,orient))
#observations.add(obs);
# If in goal or trap location, go to the corresponding abstract state.
if element in {"g"}:
transitions += "T: f : s%dx%dx%s : goal 1.0\n" % (i,j,orient)
transitions += "T: l : s%dx%dx%s : goal 1.0\n" % (i,j,orient)
transitions += "T: r : s%dx%dx%s : goal 1.0\n" % (i,j,orient)
#if element in {"x"}:
# for orient1 in {'N','E','S','W'}:
# target = "s%dx%dx%s" % (i+changeMap[orient1][0],j+changeMap[orient1][1],orient1) if not isWall(i,j,orient1) else "absorb";
# transitions += "T: f : s%dx%dx%s : %s 0.25 \n" % (i,j,orient,target)
# target = "s%dx%dx%s" % (i,j,left[orient1]);
# transitions += "T: l : s%dx%dx%s : %s 0.25 \n" % (i,j,orient,target)
# target = "s%dx%dx%s" % (i,j,right[orient1]);
# transitions += "T: r : s%dx%dx%s : %s 0.25\n" % (i,j,orient,target)
# if element in {"R"}:
# reloads.add("s%dx%dx%s" %(i,j,orient))
if element in {"0","+","g","x","R"}:
#if element in {"R"}:
# obs = ''.join(getObs(i,j,orient)) + "R"
#else:
#compute an observation and add it to an state-to-obs map
obs = ''.join(getObs(i,j,orient))
observations.add(obs);
state = "s%dx%dx%s" % (i,j,orient)
states.append(state)
stateToObsMap[state]=obs
# states+=", {s%dx%dx%s; 1; %s; F}" % (i,j,orient,obs)
if element in {"+"}:
initialStates.add("s%dx%dxS" % (i,j))
#prob = 1.0 / initialStates.__len__()
#for state in initialStates:
# transitions+="T: f : st : %s %f \n" % (state,prob);
# transitions+="T: l : st : %s %f \n" % (state,prob);
# transitions+="T: r : st : %s %f \n" % (state,prob);
# Observations are deterministic and action-independent.
for state in states:
observationEntries+="O: * : %s : %s 1.0\n" % (state,stateToObsMap.get(state))
# energyEntries = ""
# for state in states:
# for action in actions:
# obs = stateToObsMap.get(state)
# if state in reloads:
# energyEntries+="E: %s : %s 15.0\n" %(obs,action)
# else:
# energyEntries+="E: %s : %s -1.0\n" %(obs,action)
# Dump everything to an output file.
#fOut.write("capacity: 10\n\n\n")
fOut.write("discount: "+discount+"\n")
fOut.write("values: reward\n")
fOut.write("states: "+' '.join(states)+"\n\n")
fOut.write("actions: "+' '.join(actions)+"\n\n")
fOut.write("observations: " + ' '.join(observations) + " \n\n")
#fOut.write("goals: 2\n")
#fOut.write("start: 1.0")
#for _ in range(states.__len__()-1):
# fOut.write(" 0.0")
#fOut.write("\n\n")
fOut.write("start include: " +' '.join(initialStates) +"\n\n")
# fOut.write("STATES: ["+states +"] #\n")
# fOut.write("ACTIONS:[" + actions +"] #\n")
fOut.write(transitions+"\n")
fOut.write(observationEntries+"\n")
#fOut.write(energyEntries+"\n")
fOut.write("R: * : * : * : * 0.0\n")
fOut.write("R: * : goal : goal2 : * 10000.0\n")
print("Num of states: " + str(len(states)))
print("Width", width);
print("Heigth", height);
print("Succesfully generated a POMDP format file.")
# print(transitions)
| 11,319 | 36.859532 | 147 | py |
GPOMCP | GPOMCP-master/Examples/Hallway/hallcp2.py | import os
for i in range(1,10):
os.system("python genHallway.py hallway"+str(i)+".txt hallway"+str(i)+".POMDP")
| 114 | 22 | 80 | py |
GPOMCP | GPOMCP-master/Examples/Hallway/MazeGen/MazeGeneratorRecursiveDivision.py | import random
import numpy
def isIn(i,j,num_rows,num_cols):
b = 1
if (i < 0) or (j < 0) or (i >= num_rows) or (j >= num_cols):
b=0;
return b;
def Modify(M,x_ul,y_ul,x_dr,y_dr):
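    # Recursive-division step: pick a random interior crossing point (x_cut, y_cut),
    # draw one horizontal and one vertical wall through it, punch three holes so the
    # four resulting sub-rooms stay connected, then recurse into each sub-room.
    # Rectangles spanning fewer than 5 cells in either direction are left as they are.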
if (x_dr - x_ul < 5) or (y_dr - y_ul < 5): pass
else:
x_cut = random.randint(x_ul+2,x_dr-2) if not (x_ul + 2 == x_dr - 2) else x_ul + 2
y_cut = random.randint(y_ul+2,y_dr-2) if not (y_ul + 2 == y_dr - 2) else y_ul + 2
hole_x1 = random.randint(x_ul+1,x_cut-1) if not (x_ul + 1 == x_cut - 1) else x_ul + 1
hole_x2 = random.randint(x_cut+1,x_dr-1) if not (x_cut + 1 == x_dr - 1) else x_cut + 1
hole_y1 = random.randint(y_cut+1,y_dr-1) if not (y_cut + 1 == y_dr - 1) else y_cut + 1
for i in range(0,x_dr-x_ul): M[x_ul+i,y_cut] = '1'
for i in range(0,y_dr-y_ul): M[x_cut,y_ul+i] = '1'
for i in range(0,x_dr-x_ul): M[x_ul+i,y_ul] = '0'
for i in range(0,x_dr-x_ul): M[x_ul+i,y_dr] = '0'
for i in range(0,y_dr-y_ul): M[x_ul,y_ul+i] = '0'
for i in range(0,y_dr-y_ul): M[x_dr,y_ul+i] = '0'
M[hole_x1,y_cut] = '0'
M[hole_x2,y_cut] = '0'
M[x_cut,hole_y1] = '0'
Modify(M,x_ul,y_ul,x_cut-1,y_cut-1)
Modify(M,x_cut+1,y_ul,x_dr,y_cut-1)
Modify(M,x_ul,y_cut+1,x_cut-1,y_dr)
Modify(M,x_cut+1,y_cut+1,x_dr,y_dr)
#input
num_rows = int(input("Rows-2: "))
num_cols = int(input("Columns-2: "))
#generate a maze
M = numpy.chararray((num_rows,num_cols))
for i in range(0,num_rows):
for j in range(0,num_cols):
M[i,j] = '0';
Modify(M,0,0,num_rows-1,num_cols-1)
nowalls = []
for i in range(0,num_rows):
for j in range(0,num_cols):
if (M[i,j] == '0'): nowalls.append([i,j])
number = len(nowalls)
random.shuffle(nowalls)
u = nowalls[0]
M[u[0],u[1]] = '+'
u = nowalls[1]
M[u[0],u[1]] = 'x'
u = nowalls[2]
M[u[0],u[1]] = 'g'
for i in range(3,number-1):
k = random.randint(1,60)
if (k == 1):
u = nowalls[i]
M[u[0],u[1]] = '+'
elif (k == 2):
u = nowalls[i]
M[u[0],u[1]] = 'x'
elif (k == 3):
u = nowalls[i]
M[u[0],u[1]] = 'g'
#output
s1 = '1'
for i in range(0,num_cols+1):
s1 = s1 + ' 1'
f = open(r'E:\workfile.txt', 'w')
f.write(s1+"\n")
for i in range(0,num_rows-1):
s = '1 ' + M[i,0]
for j in range(1,num_cols):
s = s + ' ' + M[i,j]
s = s + ' 1'
f.write(s+"\n")
s = '1 ' + M[num_rows-1,0]
for j in range(1,num_cols):
s = s + ' ' + M[num_rows-1,j]
s = s + ' 1'
f.write(s+"\n")
f.write(s1)
f.close() | 2,613 | 25.673469 | 94 | py |
GPOMCP | GPOMCP-master/Examples/Hallway/MazeGen/MazeGenerator.py | import random
import numpy
def isIn(i,j,num_rows,num_cols):
b = 1
if (i < 0) or (j < 0) or (i >= num_rows) or (j >= num_cols):
b=0;
return b;
#input
num_rows = int(input("Rows-2: "))
num_cols = int(input("Columns-2: "))
num_nowall = int(input("Number of cells that are not walls: "))
num_trap = int(input("Number of traps: "))
num_initial = int(input("Number of possible initial locations: "))
num_goal = int(input("Number of reward locations: "))
#generate a maze
M = numpy.chararray((num_rows,num_cols))
for i in range(0,num_rows):
for j in range(0,num_cols):
M[i,j] = '1';
#set all to 1's as we'll change only non-wall cells
#variables used for generating a random maze
# Pick a random starting cell index in [0, num_rows*num_cols - 1] so the derived row/column stay in range.
current = random.randint(0, num_rows*num_cols - 1)
current_x = current % num_cols
current_y = (current - current_x) // num_cols
count = num_nowall - 1
M[current_x,current_y] = '0'
stack = []
stack.append([current_x,current_y])
#those cells with no walls
nowalls = []
nowalls.append([current_x,current_y])
current_direction = 'D'
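# Carve passages with a randomized depth-first walk: keep moving in the current direction while the
# cell ahead is still uncarved ('1'), otherwise pick a random uncarved neighbour, and backtrack
# through the stack when no neighbour is available.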
while count > 0:
adjacent = [];
if (isIn(current_x-1,current_y,num_rows,num_cols)==1) and (M[current_x-1,current_y] == '1'): adjacent.append('L');
if (isIn(current_x+1,current_y,num_rows,num_cols)==1) and (M[current_x+1,current_y] == '1'): adjacent.append('R');
if (isIn(current_x,current_y-1,num_rows,num_cols)==1) and (M[current_x,current_y-1] == '1'): adjacent.append('D');
if (isIn(current_x,current_y+1,num_rows,num_cols)==1) and (M[current_x,current_y+1] == '1'): adjacent.append('U');
if (current_direction in adjacent):
if (current_direction.__eq__('U')):
count = count - 1
current_y = current_y + 1
M[current_x,current_y] = '0'
nowalls.append([current_x,current_y])
stack.append([current_x,current_y])
elif (current_direction.__eq__('D')):
count = count - 1
current_y = current_y - 1
M[current_x,current_y] = '0'
nowalls.append([current_x,current_y])
stack.append([current_x,current_y])
elif (current_direction.__eq__('L')):
count = count - 1
current_x = current_x - 1
M[current_x,current_y] = '0'
nowalls.append([current_x,current_y])
stack.append([current_x,current_y])
elif (current_direction.__eq__('R')):
count = count - 1
current_x = current_x + 1
M[current_x,current_y] = '0'
nowalls.append([current_x,current_y])
stack.append([current_x,current_y])
elif (len(adjacent)>0):
direction = random.choice(adjacent)
if (direction.__eq__('U')):
count = count - 1
current_y = current_y + 1
M[current_x,current_y] = '0'
nowalls.append([current_x,current_y])
stack.append([current_x,current_y])
current_direction = 'U'
elif (direction.__eq__('D')):
count = count - 1
current_y = current_y - 1
M[current_x,current_y] = '0'
nowalls.append([current_x,current_y])
stack.append([current_x,current_y])
current_direction = 'D'
elif (direction.__eq__('L')):
count = count - 1
current_x = current_x - 1
M[current_x,current_y] = '0'
nowalls.append([current_x,current_y])
stack.append([current_x,current_y])
current_direction = 'L'
elif (direction.__eq__('R')):
count = count - 1
current_x = current_x + 1
M[current_x,current_y] = '0'
nowalls.append([current_x,current_y])
stack.append([current_x,current_y])
current_direction = 'R'
else:
del stack[-1]
u = stack[-1]
current_x = u[0]
current_y = u[1]
#now we will just allocate traps, initial locations and goals to some cells randomly
random.shuffle(nowalls)
for i in range(0,num_trap):
alpha=nowalls[i]
M[alpha[0],alpha[1]] = 'x'
for i in range(0,num_initial):
alpha=nowalls[num_trap+i]
M[alpha[0],alpha[1]] = '+'
for i in range(0,num_goal):
alpha=nowalls[num_trap+num_initial+i]
M[alpha[0],alpha[1]] = 'g'
#output
s1 = '1'
for i in range(0,num_cols+1):
s1 = s1 + ' 1'
f = open(r'E:\workfile.txt', 'w')
f.write(s1+"\n")
for i in range(0,num_rows-1):
s = '1 ' + M[i,0]
for j in range(1,num_cols):
s = s + ' ' + M[i,j]
s = s + ' 1'
f.write(s+"\n")
s = '1 ' + M[num_rows-1,0]
for j in range(1,num_cols):
s = s + ' ' + M[num_rows-1,j]
s = s + ' 1'
f.write(s+"\n")
f.write(s1)
f.close()
| 4,716 | 28.298137 | 118 | py |
GPOMCP | GPOMCP-master/Examples/rockSample/genScript/Coord.py | #dimX = 6
#dimY = 6
# dimY = 2
class Coord:
# (0,0) bottom left corner
def __init__(self,x,y,dim):
self.x = x
self.y = y
self.dim = dim
def setWalls(self,walls):
self.walls = walls
def __str__(self):
return "%dx%d" % (self.x, self.y)
def __repr__(self):
return "%dx%d" % (self.x, self.y)
def __eq__(self, other):
if other==None :
return False
return self.x == other.x and self.y == other.y
def __hash__(self):
return hash(repr(self))
def getNorth(self):
if self.y<self.dim-1 :
return (Coord(self.x,self.y+1,self.dim),False)
else:
return (Coord(self.x,self.y,self.dim),True)
def getSouth(self):
if self.y>0 :
return (Coord(self.x,self.y-1,self.dim),False)
else:
return (Coord(self.x,self.y,self.dim),True)
def getWest(self):
if self.x>0:
return (Coord(self.x-1,self.y,self.dim),False)
else:
return (Coord(self.x,self.y,self.dim),True)
def getEast(self):
if self.x<self.dim-1:
return (Coord(self.x+1,self.y,self.dim),False)
else:
return (Coord(self.x,self.y,self.dim),True)
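    # Move one step in the given compass direction (N/E/S/W); any other action stays put.
    # Returns (new_coord, bumped_into_boundary); e.g. from (0, 0) the action "N" yields (0, 1) with no bump.
    # When a walls set is supplied, a move blocked by a wall keeps the current coordinate.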
def getNext(self,direction, walls=None):
if direction == "N":
targetCoord,bump = self.getNorth()
elif direction == "E":
targetCoord,bump = self.getEast()
elif direction == "S":
targetCoord,bump = self.getSouth()
elif direction =="W":
targetCoord,bump = self.getWest()
else:
targetCoord,bump = (self,False)
if walls != None:
blockedByAWall = self.checkWall(targetCoord,walls)
if blockedByAWall:
return self,False
return (targetCoord,bump)
def checkWall(self,targetCoord,walls):
currentCoord = self
if frozenset([currentCoord,targetCoord]) in walls:
return 1
else:
return 0
def generateCoords(dim):
for i in range(dim*dim):
yield getCoord(i,dim)
# Coord(i // dimX,i % dimX)
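# All cells at most one step away (including diagonals) that lie inside the dim x dim grid,
# excluding robotCoord itself.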
def getPossibleNeighbors(robotCoord,dim):
possibleNeighbors = []
diff = [lambda x: x+1,lambda x: x, lambda x: x-1]
for diffX in diff:
for diffY in diff:
newX = diffX(robotCoord.x)
newY = diffY(robotCoord.y)
if(newX >= 0 and newX < dim and newY >=0 and newY < dim and robotCoord != Coord(newX,newY,dim)):
possibleNeighbors.append(Coord(newX,newY,dim))
return possibleNeighbors
def getCoord(integer, dim):
y = integer//dim
x = integer%dim
return Coord(x,y,dim)
| 2,694 | 24.186916 | 102 | py |
GPOMCP | GPOMCP-master/Examples/rockSample/genScript/State.py | from Coord import *
import itertools
class State:
def __init__(self, robot, mines, observation):
self.robot = robot
self.mines = mines
self.observation = observation
def __repr__(self):
return "N%sM%sT" % (str(self.robot),str(self.mines))
# return "(R:%s,T:%s,A: %s,O:%s)" % (self.robot,self.target,self.automatonState,self.observation)
def __str__(self):
return self.__repr__()
def __eq__(self, other):
if not isinstance(other,State):
return False
return self.__repr__() == other.__repr__()
# return self.robot == other.robot and self.target == other.target and self.observation == other.observation and self.automatonState == other.automatonState
def __hash__(self):
return hash(repr(self))
# Coords: origin has (0,0), then standard geometric interpretation, final and losing states have coords (-1,-1) and (-2,-2), respectively.
# We map integers to coords as follows:
# 12 13 14 15
# 8 9 10 11
# 4 5 6 7
# 0 1 2 3
#
obsDict = {0 : (1,0), 1 : (0.9,0.1), 2 : (0.8,0.2), 3 : (0.7,0.3), 4 : (0.6,0.4), 5 : (0.5,0.5)}
def distMine(cRobot, cMine):
return (abs(cRobot.x-cMine.x) + abs(cRobot.y - cMine.y) )
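# Joint observation distribution for a Sense action: the probability of every 'G'/'B' reading tuple
# over all mines, where per-mine accuracy degrades with Manhattan distance according to obsDict
# (perfect at distance 0, coin-flip at distance 5 or more).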
def getObs(coord, mineString, mines, minePosToInt,sensetuples):
# What kind of observations I can get if I use the sensing action in a given state
# print(str(coord))
mineIndObs = {}
returnObs = {}
retObsTemp = ""
counter = 0
for mine in mines:
distToM = min(distMine(coord,mine), 5)
mineID = minePosToInt.get(mine)
corrProb = obsDict[distToM][0]
incorrProb = obsDict[distToM][1]
if int(mineString[mineID]) > 0:
mineIndObs[counter] = (corrProb,incorrProb) # First item is the probability of sensing good, second of
# sensing bad mine
else:
mineIndObs[counter] = (incorrProb,corrProb)
counter+=1
# counter = 0
for senseTuple in sensetuples:
prob = 1
counter = 0
for i in senseTuple:
#retObsTemp += i
if i == 'G':
prob = prob*mineIndObs[counter][0]
else:
prob = prob*mineIndObs[counter][1]
counter += 1
index = "".join(str(e) for e in senseTuple)
returnObs[index] = prob
# return "o"+str(coord.x)+"x"+str(coord.y) + "x" + mineObs
assert len(returnObs) == len(sensetuples)
return returnObs
#
# def getObs(relPos):
# if relPos==9:
# return ["oNot"]
# elif relPos ==8:
# return ["oSE"]
# elif relPos ==7:
# return ["oSE","oSW"]
# elif relPos ==6:
# return ["oSW"]
# elif relPos ==5:
# return ["oSE","oNE"]
# elif relPos ==4:
# return ["oDetected"]
# elif relPos ==3:
# return ["oSW","oNW"]
# elif relPos ==2:
# return ["oNE"]
# elif relPos ==1:
# return ["oNE","oNW"]
# elif relPos ==0:
# return ["oNW"]
| 2,678 | 24.514286 | 158 | py |
GPOMCP | GPOMCP-master/Examples/rockSample/genScript/genRockSample.py | import itertools
import random
from State import *
from Coord import *
# Here you can set up rewards for various type of transitions and other
stepRew = -1
goodMineRew = 50
badMineRew = -25
illegalMoveRew = -100
discount = 0.98
sense = -5
assert not badMineRew == 0 and not goodMineRew == 0 and not badMineRew == goodMineRew
perfect_sense_dist = 0  # distance within which sensing works perfectly
useless_sense_dist = 5
dist_decay = 0.5/(useless_sense_dist - perfect_sense_dist)
# Input
dim = int(input('Dimension:'))
totalMines = int(input('Total number of mines:'))
assert totalMines <= dim*dim, "I cannot fit that many mines into the grid of size "+str(dim*dim)
goodMines = int(input('Number of good mines:'))
assert totalMines >= goodMines, "Number of good mines cannot be larger than total number of mines"
outFile = str(raw_input('Output file name:'))
# f = outFile
def sumOfSampleMined(mineString):
num = 0
for i in range(len(mineString)):
if int(mineString[i]) == 2:
num+=1
return num
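# Keep only mine-state tuples whose number of good (1) or already-sampled (2) entries does not exceed
# the requested number of good mines.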
def filterValidEntries(mineString):
goodNM = 0
goodM = 0
for i in mineString:
if i == 1:
goodNM+=1
if i == 2:
goodM+=1
if goodNM + goodM <= goodMines:
return True
else:
return False
# write a model in the old format
# def writeOldFormat():
# filename = '../RS'+str(dimX)+'x'+str(dimY) + 'x'+ str(minesCount) +'x'+str(numOfGoodToSample) + '.txt'
# print("Output written to: "+filename)
# f = open(filename, 'w')
# f.write("NAME: [generated] #\n")
# f.write("OBSERVATIONS: [")
# f.write(', '.join(str(e) for e in observations))
# f.write("] #\n")
# f.write("STATES: [")
# print("Num of states: " + str(len(listOfStates)))
# for state in listOfStates:
# f.write("{%s;1;%s;F}," %(str(state),state.observation))
#
# f.write("{NgoalMNoneToGoal;2;oGoal;F},")
# f.write("{NloosingMNoneToLoosing;1;oLoosing;F},")
# f.write("{NdStartMNoneToStart;1;oStart;T}")
#
#
#
#
#
#
# f.write("] #\n")
#
# f.write("ACTIONS: [")
# f.write(', '.join(str(e) for e in actions))
# f.write("] #\n")
#
#
# transitionsMap = {}
#
# for transition in listOfTransitions:
#
# source,action,target = transition
#
# if (source,action) in transitionsMap:
# targets = transitionsMap[(source,action)]
# targets.append(target)
# else:
# targets = [target]
# transitionsMap[(source,action)]=targets
#
#
# # print(transitionsMap)
# #
# for transition in transitionsMap.keys():
# source,action = transition
# f.write("TRANSITIONS: [%s,%s,{" %(source,action))
# support = ';'.join(str(e) for e in transitionsMap[transition])
# f.write(support)
# f.write("}] # \n")
# for action in actions:
# f.write("TRANSITIONS: [NgoalMNoneToGoal,%s,{NgoalMNoneToGoal}]\n" %(action))
# f.write("TRANSITIONS: [NloosingMNoneToLoosing,%s,{NloosingMNoneToLoosing}]\n" %(action))
# f.close()
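# Write the model in the .POMDP text format: discount/values/states/actions/observations headers,
# the 'start include' set, then T:/O:/R: entries for transitions, observation probabilities and rewards.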
def writeNewFormat():
f = open(outFile, 'w')
f.write("discount: "+str(discount)+"\n")
f.write("values: reward\n")
f.write("states: %s %s " %(goalState,losingState))
f.write(' '.join(str(e) for e in listOfStates))
f.write(' ')
f.write(' '.join("sen"+str(e[0]) for e in senseStates))
f.write(' ')
f.write(' '.join("min"+str(e[0])+"R"+str(e[1]) for e in mineStates))
f.write('\n')
f.write("\nactions: ")
f.write(' '.join(str(e) for e in actions))
f.write("\nobservations: ")
f.write(' '.join(str(e) for e in observations))
#
# f.write("\n\nparity: 2 ")
# for state in listOfStates:
# automatonState = state.automatonState
# priority = getPriority(int(automatonState))
# f.write("%s " %(priority))
# # f.write("2 ")
# f.write("1")
# f.write("\n\ngoals: 1\n")
# Start line
# f.write("\nstart: 1.0 ")
# for state in listOfStates:
# f.write("0.0 ")
# for state in senseStates:
# f.write("0.0 ")
# # for the loosing state
# f.write("0.0 0.0")
f.write("\n start include: " +' '.join(map(str,startStates)) +"\n\n")
#
#
#
# Transitions:
for transition in listOfTransitions:
source,robotAct,target, rew = transition
#
if isinstance(source, tuple):
if source[1] == "sensing":
prob = 1.0
f.write("\nT: %s : %s : %s %.2f" % (robotAct,"sen"+str(source[0]),target,prob))
else:
prob = 1.0
f.write("\nT: %s : %s : %s %.2f" % (robotAct,"min"+str(source[0])+"R"+str(source[1]),target,prob))
elif isinstance(target, tuple):
if target[1] == "sensing":
prob = 1.0
f.write("\nT: %s : %s : %s %.2f" % (robotAct,source,"sen"+str(target[0]),prob))
else:
prob = 1.0
f.write("\nT: %s : %s : %s %.2f" % (robotAct,source,"min"+str(target[0])+"R"+str(target[1]),prob))
else:
prob = 1.0
f.write("\nT: %s : %s : %s %.2f" % (robotAct,source,target,prob))
#
#
#
f.write("\nT: * : %s : %s 1.0" %(goalState,goalState))
f.write("\nT: * : %s : %s 1.0" %(losingState,losingState))
#
#
# for obs in observations:
# for action in actions:
# if obs[-1] == "G" and action == "Sample":
# f.write("\nE: %s : %s %d" %(obs,action, capacity))
# else:
# f.write("\nE: %s : %s -1.0" %(obs,action))
# prob = 1.0 / startStates.__len__()
#
# for state in startStates:
# f.write("\nT: * : dStart : %s %f" %(state,prob))
#
#
#
# Observations:
f.write("\n")
for state in listOfStates:
robotPos = state.robot
formDiscObs = "o%sx%sx" % (str(robotPos.x), str(robotPos.y))
f.write("\nO: * : %s : %s 1.0" %(state,formDiscObs))
for state in senseStates:
robotPos = state[0].robot
obs = state[0].observation
for key in obs:
formDiscObs = "o%sx%sx%s" % (str(robotPos.x), str(robotPos.y), key)
f.write("\nO: * : %s : %s %f" %("sen"+str(state[0]),formDiscObs, obs[key]))
for state in mineStates:
robotPos = state[0].robot
if state[1] == 0:
formDiscObs = "o%sx%sxMM" % (str(robotPos.x), str(robotPos.y))
elif state[1] == badMineRew:
formDiscObs = "o%sx%sxMB" % (str(robotPos.x), str(robotPos.y))
else:
formDiscObs = "o%sx%sxMG" % (str(robotPos.x), str(robotPos.y))
f.write("\nO: * : %s : %s 1.0" %("min"+str(state[0])+"R"+str(state[1]),formDiscObs))
f.write("\nO: * : %s : oLosing 1.0" %(losingState))
f.write("\nO: * : %s : oGoal 1.0" %(goalState))
# f.write("\nO: * : dStart : oStart 1.0")
# f.write("\nR: * : * : * : * 1.0")
f.write("\nR: * : %s : * : * 0.0" %(goalState))
f.write("\nR: * : %s : * : * 0.0" %(losingState))
for transition in listOfTransitions:
source,robotAct,target, rew = transition
if not isinstance(source,tuple):
f.write("\nR: %s : %s : * : * %d" % (robotAct, source, rew))
elif source[1] == "sensing":
f.write("\nR: %s : %s : * : * %d" % (robotAct, "sen"+str(source[0]), rew))
else:
f.write("\nR: %s : %s : * : * %d" % (robotAct, "min"+str(source[0])+"R"+str(source[1]), rew))
# Set up observations
observationString = "oStart,oGoal,oLosing"
observations = set(observationString.split(","))
actionString = "N,E,S,W"
actions = actionString.split(",")
listOfStates = []
# Structure for holding mone locations and map from integer to tuple coords and vice versa
listMines = []
minePosToInt = {}
mineIntToPos = {}
def randMines(rdim, rtotalMines):
print("Not a valid coordinate list. (Does the number of mines correspond to what was input above?)")
print("Generating the list randomly.")
listMinesR = random.sample(range(rdim*rdim), rtotalMines)
listMines=[]
counter=0
# Generate tuple coordinates of mines
for intID in listMinesR:
pos = getCoord(intID,dim)
listMines.append(pos)
mineIntToPos[counter]=pos
minePosToInt[pos]=counter
counter+=1
return listMines
# Input mine coords, if not given, generate randomly
def can_evaluate(string):
try:
eval(string)
return True
except:
return False
listMinesIn = input('Enter list of mine coordinates, as list of tuples, or some bogus to generate randomly')
listMinesCp = listMinesIn
if not can_evaluate(listMinesCp):
listMines = randMines(dim, totalMines)
else:
listMinesEv = eval(listMinesIn)
if not isinstance(listMinesEv,list) or not len(listMinesEv) == totalMines:
listMines = randMines(dim, totalMines)
else:
counter = 0
for mineCoord in listMinesEv:
if not isinstance(mineCoord,tuple) or not len(mineCoord) == 2:
listMines = randMines(dim, totalMines)
            elif not (isinstance(mineCoord[0],int) and isinstance(mineCoord[1],int) and 0 <= mineCoord[0] <= dim - 1 and 0 <= mineCoord[1] <= dim - 1):
listMines = randMines(dim, totalMines)
else:
listMines.append(mineCoord)
newCoord = Coord(mineCoord[0],mineCoord[1],dim)
mineIntToPos[counter]= newCoord
minePosToInt[newCoord]=counter
counter += 1
# Generate possible info about state of Mines 0 = bad, 1=good, 2= was good, but now is mined
tuplesList = itertools.product([0, 1, 2], repeat=len(listMines))
sensetuples = list(itertools.product(['G', 'B'], repeat=len(listMines)))
restrictedTuples = [x for x in tuplesList if filterValidEntries(x)]
# sensetuples = [x for x in sensetuplesAll if x.count('G')==goodMines]
# restrictedTuples = tuples
# print(list(restrictedTuples))
# startState = State("dStart",None,"oStart")
goalState = State("goal",None,{"oGoal":1})
losingState = State("losing",None,{"oLosing":1})
# listOfStates = []
print("List of mines:"+str(listMines))
print("Indexes of mines:"+str(minePosToInt))
# Generate standard states
for robotPos in list(generateCoords(dim)):
formDiscObs = "o%sx%sx" % (str(robotPos.x), str(robotPos.y))
if not formDiscObs in observations:
observations.add(formDiscObs)
for mineStats in restrictedTuples:
mineString = "".join(map(str,list(mineStats)))
# obs is a map from strings of 'G' and 'B' to probabilities
obs = getObs(robotPos, mineString, listMines, minePosToInt,sensetuples)
for key in obs:
formDiscObs = "o%sx%sx%s" % (str(robotPos.x), str(robotPos.y), key)
if not formDiscObs in observations:
observations.add(formDiscObs)
if robotPos in listMines:
formDiscObs = "o%sx%sxMG" % (str(robotPos.x), str(robotPos.y))
if not formDiscObs in observations:
observations.add(formDiscObs)
formDiscObs = "o%sx%sxMM" % (str(robotPos.x), str(robotPos.y))
if not formDiscObs in observations:
observations.add(formDiscObs)
formDiscObs = "o%sx%sxMB" % (str(robotPos.x), str(robotPos.y))
if not formDiscObs in observations:
observations.add(formDiscObs)
state = State(robotPos,mineString,obs)
# print(str(state.robot))
listOfStates.append(state)
senseStates = []
for state in listOfStates:
newState = (state,"sensing")
senseStates.append(newState)
mineStates = []
#
# print(listOfStates)
# Transitions
listOfTransitions = []
#
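# Build the transition relation: from every state the four moves N/E/S/W (penalised when bumping the
# grid edge), Sample (a good mine pays off and becomes sampled, an already-sampled mine turns bad with
# no reward, a bad mine penalises, and sampling off a mine leads to the losing state), Sense (enters an
# intermediate sensing state) and Finish (enters the goal state).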
for state in listOfStates:
for robotAct in actions:
newRCoord,bump = state.robot.getNext(robotAct)
obs = getObs(newRCoord, state.mines, listMines, minePosToInt,sensetuples)
newState = State(newRCoord,state.mines,obs)
if bump:
setRew = illegalMoveRew
else:
setRew = stepRew
listOfTransitions.append((str(state),robotAct,str(newState),setRew))
robotAct = "Sample"
# sampleActionTaken = False
coord = state.robot
isOnMine = False
for mine in listMines:
if mine == coord:
isOnMine = True
mineID = minePosToInt.get(coord)
mineVal = int(state.mines[mineID])
if mineVal == 0:
# Mining a bad mine
newState = (state,badMineRew)
listOfTransitions.append((state,robotAct,newState,0))
# sampleActionTaken = True
elif mineVal == 2:
# Mining an already mined state makes it bad, we receive zero reward
newMineString = ""
for i in range(len(state.mines)):
if i == mineID:
newMineString+="0"
else:
newMineString+=state.mines[i]
# nothing changes
# listOfTransitions.append((state,robotAct,state))
obs = getObs(state.robot, newMineString, listMines, minePosToInt,sensetuples)
newState = (State(state.robot,newMineString,obs),0)
listOfTransitions.append((state,robotAct,newState,0))
# sampleActionTaken = True
elif mineVal == 1:
# Mining a good state makes it mined but not bad
newMineString = ""
for i in range(len(state.mines)):
if i == mineID:
newMineString+="2"
else:
newMineString+=state.mines[i]
# if sumOfSampleMined(newMineString) == numOfGoodToSample:
# listOfTransitions.append((state,robotAct,goalState))
# sampleActionTaken = True
# elif sumOfSampleMined(newMineString) < numOfGoodToSample:
newState = (State(state.robot,newMineString,state.observation),goodMineRew)
listOfTransitions.append((state, robotAct, newState, 0))
if not newState in mineStates:
mineStates.append(newState)
if not isOnMine:
listOfTransitions.append((state, robotAct, losingState, illegalMoveRew))
# sampleActionTaken = True
# if not sampleActionTaken:
# listOfTransitions.append((state,robotAct,losingState))
robotAct = "Sense"
newState = (state,"sensing")
listOfTransitions.append((state, robotAct, newState, sense))
robotAct = "Finish"
newState = goalState
listOfTransitions.append((state, robotAct, newState, 0))
actions.append("Sample")
actions.append("Finish")
actions.append("Sense")
for state in senseStates:
for act in actions:
listOfTransitions.append((state,act,state[0], 0))
for state in mineStates:
for act in actions:
listOfTransitions.append((state,act,state[0], state[1]))
startStates = []
tuples = itertools.product([0, 1], repeat=len(listMines))
restrictedTuples = [x for x in tuples if sum(x) == goodMines]
# print(restrictedTuples)
for rtuple in restrictedTuples:
mineString = "".join(map(str,list(rtuple)))
obs = getObs(Coord(0,0,dim),mineString,listMines,minePosToInt,sensetuples)
newState = State(Coord(0,0,dim),mineString,obs)
startStates.append(newState)
# for action in actions:
# listOfTransitions.append((startState,action,newState))
# writeOldFormat()
# f = outFile
writeNewFormat()
totalStates = len(listOfStates)+len(senseStates)+len(mineStates)+2
totalObs = len(observations)
print("\n Total no of states: %d" % (totalStates))
print("\n Total no of Obs: %d" % (totalObs))
| 15,375 | 31.033333 | 163 | py |
ges-idr5 | ges-idr5-master/cross_validate_sun.py |
"""
Cross-validate ensemble model on the sun.
"""
import yaml
import logging
import os
import matplotlib.pyplot as plt
import numpy as np
from glob import glob
from code import (GESDatabase, plot, summary)
from astropy.table import Table
# Initialize logging.
logger = logging.getLogger("ges.idr5.qc")
logger.setLevel(logging.DEBUG)
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter(
"%(asctime)s [%(levelname)-8s] %(message)s"))
logger.addHandler(handler)
# Create a database object.
db_filename = "db.yaml"
with open(db_filename, "r") as fp:
credentials = yaml.load(fp)
database = GESDatabase(**credentials)
from code.model.ensemble import (NewEnsembleModel, SingleParameterEnsembleModel, MultipleParameterEnsembleModel, SingleParameterEnsembleModelWithCorrelations)
benchmarks = Table.read("fits-templates/benchmarks/GES_iDR5_FGKMCoolWarm_Benchmarks_AcceptedParams_01082016.fits")
benchmarks["E_FEH"] = 0.2
bms_sun_excluded = benchmarks[benchmarks["TEFF"] < 8000]
keep = np.array([e.strip() != "SUN" for e in bms_sun_excluded["GES_FLD"]])
assert len(keep) - sum(keep) > 0
bms_sun_excluded = bms_sun_excluded[keep]
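# Leave the Sun out of the benchmark calibrators so that its homogenised parameters, derived below,
# act as a cross-validation of the ensemble model.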
cv_sun_model = NewEnsembleModel(database, 11, bms_sun_excluded)
parameters = {
"logg": 0.25**2,
"teff": 100**2,
"feh": 0.20**2
}
saved_fits = {}
posteriors = {}
for parameter, var_intrinsic in parameters.items():
if parameter in posteriors: continue
data, metadata = cv_sun_model._prepare_data(parameter)
init = {
"truths": data["mu_calibrator"],
"var_intrinsic": var_intrinsic**2,
"var_sys_estimator": var_intrinsic**2 * np.ones(data["N_estimators"]),
"alpha_sq": 1000 * np.ones(data["N_estimators"]),
"rho_estimators": np.zeros(metadata["N_pairwise_estimators"]),
"c0_estimators": np.zeros(data["N_estimators"])
}
op_params = cv_sun_model.optimize(data, init=init)
samples = cv_sun_model.sample(data, init=op_params, chains=6, iter=2000)
saved_fits[parameter] = samples
posteriors[parameter] = homogenise_survey_measurements(
"ssssssss-sssssss", 11, parameter, samples, database)
response = ""
for k, v in posteriors.items():
p = np.percentile(v, [16, 50, 84])
response += "{0}: {1:.2f} ({2:.2f}, {3:.2f})\n".format(
k, p[1], p[0] - p[1], p[2] - p[1])
print(response)
with open("cv_solar.log", "w") as fp:
fp.write(response)
raise a
# 18 Sco:
homogenise_survey_measurements(
"16153746-0822162", 11, "logg", samples, database)
homogenise_survey_measurements(
"14153967+1910567", 11, "logg", samples, database) | 2,635 | 22.327434 | 158 | py |
ges-idr5 | ges-idr5-master/setup.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from setuptools import setup, find_packages
from codecs import open
from os import path, system
from re import compile as re_compile
# For convenience.
if sys.argv[-1] == "publish":
system("python setup.py sdist upload")
sys.exit()
def read(filename):
kwds = {"encoding": "utf-8"} if sys.version_info[0] >= 3 else {}
with open(filename, **kwds) as fp:
contents = fp.read()
return contents
# Get the version information.
here = path.abspath(path.dirname(__file__))
vre = re_compile("__version__ = \"(.*?)\"")
version = vre.findall(read(path.join(here, "code", "__init__.py")))[0]
setup(
name="ges",
version=version,
author="Andrew R. Casey",
author_email="[email protected]",
description="Tools for homogenisation of the Gaia-ESO Survey (DR5)",
long_description=read(path.join(here, "README.md")),
license="MIT",
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.4",
"Topic :: Scientific/Engineering :: Astronomy",
"Topic :: Scientific/Engineering :: Physics"
],
packages=find_packages(exclude=["documents", "tests"]),
install_requires=["numpy", "scipy", "six", "pystan-2.12.0"],
    #extras_require={
    #    "test": ["coverage"]
    #},
package_data={
"": ["LICENSE"],
},
include_package_data=True,
data_files=None,
)
| 1,640 | 28.836364 | 72 | py |
ges-idr5 | ges-idr5-master/sandbox_plot.py |
from code.model import ensemble, plot
raise a
for wg in (11, ):
for parameter in ("teff", "logg", "feh"):
model = ensemble.EnsembleModel.read(
"homogenisation-wg11-{}.model".format(parameter), None)
# Plot the distribution of biases for each node
fig = plot.biases(model)
fig.savefig("figures/wg{wg}/wg{wg}-biases-{param}.pdf".format(
wg=wg, param=parameter))
fig.savefig("figures/wg{wg}/wg{wg}-biases-{param}.png".format(
wg=wg, param=parameter))
# Plot the random uncertainty as a function of SNR
fig = plot.node_uncertainty_with_snr(model)
fig.savefig("figures/wg{wg}/wg{wg}-node-uncertainty-random-{param}.pdf"\
.format(wg=wg, param=parameter))
fig.savefig("figures/wg{wg}/wg{wg}-node-uncertainty-random-{param}.png"\
.format(wg=wg, param=parameter))
# Plot the relative systematic uncertainty as a function of parameters
fig = plot.node_relative_systematic_uncertainty(model)
fig.savefig("figures/wg{wg}/wg{wg}-node-uncertainty-sys-relative-{param}.pdf"\
.format(wg=wg, param=parameter))
fig.savefig("figures/wg{wg}/wg{wg}-node-uncertainty-sys-relative-{param}.png"\
.format(wg=wg, param=parameter))
# Plot the baseline systematic uncertainty
fig = plot.systematic_uncertainty(model)
fig.savefig("figures/wg{wg}/wg{wg}-node-uncertainty-sys-constant-{param}.pdf"\
.format(wg=wg, param=parameter))
fig.savefig("figures/wg{wg}/wg{wg}-node-uncertainty-sys-constant-{param}.png"\
.format(wg=wg, param=parameter))
# Plot node correlations.
fig = plot.node_correlations(model)
fig.savefig("figures/wg{wg}/wg{wg}-correlations-{param}.pdf"\
.format(wg=wg, param=parameter))
fig.savefig("figures/wg{wg}/wg{wg}-correlations-{param}.png"\
.format(wg=wg, param=parameter)) | 1,991 | 38.058824 | 86 | py |
ges-idr5 | ges-idr5-master/sandbox_plotting.py | #!/usr/bin/python
import yaml
import logging
import os
import matplotlib.pyplot as plt
from glob import glob
from code import (GESDatabase, plot, summary)
from astropy.table import Table
# Initialize logging.
logger = logging.getLogger("ges.idr5.qc")
logger.setLevel(logging.DEBUG)
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter(
"%(asctime)s [%(levelname)-8s] %(message)s"))
logger.addHandler(handler)
# Create a database object.
db_filename = "db.yaml"
with open(db_filename, "r") as fp:
credentials = yaml.load(fp)
database = GESDatabase(**credentials)
debug = False
wgs = (10, 11, 12, 13)
parameters = ("teff", "logg", "mh", "xi")
isochrones = glob("isochrones/*.dat")
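# Per-node QC figure set: benchmark performance, calibrating clusters, HR diagrams, comparisons with
# iDR4, solar results and parameter histograms, followed by WG-level node-to-node comparisons and
# summary tables.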
# Create node-level folders.
nodes = database.retrieve_table("SELECT id, wg, TRIM(name) as name FROM nodes")
for node in nodes:
folder = "figures/qc/wg{}/{}".format(node["wg"], node["name"])
if not os.path.exists(folder):
os.mkdir(folder)
for node in nodes:
print("Plotting benchmark performance for {}".format(node))
try:
fig = plot.node_benchmark_performance(database, node["wg"], node["name"],
ylims=dict(teff=500, logg=0.5, mh=0.5))
    except:
        if debug: raise
logger.exception(
"Could not create node_benchmark_performance figure for {}/{}:"\
.format(node["wg"], node["name"]))
continue
else:
if fig is not None:
basename = "figures/qc/wg{wg}/{name}/benchmarks-zoom".format(
wg=node["wg"], name=node["name"])
fig.savefig("{}.pdf".format(basename))
fig.savefig("{}.png".format(basename))
else:
logger.warn("No benchmark information for {}/{}".format(
node["wg"], node["name"]))
plt.close("all")
for node in nodes:
print("Plotting benchmark performance for {}".format(node))
try:
fig = plot.node_benchmark_performance(database, node["wg"], node["name"])
except:
if debug: raise
logger.exception(
"Could not create node_benchmark_performance figure for {}/{}:"\
.format(node["wg"], node["name"]))
continue
else:
if fig is not None:
basename = "figures/qc/wg{wg}/{name}/benchmarks".format(
wg=node["wg"], name=node["name"])
fig.savefig("{}.pdf".format(basename))
fig.savefig("{}.png".format(basename))
else:
logger.warn("No benchmark information for {}/{}".format(
node["wg"], node["name"]))
plt.close("all")
# Plot against clusters.
cluster_velocities = Table.read("fits-templates/oc_gc_radial_vel.dat", format="ascii")
for node in nodes:
for isochrone in isochrones:
cluster = isochrone.split("/")[-1].split("_")[0]
if cluster == "gamma2-Vel":
cluster = "gamma2_Vel"
print("Plotting cluster {} for node {}".format(cluster, node))
match = cluster_velocities["id"] == cluster
if not any(match):
vel_range = None
else:
vrad = cluster_velocities["rv"][match]
e_vrad = cluster_velocities["erv"][match]
vel_range = (vrad - 2*e_vrad, vrad + 2*e_vrad)
vel_range = (vel_range[0][0], vel_range[1][0])
try:
fig = plot.cluster(database, node["wg"], node["name"], cluster,
isochrone_filename=isochrone, vel_range=vel_range)
except:
if debug: raise
logger.exception(
"Could not create cluster {} figure for {}/{}:".format(
cluster, node["wg"], node["name"]))
else:
if fig is not None:
basename = "figures/qc/wg{}/{}/cluster-{}".format(
node["wg"], node["name"], cluster)
fig.savefig("{}.pdf".format(basename))
fig.savefig("{}.png".format(basename))
try:
fig = plot.cluster(database, node["wg"], node["name"], cluster,
isochrone_filename=isochrone, vel_range=vel_range,
limit_to_isochrone_range=True)
except:
if debug: raise
logger.exception(
"Could not create cluster {} figure for {}/{}:".format(
cluster, node["wg"], node["name"]))
else:
if fig is not None:
basename = "figures/qc/wg{}/{}/cluster-{}-limited".format(
node["wg"], node["name"], cluster)
fig.savefig("{}.pdf".format(basename))
fig.savefig("{}.png".format(basename))
# Plot against parameters for stars in this cluster.
try:
fig = plot.param_vs_param(database, node["wg"], node["name"],
cluster, "teff", vel_range=vel_range)
except:
if debug: raise
logger.exception(
"Could not create param_vs_param plot for cluster {} for {}/{}:"\
.format(cluster, node["wg"], node["name"]))
else:
if fig is not None:
basename = "figures/qc/wg{}/{}/cluster-{}-vs-teff".format(
node["wg"], node["name"], cluster)
fig.savefig("{}.pdf".format(basename))
fig.savefig("{}.png".format(basename))
try:
fig = plot.param_vs_param(database, node["wg"], node["name"],
cluster, "logg", vel_range=vel_range)
except:
if debug: raise
logger.exception(
"Could not create param_vs_param plot for cluster {} for {}/{}:"\
.format(cluster, node["wg"], node["name"]))
else:
if fig is not None:
basename = "figures/qc/wg{}/{}/cluster-{}-vs-logg".format(
node["wg"], node["name"], cluster)
fig.savefig("{}.pdf".format(basename))
fig.savefig("{}.png".format(basename))
plt.close("all")
# H-R diagrams by setup.
for node in nodes:
print("Plotting HR diagrams for node {}".format(node))
try:
fig = plot.hrd_by_setup(database, node["wg"], node["name"])
except:
if debug: raise
logger.exception(
"Could not create hrd_by_setup figure for {}/{}:"\
.format(node["wg"], node["name"]))
continue
else:
if fig is None: continue
basename = "figures/qc/wg{wg}/{name}/hrd-by-setup".format(
wg=node["wg"], name=node["name"])
fig.savefig("{}.pdf".format(basename))
fig.savefig("{}.png".format(basename))
plt.close("all")
# Node-to-node comparisons within a WG
for wg in wgs:
for parameter in parameters:
print("Plotting node-to-node comparison of {} for wg {}".format(
parameter, wg))
try:
fig = plot.compare_nodes_within_wg(database, wg, parameter)
except:
if debug: raise
logger.exception(
"Could not create compare_nodes_within_wg figure for {}/{}:"\
.format(wg, parameter))
continue
else:
fig.savefig("figures/qc/wg{}/{}.pdf".format(wg, parameter))
fig.savefig("figures/qc/wg{}/{}.png".format(wg, parameter))
plt.close("all")
"""
# Compare temperatures to photometric temperatures.
for node in nodes:
print("Plotting photometric temperature comparisons for {}".format(node))
try:
fig = plot.compare_to_photometric_teff(database, node["wg"], node["name"])
except:
if debug: raise
logger.exception(
"Could not create compare_to_photometric_teff figure for {}/{}"\
.format(node["wg"], node["name"]))
continue
else:
basename = "figures/qc/wg{}/{}/photometric-teff".format(
node["wg"], node["name"])
fig.savefig("{}.pdf".format(basename))
fig.savefig("{}.png".format(basename))
plt.close("all")
"""
# Compare to previous data release.
for node in nodes:
for parameter in parameters:
print("Comparing node {} parameters of {} to previous DR".format(node,
parameter))
try:
fig = plot.compare_to_previous_dr(
database, node["wg"], node["name"], parameter)
except:
if debug: raise
logger.exception(
"Could not create compare_to_previous_dr figure for {}/{}/{}:"\
.format(node["wg"], node["name"], parameter))
continue
else:
basename = "figures/qc/wg{}/{}/idr4-compare-{}".format(
node["wg"], node["name"], parameter)
fig.savefig("{}.pdf".format(basename))
fig.savefig("{}.png".format(basename))
plt.close("all")
# Total HRD.
for node in nodes:
print("Plotting total HRD for {}".format(node))
try:
fig = plot.hrd(database, node["wg"], node["name"])
except:
if debug: raise
logger.exception(
"Could not create hrd figure for {}/{}:".format(
node["wg"], node["name"]))
continue
else:
basename = "figures/qc/wg{wg}/{name}/hrd".format(
wg=node["wg"], name=node["name"])
fig.savefig("{}.pdf".format(basename))
fig.savefig("{}.png".format(basename))
plt.close("all")
# Plot results for the Sun.
for node in nodes:
print("PLotting solar results for {}".format(node))
try:
fig = plot.hrd(database, node["wg"], node["name"], mark=(5777, 4.4),
where="CNAME = 'ssssssss-sssssss'")
except:
if debug: raise
logger.exception(
"Could not create hrd (SUN) figure for {}/{}:".format(
node["wg"], node["name"]))
continue
else:
basename = "figures/qc/wg{wg}/{name}/solar".format(
wg=node["wg"], name=node["name"])
fig.savefig("{}.pdf".format(basename))
fig.savefig("{}.png".format(basename))
plt.close("all")
# Plot distributions of stellar parameters.
for node in nodes:
print("Plotting stellar parameter distributions for {}".format(node))
try:
fig = plot.stellar_parameter_histograms(database, node["wg"], node["name"])
except:
if debug: raise
logger.exception(
"Could not create stellar_parameter_histograms for {}/{}:".format(
node["wg"], node["name"]))
continue
else:
basename = "figures/qc/wg{}/{}/parameter-histogram".format(
node["wg"], node["name"])
fig.savefig("{}.pdf".format(basename))
fig.savefig("{}.png".format(basename))
plt.close("all")
# Plot distributions of errors in stellar parameters.
for node in nodes:
print("Plotting stellar parameter error distributions for {}".format(node))
try:
fig = plot.stellar_parameter_error_histograms(database, node["wg"], node["name"])
except:
if debug: raise
logger.exception(
"Could not create stellar_parameter_error_histograms for {}/{}:".format(
node["wg"], node["name"]))
continue
else:
basename = "figures/qc/wg{}/{}/parameter-error-histogram".format(
node["wg"], node["name"])
fig.savefig("{}.pdf".format(basename))
fig.savefig("{}.png".format(basename))
plt.close("all")
# Create summary tables.
parameter_summary = summary.stellar_parameter_summary(database)
parameter_summary.write("figures/qc/parameter-summary.txt",
format="ascii")
parameter_ranges = summary.stellar_parameter_range(database)
parameter_ranges.write("figures/qc/parameter-range-summary.txt",
format="ascii")
for node in nodes:
tech = summary.tech_flags(database, node["wg"], node["name"])
if tech is not None:
tech.write(
"figures/tech-summary-{}-{}.txt".format(node["wg"], node["name"]),
format="ascii")
| 12,041 | 25.819599 | 89 | py |
ges-idr5 | ges-idr5-master/sandbox_ensemble.py |
"""
Sandbox for ensemble model
"""
import yaml
import logging
import os
import matplotlib.pyplot as plt
import numpy as np
from glob import glob
from code import (GESDatabase, plot, summary)
from astropy.table import Table
# Initialize logging.
logger = logging.getLogger("ges.idr5.qc")
logger.setLevel(logging.DEBUG)
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter(
"%(asctime)s [%(levelname)-8s] %(message)s"))
logger.addHandler(handler)
# Create a database object.
db_filename = "db.yaml"
with open(db_filename, "r") as fp:
credentials = yaml.load(fp)
database = GESDatabase(**credentials)
from code.model.ensemble import SingleParameterEnsembleModel
benchmarks = Table.read("fits-templates/benchmarks/GES_iDR5_FGKMCoolWarm_Benchmarks_AcceptedParams_01082016.fits")
"""
model = SingleParameterEnsembleModel(database, 11, benchmarks[benchmarks["TEFF"] < 8000])
data = model._prepare_data("teff", default_calibrator_sigma=150)
op_params = model.optimize(data, init={
"intrinsic_var": 100**2,
"estimator_sys_var": np.ones(data["N_estimators"]) * 100**2,
"estimator_rand_var": np.ones(data["N_estimators"]) * 100**2,
"truths": np.array(data["calibrator_mu"]),
"rho_parameters": np.zeros(10)
}, overwrite=True)
# Drop the optimized covariance matrix.
del op_params["covariance"]
samples = model.sample(data, init=op_params, overwrite=True)
fig = samples.plot(
pars=("intrinsic_sigma", "estimator_rand_sigma", "estimator_sys_sigma"))
"""
benchmarks["E_FEH"] = 0.1
"""
model = MultipleParameterEnsembleModel(database, 11,
benchmarks[(8000 > benchmarks["TEFF"]) * (benchmarks["TEFF"] > 4000)])
data = model._prepare_data(("teff", "logg"), (150, 0.1, ))
intrinsic_sigma = np.array([150, 0.1])
K = data["N_parameters"] * data["N_estimators"]
init = {
#"intrinsic_var": intrinsic_var,
"estimator_sys_sigma": np.tile(intrinsic_sigma, data["N_estimators"]).reshape(data["N_estimators"], -1),
"estimator_rand_sigma": np.tile(intrinsic_sigma, data["N_estimators"]).reshape(data["N_estimators"], -1),
"truths": data["calibrator_mu"].reshape(-1, data["N_calibrators"]).T,
"rho_estimators": np.zeros(data["__N_pairwise_estimators"]),
"rho_parameters": np.zeros(data["__N_pairwise_parameters"])
#"Sigma": 100 * np.array([np.eye(data["N_parameters"] * data["N_estimators"]) for i in range(data["N_calibrators"])])
}
#op_params = model.optimize(data, iter=10000, init=init)
bar = model.sample(data, init=lambda x=0: init, iter=10000, chains=1, validate=False)
"""
"""
model = SingleParameterEnsembleModelWithCorrelations(database, 11, benchmarks[benchmarks["TEFF"] < 8000])
data = model._prepare_data("teff", default_calibrator_sigma=150)
data["additive_sigma"] = data["additive_var"]**0.5
intrinsic_sigma = 150.0
init = {
"estimator_sys_sigma": np.tile(intrinsic_sigma, data["N_estimators"]),
"estimator_rand_sigma": np.tile(intrinsic_sigma, data["N_estimators"]),
"truths": data["calibrator_mu"],
"rho_estimators": np.zeros(data["__N_pairwise_estimators"]),
"Sigma": intrinsic_sigma**2 * np.array([np.eye(data["N_estimators"]) for _ in range(data["N_calibrators"])])
}
op_params = model.optimize(data, init=init)
"""
model = SingleParameterEnsembleModel(database, 10, "teff", benchmarks[benchmarks["TEFF"] < 8000])
data, metadata = model._prepare_data()
var_intrinsic = 100**2
init = {
"truths": data["mu_calibrator"],
"var_intrinsic": var_intrinsic**2,
"var_sys_estimator": var_intrinsic**2 * np.ones(data["N_estimators"]),
"alpha_sq": 1000 * np.ones(data["N_estimators"]),
"rho_estimators": np.zeros(metadata["N_pairwise_estimators"]),
"c0_estimators": np.zeros(data["N_estimators"])
}
op_params = model.optimize(data, init=init)
fit = model.sample(data, init=op_params, iter=10)
raise a
model.homog
raise a
def homogenise_survey_measurements(cname, wg, parameter, ensemble_model_samples,
database):
"""
    Produce an unbiased estimate of an astrophysical parameter for a given
survey object.
:param cname:
The CNAME (unique star identifier) of an object.
:param wg:
The working group to consider measurements from.
:param parameter:
The name of the parameter to estimate.
:param ensemble_model_samples:
Samples from the ensemble model to use when estimating the astrophysical
parameter.
"""
# Get the data for this object.
estimates = database.retrieve_table(
""" SELECT DISTINCT ON (filename, node_id)
cname, node_id, snr, {parameter}
FROM results, nodes
WHERE nodes.wg = {wg}
AND nodes.id = results.node_id
AND cname = '{cname}'
AND {parameter} <> 'NaN'
""".format(wg=wg, cname=cname, parameter=parameter))
assert estimates is not None
# Extract N samples for all the parameters.
# For each sample, calculate:
# 1. The total variance (systematic**2 + (alpha/SNR)**2)
# 2. The weighted mean from all observations by that nodes.
# --> check that this follows 1/sqrt(N)
# 3. Construct a covariance matrix using the weighted means, uncertainties
# and the correlation coefficients
# 4. Draw from a Gaussian using the weighted means and your new Cov matrix
# 5. Record the draw.
pars = [
"var_intrinsic",
"var_sys_estimator",
"alpha_sq",
"rho_estimators",
"c0_estimators"
]
samples = ensemble_model_samples.extract(pars=pars)
unique_node_ids = ensemble_model_samples.data["node_ids"]
K = len(samples["var_intrinsic"])
estimates = estimates.group_by("node_id")
# 1. Calculate the total variance in each measurement.
var_total = np.zeros((len(estimates), K))
for j in range(len(estimates)):
# Get the node index.
k = np.where(estimates["node_id"][j] == unique_node_ids)[0][0]
var_total[j, :] \
= samples["var_sys_estimator"][:, k] \
+ samples["alpha_sq"][:, k]/estimates["snr"][j]
# 2. Calculate the weighted mean from each node.
M = len(set(estimates["node_id"]))
weighted_node_mu = np.zeros((M, K))
weighted_node_variance = np.zeros((M, K))
node_ids = np.zeros(M)
for i, si in enumerate(estimates.groups.indices[:-1]):
ei = estimates.groups.indices[i + 1]
mu = (estimates[parameter][si:ei]).reshape(-1, 1) # Biases
variance = var_total[si:ei]
weights = 1.0/variance
normalized_weights = weights/np.sum(weights, axis=0)
weighted_mu = np.sum(normalized_weights * mu, axis=0)
weighted_variance = 1.0/np.sum(weights, axis=0)
weighted_node_mu[i, :] = weighted_mu + samples["c0_estimators"][:, i]
weighted_node_variance[i, :] = weighted_variance
node_ids[i] = estimates["node_id"][si]
posterior = np.nan * np.ones(K)
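    # For each posterior sample: build the node covariance matrix from the weighted variances and the
    # sampled correlation coefficients, then combine the node means with the minimum-variance
    # (inverse-covariance) weights.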
for i in range(K):
Sigma = np.eye(M) * weighted_node_variance[:, i]
a = 0
for j in range(M):
for k in range(j + 1, M):
term = samples["rho_estimators"][i, a] * Sigma[j, j]**0.5 * Sigma[k, k]**0.5
Sigma[j, k] = term
Sigma[k, j] = term
a += 1
W = np.ones((M, 1))
Cinv = np.linalg.inv(Sigma)
var_min = 1.0/np.dot(np.dot(W.T, Cinv), W)
posterior[i] = var_min * np.dot(np.dot(W.T, Cinv), weighted_node_mu[:, i])
return posterior
sun = homogenise_survey_measurements(
"ssssssss-sssssss", 11, "logg", samples, database)
# 18 Sco:
homogenise_survey_measurements(
"16153746-0822162", 11, "logg", samples, database)
homogenise_survey_measurements(
"14153967+1910567", 11, "logg", samples, database) | 7,903 | 27.534296 | 121 | py |
ges-idr5 | ges-idr5-master/sandbox_plot_wg_performance.py |
import logging
import yaml
from glob import glob
from code import (GESDatabase, plot, summary)
from astropy.table import Table

# Logger used by the exception handlers below.
logger = logging.getLogger("ges.idr5.qc")
# Create a database object.
db_filename = "db.yaml"
with open(db_filename, "r") as fp:
credentials = yaml.load(fp)
database = GESDatabase(**credentials)
# Clean up bits and pieces...
savefig_kwds = dict(dpi=300, bbox_inches="tight")
benchmarks = Table.read("fits-templates/benchmarks/GES_iDR5_FGKM_Benchmarks_ARC_29092016.fits")
benchmarks = benchmarks[benchmarks["TEFF"] < 8000]
isochrones = glob("isochrones/*.dat")
debug = True
for wg in (11, ):
# Plot clusters.
# Plot against clusters.
cluster_velocities = Table.read("fits-templates/oc_gc_radial_vel.dat", format="ascii")
for isochrone in isochrones:
cluster = isochrone.split("/")[-1].split("_")[0]
if cluster == "gamma2-Vel":
cluster = "gamma2_Vel"
match = cluster_velocities["id"] == cluster
if not any(match):
vel_range = None
else:
vrad = cluster_velocities["rv"][match]
e_vrad = cluster_velocities["erv"][match]
vel_range = (vrad - 2*e_vrad, vrad + 2*e_vrad)
vel_range = (vel_range[0][0], vel_range[1][0])
try:
fig = plot.cluster(database, cluster, wg,
isochrone_filename=isochrone, vel_range=vel_range)
except:
if debug: raise
logger.exception(
"Could not create cluster {} figure for {}:".format(cluster, wg))
else:
if fig is not None:
basename = "figures/wg{wg}/wg{wg}-cluster-{cluster}".format(
wg=wg, cluster=cluster)
fig.savefig("{}.pdf".format(basename), **savefig_kwds)
fig.savefig("{}.png".format(basename), **savefig_kwds)
try:
fig = plot.cluster(database, cluster, wg,
isochrone_filename=isochrone, vel_range=vel_range,
limit_to_isochrone_range=True)
except:
if debug: raise
logger.exception(
"Could not create cluster {} figure for {}:".format(cluster, wg))
else:
if fig is not None:
basename = "figures/wg{wg}/wg{wg}-cluster-{cluster}-limited"\
.format(wg=wg, cluster=cluster)
fig.savefig("{}.pdf".format(basename), **savefig_kwds)
fig.savefig("{}.png".format(basename), **savefig_kwds)
raise a
# Plot benchmarks first.
fig = plot.wg_benchmark_performance(database, wg, benchmarks,
show_recommended=True, ylims=dict(TEFF=1000, LOGG=1, FEH=1))
fig.savefig("figures/wg{wg}/wg{wg}-benchmarks-zoom.pdf".format(wg=wg), **savefig_kwds)
fig.savefig("figures/wg{wg}/wg{wg}-benchmarks-zoom.png".format(wg=wg), **savefig_kwds)
fig = plot.wg_benchmark_performance(
database, wg, benchmarks, show_recommended=True)
fig.savefig("figures/wg{wg}/wg{wg}-benchmarks.pdf".format(wg=wg), **savefig_kwds)
fig.savefig("figures/wg{wg}/wg{wg}-benchmarks.png".format(wg=wg), **savefig_kwds)
| 3,134 | 30.35 | 95 | py |
ges-idr5 | ges-idr5-master/sandbox_plot_flags.py | #!/usr/bin/python
import yaml
from code import (GESDatabase, plot)
# Create a database object.
db_filename = "db.yaml"
with open(db_filename, "r") as fp:
credentials = yaml.load(fp)
database = GESDatabase(**credentials)
for wg in (10, 11, 12, 13):
fig, meta = plot.flags.heatmap(database, wg,
show_multiple_flags_per_node=True, group_by="node",
use_cuthill_mckee=True)
fig.savefig(
"figures/wg{}/flags-heatmap-node.pdf".format(wg), dpi=300)
fig, meta = plot.flags.heatmap(database, wg,
show_multiple_flags_per_node=True, group_by="node",
use_cuthill_mckee=False)
fig.savefig(
"figures/wg{}/flags-heatmap-node-ordered.pdf".format(wg), dpi=300)
print("Done {}".format(wg)) | 751 | 24.931034 | 74 | py |
ges-idr5 | ges-idr5-master/sandbox_mean_ensemble.py |
"""
Sandbox for ensemble model
"""
import yaml
import logging
import os
import matplotlib.pyplot as plt
import numpy as np
from glob import glob
from code import (GESDatabase, plot, summary)
from astropy.table import Table
# Initialize logging.
logger = logging.getLogger("ges.idr5.qc")
logger.setLevel(logging.DEBUG)
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter(
"%(asctime)s [%(levelname)-8s] %(message)s"))
logger.addHandler(handler)
# Create a database object.
db_filename = "db.yaml"
with open(db_filename, "r") as fp:
credentials = yaml.load(fp)
database = GESDatabase(**credentials)
from code.model.ensemble import MeanEnsembleModel
benchmarks = Table.read("fits-templates/benchmarks/GES_iDR5_FGKMCoolWarm_Benchmarks_AcceptedParams_01082016.fits")
benchmarks["E_FEH"] = 0.1
model = MeanEnsembleModel(database, 11, "teff", benchmarks[benchmarks["TEFF"] < 8000])
data, metadata = model._prepare_data()
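# Initial guess for the optimiser: calibrator values as the truths, 100 K systematic scatter per node,
# and no correlations or biases between nodes.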
init = {
"truths": data["mu_calibrator"],
"var_sys_estimator": 100**2 * np.ones(data["N_nodes"]),
"log10_alpha_sq": 6 * np.ones(data["N_nodes"]),
"rho_estimators": np.zeros(metadata["N_pairwise_nodes"]),
"bias": np.zeros(data["N_nodes"])
}
raise a
op_params = model.optimize(data, init=init, iter=100000)
raise a
fitted = model.sample(data, init=[op_params]*4, iter=10000, chains=4)
raise a
model.homog
raise a
| 1,405 | 18.260274 | 114 | py |
ges-idr5 | ges-idr5-master/scripts/plot_korn_cluster.py |
import yaml
import logging
import os
import matplotlib.pyplot as plt
import numpy as np
from glob import glob
from code import (GESDatabase, plot, summary)
from astropy.table import Table
# Initialize logging.
logger = logging.getLogger("ges.idr5.qc")
logger.setLevel(logging.DEBUG)
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter(
"%(asctime)s [%(levelname)-8s] %(message)s"))
logger.addHandler(handler)
# Create a database object.
db_filename = "db.yaml"
with open(db_filename, "r") as fp:
credentials = yaml.load(fp)
database = GESDatabase(**credentials)
debug = False
wgs = (10, 11, 12, 13)
parameters = ("teff", "logg", "mh", "xi")
isochrones = glob("isochrones/*.dat")
isochrones = ["isochrones/M67_Parsec_4.0Gyr_Z0.017.dat"]
# Plot against clusters.
cluster_velocities = Table.read("fits-templates/oc_gc_radial_vel.dat", format="ascii")
nodes = database.retrieve_table("SELECT id, wg, TRIM(name) as name FROM nodes WHERE wg = 11")
fig, axes = plt.subplots(2, 3)
axes = np.array(axes).flatten()
index = 0
node_names = []
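# One panel per WG11 node: M67 members plotted against the 4 Gyr PARSEC isochrone, with points
# coloured by [Fe/H] and a shared colour bar added at the end.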
for node in nodes:
try:
ax = axes[index]
except IndexError:
print("Whoa we ran outa axes dude!")
break
for isochrone in isochrones:
cluster = isochrone.split("/")[-1].split("_")[0]
if cluster == "gamma2-Vel":
cluster = "gamma2_Vel"
print("Plotting cluster {} for node {}".format(cluster, node))
match = cluster_velocities["id"] == cluster
if not any(match):
vel_range = None
else:
vrad = cluster_velocities["rv"][match]
e_vrad = cluster_velocities["erv"][match]
vel_range = (vrad - 2*e_vrad, vrad + 2*e_vrad)
vel_range = (vel_range[0][0], vel_range[1][0])
try:
r = plot.cluster(database, node["wg"], node["name"], cluster,
isochrone_filename=isochrone, vel_range=vel_range, ax=ax,
no_tech_flags=True, limit_to_isochrone_range=True,
show_legend=False, vmin=-0.3, vmax=0.15)
except:
if debug: raise
logger.exception(
"Could not create cluster {} figure for {}/{}:".format(
cluster, node["wg"], node["name"]))
else:
if r is not None:
node_names.append(node["name"])
index += 1
continue
xticks = [3000, 4000, 5000, 6000, 7000]
for ax, name in zip(axes, node_names):
ax.text(0.05, 0.90, name, color="k", transform=ax.transAxes)
if not ax.is_first_col():
ax.set_ylabel("")
ax.set_yticklabels([])
ax.set_xticks(xticks)
if ax.is_last_row():
ax.set_xticks(xticks)
ax.set_xticklabels(xticks)
ax.set_xlabel(r"$T_{\rm eff}$ $[{\rm K}]$")
fig.tight_layout()
cbar = plt.colorbar(r, ax=list(axes))
cbar.set_label(r"[Fe/H]")
raise a | 2,899 | 25.363636 | 93 | py |
ges-idr5 | ges-idr5-master/scripts/propagate_flags.py | #!/usr/bin/python
"""
Propagate relevant flag information from one node to others.
"""
import logging
import yaml
from code import GESDatabase
# Connect to database.
db_filename = "db.yaml"
with open(db_filename, "r") as fp:
credentials = yaml.load(fp)
# Create a database object.
database = GESDatabase(**credentials)
logger = logging.getLogger("ges")
with open("flags.yaml", "r") as fp:
qc_flags = yaml.load(fp)
raise a
# Clear any previous propagations before starting.
logger.info("Clearing previous propagations and setting all to have passed_quality_control = True")
database.update(
""" UPDATE results
SET propagated_tech_from_result_id = null,
propagated_peculi_from_result_id = null,
propagated_remark_from_result_id = null,
propagated_tech = '',
propagated_peculi = '',
propagated_remark = '',
passed_quality_control = true
WHERE passed_quality_control = false;""")
database.connection.commit()
# Identify spurious spectra and mark them as such.
N_peculiar_spectra = {}
peculiar_spectra_kwds = dict(
and_or="and", sigma_discrepant=3, teff_discrepant=250, logg_discrepant=0.25)
for wg in (10, 11, 12, 13):
logger.info("Querying for peculiar spectra in WG{}".format(wg))
kwds = dict(wg=wg)
kwds.update(peculiar_spectra_kwds)
peculiar_spectrum_query = """
with t4 as (
select id, cname, filename, avg_filename_teff, avg_cname_teff, stddev_cname_teff, abs((avg_cname_teff - avg_filename_teff)/(0.00001 + stddev_cname_teff)) as abs_sigma_teff_discrepant, avg_filename_logg, avg_cname_logg, stddev_cname_logg, abs((avg_cname_logg - avg_filename_logg)/(0.00001 + stddev_cname_logg)) as abs_sigma_logg_discrepant FROM (with ar as (select distinct on (filename) id, cname, trim(filename) as filename, avg(teff) over w as avg_filename_teff, avg(logg) over w as avg_filename_logg from (with n as (select id from nodes where wg = {wg}) select distinct on (r.filename, r.node_id) r.id, r.cname, trim(r.filename) as filename, r.node_id, teff, logg from n, results as r where r.node_id = n.id and teff <> 'NaN' or logg <> 'NaN') t window w as (partition by filename)) select ar.id, ar.cname, ar.filename, ar.avg_filename_teff, avg(avg_filename_teff) over w2 as avg_cname_teff, stddev(avg_filename_teff) over w2 as stddev_cname_teff, ar.avg_filename_logg, avg(avg_filename_logg) over w2 as avg_cname_logg, stddev(avg_filename_logg) over w2 as stddev_cname_logg FROM ar window w2 as (partition by cname)) t3)
select * from t4 where (t4.abs_sigma_teff_discrepant > {sigma_discrepant} and t4.abs_sigma_teff_discrepant <> 'NaN' and abs(t4.avg_cname_teff - avg_filename_teff) >= {teff_discrepant}) {and_or} (t4.abs_sigma_logg_discrepant > {sigma_discrepant} and abs(t4.avg_cname_logg - t4.avg_filename_logg) >= {logg_discrepant} and t4.abs_sigma_logg_discrepant <> 'NaN') order by cname asc;""".format(
**kwds)
peculiar_spectra = database.retrieve_table(peculiar_spectrum_query)
N_peculiar_spectra[wg] = len(peculiar_spectra)
for row in peculiar_spectra:
filenames = row["filename"].strip().split("|")
logger.info("Propagating {}/{}/{}".format(
row["id"], row["cname"], row["filename"]))
n = 0
for filename in filenames:
n += database.update(
""" UPDATE results
SET propagated_tech = '10106-{}-00-00-A',
propagated_tech_from_result_id = '{}',
passed_quality_control = false
WHERE filename LIKE '%{}%';""".format(
wg, int(row["id"]), filename))
if n > 0:
logger.info("--> affected {} results".format(n))
database.connection.commit()
# PROPAGATE BY SPECTRUM
def propagate_by_spectrum(flag, constraint=None, commit=False):
constraint_str = "" if constraint is None else " AND {}".format(constraint)
affected = database.retrieve_table(
""" SELECT id, TRIM(filename) AS filename, TRIM(tech) as tech
FROM results
WHERE tech LIKE '%{}-%' {}
""".format(flag, constraint_str))
if affected is None:
return 0
N = 0
for row in affected:
# Each row can have multiple TECH flags, so first identify the TECH flag
# that PostgreSQL matched on.
tech_flags = row["tech"].strip().split("|")
for tech_flag in tech_flags:
if "{}-".format(flag) in tech_flag:
matched_tech_flag = tech_flag
break
else:
raise ValueError(
"cannot identify tech flag {} from the SQL match: {}".format(
flag, row["tech"].strip()))
# Update other results using the same filename(s).
filenames = row["filename"].strip().split("|")
for j, filename in enumerate(filenames):
if not filename: continue
n = database.update(
""" UPDATE results
SET propagated_tech_from_result_id = '{}',
propagated_tech = '{}',
passed_quality_control = false
WHERE filename LIKE '%{}%'
AND passed_quality_control = true
""".format(
int(row["id"]), matched_tech_flag, filename))
N += n
if n > 0:
logger.info("Propagated ({}/{}/{}) to {} other entries".format(
row["id"], matched_tech_flag, filename, n))
if commit:
database.connection.commit()
return N
# PROPAGATE BY SPECTRUM
N_propagations = {}
for key, value in qc_flags["propagate_flags_by_spectrum"].items():
if key == "no_constraint":
for flag in value:
N_propagations.setdefault(flag, 0)
N_propagations[flag] += propagate_by_spectrum(flag, None)
else:
flag = key
constraint = value.get("constraint", None)
N_propagations.setdefault(flag, 0)
N_propagations[flag] += propagate_by_spectrum(flag, constraint)
database.connection.commit()
# PROPAGATE BY CNAME
def propagate_by_cname(flag, constraint=None, commit=False):
constraint_str = "" if constraint is None else " AND {}".format(constraint)
affected = database.retrieve_table(
""" SELECT id, cname, TRIM(tech) as tech
FROM results
WHERE tech LIKE '%{}-%' {}
""".format(flag, constraint_str))
if affected is None:
return 0
N = 0
for row in affected:
# Each row can have multiple TECH flags, so first identify the TECH flag
# that PostgreSQL matched on.
tech_flags = row["tech"].strip().split("|")
for tech_flag in tech_flags:
if "{}-".format(flag) in tech_flag:
matched_tech_flag = tech_flag
break
else:
raise ValueError(
"cannot identify tech flag {} from the SQL match: {}".format(
flag, row["tech"].strip()))
# Update other results matching this CNAME.
n = database.update(
""" UPDATE results
SET propagated_tech_from_result_id = '{}',
propagated_tech = '{}',
passed_quality_control = false
WHERE cname = '{}'
AND passed_quality_control = true;
""".format(int(row["id"]), matched_tech_flag, row["cname"]))
N += n
if n > 0:
logger.info("Propagated ({}/{}/{}) to {} other entries".format(
row["id"], matched_tech_flag, row["cname"], n))
if commit:
database.connection.commit()
return N
# PROPAGATE BY CNAME
for key, value in qc_flags["propagate_flags_by_cname"].items():
if key == "no_constraint":
for flag in value:
N_propagations.setdefault(flag, 0)
N_propagations[flag] += propagate_by_cname(flag, None)
else:
flag = key
constraint = value.get("constraint", None)
N_propagations.setdefault(flag, 0)
N_propagations[flag] += propagate_by_cname(flag, constraint)
database.connection.commit()
# NODE-SPECIFIC FLAGS
N_marked_as_poor_quality = {}
for key, value in qc_flags["node_specific_flags"].items():
if key == "no_constraint":
for flag in value:
N_marked_as_poor_quality.setdefault(flag, 0)
N = database.update(
""" UPDATE results
SET passed_quality_control = false
WHERE tech LIKE '%{}-%'
AND passed_quality_control = true;
""".format(flag))
N_marked_as_poor_quality[flag] += N
if N > 0:
logger.info(
"Marked {} results as poor quality due to matching flag {}"\
.format(N, flag))
else:
flag = key
constraint = value.get("constraint", None)
constraint_str = "" if constraint is None else " AND {}".format(constraint)
N_marked_as_poor_quality.setdefault(flag, 0)
N = database.update(
""" UPDATE results
SET passed_quality_control = false
WHERE tech LIKE '%{}-%'
AND passed_quality_control = true
{};
""".format(flag, constraint_str))
N_marked_as_poor_quality[flag] += N
if N > 0:
logger.info(
"Marked {} results as poor quality due to matching flag {} "\
"and constraint {}".format(N, flag, constraint))
# Flag *CLEARLY* erroneous results from the WG11/IACAIP node as out-of-grid.
node_id = database.retrieve_node_id(11, "IACAIP")
N = database.update(
""" UPDATE results
SET passed_quality_control = false,
propagated_tech = '10308-11-00-00-A'
WHERE results.node_id = '{}'
AND passed_quality_control
AND (
(teff <> 'NaN' AND (teff <= 3000 OR teff >= 8000))
OR
(logg <> 'NaN' AND (logg <= 0 OR logg >= 5))
OR
(feh <> 'NaN' AND (feh <= -3 OR feh >= 1))
);
""".format(node_id))
logger.info("Marked {} WG11/IACAIP results as out-of-grid.")
database.connection.commit() | 10,591 | 35.524138 | 1,133 | py |
ges-idr5 | ges-idr5-master/scripts/homogenise_wg11.py |
"""
Homogenisation models.
"""
import yaml
import logging
import numpy as np
from code import GESDatabase
from code.model.ensemble import SingleParameterEnsembleModel
from astropy.table import Table
# Initialize logging.
logger = logging.getLogger("ges")
# Create a database object.
db_filename = "db.yaml"
with open(db_filename, "r") as fp:
credentials = yaml.load(fp)
database = GESDatabase(**credentials)
# Load the "benchmarks"
# Only use "benchmarks" with TEFF < 8000 K
benchmarks = Table.read("fits-templates/benchmarks/GES_iDR5_FGKM_Benchmarks_ARC_29092016.fits")
benchmarks = benchmarks[benchmarks["TEFF"] < 8000]
model_path_format = "homogenisation-wg{wg}-{parameter}.model"
wg = 11
parameters = ("teff", "logg")
sample_kwds = dict(chains=4, iter=2000)
scales = dict(teff=250, logg=0.25)
models = {}
for parameter in parameters:
model = SingleParameterEnsembleModel(database, wg, parameter, benchmarks)
scale = scales[parameter]
data, metadata = model._prepare_data(default_sigma_calibrator=scale)
init = {
"truths": data["mu_calibrator"],
"var_intrinsic": scale**2,
"var_sys_estimator": scale**2 * np.ones(data["N_estimators"]),
"alpha_sq": 1000 * np.ones(data["N_estimators"]),
"rho_estimators": np.zeros(metadata["N_pairwise_estimators"]),
"c0_estimators": np.zeros(data["N_estimators"])
}
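    # Start the optimisation from the calibrator values, with the intrinsic
    # and per-node systematic variances initialised at the adopted parameter
    # scale squared, and no node-node correlations or node biases.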
op_params = model.optimize(data, init=init)
fit = model.sample(data, init=op_params, **sample_kwds)
    model.write(
        model_path_format.format(wg=wg, parameter=parameter), overwrite=True)
# Homogenise this parameter for all stars.
model.homogenise_all_stars(update_database=True)
models[parameter] = model
# TODO:
# - VROT
# - VEL
# - FLAGS
# - Need to remove IACAIP's shit on the edges (e.g., 8000) -- those fuckers
# WTF: 12581939-6453533
# WTF: 20184620-1501141
# WTF: 22002658-5507466
# Some notes:
# - Nice is the only WG11 node that provides [alpha/Fe].
# - If a node provides `mh` or `feh`, we can treat them as the same (see `setup_db.py`)
# - Five nodes provide estimates of `xi`: Lumba, CAUP, Vilnius, EPINARBO, UCM
| 2,151 | 25.567901 | 95 | py |
ges-idr5 | ges-idr5-master/scripts/homogenise_fast.py |
"""
Homogenisation models.
"""
import yaml
import logging
import numpy as np
import os
from astropy.table import Table
from collections import OrderedDict
from code import GESDatabase
from code.model.ensemble import EnsembleModel, MedianModel
# Initialize logging.
logger = logging.getLogger("ges")
# Create a database object.
db_filename = "db.yaml"
with open(db_filename, "r") as fp:
credentials = yaml.load(fp)
database = GESDatabase(**credentials)
# Load the "benchmarks"
# Only use "benchmarks" with TEFF < 8000 K
benchmarks = Table.read("fits-templates/benchmarks/GES_iDR5_FGKM_Benchmarks_ARC_29092016.fits")
benchmarks = benchmarks[benchmarks["TEFF"] < 8000]
benchmarks["E_FEH"] = 0.10
model_paths = "homogenisation-wg{wg}-{parameter}.model"
wgs = (11, )
parameter_scales = OrderedDict([
("teff", 250),
("logg", 0.25),
("feh", 0.25)
])
sample_kwds = dict(chains=4, iter=10000, thin=10)
finite = np.isfinite(benchmarks["TEFF"] * benchmarks["LOGG"] * benchmarks["FEH"])
benchmarks = benchmarks[finite]
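# xi and alpha/Fe are homogenised with the simpler MedianModel, adopting
# default uncertainties of 0.5 km/s and 0.10 dex respectively; the ensemble
# model below is only fit for teff, logg and feh.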
model = MedianModel(database, 11, "xi")
model.homogenise_all_stars(update_database=True, default_sigma=0.5)
model = MedianModel(database, 11, "alpha_fe")
model.homogenise_all_stars(update_database=True, default_sigma=0.10)
models = {}
for wg in wgs:
models[wg] = {}
for parameter, scale in parameter_scales.items():
model_path = model_paths.format(wg=wg, parameter=parameter)
if os.path.exists(model_path):
model = EnsembleModel.read(model_path, database)
else:
model = EnsembleModel(database, wg, parameter, benchmarks)
data, metadata = model._prepare_data(
default_sigma_calibrator=scale)
init = {
"truths": data["mu_calibrator"],
"biases": np.zeros(data["N"]),
"missing_estimates": np.random.uniform(
data["lower_bound"], data["upper_bound"], size=data["TM"]),
"alpha_sq": np.mean([data["lower_alpha_sq"], data["upper_alpha_sq"]]) * np.ones(data["N"]),
"vs_c": scale**2 * np.ones(data["N"]),
"vs_a": 1e-2 + np.zeros((data["N"], data["S"])).T,
"vs_b": 1e-2 + np.ones((data["N"], data["S"])).T,
"L_corr": np.eye(data["N"])
}
print("Number of model parameters: {}".format(
sum([np.array(v).size for v in init.values()])))
op_params = model.optimize(data, init=init, iter=100000)
fit = model.sample(data, init=op_params, **sample_kwds)
model.write(model_path,
overwrite=True, __ignore_model_pars=("Sigma", "full_rank_estimates"))
model.homogenise_all_stars(update_database=True)
# TODO:
# - VROT
# - VEL
# - FLAGS
# - XI
# Some notes:
# - If a node provides `mh` or `feh`, we can treat them as the same (see `scripts/setup_db.py`)
| 2,926 | 27.980198 | 107 | py |
ges-idr5 | ges-idr5-master/scripts/setup_db.py | #!/usr/bin/python
"""
Create database for the fifth internal data release of the Gaia-ESO Survey.
"""
import logging
import numpy as np
import psycopg2 as pg
import yaml
from glob import glob
# For fake data generation
import os
from astropy.io import fits
from code import GESDatabase
db_filename = "db.yaml"
nodes_filename = "nodes.yaml"
schema_filename = "code/schema.sql"
masterlist_filename \
= "fits-templates/masterlist/GES_iDR5_spectra_masterlist_15052016.fits"
# Connect to database.
with open(db_filename, "r") as fp:
credentials = yaml.load(fp)
credentials["database"] = "ges_idr5_with_spectra_snr"
connection = pg.connect(**credentials)
logger = logging.getLogger("ges")
logger.info("Connected to database.")
# Create the tables.
cursor = connection.cursor()
logger.info("Creating tables from {}...".format(schema_filename))
with open(schema_filename, "r") as fp:
cursor.execute(fp.read())
cursor.close()
logger.info("Tables created.")
connection.commit()
connection.close()
# Create a database object.
database = GESDatabase(**credentials)
# Create nodes.
with open(nodes_filename, "r") as fp:
all_nodes = yaml.load(fp)
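# nodes.yaml is expected to map each working group to a list of node names,
# for example (illustrative values only):
#   11: [Lumba, CAUP, Vilnius, EPINARBO, UCM]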
for wg, node_names in all_nodes.items():
for node_name in node_names:
node_id = database.create_or_retrieve_node_id(wg, node_name)
# Ingest the masterlist of spectra.
N_ingested = database.ingest_spectra_masterlist(masterlist_filename)
# Ingest results from the nodes.
for filename in glob("node-results/stellar-parameters/WG??/GES_iDR5_WG??_*.fits"):
N = database.ingest_node_results(filename, extension=1)
logger.info("Ingested {} results from {}".format(N, filename))
# Ingest additional photometric temperatures from Laura Magrini.
#for filename in glob("fits-templates/additional-tphots-magrini/*.fits"):
# database.ingest_magrini_photometric_temperatures(filename)
# Ingest previous data release for comparison
database.ingest_recommended_results_from_previous_dr(
"node-results/GES_iDR4_WG15_Recommended_Abundances_20042016.fits")
database.connection.commit()
logger.info("Ingestion complete.")
# Note that there is an issue with the CNAMEs of UVES benchmark spectra. There
# are two CNAME entries for alf_Cen_A, two for alf_Cet, and two for GJ880.
database.execute(
""" UPDATE spectra
SET cname = '14392972-6049560'
WHERE ges_fld like 'alf_Cen_A%'""")
database.execute(
""" UPDATE spectra
SET cname = '03021676+0405219'
WHERE ges_fld like 'alf_Cet%'""")
database.execute(
""" UPDATE spectra
SET cname = '22563384+1633085'
WHERE ges_fld like 'GJ880%'""")
# Fix the MH/FEH issue:
database.execute(
""" UPDATE results
SET feh = mh
WHERE feh = 'NaN'
AND mh <> 'NaN';""")
database.connection.commit() | 2,824 | 25.401869 | 82 | py |
ges-idr5 | ges-idr5-master/scripts/update_benchmarks.py |
"""
Update the benchmark parameters to include some values -- even if they are
uncertain -- and to include a less-biased value for HD 140283.
"""
import numpy as np
from astropy.table import Table
input_path = "../fits-templates/benchmarks/GES_iDR5_FGKMCoolWarm_Benchmarks_AcceptedParams_01082016.fits"
output_path = "../fits-templates/benchmarks/GES_iDR5_FGKM_Benchmarks_ARC_29092016.fits"
overwrite = False
benchmarks = Table.read(input_path)
print("Read in benchmarks from {}".format(input_path))
updated_values = {
"HD140283": {
"TEFF": 5700,
"E_TEFF": 200,
"LOGG": 3.58,
"E_LOGG": 0.11,
"FEH": -2.43,
},
"HD220009": {
"TEFF": 4217,
"E_TEFF": 60,
"LOGG": 1.43,
"E_LOGG": 0.12,
"FEH": -0.75,
}
}
for ges_fld, params in updated_values.items():
match = np.array([each.strip() == ges_fld for each in benchmarks["GES_FLD"]])
for key, value in params.items():
benchmarks[key][match] = value
print("Updated {} = {} for {}".format(key, value, ges_fld))
benchmarks.write(output_path, overwrite=overwrite)
print("Written new file to {}".format(output_path))
| 1,167 | 24.391304 | 105 | py |
ges-idr5 | ges-idr5-master/scripts/ship_wg11.py |
"""
Ship a WG11 recommended SP product.
"""
import yaml
import logging
from code import GESDatabase, ship
# Initialize logging.
logger = logging.getLogger("ges")
# Create a database object.
db_filename = "db.yaml"
with open(db_filename, "r") as fp:
credentials = yaml.load(fp)
database = GESDatabase(**credentials)
# Produce a per CNAME file.
ship.wg_recommended_sp_template(database,
"fits-templates/recommended-templates/GES_iDR5_WG11_RecommendedTemplate_16072016.fits",
"outputs/GES_iDR5_WG11_Recommended.fits",
11, overwrite=True)
# Produce a PER-SETUP file even though they will be duplicates.
ship.wg_recommended_sp_template(database,
"fits-templates/recommended-templates/GES_iDR5_WG11_RecommendedTemplate_16072016_PERSETUP.fits",
"outputs/GES_iDR5_WG11_Recommended_PERSETUP.fits",
11, overwrite=True) | 845 | 25.4375 | 100 | py |
ges-idr5 | ges-idr5-master/scripts/homogenise_all_wgs.py |
"""
Homogenisation models.
"""
import yaml
import logging
import numpy as np
import os
from astropy.table import Table
from collections import OrderedDict
from code import GESDatabase
from code.model.ensemble import SingleParameterEnsembleModel
# Initialize logging.
logger = logging.getLogger("ges")
# Create a database object.
db_filename = "db.yaml"
with open(db_filename, "r") as fp:
credentials = yaml.load(fp)
database = GESDatabase(**credentials)
# Load the "benchmarks"
# Only use "benchmarks" with TEFF < 8000 K
benchmarks = Table.read("fits-templates/benchmarks/GES_iDR5_FGKM_Benchmarks_ARC_29092016.fits")
benchmarks = benchmarks[benchmarks["TEFF"] < 8000]
benchmarks["E_FEH"] = 0.10
model_paths = "homogenisation-wg{wg}-{parameter}.model"
wgs = (11, )
parameter_scales = OrderedDict([
("teff", 250),
("logg", 0.25),
("feh", 0.25)
])
sample_kwds = dict(chains=4, iter=2000)
benchmarks = benchmarks[(benchmarks["FEH"] > -2) * (benchmarks["TEFF"] > 4000)]
models = {}
for wg in wgs:
models[wg] = {}
for parameter, scale in parameter_scales.items():
model_path = model_paths.format(wg=wg, parameter=parameter)
if os.path.exists(model_path):
model = SingleParameterEnsembleModel.read(model_path, database)
else:
model = SingleParameterEnsembleModel(database, wg, parameter, benchmarks)
data, metadata = model._prepare_data(default_sigma_calibrator=scale)
init = {
"truths": data["mu_calibrator"],
"var_sys_estimator": (scale/5.)**2 * np.ones(data["N_estimators"]),
"alpha": np.mean([data["lower_alpha"], data["upper_alpha"]]) \
* np.ones(data["N_estimators"]),
"rho_estimators": np.zeros(metadata["N_pairwise_estimators"]),
"c0_estimators": np.zeros(data["N_estimators"])
}
op_params = model.optimize(data, init=init, iter=100000)
fit = model.sample(data, init=op_params, **sample_kwds)
model.write(model_path, overwrite=True)
fig = model.plot_node_correlations()
fig.savefig("wg{}-{}-node-correlations.pdf".format(wg, parameter))
fig = model.plot_node_uncertainty_with_snr(
show_data_points=False, legend_kwds=dict(ncol=2))
fig.savefig("wg{}-{}-node-uncertainties.pdf".format(wg, parameter))
continue
# Homogenise this parameter for all stars.
model.homogenise_all_stars(update_database=True)
# Keep the model.
models[wg][parameter] = model
# TODO:
# - VROT
# - VEL
# - FLAGS
# - XI
# Some notes:
# - If a node provides `mh` or `feh`, we can treat them as the same (see `scripts/setup_db.py`)
| 2,757 | 27.142857 | 95 | py |
ges-idr5 | ges-idr5-master/code/db.py |
""" A convenience object for databases. """
import logging
import numpy as np
import psycopg2 as pg
from astropy.table import Table
from collections import Counter
from decimal import Decimal
from time import time
logger = logging.getLogger("ges")
class Database(object):
def __init__(self, **kwargs):
self.connection = pg.connect(**kwargs)
return None
def update(self, query, values=None, full_output=False, **kwargs):
"""
Update the database with a SQL query.
:param query:
The SQL query to execute.
:type query:
str
:param values: [optional]
Values to use when formatting the SQL string.
:type values:
tuple or dict
"""
logger.debug("Running SQL update query: {}".format(query))
names, results, cursor = self.execute(query, values, **kwargs)
return (names, results, cursor) if full_output else cursor.rowcount
def retrieve(self, query, values=None, full_output=False, **kwargs):
"""
Retrieve some data from the database.
:param query:
The SQL query to execute.
:type query:
str
:param values: [optional]
Values to use when formatting the SQL string.
:type values:
tuple or dict
"""
names, results, cursor = self.execute(query, values, fetch=True,
**kwargs)
return (names, results, cursor.rowcount) if full_output else results
def execute(self, query, values=None, fetch=False, **kwargs):
"""
Execute some SQL from the database.
:param query:
The SQL query to execute.
:type query:
str
:param values: [optional]
Values to use when formatting the SQL string.
:type values:
tuple or dict
"""
t_init = time()
try:
with self.connection.cursor() as cursor:
cursor.execute(query, values)
if fetch: results = cursor.fetchall()
else: results = None
except pg.ProgrammingError:
logger.exception("SQL query failed: {0}, {1}".format(query, values))
cursor.close()
raise
else:
taken = 1e3 * (time() - t_init)
try:
logger.debug("Took {0:.0f} ms for SQL query {1}".format(taken,
" ".join((query % values).split())))
except (TypeError, ValueError):
logger.debug("Took {0:.0f} ms for SQL query {1} with values {2}"\
.format(taken, query, values))
names = None if cursor.description is None \
else tuple([column[0] for column in cursor.description])
return (names, results, cursor)
def retrieve_table(self, query, values=None, prefixes=True, **kwargs):
"""
Retrieve a named table from a database.
:param query:
The SQL query to execute.
:type query:
str
:param values: [optional]
Values to use when formatting the SQL string.
:type values:
tuple or dict
:param prefixes: [optional]
Prefix duplicate column names with the given tuple.
:type prefixes:
tuple of str
"""
names, rows, rowcount = self.retrieve(query, values, full_output=True)
# TODO:
if len(rows) == 0:
return None
counted_names = Counter(names)
duplicates = [k for k, v in counted_names.items() if v > 1]
if duplicates and prefixes:
use_prefixes = map(str, range(max(counted_names.values()))) \
if isinstance(prefixes, bool) else prefixes
# Put the prefixes and names in the right order & format for joining
prefixes = [
([], [use_prefixes[names[:i].count(n)]])[n in duplicates] \
for i, n in enumerate(names)]
names = [[n] for n in names]
names = [".".join(p + n) for p, n in zip(prefixes, names)]
# Guess data types.
dtype = kwargs.pop("dtype", None)
if dtype is None:
dtype = []
for i, name in enumerate(names):
if isinstance(rows[0][i], Decimal):
dtype.append(float)
else:
dtype.append(type(rows[0][i]))
return Table(rows=rows, names=names, dtype=dtype)
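    # Illustrative usage of the retrieval methods above (not executed here):
    #   rows = database.retrieve("SELECT id, wg, name FROM nodes")
    #   table = database.retrieve_table(
    #       "SELECT cname, teff FROM results WHERE node_id = %s", (node_id, ))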
def node_id(self, description):
"""
Return the identifer for a node based on its description.
If the `description` given is an integer, no search will occur and that
value will be returned. If a string-like object is provided in the
`description`, then the expected format is 'wg.node_query', or just
'node_query' if the node only applies to one working group.
:param description:
The search term to use to identify the node.
"""
try:
node = int(description)
except:
# If '.' is in the node descriptor,
# Split the node descriptor by '.'
if "." in description:
wg, node_description = description.split(".")
else:
raise NotImplementedError
# Search by the name, filter by wg?
# TODO
else:
return node
| 5,523 | 27.183673 | 81 | py |
ges-idr5 | ges-idr5-master/code/ship.py |
""" Fucking :shipit: """
import logging
import numpy as np
from astropy.io import fits
from astropy.table import Table
from datetime import datetime
import utils
from db import Database
logger = logging.getLogger("ges")
def wg_recommended_sp_template(database, input_path, output_path, wg,
ext=-1, overwrite=False, **kwargs):
"""
Produce a WG-recommended file of stellar parameters from the template
provided (on a per CNAME) basis.
:param database:
A database to connect to.
:param input_path:
The local file path of the WG-recommended template file.
:param output_path:
The local file path where to save the WG-recommended file to.
:param wg:
The working group.
:param ext: [optional]
The extension of the template to use for updating.
:param overwrite: [optional]
Overwrite the `output_path` if it already exists.
"""
columns = [
"teff",
"e_teff",
"e_pos_teff",
"e_neg_teff",
"nn_nodes_teff",
"nn_spectra_teff",
"enn_teff",
"nne_teff",
"sys_err_teff",
"logg",
"e_logg",
"e_pos_logg",
"e_neg_logg",
"nn_nodes_logg",
"nn_spectra_logg",
"enn_logg",
"nne_logg",
"sys_err_logg",
"lim_logg",
"feh",
"e_feh",
"e_pos_feh",
"e_neg_feh",
"nn_nodes_feh",
"nn_spectra_feh",
"enn_feh",
"nne_feh",
"sys_err_feh",
"xi",
"e_xi",
"e_pos_xi",
"e_neg_xi",
"nn_nodes_xi",
"nn_spectra_xi",
"enn_xi",
"nne_xi",
"mh",
"e_mh",
"e_pos_mh",
"e_neg_mh",
"nn_nodes_mh",
"nn_spectra_mh",
"enn_mh",
"nne_mh",
"alpha_fe",
"e_alpha_fe",
"e_pos_alpha_fe",
"e_neg_alpha_fe",
"nn_nodes_alpha_fe",
"nn_spectra_alpha_fe",
"enn_alpha_fe",
"nne_alpha_fe",
"tech",
"remark",
"peculi"
]
translations = {
"nn_nodes_teff": "NN_TEFF",
"nn_nodes_logg": "NN_LOGG",
"nn_nodes_feh": "NN_FEH",
"nn_nodes_mh": "NN_MH",
"nn_nodes_xi": "NN_XI",
"nn_nodes_alpha_fe": "NN_ALPHA_FE"
}
default_translator = lambda x: x.upper()
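    # Database column names map onto FITS column names: by default the FITS
    # name is just the upper-cased database name, except for the NN_NODES_*
    # columns, which map onto the template's NN_* columns.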
updated_data = {}
for column in columns:
updated_data[column] = []
image = fits.open(input_path)
if kwargs.pop("skip_verify", False):
if image[0].header["NODE1"].strip() != "WG{}".format(wg):
raise ValueError(
"Template expected {} in NODE1 keyword, but WG{} given. "
"Use skip_verify=False to override this error.".format(
image[0].header["NODE1"].strip(), wg))
# For each cname in the template, fill in the entries from the database.
N = len(image[ext].data)
for i, cname in enumerate(image[ext].data["CNAME"]):
logger.info("At row {}/{}: {}".format(i + 1, N, cname))
record = database.retrieve_table(
""" SELECT {columns}
FROM wg_recommended_results
WHERE wg = '{wg}'
AND cname = '{cname}'
""".format(wg=wg, cname=cname, columns=", ".join(columns)))
assert record is not None
assert len(record) == 1
for column in columns:
updated_data[column].append(record[column][0])
J = len(columns)
for j, column in enumerate(columns):
# Translate the column.
fits_column = translations.get(column, None)
if fits_column is None:
fits_column = default_translator(column)
logger.info("Updating column {}/{} in template: {} -> {}".format(
j + 1, J, column, fits_column))
if fits_column not in image[ext].data.dtype.names:
logger.warn(
"Column '{}' (from {}) not in template. Skipping..".format(
fits_column, column))
continue
try:
image[ext].data[fits_column] = updated_data[column]
except:
logger.exception(
"Could not update column '{}':".format(fits_column))
continue
# Update the release date.
now = datetime.now()
image[0].header["DATETAB"] = "{year}-{month}-{day}".format(
year=now.year, month=now.month, day=now.day)
image.writeto(output_path, clobber=overwrite)
logger.info("Written WG{}-recommended file to {}".format(wg, output_path))
return None | 4,674 | 24.972222 | 78 | py |
ges-idr5 | ges-idr5-master/code/utils.py |
""" General utility functions. """
import os
from numpy import isfinite
def safe_int(x, fill_value=-1):
try:
return int(x)
except:
return fill_value
def wg_as_int(wg):
return int(str(wg).strip().lower().lstrip("wg"))
def parse_node_filename(filename):
# GES_iDR5_WG10_NodeTemplate_QC_15052016
_ = os.path.basename(filename).split("_")
wg, node_name = _[2:4]
if node_name.lower().endswith(".fits"):
node_name = node_name[:-5]
wg = wg_as_int(wg)
return (wg, node_name)
def mh_or_feh(table):
feh = sum(isfinite(table["feh"]))
mh = sum(isfinite(table["mh"]))
return "feh" if feh > mh else "mh"
| 675 | 17.777778 | 52 | py |
ges-idr5 | ges-idr5-master/code/__init__.py |
import logging
logger = logging.getLogger("ges")
logger.setLevel(logging.INFO)
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter(
"%(asctime)s [%(levelname)-8s] %(message)s"))
logger.addHandler(handler)
from . import model
from .gesdb import GESDatabase
| 285 | 19.428571 | 49 | py |
ges-idr5 | ges-idr5-master/code/gesdb.py |
""" A specialized database class for Gaia-ESO Survey data releases. """
import logging
import numpy as np
from astropy.io import fits
from astropy.table import Table
import utils
from db import Database
logger = logging.getLogger("ges")
class GESDatabase(Database):
def __init__(self, *args, **kwargs):
super(GESDatabase, self).__init__(*args, **kwargs)
def create_or_retrieve_node_id(self, wg, node_name):
"""
Reteive a unique identifier for a node, or create one.
:param wg:
The working group (e.g., 10).
:param node_name:
The name of the node.
"""
try:
return self.retrieve_node_id(wg, node_name)
except UnknownNodeError:
return self._create_node(wg, node_name)
def retrieve_node_id(self, wg, node_name):
"""
Retrieve a unique identifier for a node.
:param wg:
            The working group (e.g., 10).
:param node_name:
The name of the node.
:raises UnknownNodeError:
If no node exists.
:returns:
The identifier.
"""
result = self.retrieve("""SELECT id FROM nodes
WHERE wg = %s AND lower(name) = %s""",
(utils.wg_as_int(wg), node_name.strip().lower(), ))
if not result:
raise UnknownNodeError("node does not exist")
else:
return int(result[0][0])
def _create_node(self, wg, node_name):
"""
Create a node.
:param wg:
The working group (e.g., 10).
:param node_name:
The name of the node.
"""
wg = utils.wg_as_int(wg)
node_name = node_name.strip()
result = self.execute(
"""INSERT INTO nodes (wg, name) VALUES (%s, %s) RETURNING id""",
(wg, node_name), fetch=True)
node_id = int(result[1][0][0])
logger.info("Created node '{}' in WG{} with id {}".format(
node_name, wg, node_id))
return node_id
def ingest_recommended_results_from_previous_dr(self, filename, extension=-1):
"""
Ingest recommended results from a node FITS file.
:param filename:
A node template file in FITS format.
:param extension: [optional]
The extension index to read from.
:returns:
The number of rows inserted.
"""
image = fits.open(filename)
data = image[extension].data
columns = ("cname", "ges_fld", "object", "filename", "ges_type",
"teff", "e_teff", "logg", "e_logg", "mh", "e_mh", "xi", "e_xi",
"peculi", "remark", "tech")
fits_format_adapters = {
"teff": float,
"e_teff": float,
"logg": float,
"e_logg": float,
"mh": float,
"e_mh": float,
"xi": float,
"e_xi": float,
}
N = len(data)
for i, row in enumerate(data):
logger.info("Ingesting recommended row {}/{}".format(i, N))
row_data = {}
for column in columns:
value = row[column]
f = fits_format_adapters.get(column, None)
if f is not None:
value = f(value)
row_data[column] = value
self.execute(
"INSERT INTO recommended_idr4 ({}) VALUES ({})".format(
", ".join(columns),
", ".join(["%({})s".format(column) for column in columns])),
row_data)
self.connection.commit()
return N
def ingest_node_results(self, filename, extension=-1):
"""
Ingest results from a node FITS file.
:param filename:
A node template file in FITS format.
:param extension: [optional]
The extension index to read from.
:returns:
The number of rows inserted.
"""
# Which node is this?
wg, node_name = utils.parse_node_filename(filename)
node_id = self.retrieve_node_id(wg, node_name)
# Start ingesting results.
data = Table.read(filename, hdu=extension)
default_row = {"node_id": node_id}
columns = (
"node_id", "cname", "filename", "setup", "snr",
"vel", "e_vel", "vrot", "e_vrot",
"teff", "e_teff", "nn_teff", "enn_teff", "nne_teff", "sys_err_teff",
"logg", "e_logg", "nn_logg", "enn_logg", "nne_logg", "sys_err_logg", "lim_logg",
"feh", "e_feh", "nn_feh", "enn_feh", "nne_feh", "sys_err_feh",
"xi", "e_xi", "nn_xi", "enn_xi", "nne_xi",
"mh", "e_mh", "nn_mh", "enn_mh", "nne_mh",
"alpha_fe", "e_alpha_fe", "nn_alpha_fe", "enn_alpha_fe", "nne_alpha_fe",
"vrad", "e_vrad", "vsini", "e_vsini",
"peculi", "remark", "tech")
fits_format_adapters = {
"snr": float,
"vel": float,
"e_vel": float,
"vrot": float,
"e_vrot": float,
"teff": float,
"e_teff": float,
"nn_teff": int,
"enn_teff": float,
"nne_teff": float,
"sys_err_teff": float,
"logg": float,
"e_logg": float,
"nn_logg": int,
"enn_logg": float,
"nne_logg": float,
"sys_err_logg": float,
"lim_logg": int,
"feh": float,
"e_feh": float,
"nn_feh": int,
"enn_feh": float,
"nne_feh": float,
"sys_err_feh": float,
"xi": float,
"e_xi": float,
"nn_xi": int,
"enn_xi": float,
"nne_xi": float,
"mh": float,
"e_mh": float,
"nn_mh": int,
"enn_mh": float,
"nne_mh": float,
"alpha_fe": float,
"e_alpha_fe": float,
"nn_alpha_fe": int,
"enn_alpha_fe": float,
"nne_alpha_fe": float,
"vrad": float,
"e_vrad": float,
"vsini": float,
"e_vsini": float,
}
# Update formats, as necessary.
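        # Re-cast each column listed in fits_format_adapters to a plain
        # int/float dtype: the converted values are written to a temporary
        # *_NEW_DTYPE column, which then replaces the original column.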
tmp_key_format = "{}_NEW_DTYPE"
for key, new_dtype in fits_format_adapters.items():
data[tmp_key_format.format(key.upper())] = np.array(data[key.upper()], dtype=new_dtype)
del data[key.upper()]
data.rename_column(tmp_key_format.format(key.upper()), key.upper())
N = len(data)
for i, row in enumerate(data):
logger.info("Ingesting row {}/{} from node WG{}: {}".format(i, N,
wg, node_name))
row_data = {}
row_data.update(default_row)
row_data.update(dict(zip(columns[1:], [row[c.upper()] for c in columns[1:]])))
self.execute(
"INSERT INTO results ({}) VALUES ({})".format(
", ".join(columns),
", ".join(["%({})s".format(column) for column in columns])),
row_data)
self.connection.commit()
return N
def ingest_spectra_masterlist(self, filename, extension=-1):
"""
Ingest a master list of spectra from a FITS template file.
:param filename:
A FITS template file that contains the masterlist of all spectra.
:returns:
The number of rows inserted.
"""
image = fits.open(filename)
data = image[extension].data
# Create mapper between FITS and database columns.
columns = ("cname", "ges_fld", "object", "filename", "ges_type", "setup",
"wg", "instrument", "ra", "dec", "snr", "vel", "e_vel", "vrot",
"e_vrot", "teff_irfm", "e_teff_irfm", "peculi", "remark", "tech")
fits_column_adapters = {
"instrument": "instrume"
}
fits_format_adapters = {
"wg": utils.safe_int,
"ra": float,
"dec": float,
"snr": float,
"vel": float,
"e_vel": float,
"vrot": float,
"e_vrot": float,
"teff_irfm": float,
"e_teff_irfm": float,
}
N = len(data)
for i, row in enumerate(data):
logger.info("Inserting row {}/{}".format(i, N))
values = []
for col in columns:
use_col = fits_column_adapters.get(col, col)
value = row[use_col]
# Formatting.
if col in fits_format_adapters:
f = fits_format_adapters[col]
value = f(value)
values.append(value)
self.execute(
"INSERT INTO spectra ({}) VALUES ({})".format(
", ".join(columns), ", ".join(["%s"] * len(columns))),
values)
self.connection.commit()
return N
def ingest_magrini_photometric_temperatures(self, filename, extension=-1):
"""
Ingest a FITS table containing CNAMEs and photometric temperatures.
:param filename:
A FITS table.
:param extension: [optional]
The HDU extension that contains the photometric temperatures.
"""
image = fits.open(filename)
data = image[extension].data
# The columns might be different, but in general if we lowerize them all
# then we are looking for:
# ('CNAME_2', 'GES_FLD', 'teffjk', 'jk', 'FILENAME')
cname_col, teff_col = (data.dtype.names[0], "teffjk")
# Update the value in the spectra table, unless it already exists.
N = 0
for row in data:
result = self.execute(
""" UPDATE spectra
SET teff_irfm = %s
WHERE cname = %s AND
teff_irfm = 'NaN'""",
(float(row[teff_col]), row[cname_col], ))
return True
class UnknownNodeError(BaseException):
pass
| 10,196 | 28.903226 | 99 | py |
ges-idr5 | ges-idr5-master/code/summary.py |
""" Produce summary tables. """
import numpy as np
from collections import Counter
from astropy.table import Table
import utils
def stellar_parameter_range(database, wg=None):
"""
Produce a summary table outlining the range of stellar parameters reported.
:param database:
A database for transactions.
:param wg: [optional]
A specific working group.
"""
if wg is None:
nodes = database.retrieve("SELECT id, wg, name FROM nodes")
else:
nodes = database.retrieve("SELECT id, wg, name FROM nodes WHERE wg = %s",
(utils.wg_as_int(wg), ))
rows = []
for node_id, node_wg, node_name in nodes:
results = database.retrieve_table(
"""SELECT teff, e_teff, logg, e_logg, mh, e_mh, xi, e_xi
FROM results WHERE node_id = %s""", (node_id, ))
name = "WG{}/{}".format(node_wg, node_name) if wg is None else node_name
if results is None or len(results) == 0:
rows.append([name] + [np.nan] * 16)
continue
row = [name]
for column in ("teff", "logg", "mh", "xi"):
for column in [column, "e_{}".format(column)]:
row.extend([
np.nanmin(results[column]),
np.nanmax(results[column])
])
rows.append(row)
return Table(rows=rows, names=("Name",
"Min. TEFF", "Max. TEFF", "Min. E_TEFF", "Max. E_TEFF",
"Min. LOGG", "Max. LOGG", "Min. E_LOGG", "Max. E_LOGG",
"Min. MH", "Max. MH", "Min. E_MH", "Max. E_MH",
"Min. XI", "Max. XI", "Min. E_XI", "Max. E_XI"))
def stellar_parameter_summary(database, wg=None):
"""
Produce a summary table outlining the number of valid results produced by
different nodes.
:param database:
A database for transactions.
:param wg: [optional]
The working group.
"""
# Get node ids.
if wg is None:
nodes = database.retrieve("""SELECT id, wg, name FROM nodes""")
else:
nodes = database.retrieve(
"""SELECT id, wg, name FROM nodes WHERE wg = %s""",
(utils.wg_as_int(wg), ))
rows = []
for node_id, node_wg, node_name in nodes:
results = database.retrieve_table(
"""SELECT teff, e_teff, logg, e_logg, mh, e_mh, xi, e_xi,
tech, remark, peculi
FROM results
WHERE node_id = %s""", (node_id, ))
name = "WG{}/{}".format(node_wg, node_name) if wg is None else node_name
if results is None or len(results) == 0:
rows.append([name] + [0] * (4 * 2 + 3 + 1))
continue
N = len(results)
valid = []
for column in ("teff", "logg", "mh", "xi"):
valid.extend([
np.isfinite(results[column]).sum(),
np.isfinite(results["e_{}".format(column)]).sum()
])
num_flags = []
for column in ("tech", "remark", "peculi"):
num_flags.append(np.sum(results[column] != ""))
row = [name, N]
row.extend(valid)
row.extend(num_flags)
rows.append(row)
return Table(rows=rows, names=("Node", "Number of results", "Valid TEFF",
"Valid E_TEFF", "Valid LOGG", "Valid E_LOGG", "Valid MH", "Valid E_MH",
"Valid XI", "Valid E_XI", "TECH entries", "REMARK entries", "PECULI entries"))
def tech_flags(database, wg, node_name, column="TECH"):
"""
Produce a summary table outlining the number of times certain flags were
used.
:param database:
A database for transactions.
:param wg:
The working group.
:param node_name:
The name of the node to summarize results for.
"""
# FLAG / TOTAL_COUNT
node_id = database.retrieve_node_id(wg, node_name)
flags = database.retrieve_table(
"""SELECT {} FROM results WHERE node_id = %s""".format(column),
(node_id, ))
if flags is None: return None
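    # Flag entries are "|"-separated lists, so join all rows and split on "|"
    # to count how often each individual flag was used.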
    counts = Counter([each.strip() \
        for each in "|".join(flags[column.lower()]).split("|") if each.strip()])
rows = sorted(counts.iteritems(), key=lambda (k, v): v)[::-1]
return Table(rows=rows, names=("{} FLAG".format(column.upper()), "N"))
| 4,274 | 28.895105 | 86 | py |
ges-idr5 | ges-idr5-master/code/plot/nodes.py |
import numpy as np
import os
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
from matplotlib import gridspec
import corner
import utils
__all__ = ["compare_nodes_within_wg", "compare_to_photometric_teff",
"compare_to_previous_dr"]
def compare_to_previous_dr(database, wg, node_name, parameter):
"""
Compare the reported node parameter against the recommended results from
the previous data release.
:param database:
A database for the connection.
:param wg:
The working group.
:param node_name:
The name of the node.
:param parameter:
The parameter to compare against.
"""
parameter = str(parameter).lower().strip()
if parameter not in ("teff", "logg", "mh", "xi"):
raise ValueError("parameter '{}' not recognised".format(parameter))
node_id = database.retrieve_node_id(wg, node_name)
results = database.retrieve_table(
""" SELECT DISTINCT ON (r.cname) r.cname,
r.{0}, r.e_{0},
p.{0} as recommended_{0}, p.e_{0} as e_recommended_{0}
FROM results r, recommended_idr4 p
WHERE r.cname = p.cname
AND r.node_id = %s""".format(parameter), (node_id, ))
labels = {
"teff": r"$T_{\rm eff}$",
"logg": r"$\log{g}$",
"mh": r"$[{\rm Fe}/{\rm H}]$",
"xi": r"$\xi$"
}
gs = gridspec.GridSpec(2, 1, height_ratios=[1, 4])
fig = plt.figure()
ax_diff = plt.subplot(gs[0])
ax_main = plt.subplot(gs[1])
y, x = (results[parameter], results["recommended_{}".format(parameter)])
yerr, xerr = (
results["e_{}".format(parameter)],
results["e_recommended_{}".format(parameter)]
)
ax_main.scatter(x, y, facecolor="#666666")
ax_main.errorbar(x, y, xerr=xerr, yerr=yerr,
fmt=None, ecolor="#666666", alpha=0.5, zorder=-1)
# Propagate nans.
_ = x * y
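    # `_` is NaN wherever either x or y is NaN, so _/x and _/y recover y and x
    # with the union of NaNs applied; the limits below then only span points
    # that have both a node value and an iDR4 recommended value.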
limits = [
np.nanmin(np.hstack([_/x, _/y]).flatten()),
np.nanmax(np.hstack([_/x, _/y]).flatten())
]
_ = np.ptp(limits)
limits = [limits[0] - 0.05 * _, limits[1] + 0.05 * _]
ax_main.plot(limits, limits, linestyle=":", c="#666666", zorder=-100)
ax_main.set_xlim(limits)
ax_main.set_ylim(limits)
ax_main.set_xlabel("{} (iDR4)".format(labels.get(parameter, parameter)))
ax_main.set_ylabel("{} ({})".format(labels.get(parameter, parameter),
node_name))
y = y - x
yerr = np.sqrt(xerr**2 + yerr**2)
ax_diff.scatter(x, y, facecolor="#666666")
ax_diff.errorbar(x, y, xerr=xerr, yerr=yerr,
fmt=None, ecolor="#666666", alpha=0.5, zorder=-1)
ax_diff.set_xlim(limits)
ax_diff.set_xticklabels([])
ax_diff.set_ylabel(r"$\Delta{}$" + labels.get(parameter, parameter))
limit = np.abs(ax_diff.get_ylim()).max()
ax_diff.set_ylim(-limit, +limit)
ax_diff.axhline(0, linestyle=":", c="#666666", zorder=-100)
ax_diff.yaxis.set_major_locator(MaxNLocator(3))
return fig
def compare_to_photometric_teff(database, wg, node_name):
"""
Compare the reported temperatures against the photometric temperatures.
:param database:
A database for connections.
:param wg:
The working group.
:param node_name:
The name of the node.
"""
node_id = database.retrieve_node_id(wg, node_name)
results = database.retrieve_table(
""" SELECT DISTINCT ON (r.cname) r.cname, r.filename,
r.teff, r.e_teff, s.teff_irfm, s.e_teff_irfm
FROM results r, spectra s
WHERE r.cname = s.cname
AND r.node_id = %s""", (node_id, ))
gs = gridspec.GridSpec(2, 1, height_ratios=[1, 4])
fig = plt.figure()
ax_diff = plt.subplot(gs[0])
ax_main = plt.subplot(gs[1])
ax_main.scatter(results["teff_irfm"], results["teff"],
facecolor="#666666")
ax_main.errorbar(results["teff_irfm"], results["teff"],
xerr=results["e_teff_irfm"], yerr=results["e_teff"],
fmt=None, ecolor="#666666", alpha=0.5, zorder=-1)
limits = ax_main.get_xlim()
ax_main.plot(limits, limits, linestyle=":", c="#666666", zorder=-100)
ax_main.set_xlim(limits)
ax_main.set_ylim(limits)
ax_main.set_xlabel(r"$T_{\rm eff,photometry}$ $({\rm K})$")
ax_main.set_ylabel(r"$T_{\rm eff}$ $({\rm K})$")
x, y = (results["teff_irfm"], results["teff"] - results["teff_irfm"])
ax_diff.scatter(x, y, facecolor="#666666")
ax_diff.errorbar(x, y,
xerr=results["e_teff_irfm"],
yerr=np.sqrt(results["e_teff"]**2 + results["e_teff_irfm"]**2),
fmt=None, ecolor="#666666", alpha=0.5, zorder=-1)
ax_diff.set_xlim(limits)
ax_diff.set_xticklabels([])
ax_diff.set_ylabel(r"$\Delta{}T_{\rm eff}$ $({\rm K})$")
limit = np.abs(ax_diff.get_ylim()).max()
ax_diff.set_ylim(-limit, +limit)
ax_diff.set_yticks([-limit, 0, +limit])
ax_diff.axhline(0, linestyle=":", c="#666666", zorder=-100)
fig.tight_layout()
return fig
def compare_nodes_within_wg(database, wg, parameter, extent=None,
show_one_to_one=True):
"""
Show a corner plot comparing all of the nodes within a single working group.
:param database:
A database for connections.
:param wg:
The working group.
:param parameter:
The parameter to compare.
:param extent: [optional]
The (lower, upper) limits to show in each axis.
:param show_one_to_one: [optional]
Show a dashed line marking the `y=x` relation.
"""
wg = utils.wg_as_int(wg)
parameter = parameter.lower()
if parameter not in ("teff", "logg", "mh", "xi"):
raise ValueError("parameter '{}' not recognised".format(parameter))
# Get the nodes.
nodes = database.retrieve_table("""SELECT id, name FROM nodes
WHERE wg = %s""", (wg, ))
N_nodes = len(nodes)
# Get the data.
results = database.retrieve_table(
"""SELECT r.node_id, r.cname, r.filename, r.{0}, r.e_{0}, r.feh, r.e_feh
FROM results r, nodes n
WHERE n.wg = %s and n.id = r.node_id
""".format(parameter),
(wg, ))
results = results.group_by("cname")
N_groups = len(results.groups)
data = np.nan * np.ones((N_nodes, N_groups))
error = np.nan * np.ones_like(data)
node_ids = np.sort(np.unique(results["node_id"]))
indices = results.groups.indices
for i, start_index in enumerate(indices[:-1]):
end_index = indices[i + 1]
for index in range(start_index, end_index):
j = np.where(results["node_id"][index] == node_ids)[0][0]
if parameter == "mh" and \
not np.any(np.isfinite(results["mh"][index])) \
and np.any(results["feh"][index]):
data[j, i] = results["feh"][index]
error[j, i] = results["e_feh"][index]
else:
data[j, i] = results[parameter][index]
error[j, i] = results["e_{}".format(parameter)][index]
# Remove axes without any data.
use = np.any(np.isfinite(data), axis=1)
data = data[use, :]
error = error[use, :]
node_ids = node_ids[use]
labels = [nodes["name"][nodes["id"] == _][0].strip() for _ in node_ids]
# How many nodes to plot?
K = data.shape[0]
    assert K > 1, "Need more than one node to compare against."
factor = 2.0 # size of one side of one panel
lbdim = 0.5 * factor # size of left/bottom margin
trdim = 0.5 * factor # size of top/right margin
whspace = 0.15 # w/hspace size
plotdim = factor * (K - 1.) + factor * (K - 2.) * whspace
dim = lbdim + plotdim + trdim
fig, axes = plt.subplots(K - 1, K - 1, figsize=(dim, dim))
if 3 > K:
axes = np.atleast_2d([axes])
lb = lbdim / dim
tr = (lbdim + plotdim) / dim
fig.subplots_adjust(left=lb, bottom=lb, right=tr, top=tr,
wspace=whspace, hspace=whspace)
# Match all of the nodes
lim = [np.inf, -np.inf]
for i in range(1, K):
for j in range(K):
if j >= i:
try:
ax = axes[i-1, j]
except IndexError:
continue
ax.set_visible(False)
ax.set_frame_on(False)
continue
if K > 1:
ax = axes[i-1, j]
else:
ax = axes
ax.scatter(data[j, :], data[i, :], facecolor="#666666")
ax.errorbar(data[j, :], data[i, :],
xerr=error[j, :], yerr=error[i, :],
fmt=None, ecolor="#666666", alpha=0.5, zorder=-1)
if ax.is_last_row():
ax.set_xlabel(labels[j])
else:
ax.set_xticklabels([])
if ax.is_first_col():
ax.set_ylabel(labels[i])
else:
ax.set_yticklabels([])
if extent is not None:
if show_one_to_one:
ax.plot(extent, extent, c="#666666", zorder=-100)
ax.set_xlim(extent)
ax.set_ylim(extent)
ax.xaxis.set_major_locator(MaxNLocator(5))
ax.yaxis.set_major_locator(MaxNLocator(5))
else:
lim[0] = np.nanmin([ax.get_xlim()[0], ax.get_ylim()[0], lim[0]])
lim[1] = np.nanmax([ax.get_xlim()[1], ax.get_ylim()[1], lim[1]])
# Ensure all have the same limits and ticks.
if extent is None:
for ax in np.array(axes).flatten():
if ax.get_visible():
if show_one_to_one:
ax.plot(lim, lim, c="#666666", zorder=-100)
ax.set_xlim(lim)
ax.set_ylim(lim)
ax.xaxis.set_major_locator(MaxNLocator(5))
ax.yaxis.set_major_locator(MaxNLocator(5))
return fig
| 9,929 | 30.324921 | 80 | py |
ges-idr5 | ges-idr5-master/code/plot/cluster.py |
import logging
import numpy as np
import os
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
from matplotlib import gridspec
import utils
__all__ = ["cluster", "param_vs_param"]
logger = logging.getLogger("ges.idr5.qc")
def param_vs_param(database, wg, node_name, ges_fld, reference_parameter,
vel_range=None):
"""
Show parameters vs parameters for stars in a cluster.
:param database:
A database for transactions.
:param wg:
The working group.
:param node_name:
The name of the node to show results for.
:param ges_fld:
The name of the cluster (as listed in GES_FLD).
:param reference_parameter:
The name of the reference parameter to show on the x-axis.
:param vel_range: [optional]
The (lower, upper) range of velocities (VEL) to select as bonafide
cluster members.
"""
vel_range = vel_range or (-1000, 1000)
parameters = ["teff", "logg", "mh", "xi"]
reference_parameter = reference_parameter.lower()
if reference_parameter not in parameters:
raise ValueError("unrecognized reference parameter")
parameters.remove(reference_parameter)
labels = {
"teff": r"$T_{\rm eff}$ $({\rm K})$",
"logg": r"$\log{g}$",
"feh": r"$[{\rm Fe}/{\rm H}]$",
"mh": r"$[{\rm M}/{\rm H}]$",
"xi": r"$\xi$ $({\rm km}$ ${\rm s}^{-1})$"
}
# Get the data.
node_id = database.retrieve_node_id(wg, node_name)
# Collect the results for this node.
results = database.retrieve_table(
""" SELECT DISTINCT ON (r.cname)
r.cname, r.node_id, r.setup, s.filename, s.vel, s.e_vel,
teff, e_teff, logg, e_logg, feh, e_feh, mh, e_mh, xi, e_xi
FROM results r, spectra s
WHERE r.cname = s.cname
AND TRIM(s.ges_fld) = %s
AND s.vel > %s
AND s.vel < %s
AND node_id = %s""",
(ges_fld, min(vel_range), max(vel_range), node_id))
if results is None:
return None
if reference_parameter.lower() == "mh":
reference_parameter = utils.mh_or_feh(results)
fig, axes = plt.subplots(3, 1)
for i, (ax, parameter) in enumerate(zip(axes, parameters)):
if ax.is_first_row():
ax.set_title("({0:.1f}, {1:.1f})".format(*vel_range))
if parameter == "mh":
parameter = utils.mh_or_feh(results)
ax.scatter(results[reference_parameter], results[parameter],
facecolor="#666666", zorder=1)
ax.errorbar(results[reference_parameter], results[parameter],
xerr=results["e_{}".format(reference_parameter)],
yerr=results["e_{}".format(parameter)],
fmt=None, ecolor="k", alpha=0.5, zorder=-1)
if ax.is_last_row():
ax.set_xlabel(labels.get(reference_parameter, reference_parameter))
else:
ax.set_xticklabels([])
ax.set_ylabel(labels.get(parameter, parameter))
return fig
def cluster(database, ges_fld, wg, node_name=None, vel_range=None,
isochrone_filename=None, limit_to_isochrone_range=False, ax=None,
no_tech_flags=False, show_legend=True, **kwargs):
"""
Show a Hertzsprung-Russell diagram for cluster stars, with isochrones
optionally shown.
:param database:
A database for transactions.
:param ges_fld:
The `GES_FLD` entry for the cluster.
:param wg:
The working group.
:param node_name: [optional]
The name of the node to show results for. If `None` is provided, then
recommended results will be shown for the specified working group `wg`.
:param ges_fld:
The name of the cluster (as listed in GES_FLD).
:param vel_range: [optional]
The (lower, upper) range of velocities (VEL) to select as bonafide
cluster members.
:param isochrone_filename: [optional]
The path of an isochrone_filename file to show for this cluster.
:param limit_to_isochrone_range: [optional]
:param ax: [optional]
Provide an axes to plot the HRD in.
:param no_tech_flags: [optional]
Require that all results have no TECH flags.
"""
vel_range = vel_range or (-1000, 1000)
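    # Membership is approximated by a cut on radial velocity: only spectra
    # with VEL inside vel_range are treated as cluster members (the default
    # range of (-1000, 1000) effectively selects everything).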
vmin = kwargs.pop("vmin", None)
vmax = kwargs.pop("vmax", None)
if node_name is not None:
# Get the data.
table = "results"
node_id = database.retrieve_node_id(wg, node_name)
sql_constraint = "AND r.node_id = '{}'".format(node_id)
else:
table = "wg_recommended_results"
sql_constraint = "AND r.wg = '{}'".format(wg)
if no_tech_flags:
tech_flag_constraint = " AND TRIM(r.TECH) = ''"
else:
tech_flag_constraint = ""
# Collect the results for this node.
results = database.retrieve_table(
""" SELECT DISTINCT ON (r.cname)
r.cname, s.vel, s.e_vel,
r.teff, r.e_teff, r.logg, r.e_logg, r.feh, r.e_feh, r.mh, r.e_mh
FROM {table} r, spectra s
WHERE r.cname = s.cname
AND TRIM(s.ges_fld) = '{ges_fld}'
AND s.vel > '{lower_vel}'
AND s.vel < '{upper_vel}'
{sql_constraint} {tech_flag_constraint}""".format(
table=table, ges_fld=ges_fld,
sql_constraint=sql_constraint,
tech_flag_constraint=tech_flag_constraint,
lower_vel=min(vel_range), upper_vel=max(vel_range)))
if results is None:
logger.warn("No cluster data on {} from {}/{}".format(
ges_fld, wg, node_name))
return None
# Draw velocity/metallicity. Highlight members.
gs = gridspec.GridSpec(3, 1, height_ratios=[1, 12, 4])
if ax is None:
fig = plt.figure()
ax_hrd = plt.subplot(gs[1])
ax_diff = plt.subplot(gs[2])
else:
ax_diff = None
ax_hrd = ax
fig = ax.figure
mh_col = utils.mh_or_feh(results)
# Draw HRD, distinguishing markers by setup.
scat = ax_hrd.scatter(results["teff"], results["logg"], c=results[mh_col],
label=None, cmap="viridis", vmin=vmin, vmax=vmax)
ax_hrd.errorbar(results["teff"], results["logg"],
xerr=results["e_teff"], yerr=results["e_logg"],
fmt=None, ecolor="k", alpha=0.5, zorder=-1, cmap="viridis",
label=None)
if isochrone_filename is not None:
isochrone = utils.parse_isochrone(isochrone_filename)
label, _ = os.path.splitext(os.path.basename(isochrone_filename))
label = label.replace("_", "-")
label = "{0} ({1:.1f}, {2:.1f})".format(label, vel_range[0], vel_range[1])
ax_hrd.plot(isochrone["teff"], isochrone["logg"],
c="k", lw=2, zorder=-1, label=label)
if limit_to_isochrone_range:
xlimits = (7000, 3000)
ylimits = (5.5, 0)
#xlimits = ax_hrd.get_xlim()[::-1]
#ylimits = ax_hrd.get_ylim()[::-1]
        # Compute each star's logg offset from the nearest point on the isochrone.
x = results["teff"]
y = []
for i in range(len(x)):
distance = np.sqrt((results["teff"][i] - isochrone["teff"])**2 \
+ (1000*(results["logg"][i] - isochrone["logg"]))**2)
index = np.argmin(distance)
y.append(results["logg"][i] - isochrone["logg"][index])
if not limit_to_isochrone_range:
xlimits = ax_hrd.get_xlim()[::-1]
ylimits = ax_hrd.get_ylim()[::-1]
if ax_diff is not None:
ax_diff.scatter(x, y, c=results[mh_col], cmap="viridis", s=50)
ax_diff.errorbar(x, y, xerr=results["e_teff"], yerr=results["e_logg"],
fmt=None, ecolor="k", alpha=0.5, zorder=-1)
ax_diff.axhline(0, linestyle=":", c="#666666", zorder=-2)
ax_diff.set_xlabel(r"$T_{\rm eff}$ $({\rm K})$")
ax_diff.set_ylabel(r"$\Delta\log{g}$")
#ax_diff.set_xlim(ax_hrd.get_xlim()[::-1])
ax_diff.set_xlim(xlimits)
ax_diff.set_ylim(ax_diff.get_ylim()[::-1])
ax_diff.xaxis.set_major_locator(MaxNLocator(5))
ax_diff.yaxis.set_major_locator(MaxNLocator(5))
ax_hrd.set_xticklabels([])
if show_legend:
ax_hrd.legend(frameon=False, loc="upper left")
else:
ax_hrd.set_xlabel(r"$T_{\rm eff}$ $({\rm K})$")
# Labels, etc.
ax_hrd.xaxis.set_major_locator(MaxNLocator(5))
ax_hrd.yaxis.set_major_locator(MaxNLocator(5))
ax_hrd.set_ylabel(r"$\log{g}$")
ax_hrd.set_xlim(xlimits)
ax_hrd.set_ylim(ylimits)
if ax is None:
cb = plt.colorbar(
cax=plt.subplot(gs[0]), mappable=scat, orientation='horizontal')
cb.ax.xaxis.set_ticks_position('top')
cb.ax.xaxis.set_label_position('top')
cb.set_label(r"$[{\rm Fe}/{\rm H}]$")
return fig | 9,020 | 30.876325 | 84 | py |
ges-idr5 | ges-idr5-master/code/plot/benchmarks.py |
import logging
import numpy as np
import os
import matplotlib.pyplot as plt
from astropy.table import Table
from matplotlib.ticker import MaxNLocator
from matplotlib import gridspec
import utils
__all__ = ["node_benchmark_performance", "wg_benchmark_performance"]
benchmark_filename = "fits-templates/benchmarks/GES_iDR5_FGKMCoolWarm_Benchmarks_AcceptedParams_01082016.fits"
logger = logging.getLogger("ges")
def node_benchmark_performance(database, wg, node_name, sort_by="TEFF",
ylims=None):
"""
Show a box-and-whisker plot for the benchmark parameters reported by a given
node.
:param database:
A database for transactions.
:param wg:
The working group.
:param node_name:
The node name.
:param ylims: [optional]
A dictionary containing the absolute y-limit for each label (teff, logg,
mh).
"""
width = 0.45
colors=("#000000", "g", "#0874D4")
node_id = database.retrieve_node_id(wg, node_name)
benchmarks = Table.read(benchmark_filename)
ok = np.isfinite(benchmarks["TEFF"] * benchmarks["LOGG"] * benchmarks["FEH"])
benchmarks = benchmarks[ok]
fig, axes = plt.subplots(3, figsize=(16.5, 7.5))
parameters = ("teff", "logg", "mh")
fits_parameters = ("TEFF", "LOGG", "FEH")
ylabels = {
"teff": r"$\Delta{}T_{\rm eff}$ $({\rm K})$",
"logg": r"$\Delta\log{g}$",
"mh": r"$\Delta[{\rm Fe}/{\rm H}]$",
"feh": r"$\Delta[{\rm Fe}/{\rm H}]$"
}
benchmarks.sort(sort_by)
N = 0
for i, (ax, parameter) in enumerate(zip(axes, parameters)):
ax.axhline(0, c="k", zorder=-1)
fits_parameter = fits_parameters[i]
x = benchmarks[fits_parameter]
diff = []
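        # Collect this node's estimates for each benchmark (one per spectrum)
        # and store the offsets from the accepted value; an empty list keeps
        # the box plot aligned when a benchmark has no finite results.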
for benchmark in benchmarks:
results = database.retrieve_table(
""" SELECT DISTINCT ON (r.filename)
r.cname, ges_fld, feh, e_feh, {0}, e_{0}
FROM results r, spectra s
WHERE r.cname = s.cname
AND r.node_id = %s
AND TRIM(s.ges_fld) = %s""".format(parameter),
(node_id, benchmark["GES_FLD"].strip()))
if results is None:
diff.append([])
else:
if parameter == "mh":
data = results[utils.mh_or_feh(results)]
else:
data = results[parameter]
data -= benchmark[fits_parameter]
if np.any(np.isfinite(data)):
diff.append(data[np.isfinite(data)])
else:
diff.append([])
# Show box-and-whisker plot.
N += np.hstack(diff).size
bp = ax.boxplot(diff, widths=width, patch_artist=True)
ax.set_xlim(-0.5, len(benchmarks) + 0.5)
        # CHECKTHIS
ax.set_ylabel(ylabels.get(parameter))
        ylim = (ylims or {}).get(parameter, None)
if ylim is not None:
ax.set_ylim(-ylim, ylim)
# Put numbers.
if ax.is_last_row():
y_loc = ax.get_ylim()[0] + 0.05 * np.ptp(ax.get_ylim())
for j, d in enumerate(diff):
ax.text(j + 1, y_loc, r"${}$".format(len(d)), color="k",
horizontalalignment="center")
# Show how many are outside of each frame.
if ylim:
y_loc = ax.get_ylim()[1] - 0.05 * np.ptp(ax.get_ylim())
for j, d in enumerate(diff):
N_bad = np.sum(np.abs(d) > ylim)
if N_bad == 0: continue
ax.text(j + 1, y_loc, r"${}$".format(N_bad), color="r",
horizontalalignment="center",
verticalalignment="top")
ax.set_xticklabels(
[each.strip().replace("_", "-") for each in benchmarks["GES_FLD"]])
[l.set_rotation(90) for l in ax.get_xticklabels()]
else:
ax.set_xticklabels([])
ax.yaxis.set_major_locator(MaxNLocator(5))
ax.spines["left"]._linewidth = 0.5
ax.spines["bottom"]._linewidth = 0.0
ax.spines["top"]._linewidth = 0.0
ax.spines["right"]._linewidth = 0.0
opposite_ax = ax.twinx()
opposite_ax.set_yticks([])
plt.setp(bp["medians"], color="k", linewidth=2)
plt.setp(bp["fliers"], color="k")
plt.setp(bp["caps"], visible=False)
plt.setp(bp["whiskers"], color="k", linestyle="solid", linewidth=0.5)
plt.setp(bp["boxes"], color="k", alpha=0.5, linewidth=1)
fig.tight_layout()
return fig if N > 0 else None
def wg_benchmark_performance(database, wg, truths, show_recommended=False,
sort_by=None, ylims=None, node_sql_constraint=None, skip_missing=True,
show_num_estimates=None, xlabels=None, ylabels=None, **kwargs):
"""
Show a box-and-whisker plot for the benchmark parameters reported by a given
node.
:param database:
A database for transactions.
:param wg:
The working group.
:param truths:
An :astropy.table.Table: with the accepted "truth" values for the
benchmark stars. A `GES_FLD` column is required for matching.
:param show_recommended: [optional]
Show the recommended values from the working group.
:param sort_by: [optional]
A column in the `truths` table to sort the benchmarks by.
:param ylims: [optional]
A dictionary containing the absolute y-limit for each label (teff, logg,
mh).
:param node_sql_constraint: [optional]
An additional SQL constraint to apply to the node query.
:param skip_missing: [optional]
Skip benchmarks with zero node estimates.
:param show_num_estimates: [optional]
Show the number of node estimates for each benchmark star. If `None` is
specified, then the number of estimates will be shown if no `ylims` are
given. If `ylims` are given (and there are likely data points outside
the range), then the number of estimates will only be shown if
`show_num_estimates = True`.
:param xlabels: [optional]
A dictionary containing labels for each benchmark value, where the
`GES_FLD` entries are keys and the labels to use are values.
:param ylabels: [optional]
A dictionary containing labels for each parameter, where the parameters
are keys and the labels to use are values.
"""
width = kwargs.get("width", 0.45)
colors = kwargs.get("colors", ("#000000", "#4daf4a", "#e41a1c"))
show_num_estimates = True if ylims is None or show_num_estimates else False
xlabels = xlabels or {}
ylims = ylims or {}
parameters = kwargs.get("parameters", ("TEFF", "LOGG", "FEH"))
fig, axes = plt.subplots(len(parameters), figsize=(16.5, 7.5))
default_ylabels = {
"TEFF": r"$\Delta{}T_{\rm eff}$ $({\rm K})$",
"LOGG": r"$\Delta\log{g}$",
"FEH": r"$\Delta[{\rm Fe}/{\rm H}]$"
}
default_ylabels.update(ylabels or {})
ylabels = default_ylabels
truths = truths.copy()
truths.sort(sort_by or parameters[0])
N = 0
diffs = {}
recommended_diffs = {}
#for i, (ax, parameter) in enumerate(zip(axes, parameters)):
for i, parameter in enumerate(parameters):
x = truths[parameter]
diffs[parameter] = []
recommended_diffs[parameter] = []
for benchmark in truths:
kwds = dict(
parameter=parameter, wg=wg, ges_fld=benchmark["GES_FLD"].strip(),
node_sql_constraint_str=" AND {}".format(node_sql_constraint) \
if node_sql_constraint is not None else "")
node_estimates = database.retrieve_table(
""" SELECT DISTINCT ON (results.id)
{parameter}, e_{parameter}
FROM results, nodes, spectra
WHERE nodes.wg = '{wg}'
AND results.node_id = nodes.id
AND results.cname = spectra.cname
AND TRIM(spectra.ges_fld) = '{ges_fld}'
AND {parameter} <> 'NaN'
{node_sql_constraint_str};""".format(**kwds))
if not np.isfinite(benchmark[parameter]):
logger.warn("No finite {} `truth` value for {}!".format(
parameter, benchmark["GES_FLD"].strip()))
if node_estimates is None:
# No node estimates for this object
diffs[parameter].append([])
else:
diffs[parameter].append(
node_estimates[parameter.lower()] - benchmark[parameter])
if show_recommended:
wg_recommended = database.retrieve_table(
""" SELECT {parameter}, e_{parameter},
e_pos_{parameter}, e_neg_{parameter}
FROM wg_recommended_results as wgr, spectra
WHERE wgr.wg = {wg}
AND wgr.cname = spectra.cname
AND TRIM(spectra.ges_fld) = '{ges_fld}';
""".format(**kwds))
if wg_recommended is None:
recommended_diffs[parameter].append([])
else:
recommended_diffs[parameter].append([
wg_recommended[parameter.lower()][0] - benchmark[parameter],
wg_recommended["e_{}".format(parameter.lower())][0]
])
# Show benchmarks with missing entries?
if skip_missing:
keep = np.sum(np.array([map(len, v) for v in diffs.values()]), axis=0) > 0
truths = truths[keep]
for parameter in parameters:
diffs[parameter] = np.array(diffs[parameter])[keep]
recommended_diffs[parameter] = np.array(recommended_diffs[parameter])[keep]
for i, (ax, parameter) in enumerate(zip(axes, parameters)):
ax.axhline(0, c="k", zorder=-1)
diff = diffs[parameter]
recommended_diff = recommended_diffs[parameter]
# Show box-and-whisker plot.
N += np.hstack(diff).size
bp = ax.boxplot(diff, widths=width, patch_artist=True)
# Show the recommended values.
if show_recommended:
for j, result in enumerate(recommended_diff):
if not result: continue
mu_diff, sigma = result
ax.fill_between(
[j + 1 - width/2, j + 1 + width/2],
[mu_diff - sigma, mu_diff - sigma],
[mu_diff + sigma, mu_diff + sigma],
linewidth=0, alpha=0.5, facecolor=colors[1])
ax.plot(
[j + 1 - width/2, j + 1 + width/2],
[mu_diff, mu_diff],
lw=2, color=colors[1])
ax.set_ylabel(ylabels.get(parameter))
ax.set_xlim(0.5, len(truths) + 0.5)
ylim = ylims.get(parameter, None)
if ylim is not None:
try:
ylim[0]
except:
ax.set_ylim(-ylim, ylim)
else:
ax.set_ylim(*ylim)
# Show how many are outside of each frame.
y_loc = ax.get_ylim()[1] - 0.075 * np.ptp(ax.get_ylim())
for j, d in enumerate(diff):
N_bad = np.sum(d > ax.get_ylim()[1])
if N_bad == 0: continue
ax.plot([j + 1], [y_loc], marker="^",
c=colors[2], linewidth=0)
ax.text(j + 1.4, y_loc, r"${}$".format(N_bad),
color=colors[2], fontsize=9, horizontalalignment="center",
verticalalignment="center")
y_loc = ax.get_ylim()[0] + 0.075 * np.ptp(ax.get_ylim())
for j, d in enumerate(diff):
N_bad = np.sum(d < ax.get_ylim()[0])
if N_bad == 0: continue
ax.plot([j + 1], [y_loc], marker="v",
c=colors[2], linewidth=0)
ax.text(j + 1.4, y_loc, r"${}$".format(N_bad),
color=colors[2], fontsize=9, horizontalalignment="center",
verticalalignment="center")
if show_num_estimates and ax.is_last_row():
y_loc = ax.get_ylim()[0] + 0.05 * np.ptp(ax.get_ylim())
for j, d in enumerate(diff):
ax.text(j + 1, y_loc, r"${}$".format(len(d)), color="k",
horizontalalignment="center")
if ax.is_last_row():
#ax.set_xticks(range(len(truths)))
ax.set_xticklabels([
xlabels.get(each, each.strip().replace("_", "-")) \
for each in truths["GES_FLD"]])
[l.set_rotation(90) for l in ax.get_xticklabels()]
else:
ax.set_xticklabels([])
ax.yaxis.set_major_locator(MaxNLocator(5))
ax.spines["left"]._linewidth = 0.5
ax.spines["bottom"]._linewidth = 0.0
ax.spines["top"]._linewidth = 0.0
ax.spines["right"]._linewidth = 0.0
opposite_ax = ax.twinx()
opposite_ax.set_yticks([])
plt.setp(bp["medians"], color=colors[0], linewidth=2)
plt.setp(bp["fliers"], color=colors[0])
plt.setp(bp["caps"], visible=False)
plt.setp(bp["boxes"], color=colors[0], alpha=0.5, linewidth=1)
plt.setp(
bp["whiskers"], color=colors[0], linestyle="solid", linewidth=0.5)
ax.xaxis.set_tick_params(width=0)
fig.tight_layout()
return fig if N > 0 else None
| 13,779 | 34.792208 | 110 | py |
ges-idr5 | ges-idr5-master/code/plot/utils.py |
import numpy as np
from astropy.table import Table
def parse_isochrone(filename):
"""
Parse a PARSEC or Siess isochrone.
:param filename:
The filename of the isochrone.
"""
is_parsec = "parsec" in filename.lower()
kwds = {
"format": "ascii"
}
if is_parsec:
kwds.update({
"data_start": 0,
"header_start": 13
})
isochrone = Table.read(filename, **kwds)
# Make columns common.
if is_parsec:
isochrone["logg"] = isochrone["logG"]
isochrone["teff"] = 10**isochrone["logTe"]
    else: # Siess
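        # A sketch of the relation used below: in solar units,
        #   log g = log10(M/M_sun) - 2 log10(R/R_sun) + log g_sun,
        # with log g_sun ~ 4.44.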
isochrone["logg"] \
= np.log10(isochrone["Mass"]) - np.log10(isochrone["R"]**2) + 4.44
isochrone["teff"] = isochrone["Teff"]
return isochrone
def wg_as_int(wg):
return int(str(wg).strip().lower().lstrip("wg"))
def mh_or_feh(table):
if table["mh"][0] is None:
return "feh"
elif table["feh"][0] is None:
return "mh"
feh = np.sum(np.isfinite(table["feh"]))
mh = np.sum(np.isfinite(table["mh"]))
return "feh" if feh > mh else "mh"
| 1,119 | 19.363636 | 78 | py |
ges-idr5 | ges-idr5-master/code/plot/flags.py |
import numpy as np
import matplotlib.pyplot as plt
from itertools import combinations
from matplotlib.colors import LogNorm
import scipy.sparse.csgraph
_WG14_NODE_IDS = {
"01": "Arcetri",
"02": "CAUP",
"03": "EPINARBO",
"04": "IAC",
"05": "Lumba",
"06": "MaxPlanck",
"07": "MyGIsFOS",
"08": "Nice",
"09": "OACT",
"10": "OAPA",
"11": "UCM",
"12": "ULB",
"13": "Vilnius",
"14": "GSSP",
"15": "IAC",
"16": "Liege",
"17": "MGNDU",
"18": "Mntp",
"19": "ON",
"20": "ROB",
"21": "ROBGrid",
"22": "BIN",
"23": "Halpha",
"24": "NBfilters",
"25": "TSNE"
}
def heatmap(database, wg, kind="tech", show_multiple_flags_per_node=True,
group_by=None, use_cuthill_mckee=True, minimum_entry=1, figsize=(12, 12),
**kwargs):
"""
Plot a heat map of the flags used by a working group. A 2D heat map is
created showing the frequency of flags used by different nodes. Flag entries
are only shown if they are used by at least two nodes.
:param database:
A database to connect to.
:param wg:
The working group (e.g., 10).
:param kind: [optional]
The kind of flag to show. Options include 'tech', 'remark', and 'peculi'
:param show_multiple_flags_per_node: [optional]
If set to `True`, then multiple flags used by the same node will be
shown in the figure. If `False`, then only flags used by two different
nodes will be shown.
:param group_by: [optional]
Group the grid. Options include: `None` for no grouping, `nodes` to
group by nodes, and `issue` to group by issues.
:param use_cuthill_mckee: [optional]
Use the Cuthill-McKee algorithm to sort the flags to maximize symmetry
and minimize the diagonal bandwidth of the matrix.
:param minimum_entry: [optional]
The minimum number of acceptable entries for any two dimensional grid
bin. Any bins with values less than `minimum_entry` will not be shown.
:param figsize: [optional]
The size of the figure in inches `(xsize, ysize)`.
"""
# Ensure to strip on | because some nodes give trailing | entries.
_strip_chars = " |"
kind = kind.lower()
kind_available = ("tech", "remark", "peculi")
if kind not in kind_available:
raise ValueError("kind '{}' not recognized in: {}".format(
kind, ", ".join(kind_available)))
if group_by is not None:
group_by = group_by.lower()
group_by_available = ("issue", "node")
if group_by not in group_by_available:
raise ValueError("ordering by '{}' not available: {}".format(
group_by, ", ".join(group_by_available)))
# Select everything with a non-zero flag entry
results = database.retrieve_table(
""" WITH n as (
SELECT id FROM nodes WHERE wg = {wg})
SELECT r.node_id, r.cname, r.{kind}
FROM n, results as r
WHERE TRIM(r.{kind}) <> ''
AND TRIM(r.{kind}) <> 'NaN'
AND r.node_id = n.id
""".format(wg=wg, kind=kind))
flat_flags \
= sum([_.strip(_strip_chars).split("|") for _ in results[kind]], [])
# Unique issue id numbers.
issue_ids = np.sort(np.unique([each.split("-")[0] for each in flat_flags]))
# Node ids.
node_ids = np.sort(np.unique([each.split("-")[2] for each in flat_flags]))
L, M = len(issue_ids), len(node_ids)
Z = np.zeros((L * M, L * M), dtype=int)
if group_by == "node" or group_by is None:
get_index = lambda issue, node: \
(np.where(node_ids == node)[0][0] * len(issue_ids) \
+ np.where(issue_ids == issue)[0][0])
labels = np.tile(issue_ids, M)
elif group_by == "issue":
get_index = lambda issue, node: \
(np.where(issue_ids == issue)[0][0] * len(node_ids)) \
+ np.where(node_ids == node)[0][0]
labels = np.repeat(issue_ids, M)
else:
raise ValueError("sorting by '{}' not available".format(group_by))
# Group results by CNAME.
for group in results.group_by("cname").groups:
if isinstance(group[kind], (str, unicode)):
group_flags = group[kind].strip(_strip_chars).split("|")
else:
group_flags \
= sum([_.strip(_strip_chars).split("|") for _ in group[kind]], [])
if len(group_flags) == 1:
continue
# Remove the WG and confidence entry.
flags = np.unique(
["-".join([_.split("-")[0], _.split("-")[2]]) for _ in group_flags])
for x, y in combinations(flags, 2):
x_issue, x_node = x.split("-")
y_issue, y_node = y.split("-")
if x_node == y_node and not show_multiple_flags_per_node: continue
xi = get_index(x_issue, x_node)
yi = get_index(y_issue, y_node)
Z[xi, yi] += 1
Z[yi, xi] += 1
before_reorder = np.sort(labels[np.where(Z == Z.max())[0]])
# Show structure within each node?
if use_cuthill_mckee:
if group_by is None:
matrix_indices = scipy.sparse.csgraph.reverse_cuthill_mckee(
scipy.sparse.csr_matrix(Z))
Z = Z[matrix_indices, :][:, matrix_indices]
labels = np.array([
["-".join([iid, nid]) for iid in issue_ids] for nid in node_ids])
labels = labels.flatten()[matrix_indices]
elif group_by == "node":
# Re-order the issue indices so that they show structure, but keep
# the same issue order for each of the nodes.
"""
matrix_indices = scipy.sparse.csgraph.reverse_cuthill_mckee(
scipy.sparse.csr_matrix(Z))
issue_indices = np.array(
[_ for i, _ in enumerate(matrix_indices % L) \
if _ not in (matrix_indices % L)[:i]]).flatten().astype(int)
for i in range(M):
labels[L*i:L*(i + 1)] = labels[L*i + issue_indices]
Z[L*i:L*(i + 1), :] = Z[L*i + issue_indices, :]
Z[:, L*i:L*(i + 1)] = Z[:, L*i + issue_indices]
"""
# Re-order the issue indices so that they show structure
for i in range(M):
indices = scipy.sparse.csgraph.reverse_cuthill_mckee(
scipy.sparse.csr_matrix(Z[L*i:L*(i+1), L*i:L*(i+1)]))
labels[L*i:L*(i + 1)] = labels[L*i + indices]
Z[L*i:L*(i + 1), :] = Z[L*i + indices, :]
Z[:, L*i:L*(i + 1)] = Z[:, L*i + indices]
else:
raise NotImplementedError
after_reorder = np.sort(labels[np.where(Z == Z.max())[0]])
assert np.all(before_reorder == after_reorder)
keep = np.any(Z >= minimum_entry, axis=1)
Z = Z[keep, :][:, keep]
labels = labels[keep]
gridlines = np.array([sum(keep[L*i:L*(i + 1)]) for i in range(M)])
fig, ax = plt.subplots(figsize=figsize)
kwds = dict(cmap="viridis", aspect="auto", interpolation="nearest", norm=LogNorm())
kwds.update(**kwargs)
ax.imshow(np.eye(*Z.shape), cmap="Greys", vmin=0, vmax=2, interpolation="nearest")
image = ax.imshow(Z, **kwds)
ax.set_xticks(np.arange(Z.shape[0]))
ax.set_yticks(np.arange(Z.shape[0]))
# Put gridlines
if group_by is not None and gridlines is None:
a, b = (L, M) if group_by == "issue" else (M, L)
for _ in range(a):
ax.axhline(_*b - 0.5, c="#000000", linewidth=2)
ax.axvline(_*b - 0.5, c="#000000", linewidth=2)
if gridlines is not None:
for _ in np.hstack([0, np.cumsum(gridlines)]):
ax.axhline(_ - 0.5, c="#000000", linewidth=2)
ax.axvline(_ - 0.5, c="#000000", linewidth=2)
ticks = np.hstack([0, np.cumsum(gridlines)])
ticks = 0.5 * np.diff(ticks) + ticks[:-1] - 0.5
ax_right = ax.twinx()
ax_right.set_yticks(ticks)
ax_right.set_yticklabels(
[_WG14_NODE_IDS.get(node_id, node_id) for node_id in node_ids],
verticalalignment="center")
ax_right.set_ylim(ax.get_ylim())
ax_right.tick_params(width=0)
ax.set_xlim(-0.5, Z.shape[0] - 0.5)
ax.set_ylim(Z.shape[0] - 0.5, -0.5)
ax_right.set_ylim(ax.get_ylim())
labels = labels if labels is not None else np.tile(issue_ids, M)
ax.set_yticklabels(labels)
ax.set_xticklabels(labels, rotation=90)
ax.tick_params(width=0)
fig.tight_layout()
_height, _space = 0.025, 0.025
cbar = plt.colorbar(image,
cax=fig.add_axes([
fig.subplotpars.left,
1 - _height - _space,
fig.subplotpars.right - fig.subplotpars.left,
_height
]),
orientation="horizontal")
fig.subplots_adjust(top=1 - _height - 2*_space)
cbar.ax.xaxis.set_ticks_position("top")
cbar.ax.xaxis.set_label_position("top")
cbar.ax.xaxis.set_tick_params(width=0)
# Empty metadata dict.
meta = {}
return (fig, meta)
| 9,136 | 30.725694 | 87 | py |
ges-idr5 | ges-idr5-master/code/plot/__init__.py | from hrd import *
from cluster import *
from nodes import *
from benchmarks import *
import flags | 97 | 18.6 | 24 | py |
ges-idr5 | ges-idr5-master/code/plot/hrd.py |
__all__ = ["hrd", "stellar_parameter_histograms", "stellar_parameter_error_histograms",
"hrd_by_setup"]
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
from matplotlib import gridspec
import utils
def hrd_by_setup(database, wg, node_name):
"""
    Show Hertzsprung-Russell Diagrams for stars in each unique setup.
:param database:
A database for transactions.
:param wg:
The working group.
:param node_name:
The name of the node to show results for.
"""
node_id = database.retrieve_node_id(wg, node_name)
# Get results.
results = database.retrieve_table(
""" SELECT cname, setup, teff, e_teff, logg, e_logg, mh, e_mh,
feh, e_feh, xi, e_xi
FROM results WHERE node_id = %s""", (node_id, ))
if results is None:
return None
setups = sorted(list(set(results["setup"])))
remove_setups = []
for i, setup in enumerate(setups):
mask = (results["setup"] == setup)
ok = np.isfinite(results["teff"][mask] * results["logg"][mask])
if not np.any(ok):
remove_setups.append(setup)
continue
setups = list(set(setups).difference(remove_setups))
N_setups = len(setups)
if N_setups == 0:
        raise ValueError("no setups with finite stellar parameters to plot")
gs = gridspec.GridSpec(1, N_setups + 1, width_ratios=([12] * N_setups) + [1])
if not np.all(np.isfinite(results["xi"])):
# Don't show size-varying points.
s = 100 * np.ones(len(results))
else:
s = 100 * results["xi"]
mh_col = utils.mh_or_feh(results)
fig = plt.figure(figsize=(6 * N_setups, 6))
for i, setup in enumerate(setups):
mask = (results["setup"] == setup)
ax = fig.add_subplot(gs[i])
scat = ax.scatter(results["teff"][mask], results["logg"][mask],
facecolor=results[mh_col][mask], edgecolor="k", s=100,
vmin=np.nanmin(results[mh_col]), vmax=np.nanmax(results[mh_col]),
cmap="plasma")
ax.errorbar(results["teff"][mask], results["logg"][mask],
xerr=results["e_teff"][mask], yerr=results["e_logg"][mask],
fmt=None, ecolor="#666666", alpha=0.5, zorder=-1)
ax.xaxis.set_major_locator(MaxNLocator(5))
ax.yaxis.set_major_locator(MaxNLocator(5))
ax.set_xlabel(r"$T_{\rm eff}$ $({\rm K})$")
ax.set_ylabel(r"$\log{g}$")
ax.set_title(setup.strip())
ax.set_xlim(ax.get_xlim()[::-1])
ax.set_ylim(ax.get_ylim()[::-1])
cax = fig.add_subplot(gs[-1])
cb = fig.colorbar(cax=cax, mappable=scat)
cb.set_label(r"$[{\rm Fe}/{\rm H}]$")
#fig.tight_layout()
return fig
def hrd(database, wg, node_name, where=None, isochrones=None,
show_other_wg_nodes=False, mark=None):
"""
    Return a Hertzsprung-Russell Diagram (effective temperature versus surface
gravity) for all stars reported by a single node.
:param database:
A database for transactions.
:param wg:
The working group.
:param node_name:
The name of the node to show results for.
:param where: [optional]
Additional SQL constraints, which would start from '... and `where`'.
:param isochrones: [optional]
A single path or list of paths of isochrones to display on the figure.
:param show_other_wg_nodes: [optional]
If set to `True`, the results from other nodes in the same working
group will be shown in the background to provide context.
:param mark: [optional]
Mark an (x, y) position with vertical and horizontal lines in the
figure.
"""
node_id = database.retrieve_node_id(wg, node_name)
where_str = "" if where is None else " AND {}".format(where)
if isochrones is not None and not isinstance(isochrones, (list, tuple)):
isochrones = [isochrones]
# Collect the results for this node.
results = database.retrieve_table(
""" SELECT node_id, cname, teff, e_teff, logg, e_logg,
feh, e_feh, mh, e_mh, xi, e_xi
FROM results WHERE node_id = %s {}""".format(where_str), (node_id, ))
if results is None:
raise ValueError("no results found for {}/{}".format(wg, node_name))
ok = np.isfinite(
results["teff"].astype(float) * results["logg"].astype(float))
# TODO Get full limit range on teff/logg/etc?
#param_ranges = database.retrieve_table(
# """SELECT max(teff) AS max_teff, min(teff) AS min_teff,
# max(logg) AS max_logg, min(logg) AS min_logg,
# max(mh) AS max_mh, min(mh) AS min_mh,
# max(xi) AS max_xi, min(xi) as min_xi
# FROM results
# WHERE node_id
fig, ax = plt.subplots()
if not np.any(np.isfinite(results["xi"].astype(float))):
# Don't show size-varying points.
s = None
else:
s = 100 * results["xi"]
ok *= np.isfinite(results["xi"].astype(float))
# Error bars.
ax.errorbar(results["teff"][ok], results["logg"][ok],
xerr=results["e_teff"][ok], yerr=results["e_logg"][ok],
fmt=None, ecolor="#666666", zorder=-1, alpha=0.5)
mh_col = utils.mh_or_feh(results)
scat = ax.scatter(results["teff"][ok], results["logg"][ok],
c=results[mh_col][ok], s=s, cmap="plasma", zorder=2)
cbar = plt.colorbar(scat)
cbar.set_label(r"$[{\rm Fe}/{\rm H}]$")
if mark is None:
ax.set_xlim(8000, 3000)
ax.set_ylim(5.5, 0)
else:
ax.axhline(mark[1], c="k", lw=2, zorder=-1)
ax.axvline(mark[0], c="k", lw=2, zorder=-1)
ax.scatter([mark[0]], [mark[1]], facecolor="k", s=100)
        ax.set_xlim(ax.get_xlim()[::-1])
        ax.set_ylim(ax.get_ylim()[::-1])
ax.set_xlabel(r"$T_{\rm eff}$ $({\rm K})$")
ax.set_ylabel(r"$\log{g}$")
fig.tight_layout()
return fig
def histograms(database, wg, node_name, parameters, labels=None, where=None,
**kwargs):
"""
Show histograms of parameters.
:param database:
A database for transactions.
:param wg:
The working group.
:param node_name:
The name of the node to show results for.
:param parameters:
The names of the columns (parameters) to show.
:param labels:
The labels to show. If `None` are given, the `parameters` will be used.
:param where: [optional]
Additional SQL constraints, which would start from '... and `where`'.
"""
node_id = database.retrieve_node_id(wg, node_name)
where_str = "" if where is None else " AND {}".format(where)
parameters = list(parameters)
if "mh" in parameters:
# a HACK
parameters.append("feh")
if "e_mh" in parameters:
parameters.append("e_feh")
# Collect the results for this node.
labels = labels or parameters
results = database.retrieve_table(
"""SELECT {} FROM results WHERE node_id = %s {}""".format(
", ".join(parameters), where_str), (node_id, ))
N = int(np.ceil(np.sqrt(len(labels))))
fig, axes = plt.subplots(N, N)
axes = np.array(axes).flatten()
for i, (ax, parameter, label) in enumerate(zip(axes, parameters, labels)):
if parameter == "mh":
parameter = utils.mh_or_feh(results)
elif parameter == "e_mh":
parameter = "e_{}".format(utils.mh_or_feh(results))
ok = np.isfinite(results[parameter])
ax.hist(results[parameter][ok], bins=50, facecolor="#666666")
ax.text(0.05, 0.95, r"${}$".format(ok.sum()),
verticalalignment="top",
color="k", transform=ax.transAxes)
ax.set_xlabel(label)
ax.set_ylabel(r"Count")
ax.xaxis.set_major_locator(MaxNLocator(5))
ax.yaxis.set_major_locator(MaxNLocator(5))
fig.tight_layout()
for ax in axes[len(parameters):]:
ax.set_visible(False)
return fig
def stellar_parameter_histograms(database, wg, node_name, where=None, **kwargs):
"""
    Show four-panel histograms of the TEFF, LOGG, MH, and XI for a given node.
:param database:
A database for transactions.
:param wg:
The working group.
:param node_name:
The name of the node to show results for.
:param where: [optional]
Additional SQL constraints, which would start from '... and `where`'.
"""
parameters = ("teff", "logg", "mh", "xi")
labels = (r"$T_{\rm eff}$ $({\rm K})$", r"$\log{g}$",
r"$[{\rm Fe}/{\rm H}]$", r"$\xi$ $({\rm km}$ ${\rm s}^{-1})$")
return histograms(database, wg, node_name, parameters, labels,
where=where, **kwargs)
def stellar_parameter_error_histograms(database, wg, node_name, where=None, **kwargs):
"""
    Show four-panel histograms of the E_TEFF, E_LOGG, E_MH, and E_XI for a
    given node.
:param database:
A database for transactions.
:param wg:
The working group.
:param node_name:
The name of the node to show results for.
:param where: [optional]
Additional SQL constraints, which would start from '... and `where`'.
"""
parameters = ("e_teff", "e_logg", "e_mh", "e_xi")
labels = (r"$\sigma_{T_{\rm eff}}$ $({\rm K})$", r"$\sigma_{\log{g}}$",
r"$\sigma_{[{\rm Fe}/{\rm H}]}$", r"$\sigma_\xi$ $({\rm km}$ ${\rm s}^{-1})$")
return histograms(database, wg, node_name, parameters, labels,
where=where, **kwargs)
| 9,544 | 28.642857 | 87 | py |
ges-idr5 | ges-idr5-master/code/model/plot.py |
"""
Plot things relevant to the ensemble model.
"""
import cPickle as pickle
import itertools
import logging
import numpy as np
import scipy.sparse
import matplotlib.pyplot as plt
from collections import OrderedDict
from matplotlib.ticker import MaxNLocator
import brewer2mpl # For pretty colors.
logger = logging.getLogger("ges")
_DEFAULT_SAVEFIG_KWDS = dict(dpi=300, bbox_inches="tight")
def systematic_uncertainty(model, ax=None, N_bins=50, xlabel=None, legend=True,
**kwargs):
"""
Plot the distribution of constant systematic uncertainties from all nodes.
:param model:
A fully-sampled ensemble model.
"""
if ax is None:
fig, ax = plt.subplots()
else:
fig, ax = ax.figure, ax
N = model._chains["vs_c"].shape[1]
colors = brewer2mpl.get_map("Set1", "qualitative", N).mpl_colors
max_abs_sigma = np.sqrt(np.max(np.abs(model._chains["vs_c"])))
bins = np.linspace(0, max_abs_sigma, N_bins)
nodes = (model._metadata["node_ids"], model._metadata["node_names"])
for i, (node_id, node_name) in enumerate(zip(*nodes)):
ax.hist(np.sqrt(model._chains["vs_c"][:, i]),
facecolor=colors[i], edgecolor=colors[i], bins=bins,
alpha=0.5, lw=2, normed=True, histtype="stepfilled",
label=r"${{\rm {0}}}$".format(node_name.strip()))
latex_labels = {
"teff": r"${\rm Constant}$ ${\rm systematic}$ ${\rm uncertainty}$ "
r"${\rm in}$ ${\rm effective}$ ${\rm temperature},$ "
r"$\sigma_{c,T_{\rm eff}}$ $({\rm K})$",
"logg": r"${\rm Constant}$ ${\rm systematic}$ ${\rm uncertainty}$ "
r"${\rm in}$ ${\rm surface}$ ${\rm gravity},$ "
r"$\sigma_{c,\log{g}}$ $({\rm dex})$",
"feh": r"${\rm Constant}$ ${\rm systematic}$ ${\rm uncertainty}$ "
r"${\rm in}$ ${\rm metallicity},$ "
r"$\sigma_{c,[{\rm Fe/H}]}$ $({\rm dex})$"
}
ax.set_xlabel(
xlabel or latex_labels.get(model._parameter, model._parameter))
ax.set_ylabel(r"${\rm Frequency}$")
if legend:
kwds = dict(frameon=False, ncol=2, loc="upper center")
kwds.update(kwargs.get("legend_kwds", {}))
ax.legend(**kwds)
ax.set(
adjustable="box-forced",
aspect=np.ptp(ax.get_xlim())/np.ptp(ax.get_ylim()))
fig.tight_layout()
# Monkey patch the savefig to incorporate the default keywords to ensure
# the resulting figure is ready for publication.
old_savefig = fig.savefig
def new_savefig(self, *args, **kwargs):
kwds = _DEFAULT_SAVEFIG_KWDS.copy()
kwds.update(kwargs)
return old_savefig(self, *args, **kwds)
fig.savefig = new_savefig
return fig
def node_relative_systematic_uncertainty(model, axes=None, quartiles=[16, 50, 84],
ylims=(0.5, 10), **kwargs):
"""
Plot the systematic uncertainty from all nodes as a function of the stellar
parameters.
"""
quartiles = np.hstack(quartiles)
Q = len(quartiles)
if Q not in (1, 3):
raise ValueError("quartiles must be length 1 or 3")
chains = model._chains
assert chains is not None, "Has the model been sampled?"
nodes = (model._metadata["node_ids"], model._metadata["node_names"])
# TODO: Common colors.
N = len(nodes[0])
colors = brewer2mpl.get_map("Set1", "qualitative", N + 1).mpl_colors
parameter_bounds = OrderedDict([
("teff", (3000, 8000)),
("logg", (0, 5)),
("feh", (-3.5, 0.5))
])
if axes is None:
fig, axes = plt.subplots(1, len(parameter_bounds))
else:
fig = axes[0].figure
latex_xlabels = kwargs.get("latex_xlabels", dict(
teff=r"${\rm Effective}$ ${\rm temperature},$ $T_{\rm eff}$ $({\rm K})$",
logg=r"${\rm Surface}$ ${\rm gravity},$ $\log{g}$",
feh=r"${\rm Metallicity},$ $[{\rm Fe/H}]$"))
latex_ylabels = kwargs.get("latex_ylabels", dict(
teff=r"$\sigma_{sys,T_{\rm eff}}/c_{sys}$",
logg=r"$\sigma_{sys,\log{g}}/c_{sys}$",
feh=r"$\sigma_{sys,[{\rm Fe/H}]}/c_{sys}$"))
K = len(parameter_bounds)
S = 500
xs = np.linspace(0, 1, S)
nticks = kwargs.get("nticks", 5)
for i, (parameter, bounds) in enumerate(parameter_bounds.items()):
x, ax = np.linspace(bounds[0], bounds[1], S), axes[i]
for j, (node_id, node_name) in enumerate(zip(*nodes)):
values = chains["vs_a"][:, i, j] * (1 - xs).reshape(-1, 1)**chains["vs_b"][:, i, j]
sigma = np.sqrt(np.exp(values))
q = np.percentile(sigma, quartiles, axis=1)
ax.plot(x, q[Q / 2], lw=2, c=colors[j], zorder=10,
label=r"${{\rm {0}}}$".format(node_name.strip()))
# Show filled region.
if Q > 1:
ax.fill_between(x.flatten(), q[0], q[-1], facecolor=colors[j],
alpha=0.5, edgecolor="none")
# Set common ylimits.
ylims = ylims or (0, np.max([ax.get_ylim() for ax in axes]))
for i, (parameter, bounds) in enumerate(parameter_bounds.items()):
ax = axes[i]
ax.xaxis.set_major_locator(MaxNLocator(nticks))
ax.yaxis.set_major_locator(MaxNLocator(nticks))
ax.set_xlim(bounds)
ax.set_ylim(ylims)
if ax.is_first_col():
ax.set_ylabel(latex_ylabels.get(model._parameter, model._parameter))
else:
ax.set_yticklabels([])
if ax.is_last_row():
ax.set_xlabel(latex_xlabels.get(parameter, parameter))
else:
ax.set_xticklabels([])
# Force the axes to be square when saving it.
ax.set(
adjustable="box-forced",
aspect=np.ptp(ax.get_xlim())/np.ptp(ax.get_ylim()))
fig.tight_layout()
old_savefig = fig.savefig
def new_savefig(self, *args, **kwargs):
kwds = _DEFAULT_SAVEFIG_KWDS.copy()
kwds.update(kwargs)
return old_savefig(self, *args, **kwds)
fig.savefig = new_savefig
return fig
def node_uncertainty_with_snr(model, quartiles=[16, 50, 84], show_cr_bound=True,
xlims=(1, 100), ylims=None, Ns=100, **kwargs):
"""
Plot the total node uncertainty as a function of S/N ratio.
:param model:
A trained ensemble model.
:param xlims: [optional]
A two-length tuple giving the lower and upper bounds to show in SNR.
    :param Ns: [optional]
The number of draws to use when calculating the projected node uncertainty.
"""
quartiles = sum([quartiles], [])
Q = len(quartiles)
if Q not in (1, 3):
raise ValueError("quartiles must be length 1 or 3")
chains = model._chains
assert chains is not None, "Has the model been sampled?"
x = np.linspace(xlims[0], xlims[1], 500).reshape(-1, 1)
nodes = (model._metadata["node_ids"], model._metadata["node_names"])
N = len(nodes[0])
colors = brewer2mpl.get_map("Set1", "qualitative", N + 1).mpl_colors
fig, ax = plt.subplots()
for i, (node_id, node_name) in enumerate(zip(*nodes)):
# Calculate total uncertainty as a function of SNR
# (assuming no increase in systematic uncertainty due to different parts
# of parameter space)
sigma = np.sqrt(
chains["alpha_sq"][:, i].reshape(-1, 1) / x.T +
chains["vs_c"][:, i].reshape(-1, 1) * np.ones_like(x.T))
q = np.percentile(sigma, quartiles, axis=0)
ax.plot(x, q[Q / 2], lw=2, c=colors[i], zorder=10,
label=r"${{\rm {0}}}$".format(node_name.strip()))
# Show filled region.
if Q > 1:
ax.fill_between(x.flatten(), q[0], q[-1], facecolor=colors[i],
alpha=0.5, edgecolor="none")
if show_cr_bound:
# Calculate the minimum variance as a function of SNR using all the
# information from every node.
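        # For a design vector of ones W and node covariance matrix Sigma, the
        # minimum variance of the homogenised estimate is 1 / (W^T Sigma^-1 W);
        # the inner loop below stores its square root for each S/N value.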
C = model._chains["alpha_sq"].shape[0]
sigma_wg = np.zeros((C, x.size))
y = np.zeros((Ns, x.size))
indices = np.random.choice(range(C), size=Ns, replace=False)
for i, j in enumerate(indices):
diag = np.sqrt(
chains["alpha_sq"][j].reshape(-1, 1) / x.T +
chains["vs_c"][j].reshape(-1, 1) * np.ones_like(x.T))
I = np.eye(N)
            rho = np.dot(
                np.dot(I, chains["L_corr"][j]),
                np.dot(I, chains["L_corr"][j]).T)
for k in range(x.size):
Sigma = np.tile(diag[:, k], N).reshape(N, N) \
* np.repeat(diag[:, k], N).reshape(N, N) \
* rho
W = np.ones((N, 1))
Cinv = np.linalg.inv(Sigma)
y[i, k] = 1.0/np.sqrt(np.dot(np.dot(W.T, Cinv), W))
g = np.nanpercentile(y, quartiles, axis=0)
ax.plot(x.flatten(), g[Q / 2], lw=2, c=colors[-1], linestyle="--",
label=\
r"${\rm Homogenised}$"
"\n"
r"$({\rm Cram\'er$-${\rm Rao}$ ${\rm bound)}$")
if Q > 1:
ax.fill_between(x.flatten(), g[0], g[-1],
facecolor=colors[-1], alpha=0.5, edgecolor="none", zorder=10)
ax.set_xlim(*xlims)
ax.set_xlabel(r"${\rm Signal}$-${\rm to}$-${\rm noise}$ ${\rm ratio},$ $S/N$ $({\rm pixel}^{-1})$")
default_ylabels = {
"teff": r"${\rm Uncertainty}$ ${\rm in}$ ${\rm effective}$ ${\rm temperature},$ $\sigma_{T_{\rm eff}}$ $({\rm K})$",
"logg": r"${\rm Uncertainty}$ ${\rm in}$ ${\rm surface}$ ${\rm gravity},$ $\sigma_\log{g}$ $({\rm dex})$",
"feh": r"${\rm Uncertainty}$ ${\rm in}$ ${\rm metallicity},$ $\sigma_{\rm [Fe/H]}$ $({\rm dex})$",
}
ylabel = kwargs.get("ylabel", default_ylabels.get(model._parameter,""))
ax.set_ylabel(ylabel)
if ylims is None:
default_ylims = dict(teff=(0, 500), logg=(0, 1), feh=(0, 0.5))
ylims = default_ylims.get(model._parameter, None)
ax.set_ylim(ylims)
legend_kwds = dict(loc="upper center", ncol=2, frameon=False)
legend_kwds.update(kwargs.get("legend_kwds", {}))
plt.legend(**legend_kwds)
ax.set(
adjustable="box-forced",
aspect=np.ptp(ax.get_xlim())/np.ptp(ax.get_ylim()))
fig.tight_layout()
# Monkey patch the savefig to incorporate the default keywords to ensure
# the resulting figure is ready for publication.
old_savefig = fig.savefig
def new_savefig(self, *args, **kwargs):
kwds = _DEFAULT_SAVEFIG_KWDS.copy()
kwds.update(kwargs)
return old_savefig(self, *args, **kwds)
fig.savefig = new_savefig
return fig
def node_correlations(model, reorder=True, plot_edges=True, animate=False,
**kwargs):
"""
Show a lower-diagonal matrix coloured by the median correlation coefficient
between each node.
:param reorder: [optional]
Re-order the correlation matrix to highlight structure.
:param plot_edges: [optional]
Plot edges surrounding the lower-diagonal matrix.
:param animate: [optional]
If `True`, then return a generator that produces `frames` of the
correlation matrix.
"""
if not animate:
return _node_correlations(
model, reorder=reorder, plot_edges=plot_edges, **kwargs)
else:
return _node_correlations_animated(
model, reorder=reorder, plot_edges=plot_edges, **kwargs)
def _node_correlations_animated(model, reorder=True, plot_edges=True, frames=100,
reverse=True, **kwargs):
node_names = np.array(model._metadata["node_names"])
S, N, _ = model._chains["L_corr"].shape
assert N == len(node_names)
# Construct the correlation coefficients.
I = np.eye(N)
rho = np.nan * np.ones((S, N, N))
for s in range(S):
L = model._chains["L_corr"][s]
rho[s] = np.dot(np.dot(I, L), np.dot(I, L).T)
# Compress down to percentiles.
if not reverse:
q = np.linspace(0, 100, frames)
else:
q = np.hstack([
np.linspace(0, 100, frames/2),
np.linspace(100, 0, frames/2)
])
F = q.size
rho = np.percentile(rho, q, axis=0)
if reorder:
L_corr_median = np.median(model._chains["L_corr"], axis=0)
rho_median = np.dot(np.dot(I, L_corr_median), np.dot(I, L_corr_median).T)
permutations = list(itertools.permutations(range(N), N))
score = np.nan * np.ones(len(permutations))
for i, permutation in enumerate(permutations):
_ = np.array(permutation)
m = rho_median[:, _][_, :]
score[i] = np.nansum(np.abs(np.diff(m, axis=0))) \
+ np.nansum(np.abs(np.diff(m, axis=1)))
matrix_indices = np.array(permutations[np.argmin(score)])
# Check if we should flip this or not for visual aspects
rho_ = rho_median[:, matrix_indices][matrix_indices, :]
if np.sum(rho_[:, 0]) > np.sum(rho_[:, -1]):
matrix_indices = matrix_indices[::-1]
for f in range(F):
rho[f] = rho[f][:, matrix_indices][matrix_indices, :]
node_names = node_names[matrix_indices]
# Common plotting parameters between frames.
vrange = np.round(np.nanmax(np.abs(rho)), 1)
assert vrange <= 1.0
vmin = kwargs.get("vmin", None) or -vrange
vmax = kwargs.get("vmax", None) or +vrange
cmap = kwargs.get("cmap", "coolwarm")
fig, ax = plt.subplots()
    # (The savefig monkey-patching used in the other plotting functions is
    # disabled in this animated version; see the commented-out block below.)
# Set NaN's for the upper triangle.
rho = rho[:, 1:, :-1]
for f in range(F):
for i in range(N - 1):
for j in range(i + 1, N - 1):
rho[f, i, j] = np.nan
im = ax.imshow(rho[0], vmin=vmin, vmax=vmax, cmap=cmap, interpolation="nearest")
ax.spines["right"].set_visible(False)
ax.spines["top"].set_visible(False)
ax.spines["left"].set_visible(False)
ax.spines["bottom"].set_visible(False)
ax.xaxis.set_ticks_position("none") # Seriously matplotlib what the fuck
ax.yaxis.set_ticks_position("none")
ax.set_xticks(range(N - 1))
ax.set_yticks(range(N - 1))
ax.set_xticklabels(node_names[:-1], rotation=90)
ax.set_yticklabels(node_names[1:])
# Draw a line around everything.
if plot_edges:
kwds = dict(lw=2, c="k")
edge = [-0.5, N - 1 - 0.5]
ax.plot(edge, [N - 1 - 0.5, N - 1 -0.5], **kwds)
ax.plot([-0.5, -0.5], edge, **kwds)
x = []
y = []
for i in range(N - 1):
x.extend([-0.5 + i, i + 0.5, i + 0.5])
y.extend([i - 0.5, i - 0.5, i + 0.5])
ax.plot(x, y, **kwds)
tolerance = kwargs.get("__tolerance", 0.10)
ax.set_xlim(-0.5 - tolerance, N - 1.5 + tolerance)
ax.set_ylim(N - 1.5 + tolerance, -0.5 - tolerance)
ax.set(adjustable="box-forced", aspect="equal")
p = ax.get_position()
cbar = plt.colorbar(ax=[ax], mappable=im, orientation="horizontal")
cbar.set_ticks(np.linspace(-vrange, vrange, 5))
cbar.ax.xaxis.set_ticks_position("none")
default_labels = {
"teff": r"${\rm Correlation}$ ${\rm coefficient},$ $\rho_{T_{\rm eff}}$",
"logg": r"${\rm Correlation}$ ${\rm coefficient},$ $\rho_\log{g}$",
"feh": r"${\rm Correlation}$ ${\rm coefficient},$ $\rho_{\rm [Fe/H]}$"
}
cbar.set_label(
kwargs.get("label", default_labels.get(model._parameter, "rho")))
fig.tight_layout()
fig.subplots_adjust(bottom=0.35)
plt.show()
bottom, height = 0.05, 0.05
co = kwargs.get("__cbar_offset", 0.01)
cbar.ax.set_position([
ax.get_position().x0 + co,
bottom,
ax.get_position().width - co*2,
bottom + height
])
plt.show()
"""
old_savefig = fig.savefig
def new_savefig(self, *args, **kwargs):
kwds = _DEFAULT_SAVEFIG_KWDS.copy()
kwds.update(kwargs)
return old_savefig(self, *args, **kwds)
fig.savefig = new_savefig
"""
for f in range(F):
im.set_data(rho[f])
#plt.show()
yield fig
def _node_correlations(model, reorder=True, plot_edges=True, **kwargs):
node_names = np.array(model._metadata["node_names"])
S, N, _ = model._chains["L_corr"].shape
assert N == len(node_names)
# Construct the correlation coefficients from the Cholesky factors.
I = np.eye(N)
L = np.median(model._chains["L_corr"], axis=0)
rho = np.dot(np.dot(I, L), np.dot(I, L).T)
matrix_indices = np.arange(N, dtype=int)
if reorder:
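        # Brute-force search over all N! orderings, scoring each by the summed
        # absolute difference between adjacent matrix elements; the lowest
        # score groups strongly-correlated nodes next to each other.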
permutations = list(itertools.permutations(range(N), N))
score = np.nan * np.ones(len(permutations))
for i, permutation in enumerate(permutations):
_ = np.array(permutation)
m = rho[:, _][_, :]
score[i] = np.nansum(np.abs(np.diff(m, axis=0))) \
+ np.nansum(np.abs(np.diff(m, axis=1)))
matrix_indices = np.array(permutations[np.argmin(score)])
# Check if we should flip this or not for visual aspects
rho_ = rho[:, matrix_indices][matrix_indices, :]
if np.sum(rho_[:, 0]) > np.sum(rho_[:, -1]):
matrix_indices = matrix_indices[::-1]
rho = rho[:, matrix_indices][matrix_indices, :]
node_names = node_names[matrix_indices]
# Get plotting parameters
vrange = np.round(np.nanmax(np.abs(rho)), 1)
vmin = kwargs.get("vmin", None) or -vrange
vmax = kwargs.get("vmax", None) or +vrange
cmap = kwargs.get("cmap", "coolwarm")
fig, ax = plt.subplots()
old_savefig = fig.savefig
def new_savefig(self, *args, **kwargs):
kwds = _DEFAULT_SAVEFIG_KWDS.copy()
kwds.update(kwargs)
return old_savefig(self, *args, **kwds)
fig.savefig = new_savefig
# Monkey patch the savefig to incorporate the default keywords to ensure
# the resulting figure is ready for publication.
# Set NaN's for the upper triangle.
rho = rho[1:, :-1]
for i in range(N - 1):
for j in range(i + 1, N - 1):
rho[i, j] = np.nan
im = ax.imshow(rho, vmin=vmin, vmax=vmax, cmap=cmap, interpolation="nearest")
ax.spines["right"].set_visible(False)
ax.spines["top"].set_visible(False)
ax.spines["left"].set_visible(False)
ax.spines["bottom"].set_visible(False)
ax.xaxis.set_ticks_position("none") # Seriously matplotlib what the fuck
ax.yaxis.set_ticks_position("none")
ax.set_xticks(range(N - 1))
ax.set_yticks(range(N - 1))
ax.set_xticklabels(node_names[:-1], rotation=90)
ax.set_yticklabels(node_names[1:])
# Draw a line around everything.
if plot_edges:
kwds = dict(lw=2, c="k")
edge = [-0.5, N - 1 - 0.5]
ax.plot(edge, [N - 1 - 0.5, N - 1 -0.5], **kwds)
ax.plot([-0.5, -0.5], edge, **kwds)
x = []
y = []
for i in range(N - 1):
x.extend([-0.5 + i, i + 0.5, i + 0.5])
y.extend([i - 0.5, i - 0.5, i + 0.5])
ax.plot(x, y, **kwds)
tolerance = kwargs.get("__tolerance", 0.10)
ax.set_xlim(-0.5 - tolerance, N - 1.5 + tolerance)
ax.set_ylim(N - 1.5 + tolerance, -0.5 - tolerance)
ax.set(adjustable="box-forced", aspect="equal")
p = ax.get_position()
cbar = plt.colorbar(ax=[ax], mappable=im, orientation="horizontal")
cbar.set_ticks(np.linspace(-vrange, vrange, 5))
cbar.ax.xaxis.set_ticks_position("none")
default_labels = {
"teff": r"${\rm Correlation}$ ${\rm coefficient},$ $\rho_{T_{\rm eff}}$",
"logg": r"${\rm Correlation}$ ${\rm coefficient},$ $\rho_\log{g}$",
"feh": r"${\rm Correlation}$ ${\rm coefficient},$ $\rho_{\rm [Fe/H]}$"
}
cbar.set_label(
kwargs.get("label", default_labels.get(model._parameter, "rho")))
fig.tight_layout()
fig.subplots_adjust(bottom=0.35)
plt.show()
bottom, height = 0.05, 0.05
co = kwargs.get("__cbar_offset", 0.01)
cbar.ax.set_position([
ax.get_position().x0 + co,
bottom,
ax.get_position().width - co*2,
bottom + height
])
plt.show()
return fig
def biases(model, ax=None, N_bins=50, xlabel=None, legend=True, **kwargs):
"""
Plot the distribution of biases from all nodes.
:param model:
A trained ensemble model.
:param ax: [optional]
The axes to plot the distributions.
"""
if ax is None:
fig, ax = plt.subplots()
else:
fig, ax = ax.figure, ax
N = model._chains["biases"].shape[1]
colors = brewer2mpl.get_map("Set1", "qualitative", N).mpl_colors
max_abs_bias = np.max(np.abs(model._chains["biases"]))
bins = np.linspace(-max_abs_bias, +max_abs_bias, N_bins)
nodes = (model._metadata["node_ids"], model._metadata["node_names"])
for i, (node_id, node_name) in enumerate(zip(*nodes)):
ax.hist(model._chains["biases"][:, i],
facecolor=colors[i], edgecolor=colors[i], bins=bins,
alpha=0.5, lw=2, normed=True, histtype="stepfilled",
label=r"${{\rm {0}}}$".format(node_name.strip()))
latex_labels = {
"teff": r"${\rm Bias}$ ${\rm in}$ ${\rm effective}$ ${\rm temperature},$ "
r"$T_{\rm eff}$ $({\rm K})$",
"logg": r"${\rm Bias}$ ${\rm in}$ ${\rm surface}$ ${\rm gravity},$ "
r"$\log{g}$ $({\rm dex})$",
"feh": r"${\rm Bias}$ ${\rm in}$ ${\rm metallicity},$ "
r"$[{\rm Fe/H}]$ $({\rm dex})$"
}
ax.set_xlabel(
xlabel or latex_labels.get(model._parameter, model._parameter))
ax.set_ylabel(r"${\rm Frequency}$")
if legend:
kwds = dict(frameon=False, ncol=2, loc="upper center")
kwds.update(kwargs.get("legend_kwds", {}))
ax.legend(**kwds)
ax.set(adjustable="box-forced")
fig.tight_layout()
# Monkey patch the savefig to incorporate the default keywords to ensure
# the resulting figure is ready for publication.
old_savefig = fig.savefig
def new_savefig(self, *args, **kwargs):
kwds = _DEFAULT_SAVEFIG_KWDS.copy()
kwds.update(kwargs)
return old_savefig(self, *args, **kwds)
fig.savefig = new_savefig
return fig
| 22,417 | 30.619182 | 124 | py |
ges-idr5 | ges-idr5-master/code/model/__init__.py | from .ensemble import * | 23 | 23 | 23 | py |
ges-idr5 | ges-idr5-master/code/model/ensemble.py |
"""
Classes to deal with homogenisation models written in Stan.
"""
import cPickle as pickle
import logging
import numpy as np
import os
import pystan as stan
from astropy.table import Table
from collections import OrderedDict
from itertools import combinations
from time import time
from . import plot
logger = logging.getLogger("ges")
def _guess_parameter_name(table, name):
if name in table.dtype.names:
p_name = name
else:
p_name = name.upper() if name == name.lower() else name.lower()
guesses = ("e_{}", "E_{}", "{}_ERR", "{}_err", "{}_ERROR", "{}_error")
for guess in guesses:
if guess.format(p_name) in table.dtype.names:
p_e_name = guess.format(p_name)
break
else:
raise ValueError(
"cannot guess column name for the error in {}".format(name))
return (p_name, p_e_name)
def _guess_parameter_names(table, names):
return zip(*map(lambda name: _guess_parameter_name(table, name), names))
def _homogenise_survey_measurements(database, wg, parameter, cname, N=100,
stan_model=None, update_database=True):
"""
    Produce an unbiased estimate of an astrophysical parameter for a given
survey object.
:param cname:
The CNAME (unique star identifier) of an object.
:param wg:
The working group to consider measurements from.
:param parameter:
The name of the parameter to estimate.
:param N: [optional]
The number of samples to draw. Setting to zero or outside the valid
        range defaults to the maximum number of draws available.
:param stan_model: [optional]
The fitted Stan model.
Either the fitted stan model must be provided, or the Stan chains and
the data dictionary (`stan_chains` and `stan_data`) must be provided.
"""
# Get the data for this object.
estimates = database.retrieve_table(
""" SELECT DISTINCT ON (filename, node_id)
results.id, cname, node_id, snr, trim(filename) as filename,
teff, logg, feh
FROM results, nodes
WHERE nodes.wg = {wg}
AND nodes.id = results.node_id
AND cname = '{cname}'
AND {parameter} <> 'NaN'
AND passed_quality_control = true;
""".format(wg=wg, cname=cname, parameter=parameter))
if estimates is None:
return np.nan * np.ones(4)
# Extract N samples for all the parameters.
# For each sample, calculate:
# 1. The total variance (systematic**2 + (alpha/SNR)**2)
    # 2. The weighted mean from all observations by that node.
# --> check that this follows 1/sqrt(N)
# 3. Construct a covariance matrix using the weighted means, uncertainties
# and the correlation coefficients
# 4. Draw from a Gaussian using the weighted means and your new Cov matrix
# 5. Record the draw.
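    # Sketch of the estimator used in the loop below: with a design vector of
    # ones W, per-node data m and covariance matrix C,
    #   var_stat = 1 / (W^T C^-1 W),   mu = var_stat * (W^T C^-1 m),
    # first within each node and then again across nodes with the correlated
    # node-to-node covariance matrix.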
samples = stan_model._chains
unique_node_ids = stan_model._metadata["node_ids"]
K = samples["truths"].shape[0]
M = len(set(estimates["node_id"]))
if 1 > N or N > K:
N = K
indices = range(K)
else:
# Select N random indices from K
indices = np.random.choice(K, N, replace=False)
estimates = estimates.group_by("node_id")
mu_samples = np.zeros(N)
var_samples = np.zeros(N)
# Scale the median submitted parameters between (0, 1) so that we can
# estimate the systematic uncertainty from each node.
bounds = OrderedDict([
["teff", (3000, 8000)],
["logg", (0, 5)],
["feh", (-3, 0.5)],
])
xs = np.clip(
[(np.nanmedian(estimates[p]) - l)/(u - l) for p, (l, u) in bounds.items()],
0, 1)
for ii, i in enumerate(indices):
# Weighted mean from each node first (over many observations)
mu_node = np.zeros(M)
var_node_sys = np.zeros(M)
var_node_rand = np.zeros(M)
var_node_stat = np.zeros(M)
node_ids = np.zeros(M)
for j, s in enumerate(estimates.groups.indices[:-1]):
e = estimates.groups.indices[j + 1]
L = e - s
k = np.where(estimates["node_id"][s] == unique_node_ids)[0][0]
node_ids[j] = estimates["node_id"][s]
# Get the 'unbiased' values.
mu = np.array(
estimates[parameter][s:e] - samples["biases"][i, k])
spectrum_snr = np.clip(estimates["snr"][s:e], 1, 500)
            var_node_sys[j] = samples["vs_c"][i, k] * np.exp(np.sum([
samples["vs_a"][i, l, k] * pow(1 - xs[l], samples["vs_b"][i, l, k]) \
for l in range(len(xs))]))
diag_variance = (samples["alpha_sq"][i, k]/spectrum_snr) \
+ var_node_sys[j]
C = np.eye(L) * diag_variance
W = np.ones((L, 1))
Cinv = np.linalg.inv(C)
# Get the weighted mean for this node, and the statistical
# uncertainty associated with that estimate.
var_node_stat[j] = 1.0/np.dot(np.dot(W.T, Cinv), W)
mu_node[j] = var_node_stat[j] * np.dot(np.dot(W.T, Cinv), mu)
weights = 1.0/diag_variance
var_node_rand[j] \
= np.sqrt(np.sum(weights * (mu - mu_node[j])**2)/np.sum(weights))
node_ids = node_ids.astype(int)
# Construct the covariance matrix for node-to-node measurements.
# (This includes the systematic component.)
I = np.eye(M)
C = I * (var_node_rand + var_node_sys + var_node_stat)
L = samples["L_corr"][i]
rho = np.dot(
np.dot(np.eye(L.shape[1]), L),
np.dot(np.eye(L.shape[1]), L).T)
for j in range(M):
for k in range(j + 1, M):
                a = np.where(unique_node_ids == node_ids[j])[0][0]
                b = np.where(unique_node_ids == node_ids[k])[0][0]
                C[j, k] = C[k, j] = rho[a, b] * (C[j, j] * C[k, k])**0.5
W = np.ones((M, 1))
#Cinv = np.linalg.inv(C)
# Use SVD to invert matrix.
U, s, V = np.linalg.svd(C)
Cinv = np.dot(np.dot(V.T, np.linalg.inv(np.diag(s))), U.T)
var = 1.0/np.dot(np.dot(W.T, Cinv), W)
if var < 0:
logger.warn("Negative variance returned!")
            raise ValueError(
                "negative variance returned when homogenising {}".format(cname))
mu = np.abs(var) * np.dot(np.dot(W.T, Cinv), mu_node)
mu_samples[ii] = mu
var_samples[ii] = var
# We have some distribution of mu now (with a statistical uncertainty)
c = np.percentile(mu_samples, [16, 50, 84])
central, pos_error, neg_error = (c[1], c[2] - c[1], c[0] - c[1])
stat_error = np.median(np.sqrt(var_samples))
if update_database:
data = {
"wg": wg,
"cname": cname,
"snr": np.nanmedian(np.clip(estimates["snr"], 1, 500)),
parameter: central,
"e_pos_{}".format(parameter): pos_error,
"e_neg_{}".format(parameter): neg_error,
"e_{}".format(parameter): stat_error,
"nn_nodes_{}".format(parameter): len(set(estimates["node_id"])),
"nn_spectra_{}".format(parameter): len(set(estimates["filename"])),
"provenance_ids_for_{}".format(parameter): list(estimates["id"].data.astype(int))
}
record = database.retrieve(
""" SELECT id
FROM wg_recommended_results
WHERE wg = %s
AND cname = %s
""", (wg, cname, ))
if record:
database.update(
""" UPDATE wg_recommended_results
SET {}
WHERE id = '{}'""".format(
", ".join([" = ".join([k, "%s"]) for k in data.keys()]),
record[0][0]),
data.values())
logger.info(
"Updated record {} in wg_recommended_results".format(record[0][0]))
else:
new_record = database.retrieve(
""" INSERT INTO wg_recommended_results ({})
VALUES ({}) RETURNING id""".format(
", ".join(data.keys()), ", ".join(["%s"] * len(data))),
data.values())
logger.info(
"Created new record {} in wg_recommended_results ({} / {})"\
.format(new_record[0][0], wg, cname))
return (central, pos_error, neg_error, stat_error)
class MedianModel(object):
def __init__(self, database, wg, parameter, default_sigma=None, **kwargs):
self._database = database
self._wg = wg
self._parameter = parameter
if default_sigma is None:
self._default_sigma = {
"xi": 0.5,
"alpha_fe": 0.5,
}.get(parameter)
else:
self._default_sigma = default_sigma
return None
def _homogenise_survey_measurement(self, cname, update_database=False,
**kwargs):
param = self._parameter
# Just return a median and stddev.
records = self._database.retrieve_table(
""" SELECT results.id, node_id, filename, {parameter}, e_{parameter}
FROM results, nodes
WHERE results.node_id = nodes.id
AND nodes.wg = '{wg}'
AND results.cname = '{cname}'
AND results.passed_quality_control;""".format(
parameter=param, wg=self._wg, cname=cname))
if records is None:
return {}
N = len(set(records["node_id"]))
S = len(set(records["filename"]))
mu = np.nanmedian(records[param])
sigma = np.nanstd(records[param])/np.sqrt(S)
provenance = list(records["id"].data.astype(int))
if np.isfinite(mu) and (not np.isfinite(sigma) or sigma <= 0):
sigma = kwargs.get("default_sigma", self._default_sigma)
result = {
"wg": self._wg,
"cname": cname,
param: mu,
"e_{}".format(param): sigma,
"nn_nodes_{}".format(param): N,
"nn_spectra_{}".format(param): S,
"provenance_ids_for_{}".format(param): provenance
}
logger.info(
"Homogenised {} for {}/{}: {:.2f} +/- {:.2f} ({} spectra; {} nodes)"\
.format(param, self._wg, cname, mu, sigma, S, N))
if update_database:
record = self._database.retrieve(
""" SELECT id
FROM wg_recommended_results
WHERE wg = %s
AND cname = %s
""", (self._wg, cname, ))
if record:
self._database.update(
""" UPDATE wg_recommended_results
SET {}
WHERE id = '{}'""".format(
", ".join([" = ".join([k, "%s"]) for k in result.keys()]),
record[0][0]),
result.values())
logger.info(
"Updated record {} in wg_recommended_results".format(record[0][0]))
else:
new_record = self._database.retrieve(
""" INSERT INTO wg_recommended_results ({})
VALUES ({}) RETURNING id""".format(
", ".join(result.keys()), ", ".join(["%s"] * len(result))),
result.values())
logger.info(
"Created new record {} in wg_recommended_results ({} / {})"\
.format(new_record[0][0], self._wg, cname))
return result
def homogenise_all_stars(self, update_database=False, **kwargs):
"""
Homogenise the stellar astrophysical parameter for all stars in the
database that are analysed by the current working group.
Note: this will homogenise a single parameter for each survey object.
"""
# Get all unique cnames.
records = self._database.retrieve_table(
""" WITH s AS (
SELECT id FROM nodes WHERE wg = %s)
SELECT DISTINCT ON (r.cname) r.cname
FROM s, results AS r
WHERE r.node_id = s.id
ORDER BY cname DESC""", (self._wg, ))
# Get samples and data dictionary -- it will be faster.
N = len(records)
for i, cname in enumerate(records["cname"]):
self._homogenise_survey_measurement(
cname, update_database=update_database, **kwargs)
if update_database:
self._database.connection.commit()
return None
class BaseEnsembleModel(object):
def __init__(self, database, wg, parameter, calibrators, recompile=False,
overwrite=True, **kwargs):
self._database = database
self._wg = wg
self._parameter = parameter
self._calibrators = calibrators
model_code = kwargs.get("model_code", None)
if model_code is None:
with open(self._MODEL_PATH, "r") as fp:
model_code = fp.read()
        self._model_code = model_code
        # Compile (or load a pre-compiled copy of) the Stan model so that
        # `optimize`, `sample` and `write` below can use `self._model`.
        self._model = self._load_model(recompile, overwrite)
# For later.
self._data = None
self._fit = None
self._chains = None
return None
def _load_model(self, recompile, overwrite, path=None):
"""
Load the model.
:param recompile:
Re-compile the model if it has already been compiled.
:param overwrite:
Overwrite the compiled model path if it already exists.
:param path: [optional]
The path of the file containing the model code. Defaults to
`self._MODEL_PATH`
"""
# Is the model already compiled?
path = path or self._MODEL_PATH
compiled_path = "{}.compiled".format(path)
while os.path.exists(compiled_path) and not recompile:
with open(compiled_path, "rb") as fp:
model = pickle.load(fp)
# Check that the model code is the same as what we expected.
with open(path, "r") as fp:
model_code = fp.read()
if model_code != model.model_code:
logger.warn("Pre-compiled model differs to the code in {}; "\
"re-compiling".format(path))
recompile = True
continue
else:
logger.info(
"Using pre-compiled model from {}".format(compiled_path))
break
else:
logger.info("Compiling model from {}".format(path))
model = stan.StanModel(file=path)
# Save the compiled model.
if not os.path.exists(compiled_path) or overwrite:
with open(compiled_path, "wb") as fp:
pickle.dump(model, fp, -1)
return model
def _validate_stan_inputs(self, **kwargs):
"""
Check the format of the initial values for the model. If a dictionary
is specified and multiple chains are given, then the initial value will
be re-cast as a list of dictionaries (one per chain).
"""
# Copy the dictionary of keywords.
kwds = {}
kwds.update(kwargs)
# Allow for a keyword that will disable any verification checks.
if not kwds.pop("validate", True):
return kwds
# Check chains and init values.
if "init" in kwds.keys() and isinstance(kwds["init"], dict) \
and kwds.get("chains", 1) > 1:
init, chains = (kwds["init"], kwds.get("chains", 1))
logger.info(
"Re-specifying initial values to be list of dictionaries, "\
"allowing one dictionary per chain ({}). "\
"Specify validate=False to disable this behaviour"\
.format(chains))
kwds["init"] = [init] * chains
if kwargs.get("data", None) is None:
try:
self._data
except AttributeError:
self._data, self._metadata = self._prepare_data()
kwds["data"] = self._data
return kwds
def optimize(self, data=None, recompile=False, overwrite=False, **kwargs):
"""
Optimize the model given the data. Keyword arguments are passed directly
to the `StanModel.optimizing` method.
:param data:
A dictionary containing the required key/value pairs for the STAN
model.
:param recompile: [optional]
Re-compile the model if it has already been compiled.
:param overwrite: [optional]
Overwrite the compiled model path if it already exists.
"""
kwds = self._validate_stan_inputs(data=data, **kwargs)
return self._model.optimizing(**kwds)
def sample(self, data=None, chains=4, iter=2000, warmup=None, recompile=False,
overwrite=False, **kwargs):
"""
Draw samples from the model. Keyword arguments are passed directly to
`StanModel.sampling`.
:param data:
A dictionary containing the required key/value pairs for the Stan
model.
:param chains: [optional]
Positive integer specifying the number of chains.
:param iter:
Positive integer specifying how many iterations for each chain
including warmup.
:param warmup: [optional]
Positive integer specifying the number of warmup (aka burn-in)
iterations. As warm-up also specifies the number of iterations used
for step-size adaption, warmup samples should not be used for
inference. Defaults to iter // 2.
:param recompile: [optional]
Re-compile the model if it has already been compiled.
:param overwrite: [optional]
Overwrite the compiled model path if it already exists.
"""
kwds = self._validate_stan_inputs(
data=data, chains=chains, iter=iter, warmup=warmup, **kwargs)
self._fit = self._model.sampling(**kwds)
return self._fit
def homogenise_star(self, cname, **kwargs):
"""
        Produce an unbiased estimate of an astrophysical parameter for a given
survey object.
:param cname:
The CNAME (unique star identifier) of an object.
:returns:
An array of draws from the marginalized posterior distribution.
"""
return _homogenise_survey_measurements(
self._database, self._wg, self._parameter, cname,
stan_model=self, **kwargs)
def homogenise_all_stars(self, **kwargs):
"""
Homogenise the stellar astrophysical parameter for all stars in the
database that are analysed by the current working group.
Note: this will homogenise a single parameter for each survey object.
"""
# Get all unique cnames.
records = self._database.retrieve_table(
""" WITH s AS (
SELECT id FROM nodes WHERE wg = %s)
SELECT DISTINCT ON (r.cname) r.cname
FROM s, results AS r
WHERE r.node_id = s.id
ORDER BY cname DESC""", (self._wg, ))
# Get samples and data dictionary -- it will be faster.
if self._chains is None:
self._extract_chains(**kwargs)
assert self._data is not None
N = len(records)
for i, cname in enumerate(records["cname"]):
mu, e_pos, e_neg, e_stat = _homogenise_survey_measurements(
self._database, self._wg, self._parameter, cname,
stan_model=self, **kwargs)
logger.info("Homogenised {parameter} for {cname} (WG{wg} {i}/{N}): "
"{mu:.2f} ({pos_error:.2f}, {neg_error:.2f}, {stat_error:.2f})"\
.format(
parameter=self._parameter, cname=cname,
wg=self._wg, i=i+1, N=N, mu=mu,
pos_error=e_pos, neg_error=e_neg, stat_error=e_stat))
if kwargs.get("update_database", True):
self._database.connection.commit()
return None
def _extract_chains(self, **kwargs):
ignore_model_pars = kwargs.get("__ignore_model_pars", ("Sigma", ))
model_pars = set(self._fit.model_pars).difference(ignore_model_pars)
self._chains = self._fit.extract(pars=model_pars)
return None
def write(self, filename, overwrite=False, **kwargs):
"""
Write the model to disk, including any MCMC chains and data dictionaries
from Stan.
:param filename:
The local path of where to write the model.
:param overwrite: [optional]
Overwrite the `filename` if it already exists.
"""
if os.path.exists(filename) and not overwrite:
raise IOError(
"filename {} exists and not overwriting".format(filename))
state = {
"model_path": self._MODEL_PATH,
"model_code": self._model.model_code,
"wg": self._wg,
"parameter": self._parameter,
"calibrators": self._calibrators,
"data": self._data,
"metadata": self._metadata,
"chains": None
}
if self._chains is not None:
state["chains"] = self._chains
elif self._fit is not None:
# Extract chains.
self._extract_chains()
state["chains"] = self._chains
with open(filename, "wb") as fp:
pickle.dump(state, fp, -1)
return None
@classmethod
def read(cls, filename, database, **kwargs):
"""
Read a saved model from disk.
:param filename:
The local path of where the model is saved.
:param database:
The database connection that this model will use.
"""
with open(filename, "rb") as fp:
state = pickle.load(fp)
klass = \
cls(database, state["wg"], state["parameter"], state["calibrators"],
model_code=state.get("model_code", None), **kwargs)
# Update the klass.
klass._data = state.get("data", None)
klass._metadata = state.get("metadata", None)
klass._chains = state.get("chains", None)
return klass
def plot_node_uncertainty_with_snr(self, **kwargs):
return plot.node_uncertainty_with_snr(self, **kwargs)
def plot_node_correlations(self, **kwargs):
return plot.node_correlations(self, **kwargs)
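# A minimal usage sketch (kept as a comment so it has no effect at import time).
# It assumes a database wrapper and a calibrator table are constructed
# elsewhere; the variable names below are illustrative only:
#
#     model = EnsembleModel(database, 11, "teff", calibrators)
#     model.sample(chains=4, iter=2000)      # data are prepared lazily
#     model.homogenise_all_stars(update_database=False)
#     model.write("wg11-teff-ensemble.pkl", overwrite=True)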
class EnsembleModel(BaseEnsembleModel):
_MODEL_PATH = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"ensemble-model.stan")
def _prepare_data(self, parameter=None, default_sigma_calibrator=1e3,
minimum_node_estimates=1, sql_constraint=None):
"""
Prepare the data for the model so that it can be supplied to Stan.
:param parameter: [optional]
The name of the model parameter (e.g., teff) that will be used in
this single parameter ensemble model. If `None` provided, then this
defaults to the model parameter provided when the EnsembleModel was
initiated.
:param minimum_node_estimates: [optional]
The minimum number of node measurements for a single visit of a
calibrator spectrum. If this is set to a negative value, then only
calibrator results will be used where *all* nodes have provided a
measurement for that spectrum.
:param sql_constraint: [optional]
Specify an optional SQL constraint to include when retrieving the
node results.
"""
parameter = str(parameter or self._parameter).lower()
valid_parameters = ["teff", "logg", "feh"]
if parameter not in valid_parameters:
raise AreYouSureYouKnowWhatYoureDoing
# Get the data from the database for this WG.
# TODO: Need a better way to identify calibrators. Right now we do it
# just on GES_TYPE, but in future we may want to do this directly
# on to CNAMEs in the calibrator list.
data = self._database.retrieve_table(
""" WITH n AS (
SELECT id FROM nodes WHERE wg = {wg}),
s AS (
SELECT cname, ges_type, ges_fld
FROM spectra
WHERE ges_type LIKE 'GE_SD_B%')
SELECT DISTINCT ON (r.filename, r.node_id)
s.cname, s.ges_type, s.ges_fld,
r.filename, r.node_id, r.snr,
r.{parameter}, r.e_{parameter}
FROM s, n, results as r
WHERE r.cname = s.cname
AND r.node_id = n.id
AND r.passed_quality_control = true {sql_constraint}
""".format(
wg=self._wg, parameter=parameter,
sql_constraint="" if sql_constraint is None \
else " AND {}".format(sql_constraint)))
        assert data is not None, "No calibrator data from WG {}".format(self._wg)
# Calibrator parameter names
calibrator_name, calibrator_e_name = _guess_parameter_name(
self._calibrators, parameter)
finite_calibrator = np.isfinite(self._calibrators[calibrator_name])
if not np.all(finite_calibrator):
logger.warn("Not all calibrator values of {} are finite! ({}/{})"\
.format(parameter, sum(finite_calibrator), len(finite_calibrator)))
# OK now update the data dictionary with the spectroscopic measurements.
# Need to group by node id and CNAME.
calibrators = self._calibrators.copy()
# Common calibrators to serve as an indexing reference.
common_calibrators = set(map(str.strip, calibrators["GES_FLD"]))\
.intersection(map(str.strip, data["ges_fld"]))
common_calibrators = np.sort(list(common_calibrators))
# Remove calibrators not in common.
keep = np.ones(len(self._calibrators), dtype=bool)
for i, ges_fld in enumerate(self._calibrators["GES_FLD"]):
if ges_fld.strip() not in common_calibrators:
keep[i] = False
calibrators = self._calibrators[keep]
calibrators.sort("GES_FLD")
assert calibrators["GES_FLD"][0].strip() == common_calibrators[0]
C = len(calibrators)
unique_estimators = np.sort(np.unique(data["node_id"]))
N = unique_estimators.size
# Get the maximum number of visits for any calibrator
data = data.group_by(["cname"])
V = np.max([len(set(group["filename"])) for group in data.groups])
skipped = {}
visits = np.zeros(C, dtype=int)
estimates = np.nan * np.ones((C, N, V))
spectrum_ivar = np.zeros((C, V))
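        # Fill the (calibrator, node, visit) cube of estimates and record each
        # visit's inverse variance (SNR^2); rows are grouped by CNAME above.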
for i, si in enumerate(data.groups.indices[:-1]):
ges_fld = data["ges_fld"][si].strip()
if ges_fld not in common_calibrators: continue
c = np.where(ges_fld == common_calibrators)[0][0]
ei = data.groups.indices[i + 1]
filename_visits = np.unique(data["filename"][si:ei])
for k in range(si, ei):
n = np.where(data["node_id"][k] == unique_estimators)[0][0]
v = np.where(data["filename"][k] == filename_visits)[0][0]
estimates[c, n, v] = data[parameter][k]
snr = np.clip(data["snr"][k], 1, np.inf)
spectrum_ivar[c, v] = snr**2
visits[c] = np.sum(np.any(np.isfinite(estimates[c]), axis=0))
# Remove any nodes with zero measurements.
keep = np.array([np.any(np.isfinite(estimates[:, n, :])) for n in range(N)])
estimates = estimates[:, keep, :]
unique_estimators = unique_estimators[keep]
N = sum(keep)
if minimum_node_estimates != 0:
if minimum_node_estimates < 0:
minimum_node_estimates = N
# Only keep visits that have at least the number of minimum node measurements
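            # Compact the surviving visits to the front of the visit axis so the
            # trailing columns act purely as padding (NaN estimates, zero ivar).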
for c in range(C):
mask = np.sum(np.isfinite(estimates[c]), axis=0) >= minimum_node_estimates
n_full_rank = mask.sum()
estimates[c][:, :n_full_rank] = estimates[c][:, mask]
estimates[c][:, n_full_rank:] = np.nan
spectrum_ivar[c, :n_full_rank] = spectrum_ivar[c][mask]
spectrum_ivar[c, n_full_rank:] = 0
# Update the number of visits for this calibrator
visits[c] = n_full_rank
_slice = visits > 0
calibrators = calibrators[_slice]
visits = visits[_slice]
estimates = estimates[_slice, :, :max(visits)]
spectrum_ivar = spectrum_ivar[_slice, :max(visits)]
C, _, V = estimates.shape
# Construct N_missing
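        # An estimate only counts as missing if it is non-finite *and* falls
        # within the recorded number of visits for that calibrator.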
is_missing = np.zeros(estimates.shape, dtype=bool)
for c, v in enumerate(visits):
is_missing[c] = (~np.isfinite(estimates[c])) \
* (np.tile(np.arange(V), N).reshape(N, V) < v)
if minimum_node_estimates >= N: assert np.sum(is_missing) == 0
mu_calibrator = np.array(calibrators[calibrator_name])
sigma_calibrator = np.array(calibrators[calibrator_e_name])
if not np.all(np.isfinite(sigma_calibrator)):
logger.warn("Not all calibrator uncertainties are finite! "
"Filling in with default value {}".format(
default_sigma_calibrator))
sigma_calibrator[~np.isfinite(sigma_calibrator)] = default_sigma_calibrator
data_dict = {
"N": N, # number of nodes
"C": C, # number of calibrators
"V": V, # maximum number of visits to any calibrator
"visits": visits.astype(int),
"is_missing": is_missing.astype(int),
"TM": np.sum(is_missing),
"estimates": estimates,
"spectrum_ivar": spectrum_ivar.T,
"spectrum_isnr": 1.0/np.sqrt(spectrum_ivar.T),
"mu_calibrator": mu_calibrator,
"sigma_calibrator": sigma_calibrator,
"S": 3, # TODO: Make this flexible? Make the calibrator values accessible from kwargs?
"all_mu_calibrator": np.vstack(
[calibrators[p] for p in ("TEFF", "LOGG", "FEH")]).T
}
alpha_bounds = dict(teff=(100, 1000), logg=(0.1, 1.0), feh=(0.1, 1.0))
data_dict.update(
dict(zip(("lower_alpha_sq", "upper_alpha_sq"), np.array(alpha_bounds[parameter])**2)))
bounds = dict(teff=(3000, 8000), logg=(0, 5), feh=(-3.5, 0.5))
data_dict.update(
dict(zip(("lower_bound", "upper_bound"), bounds[parameter])))
# Create additional metadata
node_names = self._database.retrieve_table("SELECT * FROM nodes")
metadata = {
"calibrators": calibrators,
"node_ids": unique_estimators,
"node_names": \
[node_names["name"][node_names["id"] == node_id][0].strip() \
for node_id in unique_estimators]
}
result = (data_dict, metadata)
self._data, self._metadata = result
return result
class CNAMEEnsembleModel(EnsembleModel):
_MODEL_PATH = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"ensemble-model.stan")
def _prepare_data(self, parameter=None, default_sigma_calibrator=1e3,
minimum_node_estimates=1, sql_constraint=None):
"""
Prepare the data for the model so that it can be supplied to Stan.
:param parameter: [optional]
The name of the model parameter (e.g., teff) that will be used in
this single parameter ensemble model. If `None` provided, then this
defaults to the model parameter provided when the EnsembleModel was
initiated.
:param minimum_node_estimates: [optional]
The minimum number of node measurements for a single visit of a
calibrator spectrum. If this is set to a negative value, then only
calibrator results will be used where *all* nodes have provided a
measurement for that spectrum.
:param sql_constraint: [optional]
Specify an optional SQL constraint to include when retrieving the
node results.
"""
parameter = str(parameter or self._parameter).lower()
valid_parameters = ["teff", "logg", "feh"]
if parameter not in valid_parameters:
raise AreYouSureYouKnowWhatYoureDoing
# Get the data from the database for this WG.
calibrator_cnames = self._calibrators["CNAME"]
        data = self._database.retrieve_table(
            """ WITH n AS (
                    SELECT id FROM nodes WHERE wg = {wg})
                SELECT DISTINCT ON (r.filename, r.node_id)
                    r.cname, r.filename, r.node_id, r.snr,
                    r.{parameter}, r.e_{parameter}
                FROM n, results as r
                WHERE TRIM(r.cname) in ({calibrator_cnames})
                  AND r.node_id = n.id
                  AND r.passed_quality_control = true {sql_constraint}
                """.format(
                    wg=self._wg, parameter=parameter,
                    sql_constraint="" if sql_constraint is None \
                        else " AND {}".format(sql_constraint),
                    calibrator_cnames=", ".join(
                        ["'{}'".format(cname.strip()) for cname in calibrator_cnames])))
        assert data is not None, "No calibrator data from WG {}".format(self._wg)
# Calibrator parameter names
calibrator_col, calibrator_e_col \
= _guess_parameter_name(self._calibrators, parameter)
finite_calibrator = np.isfinite(self._calibrators[calibrator_col])
if not np.all(finite_calibrator):
logger.warn("Not all calibrator values of {} are finite! ({}/{})"\
.format(parameter, sum(finite_calibrator), len(finite_calibrator)))
# OK now update the data dictionary with the spectroscopic measurements.
# Need to group by node id and CNAME.
calibrators = self._calibrators.copy()
# Common calibrators to serve as an indexing reference.
common_calibrators = set(map(str.strip, calibrator_cnames))\
.intersection(map(str.strip, data["cname"]))
common_calibrators = np.sort(list(common_calibrators))
# Remove calibrators not in common.
keep = np.ones(len(self._calibrators), dtype=bool)
for i, cname in enumerate(self._calibrators["CNAME"]):
if cname.strip() not in common_calibrators:
keep[i] = False
calibrators = self._calibrators[keep]
calibrators.sort("CNAME")
assert calibrators["CNAME"][0].strip() == common_calibrators[0]
C = len(calibrators)
unique_estimators = np.sort(np.unique(data["node_id"]))
N = unique_estimators.size
# Get the maximum number of visits for any calibrator
data = data.group_by(["cname"])
V = np.max([len(set(group["filename"])) for group in data.groups])
skipped = {}
visits = np.zeros(C, dtype=int)
estimates = np.nan * np.ones((C, N, V))
spectrum_ivar = np.zeros((C, V))
for i, si in enumerate(data.groups.indices[:-1]):
cname = data["cname"][si].strip()
if cname not in common_calibrators: continue
c = np.where(cname == common_calibrators)[0][0]
ei = data.groups.indices[i + 1]
filename_visits = np.unique(data["filename"][si:ei])
for k in range(si, ei):
n = np.where(data["node_id"][k] == unique_estimators)[0][0]
v = np.where(data["filename"][k] == filename_visits)[0][0]
estimates[c, n, v] = data[parameter][k]
snr = np.clip(data["snr"][k], 1, np.inf)
spectrum_ivar[c, v] = snr**2
visits[c] = np.sum(np.any(np.isfinite(estimates[c]), axis=0))
# Remove any nodes with zero measurements.
keep = np.array([np.any(np.isfinite(estimates[:, n, :])) for n in range(N)])
estimates = estimates[:, keep, :]
unique_estimators = unique_estimators[keep]
N = sum(keep)
if minimum_node_estimates != 0:
if minimum_node_estimates < 0:
minimum_node_estimates = N
# Only keep visits that have at least the number of minimum node measurements
for c in range(C):
mask = np.sum(np.isfinite(estimates[c]), axis=0) >= minimum_node_estimates
n_full_rank = mask.sum()
estimates[c][:, :n_full_rank] = estimates[c][:, mask]
estimates[c][:, n_full_rank:] = np.nan
spectrum_ivar[c, :n_full_rank] = spectrum_ivar[c][mask]
spectrum_ivar[c, n_full_rank:] = 0
# Update the number of visits for this calibrator
visits[c] = n_full_rank
_slice = visits > 0
calibrators = calibrators[_slice]
visits = visits[_slice]
estimates = estimates[_slice, :, :max(visits)]
spectrum_ivar = spectrum_ivar[_slice, :max(visits)]
C, _, V = estimates.shape
# Construct N_missing
is_missing = np.zeros(estimates.shape, dtype=bool)
for c, v in enumerate(visits):
is_missing[c] = (~np.isfinite(estimates[c])) \
* (np.tile(np.arange(V), N).reshape(N, V) < v)
if minimum_node_estimates >= N: assert np.sum(is_missing) == 0
mu_calibrator = np.array(calibrators[calibrator_col])
sigma_calibrator = np.array(calibrators[calibrator_e_col])
if not np.all(np.isfinite(sigma_calibrator)):
logger.warn("Not all calibrator uncertainties are finite! "
"Filling in with default value {}".format(
default_sigma_calibrator))
sigma_calibrator[~np.isfinite(sigma_calibrator)] = default_sigma_calibrator
data_dict = {
"N": N, # number of nodes
"C": C, # number of calibrators
"V": V, # maximum number of visits to any calibrator
"visits": visits.astype(int),
"is_missing": is_missing.astype(int),
"TM": np.sum(is_missing),
"estimates": estimates,
"spectrum_ivar": spectrum_ivar.T,
"spectrum_isnr": 1.0/np.sqrt(spectrum_ivar.T),
"mu_calibrator": mu_calibrator,
"sigma_calibrator": sigma_calibrator,
"S": 3, # TODO: Make this flexible? Make the calibrator values accessible from kwargs?
"all_mu_calibrator": np.vstack(
[calibrators[p] for p in ("TEFF", "LOGG", "FEH")]).T
}
alpha_bounds = dict(teff=(100, 1000), logg=(0.1, 1.0), feh=(0.1, 1.0))
data_dict.update(
dict(zip(("lower_alpha_sq", "upper_alpha_sq"), np.array(alpha_bounds[parameter])**2)))
bounds = dict(teff=(3000, 8000), logg=(0, 5), feh=(-3.5, 0.5))
data_dict.update(
dict(zip(("lower_bound", "upper_bound"), bounds[parameter])))
# Create additional metadata
node_names = self._database.retrieve_table("SELECT * FROM nodes")
metadata = {
"calibrators": calibrators,
"node_ids": unique_estimators,
"node_names": \
[node_names["name"][node_names["id"] == node_id][0].strip() \
for node_id in unique_estimators]
}
result = (data_dict, metadata)
self._data, self._metadata = result
return result
| 40,550 | 34.854111 | 98 | py |
YouTokenToMe | YouTokenToMe-master/setup.py | import io
import os
from setuptools import Extension, find_packages, setup
from Cython.Build import cythonize
extensions = [
Extension(
"_youtokentome_cython",
[
"youtokentome/cpp/yttm.pyx",
"youtokentome/cpp/bpe.cpp",
"youtokentome/cpp/utils.cpp",
"youtokentome/cpp/utf8.cpp",
],
extra_compile_args=["-std=c++11", "-pthread", "-O3"],
language="c++",
)
]
with io.open(
os.path.join(os.path.abspath(os.path.dirname(__file__)), "README.md"),
encoding="utf-8",
) as f:
LONG_DESCRIPTION = "\n" + f.read()
setup(
name="youtokentome",
version="1.0.6",
packages=find_packages(),
description="Unsupervised text tokenizer focused on computational efficiency",
long_description=LONG_DESCRIPTION,
long_description_content_type="text/markdown",
url="https://github.com/vkcom/youtokentome",
python_requires=">=3.5.0",
install_requires=["Click>=7.0"],
entry_points={"console_scripts": ["yttm = youtokentome.yttm_cli:main"]},
author="Ivan Belonogov",
license="MIT",
classifiers=[
"License :: OSI Approved :: MIT License",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Cython",
"Programming Language :: C++",
],
ext_modules=cythonize(extensions),
)
| 1,673 | 29.436364 | 82 | py |
YouTokenToMe | YouTokenToMe-master/tests/speed_test/speed_test.py | import argparse
import os
from pathlib import Path
from time import time
from tabulate import tabulate
from tokenizers import pre_tokenizers
from tokenizers import Tokenizer as HuggingFaceBPETokenizer
from tokenizers.models import BPE as HuggingFaceBPEModel
from tokenizers.trainers import BpeTrainer as HuggingFaceBPETrainer
MODEL_FILE_NAME = "bpe.model"
MODEL_SUFFIX = ".model"
YOU_TOKEN_TO_ME = "YouTokenToMe"
SENTENCE_PIECE = "SentencePiece"
FAST_BPE = "fastBPE"
HUGGING_FACE_BPE = "Hugging_Face_BPE"
PATH_TO_FASTBPE = "./fastBPE"
class HuggingfaceInterface:
def train_from_file(self, train_file, vocab_size, model_file, _):
tokenizer = HuggingFaceBPETokenizer(HuggingFaceBPEModel(unk_token="[UNK]"))
trainer = HuggingFaceBPETrainer(special_tokens=["[UNK]", "[PAD]"], vocab_size=vocab_size)
tokenizer.pre_tokenizer = pre_tokenizers.Whitespace()
tokenizer.train([str(train_file)], trainer)
tokenizer.save(model_file)
def encode_file(self, model_path, path_in, path_out, _):
tokenizer = HuggingFaceBPETokenizer.from_file(model_path)
with open(path_in) as fin:
full_text = fin.readlines()
tokenizer.encode_batch(full_text)
class SentencePieceInterface:
def train_from_file(self, train_file, vocab_size, model_file, _):
tmp = model_file.split(".")
assert len(tmp) == 2
assert tmp[1] == "model"
train_command = f"spm_train "
train_command += f" --input={str(train_file)} "
train_command += f" --model_prefix={tmp[0]} "
train_command += f" --vocab_size={vocab_size} "
train_command += f" --character_coverage=1.0 "
train_command += f" --model_type=bpe "
assert os.system(train_command) == 0
def encode_file(self, model_path, path_in, path_out, _):
encode_command = f"spm_encode "
encode_command += f" --model={model_path} "
encode_command += f" --output_format=piece "
encode_command += f" < {path_in} > {path_out} "
assert os.system(encode_command) == 0
class FastBPEInterface:
def train_from_file(self, file_path, vocab_size, model_file, _):
train_command = f"{PATH_TO_FASTBPE} learnbpe"
train_command += f" {vocab_size} {str(file_path)} > {model_file}"
assert os.system(train_command) == 0
def encode_file(self, model_path, path_in, path_out, _):
encode_command = f"{PATH_TO_FASTBPE} applybpe {path_out} {path_in} {model_path}"
assert os.system(encode_command) == 0
class YouTokenToMeInterface:
def train_from_file(self, file_path, vocab_size, model_path, n_threads):
train_command = f"yttm bpe "
train_command += f" --data={file_path} --model={model_path} "
train_command += f" --vocab_size={vocab_size} --n_threads={n_threads} "
assert os.system(train_command) == 0
def encode_file(self, model_path, path_in, path_out, n_threads):
encode_command = "yttm encode "
encode_command += f" --model={model_path} --output_type=id "
encode_command += f" --n_threads={n_threads} "
encode_command += f" < {str(path_in)} > {str(path_out)}"
assert os.system(encode_command) == 0
def get_bpe(impl_name):
if impl_name == YOU_TOKEN_TO_ME:
return YouTokenToMeInterface()
if impl_name == SENTENCE_PIECE:
return SentencePieceInterface()
if impl_name == FAST_BPE:
return FastBPEInterface()
if impl_name == HUGGING_FACE_BPE:
return HuggingfaceInterface()
    raise ValueError(f"Unknown BPE implementation: {impl_name}")
def check_train(algorithm, vocab_size, corpus_path, n_threads):
bpe = get_bpe(algorithm)
start_time = time()
bpe.train_from_file(corpus_path, vocab_size, MODEL_FILE_NAME, n_threads)
return time() - start_time
def check_inference_file(algorithm, corpus_path, n_threads):
bpe = get_bpe(algorithm)
start_time = time()
bpe.encode_file(MODEL_FILE_NAME, corpus_path, "rm_it.txt", n_threads)
return time() - start_time
def download_xml2txt():
if not Path("xml2txt.pl").exists():
print("downloading xml2txt.pl ...")
os.system("wget https://www.dropbox.com/s/p3ta9spzfviovk0/xml2txt.pl")
def prepare_data(zip_path, size_mb):
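    # Pipeline: .xml.bz2 archive -> raw wiki XML -> plain text (via xml2txt.pl)
    # -> a ~`size_mb` MB prefix used as the benchmark corpus. Each stage is
    # skipped if its output file already exists.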
expected_extension = ".xml.bz2"
assert zip_path.endswith(expected_extension)
base_path = Path(zip_path).parent
unzip_path = base_path / "wiki.xml"
full_text_path = base_path / "wiki.txt"
cutted_text_path = base_path / f"wiki_{size_mb}MB.txt"
if not Path(unzip_path).exists():
print(f"unziping file {zip_path} ...")
assert os.system(f"bzip2 -kdc {zip_path} > {unzip_path}") == 0
if not Path(full_text_path).exists():
print(f"converting xml to text {unzip_path} ...")
download_xml2txt()
preprocess_command = f"perl xml2txt.pl "
preprocess_command += f" -nomath -notables "
preprocess_command += f" {unzip_path} {full_text_path}"
assert os.system(preprocess_command) == 0
if not Path(cutted_text_path).exists():
byte_processed = 0
with open(cutted_text_path, "w") as fout:
with open(full_text_path, "r") as fin:
while byte_processed < size_mb * 1_000_000:
s = fin.readline()
byte_processed += len(s.encode())
fout.write(s)
return cutted_text_path
def speed_test(corpus_path, vocab_size, algorithms, n_threads):
train_res = {}
infer_res = {}
for algorithm in algorithms:
time_train = check_train(algorithm, vocab_size, corpus_path, n_threads)
time_infer = check_inference_file(algorithm, corpus_path, n_threads)
train_res[algorithm] = time_train
infer_res[algorithm] = time_infer
return train_res, infer_res
def print_results(cfg, result_name, corpuses, algorithms):
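    # Render a GitHub-style table: one row per algorithm, one column per
    # language; each cell shows wall-clock seconds plus the slowdown factor
    # relative to the fastest algorithm on that corpus.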
result_table = [
["#" for _ in range(len(corpuses) + 1)] for _ in range(len(algorithms))
]
table_header = ["#"] + [lang for lang in corpuses]
rev_lang = {lang: i for i, lang in enumerate(table_header)}
rev_algo = {algo: i for i, algo in enumerate(algorithms)}
for i, algo_name in enumerate(algorithms):
result_table[i][0] = algo_name
for lang, res in cfg.items():
best = min(res.values())
for algo in res:
j = rev_lang[lang]
i = rev_algo[algo]
multiplier_str = f"{res[algo]/best:.1f}".rstrip('0').rstrip('.')
result_table[i][j] = f"{res[algo]:.1f} (x{multiplier_str})"
table_header[0] = result_name
column_align = ["left"] + ["center" for _ in corpuses]
print(tabulate(result_table, table_header, tablefmt="github", colalign=column_align))
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--vocab_size", type=int, default=30000)
parser.add_argument("--n_threads", type=int, default=4)
parser.add_argument(
"--corpus_size", type=int, default=100, help="Size of testing corpus in MB"
)
parser.add_argument(
"--langs",
type=str,
nargs="+",
help="list of languages for speed test",
default="ru",
)
return parser.parse_args()
def main(args):
langs = args.langs if isinstance(args.langs, list) else [args.langs]
# Hugging Face - limit number of processes
os.environ["RAYON_RS_NUM_CPUS"] = str(args.n_threads)
short_to_long_names = {
"en": "English",
"ru": "Russian",
"ja": "Japanese",
"zh": "Chinese",
}
# For adding more languages check out this page https://linguatools.org/tools/corpora/wikipedia-monolingual-corpora/
all_links = {
"English": "https://www.dropbox.com/s/cnrhd11zdtc1pic/enwiki-20181001-corpus.xml.bz2?dl=1",
"Russian": "https://www.dropbox.com/s/lpfmyrl7nxn5ugg/ruwiki-20181001-corpus.xml.bz2?dl=1",
"Japanese": "https://www.dropbox.com/s/wf496hlu512z9kc/jawiki-20140807-corpus.xml.bz2?dl=1",
"Chinese": "https://www.dropbox.com/s/czhr6s5jwaljeue/zhwiki-20140804-corpus.xml.bz2?dl=1",
}
links = {
short_to_long_names[lang]: all_links[short_to_long_names[lang]]
for lang in langs
}
corpuses = {}
Path("data").mkdir(exist_ok=True)
for lang, link in links.items():
Path(f"data/{lang}").mkdir(exist_ok=True)
zip_file = f"data/{lang}/wiki.xml.bz2"
if not Path(zip_file).exists():
os.system(f"wget -O {zip_file} {link}")
corpuses[lang] = prepare_data(zip_file, args.corpus_size)
algorithms = [YOU_TOKEN_TO_ME, HUGGING_FACE_BPE, SENTENCE_PIECE, FAST_BPE]
global_train = {}
global_tokenization = {}
for lang, corpus_path in corpuses.items():
train_stat, tokenization_stat = speed_test(
corpus_path, args.vocab_size, algorithms, args.n_threads
)
global_train[lang] = train_stat
global_tokenization[lang] = tokenization_stat
print_results(global_train, f"Train {args.corpus_size}MB", corpuses, algorithms)
print_results(global_tokenization, f"Tokenization {args.corpus_size}MB", corpuses, algorithms)
if __name__ == "__main__":
args = parse_args()
main(args)
| 9,224 | 34.755814 | 120 | py |
YouTokenToMe | YouTokenToMe-master/tests/unit_tests/test_cli.py | import os
import random
from subprocess import run
from utils_for_testing import (
BASE_MODEL_FILE,
RENAME_ID_MODEL_FILE,
TEST_FILE,
TRAIN_FILE,
BOS_ID,
EOS_ID,
file_starts_with,
generate_artifacts,
)
def test_bos_eos_reverse():
generate_artifacts()
cmd_args = [
"yttm",
"encode",
f"--model={BASE_MODEL_FILE}",
"--output_type=subword",
"--n_threads=1",
"--bos",
]
run(cmd_args, stdin=open(TEST_FILE, "r"), stdout=open("log.txt", "w"), check=True)
file_starts_with("log.txt", "<BOS>")
cmd_args = [
"yttm",
"encode",
f"--model={BASE_MODEL_FILE}",
"--output_type=subword",
"--n_threads=1",
"--reverse",
"--eos",
]
run(cmd_args, stdin=open(TEST_FILE, "r"), stdout=open("log.txt", "w"), check=True)
file_starts_with("log.txt", "<EOS>")
cmd_args = [
"yttm",
"encode",
f"--model={BASE_MODEL_FILE}",
"--output_type=id",
"--n_threads=1",
"--bos",
]
run(cmd_args, stdin=open(TEST_FILE, "r"), stdout=open("log.txt", "w"), check=True)
file_starts_with("log.txt", "2")
cmd_args = [
"yttm",
"encode",
f"--model={BASE_MODEL_FILE}",
"--output_type=id",
"--n_threads=1",
"--reverse",
"--eos",
]
run(cmd_args, stdin=open(TEST_FILE, "r"), stdout=open("log.txt", "w"), check=True)
file_starts_with("log.txt", "3")
os.remove("log.txt")
def test_interactive_mode():
generate_artifacts()
print("interactive helper running id ...")
cmd = f"python interactor.py | yttm encode --stream --model={BASE_MODEL_FILE} --output_type=id > log.txt"
assert os.system(cmd) == 0
print("interactive helper running subword ...")
cmd = f"python interactor.py | yttm encode --stream --model={BASE_MODEL_FILE} --output_type=subword > log.txt"
assert os.system(cmd) == 0
os.remove("log.txt")
def test_multithreading():
generate_artifacts()
cmd_args = [
"yttm",
"encode",
f"--model={BASE_MODEL_FILE}",
"--output_type=subword",
"--n_threads=10",
]
run(cmd_args, stdin=open(TEST_FILE, "r"), stdout=open("log.txt", "w"), check=True)
def test_renaming():
generate_artifacts()
cmd_args = [
"yttm",
"encode",
f"--model={RENAME_ID_MODEL_FILE}",
"--output_type=id",
"--bos",
"--n_threads=1",
]
run(cmd_args, stdin=open(TEST_FILE, "r"), stdout=open("log.txt", "w"), check=True)
file_starts_with("log.txt", "29")
cmd_args = [
"yttm",
"encode",
f"--model={RENAME_ID_MODEL_FILE}",
"--output_type=id",
"--eos",
"--reverse",
"--n_threads=1",
]
run(cmd_args, stdin=open(TEST_FILE, "r"), stdout=open("log.txt", "w"), check=True)
file_starts_with("log.txt", "1148")
os.remove("log.txt")
def test_renaming_unknown():
generate_artifacts()
with open("local_test.txt", "w") as fout:
fout.write("z")
cmd_args = [
"yttm",
"encode",
f"--model={RENAME_ID_MODEL_FILE}",
"--output_type=id",
"--reverse",
"--n_threads=1",
]
run(
cmd_args,
stdin=open("local_test.txt", "r"),
stdout=open("log.txt", "w"),
check=True,
)
file_starts_with("log.txt", "2922")
os.remove("local_test.txt")
os.remove("log.txt")
return
def test_vocab():
generate_artifacts()
run(["yttm", "vocab", f"--model={BASE_MODEL_FILE}"], check=True)
run(["yttm", "vocab", f"--model={BASE_MODEL_FILE}", "--verbose"], check=True)
def test_decode():
generate_artifacts()
text_in = " ".join("".join([random.choice("abcd ") for _ in range(50)]).split())
with open("decode_text_in.txt", "w") as fout:
fout.write(text_in)
cmd_args = ["yttm", "encode", f"--model={BASE_MODEL_FILE}", "--output_type=id"]
run(
cmd_args,
stdin=open("decode_text_in.txt", "r"),
stdout=open("decode_id.txt", "w"),
check=True,
)
cmd_args = ["yttm", "decode", f"--model={BASE_MODEL_FILE}"]
run(
cmd_args,
stdin=open("decode_id.txt", "r"),
stdout=open("decode_text_out.txt", "w"),
check=True,
)
with open("decode_text_out.txt", "r") as fin:
text_out = fin.readline()
assert text_in == text_out[:-1]
cmd_args = [
"yttm",
"encode",
f"--model={BASE_MODEL_FILE}",
"--output_type=id",
"--bos",
"--eos",
]
run(
cmd_args,
stdin=open("decode_text_in.txt", "r"),
stdout=open("decode_id.txt", "w"),
check=True,
)
cmd_args = [
"yttm",
"decode",
f"--model={BASE_MODEL_FILE}",
f"--ignore_ids={BOS_ID},{EOS_ID}",
]
run(
cmd_args,
stdin=open("decode_id.txt", "r"),
stdout=open("decode_text_out.txt", "w"),
check=True,
)
with open("decode_text_out.txt", "r") as fin:
text_out = fin.readline()
assert text_in == text_out[:-1]
os.remove("decode_text_in.txt")
os.remove("decode_text_out.txt")
os.remove("decode_id.txt")
| 5,301 | 23.892019 | 114 | py |
YouTokenToMe | YouTokenToMe-master/tests/unit_tests/test_stress.py | import os
from subprocess import run
tests_compiled = False
def compile_test():
global tests_compiled
if tests_compiled:
return
build_files = ["bpe.cpp", "utils.cpp", "utf8.cpp"]
files = ["../../youtokentome/cpp/" + file_name for file_name in build_files]
files.append("stress_test.cpp")
print("compiling stress test ...")
command = [
"g++",
*files,
"-o",
"stress",
"-std=c++11",
"-pthread",
"-Og",
"-D_GLIBCXX_DEBUG",
"-fno-omit-frame-pointer -fsanitize=address -fsanitize=leak -fsanitize=undefined",
"-DDETERMINISTIC_QUEUE",
]
command = " ".join(command)
print("command:", command)
run(command, check=True, shell=True)
tests_compiled = True
def test_stress():
compile_test()
run(["./stress", "base", "1000"], check=True)
def test_manual():
compile_test()
run(["./stress", "manual"], check=True)
os.remove("remove_it.txt")
def test_parallel():
compile_test()
run(["./stress", "parallel", "50"], check=True)
os.remove("remove_it.txt")
| 1,115 | 20.882353 | 90 | py |
YouTokenToMe | YouTokenToMe-master/tests/unit_tests/test_python_api.py | import os
import random
import youtokentome as yttm
from utils_for_testing import (
BASE_MODEL_FILE,
RENAME_ID_MODEL_FILE,
TEST_FILE,
TRAIN_FILE,
BOS_ID,
EOS_ID,
file_starts_with,
generate_artifacts,
)
def test_encode_decode():
generate_artifacts()
os.remove(BASE_MODEL_FILE)
yttm.BPE.train(
data=TRAIN_FILE,
vocab_size=16000,
model=BASE_MODEL_FILE,
bos_id=BOS_ID,
eos_id=EOS_ID,
)
bpe = yttm.BPE(BASE_MODEL_FILE)
text_in = [" ".join("".join([random.choice("abcd ") for _ in range(50)]).split())]
ids = bpe.encode(text_in, yttm.OutputType.ID)
assert text_in == bpe.decode(ids)
ids_bos_eos = bpe.encode(text_in, yttm.OutputType.ID, bos=True, eos=True)
assert text_in == bpe.decode(ids_bos_eos, ignore_ids=[BOS_ID, EOS_ID])
assert bpe.decode(ids, ignore_ids=[]) == bpe.decode(
ids_bos_eos, ignore_ids=[BOS_ID, EOS_ID]
)
def test_vocabulary_consistency():
generate_artifacts()
os.remove(BASE_MODEL_FILE)
yttm.BPE.train(data=TRAIN_FILE, vocab_size=16000, model=BASE_MODEL_FILE)
bpe = yttm.BPE(BASE_MODEL_FILE)
assert bpe.vocab_size() == len(bpe.vocab())
assert bpe.vocab_size() == len(set(bpe.vocab()))
vc = bpe.vocab()
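    # The vocabulary must round-trip: a subword's position in vocab() equals
    # subword_to_id(subword), and id_to_subword(i) inverts it.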
for i, subword in enumerate(vc):
assert i == bpe.subword_to_id(subword)
assert subword == bpe.id_to_subword(i)
| 1,411 | 26.153846 | 86 | py |
YouTokenToMe | YouTokenToMe-master/tests/unit_tests/test_manual.py | # -*- coding: utf-8 -*-
import os
import youtokentome as yttm
def test_russian():
train_text = """
собирать cборник сборище отобранный сборщица
"""
test_text = """
собранный собрание прибор
"""
TRAIN_DATA_PATH = "train_data.txt"
MODEL_PATH = "model.yttm"
with open(TRAIN_DATA_PATH, "w") as fin:
fin.write(train_text)
model = yttm.BPE.train(TRAIN_DATA_PATH, MODEL_PATH, 50)
tokenized_text = model.encode([test_text], output_type=yttm.OutputType.SUBWORD)
expected_result = [
["▁с", "обранный", "▁с", "об", "ран", "и", "е", "▁", "п", "р", "и", "бор"]
]
assert tokenized_text == expected_result
print(tokenized_text)
os.remove(TRAIN_DATA_PATH)
def test_english():
train_text = """
anachronism
synchronous
chronology
chronic
chronophilia
chronoecological
chronocoulometry
"""
test_text = "chronocline synchroscope "
TRAIN_DATA_PATH = "train_data.txt"
MODEL_PATH = "model.yttm"
with open(TRAIN_DATA_PATH, "w") as fin:
fin.write(train_text)
model = yttm.BPE.train(TRAIN_DATA_PATH, MODEL_PATH, 200, n_threads=1)
tokenized_text = model.encode([test_text], output_type=yttm.OutputType.SUBWORD)
expected_result = [['▁chrono', 'c', 'l', 'i', 'n', 'e', '▁', 'sy', 'n', 'ch', 'r', 'o', 's', 'co', 'p', 'e']]
assert tokenized_text == expected_result
print(tokenized_text)
os.remove(TRAIN_DATA_PATH)
def test_japanese():
train_text = """
むかし、 むかし、 ある ところ に
おじいさん と おばあさん が いました。
おじいさん が 山(やま) へ 木(き) を きり に いけば、
おばあさん は 川(かわ) へ せんたく に でかけます。
「おじいさん、 はよう もどって きなされ。」
「おばあさん も き を つけて な。」
まい日(にち) やさしく いい あって でかけます
"""
test_text = " おばあさん が 川 で せん "
TRAIN_DATA_PATH = "train_data.txt"
MODEL_PATH = "model.yttm"
with open(TRAIN_DATA_PATH, "w") as fin:
fin.write(train_text)
model = yttm.BPE.train(TRAIN_DATA_PATH, MODEL_PATH, 100)
tokenized_text = model.encode([test_text], output_type=yttm.OutputType.SUBWORD)
expected_result = [["▁おばあさん", "▁が", "▁", "川", "▁", "で", "▁", "せ", "ん"]]
assert tokenized_text == expected_result
print(tokenized_text)
os.remove(TRAIN_DATA_PATH)
| 2,294 | 29.197368 | 113 | py |