import torch
import torch.nn as nn
from transformers import AutoModel, AutoTokenizer

DEFAULT_DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")


class TransformerRepresentation(nn.Module):
    def __init__(self, model_name='bert-base-uncased',
                 transformer_kwargs=None, device=DEFAULT_DEVICE):
        super().__init__()
        # Avoid a mutable default argument; these values match the original defaults.
        if transformer_kwargs is None:
            transformer_kwargs = {'attention_probs_dropout_prob': 0.1,
                                  'hidden_dropout_prob': 0.1}
        self.tokenizer = AutoTokenizer.from_pretrained(model_name)
        self.model = AutoModel.from_pretrained(model_name,
                                               output_hidden_states=True,
                                               **transformer_kwargs)
        self.embedding_dim = self.model.config.hidden_size
        self.device = device
        # Move the weights to the target device as well; moving only the
        # inputs in forward() would fail on CUDA.
        self.model.to(self.device)

    @staticmethod
    def add_subword_maps(texts, encodings):
        """
        Computes, for each pretokenized input, a map from word index to the
        (start, end) token span the tokenizer produced for that word. The
        maps are returned rather than stored on the `Encoding` objects,
        since the Rust-backed `tokenizers.Encoding` class does not accept
        new attributes.
        """
        return [[encoding.word_to_tokens(i) for i, _ in enumerate(t)]
                for encoding, t in zip(encodings, texts)]

    @staticmethod
    def apply_token_pooling_strategy(outputs, subword_maps, strategy='first'):
        """
        Applies a token pooling strategy to pretokenized inputs based on a
        sub-word mapping of words to tokens.

        :param outputs: Output of applying `TransformerRepresentation.model`
            to a pretokenized input.
        :param subword_maps: Word-to-token-span maps produced by
            `add_subword_maps`.
        :param strategy: One of ['first', 'last', 'sum', 'average'].
            Defaults to 'first'.
        :return: One tensor per input holding a pooled vector per word
            (an empty tensor if every word was truncated away); for an
            unrecognized strategy, the raw per-word sub-word vectors.
        """
        # Skip words that were truncated away during tokenization (their
        # span is None), then slice out each word's sub-word vectors.
        vec_map = [[vecs[m.start:m.end] for m in subword_map if m is not None]
                   for vecs, subword_map
                   in zip(outputs.last_hidden_state.unbind(), subword_maps)]
        if strategy == 'first':
            return [torch.stack([vec[0] for vec in vm]) if vm else torch.zeros(0)
                    for vm in vec_map]
        if strategy == 'last':
            return [torch.stack([vec[-1] for vec in vm]) if vm else torch.zeros(0)
                    for vm in vec_map]
        if strategy == 'sum':
            return [torch.stack([torch.sum(vec, dim=0) for vec in vm])
                    if vm else torch.zeros(0) for vm in vec_map]
        if strategy == 'average':
            return [torch.stack([torch.mean(vec, dim=0) for vec in vm])
                    if vm else torch.zeros(0) for vm in vec_map]
        return vec_map

    def add_special_tokens(self, tokens):
        """Registers extra special tokens and resizes the embeddings to match."""
        self.tokenizer.add_special_tokens(
            {'additional_special_tokens':
             self.tokenizer.additional_special_tokens + tokens})
        self.model.resize_token_embeddings(len(self.tokenizer))

    def forward(self, text, is_pretokenized=False, add_special_tokens=True,
                token_pooling='first'):
        inputs = self.tokenizer(text, padding='longest',
                                is_split_into_words=is_pretokenized,
                                add_special_tokens=add_special_tokens,
                                return_tensors='pt',
                                max_length=512,
                                truncation=True).to(self.device)
        output = self.model(**inputs)  # inputs were already moved to the device above
        if is_pretokenized:
            subword_maps = self.add_subword_maps(text, list(inputs.encodings))
            output.pooled_tokens = self.apply_token_pooling_strategy(
                output, subword_maps, strategy=token_pooling)
        return output
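

# A minimal usage sketch, not part of the original file: `_pooling_demo` is a
# hypothetical helper showing how the four pooling strategies differ on one
# pretokenized sentence. Each strategy yields one vector per input word,
# derived from that word's sub-word token vectors.
def _pooling_demo():
    rep = TransformerRepresentation()
    sentence = [['Peter', 'Blackburn']]
    for strategy in ('first', 'last', 'sum', 'average'):
        out = rep(sentence, is_pretokenized=True, token_pooling=strategy)
        # One pooled vector per word: shape (2, rep.embedding_dim).
        print(strategy, tuple(out.pooled_tokens[0].shape))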


if __name__ == '__main__':  # the original tested against 'main', which never matches
    # Entity annotations for a relation-extraction style example (defined but
    # not used further in the original script).
    toks = ['Tom', 'Thabane', 'resigned', 'in', 'October', 'last', 'year',
            'to', 'form', 'the', 'All', 'Basotho', 'Convention', '-LRB-',
            'ABC', '-RRB-', ',', 'crossing', 'the', 'floor', 'with', '17',
            'members', 'of', 'parliament', ',', 'causing', 'constitutional',
            'monarch', 'King', 'Letsie', 'III', 'to', 'dissolve',
            'parliament', 'and', 'call', 'the', 'snap', 'election', '.']
    e1_type = 'PERSON'
    e2_type = 'ORGANIZATION'
    e1_tokens = [0, 1]    # inclusive span: 'Tom Thabane'
    e2_tokens = [10, 12]  # inclusive span: 'All Basotho Convention'
    # Pretokenized sentences (CoNLL-2003 style) to encode.
    text = [['EU', 'rejects', 'German', 'call', 'to', 'boycott', 'British', 'lamb', '.'],
            ['Peter', 'Blackburn'],
            ['BRUSSELS', '1996-08-22'],
            ['The', 'European', 'Commission', 'said', 'on', 'Thursday', 'it', 'disagreed', 'with', 'German', 'advice', 'to', 'consumers', 'to', 'shun', 'British', 'lamb', 'until', 'scientists', 'determine', 'whether', 'mad', 'cow', 'disease', 'can', 'be', 'transmitted', 'to', 'sheep', '.'],
            ['Germany', "'s", 'representative', 'to', 'the', 'European', 'Union', "'s", 'veterinary', 'committee', 'Werner', 'Zwingmann', 'said', 'on', 'Wednesday', 'consumers', 'should', 'buy', 'sheepmeat', 'from', 'countries', 'other', 'than', 'Britain', 'until', 'the', 'scientific', 'advice', 'was', 'clearer', '.'],
            ['"', 'We', 'do', "n't", 'support', 'any', 'such', 'recommendation', 'because', 'we', 'do', "n't", 'see', 'any', 'grounds', 'for', 'it', ',', '"', 'the', 'Commission', "'s", 'chief', 'spokesman', 'Nikolaus', 'van', 'der', 'Pas', 'told', 'a', 'news', 'briefing', '.'],
            ['He', 'said', 'further', 'scientific', 'study', 'was', 'required', 'and', 'if', 'it', 'was', 'found', 'that', 'action', 'was', 'needed', 'it', 'should', 'be', 'taken', 'by', 'the', 'European', 'Union', '.'],
            ['He', 'said', 'a', 'proposal', 'last', 'month', 'by', 'EU', 'Farm', 'Commissioner', 'Franz', 'Fischler', 'to', 'ban', 'sheep', 'brains', ',', 'spleens', 'and', 'spinal', 'cords', 'from', 'the', 'human', 'and', 'animal', 'food', 'chains', 'was', 'a', 'highly', 'specific', 'and', 'precautionary', 'move', 'to', 'protect', 'human', 'health', '.']]
    model = TransformerRepresentation()
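    # Hedged completion (the original stops after constructing the model):
    # encode the pretokenized sentences and inspect the pooled per-word vectors.
    output = model(text, is_pretokenized=True, token_pooling='average')
    for sent, pooled in zip(text, output.pooled_tokens):
        print(len(sent), tuple(pooled.shape))  # words vs. (words, embedding_dim)

    # Illustrative assumption: the entity annotations above suggest wrapping
    # the spans in marker tokens before encoding, registered through
    # add_special_tokens(). Marker names like '[E1]' are hypothetical.
    model.add_special_tokens(['[E1]', '[/E1]', '[E2]', '[/E2]'])
    marked = list(toks)
    # Insert markers back-to-front so earlier indices stay valid
    # (spans are inclusive).
    marked[e2_tokens[1] + 1:e2_tokens[1] + 1] = ['[/E2]']
    marked[e2_tokens[0]:e2_tokens[0]] = ['[E2]']
    marked[e1_tokens[1] + 1:e1_tokens[1] + 1] = ['[/E1]']
    marked[e1_tokens[0]:e1_tokens[0]] = ['[E1]']
    marked_output = model([marked], is_pretokenized=True)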