version (stringclasses, 25 values) | code (stringlengths, 75-178k) | apis (sequence) | full_version (stringlengths, 1-6) | repo_name (stringlengths, 9-78) | hexsha (stringlengths, 40)
---|---|---|---|---|---|
0.4 | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import unittest
import json
import random
import shutil
import pytest
import torch
from pytorch_pretrained_bert import (BertConfig, BertModel, BertForMaskedLM,
BertForNextSentencePrediction, BertForPreTraining,
BertForQuestionAnswering, BertForSequenceClassification,
BertForTokenClassification, BertForMultipleChoice)
from pytorch_pretrained_bert.modeling import PRETRAINED_MODEL_ARCHIVE_MAP
class BertModelTest(unittest.TestCase):
class BertModelTester(object):
def __init__(self,
parent,
batch_size=13,
seq_length=7,
is_training=True,
use_input_mask=True,
use_token_type_ids=True,
use_labels=True,
vocab_size=99,
hidden_size=32,
num_hidden_layers=5,
num_attention_heads=4,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
type_sequence_label_size=2,
initializer_range=0.02,
num_labels=3,
num_choices=4,
scope=None):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.scope = scope
def prepare_config_and_inputs(self):
input_ids = BertModelTest.ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = BertModelTest.ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = BertModelTest.ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = BertModelTest.ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = BertModelTest.ids_tensor([self.batch_size, self.seq_length], self.num_labels)
choice_labels = BertModelTest.ids_tensor([self.batch_size], self.num_choices)
config = BertConfig(
vocab_size_or_config_json_file=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
initializer_range=self.initializer_range)
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def check_loss_output(self, result):
self.parent.assertListEqual(
list(result["loss"].size()),
[])
def create_bert_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
model = BertModel(config=config)
model.eval()
all_encoder_layers, pooled_output = model(input_ids, token_type_ids, input_mask)
outputs = {
"sequence_output": all_encoder_layers[-1],
"pooled_output": pooled_output,
"all_encoder_layers": all_encoder_layers,
}
return outputs
def check_bert_model_output(self, result):
self.parent.assertListEqual(
[size for layer in result["all_encoder_layers"] for size in layer.size()],
[self.batch_size, self.seq_length, self.hidden_size] * self.num_hidden_layers)
self.parent.assertListEqual(
list(result["sequence_output"].size()),
[self.batch_size, self.seq_length, self.hidden_size])
self.parent.assertListEqual(list(result["pooled_output"].size()), [self.batch_size, self.hidden_size])
def create_bert_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
model = BertForMaskedLM(config=config)
model.eval()
loss = model(input_ids, token_type_ids, input_mask, token_labels)
prediction_scores = model(input_ids, token_type_ids, input_mask)
outputs = {
"loss": loss,
"prediction_scores": prediction_scores,
}
return outputs
def check_bert_for_masked_lm_output(self, result):
self.parent.assertListEqual(
list(result["prediction_scores"].size()),
[self.batch_size, self.seq_length, self.vocab_size])
def create_bert_for_next_sequence_prediction(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
model = BertForNextSentencePrediction(config=config)
model.eval()
loss = model(input_ids, token_type_ids, input_mask, sequence_labels)
seq_relationship_score = model(input_ids, token_type_ids, input_mask)
outputs = {
"loss": loss,
"seq_relationship_score": seq_relationship_score,
}
return outputs
def check_bert_for_next_sequence_prediction_output(self, result):
self.parent.assertListEqual(
list(result["seq_relationship_score"].size()),
[self.batch_size, 2])
def create_bert_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
model = BertForPreTraining(config=config)
model.eval()
loss = model(input_ids, token_type_ids, input_mask, token_labels, sequence_labels)
prediction_scores, seq_relationship_score = model(input_ids, token_type_ids, input_mask)
outputs = {
"loss": loss,
"prediction_scores": prediction_scores,
"seq_relationship_score": seq_relationship_score,
}
return outputs
def check_bert_for_pretraining_output(self, result):
self.parent.assertListEqual(
list(result["prediction_scores"].size()),
[self.batch_size, self.seq_length, self.vocab_size])
self.parent.assertListEqual(
list(result["seq_relationship_score"].size()),
[self.batch_size, 2])
def create_bert_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
model = BertForQuestionAnswering(config=config)
model.eval()
loss = model(input_ids, token_type_ids, input_mask, sequence_labels, sequence_labels)
start_logits, end_logits = model(input_ids, token_type_ids, input_mask)
outputs = {
"loss": loss,
"start_logits": start_logits,
"end_logits": end_logits,
}
return outputs
def check_bert_for_question_answering_output(self, result):
self.parent.assertListEqual(
list(result["start_logits"].size()),
[self.batch_size, self.seq_length])
self.parent.assertListEqual(
list(result["end_logits"].size()),
[self.batch_size, self.seq_length])
def create_bert_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
model = BertForSequenceClassification(config=config, num_labels=self.num_labels)
model.eval()
loss = model(input_ids, token_type_ids, input_mask, sequence_labels)
logits = model(input_ids, token_type_ids, input_mask)
outputs = {
"loss": loss,
"logits": logits,
}
return outputs
def check_bert_for_sequence_classification_output(self, result):
self.parent.assertListEqual(
list(result["logits"].size()),
[self.batch_size, self.num_labels])
def create_bert_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
model = BertForTokenClassification(config=config, num_labels=self.num_labels)
model.eval()
loss = model(input_ids, token_type_ids, input_mask, token_labels)
logits = model(input_ids, token_type_ids, input_mask)
outputs = {
"loss": loss,
"logits": logits,
}
return outputs
def check_bert_for_token_classification_output(self, result):
self.parent.assertListEqual(
list(result["logits"].size()),
[self.batch_size, self.seq_length, self.num_labels])
def create_bert_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
model = BertForMultipleChoice(config=config, num_choices=self.num_choices)
model.eval()
multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
loss = model(multiple_choice_inputs_ids,
multiple_choice_token_type_ids,
multiple_choice_input_mask,
choice_labels)
logits = model(multiple_choice_inputs_ids,
multiple_choice_token_type_ids,
multiple_choice_input_mask)
outputs = {
"loss": loss,
"logits": logits,
}
return outputs
def check_bert_for_multiple_choice(self, result):
self.parent.assertListEqual(
list(result["logits"].size()),
[self.batch_size, self.num_choices])
def create_and_check_bert_for_attentions(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
for model_class in (BertModel, BertForMaskedLM, BertForNextSentencePrediction,
BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification,
BertForTokenClassification):
if model_class in [BertForSequenceClassification,
BertForTokenClassification]:
model = model_class(config=config, num_labels=self.num_labels, output_attentions=True)
else:
model = model_class(config=config, output_attentions=True)
model.eval()
output = model(input_ids, token_type_ids, input_mask)
attentions = output[0]
self.parent.assertEqual(len(attentions), self.num_hidden_layers)
self.parent.assertListEqual(
list(attentions[0].size()),
[self.batch_size, self.num_attention_heads, self.seq_length, self.seq_length])
def test_default(self):
self.run_tester(BertModelTest.BertModelTester(self))
def test_config_to_json_string(self):
config = BertConfig(vocab_size_or_config_json_file=99, hidden_size=37)
obj = json.loads(config.to_json_string())
self.assertEqual(obj["vocab_size"], 99)
self.assertEqual(obj["hidden_size"], 37)
def test_config_to_json_file(self):
config_first = BertConfig(vocab_size_or_config_json_file=99, hidden_size=37)
json_file_path = "/tmp/config.json"
config_first.to_json_file(json_file_path)
config_second = BertConfig.from_json_file(json_file_path)
os.remove(json_file_path)
self.assertEqual(config_second.to_dict(), config_first.to_dict())
@pytest.mark.slow
def test_model_from_pretrained(self):
cache_dir = "/tmp/pytorch_pretrained_bert_test/"
for model_name in list(PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]:
model = BertModel.from_pretrained(model_name, cache_dir=cache_dir)
shutil.rmtree(cache_dir)
self.assertIsNotNone(model)
def run_tester(self, tester):
config_and_inputs = tester.prepare_config_and_inputs()
output_result = tester.create_bert_model(*config_and_inputs)
tester.check_bert_model_output(output_result)
output_result = tester.create_bert_for_masked_lm(*config_and_inputs)
tester.check_bert_for_masked_lm_output(output_result)
tester.check_loss_output(output_result)
output_result = tester.create_bert_for_next_sequence_prediction(*config_and_inputs)
tester.check_bert_for_next_sequence_prediction_output(output_result)
tester.check_loss_output(output_result)
output_result = tester.create_bert_for_pretraining(*config_and_inputs)
tester.check_bert_for_pretraining_output(output_result)
tester.check_loss_output(output_result)
output_result = tester.create_bert_for_question_answering(*config_and_inputs)
tester.check_bert_for_question_answering_output(output_result)
tester.check_loss_output(output_result)
output_result = tester.create_bert_for_sequence_classification(*config_and_inputs)
tester.check_bert_for_sequence_classification_output(output_result)
tester.check_loss_output(output_result)
output_result = tester.create_bert_for_token_classification(*config_and_inputs)
tester.check_bert_for_token_classification_output(output_result)
tester.check_loss_output(output_result)
output_result = tester.create_bert_for_multiple_choice(*config_and_inputs)
tester.check_bert_for_multiple_choice(output_result)
tester.check_loss_output(output_result)
tester.create_and_check_bert_for_attentions(*config_and_inputs)
@classmethod
def ids_tensor(cls, shape, vocab_size, rng=None, name=None):
"""Creates a random int32 tensor of the shape within the vocab size."""
if rng is None:
rng = random.Random()
total_dims = 1
for dim in shape:
total_dims *= dim
values = []
for _ in range(total_dims):
values.append(rng.randint(0, vocab_size - 1))
return torch.tensor(data=values, dtype=torch.long).view(shape).contiguous()
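# Illustrative usage (not part of the original test): a hypothetical call
# producing a [batch_size=2, seq_length=3] tensor of ids drawn from a vocab
# of size 10.
#   ids = BertModelTest.ids_tensor([2, 3], vocab_size=10)
#   assert list(ids.size()) == [2, 3] and int(ids.max()) < 10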
if __name__ == "__main__":
unittest.main()
| [
"torch.tensor"
] | 0.4.1 | z422684562/pytorch-pretrained-BERT | 5cd0a3db927b3b9232187674aad98d04f505cece |
1.5 | import logging
import re
from itertools import zip_longest
from typing import List, Generator, TypeVar, Union
import torch
from numpy import ndarray
from transformers import BertTokenizer, AlbertTokenizer, BertModel, AlbertModel
from bio_embeddings.embed.embedder_interfaces import EmbedderWithFallback
# https://stackoverflow.com/a/39205612/3549270
RealBertEmbedder = TypeVar("RealBertEmbedder", bound="BertBaseEmbedder")
logger = logging.getLogger(__name__)
class BertBaseEmbedder(EmbedderWithFallback):
""" Shared code between Bert and Albert """
_tokenizer: Union[AlbertTokenizer, BertTokenizer]
_model: Union[AlbertModel, BertModel]
_necessary_directories = ["model_directory"]
def _get_fallback_model(self) -> Union[BertModel, AlbertModel]:
raise NotImplementedError
def _embed_batch_impl(
self, batch: List[str], model: Union[BertModel, AlbertModel]
) -> Generator[ndarray, None, None]:
""" Embed batch code shared between Bert and Albert """
seq_lens = [len(seq) for seq in batch]
# Remove rare amino acids
batch = [re.sub(r"[UZOB]", "X", sequence) for sequence in batch]
# transformers needs spaces between the amino acids
batch = [" ".join(list(seq)) for seq in batch]
ids = self._tokenizer.batch_encode_plus(
batch, add_special_tokens=True, pad_to_max_length=True
)
tokenized_sequences = torch.tensor(ids["input_ids"]).to(model.device)
attention_mask = torch.tensor(ids["attention_mask"]).to(model.device)
with torch.no_grad():
embeddings = model(
input_ids=tokenized_sequences, attention_mask=attention_mask
)
embeddings = embeddings[0].cpu().numpy()
for seq_num, seq_len in zip_longest(range(len(embeddings)), seq_lens):
# slice off first and last positions (special tokens)
embedding = embeddings[seq_num][1 : seq_len + 1]
assert (
seq_len == embedding.shape[0]
), f"Sequence length mismatch: {seq_len} vs {embedding.shape[0]}"
yield embedding
@staticmethod
def reduce_per_protein(embedding):
return embedding.mean(axis=0)
def embed(self, sequence: str) -> ndarray:
[embedding] = self.embed_batch([sequence])
return embedding
| [
"torch.no_grad",
"torch.tensor"
] | 1.5.1 | konstin/bio_embeddings | a7de49fd8e152f8e735283818e6f2e2de7b824c8 |
1.6 | import unittest
from .. import TEST_DTYPES, TEST_DEVICE
import torch
from pytorch_metric_learning.losses import MultiSimilarityLoss
from pytorch_metric_learning.utils import common_functions as c_f
class TestMultiSimilarityLoss(unittest.TestCase):
def test_multi_similarity_loss(self):
for dtype in TEST_DTYPES:
if dtype == torch.float16:
alpha, beta, base = 0.1, 10, 0.5
else:
alpha, beta, base = 0.1, 40, 0.5
loss_func = MultiSimilarityLoss(alpha=alpha, beta=beta, base=base)
embedding_angles = [0, 20, 40, 60, 80]
embeddings = torch.tensor(
[c_f.angle_to_coord(a) for a in embedding_angles],
requires_grad=True,
dtype=dtype,
).to(
TEST_DEVICE
) # 2D embeddings
labels = torch.LongTensor([0, 0, 1, 1, 2])
loss = loss_func(embeddings, labels)
loss.backward()
pos_pairs = [(0, 1), (1, 0), (2, 3), (3, 2)]
neg_pairs = [
(0, 2),
(0, 3),
(0, 4),
(1, 2),
(1, 3),
(1, 4),
(2, 0),
(2, 1),
(2, 4),
(3, 0),
(3, 1),
(3, 4),
(4, 0),
(4, 1),
(4, 2),
(4, 3),
]
correct_total = 0
for i in range(len(embeddings)):
correct_pos_loss = 0
correct_neg_loss = 0
for a, p in pos_pairs:
if a == i:
anchor, positive = embeddings[a], embeddings[p]
correct_pos_loss += torch.exp(
-alpha * (torch.matmul(anchor, positive) - base)
)
if correct_pos_loss > 0:
correct_pos_loss = (1 / alpha) * torch.log(1 + correct_pos_loss)
for a, n in neg_pairs:
if a == i:
anchor, negative = embeddings[a], embeddings[n]
correct_neg_loss += torch.exp(
beta * (torch.matmul(anchor, negative) - base)
)
if correct_neg_loss > 0:
correct_neg_loss = (1 / beta) * torch.log(1 + correct_neg_loss)
correct_total += correct_pos_loss + correct_neg_loss
correct_total /= embeddings.size(0)
rtol = 1e-2 if dtype == torch.float16 else 1e-5
self.assertTrue(torch.isclose(loss, correct_total, rtol=rtol))
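# Editorial note (not part of the original test): the loop above reproduces
# the multi-similarity loss in closed form:
#   L = (1/N) * sum_i [ (1/alpha) * log(1 + sum_p exp(-alpha * (S_ip - base)))
#                     + (1/beta)  * log(1 + sum_n exp( beta * (S_in - base))) ]
# where S_ij is the dot product of embeddings i and j, p/n range over the
# positive/negative pairs anchored at i, and N is the number of embeddings.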
def test_with_no_valid_pairs(self):
alpha, beta, base = 0.1, 40, 0.5
loss_func = MultiSimilarityLoss(alpha=alpha, beta=beta, base=base)
for dtype in TEST_DTYPES:
embedding_angles = [0]
embeddings = torch.tensor(
[c_f.angle_to_coord(a) for a in embedding_angles],
requires_grad=True,
dtype=dtype,
).to(
TEST_DEVICE
) # 2D embeddings
labels = torch.LongTensor([0])
loss = loss_func(embeddings, labels)
loss.backward()
self.assertEqual(loss, 0)
| [
"torch.log",
"torch.isclose",
"torch.matmul",
"torch.LongTensor"
] | 1.6.0 | RaunakRahi/pytorch-metric-learning | 1525e5b7bd840967b344b3da5667297295042361 |
1.8 | from typing import List, Any
import pytorch_lightning.core.lightning as pl
import torch
import torch.nn.functional as F
import numpy as np
from allennlp.modules import ConditionalRandomField
from allennlp.modules.conditional_random_field import allowed_transitions
from torch import nn
from torch.utils.data import DataLoader
from transformers import get_linear_schedule_with_warmup, AutoModel
from log import logger
from utils.metric import SpanF1
from utils.reader_utils import extract_spans
import pdb
def KL(input, target):
input = input.float()
target = target.float()
loss = F.kl_div(F.log_softmax(input, dim=-1, dtype=torch.float32), F.softmax(target, dim=-1, dtype=torch.float32))
return loss
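# Editorial note (not in the original source): KL is applied in both
# directions below, as loss_KL1 + loss_KL2 in perform_forward_step, giving a
# symmetric consistency term between the text-encoder logits and the
# label-feature logits.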
class NERBaseAnnotator(pl.LightningModule):
def __init__(self,
train_data=None,
dev_data=None,
lr=1e-5,
dropout_rate=0.1,
batch_size=16,
tag_to_id=None,
stage='fit',
pad_token_id=1,
encoder_model='xlm-roberta-large',
num_gpus=1):
super(NERBaseAnnotator, self).__init__()
self.train_data = train_data
self.dev_data = dev_data
self.id_to_tag = {v: k for k, v in tag_to_id.items()}
self.tag_to_id = tag_to_id
self.batch_size = batch_size
self.stage = stage
self.num_gpus = num_gpus
self.target_size = len(self.id_to_tag)
# set the default baseline model here
self.pad_token_id = pad_token_id
self.encoder_model = encoder_model
self.encoder = AutoModel.from_pretrained(encoder_model, return_dict=True)
rnn_dim = self.encoder.config.hidden_size // 2
self.feature_embed = nn.Linear(self.target_size, self.encoder.config.hidden_size)
self.birnn = nn.LSTM(self.encoder.config.hidden_size, rnn_dim, num_layers=1, bidirectional=True,
batch_first=True)
self.feedforward = nn.Linear(in_features=self.encoder.config.hidden_size * 2, out_features=self.target_size)
self.crf_layer = ConditionalRandomField(num_tags=self.target_size,
constraints=allowed_transitions(constraint_type="BIO",
labels=self.id_to_tag))
self.linear_for_input = nn.Linear(self.encoder.config.hidden_size, self.target_size)
self.linear_for_target = nn.Linear(self.encoder.config.hidden_size, self.target_size)
self.lr = lr
self.dropout = nn.Dropout(dropout_rate)
self.span_f1 = SpanF1()
self.setup_model(self.stage)
self.save_hyperparameters('pad_token_id', 'encoder_model')
def setup_model(self, stage_name):
if stage_name == 'fit' and self.train_data is not None:
# Calculate total steps
train_batches = len(self.train_data) // (self.batch_size * self.num_gpus)
self.total_steps = 50 * train_batches
self.warmup_steps = int(self.total_steps * 0.01)
def collate_batch(self, batch):
batch_ = list(zip(*batch))
tokens, masks, gold_spans, tags, feature_input, train_label_feature, head_pos = batch_[0], batch_[1], batch_[2], \
batch_[3], batch_[4], batch_[5], \
batch_[6]
max_len = max([len(token) for token in tokens])
token_tensor = torch.empty(size=(len(tokens), max_len), dtype=torch.long).fill_(self.pad_token_id)
tag_tensor = torch.empty(size=(len(tokens), max_len), dtype=torch.long).fill_(self.tag_to_id['O'])
mask_tensor = torch.zeros(size=(len(tokens), max_len), dtype=torch.bool)
feature_tensor = torch.zeros(size=(len(tokens), max_len, self.target_size), dtype=torch.float32)
train_label_feature_tensor = torch.zeros(size=(len(tokens), max_len, self.target_size), dtype=torch.float32)
for i in range(len(tokens)):
token_tensor[i] = tokens[i]
tag_tensor[i] = tags[i]
mask_tensor[i] = masks[i]
feature_tensor[i] = feature_input[i]
train_label_feature_tensor[i] = train_label_feature[i]
return token_tensor, tag_tensor, mask_tensor, gold_spans, feature_tensor, train_label_feature_tensor, head_pos
def configure_optimizers(self):
optimizer_grouped_parameters = [{'params': self.encoder.parameters(), 'lr': self.lr},
{'params': self.feedforward.parameters(), 'lr': self.lr},
{'params': self.linear_for_input.parameters(), 'lr': self.lr},
{'params': self.linear_for_target.parameters(), 'lr': self.lr},
{'params': self.crf_layer.parameters(), 'lr': self.lr},
{'params': self.feature_embed.parameters(), 'lr': self.lr * 10},
{'params': self.birnn.parameters(), 'lr': self.lr * 10}]
optimizer = torch.optim.AdamW(optimizer_grouped_parameters, lr=self.lr, weight_decay=0.01)
if self.stage == 'fit':
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=self.warmup_steps,
num_training_steps=self.total_steps)
scheduler = {
'scheduler': scheduler,
'interval': 'step',
'frequency': 1
}
return [optimizer], [scheduler]
return [optimizer]
def train_dataloader(self):
loader = DataLoader(self.train_data, batch_size=self.batch_size, collate_fn=self.collate_batch, num_workers=10)
return loader
def val_dataloader(self):
if self.dev_data is None:
return None
loader = DataLoader(self.dev_data, batch_size=self.batch_size, collate_fn=self.collate_batch, num_workers=10)
return loader
def test_epoch_end(self, outputs):
pred_results = self.span_f1.get_metric()
avg_loss = np.mean([preds['loss'].item() for preds in outputs])
self.log_metrics(pred_results, loss=avg_loss, on_step=False, on_epoch=True)
out = {"test_loss": avg_loss, "results": pred_results}
return out
def training_epoch_end(self, outputs: List[Any]) -> None:
pred_results = self.span_f1.get_metric(True)
avg_loss = np.mean([preds['loss'].item() for preds in outputs])
self.log_metrics(pred_results, loss=avg_loss, suffix='', on_step=False, on_epoch=True)
def validation_epoch_end(self, outputs: List[Any]) -> None:
pred_results = self.span_f1.get_metric(True)
avg_loss = np.mean([preds['loss'].item() for preds in outputs])
self.log_metrics(pred_results, loss=avg_loss, suffix='val_', on_step=False, on_epoch=True)
def validation_step(self, batch, batch_idx):
output = self.perform_forward_step(batch)
self.log_metrics(output['results'], loss=output['loss'], suffix='val_', on_step=True, on_epoch=False)
return output
def training_step(self, batch, batch_idx):
output = self.perform_forward_step(batch)
self.log_metrics(output['results'], loss=output['loss'], suffix='', on_step=True, on_epoch=False)
return output
def test_step(self, batch, batch_idx):
output = self.perform_forward_step(batch, mode=self.stage)
self.log_metrics(output['results'], loss=output['loss'], suffix='_t', on_step=True, on_epoch=False)
return output
def log_metrics(self, pred_results, loss=0.0, suffix='', on_step=False, on_epoch=True):
for key in pred_results:
self.log(suffix + key, pred_results[key], on_step=on_step, on_epoch=on_epoch, prog_bar=True, logger=True)
self.log(suffix + 'loss', loss, on_step=on_step, on_epoch=on_epoch, prog_bar=True, logger=True)
def perform_forward_step(self, batch, mode=''):
tokens, tags, token_mask, metadata, feature_input, train_label_feature, head_pos = batch
batch_size = tokens.size(0)
if mode == 'predict':
_device = self.device
train_label_feature = train_label_feature.to(_device)
tokens = tokens.to(_device)
tags = tags.to(_device)
token_mask = token_mask.to(_device)
feature_input = feature_input.to(_device)
embedded_text_input = self.encoder(input_ids=tokens, attention_mask=token_mask)
embedded_text_input = embedded_text_input.last_hidden_state
embedded_text_input = self.dropout(F.leaky_relu(embedded_text_input))
feature_express_linear = self.feature_embed(feature_input)
feature_express_rnn, _ = self.birnn(feature_express_linear)
concat_output = torch.cat((embedded_text_input, feature_express_rnn), dim=-1)
# project the token representation for classification
token_scores = self.feedforward(concat_output)
loss_KL = None
loss_KL1 = None
loss_KL2 = None
if train_label_feature is not None:
logits_input = self.linear_for_input(embedded_text_input)
train_label_feature_express_linear = self.feature_embed(train_label_feature)
train_label_feature_express_rnn, _ = self.birnn(train_label_feature_express_linear)
logits_target = self.linear_for_target(train_label_feature_express_rnn)
loss_KL1 = KL(logits_input.view(-1, self.target_size), logits_target.view(-1, self.target_size).detach())
loss_KL2 = KL(logits_target.view(-1, self.target_size), logits_input.view(-1, self.target_size).detach())
loss_KL = loss_KL1 + loss_KL2
# compute the log-likelihood loss and compute the best NER annotation sequence
output = self._compute_token_tags(token_scores=token_scores, tags=tags, token_mask=token_mask,
metadata=metadata, batch_size=batch_size, loss_KL=loss_KL, head_pos=head_pos,
mode=mode)
return output
def _compute_token_tags(self, token_scores, tags, token_mask, metadata, batch_size, loss_KL, head_pos, mode=''):
# compute the log-likelihood loss and compute the best NER annotation sequence
loss = -self.crf_layer(token_scores, tags, token_mask) / float(batch_size)
total_loss = loss + loss_KL * 1000
best_path = self.crf_layer.viterbi_tags(token_scores, token_mask)
pred_results, pred_tags = [], []
for i in range(batch_size):
false_tag_seq, _ = best_path[i]
assert len(head_pos[i]) == len(false_tag_seq)
tag_seq = []
for ifhead, f_tag in zip(head_pos[i], false_tag_seq):
if ifhead == 1:
tag_seq.append(f_tag)
pred_tags.append([self.id_to_tag[x] for x in tag_seq])
pred_results.append(extract_spans([self.id_to_tag[x] for x in tag_seq if x in self.id_to_tag]))
self.span_f1(pred_results, metadata)
output = {"loss": total_loss, "results": self.span_f1.get_metric()}
if mode == 'predict':
output['token_tags'] = pred_tags
return output
def predict_tags(self, batch):
pred_tags = self.perform_forward_step(batch, mode='predict')['token_tags']
return pred_tags
| [
"torch.nn.Linear",
"torch.nn.Dropout",
"torch.optim.AdamW",
"torch.nn.LSTM",
"torch.cat",
"torch.nn.functional.log_softmax",
"torch.utils.data.DataLoader",
"torch.nn.functional.softmax",
"torch.nn.functional.leaky_relu"
] | 1.8.0 | Mckysse/AGAN | 67dd049828681a1ea6acb6bf3e7247651109c17b |
0.1 | # Copyright 2019 The Forte Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = ["LabeledSpanGraphNetwork"]
import math
from collections import defaultdict
from typing import Any, Dict, List, Tuple
from mypy_extensions import TypedDict
from forte.utils import create_import_error_msg
from forte.models.srl import model_utils as utils
from forte.models.srl.data import SRLSpan, Span
try:
import torch
from torch import nn
from torch.nn import functional as F
except ImportError as e:
raise ImportError(
create_import_error_msg("torch", "models", "Texar model support")
) from e
try:
import texar.torch as tx
except ImportError as e:
raise ImportError(
create_import_error_msg(
"texar-pytorch", "models", "Texar model support"
)
) from e
class LabeledSpanGraphNetwork(tx.ModuleBase):
@property
def output_size(self):
"""
This module is supposed to be the last layer so we will not return
an informative output_size
Returns:
"""
return 0
__torch_device__: torch.device
def __init__(
self, word_vocab: tx.data.Vocab, char_vocab: tx.data.Vocab, hparams=None
):
super().__init__(hparams)
# Word vocabulary & representation
self.word_vocab = word_vocab
self.word_embed = tx.modules.WordEmbedder(
init_value=tx.data.Embedding(
vocab=self.word_vocab.token_to_id_map_py,
hparams={
"file": self._hparams.context_embeddings.path,
"dim": self._hparams.context_embeddings.size,
"read_fn": "load_glove",
},
).word_vecs
)
self.head_embed = tx.modules.WordEmbedder(
init_value=tx.data.Embedding(
vocab=self.word_vocab.token_to_id_map_py,
hparams={
"file": self._hparams.head_embeddings.path,
"dim": self._hparams.head_embeddings.size,
"read_fn": "load_glove",
},
).word_vecs
)
self.span_length_embed = tx.modules.PositionEmbedder(
position_size=self._hparams.max_arg_width,
hparams={
"dim": self._hparams.feature_size,
},
)
# Character vocabulary & representation
self.char_cnn = utils.CharCNN(
char_vocab=char_vocab,
hparams={
"char_embed_size": self._hparams.char_embedding_size,
"filter_widths": self._hparams.filter_widths,
"filter_size": self._hparams.filter_size,
},
)
self.embed_dropout = nn.Dropout(self._hparams.lexical_dropout_rate)
# ELMo representation
if self._hparams.elmo.path is not None:
# pylint: disable=import-outside-toplevel
from allennlp.modules.elmo import Elmo, batch_to_ids
elmo_hparams = self._hparams.elmo
self.elmo = Elmo(
options_file=elmo_hparams.config,
weight_file=elmo_hparams.path,
num_output_representations=1,
)
self._elmo_char_ids_fn = batch_to_ids
else:
self.elmo = None
# LSTM
single_hidden_dim = self._hparams.contextualization_size
lstm_input_dim = self.word_embed.dim + self.char_cnn.output_size
if self.elmo is not None:
lstm_input_dim += self._hparams.elmo.dim
self.lstm = utils.CustomBiLSTM(
hparams={
"input_dim": lstm_input_dim,
"hidden_dim": single_hidden_dim,
"num_layers": self._hparams.contextualization_layers,
"dropout": self._hparams.lstm_dropout_rate,
}
)
hidden_dim = single_hidden_dim * 2
self.label_vocab = {
label: idx + 1 # reserve index 0 for null label
for idx, label in enumerate(self._hparams.srl_labels)
}
self.label_inverse_vocab = {v: k for k, v in self.label_vocab.items()}
self.head_attention = nn.Linear(hidden_dim, 1)
word_input_dim = self.word_embed.dim + self.char_cnn.output_size
mlp_num_layers = self._hparams.ffnn_depth
mlp_hparams = {
"input_sizes": [
hidden_dim, # concat'd state at start of span
hidden_dim, # concat'd state at end of span
word_input_dim,
self.span_length_embed.dim,
],
"num_layers": mlp_num_layers,
"hidden_size": [self._hparams.ffnn_size] * mlp_num_layers,
"dropout_rate": self._hparams.dropout_rate,
}
self.argument_mlp = utils.ConcatInputMLP(
hparams={
**mlp_hparams,
"output_size": 1,
"activation": "ReLU",
}
)
self.predicate_mlp = utils.ConcatInputMLP(
hparams={
**mlp_hparams,
"input_sizes": [hidden_dim],
"output_size": 1,
"activation": "ReLU",
}
)
self.span_label_mlp = utils.ConcatInputMLP(
hparams={
**mlp_hparams,
"input_sizes": mlp_hparams["input_sizes"] + [hidden_dim],
"output_size": len(self.label_vocab),
"activation": "ReLU",
}
)
@staticmethod
def default_hparams() -> Dict[str, Any]:
return {
"filter_widths": [3, 4, 5],
"filter_size": 50,
"char_embedding_size": 8,
"context_embeddings": {
"path": "embeddings/glove.840B.300d.05.filtered",
"size": 300,
"datasets": "txt",
"lowercase": False,
},
"head_embeddings": {
"path": "embeddings/glove_50_300_2.filtered",
# "path": "embeddings/glove_50_300_2.txt",
"size": 300,
"datasets": "txt",
"lowercase": False,
},
"elmo": {
"path": None,
"config": None,
"dim": 256,
},
"contextualizer": "lstm",
"contextualization_size": 200,
"contextualization_layers": 3,
"ffnn_size": 150,
"ffnn_depth": 2,
"feature_size": 20,
"max_span_width": 30,
"model_heads": True,
"num_attention_heads": 1,
"srl_labels": [
# predicate
"V",
# simple propositions
"A0",
"A1",
"A2",
"A3",
"A4",
"A5",
"AA",
"AM",
"AM-ADV",
"AM-CAU",
"AM-DIR",
"AM-DIS",
"AM-EXT",
"AM-LOC",
"AM-MNR",
"AM-MOD",
"AM-NEG",
"AM-PNC",
"AM-PRD",
"AM-REC",
"AM-TM",
"AM-TMP",
# propositions with coreferenced arguments
"C-A0",
"C-A1",
"C-A2",
"C-A3",
"C-A4",
"C-A5",
"C-AM-ADV",
"C-AM-CAU",
"C-AM-DIR",
"C-AM-DIS",
"C-AM-EXT",
"C-AM-LOC",
"C-AM-MNR",
"C-AM-NEG",
"C-AM-PNC",
"C-AM-TMP",
"C-V",
# propositions with discontinuous argument
"R-A0",
"R-A1",
"R-A2",
"R-A3",
"R-A4",
"R-AA",
"R-AM-ADV",
"R-AM-CAU",
"R-AM-DIR",
"R-AM-EXT",
"R-AM-LOC",
"R-AM-MNR",
"R-AM-PNC",
"R-AM-TMP",
],
"max_arg_width": 30,
"argument_ratio": 0.8,
"predicate_ratio": 0.4,
"lexical_dropout_rate": 0.5,
"dropout_rate": 0.2,
"lstm_dropout_rate": 0.4,
}
@property
def _device(self) -> torch.device:
if not hasattr(self, "__torch_device__"):
self.__torch_device__ = next(self.parameters()).device
return self.__torch_device__
def _create_span_indices(
self, batch_size: int, max_len: int, max_span: int
) -> Tuple[torch.LongTensor, torch.LongTensor, torch.LongTensor]:
start_ids = torch.arange(0, max_len).repeat_interleave(max_span)
end_ids = start_ids + torch.arange(0, max_span).repeat(max_len)
valid_mask = end_ids < max_len
start_ids, end_ids = start_ids[valid_mask], end_ids[valid_mask]
span_length = end_ids - start_ids
start_ids = start_ids.expand(batch_size, *start_ids.size()).to(
device=self._device
)
end_ids = end_ids.expand_as(start_ids).to(device=self._device)
span_length = span_length.expand_as(start_ids).to(device=self._device)
return start_ids, end_ids, span_length
@staticmethod
def _set_submatrix(mat: torch.Tensor, x: int, y: int, value: torch.Tensor):
mat[x : (x + value.size(0)), y : (y + value.size(1))] = value
def _create_softmax_mask(
self, batch_size: int, max_len: int, max_span: int
) -> torch.ByteTensor:
# 1 + 2 + ... + max_span + max_span + ... (total max_len terms)
total_lines = (1 + min(max_span, max_len)) * min(max_span, max_len) // 2
if max_len > max_span:
total_lines += (max_len - max_span) * max_span
lower_tri = torch.tril(
torch.ones(max_span, max_span, dtype=torch.uint8)
)
mask = torch.zeros(total_lines, max_len, dtype=torch.uint8)
line_count = 0
for idx in range(max_len):
if max_len - idx < max_span:
cur_mask = lower_tri[: (max_len - idx), : (max_len - idx)]
else:
cur_mask = lower_tri
self._set_submatrix(mask, line_count, idx, cur_mask)
line_count += cur_mask.size(0)
mask = mask.expand(batch_size, total_lines, max_len)
return mask.to(device=self._device)
def _filter_labels(
self,
start_ids: torch.LongTensor,
end_ids: torch.LongTensor,
predicates: torch.LongTensor,
srls: List[List[SRLSpan]],
) -> torch.LongTensor:
batch_size, num_spans = start_ids.size()
num_predicates = predicates.size(1)
device = start_ids.device
start_ids = start_ids.cpu().numpy()
end_ids = end_ids.cpu().numpy()
predicates = predicates.cpu().numpy()
batch_predicates = [
{pred: idx for idx, pred in enumerate(preds)}
for preds in predicates
]
batch_spans = [
{(l, r): idx for idx, (l, r) in enumerate(zip(starts, ends))}
for starts, ends in zip(start_ids, end_ids)
]
gold_labels = torch.zeros(
batch_size, num_predicates * num_spans, dtype=torch.long
)
for b_idx in range(batch_size):
for srl in srls[b_idx]:
span_idx = batch_spans[b_idx].get((srl.start, srl.end), None)
predicate_idx = batch_predicates[b_idx].get(srl.predicate, None)
if span_idx is not None and predicate_idx is not None:
label_idx = predicate_idx * num_spans + span_idx
gold_labels[b_idx, label_idx] = self.label_vocab[srl.label]
gold_labels = gold_labels.to(device=device)
return gold_labels
def _compute_soft_head_attention_brute(
self,
start_ids: torch.LongTensor,
end_ids: torch.LongTensor,
sent_lengths: torch.LongTensor,
states: torch.Tensor,
word_inputs: torch.Tensor,
) -> Tuple[torch.Tensor, torch.LongTensor]:
device = start_ids.device
batch_size, max_len = states.size()[:2]
num_spans = start_ids.size(1)
max_span_width = self._hparams.max_span_width
batch_offset = torch.arange(batch_size, device=device) * max_len
span_indices = torch.arange(max_span_width, device=device)
# span_indices: (batch_size, num_spans, max_span_width)
span_indices = (
span_indices.expand(batch_size, num_spans, -1)
+ start_ids.unsqueeze(-1)
+ batch_offset.view(-1, 1, 1)
)
# valid_spans: (batch_size, num_spans)
valid_spans = end_ids < sent_lengths.unsqueeze(-1)
# valid_spans_idx: (total_spans)
valid_spans_idx = valid_spans.view(-1).nonzero().view(-1)
# flat_span_indices: (total_spans, max_span_width)
flat_span_indices = torch.index_select(
span_indices.view(-1, max_span_width), dim=0, index=valid_spans_idx
)
# flat_sent_lengths: (total_spans)
flat_sent_lengths = torch.index_select(
(
torch.min(end_ids + 1, sent_lengths.unsqueeze(-1))
+ batch_offset.unsqueeze(-1)
).view(-1),
dim=0,
index=valid_spans_idx,
)
# flat_mask: (total_spans, max_span_width)
flat_mask = flat_span_indices < flat_sent_lengths.unsqueeze(-1)
flat_span_indices *= flat_mask.type_as(flat_span_indices)
# span_word_inputs: (total_spans, max_span_width, word_input_dim)
span_word_inputs = torch.index_select(
word_inputs.view(-1, word_inputs.size(-1)),
dim=0,
index=flat_span_indices.view(-1),
).view(*flat_span_indices.size(), -1)
# logits: (batch_size, max_len)
logits = self.head_attention(states).squeeze(-1)
# flat_span_logits: (total_spans, max_span_width)
flat_span_logits = torch.index_select(
logits.view(-1), dim=0, index=flat_span_indices.view(-1)
).view(flat_span_indices.size())
masked_span_logits = flat_span_logits - 1e10 * (~flat_mask).type_as(
flat_span_logits
)
weights = torch.softmax(masked_span_logits, dim=-1)
# weighted_inputs: (total_spans, max_span_width, word_input_dim)
weighted_inputs = span_word_inputs * weights.unsqueeze(-1)
# soft_head: (total_spans, word_input_dim)
soft_head = torch.sum(weighted_inputs, dim=1)
# indices: (batch_size, num_spans)
indices = torch.cumsum(valid_spans.view(-1).type(torch.long), dim=0) - 1
indices = torch.clamp_min(indices, 0).view_as(valid_spans)
return soft_head, indices
class ReturnType(TypedDict):
loss: torch.Tensor
total_scores: torch.Tensor
start_ids: torch.LongTensor
end_ids: torch.LongTensor
predicates: torch.LongTensor
def _arange(self, *args, **kwargs):
return torch.arange(*args, device=self._device, **kwargs)
def forward(self, inputs: tx.data.Batch) -> ReturnType:
# Compute embeddings and recurrent states.
char_embed = self.char_cnn(inputs.text)
with torch.no_grad():
# A workaround for freezing embeddings.
word_embed = self.word_embed(inputs.text_ids)
head_embed = self.head_embed(inputs.text_ids)
context_embeds = [word_embed, char_embed]
head_embeds = [head_embed, char_embed]
if self.elmo is not None:
char_ids = self._elmo_char_ids_fn(inputs.text).to(self._device)
elmo_embed = self.elmo(char_ids)["elmo_representations"][0]
context_embeds.append(elmo_embed)
# *word_inputs: (batch_size, max_len, word_input_dim)
lstm_word_inputs = self.embed_dropout(torch.cat(context_embeds, dim=-1))
word_inputs = self.embed_dropout(torch.cat(head_embeds, dim=-1))
# states: (batch_size, max_len, hidden_dim)
states = self.lstm(lstm_word_inputs, inputs.length)
# Create span indices.
batch_size, max_len = inputs.text_ids.size()
max_span = self._hparams.max_span_width
# *_ids: (batch_size, max_num_spans)
# max_num_spans ~= max_len * max_span
start_ids, end_ids, span_length = self._create_span_indices(
batch_size, max_len, max_span
)
# Create soft head representation weights.
# head_attn_cache, head_attn_index = self._compute_soft_head_attention(
(
head_attn_cache,
head_attn_index,
) = self._compute_soft_head_attention_brute(
start_ids, end_ids, inputs.length, states, word_inputs
)
# Compute argument & predicate scores.
span_length_embed = self.embed_dropout(self.span_length_embed.embedding)
cache_inputs = [states, states, head_attn_cache, span_length_embed]
pred_indices = self._arange(max_len).expand(batch_size, -1)
with self.argument_mlp.cache_results(
cache_inputs
), self.predicate_mlp.cache_results([states]):
# arg_scores: (batch_size, max_num_spans)
arg_scores = self.argument_mlp(
[start_ids, end_ids, head_attn_index, span_length]
).squeeze(-1)
# pred_scores: (batch_size, max_len)
pred_scores = self.predicate_mlp([pred_indices]).squeeze(-1)
# Beam pruning of arguments & predicates.
# topk_*: (batch_size, max_arguments)
max_arguments = math.ceil(self._hparams.argument_ratio * max_len)
num_arguments = torch.ceil(
self._hparams.argument_ratio * inputs.length.float()
).long()
topk_arg_scores, topk_arg_indices = torch.topk(
arg_scores, k=max_arguments, dim=1, sorted=True
)
topk_start_ids, topk_end_ids, topk_attn_index = utils.batch_gather(
[start_ids, end_ids, head_attn_index], index=topk_arg_indices
)
topk_span_length = topk_end_ids - topk_start_ids
# topk_pred_*: (batch_size, max_predicates)
max_predicates = math.ceil(self._hparams.predicate_ratio * max_len)
num_predicates = torch.ceil(
self._hparams.predicate_ratio * inputs.length.float()
).long()
topk_pred_scores, topk_pred_indices = torch.topk(
pred_scores, k=max_predicates, dim=1, sorted=True
)
# Compute label scores for pruned argument-predicate pairs.
with self.span_label_mlp.cache_results(cache_inputs + [states]):
# label_scores:
# (batch_size, max_predicates * max_arguments, num_labels)
label_scores = self.span_label_mlp(
[
topk_start_ids.repeat(1, max_predicates),
topk_end_ids.repeat(1, max_predicates),
tx.utils.map_structure(
lambda x: x.repeat(1, max_predicates)
if isinstance(x, torch.Tensor)
else x,
topk_attn_index,
),
topk_span_length.repeat(1, max_predicates),
topk_pred_indices.repeat_interleave(max_arguments, dim=1),
]
)
# Compute log-probabilities.
total_scores = (
topk_arg_scores.repeat(1, max_predicates).unsqueeze(-1)
+ topk_pred_scores.repeat_interleave(
max_arguments, dim=1
).unsqueeze(-1)
+ label_scores
)
total_scores = torch.cat(
[
total_scores.new_zeros(*total_scores.size()[:-1], 1),
total_scores,
],
dim=-1,
)
gold_labels = self._filter_labels(
topk_start_ids, topk_end_ids, topk_pred_indices, inputs.srl
)
# Compute masked loss.
# unmasked_loss: (batch_size, max_predicates * max_arguments)
unmasked_loss = F.cross_entropy(
total_scores.view(-1, total_scores.size(-1)),
gold_labels.view(-1),
reduction="none",
)
# pred_*_mask: (batch_size, max_predicates)
pred_index_mask = topk_pred_indices < inputs.length.unsqueeze(-1)
pred_topk_mask = self._arange(max_predicates).unsqueeze(
0
) < num_predicates.unsqueeze(1)
# arg_*_mask: (batch_size, max_arguments)
arg_index_mask = topk_end_ids < inputs.length.unsqueeze(-1)
arg_topk_mask = self._arange(max_arguments).unsqueeze(
0
) < num_arguments.unsqueeze(1)
loss_mask = (
(arg_index_mask & arg_topk_mask).unsqueeze(1)
& (pred_index_mask & pred_topk_mask).unsqueeze(2)
).view(-1)
loss = torch.sum(unmasked_loss * loss_mask.type_as(unmasked_loss))
# loss = loss / batch_size
return {
"loss": loss,
"total_scores": total_scores,
"start_ids": topk_start_ids,
"end_ids": topk_end_ids,
"predicates": topk_pred_indices,
}
_CORE_ARGS = {
f"{prefix}{arg}": 1 << idx
for prefix in ["A", "ARG"]
for idx, arg in enumerate("012345A")
}
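# For example, _CORE_ARGS["A0"] == 1, _CORE_ARGS["A2"] == 4 and
# _CORE_ARGS["AA"] == 64, so a DP state that has already used {A0, A2} is
# encoded as 1 | 4 == 5 (see the state-encoding comment in _dp_decode below).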
def _dp_decode(
self,
max_len: int,
pred_idx: int,
start_ids: List[int],
end_ids: List[int],
argmax_labels: List[int],
label_scores: List[float],
enforce_constraint: bool = False,
) -> List[Span]:
# Map positions to list of span indices for quick lookup during DP.
spans_ending_at: Dict[int, List[int]] = defaultdict(list)
for idx in range( # pylint: disable=consider-using-enumerate
len(end_ids)
):
if argmax_labels[idx] == 0: # ignore null spans
continue
if start_ids[idx] <= pred_idx <= end_ids[idx]:
# Skip spans overlapping with the predicate.
continue
if end_ids[idx] >= max_len:
# Skip invalid spans longer than the sentence.
continue
spans_ending_at[end_ids[idx]].append(idx)
if all(len(spans) == 0 for spans in spans_ending_at.values()):
return [] # no spans at all, just return
if enforce_constraint:
label_states = [
self._CORE_ARGS.get(self.label_inverse_vocab[label], -1)
if label != 0
else -1
for label in argmax_labels
]
else:
# ignore constraints
label_states = [-1] * len(argmax_labels)
# Perform DP.
# Each state is a tuple (time, core_args), where `core_args` is the set
# of core arguments (ARGA, ARG0 to ARG5) previously selected,
# represented in binary (so {ARG0, ARG2, ARG5} would be
# 2^0 + 2^2 + 2^5 = 37).
max_scores = [{0: 0.0}]
# only record selected spans
best_span_indices: List[Dict[int, int]] = [{}]
for idx in range(max_len):
cur_scores = max_scores[-1].copy()
cur_span_idx = {}
for span_idx in spans_ending_at[idx]:
label_state = label_states[span_idx]
prev_states = max_scores[start_ids[span_idx]]
for state, prev_score in prev_states.items():
if label_state != -1 and (label_state & state != 0):
# A core argument of this type has already been selected
continue
score = prev_score + label_scores[span_idx]
new_state = state | label_state
if score > cur_scores.get(new_state, 0):
cur_scores[new_state] = score
cur_span_idx[new_state] = span_idx
max_scores.append(cur_scores)
best_span_indices.append(cur_span_idx)
# Backtrack to obtain optimal span choices.
srl = []
pos = max_len
state = max(
(score, state) for state, score in max_scores[max_len].items()
)[1]
while pos > 0:
best_span_idx = best_span_indices[pos].get(state, None)
if best_span_idx is not None:
assert end_ids[best_span_idx] == pos - 1
srl.append(
Span(
start_ids[best_span_idx],
end_ids[best_span_idx],
self.label_inverse_vocab[argmax_labels[best_span_idx]],
)
)
pos = start_ids[best_span_idx]
if label_states[best_span_idx] != -1:
state &= ~label_states[best_span_idx]
else:
pos -= 1
return srl
@torch.no_grad()
def decode(
self, inputs: tx.data.Batch, enforce_constraint: bool = False
) -> List[Dict[int, List[Span]]]:
r"""Performs optimal decoding with dynamic programming.
:returns: A nested structure of SRL spans, representing the (inner) list
of spans for each predicate (middle `dict`) and for each example in
the batch (outer list).
"""
result_dict = self.forward(inputs)
start_ids = result_dict["start_ids"].cpu().numpy()
end_ids = result_dict["end_ids"].cpu().numpy()
predicates = result_dict["predicates"].cpu().numpy()
batch_size, num_arguments = start_ids.shape
num_predicates = predicates.shape[1]
total_scores = result_dict["total_scores"].view(
batch_size, num_predicates, num_arguments, -1
)
label_scores, argmax_label = torch.max(total_scores, dim=3)
argmax_label = argmax_label.cpu().numpy()
label_scores = label_scores.cpu().numpy()
sent_lengths = inputs.length.cpu().numpy()
# Do DP one example at a time...
batch_srl = []
for b_idx in range(batch_size):
cur_srl: Dict[int, List[Span]] = {}
# ... and one predicate at a time.
for pred_idx, predicate in enumerate(predicates[b_idx]):
if predicate >= inputs.length[b_idx]:
# Skip invalid predicates outside the sentence.
continue
srl = self._dp_decode(
sent_lengths[b_idx],
predicate,
start_ids[b_idx],
end_ids[b_idx],
argmax_label[b_idx, pred_idx],
label_scores[b_idx, pred_idx],
enforce_constraint,
)
if len(srl) > 0:
cur_srl[predicate] = srl
batch_srl.append(cur_srl)
return batch_srl
| [
"torch.nn.Linear",
"torch.zeros",
"torch.nn.Dropout",
"torch.cat",
"torch.arange",
"torch.max",
"torch.no_grad",
"torch.softmax",
"torch.ones",
"torch.topk",
"torch.clamp_min",
"torch.sum"
] | 0.1.4 | bhaskar2443053/forte | 95fabd94126d45c0db07cdcc197049ed1859d228 |
1.7 | """ full training (train rnn-ext + abs + RL) """
import argparse
import json
import pickle as pkl
import os
from os.path import join, exists
from itertools import cycle
from toolz.sandbox.core import unzip
from cytoolz import identity
import torch
from torch import optim
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.utils.data import DataLoader
from data.data import CnnDmDataset
from data.batcher import tokenize
from model.rl import ActorCritic
from model.extract import PtrExtractSumm
from training import BasicTrainer
from rl import get_grad_fn
from rl import A2CPipeline
from decoding import load_best_ckpt
from decoding import Abstractor, ArticleBatcher
from metric import compute_rouge_l, compute_rouge_n, compute_bertscore
MAX_ABS_LEN = 30
try:
DATA_DIR = os.environ['DATA']
except KeyError:
print('please use environment variable to specify data directories')
class RLDataset(CnnDmDataset):
""" get the article sentences only (for decoding use)"""
def __init__(self, split, cross_rev_bucket=None):
super().__init__(split, DATA_DIR, cross_rev_bucket=cross_rev_bucket)
def __getitem__(self, i):
js_data = super().__getitem__(i)
art_sents = js_data['article']
abs_sents = js_data['abstract']
return art_sents, abs_sents
def load_ext_net(ext_dir):
ext_meta = json.load(open(join(ext_dir, 'meta.json')))
assert ext_meta['net'] == 'ml_rnn_extractor'
ext_ckpt = load_best_ckpt(ext_dir)
ext_args = ext_meta['net_args']
vocab = pkl.load(open(join(ext_dir, 'vocab.pkl'), 'rb'))
ext = PtrExtractSumm(**ext_args)
ext.load_state_dict(ext_ckpt)
return ext, vocab
def configure_net(abs_dir, ext_dir, cuda):
""" load pretrained sub-modules and build the actor-critic network"""
# load pretrained abstractor model
if abs_dir is not None:
abstractor = Abstractor(abs_dir, MAX_ABS_LEN, cuda)
else:
abstractor = identity
# load the ML-trained extractor net and build the RL agent
extractor, agent_vocab = load_ext_net(ext_dir)
agent = ActorCritic(extractor._sent_enc,
extractor._art_enc,
extractor._extractor,
ArticleBatcher(agent_vocab, cuda))
if cuda:
agent = agent.cuda()
net_args = {}
net_args['abstractor'] = (None if abs_dir is None
else json.load(open(join(abs_dir, 'meta.json'))))
net_args['extractor'] = json.load(open(join(ext_dir, 'meta.json')))
return agent, agent_vocab, abstractor, net_args
def configure_training(opt, lr, clip_grad, lr_decay, batch_size,
gamma, reward, stop_coeff, stop_reward):
assert opt in ['adam']
opt_kwargs = {}
opt_kwargs['lr'] = lr
train_params = {}
train_params['optimizer'] = (opt, opt_kwargs)
train_params['clip_grad_norm'] = clip_grad
train_params['batch_size'] = batch_size
train_params['lr_decay'] = lr_decay
train_params['gamma'] = gamma
train_params['reward'] = reward
train_params['stop_coeff'] = stop_coeff
train_params['stop_reward'] = stop_reward
return train_params
def build_batchers(batch_size, cross_rev_bucket=None):
def coll(batch):
art_batch, abs_batch = unzip(batch)
art_sents = list(filter(bool, map(tokenize(None), art_batch)))
abs_sents = list(filter(bool, map(tokenize(None), abs_batch)))
return art_sents, abs_sents
loader = DataLoader(
RLDataset('train', cross_rev_bucket=cross_rev_bucket), batch_size=batch_size,
shuffle=True, num_workers=4,
collate_fn=coll
)
val_loader = DataLoader(
RLDataset('val'), batch_size=batch_size,
shuffle=False, num_workers=4,
collate_fn=coll
)
return cycle(loader), val_loader
def train(args):
if not exists(args.path):
os.makedirs(args.path)
# make net
agent, agent_vocab, abstractor, net_args = configure_net(
args.abs_dir, args.ext_dir, args.cuda)
# configure training setting
assert args.stop > 0
train_params = configure_training(
'adam', args.lr, args.clip, args.decay, args.batch,
args.gamma, args.reward, args.stop, 'rouge-1'
)
train_batcher, val_batcher = build_batchers(args.batch, cross_rev_bucket=args.cross_rev_bucket)
reward_fn = compute_rouge_l
stop_reward_fn = compute_rouge_n(n=1)
# # reward_fn = compute_rouge_l
# reward_fn = compute_bertscore
# reward_fn.metric = datasets.load_metric('bertscore')
# stop_reward_fn = reward_fn
# save abstractor binary
if args.abs_dir is not None:
abs_ckpt = {}
abs_ckpt['state_dict'] = load_best_ckpt(args.abs_dir)
abs_vocab = pkl.load(open(join(args.abs_dir, 'vocab.pkl'), 'rb'))
abs_dir = join(args.path, 'abstractor')
os.makedirs(join(abs_dir, 'ckpt'))
with open(join(abs_dir, 'meta.json'), 'w') as f:
json.dump(net_args['abstractor'], f, indent=4)
torch.save(abs_ckpt, join(abs_dir, 'ckpt/ckpt-0-0'))
with open(join(abs_dir, 'vocab.pkl'), 'wb') as f:
pkl.dump(abs_vocab, f)
# save configuration
meta = {}
meta['net'] = 'rnn-ext_abs_rl'
meta['net_args'] = net_args
meta['train_params'] = train_params
with open(join(args.path, 'meta.json'), 'w') as f:
json.dump(meta, f, indent=4)
with open(join(args.path, 'agent_vocab.pkl'), 'wb') as f:
pkl.dump(agent_vocab, f)
# prepare trainer
grad_fn = get_grad_fn(agent, args.clip)
optimizer = optim.Adam(agent.parameters(), **train_params['optimizer'][1])
scheduler = ReduceLROnPlateau(optimizer, 'max', verbose=True,
factor=args.decay, min_lr=0,
patience=args.lr_p)
pipeline = A2CPipeline(meta['net'], agent, abstractor,
train_batcher, val_batcher,
optimizer, grad_fn,
reward_fn, args.gamma,
stop_reward_fn, args.stop)
trainer = BasicTrainer(pipeline, args.path,
args.ckpt_freq, args.patience, scheduler,
val_mode='score')
print('start training with the following hyper-parameters:')
print(meta)
trainer.train()
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='program to train the full model (rnn-ext + abs + RL)'
)
parser.add_argument('--path', required=True, help='root of the model')
parser.add_argument('--cross-rev-bucket', default=None,
help='cross review bucket id if training agent to get difficulty scores for summarization')
# model options
parser.add_argument('--abs_dir', action='store',
help='pretrained summarizer model root path')
parser.add_argument('--ext_dir', action='store',
help='root of the extractor model')
parser.add_argument('--ckpt', type=int, action='store', default=None,
help='checkpoint used for decoding')
# training options
parser.add_argument('--reward', action='store', default='rouge-l',
help='reward function for RL')
parser.add_argument('--lr', type=float, action='store', default=1e-4,
help='learning rate')
parser.add_argument('--decay', type=float, action='store', default=0.5,
help='learning rate decay ratio')
parser.add_argument('--lr_p', type=int, action='store', default=0,
help='patience for learning rate decay')
parser.add_argument('--gamma', type=float, action='store', default=0.95,
help='discount factor of RL')
parser.add_argument('--stop', type=float, action='store', default=1.0,
help='stop coefficient for rouge-1')
parser.add_argument('--clip', type=float, action='store', default=2.0,
help='gradient clipping')
parser.add_argument('--batch', type=int, action='store', default=32,
help='the training batch size')
parser.add_argument(
'--ckpt_freq', type=int, action='store', default=1000,
help='number of update steps for checkpoint and validation'
)
parser.add_argument('--patience', type=int, action='store', default=3,
help='patience for early stopping')
parser.add_argument('--no-cuda', action='store_true',
help='disable GPU training')
args = parser.parse_args()
args.cuda = torch.cuda.is_available() and not args.no_cuda
train(args)
| [
"torch.cuda.is_available",
"torch.optim.lr_scheduler.ReduceLROnPlateau"
] | 1.7.0 | aniket03/fast_abs_rl | 8e97d2a24ea8c867c7a0ed181265db325ee0da1b |
1.2 | import torch.nn.functional as F
import torch
import os
import shutil
def save_checkpoint(state, save):
if not os.path.exists(save):
os.makedirs(save)
filename = os.path.join(save, 'checkpoint.path.tar')
torch.save(state, filename)
class KeyValueMemory(object):
"""
kv is a dictionary; for each item, the key is a support vector and the
corresponding value is its one-hot label.
"""
def __init__(self, x, x_labels):
super().__init__()
x_one_hot_labels = F.one_hot(x_labels)
self.kv = dict()
for i in range(len(x)):
self.kv[x[i, :]] = x_one_hot_labels[i, :]
def mem_size(self):
return len(self.kv)
def softabs(alpha):
""" The sharpening function used in Nat Comm """
beta = 10
sa = 1 / torch.exp(-(beta * (alpha - 0.5))) + 1 / torch.exp(-(beta * (-alpha - 0.5)))
return sa
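# Editorial note (not in the original source): with beta=10 the weight stays
# small for |alpha| < 0.5 and grows roughly like exp(beta * (|alpha| - 0.5))
# beyond that, which sharpens the cosine similarities before they are
# normalized in sim_comp below.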
def sim_comp(kv, batch_features):
"""
Input:
- kv: a dictionary, see KeyValueMemory.
- batch_features: a matrix, which is of size [batch * m, d].
"""
ks = []
vs = []
for k, v in kv.items():
ks.append(k)
vs.append(v)
ks = torch.stack(ks).cuda() # a matrix, which is of size [mn, d]
vs = torch.stack(vs).float().cuda() # a matrix, which is of size [mn, m]
# Cosine Similarity
inner_product = torch.matmul(batch_features, ks.t()) # [batch * m, mn]
ks_norm = torch.norm(ks, dim=1).unsqueeze(0) # ks: [mn, d], ks_norm: [1, mn]
feature_norm = torch.norm(batch_features, dim=1).unsqueeze(1) # [batch * m, 1]
norm_product = ks_norm * feature_norm # [batch * m, mn]
K = torch.squeeze(inner_product / (norm_product + 1e-8))
# Calculating softabs
K_exp = softabs(K)
w = K_exp / torch.sum(K_exp, 1, keepdim=True) # [batch * m, mn]
# normalization
w = (w - w.mean([0, 1], keepdim=True)) / w.std([0, 1], keepdim=True)
ws = torch.matmul(w, vs) # [batch * m, m]
return ws
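# A minimal usage sketch (not part of the original file). It assumes a CUDA
# device because sim_comp moves the keys/values to the GPU. Shapes follow the
# docstrings above: x holds support vectors of dim d with integer labels, and
# batch_features is [batch * m, d].
def _demo_key_value_memory():
    x = torch.randn(6, 16)                       # 6 support vectors of dim 16
    x_labels = torch.tensor([0, 0, 1, 1, 2, 2])  # 3 classes -> one-hot dim m = 3
    memory = KeyValueMemory(x, x_labels)
    print(memory.mem_size())                     # 6 stored key/value pairs
    if torch.cuda.is_available():
        batch_features = torch.randn(4, 16).cuda()
        ws = sim_comp(memory.kv, batch_features)
        print(ws.shape)                          # expected: torch.Size([4, 3])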
| [
"torch.nn.functional.one_hot",
"torch.stack",
"torch.norm",
"torch.save",
"torch.squeeze",
"torch.matmul",
"torch.exp",
"torch.sum"
] | 1.2.0 | RuiLin0212/BATMANN | 5c5cc3334090fc0442bfd2ffdd41bdcab88cbea2 |
1.5 | #
# Author: [email protected]
# Date: 04/25/2019
#
""" Winograd Schema Challenge model for common sense reasoning
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from concurrent.futures import ThreadPoolExecutor
import csv
import os
import json
import random
import time
from tqdm import tqdm, trange
import numpy as np
import torch
import torch.nn as nn
from bert.modeling import *
from module import *
import utils
logger=utils.get_logger()
import pdb
from collections.abc import Mapping
from copy import copy
class HNNEncoder(NNModule):
"""HNN model
"""
def __init__(self, config):
super(HNNEncoder, self).__init__(config)
self.embeddings = BertEmbeddings(config)
self.encoder = BertEncoder(config)
self.apply(self.init_weights)
self.config = config
def forward(self, input_ids, token_type_ids=None, input_mask=None, output_all_encoded_layers=True, position_ids = None, return_att = False):
if input_mask is None:
input_mask = torch.ones_like(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
extended_input_mask = input_mask.unsqueeze(1).unsqueeze(2)
embedding_output = self.embeddings(input_ids.to(torch.long), token_type_ids.to(torch.long), position_ids, input_mask)
encoded_layers = self.encoder(embedding_output,
extended_input_mask,
output_all_encoded_layers=output_all_encoded_layers, return_att=return_att)
if return_att:
encoded_layers, att_matrixs = encoded_layers
if not output_all_encoded_layers:
encoded_layers = encoded_layers[-1:]
if return_att:
return (encoded_layers, att_matrixs)
return encoded_layers
class Cosine(torch.nn.Module):
def __init__(self, config):
super().__init__()
def forward(self, src, tgt):
src = src.float()
tgt = tgt.float()
return (torch.matmul(src, tgt.transpose(2,1))/(src.norm(p=2, dim=-1,keepdim=True)*tgt.norm(p=2, dim=-1, keepdim=True)+1e-9)).squeeze()
class BiLinearSim(torch.nn.Module):
def __init__(self, config):
super().__init__()
self.linear = torch.nn.Linear(config.hidden_size, config.hidden_size, bias=False)
def forward(self, src, tgt):
src_ = self.linear(src)
output = torch.matmul(src_, tgt.transpose(2,1))
return output
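# A small shape sketch (illustrative only, matching how SSMatcher calls it): both
# inputs are [B, 1, d] pooled encodings and the squeezed output is a per-example
# cosine score of shape [B]. The config argument is unused by Cosine.
def _demo_cosine_similarity():
    cos = Cosine(config=None)
    cand = torch.randn(6, 1, 8)  # e.g. pooled candidate encodings
    pron = torch.randn(6, 1, 8)  # e.g. pooled pronoun encodings
    scores = cos(cand, pron)
    return scores  # expected shape: torch.Size([6])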
def binary_loss(x, labels, alpha=10, beta=0.5, gama=1):
pos = ((x)*labels).sum(-1)
neg = ((x + beta)*(1-labels)).sum(-1)
loss = (-torch.nn.functional.logsigmoid(gama*pos)-torch.nn.functional.logsigmoid(-neg*gama))/2
return loss
def binary_rank_loss(x, labels, alpha=10, beta=0.5, gama=1):
logp = torch.nn.functional.logsigmoid(x)
log1_p = -x + logp
prob = torch.exp(logp)
# suppose we only have two candidates
pos_idx = labels.nonzero()[:,1].view(x.size(0), 1)
neg_idx = 1 - pos_idx
pos = torch.gather(prob, dim=-1, index=pos_idx.long()).squeeze(-1)
neg = torch.gather(prob, dim=-1, index=neg_idx.long()).squeeze(-1)
loss = -(labels*logp).sum(-1) + alpha*torch.max(torch.zeros_like(pos), neg-pos+beta)
return loss
def rank_loss(x, labels, alpha=10, beta=0.5, gama=1):
p = x
logp = (torch.log(p)*labels).sum(-1)
pos = (p*labels).sum(-1)
neg = (p*(1-labels)).sum(-1)
delta = beta*(1-labels)+x
loss = -(torch.nn.LogSoftmax(-1)(gama*delta)*labels).sum(-1)
return loss
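# Toy check of binary_loss (illustrative, not from the original file): with two
# candidates per example and one-hot labels, the positive score is pushed up via
# logsigmoid(pos) while the beta-shifted negative score is pushed down.
def _demo_binary_loss():
    x = torch.tensor([[2.0, -1.0], [0.5, 0.3]])      # similarity scores
    labels = torch.tensor([[1.0, 0.0], [0.0, 1.0]])  # one-hot targets
    return binary_loss(x, labels)                    # one loss value per example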
_Similarity={'cos': Cosine, 'bilinear':BiLinearSim}
_loss={'binary': binary_loss}
class SSMatcher(torch.nn.Module):
""" Semantic similarity matcher
"""
def __init__(self, config, alpha = 5, beta = 0.1, gama = 1, similarity='cos', \
loss_type='binary', pooling='cap'):
super().__init__()
self.alpha = alpha
self.beta = beta
self.gama = gama
self.config = config
self.sim = similarity
self.similarity = _Similarity[similarity](config)
assert pooling in ['mean', 'cap', 'ftp'], 'Only cap, mean, ftp are supported pooling methods.'
self.pooling = pooling
if pooling=='cap':
self.query = torch.nn.Linear(config.hidden_size, config.hidden_size, bias=False)
self.dropout = torch.nn.Dropout(config.hidden_dropout_prob)
self.loss_fn = _loss[loss_type]
def forward(self, bert, input_ids, labels=None, return_att=False):
#if str(input_ids.device)=='cuda:0':
# pdb.set_trace()
# expanded candidates
batch_size = input_ids.size(0)
seq_len = input_ids.size(-1)
input_ids = input_ids.view(-1, *input_ids.size()[2:])
token_ids, mask_ids, type_ids, candidate_masks, pronoun_mask = [x.squeeze(1).contiguous() for x in input_ids.split(1, dim=1)]
encoder_layers = bert(token_ids, type_ids, mask_ids, output_all_encoded_layers=True, return_att=return_att)
if return_att:
encoder_layers, att_matrixs = encoder_layers
ctx_layer = encoder_layers[-1]
pronoun_encoding = self.calc_pronoun_encoding(ctx_layer, pronoun_mask)
# [bxc]x1xd
if self.pooling == 'mean':
pooling_fn = self.calc_candidate_encoding_mean
elif self.pooling == 'ftp':
pooling_fn = self.calc_candidate_encoding_ftp
else:
pooling_fn = self.calc_candidate_encoding_cap
candidate_encoding,att_score = pooling_fn(ctx_layer, pronoun_encoding, candidate_masks)
if return_att:
att_matrixs.append(att_score)
sim_score = self.similarity(candidate_encoding, pronoun_encoding).view(batch_size, -1)
cands_id = (candidate_masks.sum(dim=-1)>0).to(sim_score).view(batch_size, -1)
logits = sim_score + -10000*(1-cands_id)
pred_probs = torch.sigmoid(logits)
loss = torch.zeros(1).to(logits)
if self.training:
assert labels is not None
# b x n
labels = labels.view(batch_size, -1)
x = logits
# b x c x d
cand_ebd = candidate_encoding.view(batch_size, -1, candidate_encoding.size(-1))
# bx1
loss = self.loss_fn(x, labels, self.alpha, self.beta, self.gama)
loss = loss.mean()
if torch.isnan(loss) or torch.isinf(loss):
pdb.set_trace()
if self.sim=='cos':
logits = (logits+1)/2
else:
logits = pred_probs
if return_att:
return logits, loss, att_matrixs
return (logits, loss)
def calc_pronoun_encoding(self, context_layer, pronoun_mask):
ctx = context_layer[:,0,:].unsqueeze(1)
query = ctx
att = torch.matmul(query, context_layer.transpose(2,1))/math.sqrt(query.size(-1))
att_score = XSoftmax.apply(att, pronoun_mask.unsqueeze(1), -1)
pronoun_ebd = torch.matmul(att_score, context_layer)
return pronoun_ebd
# wwm 75.4
# CAP
def calc_candidate_encoding_cap(self, context_layer, pronoun_encoding, candidate_masks):
#bx1xd
ctx = context_layer[:,0,:].unsqueeze(1)
#if str(context_layer.device)=='cuda:1':
# pdb.set_trace()
query = self.query(ctx)
att = torch.matmul(query, context_layer.transpose(2,1))/math.sqrt(query.size(-1))
att_score = XSoftmax.apply(att, candidate_masks.unsqueeze(1), -1)
cand_ebd = torch.matmul(att_score, context_layer)
return cand_ebd, att_score
# Mean pooling
def calc_candidate_encoding_mean(self, context_layer, pronoun_encoding, candidate_masks):
#bx1xd
ctx = context_layer[:,0,:].unsqueeze(1)
query = torch.zeros_like(ctx)
att = torch.matmul(query, context_layer.transpose(2,1))/math.sqrt(query.size(-1))
att_score = XSoftmax.apply(att, candidate_masks.unsqueeze(1), -1)
cand_ebd = torch.matmul(att_score, context_layer)
return cand_ebd, att_score
# FTP First token pooling
def calc_candidate_encoding_ftp(self, context_layer, pronoun_encoding, candidate_masks):
#bx1xd
ctx = context_layer[:,0,:].unsqueeze(1)
idx = torch.arange(candidate_masks.size(-1),0,-1).unsqueeze(0)\
.expand(candidate_masks.size()).to(candidate_masks)
idx = idx*candidate_masks
_, first_idx = torch.max(idx, dim=1, keepdim=True)
first_idx = first_idx.unsqueeze(-1).expand([context_layer.size(0), 1, context_layer.size(-1)])
cand_ebd = torch.gather(context_layer, dim=1, index=first_idx)
return cand_ebd, None
def lm_loss(loglogits, labels, alpha, beta, gama):
selected = (loglogits*labels).sum(-1)
unselected = (loglogits*(1-labels)).sum(-1)
delta = beta*(1-labels) + torch.exp(loglogits)
delta = -(torch.nn.LogSoftmax(-1)(gama*delta)*labels).sum(-1)
loss = -selected + alpha*delta
return loss
class LMMatcher(torch.nn.Module):
""" Langumage model matcher
"""
def __init__(self, bert, config, alpha = 5, beta = 0.1, gama = 1):
super().__init__()
self.alpha = alpha
self.beta = beta
self.gama = gama
self.lm_predictions = BertLMPredictionHead(config, bert.embeddings.word_embeddings.weight)
def forward(self, bert, input_ids, labels=None):
# expanded candidates
is_expanded = False
batch_size = input_ids.size(0)
if input_ids.size(1)>2:
is_expanded = True
input_ids = input_ids.view(-1, *input_ids.size()[2:])
input_ids,attention_mask,token_type_ids,input_labels,_ = [x.squeeze(1).contiguous() for x in input_ids.split(1, dim=1)]
if is_expanded:
# bc x 1
valid_instances = input_labels.sum(-1)>0
valid_index = valid_instances.nonzero()
valid_index_ex = valid_index.expand(valid_index.size(0), input_ids.size(1))
input_ids = torch.gather(input_ids, dim=0, index=valid_index_ex)
input_labels = torch.gather(input_labels, dim=0, index=valid_index_ex)
attention_mask = torch.gather(attention_mask, dim=0, index=valid_index_ex)
token_type_ids = torch.gather(token_type_ids, dim=0, index=valid_index_ex)
encoder_layers = bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=True)
ebd_weight = bert.embeddings.word_embeddings.weight
ctx_layer = encoder_layers[-1]
#bc x s x V
mask_logits = self.lm_predictions(ctx_layer, ebd_weight).float()
log_logits = nn.LogSoftmax(-1)(mask_logits)
#bc x s
label_logits = torch.gather(log_logits, dim=-1, index=input_labels.unsqueeze(-1).long()).squeeze(-1)
label_mask = (input_labels != 0)
#bc
pred_logits = label_logits.masked_fill_(input_labels==0, 0).sum(-1)/label_mask.to(label_logits).sum(-1)
if is_expanded:
logits_ex = torch.zeros_like(valid_instances.to(pred_logits)).fill_(-1000.0)
#if str(input_ids.device)=='cuda:0':
# pdb.set_trace()
logits_ex.scatter_(dim=0, index=valid_index.squeeze(-1), src=pred_logits)
pred_logits = logits_ex
#b x c
pred_logits = pred_logits.view(batch_size, -1)
loss = torch.zeros(1).to(pred_logits)
if self.training:
assert labels is not None
# all candidates are 2 or zeros
# b x 1
labels = labels.view(batch_size, -1)
loss = lm_loss(pred_logits, labels, self.alpha, self.beta, self.gama)
loss = loss.mean()
if torch.isnan(loss) or torch.isinf(loss):
pdb.set_trace()
return (pred_logits.exp(), loss)
class HNNClassifer(NNModule):
""" HNN model
"""
def __init__(self, config, drop_out=None, alpha = [10,10], beta = [0.5,0.5], gama = [1,1], similarity='cos', loss_type='binary', pooling='cap'):
super().__init__(config)
self.bert = HNNEncoder(config)
self.sm_matcher = SSMatcher(config, alpha[0], beta[0], gama[0], similarity, loss_type, pooling)
lm_idx = 1 if len(alpha)>1 else 0
self.lm_matcher = LMMatcher(self.bert, config, alpha[lm_idx], beta[lm_idx], gama[lm_idx])
self.theta = torch.nn.Parameter(torch.tensor([1,1], dtype=torch.float))
self.config = config
self.loss_fn = _loss[loss_type]
self.alpha = alpha
self.beta = beta
self.gama = gama
self.apply(self.init_weights)
def forward(self, input_ids, tids, labels=None, return_att=False, group_tasks=True):
#if str(input_ids.device)=='cuda:0':
# pdb.set_trace()
# expanded candidates
group_tasks = bool(group_tasks[0]) if isinstance(group_tasks, torch.Tensor) else group_tasks
sm_ids = (tids == 0).nonzero()
lm_ids = (tids == 1).nonzero()
def extract_tasks(task):
if group_tasks:
task = task[:,1].view(input_ids.size(0), -1).unsqueeze(-1).unsqueeze(-1)
task = task.expand((task.size(0), task.size(1), input_ids.size(-2), input_ids.size(-1))).long()
task_inputs = torch.gather(input_ids, dim=1, index=task).contiguous()
return task_inputs, labels
else:
task = task[:,0].view(-1, input_ids.size(1))
input_idx = task.unsqueeze(-1).unsqueeze(-1).\
expand((task.size(0), task.size(1), input_ids.size(-2), input_ids.size(-1))).long()
task_inputs = torch.gather(input_ids, dim=0, index=input_idx).contiguous()
#if str(input_ids.device)=='cuda:0':
# pdb.set_trace()
task_labels = torch.gather(labels, dim=0, index=task).contiguous()
return task_inputs, task_labels
loss = torch.zeros(1).to(input_ids.device).float()
sm_logits = None
if len(sm_ids)>0:
sm_inputs, sm_labels=extract_tasks(sm_ids)
sm_logits, sm_loss = self.sm_matcher(self.bert, sm_inputs, sm_labels)
loss += sm_loss
lm_logits = None
if len(lm_ids)>0:
lm_inputs, lm_labels=extract_tasks(lm_ids)
lm_logits, lm_loss = self.lm_matcher(self.bert, lm_inputs, lm_labels)
loss += lm_loss
en_logits = None
        #if sm_logits is not None and lm_logits is not None and group_tasks:
if group_tasks or sm_logits is None or lm_logits is None:
if sm_logits is None:
en_logits = lm_logits
elif lm_logits is None:
en_logits = sm_logits
else:
en_logits = (sm_logits + lm_logits)/2
if self.training:
en_loss = rank_loss(en_logits, labels, self.alpha[-1], self.beta[-1], self.gama[-1])
loss += en_loss.mean()
if self.training:
return None,None,None,loss
else:
return sm_logits, lm_logits, en_logits, loss
| [
"torch.nn.Linear",
"torch.sigmoid",
"torch.nn.Dropout",
"torch.nn.LogSoftmax",
"torch.zeros",
"torch.isnan",
"torch.max",
"torch.gather",
"torch.nn.functional.logsigmoid",
"torch.matmul",
"torch.tensor",
"torch.isinf",
"torch.ones_like",
"torch.zeros_like",
"torch.log",
"torch.exp"
] | 1.5.0 | anlewy/mt-dnn | eeb6f01ce0630e61a52b8c9c6f7537cd34978e45 |
1.0 | import torch
import torchvision
import torch.optim as optim
from torch.optim import lr_scheduler
import torch.nn as nn
from torchsummary import summary
# set device
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# base ResNet-18 model (pretrained on ImageNet)
model = torchvision.models.resnet18(pretrained=True)
num_ftrs = model.fc.in_features
# freeze all layers
for param in model.parameters():
param.requires_grad = False
# add final trainable linear layers
model.fc = nn.Linear(num_ftrs, 101) # add final linear layer for 101 classes
# loss function
criterion = nn.CrossEntropyLoss()
# optimizer
optimizer = optim.SGD(model.fc.parameters(), lr=0.001, momentum=0.9)
# learning rate scheduler
lr_rate = lr_scheduler.StepLR(optimizer, step_size=7, gamma=0.1)
# model summary
# summary(model, (3, 299, 299)) | [
"torch.nn.Linear",
"torch.cuda.is_available",
"torch.optim.lr_scheduler.StepLR",
"torch.nn.CrossEntropyLoss"
] | 1.0.0 | oranges0da/food101 | 2a54f9d3da4de4d408183912947f25b016bd6d81 |
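# A minimal fine-tuning loop sketch for the setup above (not in the original
# file); it assumes a hypothetical `train_loader` yielding (images, labels)
# batches for the 101 Food-101 classes.
def train_one_epoch(model, train_loader, criterion, optimizer, lr_rate, device):
    model = model.to(device)
    model.train()
    for images, labels in train_loader:
        images, labels = images.to(device), labels.to(device)
        optimizer.zero_grad()
        loss = criterion(model(images), labels)
        loss.backward()
        optimizer.step()
    lr_rate.step()  # advance the StepLR schedule once per epoch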
1.6 | #
# Simple MLP and CNN models
#
import torch
import torch.nn as nn
from torch.distributions import Categorical
from gym import spaces
#
# Utility functions
#
from rlberry.agents.torch.utils.training import model_factory, activation_factory
def default_twinq_net_fn(env):
"""
Returns a default Twinq network
"""
assert isinstance(env.action_space, spaces.Discrete)
if isinstance(env.observation_space, spaces.Box):
obs_shape = env.observation_space.shape
elif isinstance(env.observation_space, spaces.Tuple):
obs_shape = env.observation_space.spaces[0].shape
else:
raise ValueError(
"Incompatible observation space: {}".format(env.observation_space)
)
    # Only flat (1-D) observation spaces are supported here
if len(obs_shape) == 1:
model_config = {
"type": "MultiLayerPerceptron",
"in_size": int(obs_shape[0]) + int(env.action_space.n),
"layer_sizes": [64, 64],
}
else:
raise ValueError(
"Incompatible observation shape: {}".format(env.observation_space.shape)
)
model_config["out_size"] = 1
q1 = model_factory(**model_config)
q2 = model_factory(**model_config)
return (q1, q2)
def default_policy_net_fn(env):
"""
Returns a default value network.
"""
if isinstance(env.observation_space, spaces.Box):
obs_shape = env.observation_space.shape
elif isinstance(env.observation_space, spaces.Tuple):
obs_shape = env.observation_space.spaces[0].shape
else:
raise ValueError(
"Incompatible observation space: {}".format(env.observation_space)
)
if len(obs_shape) == 3:
        if obs_shape[0] < obs_shape[1] and obs_shape[0] < obs_shape[2]:
# Assume CHW observation space
model_config = {
"type": "ConvolutionalNetwork",
"is_policy": True,
"in_channels": int(obs_shape[0]),
"in_height": int(obs_shape[1]),
"in_width": int(obs_shape[2]),
}
elif obs_shape[2] < obs_shape[0] and obs_shape[2] < obs_shape[1]:
# Assume WHC observation space
model_config = {
"type": "ConvolutionalNetwork",
"is_policy": True,
"transpose_obs": True,
"in_channels": int(obs_shape[2]),
"in_height": int(obs_shape[1]),
"in_width": int(obs_shape[0]),
}
elif len(obs_shape) == 2:
model_config = {
"type": "ConvolutionalNetwork",
"is_policy": True,
"in_channels": int(1),
"in_height": int(obs_shape[0]),
"in_width": int(obs_shape[1]),
}
elif len(obs_shape) == 1:
model_config = {
"type": "MultiLayerPerceptron",
"in_size": int(obs_shape[0]),
"layer_sizes": [64, 64],
"reshape": False,
"is_policy": True,
}
else:
raise ValueError(
"Incompatible observation shape: {}".format(env.observation_space.shape)
)
if isinstance(env.action_space, spaces.Discrete):
model_config["out_size"] = env.action_space.n
elif isinstance(env.action_space, spaces.Tuple):
model_config["out_size"] = env.action_space.spaces[0].n
return model_factory(**model_config)
def default_value_net_fn(env):
"""
Returns a default value network.
"""
if isinstance(env.observation_space, spaces.Box):
obs_shape = env.observation_space.shape
elif isinstance(env.observation_space, spaces.Tuple):
obs_shape = env.observation_space.spaces[0].shape
else:
raise ValueError(
"Incompatible observation space: {}".format(env.observation_space)
)
# Assume CHW observation space
if len(obs_shape) == 3:
model_config = {
"type": "ConvolutionalNetwork",
"in_channels": int(obs_shape[0]),
"in_height": int(obs_shape[1]),
"in_width": int(obs_shape[2]),
}
elif len(obs_shape) == 2:
model_config = {
"type": "ConvolutionalNetwork",
"in_channels": int(1),
"in_height": int(obs_shape[0]),
"in_width": int(obs_shape[1]),
}
elif len(obs_shape) == 1:
model_config = {
"type": "MultiLayerPerceptron",
"in_size": int(obs_shape[0]),
"layer_sizes": [64, 64],
}
else:
raise ValueError(
"Incompatible observation shape: {}".format(env.observation_space.shape)
)
model_config["out_size"] = 1
return model_factory(**model_config)
class Net(nn.Module):
def __init__(self, obs_size, hidden_size, n_actions):
super(Net, self).__init__()
self.net = nn.Sequential(
nn.Linear(obs_size, hidden_size),
nn.ReLU(),
nn.Linear(hidden_size, n_actions),
)
def forward(self, x):
return self.net(x)
class BaseModule(torch.nn.Module):
"""
Base torch.nn.Module implementing basic features:
- initialization factory
- normalization parameters
"""
def __init__(self, activation_type="RELU", reset_type="XAVIER"):
super().__init__()
self.activation = activation_factory(activation_type)
self.reset_type = reset_type
def _init_weights(self, m):
if hasattr(m, "weight"):
if self.reset_type == "XAVIER":
torch.nn.init.xavier_uniform_(m.weight.data)
elif self.reset_type == "ZEROS":
torch.nn.init.constant_(m.weight.data, 0.0)
else:
raise ValueError("Unknown reset type")
if hasattr(m, "bias") and m.bias is not None:
torch.nn.init.constant_(m.bias.data, 0.0)
def reset(self):
self.apply(self._init_weights)
class Table(torch.nn.Module):
"""Torch module for a policy for discrete state-action spaces.
Parameters
----------
state_size: int
Number of states
action_size: int
Number of actions
"""
def __init__(self, state_size, action_size):
super().__init__()
self.policy = nn.Embedding.from_pretrained(
torch.zeros(state_size, action_size), freeze=False
)
self.softmax = nn.Softmax(dim=-1)
def forward(self, x):
action_probs = self.softmax(self.action_scores(x))
return Categorical(action_probs)
def action_scores(self, x):
return self.policy(x.long())
class MultiLayerPerceptron(BaseModule):
"""Torch module for an MLP.
Parameters
----------
in_size: int
Input size
layer_sizes: Sequence[int]
Dimensions of each hidden layer.
reshape: bool, default = True
If True, input tensors are reshaped to (batch_size, dim)
out_size: int, optional
Output size. If None, the output size is given by the last
element of layer_sizes.
activation: {"RELU", "TANH", "ELU"}
Activation function.
is_policy: bool, default=False
If true, the :meth:`forward` method returns a categorical
distribution corresponding to the softmax of the output.
"""
def __init__(
self,
in_size=None,
layer_sizes=None,
reshape=True,
out_size=None,
activation="RELU",
is_policy=False,
**kwargs
):
super().__init__(**kwargs)
self.reshape = reshape
self.layer_sizes = layer_sizes or [64, 64]
self.layer_sizes = list(self.layer_sizes)
self.out_size = out_size
self.activation = activation_factory(activation)
self.is_policy = is_policy
self.softmax = nn.Softmax(dim=-1)
sizes = [in_size] + self.layer_sizes
layers_list = [nn.Linear(sizes[i], sizes[i + 1]) for i in range(len(sizes) - 1)]
self.layers = nn.ModuleList(layers_list)
if out_size:
self.predict = nn.Linear(sizes[-1], out_size)
def forward(self, x):
if self.reshape:
x = x.reshape(x.shape[0], -1) # We expect a batch of vectors
for layer in self.layers:
x = self.activation(layer(x.float()))
if self.out_size:
x = self.predict(x)
if self.is_policy:
action_probs = self.softmax(x)
dist = Categorical(action_probs)
return dist
return x
def action_scores(self, x):
if self.is_policy:
if self.reshape:
x = x.reshape(x.shape[0], -1) # We expect a batch of vectors
for layer in self.layers:
x = self.activation(layer(x.float()))
if self.out_size:
action_scores = self.predict(x)
return action_scores
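# A quick usage sketch (not part of the original module): with is_policy=True the
# forward pass returns a Categorical distribution over out_size actions, which is
# what the policy-gradient agents sample from.
def _demo_mlp_policy():
    policy = MultiLayerPerceptron(
        in_size=4, layer_sizes=[64, 64], out_size=2, is_policy=True
    )
    obs = torch.randn(8, 4)  # batch of 8 flat observations
    dist = policy(obs)
    actions = dist.sample()  # shape [8], integer actions in {0, 1}
    log_probs = dist.log_prob(actions)
    return actions, log_probs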
class DuelingNetwork(BaseModule):
"""Torch module for a DQN dueling network based on a MultiLayerPerceptron.
Parameters
-----------
in_size: int
Input size
base_module_kwargs: dict
Parameters for :func:`~rlberry.agents.torch.utils.training.model_factory`
to build shared (MLP) architecture for the advantage and value nets.
value_kwargs: dict
Parameters for :func:`~rlberry.agents.torch.utils.training.model_factory`
to build value network (MLP).
advantage_kwargs: dict
Parameters for :func:`~rlberry.agents.torch.utils.training.model_factory`
to build advantage network (MLP).
out_size: int
Output size.
"""
def __init__(
self,
in_size=None,
base_module_kwargs=None,
value_kwargs=None,
advantage_kwargs=None,
out_size=None,
):
super().__init__()
self.out_size = out_size
base_module_kwargs = base_module_kwargs or {}
base_module_kwargs["in_size"] = in_size
self.base_module = model_factory(**base_module_kwargs)
value_kwargs = value_kwargs or {}
value_kwargs["in_size"] = self.base_module.layer_sizes[-1]
value_kwargs["out_size"] = 1
self.value = model_factory(**value_kwargs)
advantage_kwargs = advantage_kwargs or {}
advantage_kwargs["in_size"] = self.base_module.layer_sizes[-1]
advantage_kwargs["out_size"] = out_size
self.advantage = model_factory(**advantage_kwargs)
def forward(self, x):
x = self.base_module(x)
value = self.value(x).expand(-1, self.out_size)
advantage = self.advantage(x)
return (
value + advantage - advantage.mean(1).unsqueeze(1).expand(-1, self.out_size)
)
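# Shape note (illustrative, not part of the original module): with out_size
# actions, the base MLP features x give value(x) of shape [B, 1] expanded to
# [B, out_size] and advantage(x) of shape [B, out_size]; subtracting the
# per-example advantage mean keeps the Q-values identifiable, which is the
# standard dueling-DQN aggregation.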
class ConvolutionalNetwork(nn.Module):
"""Torch module for a CNN.
Expects inputs of shape BCHW, where
B = batch size;
C = number of channels;
H = height;
W = width.
Parameters
----------
activation: {"RELU", "TANH", "ELU"}
Activation function.
in_channels: int
Number of input channels C
in_height: int
Input height H
in_width: int
Input width W
head_mlp_kwargs: dict, optional
Parameters to build an MLP
(:class:`~rlberry.agents.torch.utils.models.MultiLayerPerceptron`)
using the factory
:func:`~rlberry.agents.torch.utils.training.model_factory`
"""
def __init__(
self,
activation="RELU",
in_channels=None,
in_height=None,
in_width=None,
head_mlp_kwargs=None,
out_size=None,
is_policy=False,
transpose_obs=False,
**kwargs
):
super().__init__()
self.activation = activation_factory(activation)
self.conv1 = nn.Conv2d(in_channels, 16, kernel_size=2, stride=2)
self.conv2 = nn.Conv2d(16, 32, kernel_size=2, stride=2)
self.conv3 = nn.Conv2d(32, 64, kernel_size=2, stride=2)
# MLP Head
# Number of Linear input connections depends on output of conv2d layers
# and therefore the input image size, so compute it.
def conv2d_size_out(size, kernel_size=2, stride=2):
return (size - (kernel_size - 1) - 1) // stride + 1
convw = conv2d_size_out(conv2d_size_out(conv2d_size_out(in_width)))
convh = conv2d_size_out(conv2d_size_out(conv2d_size_out(in_height)))
assert convh > 0 and convw > 0
self.head_mlp_kwargs = head_mlp_kwargs or {}
self.head_mlp_kwargs["in_size"] = convw * convh * 64
self.head_mlp_kwargs["out_size"] = out_size
self.head_mlp_kwargs["is_policy"] = is_policy
self.head = model_factory(**self.head_mlp_kwargs)
self.is_policy = is_policy
self.transpose_obs = transpose_obs
def convolutions(self, x):
x = x.float()
if len(x.shape) == 3:
x = x.unsqueeze(0)
if self.transpose_obs:
x = torch.transpose(x, -1, -3)
x = self.activation((self.conv1(x)))
x = self.activation((self.conv2(x)))
x = self.activation((self.conv3(x)))
return x
def forward(self, x):
"""
Forward convolutional network
Parameters
----------
x: torch.tensor
Tensor of shape BCHW
"""
return self.head(self.convolutions(x))
def action_scores(self, x):
return self.head.action_scores(self.convolutions(x))
| [
"torch.nn.Linear",
"torch.zeros",
"torch.distributions.Categorical",
"torch.nn.ModuleList",
"torch.nn.Softmax",
"torch.nn.init.constant_",
"torch.nn.init.xavier_uniform_",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.transpose"
] | 1.6.0 | riccardodv/rlberry | 8bb03772cda1e13c57de0e1da7bc7356a3014cfb |
1.1 | # -*- coding: utf-8 -*-
import torch
import random
import inspect
from itertools import islice, repeat, tee
import os
def split_corpus(path, shard_size, default=None):
"""yield a `list` containing `shard_size` line of `path`,
or repeatly generate `default` if `path` is None.
"""
if path is not None:
return _split_corpus(path, shard_size)
else:
return repeat(default)
def _split_corpus(path, shard_size):
"""Yield a `list` containing `shard_size` line of `path`.
"""
with open(path, "rb") as f:
if shard_size <= 0:
yield f.readlines()
else:
while True:
shard = list(islice(f, shard_size))
if not shard:
break
yield shard
def aeq(*args):
"""
Assert all arguments have the same value
"""
arguments = (arg for arg in args)
first = next(arguments)
assert all(arg == first for arg in arguments), \
"Not all arguments have the same value: " + str(args)
def sequence_mask(lengths, max_len=None):
"""
Creates a boolean mask from sequence lengths.
"""
batch_size = lengths.numel()
max_len = max_len or lengths.max()
return (torch.arange(0, max_len, device=lengths.device)
.type_as(lengths)
.repeat(batch_size, 1)
.lt(lengths.unsqueeze(1)))
def tile(x, count, dim=0):
"""
Tiles x on dimension dim count times.
"""
perm = list(range(len(x.size())))
if dim != 0:
perm[0], perm[dim] = perm[dim], perm[0]
x = x.permute(perm).contiguous()
out_size = list(x.size())
out_size[0] *= count
batch = x.size(0)
x = x.view(batch, -1) \
.transpose(0, 1) \
.repeat(count, 1) \
.transpose(0, 1) \
.contiguous() \
.view(*out_size)
if dim != 0:
x = x.permute(perm).contiguous()
return x
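# Small illustrative examples (not from the original file): sequence_mask turns
# lengths into a boolean padding mask, and tile repeats a tensor along one
# dimension (used e.g. to expand the batch for beam search).
def _demo_mask_and_tile():
    lengths = torch.tensor([1, 3, 2])
    mask = sequence_mask(lengths)
    # mask -> [[True, False, False],
    #          [True, True,  True ],
    #          [True, True,  False]]
    x = torch.arange(6).view(2, 3)
    tiled = tile(x, 2, dim=0)  # shape [4, 3]; rows are x[0], x[0], x[1], x[1]
    return mask, tiled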
def use_gpu(opt):
"""
    Returns a boolean indicating whether a GPU should be used.
"""
return (hasattr(opt, 'gpu_ranks') and len(opt.gpu_ranks) > 0) or \
(hasattr(opt, 'gpu') and opt.gpu > -1)
def set_random_seed(seed, is_cuda):
"""Sets the random seed."""
if seed > 0:
torch.manual_seed(seed)
# this one is needed for torchtext random call (shuffled iterator)
# in multi gpu it ensures datasets are read in the same order
random.seed(seed)
# some cudnn methods can be random even after fixing the seed
# unless you tell it to be deterministic
torch.backends.cudnn.deterministic = True
if is_cuda and seed > 0:
# These ensure same initialization in multi gpu mode
torch.cuda.manual_seed(seed)
def generate_relative_positions_matrix(length, max_relative_positions,
cache=False):
"""Generate the clipped relative positions matrix
for a given length and maximum relative positions"""
if cache:
distance_mat = torch.arange(-length+1, 1, 1).unsqueeze(0)
else:
range_vec = torch.arange(length)
range_mat = range_vec.unsqueeze(-1).expand(-1, length).transpose(0, 1)
distance_mat = range_mat - range_mat.transpose(0, 1)
distance_mat_clipped = torch.clamp(distance_mat,
min=-max_relative_positions,
max=max_relative_positions)
# Shift values to be >= 0
final_mat = distance_mat_clipped + max_relative_positions
return final_mat
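# Worked example (illustrative, not part of the original file): for length=3 and
# max_relative_positions=1, the raw distance matrix is
# [[0, 1, 2], [-1, 0, 1], [-2, -1, 0]]; clipping to [-1, 1] and shifting by +1
# gives [[1, 2, 2], [0, 1, 2], [0, 0, 1]], i.e. indices into a
# (2 * max_relative_positions + 1)-entry relative-position embedding table.
def _demo_relative_positions():
    return generate_relative_positions_matrix(3, 1)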
def relative_matmul(x, z, transpose):
"""Helper function for relative positions attention."""
batch_size = x.shape[0]
heads = x.shape[1]
length = x.shape[2]
x_t = x.permute(2, 0, 1, 3)
x_t_r = x_t.reshape(length, heads * batch_size, -1)
if transpose:
z_t = z.transpose(1, 2)
x_tz_matmul = torch.matmul(x_t_r, z_t)
else:
x_tz_matmul = torch.matmul(x_t_r, z)
x_tz_matmul_r = x_tz_matmul.reshape(length, batch_size, heads, -1)
x_tz_matmul_r_t = x_tz_matmul_r.permute(1, 2, 0, 3)
return x_tz_matmul_r_t
def fn_args(fun):
"""Returns the list of function arguments name."""
return inspect.getfullargspec(fun).args
def nwise(iterable, n=2):
iterables = tee(iterable, n)
[next(iterables[i]) for i in range(n) for j in range(i)]
return zip(*iterables)
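# Example (illustrative): list(nwise([1, 2, 3, 4], 2)) -> [(1, 2), (2, 3), (3, 4)]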
def report_matrix(row_label, column_label, matrix):
header_format = "{:>10.10} " + "{:>10.7} " * len(row_label)
row_format = "{:>10.10} " + "{:>10.7f} " * len(row_label)
output = header_format.format("", *row_label) + '\n'
for word, row in zip(column_label, matrix):
max_index = row.index(max(row))
row_format = row_format.replace(
"{:>10.7f} ", "{:*>10.7f} ", max_index + 1)
row_format = row_format.replace(
"{:*>10.7f} ", "{:>10.7f} ", max_index)
output += row_format.format(word, *row) + '\n'
row_format = "{:>10.10} " + "{:>10.7f} " * len(row_label)
return output
def check_model_config(model_config, root):
# we need to check the model path + any tokenizer path
for model in model_config["models"]:
model_path = os.path.join(root, model)
if not os.path.exists(model_path):
raise FileNotFoundError(
"{} from model {} does not exist".format(
model_path, model_config["id"]))
if "tokenizer" in model_config.keys():
if "params" in model_config["tokenizer"].keys():
for k, v in model_config["tokenizer"]["params"].items():
if k.endswith("path"):
tok_path = os.path.join(root, v)
if not os.path.exists(tok_path):
raise FileNotFoundError(
"{} from model {} does not exist".format(
tok_path, model_config["id"]))
| [
"torch.cuda.manual_seed",
"torch.arange",
"torch.clamp",
"torch.manual_seed",
"torch.matmul"
] | 1.1.0 | sakrnference/data-to-text-hierarchical | da88d2d4491266fccc39ac1cc1fbb56bd7bbc30c |
1.2 | #!/usr/bin/env python3
import typing
from typing import Optional, Tuple, Union, Any, List, Callable, cast
import warnings
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from torch.nn import Module
from torch.utils.hooks import RemovableHandle
import numpy as np
from .._utils.common import (
_format_input,
_format_baseline,
_format_callable_baseline,
_format_attributions,
_format_tensor_into_tuples,
_format_additional_forward_args,
_run_forward,
_validate_input,
_expand_target,
_expand_additional_forward_args,
_tensorize_baseline,
_call_custom_attribution_func,
_compute_conv_delta_and_format_attrs,
ExpansionTypes,
)
from .._utils.attribution import GradientAttribution
from .._utils.gradient import apply_gradient_requirements, undo_gradient_requirements
from .._utils.typing import TensorOrTupleOfTensors
# Check if module backward hook can safely be used for the module that produced
# this inputs / outputs mapping
def _check_valid_module(inputs_grad_fn, outputs) -> bool:
def is_output_cloned(output_fn, input_grad_fn) -> bool:
"""
Checks if the output has been cloned. This happens especially in case of
layer deeplift.
"""
return (
output_fn[0].next_functions is not None
and output_fn[0].next_functions[0][0] == input_grad_fn
)
curr_fn = outputs.grad_fn
first_next = curr_fn.next_functions[0]
try:
        # if `inputs` is the input to the network then the grad_fn is None and
# for that input backward_hook isn't computed. That's the reason why we
# need to check on `inputs_grad_fns[first_next[1]]` being None.
return (
inputs_grad_fn is None
or first_next[0] == inputs_grad_fn
or is_output_cloned(first_next, inputs_grad_fn)
)
except IndexError:
return False
class DeepLift(GradientAttribution):
def __init__(self, model: Module) -> None:
r"""
Args:
model (nn.Module): The reference to PyTorch model instance.
"""
GradientAttribution.__init__(self, model)
self.model = model
self.forward_handles: List[RemovableHandle] = []
self.backward_handles: List[RemovableHandle] = []
@typing.overload
def attribute(
self,
inputs: TensorOrTupleOfTensors,
baselines: Union[
Tensor, int, float, Tuple[Union[Tensor, int, float], ...]
] = None,
target: Optional[
Union[int, Tuple[int, ...], Tensor, List[Tuple[int, ...]]]
] = None,
additional_forward_args: Any = None,
custom_attribution_func: Callable[..., Tuple[Tensor, ...]] = None,
) -> TensorOrTupleOfTensors:
...
@typing.overload
def attribute(
self,
inputs: TensorOrTupleOfTensors,
baselines: Optional[
Union[Tensor, int, float, Tuple[Union[Tensor, int, float], ...]]
] = None,
target: Optional[
Union[int, Tuple[int, ...], Tensor, List[Tuple[int, ...]]]
] = None,
additional_forward_args: Any = None,
return_convergence_delta: bool = False,
custom_attribution_func: Callable[..., Tuple[Tensor, ...]] = None,
) -> Union[TensorOrTupleOfTensors, Tuple[TensorOrTupleOfTensors, Tensor]]:
...
def attribute(
self,
inputs,
baselines=None,
target=None,
additional_forward_args=None,
return_convergence_delta=False,
custom_attribution_func=None,
):
r""""
Implements DeepLIFT algorithm based on the following paper:
Learning Important Features Through Propagating Activation Differences,
Avanti Shrikumar, et. al.
https://arxiv.org/abs/1704.02685
and the gradient formulation proposed in:
Towards better understanding of gradient-based attribution methods for
deep neural networks, Marco Ancona, et.al.
https://openreview.net/pdf?id=Sy21R9JAW
This implementation supports only Rescale rule. RevealCancel rule will
be supported in later releases.
In addition to that, in order to keep the implementation cleaner, DeepLIFT
for internal neurons and layers extends current implementation and is
implemented separately in LayerDeepLift and NeuronDeepLift.
Although DeepLIFT's(Rescale Rule) attribution quality is comparable with
Integrated Gradients, it runs significantly faster than Integrated
Gradients and is preferred for large datasets.
Currently we only support a limited number of non-linear activations
but the plan is to expand the list in the future.
Note: As we know, currently we cannot access the building blocks,
of PyTorch's built-in LSTM, RNNs and GRUs such as Tanh and Sigmoid.
Nonetheless, it is possible to build custom LSTMs, RNNS and GRUs
with performance similar to built-in ones using TorchScript.
More details on how to build custom RNNs can be found here:
https://pytorch.org/blog/optimizing-cuda-rnn-with-torchscript/
Args:
inputs (tensor or tuple of tensors): Input for which
attributions are computed. If forward_func takes a single
tensor as input, a single input tensor should be provided.
If forward_func takes multiple tensors as input, a tuple
of the input tensors should be provided. It is assumed
that for all given input tensors, dimension 0 corresponds
to the number of examples (aka batch size), and if
multiple input tensors are provided, the examples must
be aligned appropriately.
baselines (scalar, tensor, tuple of scalars or tensors, optional):
Baselines define reference samples that are compared with
the inputs. In order to assign attribution scores DeepLift
computes the differences between the inputs/outputs and
corresponding references.
Baselines can be provided as:
- a single tensor, if inputs is a single tensor, with
exactly the same dimensions as inputs or the first
dimension is one and the remaining dimensions match
with inputs.
- a single scalar, if inputs is a single tensor, which will
be broadcasted for each input value in input tensor.
- a tuple of tensors or scalars, the baseline corresponding
to each tensor in the inputs' tuple can be:
- either a tensor with matching dimensions to
corresponding tensor in the inputs' tuple
or the first dimension is one and the remaining
dimensions match with the corresponding
input tensor.
- or a scalar, corresponding to a tensor in the
inputs' tuple. This scalar value is broadcasted
for corresponding input tensor.
In the cases when `baselines` is not provided, we internally
use zero scalar corresponding to each input tensor.
Default: None
target (int, tuple, tensor or list, optional): Output indices for
which gradients are computed (for classification cases,
this is usually the target class).
If the network returns a scalar value per example,
no target index is necessary.
For general 2D outputs, targets can be either:
- a single integer or a tensor containing a single
integer, which is applied to all input examples
- a list of integers or a 1D tensor, with length matching
the number of examples in inputs (dim 0). Each integer
is applied as the target for the corresponding example.
For outputs with > 2 dimensions, targets can be either:
- A single tuple, which contains #output_dims - 1
elements. This target index is applied to all examples.
- A list of tuples with length equal to the number of
examples in inputs (dim 0), and each tuple containing
#output_dims - 1 elements. Each tuple is applied as the
target for the corresponding example.
Default: None
additional_forward_args (any, optional): If the forward function
requires additional arguments other than the inputs for
which attributions should not be computed, this argument
can be provided. It must be either a single additional
argument of a Tensor or arbitrary (non-tuple) type or a tuple
containing multiple additional arguments including tensors
or any arbitrary python types. These arguments are provided to
forward_func in order, following the arguments in inputs.
Note that attributions are not computed with respect
to these arguments.
Default: None
return_convergence_delta (bool, optional): Indicates whether to return
convergence delta or not. If `return_convergence_delta`
is set to True convergence delta will be returned in
a tuple following attributions.
Default: False
custom_attribution_func (callable, optional): A custom function for
computing final attribution scores. This function can take
at least one and at most three arguments with the
following signature:
- custom_attribution_func(multipliers)
- custom_attribution_func(multipliers, inputs)
- custom_attribution_func(multipliers, inputs, baselines)
In case this function is not provided, we use the default
logic defined as: multipliers * (inputs - baselines)
It is assumed that all input arguments, `multipliers`,
`inputs` and `baselines` are provided in tuples of same
length. `custom_attribution_func` returns a tuple of
attribution tensors that have the same length as the
`inputs`.
Default: None
Returns:
**attributions** or 2-element tuple of **attributions**, **delta**:
- **attributions** (*tensor* or tuple of *tensors*):
Attribution score computed based on DeepLift rescale rule with respect
to each input feature. Attributions will always be
the same size as the provided inputs, with each value
providing the attribution of the corresponding input index.
If a single tensor is provided as inputs, a single tensor is
returned. If a tuple is provided for inputs, a tuple of
corresponding sized tensors is returned.
- **delta** (*tensor*, returned if return_convergence_delta=True):
This is computed using the property that
the total sum of forward_func(inputs) - forward_func(baselines)
must equal the total sum of the attributions computed
based on DeepLift's rescale rule.
Delta is calculated per example, meaning that the number of
elements in returned delta tensor is equal to the number of
of examples in input.
Note that the logic described for deltas is guaranteed when the
default logic for attribution computations is used, meaning that the
`custom_attribution_func=None`, otherwise it is not guaranteed and
depends on the specifics of the `custom_attribution_func`.
Examples::
>>> # ImageClassifier takes a single input tensor of images Nx3x32x32,
>>> # and returns an Nx10 tensor of class probabilities.
>>> net = ImageClassifier()
>>> dl = DeepLift(net)
>>> input = torch.randn(2, 3, 32, 32, requires_grad=True)
>>> # Computes deeplift attribution scores for class 3.
>>> attribution = dl.attribute(input, target=3)
"""
# Keeps track whether original input is a tuple or not before
# converting it into a tuple.
is_inputs_tuple = isinstance(inputs, tuple)
inputs = _format_input(inputs)
baselines = _format_baseline(baselines, inputs)
gradient_mask = apply_gradient_requirements(inputs)
_validate_input(inputs, baselines)
# set hooks for baselines
warnings.warn(
"""Setting forward, backward hooks and attributes on non-linear
activations. The hooks and attributes will be removed
after the attribution is finished"""
)
baselines = _tensorize_baseline(inputs, baselines)
main_model_pre_hook = self._pre_hook_main_model()
self.model.apply(self._register_hooks)
additional_forward_args = _format_additional_forward_args(
additional_forward_args
)
input_base_additional_args = _expand_additional_forward_args(
additional_forward_args, 2, ExpansionTypes.repeat
)
expanded_target = _expand_target(
target, 2, expansion_type=ExpansionTypes.repeat
)
wrapped_forward_func = self._construct_forward_func(
self.model, (inputs, baselines), expanded_target, input_base_additional_args
)
gradients = self.gradient_func(wrapped_forward_func, inputs,)
if custom_attribution_func is None:
attributions = tuple(
(input - baseline) * gradient
for input, baseline, gradient in zip(inputs, baselines, gradients)
)
else:
attributions = _call_custom_attribution_func(
custom_attribution_func, gradients, inputs, baselines
)
# remove hooks from all activations
main_model_pre_hook.remove()
self._remove_hooks()
undo_gradient_requirements(inputs, gradient_mask)
return _compute_conv_delta_and_format_attrs(
self,
return_convergence_delta,
attributions,
baselines,
inputs,
additional_forward_args,
target,
is_inputs_tuple,
)
def _construct_forward_func(
self,
forward_func: Callable,
inputs: TensorOrTupleOfTensors,
target: Optional[
Union[int, Tuple[int, ...], Tensor, List[Tuple[int, ...]]]
] = None,
additional_forward_args: Any = None,
) -> Callable:
def forward_fn():
return _run_forward(forward_func, inputs, target, additional_forward_args)
if hasattr(forward_func, "device_ids"):
forward_fn.device_ids = forward_func.device_ids # type: ignore
return forward_fn
def _is_non_linear(self, module: Module) -> bool:
return type(module) in SUPPORTED_NON_LINEAR.keys()
def _forward_pre_hook_ref(
self, module: Module, inputs: Union[Tensor, Tuple[Tensor, ...]]
) -> None:
inputs = _format_tensor_into_tuples(inputs)
module.input_ref = tuple( # type: ignore
input.clone().detach() for input in inputs
)
def _forward_pre_hook(
self, module: Module, inputs: Union[Tensor, Tuple[Tensor, ...]]
) -> None:
"""
For the modules that perform in-place operations such as ReLUs, we cannot
use inputs from forward hooks. This is because in that case inputs
        and outputs are the same. We need to access the inputs in pre-hooks and
set necessary hooks on inputs there.
"""
inputs = _format_tensor_into_tuples(inputs)
module.input = inputs[0].clone().detach()
module.input_grad_fns = inputs[0].grad_fn # type: ignore
def tensor_backward_hook(grad):
if module.saved_grad is None:
raise RuntimeError(
"""Module {} was detected as not supporting correctly module
backward hook. You should modify your hook to ignore the given
grad_inputs (recompute them by hand if needed) and save the
newly computed grad_inputs in module.saved_grad. See MaxPool1d
as an example.""".format(
module
)
)
return module.saved_grad
# the hook is set by default but it will be used only for
# failure cases and will be removed otherwise
handle = inputs[0].register_hook(tensor_backward_hook)
module.input_hook = handle
def _forward_hook(
self,
module: Module,
inputs: Union[Tensor, Tuple[Tensor, ...]],
outputs: Union[Tensor, Tuple[Tensor, ...]],
) -> None:
r"""
        we need a forward hook to access and detach the inputs and
outputs of a neuron
"""
outputs = _format_tensor_into_tuples(outputs)
module.output = outputs[0].clone().detach()
if not _check_valid_module(module.input_grad_fns, outputs[0]):
warnings.warn(
"""An invalid module {} is detected. Saved gradients will
be used as the gradients of the module's input tensor.
See MaxPool1d as an example.""".format(
module
)
)
module.is_invalid = True # type: ignore
module.saved_grad = None # type: ignore
self.forward_handles.append(cast(RemovableHandle, module.input_hook))
else:
module.is_invalid = False # type: ignore
# removing the hook if there is no failure case
cast(RemovableHandle, module.input_hook).remove()
del module.input_hook
del module.input_grad_fns
def _backward_hook(
self,
module: Module,
grad_input: Union[Tensor, Tuple[Tensor, ...]],
grad_output: Union[Tensor, Tuple[Tensor, ...]],
eps: float = 1e-10,
):
r"""
`grad_input` is the gradient of the neuron with respect to its input
`grad_output` is the gradient of the neuron with respect to its output
we can override `grad_input` according to chain rule with.
`grad_output` * delta_out / delta_in.
"""
# before accessing the attributes from the module we want
# to ensure that the properties exist, if not, then it is
# likely that the module is being reused.
attr_criteria = self.satisfies_attribute_criteria(module)
if not attr_criteria:
raise RuntimeError(
"A Module {} was detected that does not contain some of "
"the input/output attributes that are required for DeepLift "
"computations. This can occur, for example, if "
"your module is being used more than once in the network."
"Please, ensure that module is being used only once in the "
"network.".format(module)
)
multipliers = tuple(
SUPPORTED_NON_LINEAR[type(module)](
module, module.input, module.output, grad_input, grad_output, eps=eps
)
)
        # remove all the properties that we set for the inputs and output
del module.input
del module.output
return multipliers
def satisfies_attribute_criteria(self, module: Module) -> bool:
return hasattr(module, "input") and hasattr(module, "output")
def _can_register_hook(self, module: Module) -> bool:
# TODO find a better way of checking if a module is a container or not
module_fullname = str(type(module))
has_already_hooks = len(module._backward_hooks) > 0 # type: ignore
return not (
"nn.modules.container" in module_fullname
or has_already_hooks
or not self._is_non_linear(module)
)
def _register_hooks(self, module: Module) -> None:
if not self._can_register_hook(module):
return
# adds forward hook to leaf nodes that are non-linear
forward_handle = module.register_forward_hook(self._forward_hook)
pre_forward_handle = module.register_forward_pre_hook(self._forward_pre_hook)
backward_handle = module.register_backward_hook(self._backward_hook)
self.forward_handles.append(forward_handle)
self.forward_handles.append(pre_forward_handle)
self.backward_handles.append(backward_handle)
def _remove_hooks(self) -> None:
for forward_handle in self.forward_handles:
forward_handle.remove()
for backward_handle in self.backward_handles:
backward_handle.remove()
def _pre_hook_main_model(self) -> RemovableHandle:
def pre_hook(module: Module, baseline_inputs_add_args: Tuple) -> Tuple:
inputs = baseline_inputs_add_args[0]
baselines = baseline_inputs_add_args[1]
additional_args = None
if len(baseline_inputs_add_args) > 2:
additional_args = baseline_inputs_add_args[2:]
baseline_input_tsr = tuple(
torch.cat([input, baseline])
for input, baseline in zip(inputs, baselines)
)
if additional_args is not None:
return (*baseline_input_tsr, *additional_args)
return baseline_input_tsr
if isinstance(self.model, nn.DataParallel):
return self.model.module.register_forward_pre_hook(pre_hook) # type: ignore
else:
return self.model.register_forward_pre_hook(pre_hook) # type: ignore
def has_convergence_delta(self) -> bool:
return True
class DeepLiftShap(DeepLift):
def __init__(self, model: Module) -> None:
r"""
Args:
model (nn.Module): The reference to PyTorch model instance.
"""
DeepLift.__init__(self, model)
# There's a mismatch between the signatures of DeepLift.attribute and
# DeepLiftShap.attribute, so we ignore typing here
@typing.overload # type: ignore
def attribute(
self,
inputs: TensorOrTupleOfTensors,
baselines: Union[TensorOrTupleOfTensors, Callable[..., TensorOrTupleOfTensors]],
target: Optional[
Union[int, Tuple[int, ...], Tensor, List[Tuple[int, ...]]]
] = None,
additional_forward_args: Any = None,
custom_attribution_func: Callable[..., Tuple[Tensor, ...]] = None,
) -> TensorOrTupleOfTensors:
...
@typing.overload
def attribute(
self,
inputs: TensorOrTupleOfTensors,
baselines: Union[TensorOrTupleOfTensors, Callable[..., TensorOrTupleOfTensors]],
target: Optional[
Union[int, Tuple[int, ...], Tensor, List[Tuple[int, ...]]]
] = None,
additional_forward_args: Any = None,
return_convergence_delta: bool = False,
custom_attribution_func: Callable[..., Tuple[Tensor, ...]] = None,
) -> Union[TensorOrTupleOfTensors, Tuple[TensorOrTupleOfTensors, Tensor]]:
...
def attribute(
self,
inputs,
baselines,
target=None,
additional_forward_args=None,
return_convergence_delta=False,
custom_attribution_func=None,
):
r"""
Extends DeepLift algorithm and approximates SHAP values using Deeplift.
For each input sample it computes DeepLift attribution with respect to
each baseline and averages resulting attributions.
More details about the algorithm can be found here:
http://papers.nips.cc/paper/7062-a-unified-approach-to-interpreting-model-predictions.pdf
Note that the explanation model:
1. Assumes that input features are independent of one another
2. Is linear, meaning that the explanations are modeled through
the additive composition of feature effects.
Although, it assumes a linear model for each explanation, the overall
model across multiple explanations can be complex and non-linear.
Args:
inputs (tensor or tuple of tensors): Input for which
attributions are computed. If forward_func takes a single
tensor as input, a single input tensor should be provided.
If forward_func takes multiple tensors as input, a tuple
of the input tensors should be provided. It is assumed
that for all given input tensors, dimension 0 corresponds
to the number of examples (aka batch size), and if
multiple input tensors are provided, the examples must
be aligned appropriately.
baselines (tensor, tuple of tensors, callable):
Baselines define reference samples that are compared with
the inputs. In order to assign attribution scores DeepLift
computes the differences between the inputs/outputs and
corresponding references. Baselines can be provided as:
- a single tensor, if inputs is a single tensor, with
the first dimension equal to the number of examples
in the baselines' distribution. The remaining dimensions
must match with input tensor's dimension starting from
the second dimension.
- a tuple of tensors, if inputs is a tuple of tensors,
with the first dimension of any tensor inside the tuple
equal to the number of examples in the baseline's
distribution. The remaining dimensions must match
the dimensions of the corresponding input tensor
starting from the second dimension.
- callable function, optionally takes `inputs` as an
argument and either returns a single tensor
or a tuple of those.
It is recommended that the number of samples in the baselines'
tensors is larger than one.
target (int, tuple, tensor or list, optional): Output indices for
which gradients are computed (for classification cases,
this is usually the target class).
If the network returns a scalar value per example,
no target index is necessary.
For general 2D outputs, targets can be either:
- a single integer or a tensor containing a single
integer, which is applied to all input examples
- a list of integers or a 1D tensor, with length matching
the number of examples in inputs (dim 0). Each integer
is applied as the target for the corresponding example.
For outputs with > 2 dimensions, targets can be either:
- A single tuple, which contains #output_dims - 1
elements. This target index is applied to all examples.
- A list of tuples with length equal to the number of
examples in inputs (dim 0), and each tuple containing
#output_dims - 1 elements. Each tuple is applied as the
target for the corresponding example.
Default: None
additional_forward_args (any, optional): If the forward function
requires additional arguments other than the inputs for
which attributions should not be computed, this argument
can be provided. It must be either a single additional
argument of a Tensor or arbitrary (non-tuple) type or a tuple
containing multiple additional arguments including tensors
or any arbitrary python types. These arguments are provided to
forward_func in order, following the arguments in inputs.
Note that attributions are not computed with respect
to these arguments.
Default: None
return_convergence_delta (bool, optional): Indicates whether to return
convergence delta or not. If `return_convergence_delta`
is set to True convergence delta will be returned in
a tuple following attributions.
Default: False
custom_attribution_func (callable, optional): A custom function for
computing final attribution scores. This function can take
at least one and at most three arguments with the
following signature:
- custom_attribution_func(multipliers)
- custom_attribution_func(multipliers, inputs)
- custom_attribution_func(multipliers, inputs, baselines)
In case this function is not provided we use the default
logic defined as: multipliers * (inputs - baselines)
It is assumed that all input arguments, `multipliers`,
`inputs` and `baselines` are provided in tuples of same
length. `custom_attribution_func` returns a tuple of
attribution tensors that have the same length as the
`inputs`.
Default: None
Returns:
**attributions** or 2-element tuple of **attributions**, **delta**:
- **attributions** (*tensor* or tuple of *tensors*):
Attribution score computed based on DeepLift rescale rule with
respect to each input feature. Attributions will always be
the same size as the provided inputs, with each value
providing the attribution of the corresponding input index.
If a single tensor is provided as inputs, a single tensor is
returned. If a tuple is provided for inputs, a tuple of
corresponding sized tensors is returned.
- **delta** (*tensor*, returned if return_convergence_delta=True):
This is computed using the property that the
total sum of forward_func(inputs) - forward_func(baselines)
must be very close to the total sum of attributions
computed based on approximated SHAP values using
Deeplift's rescale rule.
Delta is calculated for each example input and baseline pair,
meaning that the number of elements in returned delta tensor
is equal to the
`number of examples in input` * `number of examples
in baseline`. The deltas are ordered in the first place by
input example, followed by the baseline.
Note that the logic described for deltas is guaranteed
when the default logic for attribution computations is used,
meaning that the `custom_attribution_func=None`, otherwise
it is not guaranteed and depends on the specifics of the
`custom_attribution_func`.
Examples::
>>> # ImageClassifier takes a single input tensor of images Nx3x32x32,
>>> # and returns an Nx10 tensor of class probabilities.
>>> net = ImageClassifier()
>>> dl = DeepLiftShap(net)
>>> input = torch.randn(2, 3, 32, 32, requires_grad=True)
>>> # Computes shap values using deeplift for class 3.
>>> attribution = dl.attribute(input, target=3)
"""
baselines = _format_callable_baseline(baselines, inputs)
assert isinstance(baselines[0], torch.Tensor) and baselines[0].shape[0] > 1, (
"Baselines distribution has to be provided in form of a torch.Tensor"
" with more than one example but found: {}."
" If baselines are provided in shape of scalars or with a single"
" baseline example, `DeepLift`"
" approach can be used instead.".format(baselines[0])
)
# Keeps track whether original input is a tuple or not before
# converting it into a tuple.
is_inputs_tuple = isinstance(inputs, tuple)
inputs = _format_input(inputs)
# batch sizes
inp_bsz = inputs[0].shape[0]
base_bsz = baselines[0].shape[0]
(
exp_inp,
exp_base,
exp_tgt,
exp_addit_args,
) = self._expand_inputs_baselines_targets(
baselines, inputs, target, additional_forward_args
)
attributions = super().attribute(
exp_inp,
exp_base,
target=exp_tgt,
additional_forward_args=exp_addit_args,
return_convergence_delta=return_convergence_delta,
custom_attribution_func=custom_attribution_func,
)
if return_convergence_delta:
attributions, delta = attributions
attributions = tuple(
self._compute_mean_across_baselines(inp_bsz, base_bsz, attribution)
for attribution in attributions
)
if return_convergence_delta:
return _format_attributions(is_inputs_tuple, attributions), delta
else:
return _format_attributions(is_inputs_tuple, attributions)
def _expand_inputs_baselines_targets(
self,
baselines: Tuple[Tensor, ...],
inputs: Tuple[Tensor, ...],
target: Optional[Union[int, Tuple[int, ...], Tensor, List[Tuple[int, ...]]]],
additional_forward_args: Any,
):
inp_bsz = inputs[0].shape[0]
base_bsz = baselines[0].shape[0]
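        # Pair every input example with every baseline: each input row is repeated
        # base_bsz times in interleaved order (i1, i1, ..., i2, i2, ...) while the
        # whole baseline batch is tiled inp_bsz times, yielding inp_bsz * base_bsz
        # aligned rows for inputs and baselines alike.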
expanded_inputs = tuple(
[
input.repeat_interleave(base_bsz, dim=0).requires_grad_()
for input in inputs
]
)
expanded_baselines = tuple(
[
baseline.repeat(
(inp_bsz,) + tuple([1] * (len(baseline.shape) - 1))
).requires_grad_()
for baseline in baselines
]
)
expanded_target = _expand_target(
target, base_bsz, expansion_type=ExpansionTypes.repeat_interleave
)
input_additional_args = (
_expand_additional_forward_args(additional_forward_args, base_bsz)
if additional_forward_args is not None
else None
)
return (
expanded_inputs,
expanded_baselines,
expanded_target,
input_additional_args,
)
def _compute_mean_across_baselines(
self, inp_bsz: int, base_bsz: int, attribution: Tensor
) -> Tensor:
# Average for multiple references
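        # e.g. with inp_bsz=2 and base_bsz=3 an attribution of shape (6, F) is
        # viewed as (2, 3, F) and averaged over the baseline dimension, giving (2, F)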
attr_shape: Tuple = (inp_bsz, base_bsz)
if len(attribution.shape) > 1:
attr_shape += attribution.shape[1:]
return torch.mean(attribution.view(attr_shape), dim=1, keepdim=False)
def nonlinear(
module: Module,
inputs: Tensor,
outputs: Tensor,
grad_input: Tensor,
grad_output: Tensor,
eps: float = 1e-10,
):
r"""
grad_input: (dLoss / dprev_layer_out, dLoss / wij, dLoss / bij)
grad_output: (dLoss / dlayer_out)
https://github.com/pytorch/pytorch/issues/12331
"""
delta_in, delta_out = _compute_diffs(inputs, outputs)
new_grad_inp = list(grad_input)
    # supported non-linear modules take only a single tensor as input, hence we access
    # only the first element in `grad_input` and `grad_output`
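    # Rescale rule: the incoming gradient is multiplied by delta_out / delta_in
    # (the DeepLift multiplier) instead of the local derivative, except where
    # |delta_in| < eps, in which case the original gradient is kept to avoid
    # dividing by a near-zero value.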
new_grad_inp[0] = torch.where(
abs(delta_in) < eps, new_grad_inp[0], grad_output[0] * delta_out / delta_in,
)
# If the module is invalid, save the newly computed gradients
# The original_grad_input will be overridden later in the Tensor hook
if module.is_invalid:
module.saved_grad = new_grad_inp[0]
return new_grad_inp
def softmax(
module: Module,
inputs: Tensor,
outputs: Tensor,
grad_input: Tensor,
grad_output: Tensor,
eps: float = 1e-10,
):
delta_in, delta_out = _compute_diffs(inputs, outputs)
new_grad_inp = list(grad_input)
grad_input_unnorm = torch.where(
abs(delta_in) < eps, new_grad_inp[0], grad_output[0] * delta_out / delta_in,
)
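    # As in the nonlinear hook, the gradient is rescaled by delta_out / delta_in;
    # it is then centered below by subtracting its mean so its entries sum to zero
    # (softmax outputs are unchanged by a constant shift of their inputs).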
# normalizing
n = np.prod(grad_input[0].shape)
# updating only the first half
new_grad_inp[0] = grad_input_unnorm - grad_input_unnorm.sum() * 1 / n
return new_grad_inp
def maxpool1d(
module: Module,
inputs: Tensor,
outputs: Tensor,
grad_input: Tensor,
grad_output: Tensor,
eps: float = 1e-10,
):
return maxpool(
module,
F.max_pool1d,
F.max_unpool1d,
inputs,
outputs,
grad_input,
grad_output,
eps=eps,
)
def maxpool2d(
module: Module,
inputs: Tensor,
outputs: Tensor,
grad_input: Tensor,
grad_output: Tensor,
eps: float = 1e-10,
):
return maxpool(
module,
F.max_pool2d,
F.max_unpool2d,
inputs,
outputs,
grad_input,
grad_output,
eps=eps,
)
def maxpool3d(
module: Module, inputs, outputs, grad_input, grad_output, eps: float = 1e-10
):
return maxpool(
module,
F.max_pool3d,
F.max_unpool3d,
inputs,
outputs,
grad_input,
grad_output,
eps=eps,
)
def maxpool(
module: Module,
pool_func: Callable,
unpool_func: Callable,
inputs,
outputs,
grad_input,
grad_output,
eps: float = 1e-10,
):
with torch.no_grad():
input, input_ref = inputs.chunk(2)
output, output_ref = outputs.chunk(2)
delta_in = input - input_ref
delta_in = torch.cat(2 * [delta_in])
        # Extracts the cross maximum between the outputs of maxpool for the
        # actual inputs and their corresponding references. In case the delta outputs
        # for the references are larger, the method relies on the references and
        # corresponding gradients to compute the multipliers and contributions.
delta_out_xmax = torch.max(output, output_ref)
delta_out = torch.cat([delta_out_xmax - output_ref, output - delta_out_xmax])
_, indices = pool_func(
module.input,
module.kernel_size,
module.stride,
module.padding,
module.dilation,
module.ceil_mode,
True,
)
grad_output_updated = grad_output[0]
unpool_grad_out_delta, unpool_grad_out_ref_delta = torch.chunk(
unpool_func(
grad_output_updated * delta_out,
indices,
module.kernel_size,
module.stride,
module.padding,
list(cast(torch.Size, module.input.shape)),
),
2,
)
unpool_grad_out_delta = unpool_grad_out_delta + unpool_grad_out_ref_delta
unpool_grad_out_delta = torch.cat(2 * [unpool_grad_out_delta])
# If the module is invalid, we need to recompute the grad_input
if module.is_invalid:
original_grad_input = grad_input
grad_input = (
unpool_func(
grad_output_updated,
indices,
module.kernel_size,
module.stride,
module.padding,
list(cast(torch.Size, module.input.shape)),
),
)
new_grad_inp = torch.where(
abs(delta_in) < eps, grad_input[0], unpool_grad_out_delta / delta_in
)
# If the module is invalid, save the newly computed gradients
# The original_grad_input will be overridden later in the Tensor hook
if module.is_invalid:
module.saved_grad = new_grad_inp
return original_grad_input
else:
return (new_grad_inp,)
def _compute_diffs(inputs: Tensor, outputs: Tensor) -> Tuple[Tensor, Tensor]:
input, input_ref = inputs.chunk(2)
    # If the model is a single non-linear module and we apply the Rescale rule on it,
    # we might not be able to perform chunking because the output of the module is
    # usually replaced by the model output.
output, output_ref = outputs.chunk(2)
delta_in = input - input_ref
delta_out = output - output_ref
return torch.cat(2 * [delta_in]), torch.cat(2 * [delta_out])
SUPPORTED_NON_LINEAR = {
nn.ReLU: nonlinear,
nn.ELU: nonlinear,
nn.LeakyReLU: nonlinear,
nn.Sigmoid: nonlinear,
nn.Tanh: nonlinear,
nn.Softplus: nonlinear,
nn.MaxPool1d: maxpool1d,
nn.MaxPool2d: maxpool2d,
nn.MaxPool3d: maxpool3d,
nn.Softmax: softmax,
}
| [
"torch.cat",
"torch.no_grad",
"torch.max"
] | 1.2 | kolvia/captum | 4b6280296dbc740df09afdc592b74e27e0b7ce88 |
1.8 | import json
import os
import librosa
import numpy as np
from torch.utils.data import Dataset
from text import text_to_sequence
from utils.tools import pad_1D
class Dataset(Dataset):
def __init__(
self, filename, preprocess_config, train_config, sort=False, drop_last=False
):
self.dataset_name = preprocess_config["dataset"]
self.preprocessed_path = preprocess_config["path"]["preprocessed_path"]
self.raw_path = preprocess_config["path"]["raw_path"]
self.cleaners = preprocess_config["preprocessing"]["text"]["text_cleaners"]
self.batch_size = train_config["optimizer"]["batch_size"]
self.sample_rate = preprocess_config["preprocessing"]["audio"]["sampling_rate"]
self.hop_length = preprocess_config["preprocessing"]["stft"]["hop_length"]
self.segment_length_up = train_config["window"]["segment_length"]
self.segment_length = train_config["window"]["segment_length"] // self.hop_length
self.basename, self.speaker, self.text, self.raw_text = self.process_meta(
filename
)
with open(os.path.join(self.preprocessed_path, "speakers.json")) as f:
self.speaker_map = json.load(f)
self.sort = sort
self.drop_last = drop_last
def __len__(self):
return len(self.text)
def load_audio_to_torch(self, audio_path):
# Read and trim wav files
audio, sample_rate = librosa.load(audio_path)
return audio.squeeze(), sample_rate
def __getitem__(self, idx):
basename = self.basename[idx]
speaker = self.speaker[idx]
speaker_id = self.speaker_map[speaker]
raw_text = self.raw_text[idx]
phone = np.array(text_to_sequence(self.text[idx], self.cleaners))
audio_path = os.path.join(
self.preprocessed_path,
"wav",
"{}-wav-{}.wav".format(speaker, basename)
)
audio, sample_rate = self.load_audio_to_torch(audio_path)
        assert sample_rate == self.sample_rate, \
            f"""Got path to audio with sampling rate {sample_rate}, \
            but required {self.sample_rate} according to the config."""
duration_path = os.path.join(
self.preprocessed_path,
"duration",
"{}-duration-{}.npy".format(speaker, basename),
)
duration = np.load(duration_path)
# Sample Segment
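        # `segment_length` is measured in frames and `segment_length_up` in raw
        # samples (frames * hop_length); the start offset is drawn in frames and
        # multiplied by hop_length so the audio crop stays frame-aligned with the
        # duration sequence.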
seq_start = 0
max_seq_start = sum(duration) - self.segment_length
if max_seq_start > 0:
seq_start = np.random.randint(0, max_seq_start) * self.hop_length
audio = audio[seq_start:seq_start+self.segment_length_up]
sample = {
"id": basename,
"speaker": speaker_id,
"text": phone,
"raw_text": raw_text,
"audio": audio,
"duration": duration,
"seq_start": seq_start // self.hop_length,
"phone": self.text[idx],
}
return sample
def process_meta(self, filename):
with open(
os.path.join(self.preprocessed_path, filename), "r", encoding="utf-8"
) as f:
name = []
speaker = []
text = []
raw_text = []
for line in f.readlines():
n, s, t, r = line.strip("\n").split("|")
name.append(n)
speaker.append(s)
text.append(t)
raw_text.append(r)
return name, speaker, text, raw_text
def reprocess(self, data, idxs):
ids = [data[idx]["id"] for idx in idxs]
speakers = [data[idx]["speaker"] for idx in idxs]
texts = [data[idx]["text"] for idx in idxs]
raw_texts = [data[idx]["raw_text"] for idx in idxs]
phones = [data[idx]["phone"] for idx in idxs]
audios = [data[idx]["audio"] for idx in idxs]
durations = [data[idx]["duration"] for idx in idxs]
seq_starts = [data[idx]["seq_start"] for idx in idxs]
text_lens = np.array([text.shape[0] for text in texts])
speakers = np.array(speakers)
texts = pad_1D(texts)
audios = pad_1D(audios)
durations = pad_1D(durations)
seq_starts = np.array(seq_starts)
return (
ids,
raw_texts,
speakers,
texts,
text_lens,
max(text_lens),
audios,
durations,
seq_starts,
phones,
)
def collate_fn(self, data):
data_size = len(data)
if self.sort:
len_arr = np.array([d["text"].shape[0] for d in data])
idx_arr = np.argsort(-len_arr)
else:
idx_arr = np.arange(data_size)
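        # Group the sample indices into batches of `batch_size`; any remainder goes
        # into `tail` and is kept as one final smaller batch unless `drop_last` is set.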
tail = idx_arr[len(idx_arr) - (len(idx_arr) % self.batch_size) :]
idx_arr = idx_arr[: len(idx_arr) - (len(idx_arr) % self.batch_size)]
idx_arr = idx_arr.reshape((-1, self.batch_size)).tolist()
if not self.drop_last and len(tail) > 0:
idx_arr += [tail.tolist()]
output = list()
for idx in idx_arr:
output.append(self.reprocess(data, idx))
return output
class TextDataset(Dataset):
def __init__(self, filepath, preprocess_config):
self.cleaners = preprocess_config["preprocessing"]["text"]["text_cleaners"]
self.basename, self.speaker, self.text, self.raw_text = self.process_meta(
filepath
)
with open(
os.path.join(
preprocess_config["path"]["preprocessed_path"], "speakers.json"
)
) as f:
self.speaker_map = json.load(f)
def __len__(self):
return len(self.text)
def __getitem__(self, idx):
basename = self.basename[idx]
speaker = self.speaker[idx]
speaker_id = self.speaker_map[speaker]
raw_text = self.raw_text[idx]
phone = np.array(text_to_sequence(self.text[idx], self.cleaners))
return (basename, speaker_id, phone, raw_text)
def process_meta(self, filename):
with open(filename, "r", encoding="utf-8") as f:
name = []
speaker = []
text = []
raw_text = []
for line in f.readlines():
n, s, t, r = line.strip("\n").split("|")
name.append(n)
speaker.append(s)
text.append(t)
raw_text.append(r)
return name, speaker, text, raw_text
def collate_fn(self, data):
ids = [d[0] for d in data]
speakers = np.array([d[1] for d in data])
texts = [d[2] for d in data]
raw_texts = [d[3] for d in data]
text_lens = np.array([text.shape[0] for text in texts])
texts = pad_1D(texts)
return ids, raw_texts, speakers, texts, text_lens, max(text_lens)
if __name__ == "__main__":
# Test
import torch
import yaml
from torch.utils.data import DataLoader
from utils.utils import to_device
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
preprocess_config = yaml.load(
open("./config/LJSpeech/preprocess.yaml", "r"), Loader=yaml.FullLoader
)
train_config = yaml.load(
open("./config/LJSpeech/train.yaml", "r"), Loader=yaml.FullLoader
)
train_dataset = Dataset(
"train.txt", preprocess_config, train_config, sort=True, drop_last=True
)
val_dataset = Dataset(
"val.txt", preprocess_config, train_config, sort=False, drop_last=False
)
train_loader = DataLoader(
train_dataset,
batch_size=train_config["optimizer"]["batch_size"] * 4,
shuffle=True,
collate_fn=train_dataset.collate_fn,
)
val_loader = DataLoader(
val_dataset,
batch_size=train_config["optimizer"]["batch_size"],
shuffle=False,
collate_fn=val_dataset.collate_fn,
)
n_batch = 0
for batchs in train_loader:
for batch in batchs:
to_device(batch, device)
n_batch += 1
print(
"Training set with size {} is composed of {} batches.".format(
len(train_dataset), n_batch
)
)
n_batch = 0
for batchs in val_loader:
for batch in batchs:
to_device(batch, device)
n_batch += 1
print(
"Validation set with size {} is composed of {} batches.".format(
len(val_dataset), n_batch
)
) | [
"torch.utils.data.Dataset",
"torch.cuda.is_available",
"torch.utils.data.DataLoader"
] | 1.8.1 | Seungwoo0326/WaveGrad2-1 | 3b202201348449b89353f28bce1596ca7939a810 |
0.4 | #!/usr/bin/env python3
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of the following repo:
# https://github.com/facebookresearch/poincare-embeddings.
#
import torch as th
from torch.optim.optimizer import Optimizer, required
import math
spten_t = th.sparse.FloatTensor
def poincare_grad(p, d_p):
r"""
Function to compute Riemannian gradient from the
Euclidean gradient in the Poincaré ball.
Args:
p (Tensor): Current point in the ball
d_p (Tensor): Euclidean gradient at p
"""
if d_p.is_sparse:
p_sqnorm = th.sum(
p.data[d_p._indices()[0].squeeze()] ** 2, dim=1,
keepdim=True
).expand_as(d_p._values())
n_vals = d_p._values() * ((1 - p_sqnorm) ** 2) / 4
d_p = spten_t(d_p._indices(), n_vals, d_p.size())
else:
p_sqnorm = th.sum(p.data ** 2, dim=-1, keepdim=True)
d_p = d_p * ((1 - p_sqnorm) ** 2 / 4).expand_as(d_p)
return d_p
def euclidean_grad(p, d_p):
return d_p
def euclidean_retraction(p, d_p, lr):
p.data.add_(-lr, d_p)
class RiemannianSGD(Optimizer):
r"""Riemannian stochastic gradient descent.
Args:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
rgrad (Function): Function to compute the Riemannian gradient from
an Euclidean gradient
retraction (Function): Function to update the parameters via a
retraction of the Riemannian gradient
lr (float): learning rate
"""
def __init__(self, params, lr=required, rgrad=required, retraction=required):
defaults = dict(lr=lr, rgrad=rgrad, retraction=retraction)
super(RiemannianSGD, self).__init__(params, defaults)
def step(self, lr=None):
"""Performs a single optimization step.
Arguments:
lr (float, optional): learning rate for the current update.
"""
loss = None
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
d_p = p.grad.data
if lr is None:
lr = group['lr']
d_p = group['rgrad'](p, d_p)
group['retraction'](p, d_p, lr)
return loss | [
"torch.sum"
] | 0.4.1 | prokolyvakis/hyperkg | db0b8eee78424dde31f8f691b0c6dd07c376ce73 |
1.7 | import torch
from torch.utils import data
from torch.utils.data.dataset import Dataset
import os
from plyfile import PlyData
import numpy as np
from utils import split
class SemanticKittiDataset(Dataset):
def __init__(self, sequence, data_folder):
super(SemanticKittiDataset, self).__init__()
self.data_folder = data_folder
self.sequence = sequence
self.root_dir = f"{self.data_folder}/{self.sequence:02}"
self.count = len([name for name in os.listdir(self.root_dir) if os.path.isfile(self.root_dir + "/" + name)])
def __getitem__(self, index):
points_dir = f"{self.root_dir}/{index:06}.ply"
with open(points_dir, 'rb') as f:
ply_data = PlyData.read(f)['vertex'].data
points = np.stack([
ply_data['x'],
ply_data['y'],
ply_data['z']
], axis=1)
labels = np.array(ply_data['label'], dtype=int)
points = torch.from_numpy(points.T).float()
labels = torch.from_numpy(labels)
return points, labels
def __len__(self):
return self.count
def semantic_kitti_dataloader(sequence, data_folder):
dataset = SemanticKittiDataset(sequence=sequence, data_folder=data_folder)
n_train, n_test = split(len(dataset), (0.8, 0.2))
n_dev = 2 # For computing reasons
n_test -= n_dev
train_ds, dev_ds, test_ds = torch.utils.data.random_split(dataset=dataset, lengths=(n_train, n_dev, n_test))
train_loader = data.DataLoader(dataset=train_ds, batch_size=1, shuffle=True)
dev_loader = data.DataLoader(dataset=dev_ds, batch_size=1, shuffle=True)
test_loader = data.DataLoader(dataset=test_ds, batch_size=1, shuffle=True)
return train_loader, dev_loader, test_loader
| [
"torch.utils.data.random_split",
"torch.from_numpy",
"torch.utils.data.DataLoader"
] | 1.7.0 | KASCedric/PointNet | 51d494249cc5a308a94cd703f607e636f47ee9ac |
1.5 | import hashlib
import inspect
import logging
import os
import re
from abc import abstractmethod
from collections import Counter
from pathlib import Path
from typing import List, Union, Dict, Optional
import gensim
import numpy as np
import torch
from bpemb import BPEmb
from torch import nn
from transformers import AutoTokenizer, AutoConfig, AutoModel, CONFIG_MAPPING, PreTrainedTokenizer, XLNetModel, \
TransfoXLModel
import flair
from flair.data import Sentence, Token, Corpus, Dictionary
from flair.embeddings.base import Embeddings
from flair.file_utils import cached_path, open_inside_zip, instance_lru_cache
log = logging.getLogger("flair")
class TokenEmbeddings(Embeddings):
"""Abstract base class for all token-level embeddings. Ever new type of word embedding must implement these methods."""
@property
@abstractmethod
def embedding_length(self) -> int:
"""Returns the length of the embedding vector."""
pass
@property
def embedding_type(self) -> str:
return "word-level"
@staticmethod
def get_instance_parameters(locals: dict) -> dict:
class_definition = locals.get("__class__")
instance_parameters = set(inspect.getfullargspec(class_definition.__init__).args)
instance_parameters.difference_update(set(["self"]))
instance_parameters.update(set(["__class__"]))
instance_parameters = {class_attribute: attribute_value for class_attribute, attribute_value in locals.items()
if class_attribute in instance_parameters}
return instance_parameters
class StackedEmbeddings(TokenEmbeddings):
"""A stack of embeddings, used if you need to combine several different embedding types."""
def __init__(self, embeddings: List[TokenEmbeddings]):
"""The constructor takes a list of embeddings to be combined."""
super().__init__()
self.embeddings = embeddings
# IMPORTANT: add embeddings as torch modules
for i, embedding in enumerate(embeddings):
embedding.name = f"{str(i)}-{embedding.name}"
self.add_module(f"list_embedding_{str(i)}", embedding)
self.name: str = "Stack"
self.static_embeddings: bool = True
self.__embedding_type: str = embeddings[0].embedding_type
self.__embedding_length: int = 0
for embedding in embeddings:
self.__embedding_length += embedding.embedding_length
def embed(
self, sentences: Union[Sentence, List[Sentence]], static_embeddings: bool = True
):
# if only one sentence is passed, convert to list of sentence
if type(sentences) is Sentence:
sentences = [sentences]
for embedding in self.embeddings:
embedding.embed(sentences)
@property
def embedding_type(self) -> str:
return self.__embedding_type
@property
def embedding_length(self) -> int:
return self.__embedding_length
def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
for embedding in self.embeddings:
embedding._add_embeddings_internal(sentences)
return sentences
def __str__(self):
return f'StackedEmbeddings [{",".join([str(e) for e in self.embeddings])}]'
def get_names(self) -> List[str]:
"""Returns a list of embedding names. In most cases, it is just a list with one item, namely the name of
this embedding. But in some cases, the embedding is made up by different embeddings (StackedEmbedding).
Then, the list contains the names of all embeddings in the stack."""
names = []
for embedding in self.embeddings:
names.extend(embedding.get_names())
return names
def get_named_embeddings_dict(self) -> Dict:
named_embeddings_dict = {}
for embedding in self.embeddings:
named_embeddings_dict.update(embedding.get_named_embeddings_dict())
return named_embeddings_dict
class WordEmbeddings(TokenEmbeddings):
"""Standard static word embeddings, such as GloVe or FastText."""
def __init__(self, embeddings: str, field: str = None, fine_tune: bool = False, force_cpu: bool = True,
stable: bool = False):
"""
Initializes classic word embeddings. Constructor downloads required files if not there.
:param embeddings: one of: 'glove', 'extvec', 'crawl' or two-letter language code or custom
If you want to use a custom embedding file, just pass the path to the embeddings as embeddings variable.
set stable=True to use the stable embeddings as described in https://arxiv.org/abs/2110.02861
"""
self.embeddings = embeddings
self.instance_parameters = self.get_instance_parameters(locals=locals())
if fine_tune and force_cpu and flair.device.type != "cpu":
raise ValueError("Cannot train WordEmbeddings on cpu if the model is trained on gpu, set force_cpu=False")
hu_path: str = "https://flair.informatik.hu-berlin.de/resources/embeddings/token"
cache_dir = Path("embeddings")
# GLOVE embeddings
if embeddings.lower() == "glove" or embeddings.lower() == "en-glove":
cached_path(f"{hu_path}/glove.gensim.vectors.npy", cache_dir=cache_dir)
embeddings = cached_path(f"{hu_path}/glove.gensim", cache_dir=cache_dir)
# TURIAN embeddings
elif embeddings.lower() == "turian" or embeddings.lower() == "en-turian":
cached_path(f"{hu_path}/turian.vectors.npy", cache_dir=cache_dir)
embeddings = cached_path(f"{hu_path}/turian", cache_dir=cache_dir)
# KOMNINOS embeddings
elif embeddings.lower() == "extvec" or embeddings.lower() == "en-extvec":
cached_path(f"{hu_path}/extvec.gensim.vectors.npy", cache_dir=cache_dir)
embeddings = cached_path(f"{hu_path}/extvec.gensim", cache_dir=cache_dir)
# pubmed embeddings
elif embeddings.lower() == "pubmed" or embeddings.lower() == "en-pubmed":
cached_path(f"{hu_path}/pubmed_pmc_wiki_sg_1M.gensim.vectors.npy", cache_dir=cache_dir)
embeddings = cached_path(f"{hu_path}/pubmed_pmc_wiki_sg_1M.gensim", cache_dir=cache_dir)
# FT-CRAWL embeddings
elif embeddings.lower() == "crawl" or embeddings.lower() == "en-crawl":
cached_path(f"{hu_path}/en-fasttext-crawl-300d-1M.vectors.npy", cache_dir=cache_dir)
embeddings = cached_path(f"{hu_path}/en-fasttext-crawl-300d-1M", cache_dir=cache_dir)
# FT-CRAWL embeddings
elif embeddings.lower() in ["news", "en-news", "en"]:
cached_path(f"{hu_path}/en-fasttext-news-300d-1M.vectors.npy", cache_dir=cache_dir)
embeddings = cached_path(f"{hu_path}/en-fasttext-news-300d-1M", cache_dir=cache_dir)
# twitter embeddings
elif embeddings.lower() in ["twitter", "en-twitter"]:
cached_path(f"{hu_path}/twitter.gensim.vectors.npy", cache_dir=cache_dir)
embeddings = cached_path(f"{hu_path}/twitter.gensim", cache_dir=cache_dir)
# two-letter language code wiki embeddings
elif len(embeddings.lower()) == 2:
cached_path(f"{hu_path}/{embeddings}-wiki-fasttext-300d-1M.vectors.npy", cache_dir=cache_dir)
embeddings = cached_path(f"{hu_path}/{embeddings}-wiki-fasttext-300d-1M", cache_dir=cache_dir)
# two-letter language code wiki embeddings
elif len(embeddings.lower()) == 7 and embeddings.endswith("-wiki"):
cached_path(f"{hu_path}/{embeddings[:2]}-wiki-fasttext-300d-1M.vectors.npy", cache_dir=cache_dir)
embeddings = cached_path(f"{hu_path}/{embeddings[:2]}-wiki-fasttext-300d-1M", cache_dir=cache_dir)
# two-letter language code crawl embeddings
elif len(embeddings.lower()) == 8 and embeddings.endswith("-crawl"):
cached_path(f"{hu_path}/{embeddings[:2]}-crawl-fasttext-300d-1M.vectors.npy", cache_dir=cache_dir)
embeddings = cached_path(f"{hu_path}/{embeddings[:2]}-crawl-fasttext-300d-1M", cache_dir=cache_dir)
elif not Path(embeddings).exists():
raise ValueError(
f'The given embeddings "{embeddings}" is not available or is not a valid path.'
)
self.name: str = str(embeddings)
self.static_embeddings = not fine_tune
self.fine_tune = fine_tune
self.force_cpu = force_cpu
self.field = field
self.stable = stable
super().__init__()
if str(embeddings).endswith(".bin"):
precomputed_word_embeddings = gensim.models.KeyedVectors.load_word2vec_format(
str(embeddings), binary=True
)
else:
precomputed_word_embeddings = gensim.models.KeyedVectors.load(
str(embeddings)
)
self.__embedding_length: int = precomputed_word_embeddings.vector_size
vectors = np.row_stack(
(precomputed_word_embeddings.vectors, np.zeros(self.__embedding_length, dtype="float"))
)
self.embedding = nn.Embedding.from_pretrained(torch.FloatTensor(vectors), freeze=not fine_tune)
self.vocab = {
k: v.index
for k, v in precomputed_word_embeddings.vocab.items()
}
if stable:
self.layer_norm = nn.LayerNorm(self.__embedding_length, elementwise_affine=fine_tune)
else:
self.layer_norm = None
self.device = None
self.to(flair.device)
@property
def embedding_length(self) -> int:
return self.__embedding_length
@instance_lru_cache(maxsize=100000, typed=False)
def get_cached_token_index(self, word: str) -> int:
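        # Look the word up with increasingly aggressive normalisation: exact match,
        # lowercase, digits replaced by '#', then digits replaced by '0'; if all
        # fail, return len(vocab), which points at the extra all-zero <unk> row.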
if word in self.vocab:
return self.vocab[word]
elif word.lower() in self.vocab:
return self.vocab[word.lower()]
elif re.sub(r"\d", "#", word.lower()) in self.vocab:
return self.vocab[
re.sub(r"\d", "#", word.lower())
]
elif re.sub(r"\d", "0", word.lower()) in self.vocab:
return self.vocab[
re.sub(r"\d", "0", word.lower())
]
else:
return len(self.vocab) # <unk> token
def get_vec(self, word: str) -> torch.Tensor:
word_embedding = self.vectors[self.get_cached_token_index(word)]
word_embedding = torch.tensor(
word_embedding.tolist(), device=flair.device, dtype=torch.float
)
return word_embedding
def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
tokens = [token for sentence in sentences for token in sentence.tokens]
word_indices: List[int] = []
for token in tokens:
if "field" not in self.__dict__ or self.field is None:
word = token.text
else:
word = token.get_tag(self.field).value
word_indices.append(self.get_cached_token_index(word))
embeddings = self.embedding(torch.tensor(word_indices, dtype=torch.long, device=self.device))
if self.stable:
embeddings = self.layer_norm(embeddings)
if self.force_cpu:
embeddings = embeddings.to(flair.device)
for emb, token in zip(embeddings, tokens):
token.set_embedding(self.name, emb)
return sentences
def __str__(self):
return self.name
def extra_repr(self):
# fix serialized models
if "embeddings" not in self.__dict__:
self.embeddings = self.name
return f"'{self.embeddings}'"
def train(self, mode=True):
if not self.fine_tune:
pass
else:
super(WordEmbeddings, self).train(mode)
def to(self, device):
if self.force_cpu:
device = torch.device("cpu")
self.device = device
super(WordEmbeddings, self).to(device)
def _apply(self, fn):
if fn.__name__ == "convert" and self.force_cpu:
# this is required to force the module on the cpu,
# if a parent module is put to gpu, the _apply is called to each sub_module
# self.to(..) actually sets the device properly
if not hasattr(self, "device"):
self.to(flair.device)
return
super(WordEmbeddings, self)._apply(fn)
def __getattribute__(self, item):
# this ignores the get_cached_vec method when loading older versions
# it is needed for compatibility reasons
if "get_cached_vec" == item:
return None
return super().__getattribute__(item)
def __setstate__(self, state):
if "get_cached_vec" in state:
del state["get_cached_vec"]
if "force_cpu" not in state:
state["force_cpu"] = True
if "fine_tune" not in state:
state["fine_tune"] = False
if "precomputed_word_embeddings" in state:
precomputed_word_embeddings = state.pop("precomputed_word_embeddings")
vectors = np.row_stack(
(precomputed_word_embeddings.vectors, np.zeros(precomputed_word_embeddings.vector_size, dtype="float"))
)
embedding = nn.Embedding.from_pretrained(torch.FloatTensor(vectors), freeze=not state["fine_tune"])
vocab = {
k: v.index
for k, v in precomputed_word_embeddings.vocab.items()
}
state["embedding"] = embedding
state["vocab"] = vocab
if "stable" not in state:
state["stable"] = False
state["layer_norm"] = None
super().__setstate__(state)
class CharacterEmbeddings(TokenEmbeddings):
"""Character embeddings of words, as proposed in Lample et al., 2016."""
def __init__(
self,
path_to_char_dict: str = None,
char_embedding_dim: int = 25,
hidden_size_char: int = 25,
):
"""Uses the default character dictionary if none provided."""
super().__init__()
self.name = "Char"
self.static_embeddings = False
self.instance_parameters = self.get_instance_parameters(locals=locals())
# use list of common characters if none provided
if path_to_char_dict is None:
self.char_dictionary: Dictionary = Dictionary.load("common-chars")
else:
self.char_dictionary: Dictionary = Dictionary.load_from_file(path_to_char_dict)
self.char_embedding_dim: int = char_embedding_dim
self.hidden_size_char: int = hidden_size_char
self.char_embedding = torch.nn.Embedding(
len(self.char_dictionary.item2idx), self.char_embedding_dim
)
self.char_rnn = torch.nn.LSTM(
self.char_embedding_dim,
self.hidden_size_char,
num_layers=1,
bidirectional=True,
)
self.__embedding_length = self.hidden_size_char * 2
self.to(flair.device)
@property
def embedding_length(self) -> int:
return self.__embedding_length
def _add_embeddings_internal(self, sentences: List[Sentence]):
for sentence in sentences:
tokens_char_indices = []
# translate words in sentence into ints using dictionary
for token in sentence.tokens:
char_indices = [
self.char_dictionary.get_idx_for_item(char) for char in token.text
]
tokens_char_indices.append(char_indices)
# sort words by length, for batching and masking
tokens_sorted_by_length = sorted(
tokens_char_indices, key=lambda p: len(p), reverse=True
)
d = {}
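            # `d` maps positions in the length-sorted batch back to each token's
            # original position in the sentence so the embeddings can be reordered later.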
for i, ci in enumerate(tokens_char_indices):
for j, cj in enumerate(tokens_sorted_by_length):
if ci == cj:
d[j] = i
continue
chars2_length = [len(c) for c in tokens_sorted_by_length]
longest_token_in_sentence = max(chars2_length)
tokens_mask = torch.zeros(
(len(tokens_sorted_by_length), longest_token_in_sentence),
dtype=torch.long,
device=flair.device,
)
for i, c in enumerate(tokens_sorted_by_length):
tokens_mask[i, : chars2_length[i]] = torch.tensor(
c, dtype=torch.long, device=flair.device
)
# chars for rnn processing
chars = tokens_mask
character_embeddings = self.char_embedding(chars).transpose(0, 1)
packed = torch.nn.utils.rnn.pack_padded_sequence(
character_embeddings, chars2_length
)
lstm_out, self.hidden = self.char_rnn(packed)
outputs, output_lengths = torch.nn.utils.rnn.pad_packed_sequence(lstm_out)
outputs = outputs.transpose(0, 1)
chars_embeds_temp = torch.zeros(
(outputs.size(0), outputs.size(2)),
dtype=torch.float,
device=flair.device,
)
for i, index in enumerate(output_lengths):
chars_embeds_temp[i] = outputs[i, index - 1]
character_embeddings = chars_embeds_temp.clone()
for i in range(character_embeddings.size(0)):
character_embeddings[d[i]] = chars_embeds_temp[i]
for token_number, token in enumerate(sentence.tokens):
token.set_embedding(self.name, character_embeddings[token_number])
def __str__(self):
return self.name
class FlairEmbeddings(TokenEmbeddings):
"""Contextual string embeddings of words, as proposed in Akbik et al., 2018."""
def __init__(self,
model,
fine_tune: bool = False,
chars_per_chunk: int = 512,
with_whitespace: bool = True,
tokenized_lm: bool = True,
is_lower: bool = False,
):
"""
initializes contextual string embeddings using a character-level language model.
:param model: model string, one of 'news-forward', 'news-backward', 'news-forward-fast', 'news-backward-fast',
'mix-forward', 'mix-backward', 'german-forward', 'german-backward', 'polish-backward', 'polish-forward',
etc (see https://github.com/flairNLP/flair/blob/master/resources/docs/embeddings/FLAIR_EMBEDDINGS.md)
depending on which character language model is desired.
:param fine_tune: if set to True, the gradient will propagate into the language model. This dramatically slows
down training and often leads to overfitting, so use with caution.
:param chars_per_chunk: max number of chars per rnn pass to control speed/memory tradeoff. Higher means faster
but requires more memory. Lower means slower but less memory.
:param with_whitespace: If True, use hidden state after whitespace after word. If False, use hidden
state at last character of word.
:param tokenized_lm: Whether this lm is tokenized. Default is True, but for LMs trained over unprocessed text
False might be better.
"""
super().__init__()
self.instance_parameters = self.get_instance_parameters(locals=locals())
cache_dir = Path("embeddings")
hu_path: str = "https://flair.informatik.hu-berlin.de/resources/embeddings/flair"
clef_hipe_path: str = "https://files.ifi.uzh.ch/cl/siclemat/impresso/clef-hipe-2020/flair"
self.is_lower: bool = is_lower
self.PRETRAINED_MODEL_ARCHIVE_MAP = {
# multilingual models
"multi-forward": f"{hu_path}/lm-jw300-forward-v0.1.pt",
"multi-backward": f"{hu_path}/lm-jw300-backward-v0.1.pt",
"multi-v0-forward": f"{hu_path}/lm-multi-forward-v0.1.pt",
"multi-v0-backward": f"{hu_path}/lm-multi-backward-v0.1.pt",
"multi-forward-fast": f"{hu_path}/lm-multi-forward-fast-v0.1.pt",
"multi-backward-fast": f"{hu_path}/lm-multi-backward-fast-v0.1.pt",
# English models
"en-forward": f"{hu_path}/news-forward-0.4.1.pt",
"en-backward": f"{hu_path}/news-backward-0.4.1.pt",
"en-forward-fast": f"{hu_path}/lm-news-english-forward-1024-v0.2rc.pt",
"en-backward-fast": f"{hu_path}/lm-news-english-backward-1024-v0.2rc.pt",
"news-forward": f"{hu_path}/news-forward-0.4.1.pt",
"news-backward": f"{hu_path}/news-backward-0.4.1.pt",
"news-forward-fast": f"{hu_path}/lm-news-english-forward-1024-v0.2rc.pt",
"news-backward-fast": f"{hu_path}/lm-news-english-backward-1024-v0.2rc.pt",
"mix-forward": f"{hu_path}/lm-mix-english-forward-v0.2rc.pt",
"mix-backward": f"{hu_path}/lm-mix-english-backward-v0.2rc.pt",
# Arabic
"ar-forward": f"{hu_path}/lm-ar-opus-large-forward-v0.1.pt",
"ar-backward": f"{hu_path}/lm-ar-opus-large-backward-v0.1.pt",
# Bulgarian
"bg-forward-fast": f"{hu_path}/lm-bg-small-forward-v0.1.pt",
"bg-backward-fast": f"{hu_path}/lm-bg-small-backward-v0.1.pt",
"bg-forward": f"{hu_path}/lm-bg-opus-large-forward-v0.1.pt",
"bg-backward": f"{hu_path}/lm-bg-opus-large-backward-v0.1.pt",
# Czech
"cs-forward": f"{hu_path}/lm-cs-opus-large-forward-v0.1.pt",
"cs-backward": f"{hu_path}/lm-cs-opus-large-backward-v0.1.pt",
"cs-v0-forward": f"{hu_path}/lm-cs-large-forward-v0.1.pt",
"cs-v0-backward": f"{hu_path}/lm-cs-large-backward-v0.1.pt",
# Danish
"da-forward": f"{hu_path}/lm-da-opus-large-forward-v0.1.pt",
"da-backward": f"{hu_path}/lm-da-opus-large-backward-v0.1.pt",
# German
"de-forward": f"{hu_path}/lm-mix-german-forward-v0.2rc.pt",
"de-backward": f"{hu_path}/lm-mix-german-backward-v0.2rc.pt",
"de-historic-ha-forward": f"{hu_path}/lm-historic-hamburger-anzeiger-forward-v0.1.pt",
"de-historic-ha-backward": f"{hu_path}/lm-historic-hamburger-anzeiger-backward-v0.1.pt",
"de-historic-wz-forward": f"{hu_path}/lm-historic-wiener-zeitung-forward-v0.1.pt",
"de-historic-wz-backward": f"{hu_path}/lm-historic-wiener-zeitung-backward-v0.1.pt",
"de-historic-rw-forward": f"{hu_path}/redewiedergabe_lm_forward.pt",
"de-historic-rw-backward": f"{hu_path}/redewiedergabe_lm_backward.pt",
# Spanish
"es-forward": f"{hu_path}/lm-es-forward.pt",
"es-backward": f"{hu_path}/lm-es-backward.pt",
"es-forward-fast": f"{hu_path}/lm-es-forward-fast.pt",
"es-backward-fast": f"{hu_path}/lm-es-backward-fast.pt",
# Basque
"eu-forward": f"{hu_path}/lm-eu-opus-large-forward-v0.2.pt",
"eu-backward": f"{hu_path}/lm-eu-opus-large-backward-v0.2.pt",
"eu-v1-forward": f"{hu_path}/lm-eu-opus-large-forward-v0.1.pt",
"eu-v1-backward": f"{hu_path}/lm-eu-opus-large-backward-v0.1.pt",
"eu-v0-forward": f"{hu_path}/lm-eu-large-forward-v0.1.pt",
"eu-v0-backward": f"{hu_path}/lm-eu-large-backward-v0.1.pt",
# Persian
"fa-forward": f"{hu_path}/lm-fa-opus-large-forward-v0.1.pt",
"fa-backward": f"{hu_path}/lm-fa-opus-large-backward-v0.1.pt",
# Finnish
"fi-forward": f"{hu_path}/lm-fi-opus-large-forward-v0.1.pt",
"fi-backward": f"{hu_path}/lm-fi-opus-large-backward-v0.1.pt",
# French
"fr-forward": f"{hu_path}/lm-fr-charlm-forward.pt",
"fr-backward": f"{hu_path}/lm-fr-charlm-backward.pt",
# Hebrew
"he-forward": f"{hu_path}/lm-he-opus-large-forward-v0.1.pt",
"he-backward": f"{hu_path}/lm-he-opus-large-backward-v0.1.pt",
# Hindi
"hi-forward": f"{hu_path}/lm-hi-opus-large-forward-v0.1.pt",
"hi-backward": f"{hu_path}/lm-hi-opus-large-backward-v0.1.pt",
# Croatian
"hr-forward": f"{hu_path}/lm-hr-opus-large-forward-v0.1.pt",
"hr-backward": f"{hu_path}/lm-hr-opus-large-backward-v0.1.pt",
# Indonesian
"id-forward": f"{hu_path}/lm-id-opus-large-forward-v0.1.pt",
"id-backward": f"{hu_path}/lm-id-opus-large-backward-v0.1.pt",
# Italian
"it-forward": f"{hu_path}/lm-it-opus-large-forward-v0.1.pt",
"it-backward": f"{hu_path}/lm-it-opus-large-backward-v0.1.pt",
# Japanese
"ja-forward": f"{hu_path}/japanese-forward.pt",
"ja-backward": f"{hu_path}/japanese-backward.pt",
# Malayalam
"ml-forward": f"https://raw.githubusercontent.com/qburst/models-repository/master/FlairMalayalamModels/ml-forward.pt",
"ml-backward": f"https://raw.githubusercontent.com/qburst/models-repository/master/FlairMalayalamModels/ml-backward.pt",
# Dutch
"nl-forward": f"{hu_path}/lm-nl-opus-large-forward-v0.1.pt",
"nl-backward": f"{hu_path}/lm-nl-opus-large-backward-v0.1.pt",
"nl-v0-forward": f"{hu_path}/lm-nl-large-forward-v0.1.pt",
"nl-v0-backward": f"{hu_path}/lm-nl-large-backward-v0.1.pt",
# Norwegian
"no-forward": f"{hu_path}/lm-no-opus-large-forward-v0.1.pt",
"no-backward": f"{hu_path}/lm-no-opus-large-backward-v0.1.pt",
# Polish
"pl-forward": f"{hu_path}/lm-polish-forward-v0.2.pt",
"pl-backward": f"{hu_path}/lm-polish-backward-v0.2.pt",
"pl-opus-forward": f"{hu_path}/lm-pl-opus-large-forward-v0.1.pt",
"pl-opus-backward": f"{hu_path}/lm-pl-opus-large-backward-v0.1.pt",
# Portuguese
"pt-forward": f"{hu_path}/lm-pt-forward.pt",
"pt-backward": f"{hu_path}/lm-pt-backward.pt",
# Pubmed
"pubmed-forward": f"{hu_path}/pubmed-forward.pt",
"pubmed-backward": f"{hu_path}/pubmed-backward.pt",
"pubmed-2015-forward": f"{hu_path}/pubmed-2015-fw-lm.pt",
"pubmed-2015-backward": f"{hu_path}/pubmed-2015-bw-lm.pt",
# Slovenian
"sl-forward": f"{hu_path}/lm-sl-opus-large-forward-v0.1.pt",
"sl-backward": f"{hu_path}/lm-sl-opus-large-backward-v0.1.pt",
"sl-v0-forward": f"{hu_path}/lm-sl-large-forward-v0.1.pt",
"sl-v0-backward": f"{hu_path}/lm-sl-large-backward-v0.1.pt",
# Swedish
"sv-forward": f"{hu_path}/lm-sv-opus-large-forward-v0.1.pt",
"sv-backward": f"{hu_path}/lm-sv-opus-large-backward-v0.1.pt",
"sv-v0-forward": f"{hu_path}/lm-sv-large-forward-v0.1.pt",
"sv-v0-backward": f"{hu_path}/lm-sv-large-backward-v0.1.pt",
# Tamil
"ta-forward": f"{hu_path}/lm-ta-opus-large-forward-v0.1.pt",
"ta-backward": f"{hu_path}/lm-ta-opus-large-backward-v0.1.pt",
# Spanish clinical
"es-clinical-forward": f"{hu_path}/es-clinical-forward.pt",
"es-clinical-backward": f"{hu_path}/es-clinical-backward.pt",
# CLEF HIPE Shared task
"de-impresso-hipe-v1-forward": f"{clef_hipe_path}/de-hipe-flair-v1-forward/best-lm.pt",
"de-impresso-hipe-v1-backward": f"{clef_hipe_path}/de-hipe-flair-v1-backward/best-lm.pt",
"en-impresso-hipe-v1-forward": f"{clef_hipe_path}/en-flair-v1-forward/best-lm.pt",
"en-impresso-hipe-v1-backward": f"{clef_hipe_path}/en-flair-v1-backward/best-lm.pt",
"fr-impresso-hipe-v1-forward": f"{clef_hipe_path}/fr-hipe-flair-v1-forward/best-lm.pt",
"fr-impresso-hipe-v1-backward": f"{clef_hipe_path}/fr-hipe-flair-v1-backward/best-lm.pt",
}
if type(model) == str:
# load model if in pretrained model map
if model.lower() in self.PRETRAINED_MODEL_ARCHIVE_MAP:
base_path = self.PRETRAINED_MODEL_ARCHIVE_MAP[model.lower()]
# Fix for CLEF HIPE models (avoid overwriting best-lm.pt in cache_dir)
if "impresso-hipe" in model.lower():
cache_dir = cache_dir / model.lower()
# CLEF HIPE models are lowercased
self.is_lower = True
model = cached_path(base_path, cache_dir=cache_dir)
elif replace_with_language_code(model) in self.PRETRAINED_MODEL_ARCHIVE_MAP:
base_path = self.PRETRAINED_MODEL_ARCHIVE_MAP[
replace_with_language_code(model)
]
model = cached_path(base_path, cache_dir=cache_dir)
elif not Path(model).exists():
raise ValueError(
f'The given model "{model}" is not available or is not a valid path.'
)
from flair.models import LanguageModel
if type(model) == LanguageModel:
self.lm: LanguageModel = model
self.name = f"Task-LSTM-{self.lm.hidden_size}-{self.lm.nlayers}-{self.lm.is_forward_lm}"
else:
self.lm: LanguageModel = LanguageModel.load_language_model(model)
self.name = str(model)
# embeddings are static if we don't do finetuning
self.fine_tune = fine_tune
self.static_embeddings = not fine_tune
self.is_forward_lm: bool = self.lm.is_forward_lm
self.with_whitespace: bool = with_whitespace
self.tokenized_lm: bool = tokenized_lm
self.chars_per_chunk: int = chars_per_chunk
# embed a dummy sentence to determine embedding_length
dummy_sentence: Sentence = Sentence()
dummy_sentence.add_token(Token("hello"))
embedded_dummy = self.embed(dummy_sentence)
self.__embedding_length: int = len(
embedded_dummy[0].get_token(1).get_embedding()
)
# set to eval mode
self.eval()
def train(self, mode=True):
# make compatible with serialized models (TODO: remove)
if "fine_tune" not in self.__dict__:
self.fine_tune = False
if "chars_per_chunk" not in self.__dict__:
self.chars_per_chunk = 512
# unless fine-tuning is set, do not set language model to train() in order to disallow language model dropout
if not self.fine_tune:
pass
else:
super(FlairEmbeddings, self).train(mode)
@property
def embedding_length(self) -> int:
return self.__embedding_length
def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
# make compatible with serialized models (TODO: remove)
if "with_whitespace" not in self.__dict__:
self.with_whitespace = True
if "tokenized_lm" not in self.__dict__:
self.tokenized_lm = True
if "is_lower" not in self.__dict__:
self.is_lower = False
        # gradients are enabled if fine-tuning is enabled
gradient_context = torch.enable_grad() if self.fine_tune else torch.no_grad()
with gradient_context:
            # use the language model to generate the embeddings. First, get text sentences
text_sentences = [sentence.to_tokenized_string() for sentence in sentences] if self.tokenized_lm \
else [sentence.to_plain_string() for sentence in sentences]
if self.is_lower:
text_sentences = [sentence.lower() for sentence in text_sentences]
start_marker = self.lm.document_delimiter if "document_delimiter" in self.lm.__dict__ else '\n'
end_marker = " "
# get hidden states from language model
all_hidden_states_in_lm = self.lm.get_representation(
text_sentences, start_marker, end_marker, self.chars_per_chunk
)
if not self.fine_tune:
all_hidden_states_in_lm = all_hidden_states_in_lm.detach()
# take first or last hidden states from language model as word representation
for i, sentence in enumerate(sentences):
sentence_text = sentence.to_tokenized_string() if self.tokenized_lm else sentence.to_plain_string()
offset_forward: int = len(start_marker)
offset_backward: int = len(sentence_text) + len(start_marker)
for token in sentence.tokens:
offset_forward += len(token.text)
if self.is_forward_lm:
offset_with_whitespace = offset_forward
offset_without_whitespace = offset_forward - 1
else:
offset_with_whitespace = offset_backward
offset_without_whitespace = offset_backward - 1
# offset mode that extracts at whitespace after last character
if self.with_whitespace:
embedding = all_hidden_states_in_lm[offset_with_whitespace, i, :]
# offset mode that extracts at last character
else:
embedding = all_hidden_states_in_lm[offset_without_whitespace, i, :]
if self.tokenized_lm or token.whitespace_after:
offset_forward += 1
offset_backward -= 1
offset_backward -= len(token.text)
# only clone if optimization mode is 'gpu'
if flair.embedding_storage_mode == "gpu":
embedding = embedding.clone()
token.set_embedding(self.name, embedding)
del all_hidden_states_in_lm
return sentences
def __str__(self):
return self.name
class PooledFlairEmbeddings(TokenEmbeddings):
def __init__(
self,
contextual_embeddings: Union[str, FlairEmbeddings],
pooling: str = "min",
only_capitalized: bool = False,
**kwargs,
):
super().__init__()
self.instance_parameters = self.get_instance_parameters(locals=locals())
# use the character language model embeddings as basis
if type(contextual_embeddings) is str:
self.context_embeddings: FlairEmbeddings = FlairEmbeddings(
contextual_embeddings, **kwargs
)
else:
self.context_embeddings: FlairEmbeddings = contextual_embeddings
# length is twice the original character LM embedding length
self.embedding_length = self.context_embeddings.embedding_length * 2
self.name = self.context_embeddings.name + "-context"
# these fields are for the embedding memory
self.word_embeddings = {}
self.word_count = {}
# whether to add only capitalized words to memory (faster runtime and lower memory consumption)
self.only_capitalized = only_capitalized
# we re-compute embeddings dynamically at each epoch
self.static_embeddings = False
# set the memory method
self.pooling = pooling
def train(self, mode=True):
super().train(mode=mode)
if mode:
# memory is wiped each time we do a training run
print("train mode resetting embeddings")
self.word_embeddings = {}
self.word_count = {}
def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
self.context_embeddings.embed(sentences)
# if we keep a pooling, it needs to be updated continuously
for sentence in sentences:
for token in sentence.tokens:
# update embedding
local_embedding = token._embeddings[self.context_embeddings.name].cpu()
# check token.text is empty or not
if token.text:
if token.text[0].isupper() or not self.only_capitalized:
if token.text not in self.word_embeddings:
self.word_embeddings[token.text] = local_embedding
self.word_count[token.text] = 1
else:
# set aggregation operation
if self.pooling == "mean":
aggregated_embedding = torch.add(self.word_embeddings[token.text], local_embedding)
elif self.pooling == "fade":
aggregated_embedding = torch.add(self.word_embeddings[token.text], local_embedding)
aggregated_embedding /= 2
elif self.pooling == "max":
aggregated_embedding = torch.max(self.word_embeddings[token.text], local_embedding)
elif self.pooling == "min":
aggregated_embedding = torch.min(self.word_embeddings[token.text], local_embedding)
self.word_embeddings[token.text] = aggregated_embedding
self.word_count[token.text] += 1
# add embeddings after updating
for sentence in sentences:
for token in sentence.tokens:
if token.text in self.word_embeddings:
base = (
self.word_embeddings[token.text] / self.word_count[token.text]
if self.pooling == "mean"
else self.word_embeddings[token.text]
)
else:
base = token._embeddings[self.context_embeddings.name]
token.set_embedding(self.name, base)
return sentences
def embedding_length(self) -> int:
return self.embedding_length
def get_names(self) -> List[str]:
return [self.name, self.context_embeddings.name]
def __setstate__(self, d):
self.__dict__ = d
if flair.device != 'cpu':
for key in self.word_embeddings:
self.word_embeddings[key] = self.word_embeddings[key].cpu()
class TransformerWordEmbeddings(TokenEmbeddings):
NO_MAX_SEQ_LENGTH_MODELS = [XLNetModel, TransfoXLModel]
def __init__(
self,
model: str = "bert-base-uncased",
layers: str = "all",
subtoken_pooling: str = "first",
layer_mean: bool = True,
fine_tune: bool = False,
allow_long_sentences: bool = True,
use_context: Union[bool, int] = False,
memory_effective_training: bool = True,
respect_document_boundaries: bool = True,
context_dropout: float = 0.5,
**kwargs
):
"""
Bidirectional transformer embeddings of words from various transformer architectures.
:param model: name of transformer model (see https://huggingface.co/transformers/pretrained_models.html for
options)
:param layers: string indicating which layers to take for embedding (-1 is topmost layer)
:param subtoken_pooling: how to get from token piece embeddings to token embedding. Either take the first
subtoken ('first'), the last subtoken ('last'), both first and last ('first_last') or a mean over all ('mean')
        :param layer_mean: If True, uses the mean over all selected layers as the embedding
:param fine_tune: If True, allows transformers to be fine-tuned during training
"""
super().__init__()
self.instance_parameters = self.get_instance_parameters(locals=locals())
# temporary fix to disable tokenizer parallelism warning
# (see https://stackoverflow.com/questions/62691279/how-to-disable-tokenizers-parallelism-true-false-warning)
import os
os.environ["TOKENIZERS_PARALLELISM"] = "false"
# do not print transformer warnings as these are confusing in this case
from transformers import logging
logging.set_verbosity_error()
# load tokenizer and transformer model
self.tokenizer: PreTrainedTokenizer = AutoTokenizer.from_pretrained(model, **kwargs)
if not 'config' in kwargs:
config = AutoConfig.from_pretrained(model, output_hidden_states=True, **kwargs)
self.model = AutoModel.from_pretrained(model, config=config)
else:
self.model = AutoModel.from_pretrained(None, **kwargs)
logging.set_verbosity_warning()
if type(self.model) not in self.NO_MAX_SEQ_LENGTH_MODELS:
self.allow_long_sentences = allow_long_sentences
self.truncate = True
self.max_subtokens_sequence_length = self.tokenizer.model_max_length
self.stride = self.tokenizer.model_max_length // 2 if allow_long_sentences else 0
else:
# in the end, these models don't need this configuration
self.allow_long_sentences = False
self.truncate = False
self.max_subtokens_sequence_length = None
self.stride = 0
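        # When `allow_long_sentences` is enabled, overlong inputs are split into
        # overlapping chunks (the overlap being a stride of half the model's maximum
        # length); the overlapping halves are trimmed again when the hidden states of
        # the parts are stitched back together in _add_embeddings_internal.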
self.use_lang_emb = hasattr(self.model, "use_lang_emb") and self.model.use_lang_emb
# model name
self.name = 'transformer-word-' + str(model)
self.base_model = str(model)
# whether to detach gradients on overlong sentences
self.memory_effective_training = memory_effective_training
# store whether to use context (and how much)
if type(use_context) == bool:
self.context_length: int = 64 if use_context else 0
if type(use_context) == int:
self.context_length: int = use_context
# dropout contexts
self.context_dropout = context_dropout
# if using context, can we cross document boundaries?
self.respect_document_boundaries = respect_document_boundaries
# send self to flair-device
self.to(flair.device)
# embedding parameters
if layers == 'all':
# send mini-token through to check how many layers the model has
hidden_states = self.model(torch.tensor([1], device=flair.device).unsqueeze(0))[-1]
self.layer_indexes = [int(x) for x in range(len(hidden_states))]
else:
self.layer_indexes = [int(x) for x in layers.split(",")]
self.pooling_operation = subtoken_pooling
self.layer_mean = layer_mean
self.fine_tune = fine_tune
self.static_embeddings = not self.fine_tune
# calculate embedding length
if not self.layer_mean:
length = len(self.layer_indexes) * self.model.config.hidden_size
else:
length = self.model.config.hidden_size
if self.pooling_operation == 'first_last': length *= 2
# return length
self.embedding_length_internal = length
self.special_tokens = []
# check if special tokens exist to circumvent error message
if self.tokenizer._bos_token:
self.special_tokens.append(self.tokenizer.bos_token)
if self.tokenizer._cls_token:
self.special_tokens.append(self.tokenizer.cls_token)
        # most models have an initial BOS token, except for XLNet, T5 and GPT2
self.begin_offset = self._get_begin_offset_of_tokenizer(tokenizer=self.tokenizer)
# when initializing, embeddings are in eval mode by default
self.eval()
@staticmethod
def _get_begin_offset_of_tokenizer(tokenizer: PreTrainedTokenizer) -> int:
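        # Encode a dummy one-character string and find the position at which its
        # token (or the unknown token) appears; everything before that position is
        # a special prefix token such as [CLS] or <s>, so this is the offset where
        # real content subtokens begin.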
test_string = 'a'
tokens = tokenizer.encode(test_string)
for begin_offset, token in enumerate(tokens):
if tokenizer.decode([token]) == test_string or tokenizer.decode([token]) == tokenizer.unk_token:
break
return begin_offset
@staticmethod
def _remove_special_markup(text: str):
# remove special markup
text = re.sub('^Ġ', '', text) # RoBERTa models
text = re.sub('^##', '', text) # BERT models
text = re.sub('^▁', '', text) # XLNet models
text = re.sub('</w>$', '', text) # XLM models
return text
def _get_processed_token_text(self, token: Token) -> str:
pieces = self.tokenizer.tokenize(token.text)
token_text = ''
for piece in pieces:
token_text += self._remove_special_markup(piece)
token_text = token_text.lower()
return token_text
def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
# we require encoded subtokenized sentences, the mapping to original tokens and the number of
# parts that each sentence produces
subtokenized_sentences = []
all_token_subtoken_lengths = []
# if we also use context, first expand sentence to include context
if self.context_length > 0:
# set context if not set already
previous_sentence = None
for sentence in sentences:
if sentence.is_context_set(): continue
sentence._previous_sentence = previous_sentence
sentence._next_sentence = None
if previous_sentence: previous_sentence._next_sentence = sentence
previous_sentence = sentence
original_sentences = []
expanded_sentences = []
context_offsets = []
for sentence in sentences:
# in case of contextualization, we must remember non-expanded sentence
original_sentence = sentence
original_sentences.append(original_sentence)
# create expanded sentence and remember context offsets
expanded_sentence, context_offset = self._expand_sentence_with_context(sentence)
expanded_sentences.append(expanded_sentence)
context_offsets.append(context_offset)
# overwrite sentence with expanded sentence
sentence = expanded_sentence
sentences = expanded_sentences
tokenized_sentences = []
for sentence in sentences:
# subtokenize the sentence
tokenized_string = sentence.to_tokenized_string()
# transformer specific tokenization
subtokenized_sentence = self.tokenizer.tokenize(tokenized_string)
# set zero embeddings for empty sentences and exclude
if len(subtokenized_sentence) == 0:
for token in sentence:
token.set_embedding(self.name, torch.zeros(self.embedding_length))
continue
# determine into how many subtokens each token is split
token_subtoken_lengths = self.reconstruct_tokens_from_subtokens(sentence, subtokenized_sentence)
# remember tokenized sentences and their subtokenization
tokenized_sentences.append(tokenized_string)
all_token_subtoken_lengths.append(token_subtoken_lengths)
# encode inputs
batch_encoding = self.tokenizer(tokenized_sentences,
max_length=self.max_subtokens_sequence_length,
stride=self.stride,
return_overflowing_tokens=self.allow_long_sentences,
truncation=self.truncate,
padding=True,
return_tensors='pt',
)
model_kwargs = {}
input_ids = batch_encoding['input_ids'].to(flair.device)
# Models such as FNet do not have an attention_mask
if 'attention_mask' in batch_encoding:
model_kwargs['attention_mask'] = batch_encoding['attention_mask'].to(flair.device)
# determine which sentence was split into how many parts
sentence_parts_lengths = torch.ones(len(tokenized_sentences), dtype=torch.int) if not self.allow_long_sentences \
else torch.unique(batch_encoding['overflow_to_sample_mapping'], return_counts=True, sorted=True)[1].tolist()
# set language IDs for XLM-style transformers
if self.use_lang_emb:
model_kwargs["langs"] = torch.zeros_like(input_ids, dtype=input_ids.dtype)
for s_id, sentence in enumerate(tokenized_sentences):
sequence_length = len(sentence)
lang_id = self.tokenizer.lang2id.get(sentences[s_id].get_language_code(), 0)
model_kwargs["langs"][s_id][:sequence_length] = lang_id
# put encoded batch through transformer model to get all hidden states of all encoder layers
hidden_states = self.model(input_ids, **model_kwargs)[-1]
# make the tuple a tensor; makes working with it easier.
hidden_states = torch.stack(hidden_states)
sentence_idx_offset = 0
# gradients are enabled if fine-tuning is enabled
gradient_context = torch.enable_grad() if (self.fine_tune and self.training) else torch.no_grad()
with gradient_context:
# iterate over all subtokenized sentences
for sentence_idx, (sentence, subtoken_lengths, nr_sentence_parts) in enumerate(
zip(sentences, all_token_subtoken_lengths, sentence_parts_lengths)):
sentence_hidden_state = hidden_states[:, sentence_idx + sentence_idx_offset, ...]
for i in range(1, nr_sentence_parts):
sentence_idx_offset += 1
remainder_sentence_hidden_state = hidden_states[:, sentence_idx + sentence_idx_offset, ...]
# remove stride_size//2 at end of sentence_hidden_state, and half at beginning of remainder,
# in order to get some context into the embeddings of these words.
# also don't include the embedding of the extra [CLS] and [SEP] tokens.
sentence_hidden_state = torch.cat((sentence_hidden_state[:, :-1 - self.stride // 2, :],
remainder_sentence_hidden_state[:, 1 + self.stride // 2:,
:]), 1)
subword_start_idx = self.begin_offset
# for each token, get embedding
for token_idx, (token, number_of_subtokens) in enumerate(zip(sentence, subtoken_lengths)):
# some tokens have no subtokens at all (if omitted by BERT tokenizer) so return zero vector
if number_of_subtokens == 0:
token.set_embedding(self.name, torch.zeros(self.embedding_length))
continue
subword_end_idx = subword_start_idx + number_of_subtokens
subtoken_embeddings: List[torch.FloatTensor] = []
# get states from all selected layers, aggregate with pooling operation
for layer in self.layer_indexes:
current_embeddings = sentence_hidden_state[layer][subword_start_idx:subword_end_idx]
if self.pooling_operation == "first":
final_embedding: torch.FloatTensor = current_embeddings[0]
if self.pooling_operation == "last":
final_embedding: torch.FloatTensor = current_embeddings[-1]
if self.pooling_operation == "first_last":
final_embedding: torch.Tensor = torch.cat(
[current_embeddings[0], current_embeddings[-1]])
if self.pooling_operation == "mean":
all_embeddings: List[torch.FloatTensor] = [
embedding.unsqueeze(0) for embedding in current_embeddings
]
final_embedding: torch.Tensor = torch.mean(torch.cat(all_embeddings, dim=0), dim=0)
subtoken_embeddings.append(final_embedding)
# use layer mean of embeddings if so selected
if self.layer_mean and len(self.layer_indexes) > 1:
sm_embeddings = torch.mean(torch.stack(subtoken_embeddings, dim=1), dim=1)
subtoken_embeddings = [sm_embeddings]
# set the extracted embedding for the token
token.set_embedding(self.name, torch.cat(subtoken_embeddings))
subword_start_idx += number_of_subtokens
# move embeddings from context back to original sentence (if using context)
if self.context_length > 0:
for original_sentence, expanded_sentence, context_offset in zip(original_sentences,
sentences,
context_offsets):
for token_idx, token in enumerate(original_sentence):
token.set_embedding(self.name,
expanded_sentence[token_idx + context_offset].get_embedding(self.name))
sentence = original_sentence
def _expand_sentence_with_context(self, sentence):
# remember original sentence
original_sentence = sentence
import random
expand_context = False if self.training and random.randint(1, 100) <= (self.context_dropout * 100) else True
left_context = ''
right_context = ''
if expand_context:
# get left context
while True:
sentence = sentence.previous_sentence()
if sentence is None: break
if self.respect_document_boundaries and sentence.is_document_boundary: break
left_context = sentence.to_tokenized_string() + ' ' + left_context
left_context = left_context.strip()
if len(left_context.split(" ")) > self.context_length:
left_context = " ".join(left_context.split(" ")[-self.context_length:])
break
original_sentence.left_context = left_context
sentence = original_sentence
# get right context
while True:
sentence = sentence.next_sentence()
if sentence is None: break
if self.respect_document_boundaries and sentence.is_document_boundary: break
right_context += ' ' + sentence.to_tokenized_string()
right_context = right_context.strip()
if len(right_context.split(" ")) > self.context_length:
right_context = " ".join(right_context.split(" ")[:self.context_length])
break
original_sentence.right_context = right_context
left_context_split = left_context.split(" ")
right_context_split = right_context.split(" ")
# empty contexts should not introduce whitespace tokens
if left_context_split == [""]: left_context_split = []
if right_context_split == [""]: right_context_split = []
# make expanded sentence
expanded_sentence = Sentence()
expanded_sentence.tokens = [Token(token) for token in left_context_split +
original_sentence.to_tokenized_string().split(" ") +
right_context_split]
context_length = len(left_context_split)
return expanded_sentence, context_length
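# Hypothetical illustration of the expansion above: embedding "It purred ." with neighbours
# "The cat sat ." (previous) and "Then it slept ." (next) and a sufficiently large context_length
# yields the expanded sentence "The cat sat . It purred . Then it slept ." and a returned
# context offset of 4 (the number of left-context tokens prepended to the original sentence).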
def reconstruct_tokens_from_subtokens(self, sentence, subtokens):
word_iterator = iter(sentence)
token = next(word_iterator)
token_text = self._get_processed_token_text(token)
token_subtoken_lengths = []
reconstructed_token = ''
subtoken_count = 0
# iterate over subtokens and reconstruct tokens
for subtoken_id, subtoken in enumerate(subtokens):
# remove special markup
subtoken = self._remove_special_markup(subtoken)
# TODO check if this is necessary if this method is called before prepare_for_model
# check if reconstructed token is special begin token ([CLS] or similar)
if subtoken in self.special_tokens and subtoken_id == 0:
continue
# some BERT tokenizers somehow omit words - in such cases skip to next token
if subtoken_count == 0 and not token_text.startswith(subtoken.lower()):
while True:
token_subtoken_lengths.append(0)
token = next(word_iterator)
token_text = self._get_processed_token_text(token)
if token_text.startswith(subtoken.lower()): break
subtoken_count += 1
# append subtoken to reconstruct token
reconstructed_token = reconstructed_token + subtoken
# check if reconstructed token is the same as current token
if reconstructed_token.lower() == token_text:
# if so, add subtoken count
token_subtoken_lengths.append(subtoken_count)
# reset subtoken count and reconstructed token
reconstructed_token = ''
subtoken_count = 0
# break from loop if all tokens are accounted for
if len(token_subtoken_lengths) < len(sentence):
token = next(word_iterator)
token_text = self._get_processed_token_text(token)
else:
break
# if tokens are unaccounted for
while len(token_subtoken_lengths) < len(sentence) and len(token.text) == 1:
token_subtoken_lengths.append(0)
if len(token_subtoken_lengths) == len(sentence): break
token = next(word_iterator)
# check if all tokens were matched to subtokens
if token != sentence[-1]:
log.error(f"Tokenization MISMATCH in sentence '{sentence.to_tokenized_string()}'")
log.error(f"Last matched: '{token}'")
log.error(f"Last sentence: '{sentence[-1]}'")
log.error(f"subtokenized: '{subtokens}'")
return token_subtoken_lengths
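# Hypothetical illustration of the intended mapping: for the sentence ["New", "unhappiness"]
# subtokenized (WordPiece-style, after markup removal and lowercasing) as
# ["[CLS]", "new", "un", "happi", "ness", "[SEP]"], the method returns token_subtoken_lengths
# [1, 3], i.e. one hidden state belongs to "New" and three to "unhappiness".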
@property
def embedding_length(self) -> int:
if "embedding_length_internal" in self.__dict__.keys():
return self.embedding_length_internal
# """Returns the length of the embedding vector."""
if not self.layer_mean:
length = len(self.layer_indexes) * self.model.config.hidden_size
else:
length = self.model.config.hidden_size
if self.pooling_operation == 'first_last': length *= 2
self.__embedding_length = length
return length
def __getstate__(self):
# special handling for serializing transformer models
config_state_dict = self.model.config.__dict__
model_state_dict = self.model.state_dict()
if not hasattr(self, "base_model_name"): self.base_model_name = self.name.split('transformer-word-')[-1]
# serialize the transformer models and the constructor arguments (but nothing else)
model_state = {
"config_state_dict": config_state_dict,
"model_state_dict": model_state_dict,
"embedding_length_internal": self.embedding_length,
"base_model_name": self.base_model_name,
"name": self.name,
"layer_indexes": self.layer_indexes,
"subtoken_pooling": self.pooling_operation,
"context_length": self.context_length,
"layer_mean": self.layer_mean,
"fine_tune": self.fine_tune,
"allow_long_sentences": self.allow_long_sentences,
"memory_effective_training": self.memory_effective_training,
"respect_document_boundaries": self.respect_document_boundaries,
"context_dropout": self.context_dropout,
}
return model_state
def __setstate__(self, d):
self.__dict__ = d
# necessary for reverse compatibility with Flair <= 0.7
if 'use_scalar_mix' in self.__dict__.keys():
self.__dict__['layer_mean'] = d['use_scalar_mix']
if not 'memory_effective_training' in self.__dict__.keys():
self.__dict__['memory_effective_training'] = True
if 'pooling_operation' in self.__dict__.keys():
self.__dict__['subtoken_pooling'] = d['pooling_operation']
if not 'context_length' in self.__dict__.keys():
self.__dict__['context_length'] = 0
if 'use_context' in self.__dict__.keys():
self.__dict__['context_length'] = 64 if self.__dict__['use_context'] == True else 0
if not 'context_dropout' in self.__dict__.keys():
self.__dict__['context_dropout'] = 0.5
if not 'respect_document_boundaries' in self.__dict__.keys():
self.__dict__['respect_document_boundaries'] = True
if not 'memory_effective_training' in self.__dict__.keys():
self.__dict__['memory_effective_training'] = True
if not 'base_model_name' in self.__dict__.keys():
self.__dict__['base_model_name'] = self.__dict__['name'].split('transformer-word-')[-1]
# special handling for deserializing transformer models
if "config_state_dict" in d:
# load transformer model
model_type = d["config_state_dict"]["model_type"] if "model_type" in d["config_state_dict"] else "bert"
config_class = CONFIG_MAPPING[model_type]
loaded_config = config_class.from_dict(d["config_state_dict"])
# constructor arguments
layers = ','.join([str(idx) for idx in self.__dict__['layer_indexes']])
# re-initialize transformer word embeddings with constructor arguments
embedding = TransformerWordEmbeddings(
model=self.__dict__['base_model_name'],
layers=layers,
subtoken_pooling=self.__dict__['subtoken_pooling'],
use_context=self.__dict__['context_length'],
layer_mean=self.__dict__['layer_mean'],
fine_tune=self.__dict__['fine_tune'],
allow_long_sentences=self.__dict__['allow_long_sentences'],
respect_document_boundaries=self.__dict__['respect_document_boundaries'],
memory_effective_training=self.__dict__['memory_effective_training'],
context_dropout=self.__dict__['context_dropout'],
config=loaded_config,
state_dict=d["model_state_dict"],
)
# I have no idea why this is necessary, but otherwise it doesn't work
for key in embedding.__dict__.keys():
self.__dict__[key] = embedding.__dict__[key]
else:
# reload tokenizer to get around serialization issues
model_name = self.__dict__['name'].split('transformer-word-')[-1]
try:
tokenizer = AutoTokenizer.from_pretrained(model_name)
except Exception:
    tokenizer = None  # keep the attribute defined even if reloading the tokenizer fails
self.tokenizer = tokenizer
class FastTextEmbeddings(TokenEmbeddings):
"""FastText Embeddings with oov functionality"""
def __init__(self, embeddings: str, use_local: bool = True, field: str = None):
"""
Initializes fasttext word embeddings. Constructor downloads required embedding file and stores in cache
if use_local is False.
:param embeddings: path to your embeddings '.bin' file
:param use_local: set this to False if you are using embeddings from a remote source
"""
self.instance_parameters = self.get_instance_parameters(locals=locals())
cache_dir = Path("embeddings")
if use_local:
if not Path(embeddings).exists():
raise ValueError(
f'The given embeddings "{embeddings}" is not available or is not a valid path.'
)
else:
embeddings = cached_path(f"{embeddings}", cache_dir=cache_dir)
self.embeddings = embeddings
self.name: str = str(embeddings)
self.static_embeddings = True
self.precomputed_word_embeddings = gensim.models.FastText.load_fasttext_format(
str(embeddings)
)
self.__embedding_length: int = self.precomputed_word_embeddings.vector_size
self.field = field
super().__init__()
@property
def embedding_length(self) -> int:
return self.__embedding_length
@instance_lru_cache(maxsize=10000, typed=False)
def get_cached_vec(self, word: str) -> torch.Tensor:
try:
word_embedding = self.precomputed_word_embeddings[word]
except:
word_embedding = np.zeros(self.embedding_length, dtype="float")
word_embedding = torch.tensor(
word_embedding.tolist(), device=flair.device, dtype=torch.float
)
return word_embedding
def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
for i, sentence in enumerate(sentences):
for token, token_idx in zip(sentence.tokens, range(len(sentence.tokens))):
if "field" not in self.__dict__ or self.field is None:
word = token.text
else:
word = token.get_tag(self.field).value
word_embedding = self.get_cached_vec(word)
token.set_embedding(self.name, word_embedding)
return sentences
def __str__(self):
return self.name
def extra_repr(self):
return f"'{self.embeddings}'"
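# Minimal usage sketch (the .bin path is a placeholder; embed()/get_embedding() are assumed
# from the usual flair TokenEmbeddings/Sentence API):
#   embedding = FastTextEmbeddings("resources/cc.en.300.bin")
#   sentence = Sentence("FastText also embeds out-of-vocabulary words")
#   embedding.embed(sentence)
#   vector = sentence[0].get_embedding()  # tensor of size embedding.embedding_length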
class OneHotEmbeddings(TokenEmbeddings):
"""One-hot encoded embeddings. """
def __init__(
self,
corpus: Corpus,
field: str = "text",
embedding_length: int = 300,
min_freq: int = 3,
):
"""
Initializes one-hot encoded word embeddings and a trainable embedding layer
:param corpus: you need to pass a Corpus in order to construct the vocabulary
:param field: by default, the 'text' of tokens is embedded, but you can also embed tags such as 'pos'
:param embedding_length: dimensionality of the trainable embedding layer
:param min_freq: minimum frequency of a word to become part of the vocabulary
"""
super().__init__()
self.name = "one-hot"
self.static_embeddings = False
self.min_freq = min_freq
self.field = field
self.instance_parameters = self.get_instance_parameters(locals=locals())
tokens = list(map((lambda s: s.tokens), corpus.train))
tokens = [token for sublist in tokens for token in sublist]
if field == "text":
most_common = Counter(list(map((lambda t: t.text), tokens))).most_common()
else:
most_common = Counter(
list(map((lambda t: t.get_tag(field).value), tokens))
).most_common()
tokens = []
for token, freq in most_common:
if freq < min_freq:
break
tokens.append(token)
self.vocab_dictionary: Dictionary = Dictionary()
for token in tokens:
self.vocab_dictionary.add_item(token)
# max_tokens = 500
self.__embedding_length = embedding_length
print(self.vocab_dictionary.idx2item)
print(f"vocabulary size of {len(self.vocab_dictionary)}")
# model architecture
self.embedding_layer = torch.nn.Embedding(
len(self.vocab_dictionary), self.__embedding_length
)
torch.nn.init.xavier_uniform_(self.embedding_layer.weight)
self.to(flair.device)
@property
def embedding_length(self) -> int:
return self.__embedding_length
def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
one_hot_sentences = []
for i, sentence in enumerate(sentences):
if self.field == "text":
context_idxs = [
self.vocab_dictionary.get_idx_for_item(t.text)
for t in sentence.tokens
]
else:
context_idxs = [
self.vocab_dictionary.get_idx_for_item(t.get_tag(self.field).value)
for t in sentence.tokens
]
one_hot_sentences.extend(context_idxs)
one_hot_sentences = torch.tensor(one_hot_sentences, dtype=torch.long).to(
flair.device
)
embedded = self.embedding_layer.forward(one_hot_sentences)
index = 0
for sentence in sentences:
for token in sentence:
embedding = embedded[index]
token.set_embedding(self.name, embedding)
index += 1
return sentences
def __str__(self):
return self.name
def extra_repr(self):
return "min_freq={}".format(self.min_freq)
class HashEmbeddings(TokenEmbeddings):
"""Standard embeddings with Hashing Trick."""
def __init__(
self, num_embeddings: int = 1000, embedding_length: int = 300, hash_method="md5"
):
super().__init__()
self.name = "hash"
self.static_embeddings = False
self.instance_parameters = self.get_instance_parameters(locals=locals())
self.__num_embeddings = num_embeddings
self.__embedding_length = embedding_length
self.__hash_method = hash_method
# model architecture
self.embedding_layer = torch.nn.Embedding(
self.__num_embeddings, self.__embedding_length
)
torch.nn.init.xavier_uniform_(self.embedding_layer.weight)
self.to(flair.device)
@property
def num_embeddings(self) -> int:
return self.__num_embeddings
@property
def embedding_length(self) -> int:
return self.__embedding_length
def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
def get_idx_for_item(text):
hash_function = hashlib.new(self.__hash_method)
hash_function.update(bytes(str(text), "utf-8"))
return int(hash_function.hexdigest(), 16) % self.__num_embeddings
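# The hashing trick in a nutshell: instead of building a vocabulary, each token text is hashed
# (md5 by default) and mapped onto one of num_embeddings rows of the embedding table, e.g.
#   int(hashlib.md5(b"hello").hexdigest(), 16) % 1000  ->  a fixed row index in [0, 999]
# Distinct words may collide on the same row, which is the usual trade-off of this method.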
hash_sentences = []
for i, sentence in enumerate(sentences):
context_idxs = [get_idx_for_item(t.text) for t in sentence.tokens]
hash_sentences.extend(context_idxs)
hash_sentences = torch.tensor(hash_sentences, dtype=torch.long).to(flair.device)
embedded = self.embedding_layer.forward(hash_sentences)
index = 0
for sentence in sentences:
for token in sentence:
embedding = embedded[index]
token.set_embedding(self.name, embedding)
index += 1
return sentences
def __str__(self):
return self.name
class MuseCrosslingualEmbeddings(TokenEmbeddings):
def __init__(self, ):
self.name: str = f"muse-crosslingual"
self.static_embeddings = True
self.__embedding_length: int = 300
self.language_embeddings = {}
super().__init__()
@instance_lru_cache(maxsize=10000, typed=False)
def get_cached_vec(self, language_code: str, word: str) -> torch.Tensor:
current_embedding_model = self.language_embeddings[language_code]
if word in current_embedding_model:
word_embedding = current_embedding_model[word]
elif word.lower() in current_embedding_model:
word_embedding = current_embedding_model[word.lower()]
elif re.sub(r"\d", "#", word.lower()) in current_embedding_model:
word_embedding = current_embedding_model[re.sub(r"\d", "#", word.lower())]
elif re.sub(r"\d", "0", word.lower()) in current_embedding_model:
word_embedding = current_embedding_model[re.sub(r"\d", "0", word.lower())]
else:
word_embedding = np.zeros(self.embedding_length, dtype="float")
word_embedding = torch.tensor(
word_embedding, device=flair.device, dtype=torch.float
)
return word_embedding
def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
for i, sentence in enumerate(sentences):
language_code = sentence.get_language_code()
supported = [
"en",
"de",
"bg",
"ca",
"hr",
"cs",
"da",
"nl",
"et",
"fi",
"fr",
"el",
"he",
"hu",
"id",
"it",
"mk",
"no",
"pl",
"pt",
"ro",
"ru",
"sk",
]
if language_code not in supported:
language_code = "en"
if language_code not in self.language_embeddings:
log.info(f"Loading up MUSE embeddings for '{language_code}'!")
# download if necessary
hu_path: str = "https://flair.informatik.hu-berlin.de/resources/embeddings/muse"
cache_dir = Path("embeddings") / "MUSE"
cached_path(
f"{hu_path}/muse.{language_code}.vec.gensim.vectors.npy",
cache_dir=cache_dir,
)
embeddings_file = cached_path(
f"{hu_path}/muse.{language_code}.vec.gensim", cache_dir=cache_dir
)
# load the model
self.language_embeddings[
language_code
] = gensim.models.KeyedVectors.load(str(embeddings_file))
for token, token_idx in zip(sentence.tokens, range(len(sentence.tokens))):
if "field" not in self.__dict__ or self.field is None:
word = token.text
else:
word = token.get_tag(self.field).value
word_embedding = self.get_cached_vec(
language_code=language_code, word=word
)
token.set_embedding(self.name, word_embedding)
return sentences
@property
def embedding_length(self) -> int:
return self.__embedding_length
def __str__(self):
return self.name
# TODO: keep for backwards compatibility, but remove in future
class BPEmbSerializable(BPEmb):
def __getstate__(self):
state = self.__dict__.copy()
# save the sentence piece model as binary file (not as path which may change)
state["spm_model_binary"] = open(self.model_file, mode="rb").read()
state["spm"] = None
return state
def __setstate__(self, state):
from bpemb.util import sentencepiece_load
model_file = self.model_tpl.format(lang=state["lang"], vs=state["vs"])
self.__dict__ = state
# write out the binary sentence piece model into the expected directory
self.cache_dir: Path = flair.cache_root / "embeddings"
if "spm_model_binary" in self.__dict__:
# if the model was saved as binary and it is not found on disk, write to appropriate path
if not os.path.exists(self.cache_dir / state["lang"]):
os.makedirs(self.cache_dir / state["lang"])
self.model_file = self.cache_dir / model_file
with open(self.model_file, "wb") as out:
out.write(self.__dict__["spm_model_binary"])
else:
# otherwise, use normal process and potentially trigger another download
self.model_file = self._load_file(model_file)
# once the model is there, load it with sentence piece
state["spm"] = sentencepiece_load(self.model_file)
class BytePairEmbeddings(TokenEmbeddings):
def __init__(
self,
language: str = None,
dim: int = 50,
syllables: int = 100000,
cache_dir=None,
model_file_path: Path = None,
embedding_file_path: Path = None,
**kwargs,
):
"""
Initializes byte-pair encoding (BPE) embeddings. The constructor downloads the required files if they are not already present.
"""
self.instance_parameters = self.get_instance_parameters(locals=locals())
if not cache_dir:
cache_dir = flair.cache_root / "embeddings"
if language:
self.name: str = f"bpe-{language}-{syllables}-{dim}"
else:
assert (
model_file_path is not None and embedding_file_path is not None
), "Need to specify model_file_path and embedding_file_path if no language is given in BytePairEmbeddings(...)"
dim = None
self.embedder = BPEmbSerializable(
lang=language,
vs=syllables,
dim=dim,
cache_dir=cache_dir,
model_file=model_file_path,
emb_file=embedding_file_path,
**kwargs,
)
if not language:
self.name: str = f"bpe-custom-{self.embedder.vs}-{self.embedder.dim}"
self.static_embeddings = True
self.__embedding_length: int = self.embedder.emb.vector_size * 2
super().__init__()
@property
def embedding_length(self) -> int:
return self.__embedding_length
def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
for i, sentence in enumerate(sentences):
for token, token_idx in zip(sentence.tokens, range(len(sentence.tokens))):
if "field" not in self.__dict__ or self.field is None:
word = token.text
else:
word = token.get_tag(self.field).value
if word.strip() == "":
# empty words get no embedding
token.set_embedding(
self.name, torch.zeros(self.embedding_length, dtype=torch.float)
)
else:
# all other words get embedded
embeddings = self.embedder.embed(word.lower())
embedding = np.concatenate(
(embeddings[0], embeddings[len(embeddings) - 1])
)
token.set_embedding(
self.name, torch.tensor(embedding, dtype=torch.float)
)
return sentences
def __str__(self):
return self.name
def extra_repr(self):
return "model={}".format(self.name)
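# Minimal usage sketch (parameter values are just examples; embed() is assumed from the flair
# TokenEmbeddings API):
#   embedding = BytePairEmbeddings(language="en", dim=50, syllables=100000)
#   embedding.embed(Sentence("byte pair encodings split rare words into subwords"))
# Note that embedding_length is 2 * dim, because the first and the last subword vector of each
# word are concatenated in _add_embeddings_internal above.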
class ELMoEmbeddings(TokenEmbeddings):
"""Contextual word embeddings using word-level LM, as proposed in Peters et al., 2018.
ELMo word vectors can be constructed by combining layers in different ways.
Default is to concatenate the top 3 layers in the LM."""
def __init__(
self, model: str = "original", options_file: str = None, weight_file: str = None,
embedding_mode: str = "all"
):
super().__init__()
self.instance_parameters = self.get_instance_parameters(locals=locals())
try:
import allennlp.commands.elmo
except ModuleNotFoundError:
log.warning("-" * 100)
log.warning('ATTENTION! The library "allennlp" is not installed!')
log.warning(
'To use ELMoEmbeddings, please first install with "pip install allennlp==0.9.0"'
)
log.warning("-" * 100)
pass
assert embedding_mode in ["all", "top", "average"]
self.name = f"elmo-{model}-{embedding_mode}"
self.static_embeddings = True
if not options_file or not weight_file:
# the default model for ELMo is the 'original' model, which is very large
options_file = allennlp.commands.elmo.DEFAULT_OPTIONS_FILE
weight_file = allennlp.commands.elmo.DEFAULT_WEIGHT_FILE
# alternatively, a small, medium or Portuguese model can be selected by passing the appropriate model name
if model == "small":
options_file = "https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/2x1024_128_2048cnn_1xhighway/elmo_2x1024_128_2048cnn_1xhighway_options.json"
weight_file = "https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/2x1024_128_2048cnn_1xhighway/elmo_2x1024_128_2048cnn_1xhighway_weights.hdf5"
if model == "medium":
options_file = "https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/2x2048_256_2048cnn_1xhighway/elmo_2x2048_256_2048cnn_1xhighway_options.json"
weight_file = "https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/2x2048_256_2048cnn_1xhighway/elmo_2x2048_256_2048cnn_1xhighway_weights.hdf5"
if model in ["large", "5.5B"]:
options_file = "https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/2x4096_512_2048cnn_2xhighway_5.5B/elmo_2x4096_512_2048cnn_2xhighway_5.5B_options.json"
weight_file = "https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/2x4096_512_2048cnn_2xhighway_5.5B/elmo_2x4096_512_2048cnn_2xhighway_5.5B_weights.hdf5"
if model == "pt" or model == "portuguese":
options_file = "https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/contributed/pt/elmo_pt_options.json"
weight_file = "https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/contributed/pt/elmo_pt_weights.hdf5"
if model == "pubmed":
options_file = "https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/contributed/pubmed/elmo_2x4096_512_2048cnn_2xhighway_options.json"
weight_file = "https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/contributed/pubmed/elmo_2x4096_512_2048cnn_2xhighway_weights_PubMed_only.hdf5"
if embedding_mode == "all":
self.embedding_mode_fn = self.use_layers_all
elif embedding_mode == "top":
self.embedding_mode_fn = self.use_layers_top
elif embedding_mode == "average":
self.embedding_mode_fn = self.use_layers_average
# put on Cuda if available
from flair import device
if re.fullmatch(r"cuda:[0-9]+", str(device)):
cuda_device = int(str(device).split(":")[-1])
elif str(device) == "cpu":
cuda_device = -1
else:
cuda_device = 0
self.ee = allennlp.commands.elmo.ElmoEmbedder(
options_file=options_file, weight_file=weight_file, cuda_device=cuda_device
)
# embed a dummy sentence to determine embedding_length
dummy_sentence: Sentence = Sentence()
dummy_sentence.add_token(Token("hello"))
embedded_dummy = self.embed(dummy_sentence)
self.__embedding_length: int = len(
embedded_dummy[0].get_token(1).get_embedding()
)
@property
def embedding_length(self) -> int:
return self.__embedding_length
def use_layers_all(self, x):
return torch.cat(x, 0)
def use_layers_top(self, x):
return x[-1]
def use_layers_average(self, x):
return torch.mean(torch.stack(x), 0)
def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
# ELMoEmbeddings before Release 0.5 did not set self.embedding_mode_fn
if not getattr(self, "embedding_mode_fn", None):
self.embedding_mode_fn = self.use_layers_all
sentence_words: List[List[str]] = []
for sentence in sentences:
sentence_words.append([token.text for token in sentence])
embeddings = self.ee.embed_batch(sentence_words)
for i, sentence in enumerate(sentences):
sentence_embeddings = embeddings[i]
for token, token_idx in zip(sentence.tokens, range(len(sentence.tokens))):
elmo_embedding_layers = [
torch.FloatTensor(sentence_embeddings[0, token_idx, :]),
torch.FloatTensor(sentence_embeddings[1, token_idx, :]),
torch.FloatTensor(sentence_embeddings[2, token_idx, :])
]
word_embedding = self.embedding_mode_fn(elmo_embedding_layers)
token.set_embedding(self.name, word_embedding)
return sentences
def extra_repr(self):
return "model={}".format(self.name)
def __str__(self):
return self.name
def __setstate__(self, state):
self.__dict__ = state
if re.fullmatch(r"cuda:[0-9]+", str(flair.device)):
cuda_device = int(str(flair.device).split(":")[-1])
elif str(flair.device) == "cpu":
cuda_device = -1
else:
cuda_device = 0
self.ee.cuda_device = cuda_device
self.ee.elmo_bilm.to(device=flair.device)
self.ee.elmo_bilm._elmo_lstm._states = tuple(
[state.to(flair.device) for state in self.ee.elmo_bilm._elmo_lstm._states])
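# Minimal usage sketch (requires allennlp==0.9.0 as warned in __init__; values are examples):
#   embedding = ELMoEmbeddings(model="small", embedding_mode="average")
#   embedding.embed(Sentence("ELMo produces contextual word vectors"))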
class NILCEmbeddings(WordEmbeddings):
def __init__(self, embeddings: str, model: str = "skip", size: int = 100):
"""
Initializes portuguese classic word embeddings trained by NILC Lab (http://www.nilc.icmc.usp.br/embeddings).
Constructor downloads required files if not there.
:param embeddings: one of: 'fasttext', 'glove', 'wang2vec' or 'word2vec'
:param model: one of: 'skip' or 'cbow'. This is not applicable to glove.
:param size: one of: 50, 100, 300, 600 or 1000.
"""
self.instance_parameters = self.get_instance_parameters(locals=locals())
base_path = "http://143.107.183.175:22980/download.php?file=embeddings/"
cache_dir = Path("embeddings") / embeddings.lower()
# GLOVE embeddings
if embeddings.lower() == "glove":
cached_path(
f"{base_path}{embeddings}/{embeddings}_s{size}.zip", cache_dir=cache_dir
)
embeddings = cached_path(
f"{base_path}{embeddings}/{embeddings}_s{size}.zip", cache_dir=cache_dir
)
elif embeddings.lower() in ["fasttext", "wang2vec", "word2vec"]:
cached_path(
f"{base_path}{embeddings}/{model}_s{size}.zip", cache_dir=cache_dir
)
embeddings = cached_path(
f"{base_path}{embeddings}/{model}_s{size}.zip", cache_dir=cache_dir
)
elif not Path(embeddings).exists():
raise ValueError(
f'The given embeddings "{embeddings}" is not available or is not a valid path.'
)
self.name: str = str(embeddings)
self.static_embeddings = True
log.info("Reading embeddings from %s" % embeddings)
self.precomputed_word_embeddings = gensim.models.KeyedVectors.load_word2vec_format(
open_inside_zip(str(embeddings), cache_dir=cache_dir)
)
self.__embedding_length: int = self.precomputed_word_embeddings.vector_size
super(TokenEmbeddings, self).__init__()
@property
def embedding_length(self) -> int:
return self.__embedding_length
def __str__(self):
return self.name
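# Minimal usage sketch (parameter values are examples taken from the docstring above):
#   embedding = NILCEmbeddings(embeddings="word2vec", model="skip", size=300)
#   embedding.embed(Sentence("uma frase de exemplo"))  # Portuguese input text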
def replace_with_language_code(string: str):
string = string.replace("arabic-", "ar-")
string = string.replace("basque-", "eu-")
string = string.replace("bulgarian-", "bg-")
string = string.replace("croatian-", "hr-")
string = string.replace("czech-", "cs-")
string = string.replace("danish-", "da-")
string = string.replace("dutch-", "nl-")
string = string.replace("farsi-", "fa-")
string = string.replace("persian-", "fa-")
string = string.replace("finnish-", "fi-")
string = string.replace("french-", "fr-")
string = string.replace("german-", "de-")
string = string.replace("hebrew-", "he-")
string = string.replace("hindi-", "hi-")
string = string.replace("indonesian-", "id-")
string = string.replace("italian-", "it-")
string = string.replace("japanese-", "ja-")
string = string.replace("norwegian-", "no-")
string = string.replace("polish-", "pl-")
string = string.replace("portuguese-", "pt-")
string = string.replace("slovenian-", "sl-")
string = string.replace("spanish-", "es-")
string = string.replace("swedish-", "sv-")
return string
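# Example: replace_with_language_code("german-forward") returns "de-forward".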
| [
"torch.cat",
"torch.nn.LSTM",
"torch.stack",
"torch.enable_grad",
"torch.nn.utils.rnn.pack_padded_sequence",
"torch.nn.LayerNorm",
"torch.FloatTensor",
"torch.tensor",
"torch.zeros_like",
"torch.zeros",
"torch.device",
"torch.min",
"torch.max",
"torch.unique",
"torch.no_grad",
"torch.nn.init.xavier_uniform_",
"torch.add",
"torch.nn.utils.rnn.pad_packed_sequence",
"torch.nn.Embedding"
] | 1.5.0 | seyyaw/flair | 46569a611c6acff739e3c04e78d64f824011e483 |
1.8 | from __future__ import print_function
import argparse
import os
import pickle
from sys import stdout as sysout
import torch
import torchvision
import src.archs as archs
from src.utils.cluster.cluster_eval import cluster_subheads_eval
from src.utils.cluster.data import _create_mapping_loader
from src.utils.cluster.transforms import greyscale_make_transforms
# Reassess IID+ models by doing the mapping_assign with smaller numbers of
# labelled images
# to reassess model as originally done, set new_assign_set_szs_pc to [1.0]
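# Example invocation (the script name and model_ind value below are placeholders):
#   python reassess.py --model_ind 640 --new_assign_set_szs_pc 0.01 0.1 1.0 \
#     --out_root /scratch/shared/slow/xuji/iid_private --use_eval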
parser = argparse.ArgumentParser()
parser.add_argument("--model_ind", type=int, required=True)
parser.add_argument("--new_assign_set_szs_pc", type=float, nargs="+",
default=[1.0]) # 0.01, 0.02, 0.05, 0.1, 0.5
parser.add_argument("--out_root", type=str,
default="/scratch/shared/slow/xuji/iid_private")
parser.add_argument("--use_eval", default=False, action="store_true")
parser.add_argument("--dont_save", default=False, action="store_true")
parser.add_argument("--rewrite", default=False, action="store_true")
config = parser.parse_args()
if config.rewrite:
assert (not config.dont_save)
new_assign_set_szs_pc = config.new_assign_set_szs_pc
print("given new_assign_set_szs_pc: %s" % new_assign_set_szs_pc)
sysout.flush()
given_config = config
reloaded_config_path = os.path.join(given_config.out_root,
str(given_config.model_ind),
"config.pickle")
print("Loading restarting config from: %s" % reloaded_config_path)
with open(reloaded_config_path, "rb") as config_f:
config = pickle.load(config_f)
assert (config.model_ind == given_config.model_ind)
assert (config.mode == "IID+")
target_transform = None
assert ("MNIST" == config.dataset)
dataset_class = torchvision.datasets.MNIST
assert (config.train_partitions == [True])
assert (config.mapping_assignment_partitions == [True])
assert (config.mapping_test_partitions == [False])
# append to old results
if not hasattr(config, "assign_set_szs_pc_acc") or given_config.rewrite:
print("resetting config.assign_set_szs_pc_acc to empty")
config.assign_set_szs_pc_acc = {}
for pc in new_assign_set_szs_pc:
print("doing %f" % pc)
sysout.flush()
tf1, tf2, tf3 = greyscale_make_transforms(config)
mapping_assignment_dataloader = \
_create_mapping_loader(config, dataset_class, tf3,
partitions=config.mapping_assignment_partitions,
truncate=True, truncate_pc=pc)
mapping_test_dataloader = \
_create_mapping_loader(config, dataset_class, tf3,
partitions=config.mapping_test_partitions)
print("num assign batches: %d" % len(mapping_assignment_dataloader))
num_imgs = len(mapping_assignment_dataloader.dataset)
print("num imgs in assign dataset: %d" % num_imgs)
# networks and optimisers
# ------------------------------------------------------
net = archs.__dict__[config.arch](config)
model_path = os.path.join(config.out_dir, "best_net.pytorch")
net.load_state_dict(
torch.load(model_path, map_location=lambda storage, loc: storage))
net.cuda()
if given_config.use_eval:
print("doing eval mode")
net.eval()
net = torch.nn.DataParallel(net)
acc, nmi, ari, _ = cluster_subheads_eval(config, net,
mapping_assignment_dataloader=mapping_assignment_dataloader,
mapping_test_dataloader=mapping_test_dataloader,
sobel=False)
config.assign_set_szs_pc_acc[str(pc)] = (num_imgs, acc)
print("for model %d assign set sz pc %f, got %f, compared to best stored "
"acc %f" % (config.model_ind, pc, acc, max(config.epoch_acc)))
print(config.assign_set_szs_pc_acc)
sysout.flush()
if not given_config.dont_save:
print("writing to new config")
# store to config
with open(os.path.join(config.out_dir, "config.pickle"),
"wb") as outfile:
pickle.dump(config, outfile)
with open(os.path.join(config.out_dir, "config.txt"),
"w") as text_file:
text_file.write("%s" % config)
| [
"torch.nn.DataParallel",
"torch.load"
] | 1.8.0 | MihaiAnton/tudelft-iic-reproduction | 342247c444aa1f8b09ea18e3ff9135258d599373 |
1.8 | from __future__ import print_function
from src.utils.segmentation.general import set_segmentation_input_channels
from src.utils.segmentation.data import segmentation_create_dataloaders
from src.utils.segmentation.IID_losses import IID_segmentation_loss, \
IID_segmentation_loss_uncollapsed
from src.utils.segmentation.segmentation_eval import \
segmentation_eval
from src.utils.cluster.transforms import sobel_process
from src.utils.cluster.general import config_to_str, get_opt, update_lr, nice
import src.archs as archs
import matplotlib.pyplot as plt
import argparse
import os
import pickle
import sys
from datetime import datetime
import matplotlib
import numpy as np
import torch
matplotlib.use('Agg')
"""
Semisupervised overclustering for segmentation ("IIC+" = "IID+")
Note network is trained entirely unsupervised, as labels are found for
evaluation only and do not affect the network.
Train and test script.
Network has one output head only.
"""
# Options ----------------------------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument("--model_ind", type=int, required=True)
parser.add_argument("--arch", type=str, required=True)
parser.add_argument("--opt", type=str, default="Adam")
parser.add_argument("--mode", type=str, default="IID+")
parser.add_argument("--dataset", type=str, required=True)
parser.add_argument("--dataset_root", type=str, required=True)
parser.add_argument("--use_coarse_labels", default=False,
action="store_true") # COCO, Potsdam
parser.add_argument("--fine_to_coarse_dict", type=str, # COCO
default="/users/xuji/iid/iid_private/src/datasets"
"/segmentation/util/out/fine_to_coarse_dict.pickle")
parser.add_argument("--include_things_labels", default=False,
action="store_true") # COCO
parser.add_argument("--incl_animal_things", default=False,
action="store_true") # COCO
parser.add_argument("--coco_164k_curated_version",
type=int, default=-1) # COCO
parser.add_argument("--gt_k", type=int, required=True)
parser.add_argument("--output_k", type=int, required=True)
parser.add_argument("--lamb", type=float, default=1.0)
parser.add_argument("--lr", type=float, default=0.01)
parser.add_argument("--lr_schedule", type=int, nargs="+", default=[])
parser.add_argument("--lr_mult", type=float, default=0.1)
parser.add_argument("--use_uncollapsed_loss", default=False,
action="store_true")
parser.add_argument("--mask_input", default=False, action="store_true")
parser.add_argument("--num_epochs", type=int, default=1000)
parser.add_argument("--batch_sz", type=int, required=True) # num pairs
parser.add_argument("--num_dataloaders", type=int, default=3)
parser.add_argument("--num_sub_heads", type=int, default=5)
parser.add_argument("--out_root", type=str,
default="/scratch/shared/slow/xuji/iid_private")
parser.add_argument("--restart", default=False, action="store_true")
parser.add_argument("--save_freq", type=int, default=5)
parser.add_argument("--test_code", default=False, action="store_true")
parser.add_argument("--batchnorm_track", default=False, action="store_true")
# data transforms
parser.add_argument("--no_sobel", default=False, action="store_true")
parser.add_argument("--include_rgb", default=False, action="store_true")
parser.add_argument("--pre_scale_all", default=False,
action="store_true")
parser.add_argument("--pre_scale_factor", type=float, default=0.5)
parser.add_argument("--use_random_scale", default=False, action="store_true")
parser.add_argument("--scale_min", type=float, default=0.6)
parser.add_argument("--scale_max", type=float, default=1.4)
parser.add_argument("--input_sz", type=int, default=161) # half of kazuto1011
# transforms we learn invariance to
parser.add_argument("--jitter_brightness", type=float, default=0.4)
parser.add_argument("--jitter_contrast", type=float, default=0.4)
parser.add_argument("--jitter_saturation", type=float, default=0.4)
parser.add_argument("--jitter_hue", type=float, default=0.125)
parser.add_argument("--flip_p", type=float, default=0.5)
parser.add_argument("--use_random_affine", default=False,
action="store_true") # new
parser.add_argument("--aff_min_rot", type=float, default=-30.) # degrees
parser.add_argument("--aff_max_rot", type=float, default=30.) # degrees
parser.add_argument("--aff_min_shear", type=float, default=-10.) # degrees
parser.add_argument("--aff_max_shear", type=float, default=10.) # degrees
parser.add_argument("--aff_min_scale", type=float, default=0.8)
parser.add_argument("--aff_max_scale", type=float, default=1.2)
# local spatial invariance. Dense means done convolutionally. Sparse means done
# once in data augmentation phase. These are not mutually exclusive
parser.add_argument("--half_T_side_dense", type=int, default=0)
parser.add_argument("--half_T_side_sparse_min", type=int, default=0)
parser.add_argument("--half_T_side_sparse_max", type=int, default=0)
config = parser.parse_args()
# Setup ------------------------------------------------------------------------
config.out_dir = os.path.join(config.out_root, str(config.model_ind))
config.dataloader_batch_sz = int(config.batch_sz / config.num_dataloaders)
assert (config.mode == "IID+")
assert (config.output_k >= config.gt_k)
assert (not (config.no_sobel and (not config.include_rgb)))
config.eval_mode = "orig"
config.use_doersch_datasets = False
set_segmentation_input_channels(config)
if not os.path.exists(config.out_dir):
os.makedirs(config.out_dir)
if config.restart:
config_name = "config.pickle"
dict_name = "latest.pytorch"
given_config = config
reloaded_config_path = os.path.join(given_config.out_dir, config_name)
print("Loading restarting config from: %s" % reloaded_config_path)
with open(reloaded_config_path, "rb") as config_f:
config = pickle.load(config_f)
assert (config.model_ind == given_config.model_ind)
config.restart = True
# copy over new num_epochs and lr schedule
config.num_epochs = given_config.num_epochs
config.lr_schedule = given_config.lr_schedule
else:
print("Given config: %s" % config_to_str(config))
# Model ------------------------------------------------------------------------
dataloaders, mapping_assignment_dataloader, mapping_test_dataloader = \
segmentation_create_dataloaders(config)
net = archs.__dict__[config.arch](config)
if config.restart:
dict = torch.load(os.path.join(config.out_dir, dict_name),
map_location=lambda storage, loc: storage)
net.load_state_dict(dict["net"])
net.cuda()
net = torch.nn.DataParallel(net)
net.train()
optimiser = get_opt(config.opt)(net.module.parameters(), lr=config.lr)
if config.restart:
optimiser.load_state_dict(dict["optimiser"])  # key must match the one used in save_dict below
# Results ----------------------------------------------------------------------
if config.restart:
next_epoch = config.last_epoch + 1 # corresponds to last saved model
print("starting from epoch %d" % next_epoch)
config.epoch_acc = config.epoch_acc[:next_epoch] # in case we overshot
config.epoch_avg_subhead_acc = config.epoch_avg_subhead_acc[:next_epoch]
config.epoch_stats = config.epoch_stats[:next_epoch]
config.epoch_loss = config.epoch_loss[:(next_epoch - 1)]
config.epoch_loss_no_lamb = config.epoch_loss_no_lamb[:(next_epoch - 1)]
else:
config.epoch_acc = []
config.epoch_avg_subhead_acc = []
config.epoch_stats = []
config.epoch_loss = []
config.epoch_loss_no_lamb = []
# torch.cuda.empty_cache()
_ = segmentation_eval(config, net,
mapping_assignment_dataloader=mapping_assignment_dataloader,
mapping_test_dataloader=mapping_test_dataloader,
sobel=(not config.no_sobel),
using_IR=config.using_IR)
print("Pre: time %s: \n %s" %
(datetime.now(), nice(config.epoch_stats[-1])))
sys.stdout.flush()
next_epoch = 1
fig, axarr = plt.subplots(4, sharex=False, figsize=(20, 20))
if not config.use_uncollapsed_loss:
print("using condensed loss (default)")
loss_fn = IID_segmentation_loss
else:
print("using uncollapsed loss")
loss_fn = IID_segmentation_loss_uncollapsed
# Train ------------------------------------------------------------------------
for e_i in range(next_epoch, config.num_epochs):
print("Starting e_i: %d %s" % (e_i, datetime.now()))
sys.stdout.flush()
iterators = (d for d in dataloaders)
b_i = 0
if e_i in config.lr_schedule:
optimiser = update_lr(optimiser, lr_mult=config.lr_mult)
avg_loss = 0.
avg_loss_no_lamb = 0.
avg_loss_count = 0
for tup in zip(*iterators):
net.module.zero_grad()
if not config.no_sobel:
# one less because this is before sobel
pre_channels = config.in_channels - 1
else:
pre_channels = config.in_channels
all_img1 = torch.zeros(config.batch_sz, pre_channels,
config.input_sz, config.input_sz).to(
torch.float32).cuda()
all_img2 = torch.zeros(config.batch_sz, pre_channels,
config.input_sz, config.input_sz).to(
torch.float32).cuda()
all_affine2_to_1 = torch.zeros(config.batch_sz, 2, 3).to(
torch.float32).cuda()
all_mask_img1 = torch.zeros(config.batch_sz, config.input_sz,
config.input_sz).to(torch.float32).cuda()
curr_batch_sz = tup[0][0].shape[0]
for d_i in range(config.num_dataloaders):
img1, img2, affine2_to_1, mask_img1 = tup[d_i]
assert (img1.shape[0] == curr_batch_sz)
actual_batch_start = d_i * curr_batch_sz
actual_batch_end = actual_batch_start + curr_batch_sz
all_img1[actual_batch_start:actual_batch_end, :, :, :] = img1
all_img2[actual_batch_start:actual_batch_end, :, :, :] = img2
all_affine2_to_1[actual_batch_start:actual_batch_end,
:, :] = affine2_to_1
all_mask_img1[actual_batch_start:actual_batch_end,
:, :] = mask_img1
if not (curr_batch_sz == config.dataloader_batch_sz) and (
e_i == next_epoch):
print("last batch sz %d" % curr_batch_sz)
curr_total_batch_sz = curr_batch_sz * config.num_dataloaders
all_img1 = all_img1[:curr_total_batch_sz, :, :, :]
all_img2 = all_img2[:curr_total_batch_sz, :, :, :]
all_affine2_to_1 = all_affine2_to_1[:curr_total_batch_sz, :, :]
all_mask_img1 = all_mask_img1[:curr_total_batch_sz, :, :]
if (not config.no_sobel):
all_img1 = sobel_process(all_img1, config.include_rgb,
using_IR=config.using_IR)
all_img2 = sobel_process(all_img2, config.include_rgb,
using_IR=config.using_IR)
x1_outs = net(all_img1)
x2_outs = net(all_img2)
avg_loss_batch = None # avg over the heads
avg_loss_no_lamb_batch = None
for i in range(config.num_sub_heads):
loss, loss_no_lamb = loss_fn(x1_outs[i],
x2_outs[i],
all_affine2_to_1=all_affine2_to_1,
all_mask_img1=all_mask_img1,
lamb=config.lamb,
half_T_side_dense=config.half_T_side_dense,
half_T_side_sparse_min=config.half_T_side_sparse_min,
half_T_side_sparse_max=config.half_T_side_sparse_max)
if avg_loss_batch is None:
avg_loss_batch = loss
avg_loss_no_lamb_batch = loss_no_lamb
else:
avg_loss_batch += loss
avg_loss_no_lamb_batch += loss_no_lamb
avg_loss_batch /= config.num_sub_heads
avg_loss_no_lamb_batch /= config.num_sub_heads
if ((b_i % 100) == 0) or (e_i == next_epoch):
print("Model ind %d epoch %d batch: %d avg loss %f avg loss no lamb %f "
"time %s" %
(config.model_ind, e_i, b_i, avg_loss_batch.item(),
avg_loss_no_lamb_batch.item(), datetime.now()))
sys.stdout.flush()
if not np.isfinite(float(avg_loss_batch.data)):
print("Loss is not finite... %s:" % str(avg_loss_batch))
exit(1)
avg_loss += avg_loss_batch.item()
avg_loss_no_lamb += avg_loss_no_lamb_batch.item()
avg_loss_count += 1
avg_loss_batch.backward()
optimiser.step()
b_i += 1
if b_i == 2 and config.test_code:
break
# Eval -----------------------------------------------------------------------
avg_loss = float(avg_loss / avg_loss_count)
avg_loss_no_lamb = float(avg_loss_no_lamb / avg_loss_count)
config.epoch_loss.append(avg_loss)
config.epoch_loss_no_lamb.append(avg_loss_no_lamb)
is_best = segmentation_eval(config, net,
mapping_assignment_dataloader=mapping_assignment_dataloader,
mapping_test_dataloader=mapping_test_dataloader,
sobel=(
not config.no_sobel),
using_IR=config.using_IR)
print("Pre: time %s: \n %s" %
(datetime.now(), nice(config.epoch_stats[-1])))
sys.stdout.flush()
axarr[0].clear()
axarr[0].plot(config.epoch_acc)
axarr[0].set_title("acc (best), top: %f" % max(config.epoch_acc))
axarr[1].clear()
axarr[1].plot(config.epoch_avg_subhead_acc)
axarr[1].set_title("acc (avg), top: %f" %
max(config.epoch_avg_subhead_acc))
axarr[2].clear()
axarr[2].plot(config.epoch_loss)
axarr[2].set_title("Loss")
axarr[3].clear()
axarr[3].plot(config.epoch_loss_no_lamb)
axarr[3].set_title("Loss no lamb")
fig.canvas.draw_idle()
fig.savefig(os.path.join(config.out_dir, "plots.png"))
if is_best or (e_i % config.save_freq == 0):
net.module.cpu()
save_dict = {"net": net.module.state_dict(),
"optimiser": optimiser.state_dict()}
if e_i % config.save_freq == 0:
torch.save(save_dict, os.path.join(
config.out_dir, "latest.pytorch"))
config.last_epoch = e_i # for last saved version
if is_best:
torch.save(save_dict, os.path.join(config.out_dir, "best.pytorch"))
with open(os.path.join(config.out_dir, "best_config.pickle"),
'wb') as outfile:
pickle.dump(config, outfile)
with open(os.path.join(config.out_dir, "best_config.txt"),
"w") as text_file:
text_file.write("%s" % config)
net.module.cuda()
with open(os.path.join(config.out_dir, "config.pickle"), 'wb') as outfile:
pickle.dump(config, outfile)
with open(os.path.join(config.out_dir, "config.txt"), "w") as text_file:
text_file.write("%s" % config)
if config.test_code:
exit(0)
| [
"torch.nn.DataParallel",
"torch.zeros"
] | 1.8.0 | MihaiAnton/tudelft-iic-reproduction | 342247c444aa1f8b09ea18e3ff9135258d599373 |
1.3 | #
# Adapted from https://github.com/marcotcr/lime/blob/master/doc/notebooks/Tutorial%20-%20images%20-%20Pytorch.ipynb
#
import copy
import numpy as np
import torch
from lime import lime_image
from lime.wrappers.scikit_image import SegmentationAlgorithm
from interpretability.explanation_methods.utils import ExplainerBase, limit_n_images
from project_utils import to_numpy
class Lime(ExplainerBase, lime_image.LimeImageExplainer):
def __init__(self, trainer, num_samples=256, num_features=3, kernel_size=1, batch_size=2, num_classes=1000):
ExplainerBase.__init__(self, trainer)
lime_image.LimeImageExplainer.__init__(self, verbose=False)
self.segmenter = SegmentationAlgorithm('quickshift', kernel_size=kernel_size, max_dist=200, ratio=0.2)
self.max_imgs_bs = 1
self.num_samples = num_samples
self.num_classes = num_classes
self.num_features = num_features
self.batch_size = batch_size
def pred_f(self, input_samples):
return to_numpy(self.trainer.predict(self.make_input_tensor(input_samples)))
def make_input_tensor(self, img_list):
return torch.stack(tuple(torch.from_numpy(t) for t in img_list), dim=0).permute(0, 3, 1, 2).cuda()
@limit_n_images
@torch.no_grad()
def attribute(self, img, target, return_all=False):
with torch.no_grad():
explanation = self.explain_instance(
to_numpy(img[0].permute(1, 2, 0)),
self.pred_f, labels=range(self.num_classes),
top_labels=None, num_samples=self.num_samples,
segmentation_fn=self.segmenter, batch_size=self.batch_size)
if return_all:
return torch.cat([
torch.from_numpy(np.array(explanation.get_image_and_mask(
t, hide_rest=True, positive_only=True,
num_features=self.num_features)[1][None, None], dtype=float))
for t in range(self.num_classes)], dim=0)
return torch.from_numpy(np.array(explanation.get_image_and_mask(
int(np.array(target)), hide_rest=True, positive_only=True,
num_features=self.num_features)[1][None, None], dtype=float))
def attribute_selection(self, x, tgts):
return self.attribute(x, tgts, return_all=True)[tgts]
def data_labels(self,
image,
fudged_image,
segments,
classifier_fn,
num_samples,
batch_size=10):
"""
SAME AS BASE, just deleted tqdm.
Generates images and predictions in the neighborhood of this image.
Args:
image: 3d numpy array, the image
fudged_image: 3d numpy array, image to replace original image when
superpixel is turned off
segments: segmentation of the image
classifier_fn: function that takes a list of images and returns a
matrix of prediction probabilities
num_samples: size of the neighborhood to learn the linear model
batch_size: classifier_fn will be called on batches of this size.
Returns:
A tuple (data, labels), where:
data: dense num_samples * num_superpixels
labels: prediction probabilities matrix
"""
n_features = np.unique(segments).shape[0]
data = self.random_state.randint(0, 2, num_samples * n_features)\
.reshape((num_samples, n_features))
labels = []
data[0, :] = 1
imgs = []
for row in data:
temp = copy.deepcopy(image)
zeros = np.where(row == 0)[0]
mask = np.zeros(segments.shape).astype(bool)
for z in zeros:
mask[segments == z] = True
temp[mask] = fudged_image[mask]
imgs.append(temp)
if len(imgs) == batch_size:
preds = classifier_fn(np.array(imgs))
labels.extend(preds)
imgs = []
if len(imgs) > 0:
preds = classifier_fn(np.array(imgs))
labels.extend(preds)
return data, np.array(labels)
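# Illustrative note (numbers are hypothetical): with num_samples=4 and an image that the
# segmenter split into 6 superpixels, `data` is a 4x6 binary matrix whose first row is all ones
# (the unperturbed image), and `labels` stacks one row of class probabilities per perturbed image.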
| [
"torch.no_grad",
"torch.from_numpy"
] | 1.3.1 | moboehle/B-cos | 5f9218f6773534c80367793d1cd767742869764a |
1.0 | #!/usr/bin/env python3
# encoding: utf-8
"""
Copyright (c) 2021, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
# Copyright 2017 Johns Hopkins University (Shinji Watanabe)
# Copyright 2020 Salesforce Research (Weiran Wang)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Training/decoding definition for the speech recognition task."""
import pdb
import json
import logging
import math
import os
import shutil
import tempfile
import time
import numpy as np
from tensorboardX import SummaryWriter
import torch
import torch.distributed as dist
from speech_datasets import SpeechDataLoader
from espnet.asr.pytorch_backend.asr_init import load_trained_modules
from espnet.nets.asr_interface import ASRInterface
from espnet.utils.deterministic_utils import set_deterministic_pytorch
from espnet.utils.dynamic_import import dynamic_import
from espnet.asr.pytorch_backend.pytorch_distributed_utils import DistributedModel
from espnet.asr.pytorch_backend.pytorch_distributed_utils import all_gather_list
from espnet.nets.pytorch_backend.nets_utils import pad_list
from espnet.nets.pytorch_backend.transformer.mask import subsequent_mask
from espnet.nets.pytorch_backend.transformer.subsampling import _context_concat
# For recognizer.
from espnet.asr.pytorch_backend.asr_init import load_trained_model
import espnet.lm.pytorch_backend.extlm_bpe as extlm_pytorch
import espnet.nets.pytorch_backend.lm.default as lm_pytorch
from espnet.asr.asr_utils import get_model_conf
from espnet.asr.asr_utils import torch_load
from espnet.asr.asr_utils import add_results_to_json
from espnet.asr.asr_utils import add_results_to_json_word
CTC_SCORING_RATIO=1.5
def _recursive_to(xs, device):
if torch.is_tensor(xs):
return xs.to(device)
if isinstance(xs, tuple):
return tuple(_recursive_to(x, device) for x in xs)
return xs
class CustomConverter(object):
"""Custom batch converter for Pytorch.
Args:
input_context (int): Number of context frames concatenated to the left and right of each frame.
input_skiprate (int): The subsampling factor.
dtype (torch.dtype): Data type to convert.
"""
def __init__(self, input_context=0, input_skiprate=1, mode="eval", dtype=torch.float32):
"""Construct a CustomConverter object."""
self.input_context = input_context
self.input_skiprate = input_skiprate
self.ignore_id = -1
self.mode = mode
self.dtype = dtype
def __call__(self, batch, device=torch.device('cpu')):
"""Transform a batch and send it to a device.
Args:
batch (list): The batch to transform.
device (torch.device): The device to send to.
Returns:
tuple(torch.Tensor, torch.Tensor, torch.Tensor)
"""
# batch should be a list
xs = [_['x'] for _ in batch]
ys = [_['labels'] for _ in batch]
# perform subsampling
if self.input_context > 0:
xs = [_context_concat(x, self.input_context) for x in xs]
if self.input_skiprate > 1:
if self.mode == "train":
startidx = np.random.randint(low=0, high=2*self.input_context+1)
else:
startidx = 0
xs = [x[startidx::self.input_skiprate, :] for x in xs]
# get batch of lengths of input sequences
ilens = np.array([x.size(0) for x in xs])
# perform padding and convert to tensor
# currently only support real number
xs_pad = pad_list([x.float() for x in xs], 0).to(device, dtype=self.dtype)
ilens = torch.from_numpy(ilens).to(device)
# NOTE: this is for multi-output (e.g., speech translation)
ys_pad = pad_list([y.long() for y in ys], self.ignore_id).to(device)
return xs_pad, ilens, ys_pad
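# Illustrative example (values are hypothetical): with input_context=1 and input_skiprate=3,
# an utterance of T frames with 80-dim features is first expanded to 240-dim frames by
# concatenating each frame with its left and right neighbour, and then only every third frame
# is kept (with a random start offset during training, offset 0 in eval), so roughly T/3 frames
# are padded into xs_pad.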
def torch_resume(snapshot_path, model, optimizer):
"""Resume from snapshot for pytorch.
Args:
snapshot_path (str): Snapshot file path.
model: ASR model object.
"""
# load snapshot
snapshot_dict = torch.load(snapshot_path, map_location=lambda storage, loc: storage)
# restore model states
# (for ASR model)
if hasattr(model, "module"):
model.module.load_state_dict(snapshot_dict['model'])
else:
model.load_state_dict(snapshot_dict['model'])
# restore optimizer states
optimizer.load_state_dict(snapshot_dict['optimizer'])
train_dict = snapshot_dict['train_dict']
# delete opened snapshot
del snapshot_dict
return train_dict
def torch_snapshot(model, optimizer, train_dict, fn, outdir):
# make snapshot_dict dictionary
# (for ASR)
if hasattr(model, "module"):
model_state_dict = model.module.state_dict()
else:
model_state_dict = model.state_dict()
snapshot_dict = {
"model": model_state_dict,
"optimizer": optimizer.state_dict(),
"train_dict": train_dict
}
# save snapshot dictionary
prefix = 'tmp' + fn
tmpdir = tempfile.mkdtemp(prefix=prefix, dir=outdir)
tmppath = os.path.join(tmpdir, fn)
try:
torch.save(snapshot_dict, tmppath)
shutil.move(tmppath, os.path.join(outdir, fn))
finally:
shutil.rmtree(tmpdir)
# Multiply the gradient by a scalar.
def multiply_grads(model, c):
for p in model.parameters():
if p.grad is not None:
p.grad.data.mul_(c)
def train(args):
"""Train with the given args.
Args:
args (namespace): The program arguments.
"""
# Get the rank of current process. Determines the portion of data to load.
global_rank = dist.get_rank()
set_deterministic_pytorch(args)
# check cuda availability
if not torch.cuda.is_available():
logging.error('cuda is not available')
# idim and odim are specified by user.
idim = args.idim
odim = args.odim
if global_rank == 0:
logging.info('#input dims : {}'.format(idim))
logging.info('#output dims: {}' .format(odim))
# specify attention, CTC, hybrid mode
if global_rank == 0:
if args.mtlalpha == 1.0:
logging.info('Pure CTC mode')
elif args.mtlalpha == 0.0:
logging.info('Pure attention mode')
else:
logging.info('Multitask learning mode')
if (args.enc_init is not None or args.dec_init is not None):
model = load_trained_modules(idim * (2 * args.input_context + 1), odim, args)
else:
model_class = dynamic_import(args.model_module)
model = model_class(idim * (2 * args.input_context + 1), odim, args)
assert isinstance(model, ASRInterface)
model_parameters = filter(lambda p: p.requires_grad, model.parameters())
params = sum([np.prod(p.size()) for p in model_parameters])
logging.info("total number of params in model: %d" % params)
# write model config
if global_rank == 0 and not os.path.exists(args.outdir):
os.makedirs(args.outdir)
dist.barrier()
model_conf = args.outdir + '/model.json'
if global_rank == 0:
with open(model_conf, 'wb') as f:
logging.info('writing a model config file to ' + model_conf)
f.write(json.dumps((idim * (2 * args.input_context + 1), odim, vars(args)),
indent=4, ensure_ascii=False, sort_keys=True).encode('utf_8'))
for key in sorted(vars(args).keys()):
logging.info('ARGS: ' + key + ': ' + str(vars(args)[key]))
# Reporter.
reporter = SummaryWriter(args.tensorboard_dir)
# check the use of multi-gpu
world_size = dist.get_world_size()
assert args.ngpu > 0 and world_size > 0, "Distributed training requires GPUs ..."
if global_rank == 0:
logging.warning(
'batch size is automatically increased (%d -> %d)' % (args.batch_size, args.batch_size * world_size))
# Set torch device
torch.cuda.set_device(args.local_rank)
model = model.cuda(args.local_rank)
if args.train_dtype in ("float16", "float32", "float64"):
dtype = getattr(torch, args.train_dtype)
else:
dtype = torch.float32
device = args.local_rank
# Setup an optimizer
if args.opt == 'adam':
optimizer = torch.optim.Adam(model.parameters(), lr=args.adam_lr, weight_decay=args.weight_decay)
elif args.opt == 'noam':
from espnet.nets.pytorch_backend.transformer.optimizer import get_std_opt
optimizer = get_std_opt(model, args.adim, args.transformer_warmup_steps, args.transformer_lr)
elif args.opt == 'rampup':
from espnet.nets.pytorch_backend.transformer.rampup import get_std_opt
optimizer = get_std_opt(model, args.adim, args.rampup_sr, args.rampup_si, args.rampup_sf, args.rampup_lr)
else:
raise NotImplementedError("unknown optimizer: " + args.opt)
# Resume from a snapshot
if args.resume:
train_dict = torch_resume(args.resume, model, optimizer)
if global_rank == 0:
logging.info('resumed from %s' % args.resume)
else:
train_dict = dict({"epoch": 0, "iteration": 0, "validation_loss": []})
model = DistributedModel(model, args.local_rank, bucket_cap_mb=256)
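    # Wrap the model for distributed data-parallel training; the no_sync() context used below relies on this wrapper to delay gradient all-reduce.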
# Setup a converter
train_converter = CustomConverter(input_context=args.input_context, input_skiprate=args.input_skiprate,
mode="train", dtype=dtype)
valid_converter = CustomConverter(input_context=args.input_context, input_skiprate=args.input_skiprate,
mode="eval", dtype=dtype)
# create data loaders. train_sets and valid_sets will be dataset names separated by comma
train_data = list(filter(lambda s: len(s) > 0, map(lambda s: s.strip(), args.train_sets.split(","))))
valid_data = list(filter(lambda s: len(s) > 0, map(lambda s: s.strip(), args.valid_sets.split(","))))
train_loader = SpeechDataLoader(train_data, task="asr", shuffle=True, precomputed_feats_type=args.precomputed_feats_type,
batch_size=args.batch_size, max_len=args.maxlen_in, spmodel=args.spmodel, token_list=args.char_list,
transform_conf=args.preprocess_conf, train=True, num_workers=args.loader_num_worker, data_cache_mb=args.loader_cache_mb,
text_filename=args.text_filename)
valid_loader = SpeechDataLoader(valid_data, task="asr", shuffle=False, precomputed_feats_type=args.precomputed_feats_type,
batch_size=args.batch_size, max_len=args.maxlen_in, spmodel=args.spmodel, token_list=args.char_list,
transform_conf=args.preprocess_conf, train=False, num_workers=args.loader_num_worker, data_cache_mb=args.loader_cache_mb,
text_filename=args.text_filename)
epoch = train_dict['epoch']
train_loader.set_epoch(epoch)
# These variables may be nonzero after resuming from snapshot.
# Record the number of updates.
iteration = train_dict["iteration"]
# Record the validation loss.
validation_loss = train_dict["validation_loss"]
# Determine whether to update, and the accurate number of samples used between two updates.
forward_count = 0
xnum_train = 0
ynum_train = 0
loss_train = 0.0
loss_att_train = 0.0
loss_ctc_train = 0.0
acc_train = 0.0
while True:
if 0 < args.epochs <= epoch:
train_loader.close()
valid_loader.close()
break
with model.no_sync():
# Only synchronize the gradients every accum_grad steps, or at the end of an epoch
while forward_count < args.accum_grad - 1 and train_loader.current_position < len(train_loader)-1:
# Get the next batch
logging.info("Getting batch from dataloader")
batch = train_loader.next()
logging.info("CHECK I: position=%d, total=%d, epoch=%d, model_device=%s" % (train_loader.current_position, len(train_loader), train_loader.epoch, next(model.parameters()).device))
forward_count += 1
x = _recursive_to(train_converter(batch), device)
logging.info("Move batch to GPU")
# Weiran: the actual number of utts in the minibatch.
x_num = x[0].size(0)
y_num = float(torch.sum(x[2] != train_converter.ignore_id).cpu())
loss, loss_data, loss_ctc_data, loss_att_data, acc_data, _ = model(*x)
xnum_train += x_num
ynum_train += y_num
loss_train += x_num * loss_data
loss_att_train += x_num * loss_att_data
loss_ctc_train += x_num * loss_ctc_data
acc_train += y_num * acc_data
# loss is the sum, not average.
loss = x_num * loss
loss.backward()
loss.detach() # Truncate the graph
        # Process one more batch as above, but outside no_sync() so that this backward
        # pass synchronizes the accumulated gradients: either accum_grad steps have been
        # reached, or the epoch is about to finish.
logging.info("Getting batch from dataloader")
batch = train_loader.next()
logging.info("CHECK II: position=%d, total=%d, epoch=%d, model_device=%s" % (train_loader.current_position, len(train_loader), train_loader.epoch, next(model.parameters()).device))
forward_count += 1
x = _recursive_to(train_converter(batch), device)
logging.info("Move batch to GPU")
# Weiran: the actual number of utts in the minibatch.
x_num = x[0].size(0)
y_num = float(torch.sum(x[2] != train_converter.ignore_id).cpu())
loss, loss_data, loss_ctc_data, loss_att_data, acc_data, _ = model(*x)
xnum_train += x_num
ynum_train += y_num
loss_train += x_num * loss_data
loss_att_train += x_num * loss_att_data
loss_ctc_train += x_num * loss_ctc_data
acc_train += y_num * acc_data
# loss is the sum, not average.
loss = x_num * loss
loss.backward()
loss.detach() # Truncate the graph
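        # This backward pass runs outside no_sync(), so the gradients accumulated above are all-reduced across workers here.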
# update parameters
        is_new_epoch = (epoch != train_loader.epoch)
assert is_new_epoch or forward_count == args.accum_grad
# Needed when distributed_world_size > 1.
xnum_all, ynum_all, loss_all, loss_att_all, loss_ctc_all, acc_all = \
zip(*all_gather_list([xnum_train, ynum_train, loss_train, loss_att_train, loss_ctc_train, acc_train]))
total_xnum = sum(xnum_all)
total_ynum = sum(ynum_all)
total_loss = sum(loss_all)
total_loss_att = sum(loss_att_all)
total_loss_ctc = sum(loss_ctc_all)
total_acc = sum(acc_all)
# Re-scale gradients, in order to obtain the right loss gradient with simple averaging.
grad_factor = 1.0 / total_xnum
multiply_grads(model, grad_factor)
# compute the gradient norm to check if it is normal or not
grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), args.grad_clip)
if global_rank == 0:
logging.info('grad norm={}'.format(grad_norm))
if math.isnan(grad_norm):
logging.warning('grad norm is nan. Do not update model.')
else:
optimizer.step()
optimizer.zero_grad()
iteration += 1
if iteration % args.report_interval_iters == 0 and global_rank == 0:
reporter.add_scalar("train/loss", total_loss / total_xnum, iteration)
reporter.add_scalar("train/loss_att", total_loss_att / total_xnum, iteration)
reporter.add_scalar("train/loss_ctc", total_loss_ctc / total_xnum, iteration)
reporter.add_scalar("train/acc", total_acc / total_ynum, iteration)
reporter.add_scalar("train/lr", optimizer.param_groups[0]['lr'], iteration)
# Reset the accumulation.
forward_count = 0
xnum_train = 0
ynum_train = 0
loss_train = 0.0
loss_att_train = 0.0
loss_ctc_train = 0.0
acc_train = 0.0
# Finished one epoch.
if is_new_epoch:
epoch += 1
# Evaluate the model with the validation dataset at the end of every epoch.
model.eval()
logging.info("Start validation for epoch %d" % train_loader.epoch)
xnum_valid = 0
ynum_valid = 0
loss_valid = 0.0
loss_att_valid = 0.0
loss_ctc_valid = 0.0
err_ctc_valid = 0.0
acc_valid = 0.0
for batch in valid_loader:
logging.info("Getting batch from dataloader")
x = _recursive_to(valid_converter(batch), device)
logging.info("Move batch to GPU")
x_num = x[0].size(0)
y_num = float(torch.sum(x[2] != valid_converter.ignore_id).cpu())
with torch.no_grad():
_, loss_data, loss_ctc_data, loss_att_data, acc_data, cer_ctc_data = model(*x)
xnum_valid += x_num
ynum_valid += y_num
loss_valid += x_num * loss_data
loss_att_valid += x_num * loss_att_data
loss_ctc_valid += x_num * loss_ctc_data
acc_valid += y_num * acc_data
err_ctc_valid += y_num * cer_ctc_data
xnum_all, ynum_all, loss_all, loss_att_all, loss_ctc_all, acc_all, err_ctc_all = \
zip(*all_gather_list(
[xnum_valid, ynum_valid, loss_valid, loss_att_valid, loss_ctc_valid, acc_valid, err_ctc_valid]))
total_xnum = sum(xnum_all)
total_ynum = sum(ynum_all)
total_loss = sum(loss_all)
total_loss_att = sum(loss_att_all)
total_loss_ctc = sum(loss_ctc_all)
total_acc = sum(acc_all)
total_err_ctc = sum(err_ctc_all)
# Each GPU has access to the validation loss in order to adjust learning rate.
validation_loss.append(total_loss / total_xnum)
if global_rank == 0:
reporter.add_scalar("valid/loss", total_loss / total_xnum, iteration)
reporter.add_scalar("valid/loss_att", total_loss_att / total_xnum, iteration)
reporter.add_scalar("valid/loss_ctc", total_loss_ctc / total_xnum, iteration)
reporter.add_scalar("valid/acc", total_acc / total_ynum, iteration)
reporter.add_scalar("valid/err_ctc", total_err_ctc / total_ynum, iteration)
# Save model at the end of each epoch.
if global_rank == 0:
train_dict["epoch"] = train_loader.epoch
train_dict["iteration"] = iteration
train_dict["validation_loss"] = validation_loss
torch_snapshot(model, optimizer, train_dict, 'snapshot.ep.%d' % train_loader.epoch, args.outdir)
logging.info("snapshot saved to snapshot.ep.%d" % train_loader.epoch)
if validation_loss[-1] == min(validation_loss):
torch_snapshot(model, optimizer, train_dict, 'model.loss.best', args.outdir)
logging.info("best model saved to model.loss.best")
# Go back to training again.
model.train()
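            # Decay the Adam learning rate when the validation loss has not reached a new best in the last 3 epochs (floor at 1e-6).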
if args.opt == "adam":
if epoch > 3 and min(validation_loss) < min(validation_loss[-3:]):
for p in optimizer.param_groups:
p["lr"] = max(p["lr"] * args.adam_decay, 1e-6)
if global_rank == 0:
logging.info('adam lr decayed to ' + str(p["lr"]))
def recog(args):
"""Decode with the given args.
Args:
args (namespace): The program arguments.
"""
set_deterministic_pytorch(args)
    start_time = time.time()
    model, train_args = load_trained_model(args.model)
    end_time = time.time()
    logging.info("loading model took %f seconds" % (end_time - start_time))
assert isinstance(model, ASRInterface)
model.recog_args = args
# read rnnlm
if args.rnnlm:
rnnlm_args = get_model_conf(args.rnnlm, args.rnnlm_conf)
if getattr(rnnlm_args, "model_module", "default") != "default":
raise ValueError("use '--api v2' option to decode with non-default language model")
rnnlm = lm_pytorch.ClassifierWithState(
lm_pytorch.RNNLM(len(train_args.char_list), rnnlm_args.layer, rnnlm_args.unit))
torch_load(args.rnnlm, rnnlm)
rnnlm.eval()
else:
rnnlm = None
if args.word_rnnlm:
rnnlm_args = get_model_conf(args.word_rnnlm, args.word_rnnlm_conf)
word_dict = rnnlm_args.char_list_dict
char_dict = {x: i for i, x in enumerate(train_args.char_list)}
word_rnnlm = lm_pytorch.ClassifierWithState(lm_pytorch.RNNLM(
max(word_dict.values()) + 1, rnnlm_args.layer, rnnlm_args.unit))
# Weiran: modified the code to infer n_vocab when there are missing keys in char_list_dict.
# len(word_dict), rnnlm_args.layer, rnnlm_args.unit))
torch_load(args.word_rnnlm, word_rnnlm)
word_rnnlm.eval()
if rnnlm is not None:
rnnlm = lm_pytorch.ClassifierWithState(
extlm_pytorch.MultiLevelLM_with_lexicon(word_rnnlm.predictor,
rnnlm.predictor, word_dict, char_dict, args.lexicon_dict, "▁", subwordlm_weight=args.sublm_weight))
else:
rnnlm = lm_pytorch.ClassifierWithState(
extlm_pytorch.LookAheadWordLM(word_rnnlm.predictor, word_dict, char_dict))
# Read truth file.
if args.truth_file:
with open(args.truth_file, "r") as fin:
retval = fin.read().rstrip('\n')
dict_truth = dict([(l.split()[0], " ".join(l.split()[1:])) for l in retval.split("\n")])
# gpu
if args.ngpu == 1:
gpu_id = list(range(args.ngpu))
logging.info('gpu id: ' + str(gpu_id))
model.cuda()
if rnnlm:
rnnlm.cuda()
new_js = {}
recog_converter = CustomConverter(input_context=args.input_context, input_skiprate=args.input_skiprate,
mode="eval", dtype=torch.float32)
recog_data = list(filter(lambda s: len(s) > 0, map(lambda s: s.strip(), args.recog_sets.split(","))))
recog_loader = SpeechDataLoader(recog_data, task="asr", shuffle=False,
precomputed_feats_type=args.precomputed_feats_type,
batch_size=1, spmodel=args.spmodel, token_list=train_args.char_list,
transform_conf=args.preprocess_conf, train=False, num_workers=args.loader_num_worker,
data_cache_mb=args.loader_cache_mb, num_replicas=args.num_replicas, rank=args.jobid-1,
ensure_equal_parts=False, text_filename=args.text_filename)
with torch.no_grad():
idx = 0
for batch in recog_loader:
idx += 1
# pdb.set_trace()
name = batch[0]['uttid']
logging.info('(%d/%d) decoding ' + name, idx, len(recog_loader))
feat = _recursive_to(recog_converter(batch), device=torch.device('cpu'))[0]
feat = feat[0]
inverse_subword_dict = dict([(i,c) for i,c in enumerate(train_args.char_list)])
if args.word_rnnlm:
inverse_word_dict = dict([(word_dict[k], k) for k in word_dict])
else:
inverse_word_dict = None
if args.rnnlm and args.word_rnnlm:
nbest_hyps = recognize_with_lexicon(model, feat, args, rnnlm=rnnlm, inverse_subword_dict=inverse_subword_dict, inverse_word_dict=inverse_word_dict)
else:
nbest_hyps = model.recognize(feat, args, train_args.char_list, rnnlm)
if args.truth_file and name in dict_truth:
truth_text = dict_truth[name]
else:
truth_text = ""
# Weiran: prepare dict in order to add decoding results. Skipped the input and shape information.
gt_tokens = [int(_) for _ in batch[0]["labels"]]
tmp_dict = {"output": [{"name": "target1", "text": batch[0]["text"],
"tokenid": " ".join([str(_) for _ in gt_tokens]).strip(),
"token": " ".join([train_args.char_list[_] for _ in gt_tokens]).strip()}],
"utt2spk": batch[0]["speaker"]}
# Weiran: I am adding text in words in the result json.
if args.word_rnnlm and args.rnnlm:
new_js[name] = add_results_to_json_word(tmp_dict, nbest_hyps, train_args.char_list, inverse_word_dict, truth_text)
else:
new_js[name] = add_results_to_json(tmp_dict, nbest_hyps, train_args.char_list, add_hyp_prefix_wer=args.nbest_compute_wer)
with open(args.result_label, 'wb') as f:
f.write(json.dumps({'utts': new_js}, indent=4, ensure_ascii=False, sort_keys=True).encode('utf_8'))
################################################################################################
################## Weiran: The recognizers below are implemented by myself. ####################
################################################################################################
def recognize_with_lexicon(model, x, recog_args, rnnlm=None, inverse_subword_dict=None, inverse_word_dict=None):
"""Recognize input speech.
:model the asr model, such as e2e_asr_transformer:E2E
:param ndnarray x: input acoustic feature (B, T, D) or (T, D)
:param Namespace recog_args: argument Namespace containing options
:param torch.nn.Module rnnlm: language model module
:param dict inverse_subword_dict: index to str
:param dict inverse_word_dict: index to str
:return: N-best decoding results
:rtype: list
"""
incomplete_word = -99
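    # -99 is a sentinel value meaning that no complete word has been emitted at the current step.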
enc_output = model.encode(x).unsqueeze(0)
if recog_args.ctc_weight > 0.0:
lpz = model.ctc.log_softmax(enc_output)
lpz = lpz.squeeze(0)
else:
lpz = None
h = enc_output.squeeze(0)
logging.info('input lengths: ' + str(h.size(0)))
    # search params
beam = recog_args.beam_size
ctc_weight = recog_args.ctc_weight
word_bonus = recog_args.word_bonus
# prepare sos
y = model.sos
vy = h.new_zeros(1).long()
if recog_args.maxlenratio == 0:
maxlen = h.shape[0]
else:
# maxlen >= 1
maxlen = max(1, int(recog_args.maxlenratio * h.size(0)))
minlen = int(recog_args.minlenratio * h.size(0))
logging.info('max output length: ' + str(maxlen))
logging.info('min output length: ' + str(minlen))
# initialize hypothesis
if rnnlm:
hyp = {'score': 0.0, 'yseq': [y], 'wseq': [], 'rnnlm_prev': None}
else:
hyp = {'score': 0.0, 'yseq': [y], 'wseq': []}
if lpz is not None:
import numpy
from espnet.nets.ctc_prefix_score import CTCPrefixScore
ctc_prefix_score = CTCPrefixScore(lpz.detach().numpy(), 0, model.eos, numpy)
hyp['ctc_state_prev'] = ctc_prefix_score.initial_state()
hyp['ctc_score_prev'] = 0.0
if ctc_weight != 1.0:
# pre-pruning based on attention scores
ctc_beam = min(lpz.shape[-1], int(beam * CTC_SCORING_RATIO))
else:
ctc_beam = lpz.shape[-1]
hyps = [hyp]
ended_hyps = []
import six
for i in six.moves.range(maxlen):
logging.debug('position ' + str(i))
hyps_best_kept = []
for hyp in hyps:
# vy.unsqueeze(1)
# Weiran: get the last hypothesized token.
vy[0] = hyp['yseq'][i]
# get nbest local scores and their ids
ys_mask = subsequent_mask(i + 1).unsqueeze(0)
ys = torch.tensor(hyp['yseq']).unsqueeze(0)
local_att_scores = model.decoder.forward_one_step(ys, ys_mask, enc_output)[0]
if rnnlm:
logging.debug("\nUsing rnnlm ...")
RNNLM_STATE_LIST = rnnlm.predict(hyp['rnnlm_prev'], vy)
else:
# Fake the RNNLM list, with only one item.
RNNLM_STATE_LIST = [(None, local_att_scores, incomplete_word)]
# Weiran: if the list has more than one element, we need to expand the set of hypothesis.
for rnnlm_state, local_lm_scores, word_output in RNNLM_STATE_LIST:
if word_output>0:
logging.debug("\n===================\nHypothesis:")
logging.debug(hyp['yseq'])
logging.debug(" ".join([inverse_subword_dict[int(tid)] for tid in hyp['yseq']]))
logging.debug("current word hypothesis:")
logging.debug(hyp['wseq'])
logging.debug(" ".join([inverse_word_dict[int(wid)] for wid in hyp['wseq'] if wid > 0]))
if word_output>=0:
logging.debug("outputing word: %s (%d)" % (inverse_word_dict[int(word_output)], int(word_output)))
else:
logging.debug("Not outputing word.")
logging.debug("current score=%f" % hyp['score'])
logging.debug("acc. clm score=%f" % rnnlm_state[-1])
if rnnlm:
local_scores = local_att_scores + recog_args.lm_weight * local_lm_scores
else:
local_scores = local_att_scores
# Weiran: correct local_scores if ctc_prefix_score is used.
if lpz is not None:
local_best_scores, local_best_ids = torch.topk(local_att_scores, ctc_beam, dim=1)
ctc_scores, ctc_states = ctc_prefix_score(hyp['yseq'], local_best_ids[0], hyp['ctc_state_prev'])
local_scores = \
(1.0 - ctc_weight) * local_att_scores[:, local_best_ids[0]] \
+ ctc_weight * torch.from_numpy(ctc_scores - hyp['ctc_score_prev'])
if rnnlm:
local_scores += recog_args.lm_weight * local_lm_scores[:, local_best_ids[0]]
local_best_scores, joint_best_ids = torch.topk(local_scores, beam, dim=1)
local_best_ids = local_best_ids[:, joint_best_ids[0]]
else:
local_best_scores, local_best_ids = torch.topk(local_scores, beam, dim=1)
# Going over the beams.
for j in six.moves.range(beam):
new_hyp = {}
new_hyp['score'] = hyp['score'] + float(local_best_scores[0, j])
new_hyp['yseq'] = [0] * (1 + len(hyp['yseq']))
new_hyp['yseq'][:len(hyp['yseq'])] = hyp['yseq']
new_hyp['yseq'][len(hyp['yseq'])] = int(local_best_ids[0, j])
new_hyp['wseq'] = list(hyp['wseq']) + [word_output]
if word_output >= 0:
new_hyp['score'] = new_hyp['score'] + word_bonus
if rnnlm:
new_hyp['rnnlm_prev'] = rnnlm_state
if lpz is not None:
new_hyp['ctc_state_prev'] = ctc_states[joint_best_ids[0, j]]
new_hyp['ctc_score_prev'] = ctc_scores[joint_best_ids[0, j]]
# will be (2 x beam) hyps at most
hyps_best_kept.append(new_hyp)
hyps_best_kept = sorted(hyps_best_kept, key=lambda x: x['score'], reverse=True)[:beam]
        # Finished expanding all hypotheses at position i.
        # sort and get nbest
        hyps = hyps_best_kept
        logging.debug('number of pruned hypotheses: ' + str(len(hyps)))
if inverse_subword_dict is not None:
logging.debug('best hypo: ' + ''.join([inverse_subword_dict[int(x)] for x in hyps[0]['yseq'][1:]]))
# add eos in the final loop to avoid that there are no ended hyps
if i == maxlen - 1:
logging.info('adding <eos> in the last position in the loop')
for hyp in hyps:
hyp['yseq'].append(model.eos)
        # add ended hypotheses to a final list, and remove them from the current hypotheses
        # (note: the number of remaining hyps may then drop below beam)
remained_hyps = []
for hyp in hyps:
if hyp['yseq'][-1] == model.eos:
# only store the sequence that has more than minlen outputs
if len(hyp['yseq']) > minlen:
if rnnlm: # Word LM needs to add final <eos> score
RNNLM_FINAL_LIST = rnnlm.final(hyp['rnnlm_prev'])
for rnnlm_final_score, word_output in RNNLM_FINAL_LIST:
logging.debug("\n===================\nHypothesis ending:")
logging.debug(hyp['yseq'])
logging.debug(" ".join([inverse_subword_dict[int(tid)] for tid in hyp['yseq']]))
logging.debug("current word hypothesis:")
logging.debug(hyp['wseq'])
logging.debug(" ".join([inverse_word_dict[int(wid)] for wid in hyp['wseq'] if wid > 0]))
if word_output >= 0:
logging.debug("outputing word: %s" % inverse_word_dict[int(word_output)])
logging.debug("current score=%f" % hyp['score'])
logging.debug("adding last word+end score=%f\n===================" % (recog_args.lm_weight * rnnlm_final_score))
new_hyp = {}
new_hyp['score'] = hyp['score'] + recog_args.lm_weight * rnnlm_final_score
new_hyp['yseq'] = hyp['yseq']
new_hyp['wseq'] = list(hyp['wseq']) + [word_output]
if word_output >= 0:
new_hyp['score'] = new_hyp['score'] + word_bonus
ended_hyps.append(new_hyp)
else:
remained_hyps.append(hyp)
# end detection
from espnet.nets.e2e_asr_common import end_detect
if end_detect(ended_hyps, i) and recog_args.maxlenratio == 0.0:
logging.info('end detected at %d' % i)
break
hyps = remained_hyps
if len(hyps) > 0:
            logging.debug('remaining hypotheses: ' + str(len(hyps)))
else:
logging.info('no hypothesis. Finish decoding.')
break
if inverse_subword_dict is not None:
for hyp in hyps:
logging.debug(
'hypo: ' + ''.join([inverse_subword_dict[int(x)] for x in hyp['yseq'][1:]]))
        logging.debug('number of ended hypotheses: ' + str(len(ended_hyps)))
# Finished label-synchronous decoding.
nbest_hyps = sorted(ended_hyps, key=lambda x: x['score'], reverse=True)[:min(len(ended_hyps), recog_args.nbest)]
# check number of hypothesis
if len(nbest_hyps) == 0:
        logging.warning('there are no N-best results; performing recognition again with a smaller minlenratio.')
# should copy because Namespace will be overwritten globally
from argparse import Namespace
recog_args = Namespace(**vars(recog_args))
recog_args.minlenratio = max(0.0, recog_args.minlenratio - 0.1)
return recognize_with_lexicon(model, x, recog_args, rnnlm=rnnlm, inverse_subword_dict=inverse_subword_dict, inverse_word_dict=inverse_word_dict)
logging.info('total log probability: ' + str(nbest_hyps[0]['score']))
logging.info('normalized log probability: ' + str(nbest_hyps[0]['score'] / len(nbest_hyps[0]['yseq'])))
for hidx in range(min(10, len(nbest_hyps))):
logging.debug("HYP%02d (score=%f): %s" % (hidx + 1, nbest_hyps[hidx]['score'], " ".join([inverse_word_dict[int(wid)] for wid in nbest_hyps[hidx]['wseq'] if wid > 0])))
return nbest_hyps
def recog_with_two_models(args):
"""Decode with the given args.
Args:
args (namespace): The program arguments.
"""
set_deterministic_pytorch(args)
# Weiran: load both models.
model1, train_args1 = load_trained_model(args.model1)
assert isinstance(model1, ASRInterface)
model1.recog_args = args
# Read the rnnlm for model1.
rnnlm1_args = get_model_conf(args.rnnlm1, args.rnnlm1_conf)
subword_dict1 = rnnlm1_args.char_list_dict
rnnlm1 = lm_pytorch.ClassifierWithState(lm_pytorch.RNNLM(
len(subword_dict1), rnnlm1_args.layer, rnnlm1_args.unit))
torch_load(args.rnnlm1, rnnlm1)
rnnlm1.eval()
model2, train_args2 = load_trained_model(args.model2)
assert isinstance(model2, ASRInterface)
model2.recog_args = args
# Read the rnnlm for model2.
rnnlm2_args = get_model_conf(args.rnnlm2, args.rnnlm2_conf)
subword_dict2 = rnnlm2_args.char_list_dict
rnnlm2 = lm_pytorch.ClassifierWithState(lm_pytorch.RNNLM(
len(subword_dict2), rnnlm2_args.layer, rnnlm2_args.unit))
torch_load(args.rnnlm2, rnnlm2)
rnnlm2.eval()
# Weiran: There is only one word-level language model.
word_rnnlm_args = get_model_conf(args.word_rnnlm, args.word_rnnlm_conf)
word_dict = word_rnnlm_args.char_list_dict
word_rnnlm = lm_pytorch.ClassifierWithState(lm_pytorch.RNNLM(
max(word_dict.values()) + 1, word_rnnlm_args.layer, word_rnnlm_args.unit))
torch_load(args.word_rnnlm, word_rnnlm)
word_rnnlm.eval()
    # There are two lexicons for the two models.
rnnlm1 = lm_pytorch.ClassifierWithState(extlm_pytorch.MultiLevelLM_with_lexicon(word_rnnlm.predictor,
rnnlm1.predictor, word_dict, subword_dict1, args.lexicon1_dict, "▁", subwordlm_weight=args.sublm_weight))
    # Char-based system. There are no ambiguities in lexicon2_dict.
with open(args.lexicon2_dict, "r") as fin:
lines = fin.read().rstrip('\n').split("\n")
lexicon2_dict = dict([(l.split()[0], l.split()[1:]) for l in lines])
    # Weiran (04/17/2020): switched from using the subword LM to not using it for model2 (subwordlm_weight=0.0).
rnnlm2 = lm_pytorch.ClassifierWithState(extlm_pytorch.MultiLevelLM_with_lexicon(word_rnnlm.predictor,
rnnlm2.predictor, word_dict, subword_dict2, args.lexicon2_dict, "▁", subwordlm_weight=0.0))
# Read truth file.
if args.truth_file:
with open(args.truth_file, "r") as fin:
retval = fin.read().rstrip('\n')
dict_truth = dict([(l.split()[0], " ".join(l.split()[1:])) for l in retval.split("\n")])
# gpu
if args.ngpu == 1:
gpu_id = list(range(args.ngpu))
logging.info('gpu id: ' + str(gpu_id))
model1.cuda()
model2.cuda()
rnnlm1.cuda()
rnnlm2.cuda()
new_js = {}
recog_converter = CustomConverter(input_context=args.input_context, input_skiprate=args.input_skiprate,
mode="eval", dtype=torch.float32)
recog_data = list(filter(lambda s: len(s) > 0, map(lambda s: s.strip(), args.recog_sets.split(","))))
recog_loader = SpeechDataLoader(recog_data, task="asr", shuffle=False,
precomputed_feats_type=args.precomputed_feats_type,
batch_size=1, spmodel=args.spmodel, token_list=train_args1.char_list,
transform_conf=args.preprocess_conf, train=False, num_workers=args.loader_num_worker,
data_cache_mb=args.loader_cache_mb, num_replicas=args.num_replicas, rank=args.jobid-1,
ensure_equal_parts=False, text_filename=args.text_filename)
inverse_subword_dict1 = dict([(subword_dict1[k], k) for k in subword_dict1])
inverse_subword_dict2 = dict([(subword_dict2[k], k) for k in subword_dict2])
inverse_word_dict = dict([(word_dict[k], k) for k in word_dict])
with torch.no_grad():
        idx = 0
for batch in recog_loader:
idx += 1
name = batch[0]['uttid']
logging.info('(%d/%d) decoding ' + name, idx, len(recog_loader))
feat = _recursive_to(recog_converter(batch), device=torch.device('cpu'))[0]
feat = feat[0]
nbest_hyps = recognize_with_two_transformers(feat, args, model1, rnnlm1, inverse_subword_dict1,
model2, rnnlm2, lexicon2_dict, inverse_subword_dict2, inverse_word_dict)
# pdb.set_trace()
if args.truth_file and name in dict_truth:
truth_text = dict_truth[name]
else:
truth_text = ""
logging.info("groundtruth: %s" % truth_text)
logging.info("Best hypothesis combined:")
for hidx in range(min(10, len(nbest_hyps))):
logging.info("HYP%02d (score=%f, score1=%f, score2=%f): %s" % (hidx+1, nbest_hyps[hidx]['score'], nbest_hyps[hidx]['score1'], nbest_hyps[hidx]['score2'],
" ".join([inverse_word_dict[int(wid)] for wid in nbest_hyps[hidx]['wseq'] if wid > 0])))
# Weiran: prepare dict in order to add decoding results.
# I skipped the input and shape information.
gt_tokens = [int(_) for _ in batch[0]["labels"]]
tmp_dict = {"output": [{"name": "target1", "text": batch[0]["text"],
"tokenid": " ".join([str(_) for _ in gt_tokens]).strip(),
"token": " ".join([train_args1.char_list[_] for _ in gt_tokens]).strip()}],
"utt2spk": batch[0]["speaker"]}
# Weiran: I am adding text in words in the result json.
new_js[name] = add_results_to_json_word(tmp_dict, nbest_hyps, train_args1.char_list, inverse_word_dict, truth_text)
with open(args.result_label, 'wb') as f:
f.write(json.dumps({'utts': new_js}, indent=4, ensure_ascii=False, sort_keys=True).encode('utf_8'))
# Weiran: model1 is phone-BPE based, model2 is char-BPE based.
def recognize_with_two_transformers(x, recog_args, model1, rnnlm1, inverse_subword_dict1, model2, rnnlm2, lexicon2, inverse_subword_dict2, inverse_word_dict):
"""Recognize input speech.
:param ndnarray x: input acoustic feature (B, T, D) or (T, D)
:param Namespace recog_args: argment Namespace contraining options
:param torch.nn.Module rnnlm: language model module
:param dict inverse_subword_dict: index to str
:param dict inverse_word_dict: index to str
:return: N-best decoding results
:rtype: list
"""
# subword_dict1 = dict([(inverse_subword_dict1[k], k) for k in inverse_subword_dict1])
subword_dict2 = dict([(inverse_subword_dict2[k], k) for k in inverse_subword_dict2])
# Search parameters.
beam = recog_args.beam_size
word_bonus = recog_args.word_bonus
ctc_weight = recog_args.ctc_weight
logging.info("using word_bonus=%f" % word_bonus)
logging.info("using ctc_weight=%f" % ctc_weight)
from espnet.nets.ctc_prefix_score import CTCPrefixScore
# Prepare encoder outputs.
enc_output1 = model1.encode(x).unsqueeze(0)
lpz1 = model1.ctc.log_softmax(enc_output1).squeeze(0)
h1 = enc_output1.squeeze(0)
logging.info('input lengths: ' + str(h1.size(0)))
if ctc_weight != 1.0:
ctc1_beam = min(lpz1.shape[-1], int(beam * CTC_SCORING_RATIO))
else:
ctc1_beam = lpz1.shape[-1]
enc_output2 = model2.encode(x).unsqueeze(0)
lpz2 = model2.ctc.log_softmax(enc_output2).squeeze(0)
h2 = enc_output2.squeeze(0)
# Output length.
if recog_args.maxlenratio == 0:
maxlen = h1.shape[0]
else:
maxlen = max(1, int(recog_args.maxlenratio * h1.size(0)))
minlen = int(recog_args.minlenratio * h1.size(0))
logging.info('max output length: ' + str(maxlen))
logging.info('min output length: ' + str(minlen))
# Weiran: prepare sos. y1 is for token index, vy1 is tensor for rnnlm model.
y1 = model1.sos
vy1 = h1.new_zeros(1).long()
y2 = model2.sos
vy2 = h2.new_zeros(1).long()
# initialize hypothesis
hyp = {'score': 0.0, 'score1': 0.0, 'yseq': [y1], 'wseq': [], 'rnnlm1_prev': None,
'score2': 0.0, 'yseq2': [y2], 'wseq2': [], 'rnnlm2_prev': None}
# CTC scoring.
ctc1_prefix_score = CTCPrefixScore(lpz1.detach().numpy(), 0, model1.eos, np)
hyp['ctc1_state_prev'] = ctc1_prefix_score.initial_state()
hyp['ctc1_score_prev'] = 0.0
ctc2_prefix_score = CTCPrefixScore(lpz2.detach().numpy(), 0, model2.eos, np)
hyp['ctc2_state_prev'] = ctc2_prefix_score.initial_state()
hyp['ctc2_score_prev'] = 0.0
# Main loop.
hyps = [hyp]
ended_hyps = []
# Weiran: word-synchronous decoding.
import six
for i in six.moves.range(maxlen):
logging.debug('position ' + str(i))
hyps_best_kept = []
for hyp in hyps:
logging.debug("\n&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&\n")
logging.debug("Extending hypothesis %s" % (" ".join([inverse_subword_dict1[int(tid)] for tid in hyp['yseq']])))
# Weiran: use model1 to propose new word hypothesis.
# vy1.unsqueeze(1)
# Weiran: get the last hypothesized token.
vy1[0] = hyp['yseq'][i]
# get nbest local scores and their ids
ys_mask1 = subsequent_mask(i + 1).unsqueeze(0)
ys1 = torch.tensor(hyp['yseq']).unsqueeze(0)
local_att_scores = model1.decoder.forward_one_step(ys1, ys_mask1, enc_output1)[0]
logging.debug("\nUsing rnnlm1 ...")
RNNLM_STATE_LIST = rnnlm1.predict(hyp['rnnlm1_prev'], vy1)
# Weiran: if the list has more than one element, we need to expand the set of hypothesis.
for rnnlm1_state, local_lm_scores1, word_output1 in RNNLM_STATE_LIST:
logging.debug("\nFor rnnlm1 word_output1=%d" % word_output1)
local_scores = local_att_scores + recog_args.lm_weight * local_lm_scores1
local_best_scores, local_best_ids = torch.topk(local_scores, ctc1_beam, dim=1)
ctc_scores, ctc_states = ctc1_prefix_score(hyp['yseq'], local_best_ids[0], hyp['ctc1_state_prev'])
local_scores = (1.0 - ctc_weight) * local_att_scores[:, local_best_ids[0]] \
+ ctc_weight * torch.from_numpy(ctc_scores - hyp['ctc1_score_prev']) \
+ recog_args.lm_weight * local_lm_scores1[:, local_best_ids[0]]
local_best_scores, joint_best_ids = torch.topk(local_scores, beam, dim=1)
local_best_ids = local_best_ids[:, joint_best_ids[0]]
# Prepare for model2, since it may go through a loop when the word boundary is met by model1.
yseq2 = list(hyp['yseq2'])
wseq2 = list(hyp['wseq2'])
score2 = hyp['score2']
ctc2_state_prev = hyp['ctc2_state_prev']
ctc2_score_prev = hyp['ctc2_score_prev']
rnnlm2_state = hyp['rnnlm2_prev']
if word_output1 >= 0:
if inverse_word_dict[word_output1] == "<unk>":
# Weiran (05/08/2020): shall consider changing this to <unk> of model2, whose ID is 1.
# The choice below is trying to kill the hypothesis.
tokens_for_model2 = [1] # [model2.eos]
else:
# Weiran: use scores from model2 and rnnlm2.
output_word_text = inverse_word_dict[word_output1]
logging.debug("model2 is consuming word output: %s (%d)" % (output_word_text, word_output1))
if ("." in output_word_text) and (output_word_text not in lexicon2):
logging.debug("handling abbreviation %s" % output_word_text)
fields = output_word_text.split("_")
fields_remove_dot = [x.replace(".", "") for x in fields]
tokens_for_model2 = []
for x in fields_remove_dot:
tokens_for_model2.extend([subword_dict2[z] if z in subword_dict2 else 1 for z in lexicon2[x]])
else:
tokens_for_model2 = [subword_dict2[x] if x in subword_dict2 else 1 for x in lexicon2[output_word_text]]
#logging.debug("model2 is expecting tokens:")
#logging.debug(" ".join(["%s (%d)" % (inverse_subword_dict2[x], x) for x in tokens_for_model2]))
for j in range(len(tokens_for_model2)):
logging.debug("Using rnnlm2 ...")
# vy2.unsqueeze(1)
vy2[0] = yseq2[-1]
# Weiran: note that rnnlm2_state is updated.
rnnlm2_state, local_lm_scores2, word_output2 = rnnlm2.predict(rnnlm2_state, vy2)[0]
# Accept the new token.
new_token = tokens_for_model2[j]
ys_mask2 = subsequent_mask(len(yseq2)).unsqueeze(0)
ys2 = torch.tensor(yseq2).unsqueeze(0)
local_att_scores2 = model2.decoder.forward_one_step(ys2, ys_mask2, enc_output2)[0]
ctc2_score, ctc2_state = ctc2_prefix_score(yseq2, [new_token], ctc2_state_prev)
score2 += (1.0 - ctc_weight) * float(local_att_scores2[:, new_token]) \
+ ctc_weight * (ctc2_score[0] - ctc2_score_prev) \
+ recog_args.lm_weight * float(local_lm_scores2[:, new_token])
ctc2_score_prev = ctc2_score[0]
ctc2_state_prev = ctc2_state[0]
# Weiran: update token list and word list.
yseq2.append(new_token)
wseq2.append(word_output2)
if True: # word_output1 >= 0:
logging.debug("\n================================================\nHypothesis in model1:")
logging.debug(hyp['yseq'])
logging.debug(" ".join([inverse_subword_dict1[int(tid)] for tid in hyp['yseq']]))
logging.debug("model1 current word hypothesis:")
logging.debug(hyp['wseq'])
logging.debug(" ".join([inverse_word_dict[int(wid)] for wid in hyp['wseq'] if wid > 0]))
if word_output1 >= 0:
logging.debug("outputing word: %s (%d)" % (inverse_word_dict[int(word_output1)], int(word_output1)))
else:
logging.debug("Not outputing word.")
logging.debug("current score1=%f" % hyp['score1'])
logging.debug("acc. clm score=%f" % rnnlm1_state[-1])
logging.debug("************************************************\nHypothesis in model2:")
logging.debug(yseq2)
logging.debug(" ".join([inverse_subword_dict2[int(tid)] for tid in yseq2]))
logging.debug("model2 current word hypothesis:")
logging.debug(wseq2)
logging.debug(" ".join([inverse_word_dict[int(wid)] for wid in wseq2 if wid > 0]))
logging.debug("================================================\n")
# Going over the beams.
for j in six.moves.range(beam):
new_hyp = {}
new_hyp['score1'] = hyp['score1'] + float(local_best_scores[0, j])
new_hyp['yseq'] = list(hyp['yseq']) + [int(local_best_ids[0, j])]
new_hyp['wseq'] = list(hyp['wseq']) + [word_output1]
new_hyp['rnnlm1_prev'] = rnnlm1_state
new_hyp['ctc1_state_prev'] = ctc_states[joint_best_ids[0, j]]
new_hyp['ctc1_score_prev'] = ctc_scores[joint_best_ids[0, j]]
new_hyp['score2'] = score2
new_hyp['yseq2'] = yseq2
new_hyp['wseq2'] = wseq2
new_hyp['rnnlm2_prev'] = rnnlm2_state
new_hyp['ctc2_state_prev'] = ctc2_state_prev
new_hyp['ctc2_score_prev'] = ctc2_score_prev
if word_output1 >= 0:
new_hyp['score'] = hyp['score1'] * (1 - recog_args.model2_weight) + new_hyp['score2'] * recog_args.model2_weight + float(local_best_scores[0, j])
else:
new_hyp['score'] = hyp['score'] + float(local_best_scores[0, j])
if word_output1 >= 0:
new_hyp['score'] += word_bonus
new_hyp['score1'] += word_bonus
new_hyp['score2'] += word_bonus
# will be (2 x beam) hyps at most
hyps_best_kept.append(new_hyp)
hyps_best_kept = sorted(hyps_best_kept, key=lambda x: x['score'], reverse=True)[:beam]
        # Finished expanding all hypotheses at position i.
        # sort and get nbest
        hyps = hyps_best_kept
        logging.debug('number of pruned hypotheses: ' + str(len(hyps)))
if inverse_subword_dict1 is not None:
logging.debug(
'best hypo: ' + ''.join([inverse_subword_dict1[int(x)] for x in hyps[0]['yseq'][1:]]))
# add eos in the final loop to avoid that there are no ended hyps
if i == maxlen - 1:
logging.info('adding <eos> in the last position in the loop')
for hyp in hyps:
if not hyp['yseq'][-1] == model1.eos:
hyp['yseq'].append(model1.eos)
        # add ended hypotheses to a final list, and remove them from the current hypotheses
        # (note: the number of remaining hyps may then drop below beam)
remained_hyps = []
for hyp in hyps:
if hyp['yseq'][-1] == model1.eos:
# only store the sequence that has more than minlen outputs
if len(hyp['yseq']) > minlen:
if rnnlm1: # Word LM needs to add final <eos> score
RNNLM_FINAL_LIST = rnnlm1.final(hyp['rnnlm1_prev'])
for rnnlm1_final_score, word_output1 in RNNLM_FINAL_LIST:
logging.debug("\nFor rnnlm1 word_output1=%d" % word_output1)
yseq2 = list(hyp['yseq2'])
wseq2 = list(hyp['wseq2'])
score2 = hyp['score2']
rnnlm2_state = hyp['rnnlm2_prev']
ctc2_state_prev = hyp['ctc2_state_prev']
ctc2_score_prev = hyp['ctc2_score_prev']
# We have now restricted search space to a size of ctc_beam.
if word_output1 >= 0:
if inverse_word_dict[word_output1] == "<unk>":
tokens_for_model2 = [1] # [model2.eos]
else:
# Weiran: use scores from model2 and rnnlm2.
output_word_text = inverse_word_dict[word_output1]
logging.debug("model2 is consuming word output: %s (%d)" % (output_word_text, word_output1))
if ("." in output_word_text) and (output_word_text not in lexicon2):
logging.debug("handling abbreviation %s" % output_word_text)
fields = output_word_text.split("_")
fields_remove_dot = [x.replace(".", "") for x in fields]
tokens_for_model2 = []
for x in fields_remove_dot:
tokens_for_model2.extend([subword_dict2[z] if z in subword_dict2 else 1 for z in lexicon2[x]])
else:
tokens_for_model2 = [subword_dict2[x] if x in subword_dict2 else 1 for x in lexicon2[output_word_text]]
# Weiran: force model2 to accept eos.
tokens_for_model2.append(model2.eos)
# logging.debug("model2 is expecting tokens:")
# logging.debug(" ".join(["%s (%d)" % (inverse_subword_dict2[x], x) for x in tokens_for_model2]))
for j in range(len(tokens_for_model2)):
logging.debug("Using rnnlm2 ...")
# vy2.unsqueeze(1)
vy2[0] = yseq2[-1]
# Weiran: note that rnnlm2_state is updated.
rnnlm2_state, local_lm_scores2, word_output2 = rnnlm2.predict(rnnlm2_state, vy2)[0]
new_token = tokens_for_model2[j]
ys_mask2 = subsequent_mask(len(yseq2)).unsqueeze(0)
ys2 = torch.tensor(yseq2).unsqueeze(0)
local_att_scores2 = model2.decoder.forward_one_step(ys2, ys_mask2, enc_output2)[0]
ctc2_score, ctc2_state = ctc2_prefix_score(yseq2, [new_token], ctc2_state_prev)
score2 += (1.0 - ctc_weight) * float(local_att_scores2[:, new_token]) \
+ ctc_weight * (ctc2_score[0] - ctc2_score_prev) \
+ recog_args.lm_weight * float(local_lm_scores2[:, new_token])
ctc2_score_prev = ctc2_score[0]
ctc2_state_prev = ctc2_state[0]
yseq2.append(new_token)
wseq2.append(word_output2)
rnnlm2_final_score, word_output2 = rnnlm2.final(rnnlm2_state)[0]
score2 += recog_args.lm_weight * float(rnnlm2_final_score)
wseq2.append(word_output2)
if True:
logging.debug("\n================================================\nHypothesis in model1 ending:")
logging.debug(hyp['yseq'])
logging.debug(" ".join([inverse_subword_dict1[int(tid)] for tid in hyp['yseq']]))
logging.debug("current word hypothesis:")
logging.debug(hyp['wseq'])
logging.debug(" ".join([inverse_word_dict[int(wid)] for wid in hyp['wseq'] if wid > 0]))
if word_output1 >= 0:
logging.debug("outputing word: %s" % inverse_word_dict[int(word_output1)])
logging.debug("current score1=%f" % hyp['score1'])
logging.debug("adding last word+end score=%f" % (recog_args.lm_weight * rnnlm1_final_score))
logging.debug("************************************************\nHypothesis in model2:")
logging.debug(yseq2)
logging.debug(" ".join([inverse_subword_dict2[int(tid)] for tid in yseq2]))
logging.debug("model2 current word hypothesis:")
logging.debug(wseq2)
logging.debug(" ".join([inverse_word_dict[int(wid)] for wid in wseq2 if wid > 0]))
logging.debug("================================================")
new_hyp = {}
new_hyp['score1'] = hyp['score1'] + recog_args.lm_weight * rnnlm1_final_score
new_hyp['yseq'] = hyp['yseq']
new_hyp['wseq'] = list(hyp['wseq']) + [word_output1]
new_hyp['score2'] = score2
new_hyp['yseq2'] = yseq2
new_hyp['wseq2'] = wseq2
# Final score combines complete scores from both models.
new_hyp['score'] = new_hyp['score1'] * (1 - recog_args.model2_weight) + new_hyp['score2'] * recog_args.model2_weight
if word_output1 >= 0:
new_hyp['score'] += word_bonus
new_hyp['score1'] += word_bonus
new_hyp['score2'] += word_bonus
ended_hyps.append(new_hyp)
else:
remained_hyps.append(hyp)
# end detection
from espnet.nets.e2e_asr_common import end_detect
if end_detect(ended_hyps, i) and recog_args.maxlenratio == 0.0:
logging.info('end detected at %d' % i)
break
hyps = remained_hyps
if len(hyps) > 0:
            logging.debug('remaining hypotheses: ' + str(len(hyps)))
else:
logging.info('no hypothesis. Finish decoding.')
break
if inverse_subword_dict1 is not None:
for hyp in hyps:
logging.debug(
'hypo: ' + ''.join([inverse_subword_dict1[int(x)] for x in hyp['yseq'][1:]]))
        logging.debug('number of ended hypotheses: ' + str(len(ended_hyps)))
# Finishing position i.
# Finished label-synchronous decoding.
nbest_hyps = sorted(ended_hyps, key=lambda x: x['score'], reverse=True)[:min(len(ended_hyps), recog_args.nbest)]
# check number of hypothesis
if len(nbest_hyps) == 0:
        logging.warning('there are no N-best results; performing recognition again with a smaller minlenratio.')
# should copy because Namespace will be overwritten globally
from argparse import Namespace
recog_args = Namespace(**vars(recog_args))
recog_args.minlenratio = max(0.0, recog_args.minlenratio - 0.1)
return recognize_with_two_transformers(x, recog_args, model1, rnnlm1, inverse_subword_dict1, model2, rnnlm2, lexicon2, inverse_subword_dict2, inverse_word_dict)
logging.info('total log probability for model1: ' + str(nbest_hyps[0]['score1']))
logging.info('normalized log probability for model1: ' + str(nbest_hyps[0]['score1'] / len(nbest_hyps[0]['yseq'])))
logging.info('total log probability for model2: ' + str(nbest_hyps[0]['score2']))
logging.info('normalized log probability for model2: ' + str(nbest_hyps[0]['score2'] / len(nbest_hyps[0]['yseq2'])))
return nbest_hyps
| [
"torch.distributed.get_world_size",
"torch.device",
"torch.is_tensor",
"torch.save",
"torch.no_grad",
"torch.from_numpy",
"torch.sum",
"torch.cuda.set_device",
"torch.cuda.is_available",
"torch.tensor",
"torch.load",
"torch.distributed.get_rank",
"torch.distributed.barrier",
"torch.topk"
] | 1.0.1 | salesforce/TransformerASR | 1de0c16573bc6f47c0b16d3f07af0f670db8f30f |
1.0 | """Vanilla Policy Gradient (REINFORCE)."""
import collections
import copy
from dowel import tabular
import numpy as np
import torch
import torch.nn.functional as F
from garage import log_performance, TrajectoryBatch
from garage.misc import tensor_utils as tu
from garage.np.algos.rl_algorithm import RLAlgorithm
from garage.sampler import RaySampler
from garage.torch import (compute_advantages, filter_valids, pad_to_last)
from garage.torch.optimizers import OptimizerWrapper
class VPG(RLAlgorithm):
"""Vanilla Policy Gradient (REINFORCE).
    VPG, also known as REINFORCE, trains a stochastic policy in an on-policy way.
Args:
env_spec (garage.envs.EnvSpec): Environment specification.
policy (garage.torch.policies.Policy): Policy.
value_function (garage.torch.value_functions.ValueFunction): The value
function.
policy_optimizer (garage.torch.optimizer.OptimizerWrapper): Optimizer
for policy.
vf_optimizer (garage.torch.optimizer.OptimizerWrapper): Optimizer for
value function.
max_path_length (int): Maximum length of a single rollout.
num_train_per_epoch (int): Number of train_once calls per epoch.
discount (float): Discount.
gae_lambda (float): Lambda used for generalized advantage
estimation.
center_adv (bool): Whether to rescale the advantages
so that they have mean 0 and standard deviation 1.
positive_adv (bool): Whether to shift the advantages
so that they are always positive. When used in
conjunction with center_adv the advantages will be
standardized before shifting.
policy_ent_coeff (float): The coefficient of the policy entropy.
Setting it to zero would mean no entropy regularization.
use_softplus_entropy (bool): Whether to estimate the softmax
distribution of the entropy to prevent the entropy from being
negative.
stop_entropy_gradient (bool): Whether to stop the entropy gradient.
entropy_method (str): A string from: 'max', 'regularized',
'no_entropy'. The type of entropy method to use. 'max' adds the
dense entropy to the reward for each time step. 'regularized' adds
the mean entropy to the surrogate objective. See
https://arxiv.org/abs/1805.00909 for more details.
"""
def __init__(
self,
env_spec,
policy,
value_function,
policy_optimizer=None,
vf_optimizer=None,
max_path_length=500,
num_train_per_epoch=1,
discount=0.99,
gae_lambda=1,
center_adv=True,
positive_adv=False,
policy_ent_coeff=0.0,
use_softplus_entropy=False,
stop_entropy_gradient=False,
entropy_method='no_entropy',
):
self.discount = discount
self.policy = policy
self.max_path_length = max_path_length
self._value_function = value_function
self._gae_lambda = gae_lambda
self._center_adv = center_adv
self._positive_adv = positive_adv
self._policy_ent_coeff = policy_ent_coeff
self._use_softplus_entropy = use_softplus_entropy
self._stop_entropy_gradient = stop_entropy_gradient
self._entropy_method = entropy_method
self._n_samples = num_train_per_epoch
self._env_spec = env_spec
self._maximum_entropy = (entropy_method == 'max')
        self._entropy_regularized = (entropy_method == 'regularized')
self._check_entropy_configuration(entropy_method, center_adv,
stop_entropy_gradient,
policy_ent_coeff)
self._episode_reward_mean = collections.deque(maxlen=100)
self.sampler_cls = RaySampler
if policy_optimizer:
self._policy_optimizer = policy_optimizer
else:
self._policy_optimizer = OptimizerWrapper(torch.optim.Adam, policy)
if vf_optimizer:
self._vf_optimizer = vf_optimizer
else:
self._vf_optimizer = OptimizerWrapper(torch.optim.Adam,
value_function)
self._old_policy = copy.deepcopy(self.policy)
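        # Keep a copy of the policy (refreshed after every update) so the KL divergence between the old and new policy can be logged.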
@staticmethod
def _check_entropy_configuration(entropy_method, center_adv,
stop_entropy_gradient, policy_ent_coeff):
if entropy_method not in ('max', 'regularized', 'no_entropy'):
raise ValueError('Invalid entropy_method')
if entropy_method == 'max':
if center_adv:
raise ValueError('center_adv should be False when '
'entropy_method is max')
if not stop_entropy_gradient:
raise ValueError('stop_gradient should be True when '
'entropy_method is max')
if entropy_method == 'no_entropy':
if policy_ent_coeff != 0.0:
raise ValueError('policy_ent_coeff should be zero '
'when there is no entropy method')
def train_once(self, itr, paths):
"""Train the algorithm once.
Args:
itr (int): Iteration number.
paths (list[dict]): A list of collected paths.
Returns:
numpy.float64: Calculated mean value of undiscounted returns.
"""
obs, actions, rewards, returns, valids, baselines = \
self.process_samples(paths)
if self._maximum_entropy:
policy_entropies = self._compute_policy_entropy(obs)
rewards += self._policy_ent_coeff * policy_entropies
obs_flat = torch.cat(filter_valids(obs, valids))
actions_flat = torch.cat(filter_valids(actions, valids))
rewards_flat = torch.cat(filter_valids(rewards, valids))
returns_flat = torch.cat(filter_valids(returns, valids))
advs_flat = self._compute_advantage(rewards, valids, baselines)
with torch.no_grad():
policy_loss_before = self._compute_loss_with_adv(
obs_flat, actions_flat, rewards_flat, advs_flat)
vf_loss_before = self._value_function.compute_loss(
obs_flat, returns_flat)
kl_before = self._compute_kl_constraint(obs)
self._train(obs_flat, actions_flat, rewards_flat, returns_flat,
advs_flat)
with torch.no_grad():
policy_loss_after = self._compute_loss_with_adv(
obs_flat, actions_flat, rewards_flat, advs_flat)
vf_loss_after = self._value_function.compute_loss(
obs_flat, returns_flat)
kl_after = self._compute_kl_constraint(obs)
policy_entropy = self._compute_policy_entropy(obs)
with tabular.prefix(self.policy.name):
tabular.record('/LossBefore', policy_loss_before.item())
tabular.record('/LossAfter', policy_loss_after.item())
tabular.record('/dLoss',
(policy_loss_before - policy_loss_after).item())
tabular.record('/KLBefore', kl_before.item())
tabular.record('/KL', kl_after.item())
tabular.record('/Entropy', policy_entropy.mean().item())
with tabular.prefix(self._value_function.name):
tabular.record('/LossBefore', vf_loss_before.item())
tabular.record('/LossAfter', vf_loss_after.item())
tabular.record('/dLoss',
vf_loss_before.item() - vf_loss_after.item())
self._old_policy.load_state_dict(self.policy.state_dict())
undiscounted_returns = log_performance(
itr,
TrajectoryBatch.from_trajectory_list(self._env_spec, paths),
discount=self.discount)
return np.mean(undiscounted_returns)
def train(self, runner):
"""Obtain samplers and start actual training for each epoch.
Args:
runner (LocalRunner): LocalRunner is passed to give algorithm
the access to runner.step_epochs(), which provides services
such as snapshotting and sampler control.
Returns:
float: The average return in last epoch cycle.
"""
last_return = None
for _ in runner.step_epochs():
for _ in range(self._n_samples):
runner.step_path = runner.obtain_samples(runner.step_itr)
last_return = self.train_once(runner.step_itr,
runner.step_path)
runner.step_itr += 1
return last_return
def _train(self, obs, actions, rewards, returns, advs):
r"""Train the policy and value function with minibatch.
Args:
obs (torch.Tensor): Observation from the environment with shape
:math:`(N, O*)`.
actions (torch.Tensor): Actions fed to the environment with shape
:math:`(N, A*)`.
rewards (torch.Tensor): Acquired rewards with shape :math:`(N, )`.
returns (torch.Tensor): Acquired returns with shape :math:`(N, )`.
advs (torch.Tensor): Advantage value at each step with shape
:math:`(N, )`.
"""
for dataset in self._policy_optimizer.get_minibatch(
obs, actions, rewards, advs):
self._train_policy(*dataset)
for dataset in self._vf_optimizer.get_minibatch(obs, returns):
self._train_value_function(*dataset)
def _train_policy(self, obs, actions, rewards, advantages):
r"""Train the policy.
Args:
obs (torch.Tensor): Observation from the environment
with shape :math:`(N, O*)`.
actions (torch.Tensor): Actions fed to the environment
with shape :math:`(N, A*)`.
rewards (torch.Tensor): Acquired rewards
with shape :math:`(N, )`.
advantages (torch.Tensor): Advantage value at each step
with shape :math:`(N, )`.
Returns:
torch.Tensor: Calculated mean scalar value of policy loss (float).
"""
self._policy_optimizer.zero_grad()
loss = self._compute_loss_with_adv(obs, actions, rewards, advantages)
loss.backward()
self._policy_optimizer.step()
return loss
def _train_value_function(self, obs, returns):
r"""Train the value function.
Args:
obs (torch.Tensor): Observation from the environment
with shape :math:`(N, O*)`.
returns (torch.Tensor): Acquired returns
with shape :math:`(N, )`.
Returns:
torch.Tensor: Calculated mean scalar value of value function loss
(float).
"""
self._vf_optimizer.zero_grad()
loss = self._value_function.compute_loss(obs, returns)
loss.backward()
self._vf_optimizer.step()
return loss
def _compute_loss(self, obs, actions, rewards, valids, baselines):
r"""Compute mean value of loss.
Notes: P is the maximum path length (self.max_path_length)
Args:
obs (torch.Tensor): Observation from the environment
with shape :math:`(N, P, O*)`.
actions (torch.Tensor): Actions fed to the environment
with shape :math:`(N, P, A*)`.
rewards (torch.Tensor): Acquired rewards
with shape :math:`(N, P)`.
            valids (list[int]): Numbers of valid steps in each path.
baselines (torch.Tensor): Value function estimation at each step
with shape :math:`(N, P)`.
Returns:
torch.Tensor: Calculated negative mean scalar value of
objective (float).
"""
obs_flat = torch.cat(filter_valids(obs, valids))
actions_flat = torch.cat(filter_valids(actions, valids))
rewards_flat = torch.cat(filter_valids(rewards, valids))
advantages_flat = self._compute_advantage(rewards, valids, baselines)
return self._compute_loss_with_adv(obs_flat, actions_flat,
rewards_flat, advantages_flat)
def _compute_loss_with_adv(self, obs, actions, rewards, advantages):
r"""Compute mean value of loss.
Args:
obs (torch.Tensor): Observation from the environment
with shape :math:`(N \dot [T], O*)`.
actions (torch.Tensor): Actions fed to the environment
with shape :math:`(N \dot [T], A*)`.
rewards (torch.Tensor): Acquired rewards
with shape :math:`(N \dot [T], )`.
advantages (torch.Tensor): Advantage value at each step
with shape :math:`(N \dot [T], )`.
Returns:
torch.Tensor: Calculated negative mean scalar value of objective.
"""
objectives = self._compute_objective(advantages, obs, actions, rewards)
        if self._entropy_regularized:
policy_entropies = self._compute_policy_entropy(obs)
objectives += self._policy_ent_coeff * policy_entropies
return -objectives.mean()
def _compute_advantage(self, rewards, valids, baselines):
r"""Compute mean value of loss.
Notes: P is the maximum path length (self.max_path_length)
Args:
rewards (torch.Tensor): Acquired rewards
with shape :math:`(N, P)`.
            valids (list[int]): Numbers of valid steps in each path.
baselines (torch.Tensor): Value function estimation at each step
with shape :math:`(N, P)`.
Returns:
torch.Tensor: Calculated advantage values given rewards and
baselines with shape :math:`(N \dot [T], )`.
"""
advantages = compute_advantages(self.discount, self._gae_lambda,
self.max_path_length, baselines,
rewards)
advantage_flat = torch.cat(filter_valids(advantages, valids))
if self._center_adv:
            # standardize advantages to zero mean and unit standard deviation
            means = advantage_flat.mean()
            stds = advantage_flat.std()
            advantage_flat = (advantage_flat - means) / (stds + 1e-8)
if self._positive_adv:
advantage_flat -= advantage_flat.min()
return advantage_flat
def _compute_kl_constraint(self, obs):
r"""Compute KL divergence.
Compute the KL divergence between the old policy distribution and
current policy distribution.
Notes: P is the maximum path length (self.max_path_length)
Args:
obs (torch.Tensor): Observation from the environment
with shape :math:`(N, P, O*)`.
Returns:
torch.Tensor: Calculated mean scalar value of KL divergence
(float).
"""
with torch.no_grad():
old_dist = self._old_policy(obs)[0]
new_dist = self.policy(obs)[0]
kl_constraint = torch.distributions.kl.kl_divergence(
old_dist, new_dist)
return kl_constraint.mean()
def _compute_policy_entropy(self, obs):
r"""Compute entropy value of probability distribution.
Notes: P is the maximum path length (self.max_path_length)
Args:
obs (torch.Tensor): Observation from the environment
with shape :math:`(N, P, O*)`.
Returns:
torch.Tensor: Calculated entropy values given observation
with shape :math:`(N, P)`.
"""
if self._stop_entropy_gradient:
with torch.no_grad():
policy_entropy = self.policy(obs)[0].entropy()
else:
policy_entropy = self.policy(obs)[0].entropy()
# This prevents entropy from becoming negative for small policy std
if self._use_softplus_entropy:
policy_entropy = F.softplus(policy_entropy)
return policy_entropy
def _compute_objective(self, advantages, obs, actions, rewards):
r"""Compute objective value.
Args:
advantages (torch.Tensor): Advantage value at each step
with shape :math:`(N \dot [T], )`.
obs (torch.Tensor): Observation from the environment
with shape :math:`(N \dot [T], O*)`.
actions (torch.Tensor): Actions fed to the environment
with shape :math:`(N \dot [T], A*)`.
rewards (torch.Tensor): Acquired rewards
with shape :math:`(N \dot [T], )`.
Returns:
torch.Tensor: Calculated objective values
with shape :math:`(N \dot [T], )`.
"""
del rewards
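        # REINFORCE surrogate: log-likelihood of the taken actions weighted by the advantage estimates.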
log_likelihoods = self.policy(obs)[0].log_prob(actions)
return log_likelihoods * advantages
def process_samples(self, paths):
r"""Process sample data based on the collected paths.
Notes: P is the maximum path length (self.max_path_length)
Args:
paths (list[dict]): A list of collected paths
Returns:
torch.Tensor: The observations of the environment
with shape :math:`(N, P, O*)`.
torch.Tensor: The actions fed to the environment
with shape :math:`(N, P, A*)`.
            torch.Tensor: The acquired rewards with shape :math:`(N, P)`.
            torch.Tensor: The discounted returns with shape :math:`(N, P)`.
            list[int]: Numbers of valid steps in each path.
torch.Tensor: Value function estimation at each step
with shape :math:`(N, P)`.
"""
valids = torch.Tensor([len(path['actions']) for path in paths]).int()
obs = torch.stack([
pad_to_last(path['observations'],
total_length=self.max_path_length,
axis=0) for path in paths
])
actions = torch.stack([
pad_to_last(path['actions'],
total_length=self.max_path_length,
axis=0) for path in paths
])
rewards = torch.stack([
pad_to_last(path['rewards'], total_length=self.max_path_length)
for path in paths
])
returns = torch.stack([
pad_to_last(tu.discount_cumsum(path['rewards'],
self.discount).copy(),
total_length=self.max_path_length) for path in paths
])
with torch.no_grad():
baselines = self._value_function(obs)
return obs, actions, rewards, returns, valids, baselines
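# Note: pad_to_last (a garage torch utility) is assumed here to zero-pad each path's arrays
# up to max_path_length, so every stacked tensor above has a fixed (N, P, ...) shape.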
| [
"torch.distributions.kl.kl_divergence",
"torch.no_grad",
"torch.nn.functional.softplus"
] | 1.0.0 | maciejwolczyk/garage-1 | c78843115a51d63f86bca0f3518d8f68fb81bce3 |
1.4 | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from audio_separation.rl.ppo.ddppo_utils import distributed_mean_and_var
EPS_PPO = 1e-5
class PPO(nn.Module):
def __init__(
self,
actor_critic,
clip_param,
ppo_epoch,
num_mini_batch,
value_loss_coef,
bin_separation_loss_coef,
mono_conversion_loss_coef,
entropy_coef,
lr_pol=None,
lr_sep=None,
eps=None,
max_grad_norm=None,
freeze_passive_separators=False,
use_clipped_value_loss=True,
use_normalized_advantage=True,
):
super().__init__()
self.actor_critic = actor_critic
self.clip_param = clip_param
self.ppo_epoch = ppo_epoch
self.num_mini_batch = num_mini_batch
self.value_loss_coef = value_loss_coef
self.bin_separation_loss_coef = bin_separation_loss_coef
self.mono_conversion_loss_coef = mono_conversion_loss_coef
self.entropy_coef = entropy_coef
self.max_grad_norm = max_grad_norm
self.use_clipped_value_loss = use_clipped_value_loss
self.use_normalized_advantage = use_normalized_advantage
self.freeze_passive_separators = freeze_passive_separators
pol_params = list(actor_critic.pol_net.parameters()) + list(actor_critic.action_dist.parameters()) +\
list(actor_critic.critic.parameters())
self.optimizer_pol = optim.Adam(pol_params, lr=lr_pol, eps=eps)
sep_params = list(actor_critic.binSep_enc.parameters()) + list(actor_critic.binSep_dec.parameters()) +\
list(actor_critic.bin2mono_enc.parameters()) + list(actor_critic.bin2mono_dec.parameters()) +\
list(actor_critic.acoustic_mem.parameters())
self.optimizer_sep = optim.Adam(sep_params, lr=lr_sep, eps=eps)
self.device = next(actor_critic.parameters()).device
def load_pretrained_passive_separators(self, state_dict):
# loading pretrained weights from passive binaural separator
for name in self.actor_critic.binSep_enc.state_dict():
self.actor_critic.binSep_enc.state_dict()[name].copy_(state_dict["actor_critic.binSep_enc." + name])
for name in self.actor_critic.binSep_dec.state_dict():
self.actor_critic.binSep_dec.state_dict()[name].copy_(state_dict["actor_critic.binSep_dec." + name])
# loading pretrained weights from passive bin2mono separator
for name in self.actor_critic.bin2mono_enc.state_dict():
self.actor_critic.bin2mono_enc.state_dict()[name].copy_(state_dict["actor_critic.bin2mono_enc." + name])
for name in self.actor_critic.bin2mono_dec.state_dict():
self.actor_critic.bin2mono_dec.state_dict()[name].copy_(state_dict["actor_critic.bin2mono_dec." + name])
def forward(self, *x):
raise NotImplementedError
def get_advantages(self, rollouts_pol):
advantages = rollouts_pol.returns[:-1] - rollouts_pol.value_preds[:-1]
if not self.use_normalized_advantage:
return advantages
return (advantages - advantages.mean()) / (advantages.std() + EPS_PPO)
def update_pol(self, rollouts_pol):
advantages = self.get_advantages(rollouts_pol)
value_loss_epoch = 0
action_loss_epoch = 0
dist_entropy_epoch = 0
for e in range(self.ppo_epoch):
data_generator = rollouts_pol.recurrent_generator(
advantages, self.num_mini_batch
)
for sample in data_generator:
(
obs_batch,
recurrent_hidden_states_pol_batch,
pred_binSepMasks_batch,
pred_mono_batch,
pred_monoFromMem_batch,
value_preds_batch,
return_batch,
adv_targ,
actions_batch,
old_action_log_probs_batch,
masks_batch,
) = sample
(
values,
action_log_probs,
dist_entropy,
_,
) = self.actor_critic.evaluate_actions(
obs_batch,
recurrent_hidden_states_pol_batch,
masks_batch,
actions_batch,
pred_binSepMasks=pred_binSepMasks_batch,
pred_mono=pred_mono_batch,
pred_monoFromMem=pred_monoFromMem_batch,
)
ratio = torch.exp(
action_log_probs - old_action_log_probs_batch
)
surr1 = ratio * adv_targ
surr2 = (
torch.clamp(
ratio, 1.0 - self.clip_param, 1.0 + self.clip_param
)
* adv_targ
)
action_loss = -torch.min(surr1, surr2).mean()
if self.use_clipped_value_loss:
value_pred_clipped = value_preds_batch + (
values - value_preds_batch
).clamp(-self.clip_param, self.clip_param)
value_losses = (values - return_batch).pow(2)
value_losses_clipped = (
value_pred_clipped - return_batch
).pow(2)
value_loss = (
0.5
* torch.max(value_losses, value_losses_clipped).mean()
)
else:
value_loss = 0.5 * (return_batch - values).pow(2).mean()
self.optimizer_pol.zero_grad()
total_loss = (
value_loss * self.value_loss_coef
+ action_loss
- dist_entropy * self.entropy_coef
)
self.before_backward(total_loss)
total_loss.backward()
self.after_backward(total_loss)
self.before_step_pol()
self.optimizer_pol.step()
self.after_step()
action_loss_epoch += action_loss.item()
value_loss_epoch += value_loss.item()
dist_entropy_epoch += dist_entropy.item()
num_updates = self.ppo_epoch * self.num_mini_batch
action_loss_epoch /= num_updates
value_loss_epoch /= num_updates
dist_entropy_epoch /= num_updates
return value_loss_epoch, action_loss_epoch, dist_entropy_epoch
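# Illustrative check of the clipped surrogate used above (hypothetical numbers):
#   ratio = torch.tensor([1.5]); adv = torch.tensor([1.0]); clip = 0.2
#   surr1 = ratio * adv                             # tensor([1.5000])
#   surr2 = ratio.clamp(1 - clip, 1 + clip) * adv   # tensor([1.2000])
#   -torch.min(surr1, surr2).mean()                 # tensor(-1.2000): the gain is capped by the clip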
def update_sep(self, rollouts_sep):
bin_loss_epoch = 0.
mono_loss_epoch = 0.
monoFromMem_loss_epoch = 0.
for e in range(self.ppo_epoch):
data_generator = rollouts_sep.recurrent_generator(self.num_mini_batch)
for sample in data_generator:
(
obs_batch,
pred_monoFromMem_batch,
prev_pred_monoFromMem_batch,
masks_batch
) = sample
# use torch.no_grad since passive separators are frozen
with torch.no_grad():
pred_binSepMasks =\
self.actor_critic.get_binSepMasks(
obs_batch,
)
pred_mono =\
self.actor_critic.convert_bin2mono(pred_binSepMasks.detach(),
mixed_audio=obs_batch["mixed_bin_audio_mag"],
)
prev_pred_monoFromMem_masked = prev_pred_monoFromMem_batch *\
masks_batch.unsqueeze(1).unsqueeze(2).repeat(1,
*pred_mono.size()[1:]
)
pred_monoFromMem =\
self.actor_critic.get_monoFromMem(pred_mono, prev_pred_monoFromMem_masked)
# get monoFromMem loss
gt_mono_mag = obs_batch["gt_mono_comps"][..., 0::2].clone()[..., :1]
monoFromMem_loss = F.l1_loss(pred_monoFromMem, gt_mono_mag)
# get bin2mono loss
mono_loss = F.l1_loss(pred_mono, gt_mono_mag)
# get bin-sep loss
gt_bin_mag = obs_batch["gt_bin_comps"][..., 0::2].clone()[..., :2]
pred_bin = (torch.exp(obs_batch["mixed_bin_audio_mag"]) - 1) * pred_binSepMasks
bin_loss = F.l1_loss(pred_bin, gt_bin_mag)
self.optimizer_sep.zero_grad()
total_loss = monoFromMem_loss
self.before_backward(total_loss)
total_loss.backward()
self.after_backward(total_loss)
self.before_step_sep()
self.optimizer_sep.step()
self.after_step()
bin_loss_epoch += bin_loss.item()
mono_loss_epoch += mono_loss.item()
monoFromMem_loss_epoch += monoFromMem_loss.item()
num_updates = self.ppo_epoch * self.num_mini_batch
bin_loss_epoch /= num_updates
mono_loss_epoch /= num_updates
monoFromMem_loss_epoch /= num_updates
return bin_loss_epoch, mono_loss_epoch, monoFromMem_loss_epoch
def before_backward(self, loss):
pass
def after_backward(self, loss):
pass
def before_step_pol(self):
pol_params = list(self.actor_critic.pol_net.parameters()) +\
list(self.actor_critic.action_dist.parameters()) +\
list(self.actor_critic.critic.parameters())
nn.utils.clip_grad_norm_(
pol_params, self.max_grad_norm
)
def before_step_sep(self):
sep_params = list(self.actor_critic.binSep_enc.parameters()) + list(self.actor_critic.binSep_dec.parameters()) +\
list(self.actor_critic.bin2mono_enc.parameters()) + list(self.actor_critic.bin2mono_dec.parameters()) +\
list(self.actor_critic.acoustic_mem.parameters())
nn.utils.clip_grad_norm_(
sep_params, self.max_grad_norm
)
def after_step(self):
pass
class DecentralizedDistributedMixin:
def _get_advantages_distributed(
self, rollouts_nav
) -> torch.Tensor:
advantages = rollouts_nav.returns[:-1] - rollouts_nav.value_preds[:-1]
if not self.use_normalized_advantage:
return advantages
mean, var = distributed_mean_and_var(advantages)
return (advantages - mean) / (var.sqrt() + EPS_PPO)
def init_distributed(self, find_unused_params: bool = True) -> None:
r"""Initializes distributed training for the model
1. Broadcasts the model weights from world_rank 0 to all other workers
2. Adds gradient hooks to the model
:param find_unused_params: Whether or not to filter out unused parameters
before gradient reduction. This *must* be True if
there are any parameters in the model that were unused in the
forward pass, otherwise the gradient reduction
will not work correctly.
"""
# NB: Used to hide the hooks from the nn.Module,
# so they don't show up in the state_dict
class Guard:
def __init__(self, model, device):
if torch.cuda.is_available():
self.ddp = torch.nn.parallel.DistributedDataParallel(
model, device_ids=[device], output_device=device
)
else:
self.ddp = torch.nn.parallel.DistributedDataParallel(model)
self._ddp_hooks = Guard(self.actor_critic, self.device)
self.get_advantages = self._get_advantages_distributed
self.reducer = self._ddp_hooks.ddp.reducer
self.find_unused_params = find_unused_params
def before_backward(self, loss):
super().before_backward(loss)
if self.find_unused_params:
self.reducer.prepare_for_backward([loss])
else:
self.reducer.prepare_for_backward([])
class DDPPO(DecentralizedDistributedMixin, PPO):
pass
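# Hedged usage sketch (assumes torch.distributed and the actor_critic have been set up elsewhere;
# the argument values shown are illustrative, not from the source):
#   agent = DDPPO(actor_critic, clip_param=0.2, ppo_epoch=4, num_mini_batch=2,
#                 value_loss_coef=0.5, bin_separation_loss_coef=1.0,
#                 mono_conversion_loss_coef=1.0, entropy_coef=0.01,
#                 lr_pol=1e-4, lr_sep=1e-4, eps=1e-5)
#   agent.init_distributed(find_unused_params=True)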
| [
"torch.min",
"torch.nn.utils.clip_grad_norm_",
"torch.nn.functional.l1_loss",
"torch.max",
"torch.no_grad",
"torch.optim.Adam",
"torch.nn.parallel.DistributedDataParallel",
"torch.clamp",
"torch.cuda.is_available",
"torch.exp"
] | 1.4.0 | SAGNIKMJR/move2hear-active-AV-separation | 3c6887aeb94b2a07983469bfd517ca277bd4124a |
1.2 | import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.optim as optim
from torch.utils.data import DataLoader
from tqdm import tqdm
from nets.frcnn import FasterRCNN
from trainer import FasterRCNNTrainer
from utils.dataloader import FRCNNDataset, frcnn_dataset_collate
from utils.utils import LossHistory, weights_init
def get_lr(optimizer):
for param_group in optimizer.param_groups:
return param_group['lr']
def fit_ont_epoch(net, epoch, epoch_size, epoch_size_val, gen, genval, Epoch, cuda):
total_loss = 0
rpn_loc_loss = 0
rpn_cls_loss = 0
roi_loc_loss = 0
roi_cls_loss = 0
val_toal_loss = 0
with tqdm(total=epoch_size, desc=f'Epoch {epoch + 1}/{Epoch}', postfix=dict, mininterval=0.3) as pbar:
for iteration, batch in enumerate(gen):
if iteration >= epoch_size:
break
imgs, boxes, labels = batch[0], batch[1], batch[2]  # numpy.ndarray, list, list
with torch.no_grad():
if cuda:
imgs = torch.from_numpy(imgs).type(torch.FloatTensor).cuda()
else:
imgs = torch.from_numpy(imgs).type(torch.FloatTensor)
losses = train_util.train_step(imgs, boxes, labels, 1)
rpn_loc, rpn_cls, roi_loc, roi_cls, total = losses
total_loss += total.item()
rpn_loc_loss += rpn_loc.item()
rpn_cls_loss += rpn_cls.item()
roi_loc_loss += roi_loc.item()
roi_cls_loss += roi_cls.item()
pbar.set_postfix(**{'total': total_loss / (iteration + 1),
'rpn_loc': rpn_loc_loss / (iteration + 1),
'rpn_cls': rpn_cls_loss / (iteration + 1),
'roi_loc': roi_loc_loss / (iteration + 1),
'roi_cls': roi_cls_loss / (iteration + 1),
'lr': get_lr(optimizer)})
pbar.update(1)
print('Start Validation')
with tqdm(total=epoch_size_val, desc=f'Epoch {epoch + 1}/{Epoch}', postfix=dict, mininterval=0.3) as pbar:
for iteration, batch in enumerate(genval):
if iteration >= epoch_size_val:
break
imgs, boxes, labels = batch[0], batch[1], batch[2]
with torch.no_grad():
if cuda:
imgs = torch.from_numpy(imgs).type(torch.FloatTensor).cuda()
else:
imgs = torch.from_numpy(imgs).type(torch.FloatTensor)
train_util.optimizer.zero_grad()
losses = train_util.forward(imgs, boxes, labels, 1)
_, _, _, _, val_total = losses
val_toal_loss += val_total.item()
pbar.set_postfix(**{'total_loss': val_toal_loss / (iteration + 1)})
pbar.update(1)
loss_history.append_loss(total_loss / (epoch_size + 1), val_toal_loss / (epoch_size_val + 1))
print('Finish Validation')
print('Epoch:' + str(epoch + 1) + '/' + str(Epoch))
print('Total Loss: %.4f || Val Loss: %.4f ' % (total_loss / (epoch_size + 1), val_toal_loss / (epoch_size_val + 1)))
print('Saving state, iter:', str(epoch + 1))
torch.save(model.state_dict(), 'logs/Epoch%d-Total_Loss%.4f-Val_Loss%.4f.pth' % (
(epoch + 1), total_loss / (epoch_size + 1), val_toal_loss / (epoch_size_val + 1)))
if __name__ == "__main__":
# 1. Parameters.
Cuda = False
NUM_CLASSES = 20
input_shape = [800, 800, 3]
# 2. Build the network and initialize its parameters.
backbone = "resnet50"
model = FasterRCNN(NUM_CLASSES, backbone=backbone)
weights_init(model)
# 3. Initialize from a pretrained model.
# model_path = 'model_data/voc_weights_resnet.pth'
# print('Loading weights into state dict...')
# device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# model_dict = model.state_dict()
# pretrained_dict = torch.load(model_path, map_location=device)
# pretrained_dict = {k: v for k, v in pretrained_dict.items() if np.shape(model_dict[k]) == np.shape(v)}
# model_dict.update(pretrained_dict)
# model.load_state_dict(model_dict)
# print('Finished!')
# 4. Set training mode and use CUDA if enabled.
net = model.train()
if Cuda:
net = torch.nn.DataParallel(model)
cudnn.benchmark = True
net = net.cuda()
# 5. Record loss values for plotting.
loss_history = LossHistory("logs/")
# 6. Read the data.
annotation_path = '2007_train.txt'
val_split = 0.1
with open(annotation_path) as f:
lines = f.readlines()
np.random.seed(10101)
np.random.shuffle(lines)
np.random.seed(None)
num_val = int(len(lines) * val_split)
num_train = len(lines) - num_val
# 7. For the first 50 epochs train only the parameters outside the backbone; for the last 50 epochs train all parameters.
if True:
# 7.1 Parameters.
lr = 1e-4
Batch_size = 1
Init_Epoch = 0
Freeze_Epoch = 50
# 7.2 Data loaders.
train_dataset = FRCNNDataset(lines[:num_train], (input_shape[0], input_shape[1]), is_train=True)
val_dataset = FRCNNDataset(lines[num_train:], (input_shape[0], input_shape[1]), is_train=False)
gen = DataLoader(train_dataset, shuffle=True, batch_size=Batch_size, num_workers=4, pin_memory=True,
drop_last=True, collate_fn=frcnn_dataset_collate)
gen_val = DataLoader(val_dataset, shuffle=True, batch_size=Batch_size, num_workers=4, pin_memory=True,
drop_last=True, collate_fn=frcnn_dataset_collate)
# 7.3 Number of batches per epoch.
epoch_size = num_train // Batch_size
epoch_size_val = num_val // Batch_size
if epoch_size == 0 or epoch_size_val == 0:
raise ValueError("The dataset is too small to train on; please enlarge it.")
# 7.4 Freeze the backbone parameters.
for param in model.extractor.parameters():
param.requires_grad = False
model.freeze_bn()
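# Hedged sanity check (illustrative): after freezing, every backbone parameter should report
# requires_grad == False:
#   assert all(not p.requires_grad for p in model.extractor.parameters())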
# 7.5 Optimizer and learning-rate schedule.
optimizer = optim.Adam(net.parameters(), lr, weight_decay=5e-4)
lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.95)
# 7.6 This class consumes the data and returns the loss terms.
train_util = FasterRCNNTrainer(model, optimizer)
# 7.7 Training loop.
for epoch in range(Init_Epoch, Freeze_Epoch):
fit_ont_epoch(net, epoch, epoch_size, epoch_size_val, gen, gen_val, Freeze_Epoch, Cuda)
lr_scheduler.step()
if True:
lr = 1e-5
Batch_size = 2
Freeze_Epoch = 50
Unfreeze_Epoch = 100
optimizer = optim.Adam(net.parameters(), lr, weight_decay=5e-4)
lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.95)
train_dataset = FRCNNDataset(lines[:num_train], (input_shape[0], input_shape[1]), is_train=True)
val_dataset = FRCNNDataset(lines[num_train:], (input_shape[0], input_shape[1]), is_train=False)
gen = DataLoader(train_dataset, shuffle=True, batch_size=Batch_size, num_workers=4, pin_memory=True,
drop_last=True, collate_fn=frcnn_dataset_collate)
gen_val = DataLoader(val_dataset, shuffle=True, batch_size=Batch_size, num_workers=4, pin_memory=True,
drop_last=True, collate_fn=frcnn_dataset_collate)
epoch_size = num_train // Batch_size
epoch_size_val = num_val // Batch_size
if epoch_size == 0 or epoch_size_val == 0:
raise ValueError("The dataset is too small to train on; please enlarge it.")
# ------------------------------------#
# Train after unfreezing the backbone
# ------------------------------------#
for param in model.extractor.parameters():
param.requires_grad = True
# ------------------------------------#
# Keep the BN layers frozen
# ------------------------------------#
model.freeze_bn()
train_util = FasterRCNNTrainer(model, optimizer)
for epoch in range(Freeze_Epoch, Unfreeze_Epoch):
fit_ont_epoch(net, epoch, epoch_size, epoch_size_val, gen, gen_val, Unfreeze_Epoch, Cuda)
lr_scheduler.step()
| [
"torch.optim.lr_scheduler.StepLR",
"torch.no_grad",
"torch.from_numpy",
"torch.utils.data.DataLoader",
"torch.nn.DataParallel"
] | 1.2.0 | boyob/Faster-RCNN | 43625f6e9a19e7860ceb3478ba3c43fd430f3d8f |
1.7 | import torch
import torch.nn as nn
import torch.optim as optim
from collections import deque
from .. import epiopt
class PPO(nn.Module):
def __init__(self,
actor_critic,
clip_param,
ppo_epoch,
num_mini_batch,
value_loss_coef,
entropy_coef,
lr=None,
eps=None,
max_grad_norm=None,
use_clipped_value_loss=True, args=None):
super(PPO, self).__init__()
self.actor_critic = actor_critic
self.clip_param = clip_param
self.ppo_epoch = ppo_epoch
self.num_mini_batch = num_mini_batch
self.value_loss_coef = value_loss_coef
self.entropy_coef = entropy_coef
self.max_grad_norm = max_grad_norm
self.use_clipped_value_loss = use_clipped_value_loss
self.optimizer = optim.Adam(actor_critic.parameters(), lr=lr, eps=eps)
if args.optimizer == "sgd":
self.optimizer = optim.SGD(actor_critic.parameters(), lr=lr)
self.last_returns = deque(maxlen=5)
self.last_action_losses = deque(maxlen=200)
self.last_value_losses = deque(maxlen=200)
self.args = args
if args.use_mem:
self.epi_opt = epiopt.EPIOPT(actor_critic, args)
def update(self, rollouts, update_step=0, env_step=0):
advantages = rollouts.returns[:-1] - rollouts.value_preds[:-1]
advantages = (advantages - advantages.mean()) / (
advantages.std() + 1e-5)
value_loss_epoch = 0
action_loss_epoch = 0
dist_entropy_epoch = 0
prev_loss = 0
gstep = 0
R = 0
# print(len(self.traj_buffer), self.dnd.get_mem_size())
# print(self.traj_buffer)
if self.args.use_mem:
self.epi_opt.insert2mem(torch.mean(rollouts.returns), update_step=update_step, gstep=-1)
self.last_returns.append(torch.mean(rollouts.returns))
for e in range(self.ppo_epoch):
num_mini_batch = self.num_mini_batch
if self.args.adaptive_opt>0 and 'mb' in self.epi_opt.opt_values:
num_mini_batch = max(int(self.epi_opt.opt_values['mb']*self.num_mini_batch),1)
if self.actor_critic.is_recurrent:
data_generator = rollouts.recurrent_generator(
advantages, num_mini_batch)
else:
data_generator = rollouts.feed_forward_generator(
advantages, num_mini_batch)
for sample in data_generator:
if self.args.use_mem:
self.epi_opt.take_action(update_step, env_step, gstep)
obs_batch, recurrent_hidden_states_batch, actions_batch, \
value_preds_batch, return_batch, masks_batch, old_action_log_probs_batch, \
adv_targ = sample
# Reshape to do in a single forward pass for all steps
values, action_log_probs, dist_entropy, _ = self.actor_critic.evaluate_actions(
obs_batch, recurrent_hidden_states_batch, masks_batch,
actions_batch)
nps = self.args.num_processes*self.args.num_steps//self.num_mini_batch
if self.args.adaptive_opt > 0 and 'np' in self.epi_opt.opt_values:
nps = nps * self.epi_opt.opt_values['np']
nps = max(1, int(nps))
action_log_probs = action_log_probs[:nps]
old_action_log_probs_batch = old_action_log_probs_batch[:nps]
values = values[:nps]
advantages = advantages[:nps]
adv_targ = adv_targ[:nps]
value_preds_batch = value_preds_batch[:nps]
return_batch = return_batch[:nps]
approx_kl = 0.5 * torch.mean((old_action_log_probs_batch - action_log_probs) ** 2)
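# Note: approx_kl estimates how far the updated policy has drifted from the old one,
# but it is not used further in this update (diagnostic only).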
ratio = torch.exp(action_log_probs -
old_action_log_probs_batch)
surr1 = ratio * adv_targ
surr2 = torch.clamp(ratio, 1.0 - self.clip_param,
1.0 + self.clip_param) * adv_targ
if self.args.adaptive_opt > 0 and 'clip' in self.epi_opt.opt_values:
surr2 = torch.clamp(ratio, 1.0 - self.epi_opt.opt_values['clip'],
1.0 + self.epi_opt.opt_values['clip']) * adv_targ
action_loss = -torch.min(surr1, surr2).mean()
if self.args.use_mem and gstep > 0:
if action_loss < prev_loss:
epo_reward = 0
else:
epo_reward = 0
self.epi_opt.add2buffer(epo_reward, gstep)
prev_loss = action_loss
self.last_action_losses.append(action_loss.item())
if self.use_clipped_value_loss:
value_pred_clipped = value_preds_batch + \
(values - value_preds_batch).clamp(-self.clip_param, self.clip_param)
value_losses = (values - return_batch).pow(2)
value_losses_clipped = (
value_pred_clipped - return_batch).pow(2)
value_loss = 0.5 * torch.max(value_losses,
value_losses_clipped).mean()
else:
value_loss = 0.5 * (return_batch - values).pow(2).mean()
self.last_value_losses.append(value_loss.item())
if self.args and self.args.use_mem:
if gstep>0:
self.epi_opt.compute_last_grad()
lrs = []
if self.args.adaptive_opt > 0 and 'lr' in self.epi_opt.opt_values:
for param_group in self.optimizer.param_groups:
lrs.append(param_group['lr'])
param_group['lr'] *= self.epi_opt.opt_values['lr']
self.optimizer.zero_grad()
value_loss_coef = self.value_loss_coef
if self.args.adaptive_opt > 0 and 'vlc' in self.epi_opt.opt_values:
value_loss_coef = self.value_loss_coef*self.epi_opt.opt_values['vlc']
entropy_coef = self.entropy_coef
if self.args.adaptive_opt > 0 and 'enc' in self.epi_opt.opt_values:
entropy_coef = self.entropy_coef*self.epi_opt.opt_values['enc']
(value_loss * value_loss_coef + action_loss -
dist_entropy * entropy_coef).backward()
max_grad_norm = self.max_grad_norm
if self.args.adaptive_opt > 0 and 'mgn' in self.epi_opt.opt_values:
max_grad_norm = self.max_grad_norm * self.epi_opt.opt_values['mgn']
nn.utils.clip_grad_norm_(self.actor_critic.parameters(),
max_grad_norm)
self.optimizer.step()
gstep += 1
value_loss_epoch += value_loss.item()
action_loss_epoch += action_loss.item()
dist_entropy_epoch += dist_entropy.item()
if self.args.adaptive_opt > 0 and 'lr' in self.epi_opt.opt_values:
pi = 0
for param_group in self.optimizer.param_groups:
param_group['lr'] = lrs[pi]
pi+=1
num_updates = self.ppo_epoch * self.num_mini_batch
value_loss_epoch /= num_updates
action_loss_epoch /= num_updates
dist_entropy_epoch /= num_updates
return value_loss_epoch, action_loss_epoch, dist_entropy_epoch
| [
"torch.min",
"torch.max",
"torch.clamp",
"torch.exp",
"torch.mean"
] | 1.7.1 | thaihungle/EPGT | 0c0196d9c4137f52f4d05888ad753cb915a5c761 |
1.6 | import math
import typing
import torch
from torch import nn
from torch.nn import functional as F
from vits_train.commons import subsequent_mask
from vits_train.modules import LayerNorm
class Encoder(nn.Module):
def __init__(
self,
hidden_channels: int,
filter_channels: int,
n_heads: int,
n_layers: int,
kernel_size: int = 1,
p_dropout: float = 0.0,
window_size: int = 4,
**kwargs
):
super().__init__()
self.hidden_channels = hidden_channels
self.filter_channels = filter_channels
self.n_heads = n_heads
self.n_layers = n_layers
self.kernel_size = kernel_size
self.p_dropout = p_dropout
self.window_size = window_size
self.drop = nn.Dropout(p_dropout)
self.attn_layers = nn.ModuleList()
self.norm_layers_1 = nn.ModuleList()
self.ffn_layers = nn.ModuleList()
self.norm_layers_2 = nn.ModuleList()
for i in range(self.n_layers):
self.attn_layers.append(
MultiHeadAttention(
hidden_channels,
hidden_channels,
n_heads,
p_dropout=p_dropout,
window_size=window_size,
)
)
self.norm_layers_1.append(LayerNorm(hidden_channels))
self.ffn_layers.append(
FFN(
hidden_channels,
hidden_channels,
filter_channels,
kernel_size,
p_dropout=p_dropout,
)
)
self.norm_layers_2.append(LayerNorm(hidden_channels))
def forward(self, x, x_mask):
attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
x = x * x_mask
for i in range(self.n_layers):
y = self.attn_layers[i](x, x, attn_mask)
y = self.drop(y)
x = self.norm_layers_1[i](x + y)
y = self.ffn_layers[i](x, x_mask)
y = self.drop(y)
x = self.norm_layers_2[i](x + y)
x = x * x_mask
return x
class Decoder(nn.Module):
def __init__(
self,
hidden_channels: int,
filter_channels: int,
n_heads: int,
n_layers: int,
kernel_size: int = 1,
p_dropout: float = 0.0,
proximal_bias: bool = False,
proximal_init: bool = True,
**kwargs
):
super().__init__()
self.hidden_channels = hidden_channels
self.filter_channels = filter_channels
self.n_heads = n_heads
self.n_layers = n_layers
self.kernel_size = kernel_size
self.p_dropout = p_dropout
self.proximal_bias = proximal_bias
self.proximal_init = proximal_init
self.drop = nn.Dropout(p_dropout)
self.self_attn_layers = nn.ModuleList()
self.norm_layers_0 = nn.ModuleList()
self.encdec_attn_layers = nn.ModuleList()
self.norm_layers_1 = nn.ModuleList()
self.ffn_layers = nn.ModuleList()
self.norm_layers_2 = nn.ModuleList()
for i in range(self.n_layers):
self.self_attn_layers.append(
MultiHeadAttention(
hidden_channels,
hidden_channels,
n_heads,
p_dropout=p_dropout,
proximal_bias=proximal_bias,
proximal_init=proximal_init,
)
)
self.norm_layers_0.append(LayerNorm(hidden_channels))
self.encdec_attn_layers.append(
MultiHeadAttention(
hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout
)
)
self.norm_layers_1.append(LayerNorm(hidden_channels))
self.ffn_layers.append(
FFN(
hidden_channels,
hidden_channels,
filter_channels,
kernel_size,
p_dropout=p_dropout,
causal=True,
)
)
self.norm_layers_2.append(LayerNorm(hidden_channels))
def forward(self, x, x_mask, h, h_mask):
"""
x: decoder input
h: encoder output
"""
self_attn_mask = subsequent_mask(x_mask.size(2)).type_as(x)
encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
x = x * x_mask
for i in range(self.n_layers):
y = self.self_attn_layers[i](x, x, self_attn_mask)
y = self.drop(y)
x = self.norm_layers_0[i](x + y)
y = self.encdec_attn_layers[i](x, h, encdec_attn_mask)
y = self.drop(y)
x = self.norm_layers_1[i](x + y)
y = self.ffn_layers[i](x, x_mask)
y = self.drop(y)
x = self.norm_layers_2[i](x + y)
x = x * x_mask
return x
class MultiHeadAttention(nn.Module):
def __init__(
self,
channels: int,
out_channels: int,
n_heads: int,
p_dropout: float = 0.0,
window_size: typing.Optional[int] = None,
heads_share: bool = True,
block_length: typing.Optional[int] = None,
proximal_bias: bool = False,
proximal_init: bool = False,
):
super().__init__()
assert channels % n_heads == 0
self.channels = channels
self.out_channels = out_channels
self.n_heads = n_heads
self.p_dropout = p_dropout
self.window_size = window_size
self.heads_share = heads_share
self.block_length = block_length
self.proximal_bias = proximal_bias
self.proximal_init = proximal_init
self.attn = None
self.k_channels = channels // n_heads
self.conv_q = nn.Conv1d(channels, channels, 1)
self.conv_k = nn.Conv1d(channels, channels, 1)
self.conv_v = nn.Conv1d(channels, channels, 1)
self.conv_o = nn.Conv1d(channels, out_channels, 1)
self.drop = nn.Dropout(p_dropout)
if window_size is not None:
n_heads_rel = 1 if heads_share else n_heads
rel_stddev = self.k_channels ** -0.5
self.emb_rel_k = nn.Parameter(
torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels)
* rel_stddev
)
self.emb_rel_v = nn.Parameter(
torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels)
* rel_stddev
)
nn.init.xavier_uniform_(self.conv_q.weight)
nn.init.xavier_uniform_(self.conv_k.weight)
nn.init.xavier_uniform_(self.conv_v.weight)
if proximal_init:
with torch.no_grad():
self.conv_k.weight.copy_(self.conv_q.weight)
self.conv_k.bias.copy_(self.conv_q.bias)
def forward(self, x, c, attn_mask=None):
q = self.conv_q(x)
k = self.conv_k(c)
v = self.conv_v(c)
x, self.attn = self.attention(q, k, v, mask=attn_mask)
x = self.conv_o(x)
return x
def attention(self, query, key, value, mask=None):
# reshape [b, d, t] -> [b, n_h, t, d_k]
b, d, t_s, t_t = (*key.size(), query.size(2))
query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3)
key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1))
if self.window_size is not None:
assert (
t_s == t_t
), "Relative attention is only available for self-attention."
key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s)
rel_logits = self._matmul_with_relative_keys(
query / math.sqrt(self.k_channels), key_relative_embeddings
)
scores_local = self._relative_position_to_absolute_position(rel_logits)
scores = scores + scores_local
if self.proximal_bias:
assert t_s == t_t, "Proximal bias is only available for self-attention."
scores = scores + self._attention_bias_proximal(t_s).type_as(scores)
if mask is not None:
scores = scores.masked_fill(mask == 0, -1e4)
if self.block_length is not None:
assert (
t_s == t_t
), "Local attention is only available for self-attention."
block_mask = (
torch.ones_like(scores)
.triu(-self.block_length)
.tril(self.block_length)
)
scores = scores.masked_fill(block_mask == 0, -1e4)
p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s]
p_attn = self.drop(p_attn)
output = torch.matmul(p_attn, value)
if self.window_size is not None:
relative_weights = self._absolute_position_to_relative_position(p_attn)
value_relative_embeddings = self._get_relative_embeddings(
self.emb_rel_v, t_s
)
output = output + self._matmul_with_relative_values(
relative_weights, value_relative_embeddings
)
output = (
output.transpose(2, 3).contiguous().view(b, d, t_t)
) # [b, n_h, t_t, d_k] -> [b, d, t_t]
return output, p_attn
def _matmul_with_relative_values(self, x, y):
"""
x: [b, h, l, m]
y: [h or 1, m, d]
ret: [b, h, l, d]
"""
ret = torch.matmul(x, y.unsqueeze(0))
return ret
def _matmul_with_relative_keys(self, x, y):
"""
x: [b, h, l, d]
y: [h or 1, m, d]
ret: [b, h, l, m]
"""
ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
return ret
def _get_relative_embeddings(self, relative_embeddings, length):
# max_relative_position = 2 * self.window_size + 1
# Pad first before slice to avoid using cond ops.
pad_length = max(length - (self.window_size + 1), 0)
slice_start_position = max((self.window_size + 1) - length, 0)
slice_end_position = slice_start_position + 2 * length - 1
if pad_length > 0:
padded_relative_embeddings = F.pad(
relative_embeddings,
# convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]),
(0, 0, pad_length, pad_length, 0, 0),
)
else:
padded_relative_embeddings = relative_embeddings
used_relative_embeddings = padded_relative_embeddings[
:, slice_start_position:slice_end_position
]
return used_relative_embeddings
def _relative_position_to_absolute_position(self, x):
"""
x: [b, h, l, 2*l-1]
ret: [b, h, l, l]
"""
batch, heads, length, _ = x.size()
# Concat columns of pad to shift from relative to absolute indexing.
# x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]]))
x = F.pad(x, (0, 1, 0, 0, 0, 0, 0, 0))
# Concat extra elements so to add up to shape (len+1, 2*len-1).
x_flat = x.view([batch, heads, length * 2 * length])
# x_flat = F.pad(x_flat, convert_pad_shape([[0, 0], [0, 0], [0, length - 1]]))
x_flat = F.pad(x_flat, (0, length - 1, 0, 0, 0, 0))
# Reshape and slice out the padded elements.
x_final = x_flat.view([batch, heads, length + 1, (2 * length) - 1])[
:, :, :length, length - 1 :
]
return x_final
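# Shape note (illustrative): for length l = 3 the relative logits [b, h, 3, 5] are re-indexed
# into absolute scores [b, h, 3, 3]; values are only rearranged by padding and slicing, never created.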
def _absolute_position_to_relative_position(self, x):
"""
x: [b, h, l, l]
ret: [b, h, l, 2*l-1]
"""
batch, heads, length, _ = x.size()
# pad along the column dimension
# x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]]))
x = F.pad(x, (0, length - 1, 0, 0, 0, 0, 0, 0))
x_flat = x.view([batch, heads, (length * length) + (length * (length - 1))])
# add 0's in the beginning that will skew the elements after reshape
# x_flat = F.pad(x_flat, convert_pad_shape([[0, 0], [0, 0], [length, 0]]))
x_flat = F.pad(x_flat, (length, 0, 0, 0, 0, 0))
x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:]
return x_final
def _attention_bias_proximal(self, length):
"""Bias for self-attention to encourage attention to close positions.
Args:
length: an integer scalar.
Returns:
a Tensor with shape [1, 1, length, length]
"""
r = torch.arange(length, dtype=torch.float32)
diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0)
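# Note: the proximal bias is -log(1 + |i - j|), i.e. 0 on the diagonal and increasingly negative
# for distant positions, nudging self-attention toward nearby frames.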
class FFN(nn.Module):
def __init__(
self,
in_channels: int,
out_channels: int,
filter_channels: int,
kernel_size: int,
p_dropout: float = 0.0,
activation: typing.Optional[str] = None,
causal: bool = False,
):
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.filter_channels = filter_channels
self.kernel_size = kernel_size
self.p_dropout = p_dropout
self.activation = activation
self.causal = causal
if causal:
self.padding = self._causal_padding
else:
self.padding = self._same_padding
self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size)
self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size)
self.drop = nn.Dropout(p_dropout)
def forward(self, x, x_mask):
x = self.conv_1(self.padding(x * x_mask))
if self.activation == "gelu":
x = x * torch.sigmoid(1.702 * x)
else:
x = torch.relu(x)
x = self.drop(x)
x = self.conv_2(self.padding(x * x_mask))
return x * x_mask
def _causal_padding(self, x):
if self.kernel_size == 1:
return x
pad_l = self.kernel_size - 1
pad_r = 0
# padding = [[0, 0], [0, 0], [pad_l, pad_r]]
# x = F.pad(x, convert_pad_shape(padding))
x = F.pad(x, (pad_l, pad_r, 0, 0, 0, 0))
return x
def _same_padding(self, x):
if self.kernel_size == 1:
return x
pad_l = (self.kernel_size - 1) // 2
pad_r = self.kernel_size // 2
# padding = [[0, 0], [0, 0], [pad_l, pad_r]]
# x = F.pad(x, convert_pad_shape(padding))
x = F.pad(x, (pad_l, pad_r, 0, 0, 0, 0))
return x
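# Hedged usage sketch for the attention block above (hypothetical sizes, not from the source):
#   attn = MultiHeadAttention(channels=192, out_channels=192, n_heads=2, window_size=4)
#   x = torch.randn(8, 192, 100)            # [batch, channels, time]
#   attn_mask = torch.ones(8, 1, 100, 100)  # attend everywhere
#   y = attn(x, x, attn_mask=attn_mask)     # -> shape [8, 192, 100]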
| [
"torch.sigmoid",
"torch.nn.Dropout",
"torch.relu",
"torch.nn.ModuleList",
"torch.nn.Conv1d",
"torch.arange",
"torch.no_grad",
"torch.nn.init.xavier_uniform_",
"torch.unsqueeze",
"torch.abs",
"torch.ones_like",
"torch.nn.functional.softmax",
"torch.nn.functional.pad",
"torch.matmul",
"torch.randn"
] | 1.6.0 | mbarnig/vits-train | cfb8a0fc91daad868fe3d062ebf85d62edbd7506 |
1.0 | #!/usr/bin/python
import torch
import torchvision
dummy_input = torch.randn(10, 3, 224, 224, device='cpu') # or cuda
model = torchvision.models.alexnet(pretrained=True).cpu() # or cuda()
input_names = ["actual_input_1"] + ["learned_%d" % i for i in range(16)]
output_names = ["output1"]
torch.onnx.export(model, dummy_input, "model.onnx", verbose=True, input_names=input_names, output_names=output_names)
# Test the model and exit with an error if it does not pass quality gateway
# If gateway tests are passed, all onnx files saved will be pushed to the associated -service project for deployment
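# A hedged verification sketch (uses the separate `onnx` package, assumed to be installed):
#   import onnx
#   onnx_model = onnx.load("model.onnx")
#   onnx.checker.check_model(onnx_model)  # raises if the exported graph is malformed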
| [
"torch.randn",
"torch.onnx.export"
] | 1.0.1 | rajattyagipvr/ml-pytorch-training | aeb992d1035f25a9920ce5dbab01f18fc29a687c |
1.2 | #! /usr/bin/python
# -*- encoding: utf-8 -*-
## Fast re-implementation of the GE2E loss (https://arxiv.org/abs/1710.10467)
## Numerically checked against https://github.com/cvqluu/GE2E-Loss
import torch
import torch.nn as nn
import torch.nn.functional as F
import time, pdb, numpy
class GE2ELoss(nn.Module):
def __init__(self, init_w=10.0, init_b=-5.0):
super(GE2ELoss, self).__init__()
self.w = nn.Parameter(torch.tensor(init_w))
self.b = nn.Parameter(torch.tensor(init_b))
self.criterion = torch.nn.CrossEntropyLoss()
print('Initialised GE2E')
def forward(self, x, label=None):
gsize = x.size()[1]
centroids = torch.mean(x, 1)
stepsize = x.size()[0]
cos_sim_matrix = []
for ii in range(0,gsize):
idx = [*range(0,gsize)]
idx.remove(ii)
exc_centroids = torch.mean(x[:,idx,:], 1)
cos_sim_diag = F.cosine_similarity(x[:,ii,:],exc_centroids)
cos_sim = F.cosine_similarity(x[:,ii,:].unsqueeze(-1).expand(-1,-1,stepsize),centroids.unsqueeze(-1).expand(-1,-1,stepsize).transpose(0,2))
cos_sim[range(0,stepsize),range(0,stepsize)] = cos_sim_diag
cos_sim_matrix.append(torch.clamp(cos_sim,1e-6))
cos_sim_matrix = torch.stack(cos_sim_matrix,dim=1)
self.w.data = torch.clamp(self.w.data, min=1e-6)  # keep the learned scale positive, as GE2E requires w > 0
cos_sim_matrix = cos_sim_matrix * self.w + self.b
label = torch.from_numpy(numpy.asarray(range(0,stepsize))).cuda()
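# Each row of the (stepsize * gsize, stepsize) similarity matrix corresponds to one utterance,
# and its target class is the index of its own speaker, as in the GE2E softmax loss.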
nloss = self.criterion(cos_sim_matrix.view(-1,stepsize), torch.repeat_interleave(label,repeats=gsize,dim=0).cuda())
return nloss, 0 | [
"torch.stack",
"torch.nn.CrossEntropyLoss",
"torch.repeat_interleave",
"torch.clamp",
"torch.tensor",
"torch.nn.functional.cosine_similarity",
"torch.mean"
] | 1.2.0 | Jungjee/voxceleb_trainer | 52fc63eff4ab3c86016a076bde52e7bcd0e5c3da |
1.10 | import sys
sys.path.append("..")
def print_as_comment(obj):
print("\n".join(f"# {line}" for line in str(obj).splitlines()))
if __name__ == "__main__":
import torch
torch.manual_seed(42)
# --- Prepare instruments
from pfhedge.instruments import BrownianStock
from pfhedge.instruments import EuropeanOption
stock = BrownianStock(cost=1e-4)
derivative = EuropeanOption(stock)
print(">>> stock")
print_as_comment(stock)
print(">>> derivative")
print_as_comment(derivative)
# --- Fit and price
from pfhedge.nn import Hedger
from pfhedge.nn import MultiLayerPerceptron
model = MultiLayerPerceptron()
hedger = Hedger(model, ["log_moneyness", "expiry_time", "volatility", "prev_hedge"])
hedger
print(">>> hedger")
print_as_comment(hedger)
hedger.fit(derivative)
price = hedger.price(derivative)
print(">>> price")
print_as_comment(price)
# --- Black-Scholes and Whalley-Wilmott
from pfhedge.nn import BlackScholes
from pfhedge.nn import WhalleyWilmott
derivative = EuropeanOption(BrownianStock(cost=1e-4))
model = BlackScholes(derivative)
hedger_bs = Hedger(model, model.inputs())
hedger_bs
print(">>> hedger_bs")
print_as_comment(hedger_bs)
model = WhalleyWilmott(derivative)
hedger_ww = Hedger(model, model.inputs())
hedger_ww
print(">>> hedger_ww")
print_as_comment(hedger_ww)
price_bs = hedger_bs.price(derivative)
price_ww = hedger_ww.price(derivative)
print(">>> price_bs")
print_as_comment(price_bs)
print(">>> price_ww")
print_as_comment(price_ww)
| [
"torch.manual_seed"
] | 1.10.0 | vishalbelsare/pfhedge | bc4ae304f9dc887b0e4d581f8ad42700a4eea9ad |
1.8 | import sys
import shutil, os
from shutil import copyfile
import sklearn
from sklearn import metrics
from collections import OrderedDict
import numpy as np
import random
import pdb
from sklearn.metrics import roc_curve, auc, roc_auc_score, f1_score
from sklearn.preprocessing import LabelBinarizer
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
import gc
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.utils.data import DataLoader, TensorDataset
from collections import OrderedDict
from sklearn.preprocessing import LabelEncoder, normalize
from sklearn.utils import class_weight
from deap.algorithms import varOr
from deap import base
from deap import creator
from deap import tools
gpu_id = '3'
os.environ["CUDA_VISIBLE_DEVICES"] = gpu_id
mbest = 10
cxpb = 0.5
mutpb = 0.5
mu = 0.5
u = 10
lam = 60
ks = 1
kv = 1
ak = 0.9
bk = 1.1
sigma = []
random.seed(42)
num_epochs = 5
batch_size = 100
generation = 0
dataset_name = sys.argv[1]
def batch(tensor, batch_size=1000):
tensor_list = []
length = tensor.shape[0]
i = 0
while True:
if (i + 1) * batch_size >= length:
tensor_list.append(tensor[i * batch_size: length])
return tensor_list
tensor_list.append(tensor[i * batch_size: (i + 1) * batch_size])
i += 1
class CNN(nn.Module):
def __init__(self, nb_filter, channel=7, num_classes=2, kernel_size=(4, 10), pool_size=(1, 3), labcounts=32,
window_size=12, hidden_size=200, stride=(1, 1), padding=0):
super(CNN, self).__init__()
self.layer1 = nn.Sequential(
nn.Conv2d(channel, nb_filter, kernel_size, stride=stride, padding=padding),
# nn.BatchNorm2d(nb_filter),
nn.ReLU())
self.pool1 = nn.MaxPool2d(pool_size, stride=stride)
out1_size = (window_size + 2 * padding - (kernel_size[1] - 1) - 1) / stride[1] + 1
maxpool_size = (out1_size + 2 * padding - (pool_size[1] - 1) - 1) / stride[1] + 1
self.layer2 = nn.Sequential(
nn.Conv2d(nb_filter, nb_filter, kernel_size=(1, 10), stride=stride, padding=padding),
# nn.BatchNorm2d(nb_filter),
nn.ReLU(),
nn.MaxPool2d(pool_size, stride=stride))
out2_size = (maxpool_size + 2 * padding - (kernel_size[1] - 1) - 1) / stride[1] + 1
maxpool2_size = (out2_size + 2 * padding - (pool_size[1] - 1) - 1) / stride[1] + 1
# self.drop1 = nn.Dropout(p=0.25)
print('maxpool_size', maxpool_size)
self.fc1 = nn.Linear(int(maxpool2_size * nb_filter), hidden_size)
# self.fc1 = nn.Linear(7760, hidden_size)
# self.drop2 = nn.Dropout(p=0.25)
self.relu1 = nn.ReLU()
self.fc2 = nn.Linear(hidden_size, num_classes)
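# Worked example of the size arithmetic above (illustrative; window_size=507, padding=0, stride 1):
#   out1_size     = (507 - 9 - 1) + 1 = 498
#   maxpool_size  = (498 - 2 - 1) + 1 = 496
#   out2_size     = (496 - 9 - 1) + 1 = 487
#   maxpool2_size = (487 - 2 - 1) + 1 = 485  ->  fc1 input = 485 * 16 = 7760,
# which matches the 7760 noted in the commented-out fc1 line.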
def forward(self, x):
out = self.layer1(x)
out = self.pool1(out)
out = self.layer2(out)
out = out.view(out.size(0), -1)
# out = self.drop1(out)
out = self.fc1(out)
# out = self.drop2(out)
out = self.relu1(out)
out = self.fc2(out)
out = torch.sigmoid(out)
torch.cuda.empty_cache()
return out
def layer1out(self, x):
if type(x) is np.ndarray:
x = torch.from_numpy(x.astype(np.float32))
x = Variable(x)
if cuda:
x = x.cuda()
out = self.layer1(x)
temp = out.data.cpu().numpy()
x = x.cpu()
torch.cuda.empty_cache()
return temp
def predict_proba(self, x):
if type(x) is np.ndarray:
x = torch.from_numpy(x.astype(np.float32))
x = Variable(x)
if cuda:
x = x.cuda()
y = self.forward(x)
temp = y.data.cpu().numpy()
x = x.cpu()
torch.cuda.empty_cache()
return temp[:, 1]
class Estimator(object):
def __init__(self, model):
self.model = model
def compile(self, optimizer, loss):
self.optimizer = optimizer
self.loss_f = loss
def _fit(self, train_loader):
"""
train one epoch
"""
loss_list = []
acc_list = []
for idx, (X, y) in enumerate(train_loader):
X_v = Variable(X)
y_v = Variable(y)
if cuda:
X_v = X_v.cuda()
y_v = y_v.cuda()
self.optimizer.zero_grad()
y_pred = self.model(X_v)
loss = self.loss_f(y_pred, y_v)
loss.backward()
self.optimizer.step()
## for log
loss_list.append(loss.item()) # need change to loss_list.append(loss.item()) for pytorch v0.4 or above
if cuda:
X_v = X_v.cpu()
y_v = y_v.cpu()
torch.cuda.empty_cache()
return sum(loss_list) / len(loss_list)
def fit(self, X, y, batch_size=32, nb_epoch=10, validation_data=()):
# X_list = batch(X, batch_size)
# y_list = batch(y, batch_size)
# pdb.set_trace()
print('X.shape', X.shape)
train_set = TensorDataset(torch.from_numpy(X.astype(np.float32)),
torch.from_numpy(y.astype(np.float32)).long().view(-1))
train_loader = DataLoader(dataset=train_set, batch_size=batch_size, shuffle=True)
self.model.train()
for t in range(nb_epoch):
loss = self._fit(train_loader)
print('loss', loss)
# self.model.eval()
#
# rint("Epoch %s/%s loss: %06.4f - acc: %06.4f %s" % (t, nb_epoch, loss, acc, val_log))
def evaluate(ind):
auc = 0
return auc
def _accuracy(self, y_pred, y):
return float(sum(y_pred == y)) / y.shape[0]
def predict(self, X):
X = Variable(torch.from_numpy(X.astype(np.float32)))
if cuda:
X = X.cuda()
y_pred = self.model(X)
X = X.cpu()
torch.cuda.empty_cache()
return y_pred
def predict_proba(self, X):
self.model.eval()
return self.model.predict_proba(X)
net1 = CNN(nb_filter=16, labcounts=4, window_size=507, channel=1)
net2 = CNN(nb_filter=16, labcounts=4, window_size=107, channel=7)
def GENnetwork():
a = [net1.state_dict(), net2.state_dict()]
return a
def mate1(toolbox, ind1, ind2):
for k, v in ind1.items():
if str(k).find('num_batches_tracked', 0, len(k)) == -1:
ind1[k], ind2[k] = toolbox.mate(ind1[k], ind2[k])
return ind1, ind2
def mate2(toolbox, ind1, ind2):
mate1(toolbox, ind1[0], ind2[0])
mate1(toolbox, ind1[1], ind2[1])
return ind1, ind2
def mutate1(ind, generation):
sigma1 = 0.01 / (generation + 1)
# sigma1 = 0.01-0.0005*generation
for k, v in ind.items():
if str(k).find('num_batches_tracked', 0, len(k)) == -1:
ind[k] = tools.mutGaussian(ind[k], 0, sigma1, indpb=0.05)
return ind
def mutate2(ind, generation):
ind[0] = mutate1(ind[0], generation)
ind[1] = mutate1(ind[1], generation)
return ind
def recombin1(ind, ind1, ind2):
for k, v in ind1.items():
if str(k).find('num_batches_tracked', 0, len(k)) == -1:
ind[k] = (ind1[k] + ind2[k]) / 2
return ind
def recombin2(ind, ind1, ind2):
ind[0] = recombin1(ind[0], ind1[0], ind2[0])
ind[1] = recombin1(ind[1], ind1[1], ind2[1])
return ind
def recombin_mutate(toolbox, ind, ind1, ind2):
ind = recombin2(ind, ind1, ind2)
ind = mutate2(ind, generation)
return ind
def evalFitness(ind, X, y, batch_size=32):
y_pred = ind.predict(X)
y_v = Variable(torch.from_numpy(y).long(), requires_grad=False)
if cuda:
y_v = y_v.cuda()
loss = ind.loss_f(y_pred, y_v)
predict = y_pred.data.cpu().numpy()[:, 1].flatten()
auc = roc_auc_score(y, predict)
# print(auc)
# classes = torch.topk(y_pred, 1)[1].data.numpy().flatten()
# acc = self._accuracy(classes, y)
# loss.item(), auc
return auc,
def generate_offspring(population, toolbox, lambda_, cxpb, mutpb, generation):
offspring = []
p = []
for i in range(10):
p.append(0.01 + 0.02 * (9 - i))
p = np.array(p)
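# p is a rank-based selection distribution: p = [0.19, 0.17, ..., 0.01] sums to 1.0, so
# lower-indexed individuals are sampled more often as parents below (presumably the
# population is kept ordered by fitness).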
index_p = []
for i in range(10):
index_p.append(i)
for _ in range(lambda_):
index1 = np.random.choice(index_p, p=p.ravel())
index2 = np.random.choice(index_p, p=p.ravel())
print(index1)
print(index2)
aa = []
aa.append(population[index1])
aa.append(population[index2])
ind3, ind4 = list(map(toolbox.clone, random.sample(aa, 2)))
ind = toolbox.clone(random.choice(population))
ind = recombin_mutate(toolbox, ind, ind3, ind4)
# del ind.fitness.values
offspring.append(ind)
return offspring
creator.create("FitnessMax", base.Fitness, weights=(1.0,))
creator.create("Individual", list, fitness=creator.FitnessMax)
toolbox = base.Toolbox()
toolbox.register("network", GENnetwork)
toolbox.register("individual", tools.initIterate, creator.Individual, toolbox.network)
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
toolbox.register("mate", tools.cxTwoPoint)
toolbox.register("mutate", tools.mutGaussian, 0, 0.01, indpb=0.05)
toolbox.register("select", tools.selTournament, tournsize=1)
# multiple objectives can be set
toolbox.register("evaluate", evalFitness)
def fitscore(individual):
return individual.fitness.values
def split_training_validation(classes, validation_size=0.2, shuffle=True):
"""split sampels based on balnace classes"""
num_samples = len(classes)
classes = np.array(classes)
classes_unique = np.unique(classes)
num_classes = len(classes_unique)
indices = np.arange(num_samples)
# indices_folds=np.zeros([num_samples],dtype=int)
training_indice = []
training_label = []
validation_indice = []
validation_label = []
for cl in classes_unique:
indices_cl = indices[classes == cl]
num_samples_cl = len(indices_cl)
# split this class into k parts
if shuffle:
random.shuffle(indices_cl) # in-place shuffle
# module and residual
num_samples_each_split = int(num_samples_cl * validation_size)
res = num_samples_cl - num_samples_each_split
training_indice = training_indice + [val for val in indices_cl[num_samples_each_split:]]
training_label = training_label + [cl] * res
validation_indice = validation_indice + [val for val in indices_cl[:num_samples_each_split]]
validation_label = validation_label + [cl] * num_samples_each_split
training_index = np.arange(len(training_label))
random.shuffle(training_index)
training_indice = np.array(training_indice)[training_index]
training_label = np.array(training_label)[training_index]
validation_index = np.arange(len(validation_label))
random.shuffle(validation_index)
validation_indice = np.array(validation_indice)[validation_index]
validation_label = np.array(validation_label)[validation_index]
return training_indice, training_label, validation_indice, validation_label
def read_fasta_file(fasta_file):
seq_dict = {}
fp = open(fasta_file, 'r')
name = ''
for line in fp:
line = line.rstrip()
# distinguish header from sequence
if line[0] == '>': # or line.startswith('>')
# it is the header
name = line[1:] # discarding the initial >
seq_dict[name] = ''
else:
seq_dict[name] = seq_dict[name] + line.upper()
fp.close()
return seq_dict
def read_fasta_file_new(fasta_file='../data/UTR_hg19.fasta'):
seq_dict = {}
fp = open(fasta_file, 'r')
name = ''
for line in fp:
line = line.rstrip()
# distinguish header from sequence
if line[0] == '>': # or line.startswith('>')
# it is the header
name = line[1:].split()[0] # discarding the initial >
seq_dict[name] = ''
else:
seq_dict[name] = seq_dict[name] + line.upper()
fp.close()
return seq_dict
def load_rnacomend_data(datadir='../data/'):
pair_file = datadir + 'interactions_HT.txt'
# rbp_seq_file = datadir + 'rbps_HT.fa'
rna_seq_file = datadir + 'utrs.fa'
rna_seq_dict = read_fasta_file(rna_seq_file)
protein_set = set()
inter_pair = {}
new_pair = {}
with open(pair_file, 'r') as fp:
for line in fp:
values = line.rstrip().split()
protein = values[0]
protein_set.add(protein)
rna = values[1]
inter_pair.setdefault(rna, []).append(protein)
new_pair.setdefault(protein, []).append(rna)
for protein, rna in new_pair.items():
if len(rna) > 2000:
print(protein)
return inter_pair, rna_seq_dict, protein_set, new_pair
def er_get_all_rna_mildata(seqs, training_val_indice, test_indice):
index = 0
train_seqs = []
for val in training_val_indice:
train_seqs.append(seqs[val])
test_seqs = []
for val in test_indice:
test_seqs.append(seqs[val])
return train_seqs, test_seqs
criterion = nn.CrossEntropyLoss()
if torch.cuda.is_available():
cuda = True
# torch.cuda.set_device(1)
print('===> Using GPU')
else:
cuda = False
print('===> Using CPU')
def padding_sequence_new(seq, max_len=101, repkey='N'):
seq_len = len(seq)
new_seq = seq
if seq_len < max_len:
gap_len = max_len - seq_len
new_seq = seq + repkey * gap_len
return new_seq
def get_bag_data(data, channel=7, window_size=101):
bags = []
seqs = data["seq"]
labels = data["Y"]
for seq in seqs:
# pdb.set_trace()
bag_seqs = split_overlap_seq(seq, window_size=window_size)
# flat_array = []
bag_subt = []
for bag_seq in bag_seqs:
tri_fea = get_RNA_seq_concolutional_array(bag_seq)
bag_subt.append(tri_fea.T)
num_of_ins = len(bag_subt)
if num_of_ins > channel:
start = int((num_of_ins - channel) / 2)
bag_subt = bag_subt[start: start + channel]
if len(bag_subt) < channel:
rand_more = channel - len(bag_subt)
for ind in range(rand_more):
# bag_subt.append(random.choice(bag_subt))
tri_fea = get_RNA_seq_concolutional_array('N' * window_size)
bag_subt.append(tri_fea.T)
bags.append(np.array(bag_subt))
return bags, labels
def split_overlap_seq(seq, window_size=101):
overlap_size = 20
# pdb.set_trace()
bag_seqs = []
seq_len = len(seq)
if seq_len >= window_size:
num_ins = (seq_len - window_size) / (window_size - overlap_size) + 1
remain_ins = (seq_len - window_size) % (window_size - overlap_size)
else:
num_ins = 0
bag = []
end = 0
for ind in range(int(num_ins)):
start = end - overlap_size
if start < 0:
start = 0
end = start + window_size
subseq = seq[start:end]
bag_seqs.append(subseq)
if num_ins == 0:
seq1 = seq
pad_seq = padding_sequence_new(seq1)
bag_seqs.append(pad_seq)
else:
if remain_ins > 10:
# pdb.set_trace()
# start = len(seq) -window_size
seq1 = seq[-window_size:]
pad_seq = padding_sequence_new(seq1, max_len=window_size)
bag_seqs.append(pad_seq)
return bag_seqs
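# Example (illustrative): with window_size=101 and overlap 20 the stride is 81 nt, so a
# 263-nt sequence yields int((263-101)/81) + 1 = 3 overlapping windows and no remainder.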
def get_RNA_seq_concolutional_array(seq, motif_len=4):
seq = seq.replace('U', 'T')
alpha = 'ACGT'
# for seq in seqs:
# for key, seq in seqs.iteritems():
row = (len(seq) + 2 * motif_len - 2)
new_array = np.zeros((row, 4))
for i in range(motif_len - 1):
new_array[i] = np.array([0.25] * 4)
for i in range(row - 3, row):
new_array[i] = np.array([0.25] * 4)
# pdb.set_trace()
for i, val in enumerate(seq):
i = i + motif_len - 1
if val not in 'ACGT':
new_array[i] = np.array([0.25] * 4)
continue
# if val == 'N' or i < motif_len or i > len(seq) - motif_len:
# new_array[i] = np.array([0.25]*4)
# else:
try:
index = alpha.index(val)
new_array[i][index] = 1
except:
pdb.set_trace()
# data[key] = new_array
return new_array
def padding_sequence(seq, max_len=501, repkey='N'):
seq_len = len(seq)
if seq_len < max_len:
gap_len = max_len - seq_len
new_seq = seq + repkey * gap_len
else:
new_seq = seq[:max_len]
return new_seq
def get_class_weight(df_y):
y_classes = df_y.idxmax(1, skipna=False)
from sklearn.preprocessing import LabelEncoder
# Instantiate the label encoder
le = LabelEncoder()
# Fit the label encoder to our label series
le.fit(list(y_classes))
# Create integer based labels Series
y_integers = le.transform(list(y_classes))
# Create dict of labels : integer representation
labels_and_integers = dict(zip(y_classes, y_integers))
from sklearn.utils.class_weight import compute_class_weight, compute_sample_weight
class_weights = compute_class_weight('balanced', np.unique(y_integers), y_integers)
sample_weights = compute_sample_weight('balanced', y_integers)
class_weights_dict = dict(zip(le.transform(list(le.classes_)), class_weights))
return class_weights_dict
def get_all_rna_mildata(data, max_len=501):
train_bags, train_labels = get_bag_data_1_channel(data, max_len)
return train_bags, train_labels # , test_bags, test_labels
def get_bag_data_1_channel(data, max_len=501):
bags = []
seqs = data["seq"]
labels = data["Y"]
for seq in seqs:
# pdb.set_trace()
# bag_seqs = split_overlap_seq(seq)
bag_seq = padding_sequence(seq, max_len=max_len)
# flat_array = []
bag_subt = []
# for bag_seq in bag_seqs:
tri_fea = get_RNA_seq_concolutional_array(bag_seq)
bag_subt.append(tri_fea.T)
bags.append(np.array(bag_subt))
return bags, labels
def get_domain_features(in_file='rbps_HT.txt'):
protein_list = []
with open('protein_list', 'r') as fp:
for line in fp:
protein_list.append(line[1:-1])
domain_dict = {}
fp = open(in_file, 'r')
index = 0
for line in fp:
values = line.rstrip().split()
vals = [float(val) for val in values]
domain_dict[protein_list[index]] = vals
index = index + 1
fp.close()
return domain_dict
def runRBP47():
inter_pair_dict, rna_seq_dict, protein_set, new_pair = load_rnacomend_data()
protein_list = []
for protein in protein_set:
protein_list.append(protein)
runEDCNN(inter_pair_dict, rna_seq_dict, protein_list, new_pair)
def train_network(model_type, X_train, y_train, channel=7, window_size=107, model_file='model.pkl', batch_size=100,
n_epochs=50, num_filters=16):
print('model training for ', model_type)
if model_type == 'CNN':
model = CNN(nb_filter=num_filters, labcounts=4, window_size=window_size, channel=channel)
# model.fc1 = nn.Linear(int(1360), 200)
# model.fc2 = nn.Linear(200, 2)
cwd = os.getcwd()
path_ = cwd + '/' + model_file
if os.path.isfile(path_):
new_model = torch.load(model_file)
model.load_state_dict(new_model)
if cuda:
model = model.cuda()
clf = Estimator(model)
rand = random.random()
lr1 = random.uniform(0.01, 0.1)
lr2 = random.uniform(1e-4, 1e-3)
lr3 = random.uniform(1e-5, 1e-4)
mt = random.uniform(0.9, 0.99)
wd = random.uniform(1e-6, 1e-5)
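# Each call draws a fresh optimizer family and hyper-parameters at random, so successive
# evolutionary generations explore different training dynamics for the same weights.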
if rand < 0.2:
clf.compile(optimizer=torch.optim.SGD(model.parameters(), lr=lr2, momentum=mt, weight_decay=wd),
loss=nn.CrossEntropyLoss())
elif rand < 0.3:
clf.compile(optimizer=torch.optim.Adagrad(model.parameters(), lr=lr2, weight_decay=wd),
loss=nn.CrossEntropyLoss()) # 1e-2,lr3 work
elif rand < 0.5:
clf.compile(optimizer=torch.optim.RMSprop(model.parameters(), lr=lr3, momentum=mt, weight_decay=wd),
loss=nn.CrossEntropyLoss()) # 1e-2,1e-3 not juge,1e-4 work
elif rand < 0.7:
clf.compile(optimizer=torch.optim.Adadelta(model.parameters(), lr=lr1, weight_decay=wd),
loss=nn.CrossEntropyLoss()) # la ji youhuaqi
elif rand < 0.8:
clf.compile(optimizer=torch.optim.Adamax(model.parameters(), lr=lr3, weight_decay=wd),
loss=nn.CrossEntropyLoss()) # 2e-3, not juge,1e-4 work
else:
clf.compile(optimizer=torch.optim.Adam(model.parameters(), lr=lr3, weight_decay=wd),
loss=nn.CrossEntropyLoss()) # 1e-3,1e-4 work
clf.fit(X_train, y_train, batch_size=batch_size, nb_epoch=5)
torch.save(model.state_dict(), model_file)
model = model.cpu()
torch.cuda.empty_cache()
return model
def predict_network(model_type, X_test, channel=7, window_size=107, model_file='model.pkl', batch_size=100, n_epochs=50,
num_filters=16):
print('model training for ', model_type)
# nb_epos= 5
print(window_size)
print(num_filters)
print(channel)
print(model_file)
if model_type == 'CNN':
model = CNN(nb_filter=num_filters, labcounts=4, window_size=window_size, channel=channel)
if cuda:
model = model.cuda()
print(model.fc1)
model.load_state_dict(torch.load(model_file))
# cpy(model.state_dict(),torch.load(model_file))
model.eval()
try:
pred = model.predict_proba(X_test)
torch.cuda.empty_cache()
except: # to handle the out-of-memory when testing
test_batch = batch(X_test)
pred = []
for test in test_batch:
pred_test1 = model.predict_proba(test)[:, 1]
pred = np.concatenate((pred, pred_test1), axis=0)
torch.cuda.empty_cache()
model = model.cpu()
torch.cuda.empty_cache()
del model
gc.collect()
return pred
def individual_evaluation(model_type, X_test, channel=7, window_size=107, individual=None, batch_size=100, n_epochs=50,
num_filters=16):
print('model training for ', model_type)
# nb_epos= 5
print(window_size)
print(num_filters)
print(channel)
if model_type == 'CNN':
model = CNN(nb_filter=num_filters, labcounts=4, window_size=window_size, channel=channel)
if cuda:
model = model.cuda()
s = (12, 34.56)
for k, v in individual.items():
if type(individual[k]) == type(s):
individual[k] = individual[k][0]
model.load_state_dict(individual)
model.eval()
try:
pred = model.predict_proba(X_test)
torch.cuda.empty_cache()
    except:  # to handle out-of-memory errors when testing
test_batch = batch(X_test)
pred = []
for test in test_batch:
pred_test1 = model.predict_proba(test)[:, 1]
pred = np.concatenate((pred, pred_test1), axis=0)
torch.cuda.empty_cache()
model = model.cpu()
torch.cuda.empty_cache()
del model
gc.collect()
return pred
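# Main EDCNN routine for one protein (`dataset_name`): builds positive RNA targets and an
# equal number of random negatives, trains paired global (full-sequence) and local
# (windowed) CNNs for each individual of an evolutionary population, selects individuals
# by validation AUC, and finally reports test AUCs of the evolved models.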
def runEDCNN(inter_pair_dict, rna_seq_dict, protein_list, new_pair):
max_num_targets = 0
batch_size = 100
n_epochs = 20
num_filters = 16
window_size = 101
max_size = 501
model_type = 'CNN'
data = {}
labels = []
rna_seqs = []
protein_list.append("negative")
# tt = 0
all_hg19_utrs = read_fasta_file_new()
# for i in inter_pair_dict.keys():
# if i not in all_hg19_utrs.keys():
# tt = tt + 1
# print('142424tt', tt)
print('inter_pair_dict.keys()', len(inter_pair_dict.keys()))
print('all_hg19_utrs.keys()', len(all_hg19_utrs.keys()))
remained_rnas = list(set(all_hg19_utrs.keys()) - set(inter_pair_dict.keys()))
# pdb.set_trace()
flag = 0
for protein1, rna1 in new_pair.items():
print(protein1, len(rna1))
# print(sys.argv[1])
if protein1 == dataset_name:
flag = 1
print(protein1)
max_num_targets = len(rna1)
for i in range(len(rna1)):
rna_seq = rna_seq_dict[rna1[i]]
rna_seq = rna_seq.replace('T', 'U')
rna_seqs.append(rna_seq)
labels.append(1)
else:
continue
if flag:
break
random.shuffle(remained_rnas)
for rna in remained_rnas[:max_num_targets]:
rna_seq = all_hg19_utrs[rna]
rna_seq = rna_seq.replace('T', 'U')
rna_seqs.append(rna_seq)
labels.append(0)
# x_index = range(len(labels))
print(len(rna_seqs))
print(len(labels))
train_seqs, test_seqs, train_label, test_label = train_test_split(rna_seqs, labels, random_state=42, shuffle=True,
test_size=0.2)
print('len(train_seqs)', len(train_seqs))
print('len(test_seqs)', len(test_seqs))
train_seqs, val_seqs, train_label, val_label = train_test_split(train_seqs, train_label, random_state=42, shuffle=True,
test_size=0.1)
data = dict()
data['seq'] = train_seqs
data['Y'] = train_label
train_bags1, train_labels1 = get_bag_data_1_channel(data, max_len=501)
train_bags2, train_labels2 = get_bag_data(data, channel=7, window_size=101)
data_val = dict()
data_val['seq'] = val_seqs
data_val['Y'] = val_label
val_bags1, val_labels1 = get_bag_data_1_channel(data_val, max_len=501)
val_bags2, val_labels2 = get_bag_data(data_val, channel=7, window_size=101)
model_file = dataset_name
population = toolbox.population(u)
for k in range(20):
generation = k
print('generation %d' % (k))
for j in range(u):
print('individual:%d' % (j))
model_1 = CNN(nb_filter=num_filters, labcounts=4, window_size=window_size + 6,
channel=1)
model_1 = train_network('CNN', np.array(train_bags1), np.array(train_labels1),
channel=1,
window_size=501 + 6,
model_file=str(
j) + '.' + model_file + '.global',
batch_size=100,
n_epochs=5,
num_filters=16)
model_2 = CNN(nb_filter=num_filters, labcounts=4, window_size=window_size + 6,
channel=7)
model_2 = train_network('CNN', np.array(train_bags2), np.array(train_labels2),
channel=7,
window_size=101 + 6,
model_file=str(
j) + '.' + model_file + '.local',
batch_size=100,
n_epochs=5,
num_filters=16)
population[j][0] = model_1.state_dict()
population[j][1] = model_2.state_dict()
torch.cuda.empty_cache()
for v in range(kv):
for ind in population:
predict1 = individual_evaluation(model_type, np.array(val_bags1), channel=1,
window_size=max_size + 6,
individual=ind[0],
batch_size=batch_size,
n_epochs=n_epochs,
num_filters=num_filters)
predict2 = individual_evaluation(model_type, np.array(val_bags2), channel=7,
window_size=window_size + 6,
individual=ind[1],
batch_size=batch_size,
n_epochs=n_epochs,
num_filters=num_filters)
predict = (predict1 + predict2) / 2.0
auc = roc_auc_score(val_labels2, predict)
gg = []
gg.append(auc)
ind.fitness.values = gg
torch.cuda.empty_cache()
population.sort(key=fitscore, reverse=True)
offspring = generate_offspring(population, toolbox, 4 * u, cxpb, mutpb, k)
for ind in offspring:
predict1 = individual_evaluation(model_type, np.array(val_bags1), channel=1,
window_size=max_size + 6,
individual=ind[0],
batch_size=batch_size,
n_epochs=n_epochs,
num_filters=num_filters)
predict2 = individual_evaluation(model_type, np.array(val_bags2), channel=7,
window_size=window_size + 6,
individual=ind[1],
batch_size=batch_size,
n_epochs=n_epochs,
num_filters=num_filters)
predict = (predict1 + predict2) / 2.0
auc = roc_auc_score(val_labels2, predict)
gg = []
gg.append(auc)
ind.fitness.values = gg
torch.cuda.empty_cache()
population = population + offspring
del offspring
gc.collect()
        print('population fitness before selection')
for ind in population:
print(ind.fitness)
m = int(0.6 * u)
population.sort(key=fitscore, reverse=True)
elist = tools.selBest(population, m)
population = elist + tools.selRandom(population[m:], u - m)
        print('population fitness after selection')
for ind in population:
print(ind.fitness)
cnt = 0
for ind in population:
torch.save(ind[0], str(
cnt) + '.' + model_file + '.global')
torch.save(ind[1], str(
cnt) + '.' + model_file + '.local')
cnt = cnt + 1
torch.cuda.empty_cache()
with torch.no_grad():
data2 = dict()
data2['seq'] = test_seqs
data2['Y'] = test_label
test_bags1, test_labels1 = get_bag_data_1_channel(data2, max_len=501)
test_bags2, test_labels2 = get_bag_data(data2, channel=7, window_size=101)
auclist = []
best_score = 0
for j in range(u):
predict1 = predict_network(model_type, np.array(test_bags1), channel=1, window_size=max_size + 6,
model_file=str(
j) + '.' + model_file + '.global', batch_size=batch_size,
n_epochs=n_epochs,
num_filters=num_filters)
predict2 = predict_network(model_type, np.array(test_bags2), channel=7, window_size=window_size + 6,
model_file=str(
j) + '.' + model_file + '.local', batch_size=batch_size,
n_epochs=n_epochs,
num_filters=num_filters)
predict = (predict1 + predict2) / 2.0
auc = roc_auc_score(test_labels2, predict)
auclist.append(auc)
if auc > best_score:
best_score = auc
copyfile(str(j) + '.' + model_file + '.global',
'best' + '.' + model_file + '.global')
copyfile(str(j) + '.' + model_file + '.local',
'best' + '.' + model_file + '.local')
torch.cuda.empty_cache()
auclist.sort(reverse=True)
f1 = open(dataset_name + '.txt', 'w')
for i in auclist:
f1.write('AUC:' + str(i) + '\n')
f1.write('end')
f1.close()
gc.collect()
torch.cuda.empty_cache()
del train_bags1, train_labels1
del train_bags2, train_labels2
gc.collect()
torch.cuda.empty_cache()
def run_edcnn():
runRBP47()
run_edcnn()
| [
"torch.nn.Linear",
"torch.cuda.is_available",
"torch.load",
"torch.nn.CrossEntropyLoss",
"torch.sigmoid",
"torch.nn.MaxPool2d",
"torch.autograd.Variable",
"torch.utils.data.DataLoader",
"torch.nn.ReLU",
"torch.cuda.empty_cache",
"torch.nn.Conv2d",
"torch.no_grad",
"torch.from_numpy"
] | 1.8.1 | yaweiwang1232/EDCNN | 9f86adafe8cbefe37dc6b4929f5eded55dcf391c |
1.9 | import torch
from torch import Tensor
def generate_mask(*size: int, mask_prob: float) -> Tensor:
"""
Args:
*size (int): Shape of the expected values.
mask_prob (float): The probability of masking.
Returns:
        Tensor: Mask tensor; entries equal to 0 mark features to be masked
            (replaced), while entries equal to 1 leave the features unchanged.
Examples:
>>> generate_mask(4, mask_prob=0.5)
tensor([0, 1, 1, 0])
"""
mask = (torch.rand(*size) > mask_prob).long()
return mask
def masking(x: Tensor, x_mask: Tensor, mask: Tensor) -> Tensor:
"""
Args:
        x (Tensor): Input features.
        x_mask (Tensor): Replacement values used for the masked positions.
        mask (Tensor): Mask tensor (typically binary); 1 keeps ``x``, 0 uses ``x_mask``.
    Returns:
        Tensor: Masked features.
.. math:: x * mask + x_mask * (1 - mask).
Examples:
>>> import torch
>>> x = torch.tensor([[1, 2, 3]])
>>> x_mask = torch.tensor([[0, 0, 0]])
>>> mask = torch.tensor([[0.5, 0.5, 0.5]])
>>> masking(x, x_mask, mask)
tensor([[0.5000, 1.0000, 1.5000]])
"""
if x.dim() == 3:
mask = mask.unsqueeze(-1)
elif x.dim() > 3:
raise ValueError(f"{x.dim()}D tensor is invalid for masking")
return x * mask + x_mask * (1 - mask)
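# Combined usage sketch (the shapes below are illustrative assumptions):
#     x = torch.randn(8, 16)              # batch of 8 rows with 16 features
#     x_mask = torch.zeros_like(x)        # replacement values for masked cells
#     mask = generate_mask(8, 16, mask_prob=0.3)
#     x_corrupted = masking(x, x_mask, mask)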
| [
"torch.rand"
] | 1.9.0 | pfnet-research/deep-table | a19c0c3048484017d5f24806604c3b3470bcf550 |
1.9 | """
The models implemented here are the architectures to use the original EmbraceNets and the proposed EmbraceNet +.
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from .utils.componets import WeightedSum, EmbraceNet
import numpy as np
class ModelOne(nn.Module):
'''
    EmbraceNet
'''
def __init__(self, num_classes, input_sizes, embrace_size, docker_architecture, finalouts,
device, use_ll, ll_config, trainable_probs):
super(ModelOne, self).__init__()
self.NClasses = num_classes
self.InputSize = len(input_sizes)
self.Device = device
self.EmbNet = EmbraceNet(input_sizes, embrace_size, docker_architecture, self.Device)
self.FinalOut = finalouts
self.UseLL = use_ll
self.TrainableProbs = trainable_probs
self.initProbabilities()
if use_ll or num_classes != embrace_size:
self.UseLL = True
self.LL = self.gen_ll(ll_config, embrace_size)
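    # `config` is a small layer spec: 'D' appends nn.Dropout, 'R' appends nn.ReLU,
    # and any integer appends nn.Linear(previous_width, integer).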
    def gen_ll(self, config, embrace_size):
layers = []
inC = embrace_size
for x in config:
if x == 'D':
layers += [nn.Dropout()]
elif x == 'R':
layers += [nn.ReLU()]
else:
layers += [nn.Linear(inC, x)]
inC = x
return nn.Sequential(*layers)
def initProbabilities(self):
p = torch.ones(1, self.InputSize, dtype=torch.float)
self.p = torch.div(p, torch.sum(p, dim=-1, keepdim=True)).to(self.Device)
self.P = nn.Parameter(self.p, requires_grad=self.TrainableProbs)
def forward(self, outputs1, outputs2, available):
batch_size = outputs1[0].shape[0]
availabilities = torch.ones(batch_size , self.InputSize, dtype=torch.float, device=self.Device)
for i, av in enumerate(available):
if av == 0.0:
availabilities[:,i] = 0.0
probabilities = torch.stack([self.p]*batch_size, dim=0).view(batch_size, self.InputSize)
if self.FinalOut:
out = self.EmbNet.forward(outputs2, availabilities, probabilities)
else:
out = self.EmbNet.forward(outputs1, availabilities, probabilities)
if self.UseLL:
outl = self.LL(out)
return outl, out
return out, None
class ModelNewFour(nn.Module):
'''
    EmbraceNet+, which integrates three EmbraceNets and adds a naive concatenation and a weighted sum
'''
def __init__(self, num_classes, input_sizes, final_input_sizes,
embrace1_param, embrace2_param, embrace3_param, wsum_confg,
device, trainable_probs, useffinal, use_ws, use_ll, ll_configs):
super(ModelNewFour, self).__init__()
self.NClasses = num_classes
self.InputSize = input_sizes
self.FinalInputSize = final_input_sizes
self.Device = device
self.EmbNet1 = EmbraceNet(**embrace1_param)
self.EmbNet2 = EmbraceNet(**embrace2_param)
self.EmbNet3 = EmbraceNet(**embrace3_param)
self.WeightedSum = WeightedSum(**wsum_confg)
self.UseLL1 = use_ll[0]
self.UseLL2 = use_ll[1]
self.UseLL3 = use_ll[2]
self.UseFinalsInFinal = useffinal
self.UseWSum = use_ws
self.TrainableProbs = trainable_probs
self.initProbabilities()
if self.UseLL1:
self.LL1 = self.gen_ll(**ll_configs[0])
if self.UseLL2:
self.LL2 = self.gen_ll(**ll_configs[1])
if self.UseLL3:
self.LL3 = self.gen_ll(**ll_configs[2])
    def gen_ll(self, config, embrace_size):
layers = []
inC = embrace_size
for x in config:
if x == 'D':
layers += [nn.Dropout()]
elif x == 'R':
layers += [nn.ReLU()]
else:
layers += [nn.Linear(inC, x)]
inC = x
return nn.Sequential(*layers)
def initProbabilities(self):
p1 = torch.ones(1, self.InputSize, dtype=torch.float)
p2 = torch.ones(1, self.InputSize, dtype=torch.float)
p3 = torch.ones(1, self.FinalInputSize, dtype=torch.float)
self.p1 = torch.div(p1, torch.sum(p1, dim=-1, keepdim=True)).to(self.Device)
self.p2 = torch.div(p2, torch.sum(p2, dim=-1, keepdim=True)).to(self.Device)
self.p3 = torch.div(p3, torch.sum(p3, dim=-1, keepdim=True)).to(self.Device)
self.P1 = nn.Parameter(p1,requires_grad=self.TrainableProbs)
self.P2 = nn.Parameter(p2,requires_grad=self.TrainableProbs)
self.P3 = nn.Parameter(p3, requires_grad=self.TrainableProbs)
def forward(self, outputs1, outputs2, available):
batch_size = outputs1[0].shape[0]
availabilities = torch.ones(batch_size , self.InputSize+4, dtype=torch.float, device=self.Device)
for i, av in enumerate(available):
if av == 0.0:
availabilities[:,i] = 0.0
probabilities1 = torch.stack([self.p1]*batch_size,dim=0).view(batch_size, self.InputSize)
out1 = self.EmbNet1.forward(outputs1, availabilities[:,:-4], probabilities1)
if self.UseLL1:
out1 = self.LL1(out1)
probabilities2 = torch.stack([self.p2]*batch_size,dim=0).view(batch_size, self.InputSize)
out2 = self.EmbNet2.forward(outputs2, availabilities[:,:-4], probabilities2)
if self.UseLL2:
out2 = self.LL2(out2)
wsout = self.WeightedSum.forward(torch.stack(outputs2, dim=1), availabilities[:,:-4])
concat = torch.cat(outputs2, dim=-1)
probabilities3 = torch.stack([self.p3]*batch_size, dim=0).view(batch_size, self.FinalInputSize)
if not self.UseFinalsInFinal:
availabilities[:, -1] = 0.0
if not self.UseWSum:
availabilities[:, -2] = 0.0
out = self.EmbNet3.forward([out1,out2,wsout,concat], availabilities[:, 4:], probabilities3)
if self.UseLL3:
out = self.LL3(out)
return out, (out1, out2, wsout)
class MergeClass():
'''
This is a wrapper class for the true trainable fusion class
'''
def __init__(self, models={}, config={}, device=torch.device('cpu'),
labels={}, self_embeding=False, debug_mode=False):
'''
models : dictionary with the models already loaded and in eval mode
config : dictionary with the parameters to define the merge module
device : torch device
        labels : dictionary with the class labels used
        self_embeding : boolean, if True the embedding heuristic is used
        debug_mode : boolean, if True, prints additional summary information
'''
self.Modalities = models
self.MergeConfig = config
self.Device = device
self.MergeModel = self.get_model(self.MergeConfig)
self.Classes = labels
self.SelfEmbeddin = self_embeding
def get_model(self, config):
type = config['type']
if type == 1:
return ModelOne(**config['parameters']).to(self.Device)
elif type == 5:
return ModelNewFour(**config['parameters']).to(self.Device)
else:
raise NameError('type {} is not supported yet'.format(type))
        # models 2, 3 and 4 were discarded
def parameters(self):
return self.MergeModel.parameters()
def train(self):
self.MergeModel.train()
def eval(self):
self.MergeModel.eval()
def state_dict(self):
return self.MergeModel.state_dict()
def load_state_dict(self, dict):
self.MergeModel.load_state_dict(dict)
def forward(self, data):
availables = [1.0] *4
fb, _, mb = self.Modalities['body'].forward(data['body'])
fc, _, mc = self.Modalities['context'].forward(data['context'])
middle_out = [mb[3], mc[3]]
final_out = [fb, fc]
if data['face'].sum().item() != 0.0:
ff, mf = self.Modalities['face'].forward(data['face'])
middle_out += [mf]
final_out += [ff]
else:
availables[2] = 0.0
middle_out += [mc[3]]
final_out += [fc]
if data['joint'].sum().item() != 0.0:
fs, ms = self.Modalities['pose'].forward((data['joint'],data['bone']),0)
ms = torch.cat((ms[0], ms[1]), dim=-1)
middle_out += [ms]
final_out += [fs]
else:
availables[3] = 0.0
middle_out += [mc[3]]
final_out += [fc]
out, middle = self.MergeModel.forward(middle_out, final_out, availables)
return out, middle | [
"torch.nn.Linear",
"torch.device",
"torch.cat",
"torch.nn.Dropout",
"torch.stack",
"torch.nn.Sequential",
"torch.nn.Parameter",
"torch.ones",
"torch.nn.ReLU",
"torch.sum"
] | 1.9.0 | WGraterol64/SentiLib | 79401f8275e2ad438033b6810a4da2ae8a763d7a |
1.9 | from __future__ import division
import os
import sys
import cv2
import numpy as np
import torch
from torchvision.transforms import transforms
sys.path.append(os.path.join(os.getcwd(), 'models', 'detectors', 'yolo'))
from .utils.YOLOmodels import Darknet
from .utils.YOLOutils import load_classes, non_max_suppression
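# Keep only the detections whose predicted class id (last column) is in `classes`.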
def filter_classes(detections, classes):
mask = torch.stack([torch.stack([detections[:, -1] == cls]) for cls in classes])
mask = torch.sum(torch.squeeze(mask, dim=1), dim=0)
return detections[mask > 0]
# derived from https://github.com/ultralytics/yolov3/
def letterbox(img, new_shape=416, color=(127.5, 127.5, 127.5), mode='auto'):
# Resize a rectangular image to a 32 pixel multiple rectangle
shape = img.shape[:2] # current shape [height, width]
if isinstance(new_shape, int):
ratio = float(new_shape) / max(shape)
else:
ratio = max(new_shape) / max(shape) # ratio = new / old
new_unpad = (int(round(shape[1] * ratio)), int(round(shape[0] * ratio)))
    if mode == 'auto':  # minimum rectangle
dw = np.mod(new_shape - new_unpad[0], 32) / 2 # width padding
dh = np.mod(new_shape - new_unpad[1], 32) / 2 # height padding
    elif mode == 'square':  # square
dw = (new_shape - new_unpad[0]) / 2 # width padding
dh = (new_shape - new_unpad[1]) / 2 # height padding
else:
raise NotImplementedError
top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR) # resized, no border
img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # padded square
return img, ratio, dw, dh
# derived from https://github.com/ultralytics/yolov3/
def scale_coords(coords, from_image_shape, to_image_shape):
# Rescale coords (xyxy) from from_image_shape to to_image_shape
gain = max(from_image_shape) / max(to_image_shape) # gain = old / new
coords[:, [0, 2]] -= (from_image_shape[1] - to_image_shape[1] * gain) / 2 # x padding
coords[:, [1, 3]] -= (from_image_shape[0] - to_image_shape[0] * gain) / 2 # y padding
coords[:, :4] /= gain
coords[:, :4] = coords[:, :4].clamp(min=0)
return coords
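# Convert a batch of HxWx3 images into letterboxed, normalized (C, H, W) float tensors
# ready to be fed to the detector.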
def prepare_data(images, color_mode='BGR', new_shape=416, color=(127.5, 127.5, 127.5), mode='square'):
images_ok = np.zeros((images.shape[0], new_shape, new_shape, 3), dtype=images[0].dtype)
images_tensor = torch.zeros((images.shape[0], 3, new_shape, new_shape), dtype=torch.float32)
for i in range(len(images)):
if color_mode == 'BGR':
images[i] = cv2.cvtColor(images[i], cv2.COLOR_BGR2RGB)
elif color_mode == 'RGB':
pass
else:
raise NotImplementedError
images_ok[i], _, _, _ = letterbox(images[i], new_shape, color, mode)
images_tensor[i] = transforms.Compose([
transforms.ToPILImage(),
transforms.ToTensor(),
])(images_ok[i])
return images_tensor
class YOLOv3:
def __init__(self,
model_def="config/yolov3.cfg",
class_path="data/coco.names",
weights_path="weights/yolov3.weights",
conf_thres=0.2,
nms_thres=0.4,
img_size=416,
classes=None,
max_batch_size=16,
device=torch.device('cpu')):
self.model_def = model_def
self.weights_path = weights_path
self.class_path = class_path
self.conf_thres = conf_thres
self.nms_thres = nms_thres
self.img_size = img_size
self.max_batch_size = max_batch_size
self.device = device
# Set up model
self.model = Darknet(model_def, img_size=img_size).to(self.device)
if weights_path.endswith(".weights"):
# Load darknet weights
self.model.load_darknet_weights(weights_path)
else:
# Load checkpoint weights
self.model.load_state_dict(torch.load(weights_path))
self.model.eval() # Set in evaluation mode
self.classes_file = load_classes(class_path) # Extracts class labels from file
self.classes = classes
self.classes_id = []
for i, c in enumerate(self.classes_file):
if c in self.classes:
self.classes_id.append(i)
def predict_single(self, image, color_mode='BGR'):
return self.predict(np.expand_dims(image.copy(), axis=0), color_mode=color_mode)[0]
def predict(self, images, color_mode='BGR'):
images_rescaled = prepare_data(images.copy(), color_mode=color_mode)
with torch.no_grad():
images_rescaled = images_rescaled.to(self.device)
if len(images_rescaled) <= self.max_batch_size:
detections = self.model(images_rescaled)
else:
detections = torch.empty((images_rescaled.shape[0], 10647, 85)).to(self.device)
for i in range(0, len(images_rescaled), self.max_batch_size):
detections[i:i + self.max_batch_size] = self.model(images_rescaled[i:i + self.max_batch_size]).detach()
detections = non_max_suppression(detections, self.conf_thres, self.nms_thres)
for i in range(len(images)):
if detections[i] is not None:
detections[i] = filter_classes(detections[i], self.classes_id)
detections[i] = scale_coords(detections[i], images_rescaled[i].shape[1:], images[i].shape[:2])
return detections | [
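# Illustrative usage sketch (the file paths and class list below are assumptions, not
# files shipped with this module):
#     device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
#     detector = YOLOv3(model_def='config/yolov3.cfg',
#                       class_path='data/coco.names',
#                       weights_path='weights/yolov3.weights',
#                       classes=['person'],
#                       device=device)
#     detections = detector.predict_single(cv2.imread('image.jpg'))  # BGR input by default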
"torch.zeros",
"torch.device",
"torch.stack",
"torch.no_grad",
"torch.squeeze",
"torch.load",
"torch.empty"
] | 1.9.0 | WGraterol64/SentiLib | 79401f8275e2ad438033b6810a4da2ae8a763d7a |
1.9 | """
The model implemented here is the same as https://github.com/machine-perception-robotics-group/attention_branch_network (ResNet)
The functions for creating the model and loading the ImageNet weights have been redone.
"""
import torch
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
import math
from torchvision.models.utils import load_state_dict_from_url
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
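# Attention Branch Network (ABN) variant of ResNet: `att_layer4` produces a spatial
# attention map that (a) feeds an auxiliary classification head (`out_ax`) and
# (b) re-weights the backbone features before the final `layer4`/`fc` prediction (`out_rx`).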
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=1000):
self.inplanes = 64
super(ResNet, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0], down_size=True)
self.layer2 = self._make_layer(block, 128, layers[1], stride=2, down_size=True)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2, down_size=True)
self.att_layer4 = self._make_layer(block, 512, layers[3], stride=1, down_size=False)
self.bn_att = nn.BatchNorm2d(512 * block.expansion)
self.att_conv = nn.Conv2d(512 * block.expansion, num_classes,
kernel_size=1, padding=0, bias=False)
self.bn_att2 = nn.BatchNorm2d(num_classes)
self.att_conv2 = nn.Conv2d(num_classes, num_classes,
kernel_size=1, padding=0, bias=False)
self.att_conv3 = nn.Conv2d(num_classes, 1,
kernel_size=3, padding=1, bias=False)
self.bn_att3 = nn.BatchNorm2d(1)
self.att_gap = nn.AvgPool2d(14)
self.sigmoid = nn.Sigmoid()
self.layer4 = self._make_layer(block, 512, layers[3], stride=2, down_size=True)
self.avgpool = nn.AvgPool2d(7, stride=1)
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def _make_layer(self, block, planes, blocks, stride=1, down_size=True):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(nn.Conv2d(self.inplanes,
planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion))
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
if down_size:
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
else:
inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
fe = x
ax = self.bn_att(self.att_layer4(x))
ax = self.relu(self.bn_att2(self.att_conv(ax)))
bs, cs, ys, xs = ax.shape
self.att = self.sigmoid(self.bn_att3(self.att_conv3(ax)))
# self.att = self.att.view(bs, 1, ys, xs)
ax = self.att_conv2(ax)
ax = self.att_gap(ax)
out_ax = ax.view(ax.size(0), -1)
rx = x * self.att
rx = rx + x
per = rx
rx = self.layer4(rx)
rx = self.avgpool(rx)
rx = rx.view(rx.size(0), -1)
out_rx = self.fc(rx)
return out_rx, out_ax, [fe, self.att, per, rx]
def resnet18(pretrained=False, num_classes=1000):
model = ResNet(BasicBlock, [2, 2, 2, 2], num_classes)
if pretrained:
state_dict = load_state_dict_from_url(model_urls['resnet18'])
#model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))
currstate = model.state_dict()
ml, mm = 0, 0
for name, param in state_dict.items():
if name not in currstate:
print('no match', name)
mm += 1
continue
if isinstance(param, torch.nn.parameter.Parameter):
# backwards compatibility for serialized parameters
param = param.data
try:
currstate[name].copy_(param)
# currstate[name].requires_grad = False
ml += 1
except:
print('missing', name)
mm += 1
pass
model.load_state_dict(currstate)
        print('{} modules loaded and {} modules missing or not matching'.format(ml, mm))
return model
def resnet34(pretrained=False, num_classes=1000):
model = ResNet(BasicBlock, [3, 4, 6, 3], num_classes)
if pretrained:
state_dict = load_state_dict_from_url(model_urls['resnet34'])
#model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))
currstate = model.state_dict()
ml, mm = 0, 0
for name, param in state_dict.items():
if name not in currstate:
print('no match', name)
mm += 1
continue
if isinstance(param, torch.nn.parameter.Parameter):
# backwards compatibility for serialized parameters
param = param.data
try:
currstate[name].copy_(param)
# currstate[name].requires_grad = False
ml += 1
except:
print('missing', name)
mm += 1
pass
model.load_state_dict(currstate)
        print('{} modules loaded and {} modules missing or not matching'.format(ml, mm))
return model
def resnet50(pretrained=False, num_classes=1000):
model = ResNet(Bottleneck, [3, 4, 6, 3], num_classes)
if pretrained:
state_dict = load_state_dict_from_url(model_urls['resnet50'])
#model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))
currstate = model.state_dict()
ml, mm = 0, 0
for name, param in state_dict.items():
if name not in currstate:
print('no match', name)
mm += 1
continue
if isinstance(param, torch.nn.parameter.Parameter):
# backwards compatibility for serialized parameters
param = param.data
try:
currstate[name].copy_(param)
# currstate[name].requires_grad = False
ml += 1
except:
print('missing', name)
mm += 1
pass
model.load_state_dict(currstate)
        print('{} modules loaded and {} modules missing or not matching'.format(ml, mm))
return model
def resnet101(pretrained=False, num_classes=1000):
model = ResNet(Bottleneck, [3, 4, 23, 3], num_classes)
if pretrained:
state_dict = load_state_dict_from_url(model_urls['resnet101'])
#model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))
currstate = model.state_dict()
ml, mm = 0, 0
for name, param in state_dict.items():
if name not in currstate:
print('no match', name)
mm += 1
continue
if isinstance(param, torch.nn.parameter.Parameter):
# backwards compatibility for serialized parameters
param = param.data
try:
currstate[name].copy_(param)
# currstate[name].requires_grad = False
ml += 1
except:
print('missing', name)
mm += 1
pass
model.load_state_dict(currstate)
        print('{} modules loaded and {} modules missing or not matching'.format(ml, mm))
return model
def resnet152(pretrained=False, num_classes=1000):
    model = ResNet(Bottleneck, [3, 8, 36, 3], num_classes)
if pretrained:
state_dict = load_state_dict_from_url(model_urls['resnet152'])
#model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))
currstate = model.state_dict()
ml, mm = 0, 0
for name, param in state_dict.items():
if name not in currstate:
print('no match', name)
mm += 1
continue
if isinstance(param, torch.nn.parameter.Parameter):
# backwards compatibility for serialized parameters
param = param.data
try:
currstate[name].copy_(param)
# currstate[name].requires_grad = False
ml += 1
except:
print('missing', name)
mm += 1
pass
model.load_state_dict(currstate)
        print('{} modules loaded and {} modules missing or not matching'.format(ml, mm))
return model | [
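# Illustrative usage sketch (a 224x224 input is assumed; `pretrained=True` downloads the
# torchvision ResNet weights and copies only the layers whose names and shapes match):
#     model = resnet50(pretrained=False, num_classes=1000)
#     logits, att_logits, (features, attention, perception, embedding) = model(
#         torch.randn(1, 3, 224, 224))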
"torch.nn.Linear",
"torch.nn.MaxPool2d",
"torch.nn.Sigmoid",
"torch.nn.AvgPool2d",
"torch.nn.BatchNorm2d",
"torch.nn.Sequential",
"torch.nn.init.kaiming_normal_",
"torch.nn.init.constant_",
"torch.nn.ReLU",
"torch.nn.Conv2d"
] | 1.9.0 | WGraterol64/SentiLib | 79401f8275e2ad438033b6810a4da2ae8a763d7a |
1.4 | import torch
import numpy as np
from copy import deepcopy
from typing import Dict, Tuple, Union, Optional
from tianshou.policy import BasePolicy
from tianshou.exploration import BaseNoise, GaussianNoise
from tianshou.data import Batch, ReplayBuffer, to_torch_as
class DDPGPolicy(BasePolicy):
"""Implementation of Deep Deterministic Policy Gradient. arXiv:1509.02971
:param torch.nn.Module actor: the actor network following the rules in
:class:`~tianshou.policy.BasePolicy`. (s -> logits)
:param torch.optim.Optimizer actor_optim: the optimizer for actor network.
:param torch.nn.Module critic: the critic network. (s, a -> Q(s, a))
:param torch.optim.Optimizer critic_optim: the optimizer for critic
network.
:param float tau: param for soft update of the target network, defaults to
0.005.
:param float gamma: discount factor, in [0, 1], defaults to 0.99.
:param BaseNoise exploration_noise: the exploration noise,
add to the action, defaults to ``GaussianNoise(sigma=0.1)``.
:param action_range: the action range (minimum, maximum).
:type action_range: (float, float)
:param bool reward_normalization: normalize the reward to Normal(0, 1),
defaults to ``False``.
:param bool ignore_done: ignore the done flag while training the policy,
defaults to ``False``.
    :param int estimation_step: greater than or equal to 1, the number of steps to look
ahead.
.. seealso::
Please refer to :class:`~tianshou.policy.BasePolicy` for more detailed
explanation.
"""
def __init__(self,
actor: torch.nn.Module,
actor_optim: torch.optim.Optimizer,
critic: torch.nn.Module,
critic_optim: torch.optim.Optimizer,
tau: float = 0.005,
gamma: float = 0.99,
exploration_noise: Optional[BaseNoise]
= GaussianNoise(sigma=0.1),
action_range: Optional[Tuple[float, float]] = None,
reward_normalization: bool = False,
ignore_done: bool = False,
estimation_step: int = 1,
**kwargs) -> None:
super().__init__(**kwargs)
if actor is not None:
self.actor, self.actor_old = actor, deepcopy(actor)
self.actor_old.eval()
self.actor_optim = actor_optim
if critic is not None:
self.critic, self.critic_old = critic, deepcopy(critic)
self.critic_old.eval()
self.critic_optim = critic_optim
        assert 0 <= tau <= 1, 'tau should be in [0, 1]'
self._tau = tau
        assert 0 <= gamma <= 1, 'gamma should be in [0, 1]'
self._gamma = gamma
self._noise = exploration_noise
assert action_range is not None
self._range = action_range
self._action_bias = (action_range[0] + action_range[1]) / 2
self._action_scale = (action_range[1] - action_range[0]) / 2
        # using rand_normal instead makes only a small difference
# self.noise = OUNoise()
self._rm_done = ignore_done
self._rew_norm = reward_normalization
        assert estimation_step > 0, 'estimation_step should be greater than 0'
self._n_step = estimation_step
def set_exp_noise(self, noise: Optional[BaseNoise]) -> None:
"""Set the exploration noise."""
self._noise = noise
def train(self, mode=True) -> torch.nn.Module:
"""Set the module in training mode, except for the target network."""
self.training = mode
self.actor.train(mode)
self.critic.train(mode)
return self
def sync_weight(self) -> None:
"""Soft-update the weight for the target network."""
for o, n in zip(self.actor_old.parameters(), self.actor.parameters()):
o.data.copy_(o.data * (1 - self._tau) + n.data * self._tau)
for o, n in zip(
self.critic_old.parameters(), self.critic.parameters()):
o.data.copy_(o.data * (1 - self._tau) + n.data * self._tau)
def _target_q(self, buffer: ReplayBuffer,
indice: np.ndarray) -> torch.Tensor:
batch = buffer[indice] # batch.obs_next: s_{t+n}
with torch.no_grad():
target_q = self.critic_old(batch.obs_next, self(
batch, model='actor_old', input='obs_next',
explorating=False).act)
return target_q
def process_fn(self, batch: Batch, buffer: ReplayBuffer,
indice: np.ndarray) -> Batch:
if self._rm_done:
batch.done = batch.done * 0.
batch = self.compute_nstep_return(
batch, buffer, indice, self._target_q,
self._gamma, self._n_step, self._rew_norm)
return batch
def forward(self, batch: Batch,
state: Optional[Union[dict, Batch, np.ndarray]] = None,
model: str = 'actor',
input: str = 'obs',
explorating: bool = True,
**kwargs) -> Batch:
"""Compute action over the given batch data.
:return: A :class:`~tianshou.data.Batch` which has 2 keys:
* ``act`` the action.
* ``state`` the hidden state.
.. seealso::
Please refer to :meth:`~tianshou.policy.BasePolicy.forward` for
more detailed explanation.
"""
model = getattr(self, model)
obs = getattr(batch, input)
actions, h = model(obs, state=state, info=batch.info)
actions += self._action_bias
if self.training and explorating:
actions += to_torch_as(self._noise(actions.shape), actions)
actions = actions.clamp(self._range[0], self._range[1])
return Batch(act=actions, state=h)
def learn(self, batch: Batch, **kwargs) -> Dict[str, float]:
current_q = self.critic(batch.obs, batch.act).flatten()
target_q = batch.returns.flatten()
td = current_q - target_q
critic_loss = (td.pow(2) * batch.weight).mean()
batch.weight = td # prio-buffer
self.critic_optim.zero_grad()
critic_loss.backward()
self.critic_optim.step()
action = self(batch, explorating=False).act
actor_loss = -self.critic(batch.obs, action).mean()
self.actor_optim.zero_grad()
actor_loss.backward()
self.actor_optim.step()
self.sync_weight()
return {
'loss/actor': actor_loss.item(),
'loss/critic': critic_loss.item(),
}
| [
"torch.no_grad"
] | 1.4.0 | youkaichao/tianshou | a9f9940d17e0ec464611f9fa535de391e62fe8bd |
1.0 | from typing import Any, Callable, Optional
import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.utils.data import DataLoader, TensorDataset
from tqdm import tqdm
from autoencoders.dae import DenoisingAutoencoder
from autoencoders.sdae import StackedDenoisingAutoEncoder
def train(
dataset: torch.utils.data.Dataset,
autoencoder: torch.nn.Module,
epochs: int,
batch_size: int,
optimizer: torch.optim.Optimizer,
scheduler: Any = None,
validation: Optional[torch.utils.data.Dataset] = None,
corruption: Optional[float] = None,
cuda: bool = True,
sampler: Optional[torch.utils.data.sampler.Sampler] = None,
silent: bool = False,
update_freq: Optional[int] = 1,
update_callback: Optional[Callable[[float, float], None]] = None,
num_workers: Optional[int] = None,
epoch_callback: Optional[Callable[[int, torch.nn.Module], None]] = None,
) -> None:
"""
Function to train an autoencoder using the provided dataset. If the dataset consists of 2-tuples or lists of
(feature, prediction), then the prediction is stripped away.
:param dataset: training Dataset, consisting of tensors shape [batch_size, features]
:param autoencoder: autoencoder to train
:param epochs: number of training epochs
:param batch_size: batch size for training
:param optimizer: optimizer to use
:param scheduler: scheduler to use, or None to disable, defaults to None
:param corruption: proportion of masking corruption to apply, set to None to disable, defaults to None
:param validation: instance of Dataset to use for validation, set to None to disable, defaults to None
:param cuda: whether CUDA is used, defaults to True
:param sampler: sampler to use in the DataLoader, set to None to disable, defaults to None
:param silent: set to True to prevent printing out summary statistics, defaults to False
    :param update_freq: frequency of batches with which to update counter, set to None to disable, defaults to 1
:param update_callback: optional function of loss and validation loss to update
:param num_workers: optional number of workers to use for data loading
:param epoch_callback: optional function of epoch and model
:return: None
"""
dataloader = DataLoader(
dataset,
batch_size=batch_size,
pin_memory=False,
sampler=sampler,
shuffle=True if sampler is None else False,
num_workers=num_workers if num_workers is not None else 0,
)
if validation is not None:
validation_loader = DataLoader(
validation,
batch_size=batch_size,
pin_memory=False,
sampler=None,
shuffle=False,
)
else:
validation_loader = None
loss_function = nn.MSELoss()
autoencoder.train()
validation_loss_value = -1
loss_value = 0
for epoch in range(epochs):
if scheduler is not None:
scheduler.step()
data_iterator = tqdm(
dataloader,
leave=True,
unit="batch",
postfix={"epo": epoch, "lss": "%.6f" % 0.0, "vls": "%.6f" % -1,},
disable=silent,
)
for index, batch in enumerate(data_iterator):
            if isinstance(batch, (tuple, list)) and len(batch) in [1, 2]:
batch = batch[0]
if cuda:
batch = batch.cuda(non_blocking=True)
# run the batch through the autoencoder and obtain the output
if corruption is not None:
output = autoencoder(F.dropout(batch, corruption))
else:
output = autoencoder(batch)
loss = loss_function(output, batch)
# accuracy = pretrain_accuracy(output, batch)
loss_value = float(loss.item())
optimizer.zero_grad()
loss.backward()
optimizer.step(closure=None)
data_iterator.set_postfix(
epo=epoch, lss="%.6f" % loss_value, vls="%.6f" % validation_loss_value,
)
if update_freq is not None and epoch % update_freq == 0:
if validation_loader is not None:
validation_output = predict(
validation,
autoencoder,
batch_size,
cuda=cuda,
silent=True,
encode=False,
)
validation_inputs = []
for val_batch in validation_loader:
if (
isinstance(val_batch, tuple) or isinstance(val_batch, list)
) and len(val_batch) in [1, 2]:
validation_inputs.append(val_batch[0])
else:
validation_inputs.append(val_batch)
validation_actual = torch.cat(validation_inputs)
if cuda:
validation_actual = validation_actual.cuda(non_blocking=True)
validation_output = validation_output.cuda(non_blocking=True)
validation_loss = loss_function(validation_output, validation_actual)
# validation_accuracy = pretrain_accuracy(validation_output, validation_actual)
validation_loss_value = float(validation_loss.item())
data_iterator.set_postfix(
epo=epoch,
lss="%.6f" % loss_value,
vls="%.6f" % validation_loss_value,
)
autoencoder.train()
else:
validation_loss_value = -1
# validation_accuracy = -1
data_iterator.set_postfix(
epo=epoch, lss="%.6f" % loss_value, vls="%.6f" % -1,
)
if update_callback is not None:
update_callback(
epoch,
optimizer.param_groups[0]["lr"],
loss_value,
validation_loss_value,
)
if epoch_callback is not None:
autoencoder.eval()
epoch_callback(epoch, autoencoder)
autoencoder.train()
def pretrain(
dataset,
autoencoder: StackedDenoisingAutoEncoder,
epochs: int,
batch_size: int,
optimizer: Callable[[torch.nn.Module], torch.optim.Optimizer],
scheduler: Optional[Callable[[torch.optim.Optimizer], Any]] = None,
validation: Optional[torch.utils.data.Dataset] = None,
corruption: Optional[float] = None,
cuda: bool = True,
sampler: Optional[torch.utils.data.sampler.Sampler] = None,
silent: bool = False,
update_freq: Optional[int] = 1,
update_callback: Optional[Callable[[float, float], None]] = None,
num_workers: Optional[int] = None,
epoch_callback: Optional[Callable[[int, torch.nn.Module], None]] = None,
) -> None:
"""
Given an autoencoder, train it using the data provided in the dataset; for simplicity the accuracy is reported only
on the training dataset. If the training dataset is a 2-tuple or list of (feature, prediction), then the prediction
is stripped away.
:param dataset: instance of Dataset to use for training
:param autoencoder: instance of an autoencoder to train
:param epochs: number of training epochs
:param batch_size: batch size for training
:param corruption: proportion of masking corruption to apply, set to None to disable, defaults to None
:param optimizer: function taking model and returning optimizer
:param scheduler: function taking optimizer and returning scheduler, or None to disable
:param validation: instance of Dataset to use for validation
:param cuda: whether CUDA is used, defaults to True
:param sampler: sampler to use in the DataLoader, defaults to None
:param silent: set to True to prevent printing out summary statistics, defaults to False
    :param update_freq: frequency of batches with which to update counter, set to None to disable, defaults to 1
:param update_callback: function of loss and validation loss to update
:param num_workers: optional number of workers to use for data loading
:param epoch_callback: function of epoch and model
:return: None
"""
current_dataset = dataset
current_validation = validation
number_of_subautoencoders = len(autoencoder.dimensions) - 1
for index in range(number_of_subautoencoders):
encoder, decoder = autoencoder.get_stack(index)
embedding_dimension = autoencoder.dimensions[index]
hidden_dimension = autoencoder.dimensions[index + 1]
# manual override to prevent corruption for the last subautoencoder
if index == (number_of_subautoencoders - 1):
corruption = None
# initialise the subautoencoder
sub_autoencoder = DenoisingAutoencoder(
embedding_dimension=embedding_dimension,
hidden_dimension=hidden_dimension,
activation=torch.nn.ReLU()
if index != (number_of_subautoencoders - 1)
else None,
corruption=nn.Dropout(corruption) if corruption is not None else None,
)
if cuda:
sub_autoencoder = sub_autoencoder.cuda()
ae_optimizer = optimizer(sub_autoencoder)
ae_scheduler = scheduler(ae_optimizer) if scheduler is not None else scheduler
train(
current_dataset,
sub_autoencoder,
epochs,
batch_size,
ae_optimizer,
validation=current_validation,
corruption=None, # already have dropout in the DAE
scheduler=ae_scheduler,
cuda=cuda,
sampler=sampler,
silent=silent,
update_freq=update_freq,
update_callback=update_callback,
num_workers=num_workers,
epoch_callback=epoch_callback,
)
# copy the weights
sub_autoencoder.copy_weights(encoder, decoder)
# pass the dataset through the encoder part of the subautoencoder
if index != (number_of_subautoencoders - 1):
current_dataset = TensorDataset(
predict(
current_dataset,
sub_autoencoder,
batch_size,
cuda=cuda,
silent=silent,
)
)
if current_validation is not None:
current_validation = TensorDataset(
predict(
current_validation,
sub_autoencoder,
batch_size,
cuda=cuda,
silent=silent,
)
)
else:
current_dataset = None # minor optimisation on the last subautoencoder
current_validation = None
def predict(
dataset: torch.utils.data.Dataset,
model: torch.nn.Module,
batch_size: int,
cuda: bool = True,
silent: bool = False,
encode: bool = True,
) -> torch.Tensor:
"""
Given a dataset, run the model in evaluation mode with the inputs in batches and concatenate the
output.
:param dataset: evaluation Dataset
:param model: autoencoder for prediction
:param batch_size: batch size
:param cuda: whether CUDA is used, defaults to True
:param silent: set to True to prevent printing out summary statistics, defaults to False
:param encode: whether to encode or use the full autoencoder
:return: predicted features from the Dataset
"""
dataloader = DataLoader(
dataset, batch_size=batch_size, pin_memory=False, shuffle=False
)
data_iterator = tqdm(dataloader, leave=False, unit="batch", disable=silent,)
features = []
if isinstance(model, torch.nn.Module):
model.eval()
for batch in data_iterator:
        if isinstance(batch, (tuple, list)) and len(batch) in [1, 2]:
batch = batch[0]
if cuda:
batch = batch.cuda(non_blocking=True)
batch = batch.squeeze(1).view(batch.size(0), -1)
if encode:
output = model.encode(batch)
else:
output = model(batch)
features.append(
output.detach().cpu()
) # move to the CPU to prevent out of memory on the GPU
return torch.cat(features)
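# Illustrative end-to-end sketch (assumes `sdae` is a StackedDenoisingAutoEncoder and
# `ds_train` a Dataset of flat feature tensors; the optimizer settings are only examples):
#     pretrain(ds_train, sdae, epochs=10, batch_size=256,
#              optimizer=lambda m: torch.optim.SGD(m.parameters(), lr=0.1, momentum=0.9),
#              corruption=0.2, cuda=torch.cuda.is_available())
#     train(ds_train, sdae, epochs=10, batch_size=256,
#           optimizer=torch.optim.SGD(sdae.parameters(), lr=0.1, momentum=0.9),
#           cuda=torch.cuda.is_available())
#     embeddings = predict(ds_train, sdae, batch_size=256, cuda=torch.cuda.is_available())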
| [
"torch.cat",
"torch.nn.Dropout",
"torch.nn.MSELoss",
"torch.nn.functional.dropout",
"torch.nn.ReLU",
"torch.utils.data.DataLoader"
] | 1.0.0 | viniciusdsmello/pt-autoencoders | c06a12cdf71ef9301543c07c7af96d9740e264b8 |
1.2 | import copy
from pathlib import Path
import shutil
import tempfile
import torch
from tonks.ensemble.models import BertResnetEnsembleForMultiTaskClassification
def test_exporting_and_loading_works_correctly():
image_task_dict = {
'task1_task2': {
'fake_attribute1': 10,
'fake_attribute2': 3
},
'task3': {
'fake_attribute3': 4
}
}
model = BertResnetEnsembleForMultiTaskClassification(
image_task_dict=image_task_dict
)
model_id = 27
test_dir = Path() / tempfile.mkdtemp()
model.export(test_dir, model_id)
new_model = BertResnetEnsembleForMultiTaskClassification(
image_task_dict=image_task_dict
)
new_model.load_state_dict(torch.load(
test_dir / f'multi_task_ensemble_{model_id}.pth',
map_location=lambda storage,
loc: storage
))
shutil.rmtree(test_dir)
for original_val, new_val in zip(model.state_dict().values(), new_model.state_dict().values()):
assert torch.equal(original_val, new_val)
def test_exporting_does_not_modify_original():
image_task_dict = {
'task1_task2': {
'fake_attribute1': 10,
'fake_attribute2': 3
},
'task3': {
'fake_attribute3': 4
}
}
model = BertResnetEnsembleForMultiTaskClassification(
image_task_dict=image_task_dict
)
model_copy = copy.deepcopy(model)
model_id = 27
test_dir = tempfile.mkdtemp()
model.export(test_dir, model_id)
shutil.rmtree(test_dir)
for orig_item, copy_item in zip(model.state_dict().items(), model_copy.state_dict().items()):
assert orig_item[0] == copy_item[0]
assert torch.equal(orig_item[1], copy_item[1])
| [
"torch.equal",
"torch.load"
] | 1.2.0 | vanderveld/tonks | e87afbd9614b276b443b4a7527fd1fda01a8be4c |
1.0 | # coding=utf-8
# Copyright 2021, Google and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch PEGASUS model. """
import pdb
import copy
import math
import random
from typing import Optional, Tuple
import numpy as np
import torch
import torch.nn.functional as F
import torch.utils.checkpoint
from torch import nn
from torch.nn import CrossEntropyLoss
from ...activations import ACT2FN
from ...file_utils import (
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from ...modeling_outputs import (
BaseModelOutput,
BaseModelOutputWithPastAndCrossAttentions,
CausalLMOutputWithCrossAttentions,
Seq2SeqLMOutput,
Seq2SeqModelOutput,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_pegasus import PegasusConfig
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "PegasusConfig"
_TOKENIZER_FOR_DOC = "PegasusTokenizer"
PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST = [
"google/pegasus-large",
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
]
# Copied from transformers.models.bart.modeling_bart.shift_tokens_right
def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int):
"""
Shift input ids one token to the right.
"""
shifted_input_ids = input_ids.new_zeros(input_ids.shape)
shifted_input_ids[:, 1:] = input_ids[:, :-1].clone()
shifted_input_ids[:, 0] = decoder_start_token_id
assert pad_token_id is not None, "self.model.config.pad_token_id has to be defined."
# replace possible -100 values in labels by `pad_token_id`
shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
return shifted_input_ids
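# Worked example (illustrative): with decoder_start_token_id=0 and pad_token_id=1,
#     input_ids = [[5, 6, 7]]  ->  shifted_input_ids = [[0, 5, 6]]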
# Copied from transformers.models.bart.modeling_bart._make_causal_mask
def _make_causal_mask(input_ids_shape: torch.Size, dtype: torch.dtype, past_key_values_length: int = 0):
"""
Make causal mask used for bi-directional self-attention.
"""
bsz, tgt_len = input_ids_shape
mask = torch.full((tgt_len, tgt_len), float("-inf"))
mask_cond = torch.arange(mask.size(-1))
mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
mask = mask.to(dtype)
if past_key_values_length > 0:
mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype), mask], dim=-1)
return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)
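# Example (illustrative): for tgt_len=3 and past_key_values_length=0 the per-head mask is
#     [[0., -inf, -inf],
#      [0.,   0., -inf],
#      [0.,   0.,   0.]]
# broadcast to shape (bsz, 1, 3, 3), i.e. each position may only attend to itself and
# earlier positions.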
# Copied from transformers.models.bart.modeling_bart._expand_mask
def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
"""
Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
"""
bsz, src_len = mask.size()
tgt_len = tgt_len if tgt_len is not None else src_len
expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
inverted_mask = 1.0 - expanded_mask
return inverted_mask.masked_fill(inverted_mask.bool(), torch.finfo(dtype).min)
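# Example (illustrative): mask = [[1, 1, 0]] (last token is padding) expands so that every
# target position sees [0, 0, finfo(dtype).min] over the source positions, i.e. 0 where
# attention is allowed and a very large negative value on the padded token.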
# Copied from transformers.models.marian.modeling_marian.MarianSinusoidalPositionalEmbedding with Marian->Pegasus
class PegasusSinusoidalPositionalEmbedding(nn.Embedding):
"""This module produces sinusoidal positional embeddings of any length."""
def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int] = None):
super().__init__(num_positions, embedding_dim)
self.weight = self._init_weight(self.weight)
@staticmethod
def _init_weight(out: nn.Parameter):
"""
Identical to the XLM create_sinusoidal_embeddings except features are not interleaved. The cos features are in
the 2nd half of the vector. [dim // 2:]
"""
n_pos, dim = out.shape
position_enc = np.array(
[[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)]
)
out.requires_grad = False # set early to avoid an error in pytorch-1.8+
sentinel = dim // 2 if dim % 2 == 0 else (dim // 2) + 1
out[:, 0:sentinel] = torch.FloatTensor(np.sin(position_enc[:, 0::2]))
out[:, sentinel:] = torch.FloatTensor(np.cos(position_enc[:, 1::2]))
out.detach_()
return out
@torch.no_grad()
def forward(self, input_ids_shape: torch.Size, past_key_values_length: int = 0):
"""`input_ids_shape` is expected to be [bsz x seqlen]."""
bsz, seq_len = input_ids_shape[:2]
positions = torch.arange(
past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=self.weight.device
)
return super().forward(positions)
# Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->Pegasus
class PegasusAttention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(
self,
embed_dim: int,
num_heads: int,
dropout: float = 0.0,
is_decoder: bool = False,
bias: bool = True,
):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
assert (
self.head_dim * num_heads == self.embed_dim
), f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {num_heads})."
self.scaling = self.head_dim ** -0.5
self.is_decoder = is_decoder
self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
def forward(
self,
hidden_states: torch.Tensor,
key_value_states: Optional[torch.Tensor] = None,
past_key_value: Optional[Tuple[torch.Tensor]] = None,
attention_mask: Optional[torch.Tensor] = None,
layer_head_mask: Optional[torch.Tensor] = None,
output_attentions: bool = False,
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
"""Input shape: Batch x Time x Channel"""
# if key_value_states are provided this layer is used as a cross-attention layer
# for the decoder
is_cross_attention = key_value_states is not None
bsz, tgt_len, embed_dim = hidden_states.size()
# get query proj
query_states = self.q_proj(hidden_states) * self.scaling
# get key, value proj
if is_cross_attention and past_key_value is not None:
# reuse k,v, cross_attentions
key_states = past_key_value[0]
value_states = past_key_value[1]
elif is_cross_attention:
# cross_attentions
key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
elif past_key_value is not None:
# reuse k, v, self_attention
key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
key_states = torch.cat([past_key_value[0], key_states], dim=2)
value_states = torch.cat([past_key_value[1], value_states], dim=2)
else:
# self_attention
key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
if self.is_decoder:
# if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
# Further calls to cross_attention layer can then reuse all cross-attention
# key/value_states (first "if" case)
# if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
# all previous decoder key/value_states. Further calls to uni-directional self-attention
# can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
# if encoder bi-directional self-attention `past_key_value` is always `None`
past_key_value = (key_states, value_states)
proj_shape = (bsz * self.num_heads, -1, self.head_dim)
query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
key_states = key_states.view(*proj_shape)
value_states = value_states.view(*proj_shape)
src_len = key_states.size(1)
attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
assert attn_weights.size() == (
bsz * self.num_heads,
tgt_len,
src_len,
), f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is {attn_weights.size()}"
if attention_mask is not None:
assert attention_mask.size() == (
bsz,
1,
tgt_len,
src_len,
), f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
attn_weights = F.softmax(attn_weights, dim=-1)
if layer_head_mask is not None:
assert layer_head_mask.size() == (
self.num_heads,
), f"Head mask for a single layer should be of size {(self.num_heads,)}, but is {layer_head_mask.size()}"
attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
if output_attentions:
            # this operation is a bit awkward, but it's required to
            # make sure that attn_weights keeps its gradient.
            # In order to do so, attn_weights has to be reshaped
            # twice and reused in the following
attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
else:
attn_weights_reshaped = None
attn_probs = F.dropout(attn_weights, p=self.dropout, training=self.training)
attn_output = torch.bmm(attn_probs, value_states)
assert attn_output.size() == (
bsz * self.num_heads,
tgt_len,
self.head_dim,
), f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is {attn_output.size()}"
attn_output = (
attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
.transpose(1, 2)
.reshape(bsz, tgt_len, embed_dim)
)
attn_output = self.out_proj(attn_output)
return attn_output, attn_weights_reshaped, past_key_value
# Copied from transformers.models.mbart.modeling_mbart.MBartEncoderLayer with MBart->Pegasus
class PegasusEncoderLayer(nn.Module):
def __init__(self, config: PegasusConfig):
super().__init__()
self.embed_dim = config.d_model
self.self_attn = PegasusAttention(
embed_dim=self.embed_dim,
num_heads=config.encoder_attention_heads,
dropout=config.attention_dropout,
)
self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.dropout = config.dropout
self.activation_fn = ACT2FN[config.activation_function]
self.activation_dropout = config.activation_dropout
self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)
self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)
self.final_layer_norm = nn.LayerNorm(self.embed_dim)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: torch.Tensor,
layer_head_mask: torch.Tensor,
output_attentions: bool = False,
):
"""
Args:
hidden_states (:obj:`torch.FloatTensor`): input to the layer of shape `(seq_len, batch, embed_dim)`
attention_mask (:obj:`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
layer_head_mask (:obj:`torch.FloatTensor`): mask for attention heads in a given layer of size
`(config.encoder_attention_heads,)`.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under
returned tensors for more detail.
"""
residual = hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
hidden_states, attn_weights, _ = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
layer_head_mask=layer_head_mask,
output_attentions=output_attentions,
)
hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.final_layer_norm(hidden_states)
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = F.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.fc2(hidden_states)
hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
if hidden_states.dtype == torch.float16 and (
torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()
):
clamp_value = torch.finfo(hidden_states.dtype).max - 1000
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
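# note: torch.finfo(torch.float16).max is 65504, so activations are clamped to roughly
# +/-64504 here to keep fp16 values finite and stop inf/nan from propagating downstream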
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs
# Copied from transformers.models.mbart.modeling_mbart.MBartDecoderLayer with MBart->Pegasus
class PegasusDecoderLayer(nn.Module):
def __init__(self, config: PegasusConfig):
super().__init__()
self.embed_dim = config.d_model
self.self_attn = PegasusAttention(
embed_dim=self.embed_dim,
num_heads=config.decoder_attention_heads,
dropout=config.attention_dropout,
is_decoder=True,
)
self.dropout = config.dropout
self.activation_fn = ACT2FN[config.activation_function]
self.activation_dropout = config.activation_dropout
self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.encoder_attn = PegasusAttention(
self.embed_dim,
config.decoder_attention_heads,
dropout=config.attention_dropout,
is_decoder=True,
)
self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)
self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)
self.final_layer_norm = nn.LayerNorm(self.embed_dim)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
layer_head_mask: Optional[torch.Tensor] = None,
encoder_layer_head_mask: Optional[torch.Tensor] = None,
past_key_value: Optional[Tuple[torch.Tensor]] = None,
output_attentions: Optional[bool] = False,
use_cache: Optional[bool] = True,
):
"""
Args:
hidden_states (:obj:`torch.FloatTensor`): input to the layer of shape `(seq_len, batch, embed_dim)`
attention_mask (:obj:`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
encoder_hidden_states (:obj:`torch.FloatTensor`): cross attention input to the layer of shape `(seq_len, batch, embed_dim)`
encoder_attention_mask (:obj:`torch.FloatTensor`): encoder attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
layer_head_mask (:obj:`torch.FloatTensor`): mask for attention heads in a given layer of size
`(config.encoder_attention_heads,)`.
encoder_layer_head_mask (:obj:`torch.FloatTensor`): mask for encoder attention heads in a given layer of
size `(config.encoder_attention_heads,)`.
past_key_value (:obj:`Tuple(torch.FloatTensor)`): cached past key and value projection states
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under
returned tensors for more detail.
"""
residual = hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
# Self Attention
# decoder uni-directional self-attention cached key/values tuple is at positions 1,2
self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
# add present self-attn cache to positions 1,2 of present_key_value tuple
hidden_states, self_attn_weights, present_key_value = self.self_attn(
hidden_states=hidden_states,
past_key_value=self_attn_past_key_value,
attention_mask=attention_mask,
layer_head_mask=layer_head_mask,
output_attentions=output_attentions,
)
hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
# Cross-Attention Block
cross_attn_present_key_value = None
cross_attn_weights = None
if encoder_hidden_states is not None:
residual = hidden_states
hidden_states = self.encoder_attn_layer_norm(hidden_states)
# cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple
cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(
hidden_states=hidden_states,
key_value_states=encoder_hidden_states,
attention_mask=encoder_attention_mask,
layer_head_mask=layer_head_mask,
past_key_value=cross_attn_past_key_value,
output_attentions=output_attentions,
)
hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
# add cross-attn to positions 3,4 of present_key_value tuple
present_key_value = present_key_value + cross_attn_present_key_value
# Fully Connected
residual = hidden_states
hidden_states = self.final_layer_norm(hidden_states)
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = F.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.fc2(hidden_states)
hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights, cross_attn_weights)
if use_cache:
outputs += (present_key_value,)
return outputs
class PegasusPreTrainedModel(PreTrainedModel):
config_class = PegasusConfig
base_model_prefix = "model"
def _init_weights(self, module):
std = self.config.init_std
if isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=std)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, PegasusSinusoidalPositionalEmbedding):
pass
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=std)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
@property
def dummy_inputs(self):
pad_token = self.config.pad_token_id
input_ids = torch.tensor([[0, 6, 10, 4, 2], [0, 8, 12, 2, pad_token]], device=self.device)
dummy_inputs = {
"attention_mask": input_ids.ne(pad_token),
"input_ids": input_ids,
"decoder_input_ids": input_ids,
}
return dummy_inputs
PEGASUS_START_DOCSTRING = r"""
This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic
methods the library implements for all its models (such as downloading or saving, resizing the input embeddings,
pruning heads etc.)
This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__
subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to
general usage and behavior.
Parameters:
config (:class:`~transformers.PegasusConfig`):
Model configuration class with all the parameters of the model. Initializing with a config file does not
load the weights associated with the model, only the configuration. Check out the
:meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
PEGASUS_GENERATION_EXAMPLE = r"""
Summarization example::
>>> from transformers import PegasusTokenizer, PegasusForConditionalGeneration
>>> model = PegasusForConditionalGeneration.from_pretrained('google/pegasus-xsum')
>>> tokenizer = PegasusTokenizer.from_pretrained('google/pegasus-xsum')
>>> ARTICLE_TO_SUMMARIZE = (
... "PG&E stated it scheduled the blackouts in response to forecasts for high winds "
... "amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were "
... "scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow."
... )
>>> inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors='pt')
>>> # Generate Summary
>>> summary_ids = model.generate(inputs['input_ids'])
>>> print([tokenizer.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=False) for g in summary_ids])
"""
PEGASUS_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
it.
Indices can be obtained using :class:`~transformers.PegasusTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
decoder_input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, target_sequence_length)`, `optional`):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using :class:`~transformers.PegasusTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
Pegasus uses the :obj:`pad_token_id` as the starting token for :obj:`decoder_input_ids` generation. If
:obj:`past_key_values` is used, optionally only the last :obj:`decoder_input_ids` have to be input (see
:obj:`past_key_values`).
decoder_attention_mask (:obj:`torch.LongTensor` of shape :obj:`(batch_size, target_sequence_length)`, `optional`):
Default behavior: generate a tensor that ignores pad tokens in :obj:`decoder_input_ids`. Causal mask will
also be used by default.
If you want to change padding behavior, you should read :func:`modeling_pegasus._prepare_decoder_inputs`
and modify to your needs. See diagram 1 in `the paper <https://arxiv.org/abs/1910.13461>`__ for more
information on the default strategy.
head_mask (:obj:`torch.Tensor` of shape :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
decoder_head_mask (:obj:`torch.Tensor` of shape :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
encoder_outputs (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`):
Tuple consists of (:obj:`last_hidden_state`, `optional`: :obj:`hidden_states`, `optional`:
:obj:`attentions`). :obj:`last_hidden_state` of shape :obj:`(batch_size, sequence_length, hidden_size)`,
`optional`, is a sequence of hidden-states at the output of the last layer of the encoder. Used in the
cross-attention of the decoder.
past_key_values (:obj:`Tuple[Tuple[torch.Tensor]]` of length :obj:`config.n_layers` with each tuple having 2 tuples each of which has 2 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
(those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert :obj:`input_ids` indices into associated
vectors than the model's internal embedding lookup matrix.
decoder_inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, target_sequence_length, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`decoder_input_ids` you can choose to directly pass an embedded
representation. If :obj:`past_key_values` is used, optionally only the last :obj:`decoder_inputs_embeds`
have to be input (see :obj:`past_key_values`). This is useful if you want more control over how to convert
:obj:`decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix.
If :obj:`decoder_input_ids` and :obj:`decoder_inputs_embeds` are both unset, :obj:`decoder_inputs_embeds`
takes the value of :obj:`inputs_embeds`.
use_cache (:obj:`bool`, `optional`):
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
decoding (see :obj:`past_key_values`).
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
class PegasusEncoder(PegasusPreTrainedModel):
"""
Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
:class:`PegasusEncoderLayer`.
Args:
config: PegasusConfig
embed_tokens (torch.nn.Embedding): output embedding
"""
def __init__(self, config: PegasusConfig, embed_tokens: Optional[nn.Embedding] = None):
super().__init__(config)
self.dropout = config.dropout
self.layerdrop = config.encoder_layerdrop
embed_dim = config.d_model
self.padding_idx = config.pad_token_id
self.max_source_positions = config.max_position_embeddings
self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0
if embed_tokens is not None:
self.embed_tokens = embed_tokens
else:
self.embed_tokens = nn.Embedding(config.vocab_size, embed_dim, self.padding_idx)
self.embed_positions = PegasusSinusoidalPositionalEmbedding(
config.max_position_embeddings,
embed_dim,
self.padding_idx,
)
self.layers = nn.ModuleList([PegasusEncoderLayer(config) for _ in range(config.encoder_layers)])
self.layer_norm = nn.LayerNorm(config.d_model)
self.init_weights()
def forward(
self,
input_ids=None,
attention_mask=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using :class:`~transformers.PegasusTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__`
for details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
head_mask (:obj:`torch.Tensor` of shape :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded
representation. This is useful if you want more control over how to convert :obj:`input_ids` indices
into associated vectors than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under
returned tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors
for more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# retrieve input_ids and inputs_embeds
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
# change
try:
embed_pos = self.embed_positions(input_shape)
except Exception:
# debugging hook left in by the repo author: drop into pdb if the positional embedding
# lookup fails (e.g. when the input is longer than `max_position_embeddings`)
print("Embed Position Err")
import pdb; pdb.set_trace()
hidden_states = inputs_embeds + embed_pos
hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training)
# expand attention_mask
if attention_mask is not None:
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
attention_mask = _expand_mask(attention_mask, inputs_embeds.dtype)
encoder_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
# check if head_mask has a correct number of layers specified if desired
if head_mask is not None:
assert head_mask.size()[0] == (
len(self.layers)
), f"The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}."
for idx, encoder_layer in enumerate(self.layers):
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
dropout_probability = random.uniform(0, 1)
if self.training and (dropout_probability < self.layerdrop): # skip the layer
layer_outputs = (None, None)
else:
if getattr(self.config, "gradient_checkpointing", False) and self.training:
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs, output_attentions)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(encoder_layer),
hidden_states,
attention_mask,
(head_mask[idx] if head_mask is not None else None),
)
else:
layer_outputs = encoder_layer(
hidden_states,
attention_mask,
layer_head_mask=(head_mask[idx] if head_mask is not None else None),
output_attentions=output_attentions,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
hidden_states = self.layer_norm(hidden_states)
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
return BaseModelOutput(
last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
)
class PegasusDecoder(PegasusPreTrainedModel):
"""
Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a :class:`PegasusDecoderLayer`
Args:
config: PegasusConfig
embed_tokens (torch.nn.Embedding): output embedding
"""
def __init__(self, config: PegasusConfig, embed_tokens: Optional[nn.Embedding] = None):
super().__init__(config)
self.dropout = config.dropout
self.layerdrop = config.decoder_layerdrop
self.padding_idx = config.pad_token_id
self.max_target_positions = config.max_position_embeddings
self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0
if embed_tokens is not None:
self.embed_tokens = embed_tokens
else:
self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model, self.padding_idx)
self.embed_positions = PegasusSinusoidalPositionalEmbedding(
config.max_position_embeddings,
config.d_model,
self.padding_idx,
)
self.layers = nn.ModuleList([PegasusDecoderLayer(config) for _ in range(config.decoder_layers)])
self.layer_norm = nn.LayerNorm(config.d_model)
self.init_weights()
# change
self.max_position_embeddings = config.max_position_embeddings
self.d_model = config.d_model
def get_input_embeddings(self):
return self.embed_tokens
def set_input_embeddings(self, value):
self.embed_tokens = value
# Copied from transformers.models.bart.modeling_bart.BartDecoder._prepare_decoder_attention_mask
def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length):
# create causal mask
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
combined_attention_mask = None
if input_shape[-1] > 1:
combined_attention_mask = _make_causal_mask(
input_shape, inputs_embeds.dtype, past_key_values_length=past_key_values_length
).to(self.device)
if attention_mask is not None:
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1])
combined_attention_mask = (
expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask
)
return combined_attention_mask
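# illustrative example (assuming input_shape=(1, 3), no padding and no past): the causal part
# alone is an additive mask where row t keeps positions <= t at 0 and puts a very large negative
# value (effectively -inf) on future positions before the softmax inside the attention layer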
def forward(
self,
input_ids=None,
attention_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
head_mask=None,
encoder_head_mask=None,
past_key_values=None,
inputs_embeds=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using :class:`~transformers.PegasusTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__`
for details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, encoder_sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
of the decoder.
encoder_attention_mask (:obj:`torch.LongTensor` of shape :obj:`(batch_size, encoder_sequence_length)`, `optional`):
Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
head_mask (:obj:`torch.Tensor` of shape :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
encoder_head_mask (:obj:`torch.Tensor` of shape :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the attention modules in encoder to avoid performing cross-attention
on hidden heads. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
past_key_values (:obj:`Tuple[Tuple[torch.Tensor]]` of length :obj:`config.n_layers` with each tuple having 2 tuples each of which has 2 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up
decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last
:obj:`decoder_input_ids` (those that don't have their past key value states given to this model) of
shape :obj:`(batch_size, 1)` instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size,
sequence_length)`.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded
representation. This is useful if you want more control over how to convert :obj:`input_ids` indices
into associated vectors than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under
returned tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors
for more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# retrieve input_ids and inputs_embeds
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
# past_key_values_length
past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
attention_mask = self._prepare_decoder_attention_mask(
attention_mask, input_shape, inputs_embeds, past_key_values_length
)
# expand encoder attention mask
if encoder_hidden_states is not None and encoder_attention_mask is not None:
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
encoder_attention_mask = _expand_mask(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1])
# embed positions
positions = self.embed_positions(input_shape, past_key_values_length)
hidden_states = inputs_embeds + positions
hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training)
# decoder layers
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
next_decoder_cache = () if use_cache else None
# check if head_mask has a correct number of layers specified if desired
if head_mask is not None:
assert head_mask.size()[0] == (
len(self.layers)
), f"The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}."
for idx, decoder_layer in enumerate(self.layers):
# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
if output_hidden_states:
all_hidden_states += (hidden_states,)
dropout_probability = random.uniform(0, 1)
if self.training and (dropout_probability < self.layerdrop):
continue
past_key_value = past_key_values[idx] if past_key_values is not None else None
if getattr(self.config, "gradient_checkpointing", False) and self.training:
if use_cache:
logger.warning(
"`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
"`use_cache=False`..."
)
use_cache = False
def create_custom_forward(module):
def custom_forward(*inputs):
# None for past_key_value
return module(*inputs, output_attentions, use_cache)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(decoder_layer),
hidden_states,
attention_mask,
encoder_hidden_states,
encoder_attention_mask,
head_mask[idx] if head_mask is not None else None,
encoder_head_mask[idx] if encoder_head_mask is not None else None,
None,
)
else:
layer_outputs = decoder_layer(
hidden_states,
attention_mask=attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
layer_head_mask=(head_mask[idx] if head_mask is not None else None),
encoder_layer_head_mask=(encoder_head_mask[idx] if encoder_head_mask is not None else None),
past_key_value=past_key_value,
output_attentions=output_attentions,
use_cache=use_cache,
)
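# layer_outputs is (hidden_states,) followed, when requested, by (self_attn_weights,
# cross_attn_weights) and, if use_cache is True, by the present key/value tuple as its last
# element -- hence the index 3 vs. 1 when collecting the cache below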
hidden_states = layer_outputs[0]
if use_cache:
next_decoder_cache += (layer_outputs[3 if output_attentions else 1],)
if output_attentions:
all_self_attns += (layer_outputs[1],)
if encoder_hidden_states is not None:
all_cross_attentions += (layer_outputs[2],)
hidden_states = self.layer_norm(hidden_states)
# add hidden states from the last decoder layer
if output_hidden_states:
all_hidden_states += (hidden_states,)
next_cache = next_decoder_cache if use_cache else None
if not return_dict:
return tuple(
v
for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions]
if v is not None
)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=next_cache,
hidden_states=all_hidden_states,
attentions=all_self_attns,
cross_attentions=all_cross_attentions,
)
@add_start_docstrings(
"The bare PEGASUS Model outputting raw hidden-states without any specific head on top.",
PEGASUS_START_DOCSTRING,
)
class PegasusModel(PegasusPreTrainedModel):
def __init__(self, config: PegasusConfig):
super().__init__(config)
padding_idx, vocab_size = config.pad_token_id, config.vocab_size
self.shared = nn.Embedding(vocab_size, config.d_model, padding_idx)
self.encoder = PegasusEncoder(config, self.shared)
self.decoder = PegasusDecoder(config, self.shared)
self.init_weights()
def get_input_embeddings(self):
return self.shared
def set_input_embeddings(self, value):
self.shared = value
self.encoder.embed_tokens = self.shared
self.decoder.embed_tokens = self.shared
def get_encoder(self):
return self.encoder
def get_decoder(self):
return self.decoder
@add_start_docstrings_to_model_forward(PEGASUS_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
decoder_input_ids=None,
decoder_attention_mask=None,
head_mask=None,
decoder_head_mask=None,
encoder_outputs=None,
past_key_values=None,
inputs_embeds=None,
decoder_inputs_embeds=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
Returns:
Example::
>>> from transformers import PegasusTokenizer, PegasusModel
>>> tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-large")
>>> model = PegasusModel.from_pretrained("google/pegasus-large")
>>> input_ids = tokenizer("Studies have been shown that owning a dog is good for you", return_tensors="pt").input_ids # Batch size 1
>>> decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids # Batch size 1
>>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
>>> last_hidden_states = outputs.last_hidden_state
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if encoder_outputs is None:
encoder_outputs = self.encoder(
input_ids=input_ids,
attention_mask=attention_mask,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
# If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True
elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
encoder_outputs = BaseModelOutput(
last_hidden_state=encoder_outputs[0],
hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
)
# decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn)
decoder_outputs = self.decoder(
input_ids=decoder_input_ids,
attention_mask=decoder_attention_mask,
encoder_hidden_states=encoder_outputs[0],
encoder_attention_mask=attention_mask,
head_mask=decoder_head_mask,
encoder_head_mask=head_mask,
past_key_values=past_key_values,
inputs_embeds=decoder_inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
if not return_dict:
return decoder_outputs + encoder_outputs
return Seq2SeqModelOutput(
last_hidden_state=decoder_outputs.last_hidden_state,
past_key_values=decoder_outputs.past_key_values,
decoder_hidden_states=decoder_outputs.hidden_states,
decoder_attentions=decoder_outputs.attentions,
cross_attentions=decoder_outputs.cross_attentions,
encoder_last_hidden_state=encoder_outputs.last_hidden_state,
encoder_hidden_states=encoder_outputs.hidden_states,
encoder_attentions=encoder_outputs.attentions,
)
@add_start_docstrings(
"The PEGASUS Model with a language modeling head. Can be used for summarization.", PEGASUS_START_DOCSTRING
)
class PegasusForConditionalGeneration(PegasusPreTrainedModel):
base_model_prefix = "model"
_keys_to_ignore_on_load_missing = [
r"final_logits_bias",
r"encoder\.version",
r"decoder\.version",
r"lm_head\.weight",
r"embed_positions\.weight",
]
def __init__(self, config: PegasusConfig):
super().__init__(config)
self.model = PegasusModel(config)
self.register_buffer("final_logits_bias", torch.zeros((1, self.model.shared.num_embeddings)))
self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False)
self.init_weights()
def get_encoder(self):
return self.model.get_encoder()
def get_decoder(self):
return self.model.get_decoder()
def resize_token_embeddings(self, new_num_tokens: int) -> nn.Embedding:
new_embeddings = super().resize_token_embeddings(new_num_tokens)
self._resize_final_logits_bias(new_num_tokens)
return new_embeddings
def _resize_final_logits_bias(self, new_num_tokens: int) -> None:
old_num_tokens = self.final_logits_bias.shape[-1]
if new_num_tokens <= old_num_tokens:
new_bias = self.final_logits_bias[:, :new_num_tokens]
else:
extra_bias = torch.zeros((1, new_num_tokens - old_num_tokens), device=self.final_logits_bias.device)
new_bias = torch.cat([self.final_logits_bias, extra_bias], dim=1)
self.register_buffer("final_logits_bias", new_bias)
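# e.g. growing the vocabulary by seven tokens appends seven zero columns to
# `final_logits_bias`, so newly added tokens start with no additive bias on their logits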
def get_output_embeddings(self):
return self.lm_head
def set_output_embeddings(self, new_embeddings):
self.lm_head = new_embeddings
@add_start_docstrings_to_model_forward(PEGASUS_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
@add_end_docstrings(PEGASUS_GENERATION_EXAMPLE)
def forward(
self,
input_ids=None,
attention_mask=None,
decoder_input_ids=None,
decoder_attention_mask=None,
head_mask=None,
decoder_head_mask=None,
encoder_outputs=None,
past_key_values=None,
inputs_embeds=None,
decoder_inputs_embeds=None,
labels=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the masked language modeling loss. Indices should either be in ``[0, ...,
config.vocab_size]`` or -100 (see ``input_ids`` docstring). Tokens with indices set to ``-100`` are ignored
(masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``.
Returns:
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if labels is not None:
if decoder_input_ids is None:
decoder_input_ids = shift_tokens_right(
labels, self.config.pad_token_id, self.config.decoder_start_token_id
)
outputs = self.model(
input_ids,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
encoder_outputs=encoder_outputs,
decoder_attention_mask=decoder_attention_mask,
head_mask=head_mask,
decoder_head_mask=decoder_head_mask,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
decoder_inputs_embeds=decoder_inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
lm_logits = self.lm_head(outputs[0]) + self.final_logits_bias
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (lm_logits,) + outputs[1:]
return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
return Seq2SeqLMOutput(
loss=masked_lm_loss,
logits=lm_logits,
past_key_values=outputs.past_key_values,
decoder_hidden_states=outputs.decoder_hidden_states,
decoder_attentions=outputs.decoder_attentions,
cross_attentions=outputs.cross_attentions,
encoder_last_hidden_state=outputs.encoder_last_hidden_state,
encoder_hidden_states=outputs.encoder_hidden_states,
encoder_attentions=outputs.encoder_attentions,
)
def prepare_inputs_for_generation(
self,
decoder_input_ids,
past=None,
attention_mask=None,
head_mask=None,
use_cache=None,
encoder_outputs=None,
**kwargs
):
# cut decoder_input_ids if past is used
if past is not None:
decoder_input_ids = decoder_input_ids[:, -1:]
return {
"input_ids": None, # encoder_outputs is defined. input_ids not needed
"encoder_outputs": encoder_outputs,
"past_key_values": past,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"head_mask": head_mask,
"use_cache": use_cache, # change this to avoid caching (presumably for debugging)
}
def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):
return shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id)
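# e.g. labels [[8, 9, 2]] with decoder_start_token_id=0 become decoder_input_ids [[0, 8, 9]]:
# the sequence is shifted one position to the right and any -100 label positions are replaced
# by the pad token id before being fed to the decoder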
@staticmethod
def _reorder_cache(past, beam_idx):
reordered_past = ()
for layer_past in past:
# cached cross_attention states don't have to be reordered -> they are always the same
reordered_past += (
tuple(past_state.index_select(0, beam_idx) for past_state in layer_past[:2]) + layer_past[2:],
)
return reordered_past
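# e.g. with beam_idx = tensor([2, 0, 1]) the self-attention key/value caches are re-gathered
# along the batch*beam dimension so that beam 0 continues the hypothesis that previously lived
# in slot 2, while the cross-attention caches (layer_past[2:]) are kept as-is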
# Copied from transformers.models.bart.modeling_bart.BartDecoderWrapper with Bart->Pegasus
class PegasusDecoderWrapper(PegasusPreTrainedModel):
"""
This wrapper class is a helper class to correctly load pretrained checkpoints when the causal language model is
used in combination with the :class:`~transformers.EncoderDecoderModel` framework.
"""
def __init__(self, config):
super().__init__(config)
self.decoder = PegasusDecoder(config)
def forward(self, *args, **kwargs):
return self.decoder(*args, **kwargs)
# Copied from transformers.models.bart.modeling_bart.BartForCausalLM with Bart->Pegasus
class PegasusForCausalLM(PegasusPreTrainedModel):
def __init__(self, config):
super().__init__(config)
config = copy.deepcopy(config)
config.is_decoder = True
config.is_encoder_decoder = False
self.model = PegasusDecoderWrapper(config)
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.init_weights()
def get_input_embeddings(self):
return self.model.decoder.embed_tokens
def set_input_embeddings(self, value):
self.model.decoder.embed_tokens = value
def get_output_embeddings(self):
return self.lm_head
def set_output_embeddings(self, new_embeddings):
self.lm_head = new_embeddings
def set_decoder(self, decoder):
self.model.decoder = decoder
def get_decoder(self):
return self.model.decoder
@replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
head_mask=None,
encoder_head_mask=None,
past_key_values=None,
inputs_embeds=None,
labels=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using :class:`~transformers.PegasusTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__`
for details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
if the model is configured as a decoder.
encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used
in the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
head_mask (:obj:`torch.Tensor` of shape :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
encoder_head_mask (:obj:`torch.Tensor` of shape :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the attention modules in encoder to avoid performing cross-attention
on hidden heads. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up
decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last ``decoder_input_ids``
(those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
instead of all ``decoder_input_ids`` of shape :obj:`(batch_size, sequence_length)`.
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the masked language modeling loss. Indices should either be in ``[0, ...,
config.vocab_size]`` or -100 (see ``input_ids`` docstring). Tokens with indices set to ``-100`` are
ignored (masked), the loss is only computed for the tokens with labels in ``[0, ...,
config.vocab_size]``.
use_cache (:obj:`bool`, `optional`):
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
decoding (see :obj:`past_key_values`).
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under
returned tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors
for more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
Returns:
Example::
>>> from transformers import PegasusTokenizer, PegasusForCausalLM
>>> tokenizer = PegasusTokenizer.from_pretrained('google/pegasus-large')
>>> model = PegasusForCausalLM.from_pretrained('google/pegasus-large', add_cross_attention=False)
>>> assert model.config.is_decoder, f"{model.__class__} has to be configured as a decoder."
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
>>> logits = outputs.logits
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
outputs = self.model.decoder(
input_ids=input_ids,
attention_mask=attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
head_mask=head_mask,
encoder_head_mask=encoder_head_mask,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
logits = self.lm_head(outputs[0])
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[1:]
return (loss,) + output if loss is not None else output
return CausalLMOutputWithCrossAttentions(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
cross_attentions=outputs.cross_attentions,
)
def prepare_inputs_for_generation(self, input_ids, past=None, attention_mask=None, use_cache=None, **kwargs):
# if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
if attention_mask is None:
attention_mask = input_ids.new_ones(input_ids.shape)
if past:
input_ids = input_ids[:, -1:]
# first step, decoder_cached_states are empty
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"past_key_values": past,
"use_cache": use_cache,
}
@staticmethod
def _reorder_cache(past, beam_idx):
reordered_past = ()
for layer_past in past:
reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)
return reordered_past
| [
"torch.nn.Linear",
"torch.cat",
"torch.isnan",
"torch.finfo",
"torch.bmm",
"torch.nn.CrossEntropyLoss",
"torch.nn.LayerNorm",
"torch.tensor",
"torch.zeros",
"torch.nn.functional.dropout",
"torch.clamp",
"torch.isinf",
"torch.nn.functional.softmax",
"torch.arange",
"torch.no_grad",
"torch.nn.Embedding"
] | 1.0 | Nativeatom/transformers | 8df267588e1ce30b8270eeaefbcc9011d6e06b12 |
1.8 | import pytorch_lightning as pl
import torch
from pytorch_lightning import loggers as pl_loggers
# from pytorch_lightning.callbacks import ModelCheckpoint
from data.RACEDataModule import RACEDataModule
from model.BertForRace import BertForRace
if __name__ == '__main__':
tb_logger = pl_loggers.TensorBoardLogger('./result/asc01/')
pl.seed_everything(42)
model = BertForRace(
pretrained_model='./model/bert-large-uncased',
learning_rate=2e-5,
num_train_epochs=20,
train_batch_size=32,
train_all=True,
use_bert_adam=True,
)
dm = RACEDataModule(
model_name_or_path='./model/bert-large-uncased',
datasets_loader='./data/RACELocalLoader.py',
train_batch_size=32,
max_seq_length=128,
num_workers=8,
num_preprocess_processes=96,
use_sentence_selection=True,
best_k_sentences=5,
)
# checkpoint_callback = ModelCheckpoint(
# dirpath='./result/checkpoints/',
# filename='epoch{epoch:02d}',
# save_top_k=-1,
# )
trainer = pl.Trainer(
logger=tb_logger,
gpus=-1 if torch.cuda.is_available() else None,
# callbacks=[checkpoint_callback],
amp_backend='native',
amp_level='O2',
precision=16,
accelerator='ddp',
gradient_clip_val=1.0,
max_epochs=1,
plugins='ddp_sharded',
val_check_interval=0.2,
# limit_train_batches=0.1,
# limit_val_batches=0.1,
# accumulate_grad_batches=2,
)
trainer.fit(model, dm)
torch.save(model.model.state_dict(), 'pytorch_model.bin')
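# NOTE: this assumes `model.model` is the underlying HuggingFace BERT module inside the
# LightningModule, so the saved state_dict can be reloaded later without the Lightning wrapper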
trainer.test(datamodule=dm)
| [
"torch.cuda.is_available"
] | 1.8.0 | iamNCJ/bert-race-pytorch-lightning | 93abcc5d5c80790e16114fb021870593cb60f1f2 |
1.0 | import time
from threading import Thread,Event
import numpy as np
import torch
from espnet.asr.pytorch_backend.asr_init import load_trained_model
import torchaudio
import argparse
import pickle
from espnet.nets.pytorch_backend.nets_utils import pad_list
from online_inference import online_inference
def load_model(model_src, parser):
model, train_args = load_trained_model(model_src)
is_sync = 'sync' in model_src
online_model = online_inference(model, train_args, parser, is_sync)
return online_model
def apply_cmvn(mat, stats):
mean = stats[0, :-1] / stats[0, -1]
variance = stats[1, :-1] / stats[0, -1] - np.square(mean)
return np.divide(np.subtract(mat, mean), np.sqrt(variance))
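# assumes Kaldi-style CMVN stats of shape (2, feat_dim + 1): row 0 holds per-dimension sums
# with the frame count in its last column, row 1 holds per-dimension sums of squares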
def normalization(feature):
feature = torch.as_tensor(feature)
std, mean = torch.std_mean(feature, dim=0)
return (feature - mean) / std
def convert_fbank(wav_src):
wavform, sample_frequency = torchaudio.load_wav(wav_src)
feature = torchaudio.compliance.kaldi.fbank(wavform*32768, num_mel_bins=80, sample_frequency=sample_frequency, dither=1)
return torch.as_tensor(feature)
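# e.g. one second of 16 kHz audio yields a feature matrix of roughly (98, 80):
# ~10 ms frame shift, 25 ms frame length, 80 mel bins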
def get_parser():
parser = argparse.ArgumentParser()
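# NOTE: the ArgumentParser instance is only used as a plain attribute namespace here;
# the decoding options below are assigned directly instead of being parsed from the command line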
parser.ctc_weight = 0.0
parser.beam_size = 5
parser.penalty = 0.0
parser.maxlenratio= 0.0
parser.minlenratio= 0.0
parser.nbest = 1
torch.manual_seed(1)
torch.cuda.manual_seed_all(1)
return parser
def combine_all_speech(list_src):
if len(list_src)==0: return
all_feature = None
for l in list_src:
all_feature = torch.cat((all_feature,convert_fbank(l)),dim=0) if all_feature is not None else convert_fbank(l)
return all_feature
if __name__ == "__main__":
parser = get_parser()
model_src_nopitch = "model/model.last5.avg.best35"
no_pitch = load_model(model_src_nopitch, parser)
all_feature = combine_all_speech(["demo/demo%03d.wav" % (i) for i in range(1,6)])
std, mean = torch.std_mean(all_feature, dim=0)
wavform, sample_frequency = torchaudio.load_wav("demo/demo002.wav")
# 0.015 s * 16000 Hz = 240 samples
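# each chunk below spans 36 hops of 160 samples (10 ms at 16 kHz) plus 3 extra hops and a
# 240-sample (15 ms) tail, presumably as look-ahead context for the streaming encoder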
no_pitch.setup()
feature_all = None
# feature_all = torchaudio.compliance.kaldi.fbank(wavform[:,0:(36*160+240)]*32768, num_mel_bins=80, sample_frequency=sample_frequency, dither=1)
for i in range(0,14):
feature_x = torchaudio.compliance.kaldi.fbank(wavform[:,36*i*160:36*160*(i+1)+3*160+240]*32768, num_mel_bins=80, sample_frequency=sample_frequency, dither=1)
feature_all = torch.cat((feature_all,feature_x),dim=0) if feature_all is not None else feature_x
feature_x = (feature_x-mean)/std
no_pitch.get_inference_wav(feature_x)
print("".join(no_pitch.text_l), sum(no_pitch.time_l))
demo_no_pitch = (feature_all-mean)/std
no_pitch.test_recognize_speed(demo_no_pitch)
pass
| [
"torch.cat",
"torch.cuda.manual_seed_all",
"torch.manual_seed",
"torch.std_mean",
"torch.as_tensor"
] | 1.0.1 | NCTUMLlab/Chi-Hang-Leong-Online_Compressive_Transformer_for_Speech_Recognition | 3f159ba9cf1ca9baabf0782d8acef4bb7332d8b9 |
1.8 | # !/usr/bin/env python
# coding=UTF-8
"""
@Author: WEN Hao
@LastEditors: WEN Hao
@Description:
@Date: 2021-11-19
@LastEditTime: 2022-04-01
Pre-packaged model, roberta_chinanews
"""
import os
from typing import NoReturn, Optional, Union, Sequence, List
import torch
import numpy as np
import transformers
from ..hf_model import HuggingFaceNLPVictimModel
from utils.misc import nlp_cache_dir
from utils._download_data import download_if_needed
__all__ = [
"VictimRoBERTaIFeng",
]
class VictimRoBERTaIFeng(HuggingFaceNLPVictimModel):
"""
5-class classification model, Chinese
"""
__name__ = "VictimRoBERTaIFeng"
def __init__(self, path: Optional[str] = None) -> NoReturn:
""" """
self._path = path or os.path.join(
nlp_cache_dir, "roberta-base-finetuned-ifeng-chinese"
)
if not os.path.exists(self._path):
# raise ValueError("Online download of this model is not supported yet")
download_if_needed(
uri="roberta-base-finetuned-ifeng-chinese",
source="aitesting",
dst_dir=nlp_cache_dir,
extract=True,
)
model = transformers.AutoModelForSequenceClassification.from_pretrained(
self._path
)
tokenizer = transformers.AutoTokenizer.from_pretrained(self._path)
tokenizer.convert_id_to_word = tokenizer._convert_id_to_token
super().__init__(model, tokenizer)
self._pipeline = transformers.pipeline(
"sentiment-analysis", model=model, tokenizer=tokenizer
)
@torch.no_grad()
def predict(
self, sentences: Union[str, Sequence[str]], to_label: bool = False
) -> Union[int, List[int]]:
""" """
if isinstance(sentences, str):
single_prediction = True
pred = self([sentences])
else:
single_prediction = False
pred = self(sentences)
pred = np.argmax(pred.detach().cpu().numpy(), axis=1).tolist()
if to_label:
pred = [self.id2label[i] for i in pred]
if single_prediction:
pred = pred[0]
return pred
@property
def path(self) -> str:
return self._path
def extra_repr_keys(self) -> List[str]:
return ["path"]
| [
"torch.no_grad"
] | 1.8.1 | Yzx835/AISafety | eb09551814898c7f6d86641b47faf7845c948640 |
1.5 | from machin.frame.algorithms import DQN
from machin.utils.logging import default_logger as logger
from machin.model.nets import static_module_wrapper, dynamic_module_wrapper
import torch as t
import torch.nn as nn
import gym
# configurations
env = gym.make("CartPole-v0")
observe_dim = 4
action_num = 2
max_episodes = 1000
max_steps = 200
solved_reward = 190
solved_repeat = 5
# model definition
class QNet(nn.Module):
def __init__(self, state_dim, action_num):
super(QNet, self).__init__()
self.fc1 = nn.Linear(state_dim, 16)
self.fc2 = nn.Linear(16, 16)
self.fc3 = nn.Linear(16, action_num)
def forward(self, some_state):
a = t.relu(self.fc1(some_state))
a = t.relu(self.fc2(a))
return self.fc3(a)
if __name__ == "__main__":
# let framework determine input/output device based on parameter location
# a warning will be thrown.
q_net = QNet(observe_dim, action_num)
q_net_t = QNet(observe_dim, action_num)
# to mark the input/output device Manually
# will not work if you move your model to other devices
# after wrapping
# q_net = static_module_wrapper(q_net, "cpu", "cpu")
# q_net_t = static_module_wrapper(q_net_t, "cpu", "cpu")
# to mark the input/output device Automatically
# will not work if you model locates on multiple devices
# q_net = dynamic_module_wrapper(q_net)
# q_net_t = dynamic_module_wrapper(q_net_t)
dqn = DQN(q_net, q_net_t,
t.optim.Adam,
nn.MSELoss(reduction='sum'))
episode, step, reward_fulfilled = 0, 0, 0
smoothed_total_reward = 0
while episode < max_episodes:
episode += 1
total_reward = 0
terminal = False
step = 0
state = t.tensor(env.reset(), dtype=t.float32).view(1, observe_dim)
while not terminal and step <= max_steps:
step += 1
with t.no_grad():
old_state = state
# agent model inference
action = dqn.act_discrete_with_noise(
{"some_state": old_state}
)
state, reward, terminal, _ = env.step(action.item())
state = t.tensor(state, dtype=t.float32).view(1, observe_dim)
total_reward += reward
dqn.store_transition({
"state": {"some_state": old_state},
"action": {"action": action},
"next_state": {"some_state": state},
"reward": reward,
"terminal": terminal or step == max_steps
})
# update; perform more update steps for longer episodes
if episode > 100:
for _ in range(step):
dqn.update()
# show reward
smoothed_total_reward = (smoothed_total_reward * 0.9 +
total_reward * 0.1)
logger.info("Episode {} total reward={:.2f}"
.format(episode, smoothed_total_reward))
if smoothed_total_reward > solved_reward:
reward_fulfilled += 1
if reward_fulfilled >= solved_repeat:
logger.info("Environment solved!")
exit(0)
else:
reward_fulfilled = 0
| [
"torch.nn.Linear",
"torch.no_grad",
"torch.tensor",
"torch.nn.MSELoss"
] | 1.5.0 | lethaiq/machin | 7873cada457328952310394afeedcad4bb6a7c4a |
1.3 | # https://github.com/NVIDIA/apex/blob/master/examples/imagenet/main_amp.py
import csv
import argparse
import os
import random
import shutil
import time
import warnings
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.utils.data
from torch.utils.data import Dataset
import torch.utils.data.distributed
from tqdm import tqdm
from apex import amp
from apex.parallel import DistributedDataParallel
# ATTENTION: this has not been verified, because installing apex via pip failed
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('-b',
'--batch-size',
default=6400,
type=int,
metavar='N',
help='mini-batch size (default: 6400), this is the total '
'batch size of all GPUs on the current node when '
'using Data Parallel or Distributed Data Parallel')
parser.add_argument('--lr',
'--learning-rate',
default=0.1,
type=float,
metavar='LR',
help='initial learning rate',
dest='lr')
parser.add_argument('--local_rank', default=-1, type=int,
help='node rank for distributed training')
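# Launch sketch (assumes a single node with 2 GPUs; "train_script.py" is a placeholder name).
# torch.distributed.launch spawns one process per GPU and passes --local_rank to each process,
# which is exactly the argument consumed below:
#     python -m torch.distributed.launch --nproc_per_node=2 train_script.py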
def main():
args = parser.parse_args()
main_worker(args.local_rank, 2, args)
def main_worker(gpu, ngpus_per_node, args):
dist.init_process_group(backend='nccl')
torch.cuda.set_device(gpu)
model = MyModel()
model.cuda()
args.batch_size = int(args.batch_size / ngpus_per_node)
criterion = nn.CrossEntropyLoss().cuda()
optimizer = torch.optim.SGD(model.parameters(), args.lr)
model, optimizer = amp.initialize(model,
optimizer)
model = DistributedDataParallel(model)
cudnn.benchmark = True
train_dataset = MyDataset()
# num_replicas and rank are inferred from the default process group initialized above
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
train_loader = torch.utils.data.DataLoader(train_dataset,
batch_size=args.batch_size,
shuffle=(train_sampler is None),
num_workers=2,
pin_memory=True,
sampler=train_sampler)
train_loader2 = torch.utils.data.DataLoader(train_dataset,
batch_size=args.batch_size,
shuffle=True,
num_workers=2,
pin_memory=True)
for epoch in range(5):
train_sampler.set_epoch(epoch)
model.train()
for i, (data, label) in enumerate(train_loader):
data = data.cuda(non_blocking=True)
label = label.cuda(non_blocking=True)
output = model(data)
loss = criterion(output, label)
# print('epoch', epoch, 'gpu', gpu)
# params = list(model.named_parameters())
# for i in range(len(params)):
# (name, param) = params[i]
# print(name)
# print(param.grad)
print('epoch', epoch, 'iter', i, 'gpu', gpu)
print(data)
optimizer.zero_grad()
loss.backward()
optimizer.step()
# With 5 epochs and 2 GPUs, this block would run 10 times if left uncontrolled
# If you only want one GPU to do it, then:
# if gpu == 0:
# # with open('./hehe.txt', 'a') as f:
# # f.write(str(gpu)+'\n')
# time.sleep(5)
class MyModel(nn.Module):
def __init__(self):
super(MyModel, self).__init__()
self.net1 = nn.Linear(10, 10)
self.relu = nn.ReLU()
self.net2 = nn.Linear(10, 5)
def forward(self, x):
# print(x.size())
# print(x)
return self.net2(self.relu(self.net1(x)))
class MyDataset(Dataset):
def __init__(self):
super().__init__()
self.data = torch.randn(10,10)
self.data[:,0] = torch.arange(10)
self.labels = torch.ones(10).long()
def __getitem__(self, index):
return (self.data[index], self.labels[index])
def __len__(self):
return 10
if __name__ == '__main__':
main() | [
"torch.nn.Linear",
"torch.arange",
"torch.distributed.init_process_group",
"torch.nn.CrossEntropyLoss",
"torch.ones",
"torch.nn.ReLU",
"torch.cuda.set_device",
"torch.utils.data.DataLoader",
"torch.randn"
] | 1.3.0 | RunxinXu/pytorch-distributed | be1e34fee968142dc4e9b5adaf8a14b34943388d |
1.6 | r"""
Ensemble classification with Forest and Qiskit devices
=======================================================
.. meta::
:property="og:description": We demonstrate how two QPUs can be
combined in parallel to help solve a machine learning classification problem,
using PyTorch and PennyLane.
:property="og:image": https://pennylane.ai/qml/_images/ensemble_diagram.png
This tutorial outlines how two QPUs can be combined in parallel to help solve a machine learning
classification problem.
We use the ``forest.qvm`` device to simulate one QPU and the ``qiskit.aer`` device to
simulate another. Each QPU makes an independent prediction, and an ensemble model is
formed by choosing the prediction of the most confident QPU. The iris dataset is used in this
tutorial, consisting of three classes of iris flower. Using a pre-trained model and the PyTorch
interface, we'll see that ensembling allows the QPUs to specialize towards
different classes.
Let's begin by importing the prerequisite libraries:
"""
from collections import Counter
import matplotlib.pyplot as plt
import numpy as np
import pennylane as qml
import sklearn.datasets
import sklearn.decomposition
import torch
from matplotlib.lines import Line2D
from matplotlib.patches import Patch
##############################################################################
# This tutorial requires the ``pennylane-forest`` and ``pennylane-qiskit`` packages, which can be
# installed by following the instructions `here <https://pennylane.ai/install.html>`__. We also
# make use of the `PyTorch interface <https://pennylane.readthedocs.io/en/stable/introduction
# /interfaces.html>`_, which can be installed from `here
# <https://pytorch.org/get-started/locally/>`__.
#
# Load data
# ---------
#
# The next step is to load the iris dataset.
n_features = 2
n_classes = 3
n_samples = 150
data = sklearn.datasets.load_iris()
x = data["data"]
y = data["target"]
##############################################################################
# We shuffle the data and then embed the four features into a two-dimensional space for ease of
# plotting later on. The first two principal components of the data are used.
np.random.seed(1967)
x, y = zip(*np.random.permutation(list(zip(x, y))))
pca = sklearn.decomposition.PCA(n_components=n_features)
pca.fit(x)
x = pca.transform(x)
##############################################################################
# We will be encoding these two features into quantum circuits using :class:`~.pennylane.RX`
# rotations, and hence renormalize our features to be between :math:`[-\pi, \pi]`.
x_min = np.min(x, axis=0)
x_max = np.max(x, axis=0)
x = 2 * np.pi * (x - x_min) / (x_max - x_min) - np.pi
##############################################################################
# The data is split between a training and a test set. This tutorial uses a model that is
# pre-trained on the training set.
split = 125
x_train = x[:split]
x_test = x[split:]
y_train = y[:split]
y_test = y[split:]
##############################################################################
# Finally, let's take a quick look at our data:
colours = ["#ec6f86", "#4573e7", "#ad61ed"]
def plot_points(x_train, y_train, x_test, y_test):
c_train = []
c_test = []
for y in y_train:
c_train.append(colours[y])
for y in y_test:
c_test.append(colours[y])
plt.scatter(x_train[:, 0], x_train[:, 1], c=c_train)
plt.scatter(x_test[:, 0], x_test[:, 1], c=c_test, marker="x")
plt.xlabel("Feature 1", fontsize=16)
plt.ylabel("Feature 2", fontsize=16)
ax = plt.gca()
ax.set_aspect(1)
c_transparent = "#00000000"
custom_lines = [
Patch(facecolor=colours[0], edgecolor=c_transparent, label="Class 0"),
Patch(facecolor=colours[1], edgecolor=c_transparent, label="Class 1"),
Patch(facecolor=colours[2], edgecolor=c_transparent, label="Class 2"),
Line2D([0], [0], marker="o", color=c_transparent, label="Train",
markerfacecolor="black", markersize=10),
Line2D([0], [0], marker="x", color=c_transparent, label="Test",
markerfacecolor="black", markersize=10),
]
ax.legend(handles=custom_lines, bbox_to_anchor=(1.0, 0.75))
plot_points(x_train, y_train, x_test, y_test)
##############################################################################
# This plot shows us that class 0 points can be nicely separated, but that there is an overlap
# between points from classes 1 and 2.
#
# Define model
# ------------
#
# Our model is summarized in the figure below. We use two 4-qubit devices: ``4q-qvm``
# from the PennyLane-Forest plugin and ``qiskit.aer`` from the PennyLane-Qiskit plugin.
#
# Data is input using :class:`~.pennylane.RX` rotations and then a different circuit is enacted
# for each device with a unique set of trainable parameters. The output of both circuits is a
# :class:`~.pennylane.PauliZ` measurement on three of the qubits. This is then fed through a
# softmax function, resulting in two 3-dimensional probability vectors corresponding to the 3
# classes.
#
# Finally, the ensemble model chooses the QPU which is most confident about its prediction
# (i.e., the class with the highest overall probability over all QPUs) and uses that to make a
# prediction.
#
# .. figure:: /demonstrations/ensemble_multi_qpu/ensemble_diagram.png
# :width: 50%
# :align: center
#
# Quantum nodes
# ^^^^^^^^^^^^^
#
# We begin by defining the two quantum devices and the circuits to be run on them.
n_wires = 4
dev0 = qml.device("forest.qvm", device="4q-qvm")
dev1 = qml.device("qiskit.aer", wires=4)
devs = [dev0, dev1]
##############################################################################
# .. note::
# If you have access to Rigetti hardware, you can swap out ``forest.qvm`` for ``forest.qpu``
# and specify the hardware device to run on. Users with access to the IBM Q Experience can
# swap ``qiskit.aer`` for ``qiskit.ibmq`` and specify their chosen backend (see `here
# <https://pennylane-qiskit.readthedocs.io/en/latest/gettingstarted.html#ibm-q-experience>`__).
#
# .. warning::
# Rigetti's QVM and Quil Compiler services must be running for this tutorial to execute. They
# can be installed by consulting the `Rigetti documentation
# <http://docs.rigetti.com/en/stable/>`__ or, for users with Docker, by running:
#
# .. code-block:: bash
#
# docker run -d -p 5555:5555 rigetti/quilc -R -p 5555
# docker run -d -p 5000:5000 rigetti/qvm -S -p 5000
#
# The circuits for both QPUs are shown in the figure below:
#
# .. figure:: /demonstrations/ensemble_multi_qpu/diagram_circuits.png
# :width: 50%
# :align: center
def circuit0(params, x=None):
for i in range(n_wires):
qml.RX(x[i % n_features], wires=i)
qml.Rot(*params[1, 0, i], wires=i)
qml.CZ(wires=[1, 0])
qml.CZ(wires=[1, 2])
qml.CZ(wires=[3, 0])
for i in range(n_wires):
qml.Rot(*params[1, 1, i], wires=i)
return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(1)), qml.expval(qml.PauliZ(2))
def circuit1(params, x=None):
for i in range(n_wires):
qml.RX(x[i % n_features], wires=i)
qml.Rot(*params[0, 0, i], wires=i)
qml.CZ(wires=[0, 1])
qml.CZ(wires=[1, 2])
qml.CZ(wires=[1, 3])
for i in range(n_wires):
qml.Rot(*params[0, 1, i], wires=i)
return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(1)), qml.expval(qml.PauliZ(2))
##############################################################################
# We finally combine the two devices into a :class:`~.pennylane.QNodeCollection` that uses the
# PyTorch interface:
qnodes = qml.QNodeCollection(
[qml.QNode(circuit0, dev0, interface="torch"),
qml.QNode(circuit1, dev1, interface="torch")]
)
##############################################################################
# Postprocessing into a prediction
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# The ``predict_point`` function below allows us to find the ensemble prediction, as well as keep
# track of the individual predictions from each QPU.
#
# We include a ``parallel`` keyword argument for evaluating the :class:`~.pennylane.QNodeCollection`
# in a parallel asynchronous manner. This feature requires the ``dask`` library, which can be
# installed using ``pip install "dask[delayed]"``. When ``parallel=True``, we are able to make
# predictions faster because we do not need to wait for one QPU to output before running on the
# other.
def decision(softmax):
return int(torch.argmax(softmax))
def predict_point(params, x_point=None, parallel=True):
results = qnodes(params, x=x_point, parallel=parallel)
softmax = torch.nn.functional.softmax(results, dim=1)
choice = torch.where(softmax == torch.max(softmax))[0][0]
chosen_softmax = softmax[choice]
return decision(chosen_softmax), decision(softmax[0]), decision(softmax[1]), int(choice)
##############################################################################
# Next, let's define a function to make predictions over multiple data points.
def predict(params, x=None, parallel=True):
predictions_ensemble = []
predictions_0 = []
predictions_1 = []
choices = []
for i, x_point in enumerate(x):
if i % 10 == 0 and i > 0:
print("Completed up to iteration {}".format(i))
results = predict_point(params, x_point=x_point, parallel=parallel)
predictions_ensemble.append(results[0])
predictions_0.append(results[1])
predictions_1.append(results[2])
choices.append(results[3])
return predictions_ensemble, predictions_0, predictions_1, choices
##############################################################################
# Make predictions
# ----------------
#
# To test our model, we first load a pre-trained set of parameters which can also be downloaded
# by clicking :download:`here <../demonstrations/ensemble_multi_qpu/params.npy>`.
params = np.load("ensemble_multi_qpu/params.npy")
##############################################################################
# We can then make predictions for the training and test datasets.
print("Predicting on training dataset")
p_train, p_train_0, p_train_1, choices_train = predict(params, x=x_train)
print("Predicting on test dataset")
p_test, p_test_0, p_test_1, choices_test = predict(params, x=x_test)
##############################################################################
# Analyze performance
# -------------------
#
# The last thing to do is test how well the model performs. We begin by looking at the accuracy.
#
# Accuracy
# ^^^^^^^^
def accuracy(predictions, actuals):
count = 0
for i in range(len(predictions)):
if predictions[i] == actuals[i]:
count += 1
accuracy = count / (len(predictions))
return accuracy
##############################################################################
print("Training accuracy (ensemble): {}".format(accuracy(p_train, y_train)))
print("Training accuracy (QPU0): {}".format(accuracy(p_train_0, y_train)))
print("Training accuracy (QPU1): {}".format(accuracy(p_train_1, y_train)))
##############################################################################
print("Test accuracy (ensemble): {}".format(accuracy(p_test, y_test)))
print("Test accuracy (QPU0): {}".format(accuracy(p_test_0, y_test)))
print("Test accuracy (QPU1): {}".format(accuracy(p_test_1, y_test)))
##############################################################################
# These numbers tell us a few things:
#
# - On both training and test datasets, the ensemble model outperforms the predictions from each
# QPU. This provides a nice example of how QPUs can be used in parallel to gain a performance
# advantage.
#
# - The accuracy of QPU0 is much higher than the accuracy of QPU1. This does not mean that one
# device is intrinsically better than the other. In fact, another set of parameters can lead to
# QPU1 becoming more accurate. We will see in the next section that the difference in accuracy
# is due to specialization of each QPU, which leads to overall better performance of the
# ensemble model.
#
# - The test accuracy is lower than the training accuracy. Here our focus is on analyzing the
# performance of the ensemble model, rather than minimizing the generalization error.
#
# Choice of QPU
# ^^^^^^^^^^^^^
#
# Is there a link between the class of a datapoint and the QPU chosen to make the prediction in
# the ensemble model? Let's investigate.
# Combine choices_train and choices_test to simplify analysis
choices = np.append(choices_train, choices_test)
print("Choices: {}".format(choices))
print("Choices counts: {}".format(Counter(choices)))
##############################################################################
# The following lines keep track of choices and corresponding predictions in the ensemble model.
predictions = np.append(p_train, p_test)
choice_vs_prediction = np.array([(choices[i], predictions[i]) for i in range(n_samples)])
##############################################################################
# We can hence find the predictions each QPU was responsible for.
choices_vs_prediction_0 = choice_vs_prediction[choice_vs_prediction[:, 0] == 0]
choices_vs_prediction_1 = choice_vs_prediction[choice_vs_prediction[:, 0] == 1]
predictions_0 = choices_vs_prediction_0[:, 1]
predictions_1 = choices_vs_prediction_1[:, 1]
expl = "When QPU{} was chosen by the ensemble, it made the following distribution of " \
"predictions:\n{}"
print(expl.format("0", Counter(predictions_0)))
print("\n" + expl.format("1", Counter(predictions_1)))
print("\nDistribution of classes in iris dataset: {}".format(Counter(y)))
##############################################################################
# These results show us that QPU0 specializes to making predictions on classes 0 and 2,
# while QPU1 specializes to class 1.
#
# Visualization
# ^^^^^^^^^^^^^
#
# We conclude by visualizing the correct and incorrect predictions on the dataset. The following
# function plots correctly predicted points in green and incorrectly predicted points in red.
colours_prediction = {"correct": "#83b5b9", "incorrect": "#f98d91"}
markers = ["o", "v", "d"]
def plot_points_prediction(x, y, p, title):
c = {0: [], 1: [], 2: []}
x_ = {0: [], 1: [], 2: []}
for i in range(n_samples):
x_[y[i]].append(x[i])
if p[i] == y[i]:
c[y[i]].append(colours_prediction["correct"])
else:
c[y[i]].append(colours_prediction["incorrect"])
for i in range(n_classes):
x_class = np.array(x_[i])
plt.scatter(x_class[:, 0], x_class[:, 1], c=c[i], marker=markers[i])
plt.xlabel("Feature 1", fontsize=16)
plt.ylabel("Feature 2", fontsize=16)
plt.title("Predictions from {} model".format(title))
ax = plt.gca()
ax.set_aspect(1)
c_transparent = "#00000000"
custom_lines = [
Patch(
facecolor=colours_prediction["correct"],
edgecolor=c_transparent, label="Correct"
),
Patch(
facecolor=colours_prediction["incorrect"],
edgecolor=c_transparent, label="Incorrect"
),
Line2D([0], [0], marker=markers[0], color=c_transparent, label="Class 0",
markerfacecolor="black", markersize=10),
Line2D([0], [0], marker=markers[1], color=c_transparent, label="Class 1",
markerfacecolor="black", markersize=10),
Line2D([0], [0], marker=markers[2], color=c_transparent, label="Class 2",
markerfacecolor="black", markersize=10),
]
ax.legend(handles=custom_lines, bbox_to_anchor=(1.0, 0.75))
##############################################################################
# We can again compare the ensemble model with the individual models from each QPU.
plot_points_prediction(x, y, predictions, "ensemble") # ensemble
##############################################################################
plot_points_prediction(x, y, np.append(p_train_0, p_test_0), "QPU0") # QPU 0
##############################################################################
plot_points_prediction(x, y, np.append(p_train_1, p_test_1), "QPU1") # QPU 1
##############################################################################
# These plots reinforce the specialization of the two QPUs. QPU1 concentrates on doing a good job
# at predicting class 1, while QPU0 is focused on classes 0 and 2. By combining together,
# the resultant ensemble performs better.
#
# This tutorial shows how QPUs can work in parallel to realize a performance advantage. Check out
# our :doc:`tutorial_vqe_parallel` tutorial to see how multiple QPUs can be
# evaluated asynchronously to speed up calculating the potential energy surface of molecular
# hydrogen!
| [
"torch.nn.functional.softmax",
"torch.max",
"torch.argmax"
] | 1.6.0 | brettkoonce/qml | fe78edd7e724ee18acf6ea5c30fd96aafe179e09 |
1.1 | import copy
import torch
import torch.nn as nn
from transformers import BertModel, BertConfig
from torch.nn.init import xavier_uniform_
from models.decoder import TransformerDecoder
from models.encoder import Classifier, ExtTransformerEncoder
from models.optimizers import Optimizer
def build_optim(args, model, checkpoint):
""" Build optimizer """
if checkpoint is not None:
try:
optim = checkpoint['optim'][0]
except:
optim = checkpoint['optim']
saved_optimizer_state_dict = optim.optimizer.state_dict()
optim.optimizer.load_state_dict(saved_optimizer_state_dict)
if args.visible_gpus != '-1':
for state in optim.optimizer.state.values():
for k, v in state.items():
if torch.is_tensor(v):
state[k] = v.cuda()
if (optim.method == 'adam') and (len(optim.optimizer.state) < 1):
raise RuntimeError(
"Error: loaded Adam optimizer from existing model" +
" but optimizer state is empty")
else:
optim = Optimizer(
args.optim, args.lr, args.max_grad_norm,
beta1=args.beta1, beta2=args.beta2,
decay_method='noam',
warmup_steps=args.warmup_steps)
optim.set_parameters(list(model.named_parameters()))
return optim
def build_optim_bert(args, model, checkpoint):
""" Build optimizer """
if checkpoint is not None:
optim = checkpoint['optims'][0]
saved_optimizer_state_dict = optim.optimizer.state_dict()
optim.optimizer.load_state_dict(saved_optimizer_state_dict)
if args.visible_gpus != '-1':
for state in optim.optimizer.state.values():
for k, v in state.items():
if torch.is_tensor(v):
state[k] = v.cuda()
if (optim.method == 'adam') and (len(optim.optimizer.state) < 1):
raise RuntimeError(
"Error: loaded Adam optimizer from existing model" +
" but optimizer state is empty")
else:
optim = Optimizer(
args.optim, args.lr_bert, args.max_grad_norm,
beta1=args.beta1, beta2=args.beta2,
decay_method='noam',
warmup_steps=args.warmup_steps_bert)
params = [(n, p) for n, p in list(model.named_parameters()) if n.startswith('bert.model')]
optim.set_parameters(params)
return optim
def build_optim_dec(args, model, checkpoint):
""" Build optimizer """
if checkpoint is not None:
optim = checkpoint['optims'][1]
saved_optimizer_state_dict = optim.optimizer.state_dict()
optim.optimizer.load_state_dict(saved_optimizer_state_dict)
if args.visible_gpus != '-1':
for state in optim.optimizer.state.values():
for k, v in state.items():
if torch.is_tensor(v):
state[k] = v.cuda()
if (optim.method == 'adam') and (len(optim.optimizer.state) < 1):
raise RuntimeError(
"Error: loaded Adam optimizer from existing model" +
" but optimizer state is empty")
else:
optim = Optimizer(
args.optim, args.lr_dec, args.max_grad_norm,
beta1=args.beta1, beta2=args.beta2,
decay_method='noam',
warmup_steps=args.warmup_steps_dec)
params = [(n, p) for n, p in list(model.named_parameters()) if not n.startswith('bert.model')]
optim.set_parameters(params)
return optim
def get_generator(vocab_size, dec_hidden_size, device):
gen_func = nn.LogSoftmax(dim=-1)
generator = nn.Sequential(
nn.Linear(dec_hidden_size, vocab_size),
gen_func
)
generator.to(device)
return generator
class Bert(nn.Module):
def __init__(self, large, temp_dir, args=None, finetune=False):
super(Bert, self).__init__()
if(large):
self.model = BertModel.from_pretrained('bert-large-uncased', cache_dir=temp_dir)
else:
# configuration = BertConfig()
# configuration.max_position_embeddings = 1024
# import pdb;pdb.set_trace()
# self.model = BertModel.from_pretrained('bert-base-uncased', cache_dir=temp_dir, config=configuration)
self.model = BertModel.from_pretrained('bert-base-uncased', cache_dir=temp_dir)
# if(args.max_pos>512):
# my_pos_embeddings = nn.Embedding(args.max_pos, self.model.config.hidden_size)
# my_pos_embeddings.weight.data[:512] = self.model.embeddings.position_embeddings.weight.data
# my_pos_embeddings.weight.data[512:] = self.model.embeddings.position_embeddings.weight.data[-1][None,
# :].repeat(args.max_pos - 512, 1)
# self.model.embeddings.position_embeddings = my_pos_embeddings
self.finetune = finetune
def forward(self, x, segs, mask):
if(self.finetune):
top_vec = self.model(input_ids=x, token_type_ids=segs, attention_mask=mask)['last_hidden_state']
# top_vec, _ = self.model(x, token_type_ids=segs, attention_mask=mask)
else:
self.eval()
with torch.no_grad():
top_vec = self.model(input_ids=x, token_type_ids=segs, attention_mask=mask)['last_hidden_state']
# top_vec, _ = self.model(x, token_type_ids=segs, attention_mask=mask)
return top_vec
class ExtSummarizer(nn.Module):
def __init__(self, args, device, checkpoint):
super(ExtSummarizer, self).__init__()
self.args = args
self.device = device
self.bert = Bert(args.large, args.temp_dir, args=args, finetune=args.finetune_bert)
self.ext_layer = ExtTransformerEncoder(self.bert.model.config.hidden_size, args.ext_ff_size, args.ext_heads,
args.ext_dropout, args.ext_layers)
if (args.encoder == 'baseline'):
bert_config = BertConfig(self.bert.model.config.vocab_size, hidden_size=args.ext_hidden_size,
num_hidden_layers=args.ext_layers, num_attention_heads=args.ext_heads, intermediate_size=args.ext_ff_size)
self.bert.model = BertModel(bert_config)
self.ext_layer = Classifier(self.bert.model.config.hidden_size)
# state_dict['bert.embeddings.position_embeddings.weight']
# if(args.max_pos>512):
# my_pos_embeddings = nn.Embedding(args.max_pos, self.bert.model.config.hidden_size)
# my_pos_embeddings.weight.data[:512] = self.bert.model.embeddings.position_embeddings.weight.data
# my_pos_embeddings.weight.data[512:] = self.bert.model.embeddings.position_embeddings.weight.data[-1][None,:].repeat(args.max_pos-512,1)
# self.bert.model.embeddings.position_embeddings = my_pos_embeddings
# my_pos_embeddings = nn.Embedding(1024, 768)
# my_pos_embeddings.weight.data[:512] = state_dict['bert.embeddings.position_embeddings.weight']
# my_pos_embeddings.weight.data[512:] = state_dict['bert.embeddings.position_embeddings.weight'][-1][None,
# :].repeat(args.max_pos - 512, 1)
# state_dict['bert.embeddings.position_embeddings.weight'] = my_pos_embeddings
if checkpoint is not None:
self.load_state_dict(checkpoint['model'], strict=True)
else:
if args.param_init != 0.0:
for p in self.ext_layer.parameters():
p.data.uniform_(-args.param_init, args.param_init)
if args.param_init_glorot:
for p in self.ext_layer.parameters():
if p.dim() > 1:
xavier_uniform_(p)
self.to(device)
def forward(self, src, segs, clss, mask_src, mask_cls):
top_vec = self.bert(src, segs, mask_src)
sents_vec = top_vec[torch.arange(top_vec.size(0)).unsqueeze(1), clss]
sents_vec = sents_vec * mask_cls[:, :, None].float()
sent_scores = self.ext_layer(sents_vec, mask_cls).squeeze(-1)
return sent_scores, mask_cls
class AbsSummarizer(nn.Module):
def __init__(self, args, device, checkpoint=None, bert_from_extractive=None):
super(AbsSummarizer, self).__init__()
self.args = args
self.device = device
self.bert = Bert(args.large, args.temp_dir, args=args, finetune=args.finetune_bert)
if bert_from_extractive is not None:
self.bert.model.load_state_dict(dict([(n[11:], p) for n, p in bert_from_extractive.items() if n.startswith('bert.model')]), strict=True)
if (args.encoder == 'baseline'):
bert_config = BertConfig(self.bert.model.config.vocab_size, hidden_size=args.enc_hidden_size,
num_hidden_layers=args.enc_layers, num_attention_heads=8,
intermediate_size=args.enc_ff_size,
hidden_dropout_prob=args.enc_dropout,
attention_probs_dropout_prob=args.enc_dropout)
self.bert.model = BertModel(bert_config)
# if(args.max_pos>512):
# my_pos_embeddings = nn.Embedding(args.max_pos, self.bert.model.config.hidden_size)
# my_pos_embeddings.weight.data[:512] = self.bert.model.embeddings.position_embeddings.weight.data
# my_pos_embeddings.weight.data[512:] = self.bert.model.embeddings.position_embeddings.weight.data[-1][None,:].repeat(args.max_pos-512,1)
# self.bert.model.embeddings.position_embeddings = my_pos_embeddings
self.vocab_size = self.bert.model.config.vocab_size
tgt_embeddings = nn.Embedding(self.vocab_size, self.bert.model.config.hidden_size, padding_idx=0)
if (self.args.share_emb):
tgt_embeddings.weight = copy.deepcopy(self.bert.model.embeddings.word_embeddings.weight)
self.decoder = TransformerDecoder(
self.args.dec_layers,
self.args.dec_hidden_size, heads=self.args.dec_heads,
d_ff=self.args.dec_ff_size, dropout=self.args.dec_dropout, embeddings=tgt_embeddings)
self.generator = get_generator(self.vocab_size, self.args.dec_hidden_size, device)
self.generator[0].weight = self.decoder.embeddings.weight
if checkpoint is not None:
self.load_state_dict(checkpoint['model'], strict=True)
else:
for module in self.decoder.modules():
if isinstance(module, (nn.Linear, nn.Embedding)):
module.weight.data.normal_(mean=0.0, std=0.02)
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
for p in self.generator.parameters():
if p.dim() > 1:
xavier_uniform_(p)
else:
p.data.zero_()
if(args.use_bert_emb):
tgt_embeddings = nn.Embedding(self.vocab_size, self.bert.model.config.hidden_size, padding_idx=0)
tgt_embeddings.weight = copy.deepcopy(self.bert.model.embeddings.word_embeddings.weight)
self.decoder.embeddings = tgt_embeddings
self.generator[0].weight = self.decoder.embeddings.weight
self.to(device)
def forward(self, src, tgt, segs, clss, mask_src, mask_tgt, mask_cls):
top_vec = self.bert(src, segs, mask_src)
dec_state = self.decoder.init_decoder_state(src, top_vec)
decoder_outputs, state = self.decoder(tgt[:, :-1], top_vec, dec_state)
return decoder_outputs, None
| [
"torch.nn.LogSoftmax",
"torch.nn.Linear",
"torch.is_tensor",
"torch.no_grad",
"torch.nn.init.xavier_uniform_",
"torch.nn.Embedding"
] | 1.1.0 | sajastu/RedSumm2021 | 4c08d6a1f48a64774635512b16afb9c312331ce7 |
1.4 | import ipdb
import torch
import collections
import random
from torch import nn
from torch.utils.data import Dataset
from pytorch_lightning import Trainer, seed_everything
class ChurnPredictToyDataset(Dataset):
def __init__(self, data_size, seq_len, vocab_size, input_size,
mask_zero_prob=0.05,
use_transformer_feature=True,
transformer_dim=768):
random_embedding = nn.Embedding(vocab_size + 1, input_size)
self.examples = []
for i in range(data_size):
x = torch.tensor([random.randint(1, vocab_size) for _ in range(seq_len)])
x_indices = torch.tensor(range(len(x)))
zero_prob = torch.full_like(x_indices, mask_zero_prob, dtype=torch.float)
zero_mask = torch.bernoulli(zero_prob).bool()
mask_indices = x_indices[zero_mask]
x[mask_indices] = 0
zero_count = int(torch.bincount(x)[0])
if zero_count >= 1:
y = 1
else:
y = 0
embedding_x = random_embedding(x).detach()
if use_transformer_feature:
transformer_embedding = torch.zeros((seq_len, transformer_dim))
else:
transformer_embedding = torch.empty((seq_len, transformer_dim))
self.examples.append((embedding_x, transformer_embedding, torch.tensor(y)))
print(collections.Counter([int(x[2]) for x in self.examples]))
def __len__(self):
return len(self.examples)
def __getitem__(self, idx):
return self.examples[idx]
class ToyDataset1(Dataset):
def __init__(self, data_size, vocab_size, max_seq_len, padding_index, transformer_input_size,
use_transformer_feature):
seed_everything(1)
self.examples = []
int_pool = list(range(max_seq_len))
if padding_index in int_pool:
int_pool.remove(0)
for i in range(data_size):
x_pad = torch.full((max_seq_len,), padding_index, dtype=torch.long)
x_len = random.choice(int_pool)
x = torch.randint(0, vocab_size, (x_len,))
x_pad[:len(x)] = x
# # Decide y by the parity of the sum
# if int(sum(x)) % 2 == 0:
# y = torch.tensor(0)
# else:
# y = torch.tensor(1)
# # Decide y by the parity of the last element
# if int(x[-1]) % 2 == 0:
# y = torch.tensor(0)
# else:
# y = torch.tensor(1)
# # Decide y by the parity of the first element
# if int(x[0]) % 2 == 0:
# y = torch.tensor(0)
# else:
# y = torch.tensor(1)
# # Decide y by the parity of the sum of the first and last elements
# if int(x[-1] + x[0]) % 2 == 0:
# y = torch.tensor(0)
# else:
# y = torch.tensor(1)
# Decide y by the parity of the first and last elements: y = 1 only when both are odd
if int(x[-1]) % 2 == 0 or int(x[0]) % 2 == 0:
y = torch.tensor(0)
else:
y = torch.tensor(1)
# add transformer features
if use_transformer_feature:
transformer_x = torch.zeros((transformer_input_size,))
else:
transformer_x = torch.empty((transformer_input_size,))
self.examples.append(((x_pad, torch.tensor(x_len)), transformer_x, y))
def __len__(self):
return len(self.examples)
def __getitem__(self, idx):
return self.examples[idx]
class MapPreloadDataset1(Dataset):
def __init__(self, data_size, vocab_size, max_seq_len, padding_index, transformer_input_size,
use_transformer_feature):
seed_everything(1)
self.examples = []
int_pool = list(range(max_seq_len))
if padding_index in int_pool:
int_pool.remove(0)
for i in range(data_size):
x_pad = torch.randint(0, vocab_size, (max_seq_len,))
# Use the most frequent input value as the output
x_counter = list(collections.Counter(x_pad.tolist()).items())
x_counter = sorted(x_counter, key=lambda x: x[1], reverse=True)
y = torch.tensor([x_counter[0][0]])
# add transformer features
if use_transformer_feature:
transformer_x = torch.zeros((transformer_input_size,))
else:
transformer_x = torch.empty((transformer_input_size,))
self.examples.append(((x_pad, torch.tensor(max_seq_len)), transformer_x, y))
def __len__(self):
return len(self.examples)
def __getitem__(self, idx):
return self.examples[idx]
def collate_fn_variable_len(batch):
batch_x_pad = []
batch_x_len = []
batch_transformer_embedding = []
batch_y = []
for ((x_pad, x_len), transformer_x, y) in batch:
batch_x_pad.append(x_pad)
batch_x_len.append(x_len)
batch_transformer_embedding.append(transformer_x)
batch_y.append(y)
# Sort from longest to shortest
batch_x_len = torch.stack(batch_x_len)
sort_indices = torch.argsort(batch_x_len, descending=True)
batch_x_pad = torch.stack(batch_x_pad)[sort_indices]
batch_x_len = batch_x_len[sort_indices]
batch_transformer_embedding = torch.stack(batch_transformer_embedding)[sort_indices]
batch_y = torch.stack(batch_y)[sort_indices]
return ((batch_x_pad, batch_x_len), batch_transformer_embedding, batch_y)
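# Minimal wiring sketch (illustrative values) combining the variable-length toy dataset with the
# length-sorting collate function above:
#     from torch.utils.data import DataLoader
#     dataset = ToyDataset1(data_size=1000, vocab_size=50, max_seq_len=20, padding_index=0,
#                           transformer_input_size=768, use_transformer_feature=True)
#     loader = DataLoader(dataset, batch_size=32, collate_fn=collate_fn_variable_len)
#     (x_pad, x_len), transformer_x, y = next(iter(loader))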
| [
"torch.zeros",
"torch.bincount",
"torch.stack",
"torch.argsort",
"torch.full_like",
"torch.randint",
"torch.full",
"torch.tensor",
"torch.bernoulli",
"torch.empty",
"torch.nn.Embedding"
] | 1.4.0 | fuxiAIlab/PMTC | bea55d821bc8adf64044194a0b72d8ce913a6213 |
1.4 | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch - TF 2.0 general utilities."""
import os
import re
import numpy
from .utils import logging
logger = logging.get_logger(__name__)
def convert_tf_weight_name_to_pt_weight_name(tf_name, start_prefix_to_remove=""):
"""Convert a TF 2.0 model variable name into a PyTorch model weight name.
Conventions for TF 2.0 scopes -> PyTorch attribute names conversions:
- '$1___$2' is replaced by $2 (can be used to duplicate or remove layers in TF 2.0 vs PyTorch)
- '_._' is replaced by a new level separation (can be used to convert TF 2.0 lists into PyTorch nn.ModuleList)
return tuple with:
- PyTorch model weight name
- transpose: boolean indicating whether the TF 2.0 and PyTorch weight matrices are transposed with regard to each other
"""
tf_name = tf_name.replace(":0", "") # device ids
tf_name = re.sub(
r"/[^/]*___([^/]*)/", r"/\1/", tf_name
) # '$1___$2' is replaced by $2 (can be used to duplicate or remove layers in TF2.0 vs PyTorch)
tf_name = tf_name.replace(
"_._", "/"
) # '_._' is replaced by a level separation (can be used to convert TF 2.0 lists into PyTorch nn.ModuleList)
tf_name = re.sub(r"//+", "/", tf_name) # Remove empty levels at the end
tf_name = tf_name.split("/") # Convert from TF2.0 '/' separators to PyTorch '.' separators
tf_name = tf_name[1:] # Remove level zero
# When should we transpose the weights
transpose = bool(tf_name[-1] == "kernel" or "emb_projs" in tf_name or "out_projs" in tf_name)
# Convert standard TF2.0 names in PyTorch names
if tf_name[-1] == "kernel" or tf_name[-1] == "embeddings" or tf_name[-1] == "gamma":
tf_name[-1] = "weight"
if tf_name[-1] == "beta":
tf_name[-1] = "bias"
# Remove prefix if needed
tf_name = ".".join(tf_name)
if start_prefix_to_remove:
tf_name = tf_name.replace(start_prefix_to_remove, "", 1)
return tf_name, transpose
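# Illustrative example of the conversion above (hypothetical variable name):
#     convert_tf_weight_name_to_pt_weight_name(
#         "tf_bert_model/bert/encoder/layer_._0/attention/self/query/kernel:0",
#         start_prefix_to_remove="bert.")
#     -> ("encoder.layer.0.attention.self.query.weight", True)  # "kernel" implies a transpose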
#####################
# PyTorch => TF 2.0 #
#####################
def load_pytorch_checkpoint_in_tf2_model(tf_model, pytorch_checkpoint_path, tf_inputs=None, allow_missing_keys=False):
"""Load pytorch checkpoints in a TF 2.0 model"""
try:
import tensorflow as tf # noqa: F401
import torch # noqa: F401
except ImportError:
logger.error(
"Loading a PyTorch model in TensorFlow, requires both PyTorch and TensorFlow to be installed. Please see "
"https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions."
)
raise
pt_path = os.path.abspath(pytorch_checkpoint_path)
logger.info("Loading PyTorch weights from {}".format(pt_path))
pt_state_dict = torch.load(pt_path, map_location="cpu")
logger.info("PyTorch checkpoint contains {:,} parameters".format(sum(t.numel() for t in pt_state_dict.values())))
return load_pytorch_weights_in_tf2_model(
tf_model, pt_state_dict, tf_inputs=tf_inputs, allow_missing_keys=allow_missing_keys
)
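# Usage sketch (hypothetical names; requires both TensorFlow 2.x and PyTorch to be installed):
#     tf_model = TFBertForSequenceClassification(config)  # some TF 2.0 transformers model
#     tf_model = load_pytorch_checkpoint_in_tf2_model(tf_model, "path/to/pytorch_model.bin")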
def load_pytorch_model_in_tf2_model(tf_model, pt_model, tf_inputs=None, allow_missing_keys=False):
"""Load pytorch checkpoints in a TF 2.0 model"""
pt_state_dict = pt_model.state_dict()
return load_pytorch_weights_in_tf2_model(
tf_model, pt_state_dict, tf_inputs=tf_inputs, allow_missing_keys=allow_missing_keys
)
def load_pytorch_weights_in_tf2_model(tf_model, pt_state_dict, tf_inputs=None, allow_missing_keys=False):
"""Load pytorch state_dict in a TF 2.0 model."""
try:
import tensorflow as tf # noqa: F401
import torch # noqa: F401
from tensorflow.python.keras import backend as K
except ImportError:
logger.error(
"Loading a PyTorch model in TensorFlow, requires both PyTorch and TensorFlow to be installed. Please see "
"https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions."
)
raise
if tf_inputs is None:
tf_inputs = tf_model.dummy_inputs
if tf_inputs is not None:
tf_model(tf_inputs, training=False) # Make sure model is built
# Adapt state dict - TODO remove this and update the AWS weights files instead
# Convert old format to new format if needed from a PyTorch state_dict
old_keys = []
new_keys = []
for key in pt_state_dict.keys():
new_key = None
if "gamma" in key:
new_key = key.replace("gamma", "weight")
if "beta" in key:
new_key = key.replace("beta", "bias")
if new_key:
old_keys.append(key)
new_keys.append(new_key)
for old_key, new_key in zip(old_keys, new_keys):
pt_state_dict[new_key] = pt_state_dict.pop(old_key)
# Make sure we are able to load PyTorch base models as well as derived models (with heads)
# TF models always have a prefix, some of PyTorch models (base ones) don't
start_prefix_to_remove = ""
if not any(s.startswith(tf_model.base_model_prefix) for s in pt_state_dict.keys()):
start_prefix_to_remove = tf_model.base_model_prefix + "."
symbolic_weights = tf_model.trainable_weights + tf_model.non_trainable_weights
tf_loaded_numel = 0
weight_value_tuples = []
all_pytorch_weights = set(list(pt_state_dict.keys()))
missing_keys = []
for symbolic_weight in symbolic_weights:
sw_name = symbolic_weight.name
name, transpose = convert_tf_weight_name_to_pt_weight_name(
sw_name, start_prefix_to_remove=start_prefix_to_remove
)
# Find associated numpy array in pytorch model state dict
if name not in pt_state_dict:
if allow_missing_keys:
missing_keys.append(name)
continue
raise AttributeError("{} not found in PyTorch model".format(name))
array = pt_state_dict[name].numpy()
if transpose:
array = numpy.transpose(array)
if len(symbolic_weight.shape) < len(array.shape):
array = numpy.squeeze(array)
elif len(symbolic_weight.shape) > len(array.shape):
array = numpy.expand_dims(array, axis=0)
try:
assert list(symbolic_weight.shape) == list(array.shape)
except AssertionError as e:
e.args += (symbolic_weight.shape, array.shape)
raise e
tf_loaded_numel += array.size
# logger.warning("Initialize TF weight {}".format(symbolic_weight.name))
weight_value_tuples.append((symbolic_weight, array))
all_pytorch_weights.discard(name)
K.batch_set_value(weight_value_tuples)
if tf_inputs is not None:
tf_model(tf_inputs, training=False) # Make sure restore ops are run
logger.info("Loaded {:,} parameters in the TF 2.0 model.".format(tf_loaded_numel))
unexpected_keys = list(all_pytorch_weights)
if len(unexpected_keys) > 0:
logger.warning(
f"Some weights of the PyTorch model were not used when "
f"initializing the TF 2.0 model {tf_model.__class__.__name__}: {unexpected_keys}\n"
f"- This IS expected if you are initializing {tf_model.__class__.__name__} from a PyTorch model trained on another task "
f"or with another architecture (e.g. initializing a TFBertForSequenceClassification model from a BertForPretraining model).\n"
f"- This IS NOT expected if you are initializing {tf_model.__class__.__name__} from a PyTorch model that you expect "
f"to be exactly identical (e.g. initializing a TFBertForSequenceClassification model from a BertForSequenceClassification model)."
)
else:
logger.warning(f"All PyTorch model weights were used when initializing {tf_model.__class__.__name__}.\n")
if len(missing_keys) > 0:
logger.warning(
f"Some weights or buffers of the TF 2.0 model {tf_model.__class__.__name__} were not initialized from the PyTorch model "
f"and are newly initialized: {missing_keys}\n"
f"You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference."
)
else:
logger.warning(
f"All the weights of {tf_model.__class__.__name__} were initialized from the PyTorch model.\n"
f"If your task is similar to the task the model of the checkpoint was trained on, "
f"you can already use {tf_model.__class__.__name__} for predictions without further training."
)
return tf_model
#####################
# TF 2.0 => PyTorch #
#####################
def load_tf2_checkpoint_in_pytorch_model(pt_model, tf_checkpoint_path, tf_inputs=None, allow_missing_keys=False):
"""Load TF 2.0 HDF5 checkpoint in a PyTorch model
We use HDF5 to easily do transfer learning
(see https://github.com/tensorflow/tensorflow/blob/ee16fcac960ae660e0e4496658a366e2f745e1f0/tensorflow/python/keras/engine/network.py#L1352-L1357).
"""
try:
import tensorflow as tf # noqa: F401
import torch # noqa: F401
except ImportError:
logger.error(
"Loading a TensorFlow model in PyTorch, requires both PyTorch and TensorFlow to be installed. Please see "
"https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions."
)
raise
import transformers
logger.info("Loading TensorFlow weights from {}".format(tf_checkpoint_path))
# Instantiate and load the associated TF 2.0 model
tf_model_class_name = "TF" + pt_model.__class__.__name__ # Add "TF" at the beginning
tf_model_class = getattr(transformers, tf_model_class_name)
tf_model = tf_model_class(pt_model.config)
if tf_inputs is None:
tf_inputs = tf_model.dummy_inputs
if tf_inputs is not None:
tf_model(tf_inputs, training=False) # Make sure model is built
tf_model.load_weights(tf_checkpoint_path, by_name=True)
return load_tf2_model_in_pytorch_model(pt_model, tf_model, allow_missing_keys=allow_missing_keys)
def load_tf2_model_in_pytorch_model(pt_model, tf_model, allow_missing_keys=False):
"""Load TF 2.0 model in a pytorch model"""
weights = tf_model.weights
return load_tf2_weights_in_pytorch_model(pt_model, weights, allow_missing_keys=allow_missing_keys)
def load_tf2_weights_in_pytorch_model(pt_model, tf_weights, allow_missing_keys=False):
"""Load TF2.0 symbolic weights in a PyTorch model"""
try:
import tensorflow as tf # noqa: F401
import torch # noqa: F401
except ImportError:
logger.error(
"Loading a TensorFlow model in PyTorch, requires both PyTorch and TensorFlow to be installed. Please see "
"https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions."
)
raise
new_pt_params_dict = {}
current_pt_params_dict = dict(pt_model.named_parameters())
# Make sure we are able to load PyTorch base models as well as derived models (with heads)
# TF models always have a prefix, some of PyTorch models (base ones) don't
start_prefix_to_remove = ""
if not any(s.startswith(pt_model.base_model_prefix) for s in current_pt_params_dict.keys()):
start_prefix_to_remove = pt_model.base_model_prefix + "."
# Build a map from potential PyTorch weight names to TF 2.0 Variables
tf_weights_map = {}
for tf_weight in tf_weights:
pt_name, transpose = convert_tf_weight_name_to_pt_weight_name(
tf_weight.name, start_prefix_to_remove=start_prefix_to_remove
)
tf_weights_map[pt_name] = (tf_weight.numpy(), transpose)
all_tf_weights = set(list(tf_weights_map.keys()))
loaded_pt_weights_data_ptr = {}
missing_keys_pt = []
for pt_weight_name, pt_weight in current_pt_params_dict.items():
# Handle PyTorch shared weights (not duplicated in TF 2.0)
if pt_weight.data_ptr() in loaded_pt_weights_data_ptr:
new_pt_params_dict[pt_weight_name] = loaded_pt_weights_data_ptr[pt_weight.data_ptr()]
continue
# Find associated numpy array in pytorch model state dict
if pt_weight_name not in tf_weights_map:
if allow_missing_keys:
missing_keys_pt.append(pt_weight_name)
continue
raise AttributeError("{} not found in TF 2.0 model".format(pt_weight_name))
array, transpose = tf_weights_map[pt_weight_name]
if transpose:
array = numpy.transpose(array)
if len(pt_weight.shape) < len(array.shape):
array = numpy.squeeze(array)
elif len(pt_weight.shape) > len(array.shape):
array = numpy.expand_dims(array, axis=0)
try:
assert list(pt_weight.shape) == list(array.shape)
except AssertionError as e:
e.args += (pt_weight.shape, array.shape)
raise e
# logger.warning("Initialize PyTorch weight {}".format(pt_weight_name))
new_pt_params_dict[pt_weight_name] = torch.from_numpy(array)
loaded_pt_weights_data_ptr[pt_weight.data_ptr()] = torch.from_numpy(array)
all_tf_weights.discard(pt_weight_name)
missing_keys, unexpected_keys = pt_model.load_state_dict(new_pt_params_dict, strict=False)
missing_keys += missing_keys_pt
if len(unexpected_keys) > 0:
logger.warning(
f"Some weights of the TF 2.0 model were not used when "
f"initializing the PyTorch model {pt_model.__class__.__name__}: {unexpected_keys}\n"
f"- This IS expected if you are initializing {pt_model.__class__.__name__} from a TF 2.0 model trained on another task "
f"or with another architecture (e.g. initializing a BertForSequenceClassification model from a TFBertForPretraining model).\n"
f"- This IS NOT expected if you are initializing {pt_model.__class__.__name__} from a TF 2.0 model that you expect "
f"to be exactly identical (e.g. initializing a BertForSequenceClassification model from a TFBertForSequenceClassification model)."
)
else:
logger.warning(f"All TF 2.0 model weights were used when initializing {pt_model.__class__.__name__}.\n")
if len(missing_keys) > 0:
logger.warning(
f"Some weights of {pt_model.__class__.__name__} were not initialized from the TF 2.0 model "
f"and are newly initialized: {missing_keys}\n"
f"You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference."
)
else:
logger.warning(
f"All the weights of {pt_model.__class__.__name__} were initialized from the TF 2.0 model.\n"
f"If your task is similar to the task the model of the checkpoint was trained on, "
f"you can already use {pt_model.__class__.__name__} for predictions without further training."
)
logger.info("Weights or buffers not loaded from TF 2.0 model: {}".format(all_tf_weights))
return pt_model
| [
"torch.from_numpy",
"torch.load"
] | 1.4.0 | fuxiAIlab/PMTC | bea55d821bc8adf64044194a0b72d8ce913a6213 |
1.7 | import pickle
import numpy as np
import cv2
import torch
import torch.nn as nn
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
import math
import matplotlib.pyplot as plt
import torch.optim as optim
import torch.nn.functional as F
import torch.distributions as TD
from torch.autograd import Variable
from torchvision import datasets, transforms
from collections import OrderedDict
np.set_printoptions(precision=2)
torch.set_printoptions(precision=2)
print("PyTorch version:[%s]." % (torch.__version__))
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print("device:[%s]." % (device))
class TransitionDataset(Dataset):
def __init__(self, data_root_prefix='../prev_data2/img_data', total_file=171):
self.total_file = total_file
self.data_root_prefix = data_root_prefix
self.img = None
self.label = []
for idx in range(total_file):
with open(self.data_root_prefix + str(idx) + '.pkl', 'rb') as f:
data = pickle.load(f)
color1 = [[np.array(cv2.resize(cv2.cvtColor(img[0], cv2.COLOR_RGB2GRAY), dsize=(64, 64), interpolation=cv2.INTER_AREA),np.float32)/255.] for img in data['color1_list']]
color2 = [[np.array(cv2.resize(cv2.cvtColor(img[0], cv2.COLOR_RGB2GRAY), dsize=(64, 64), interpolation=cv2.INTER_AREA),np.float32)/255.] for img in data['color2_list']]
color = np.concatenate([color1, color2], axis=1)
# depth = np.concatenate([data['depth1_list'], data['depth2_list']], axis=1)
if self.img is None:
self.img = color
else:
self.img = np.concatenate([self.img, color], axis=0)
self.label += data['label']
self.total_num = len(self.label)
self.label = np.asarray(self.label, np.long)
print(self.img.shape)
print(self.label.shape)
def __len__(self):
return self.total_num
def __getitem__(self, idx):
return (self.img[idx], self.label[idx])
training_dataset = TransitionDataset(data_root_prefix='../prev_data2/img_data_test', total_file=143)
test_dataset = TransitionDataset(data_root_prefix='../prev_data2/img_data', total_file=34)
def np2tc(x_np): return torch.from_numpy(x_np).float().to(device)
def tc2np(x_tc): return x_tc.detach().cpu().numpy()
class MixtureOfLogits(nn.Module):
def __init__(self,
in_dim = 64, # input feature dimension
y_dim = 10, # number of classes
k = 5, # number of mixtures
sig_min = 1e-4, # minimum sigma
sig_max = None, # maximum sigma
SHARE_SIG = True # share sigma among mixture
):
super(MixtureOfLogits,self).__init__()
self.in_dim = in_dim # Q
self.y_dim = y_dim # D
self.k = k # K
self.sig_min = sig_min
self.sig_max = sig_max
self.SHARE_SIG = SHARE_SIG
self.build_graph()
def build_graph(self):
self.fc_pi = nn.Linear(self.in_dim,self.k)
self.fc_mu = nn.Linear(self.in_dim,self.k*self.y_dim)
if self.SHARE_SIG:
self.fc_sigma = nn.Linear(self.in_dim,self.k)
else:
self.fc_sigma = nn.Linear(self.in_dim,self.k*self.y_dim)
def forward(self,x):
"""
:param x: [N x Q]
"""
pi_logit = self.fc_pi(x) # [N x K]
pi = torch.softmax(pi_logit,dim=1) # [N x K]
mu = self.fc_mu(x) # [N x KD]
mu = torch.reshape(mu,(-1,self.k,self.y_dim)) # [N x K x D]
if self.SHARE_SIG:
sigma = self.fc_sigma(x) # [N x K]
sigma = sigma.unsqueeze(dim=-1) # [N x K x 1]
sigma = sigma.expand_as(mu) # [N x K x D]
else:
sigma = self.fc_sigma(x) # [N x KD]
sigma = torch.reshape(sigma,(-1,self.k,self.y_dim)) # [N x K x D]
if self.sig_max is None:
sigma = self.sig_min + torch.exp(sigma) # [N x K x D]
else:
sig_range = (self.sig_max-self.sig_min)
sigma = self.sig_min + sig_range*torch.sigmoid(sigma) # [N x K x D]
mol_out = {'pi':pi,'mu':mu,'sigma':sigma}
return mol_out
class MixtureLogitNetwork(nn.Module):
def __init__(self,
name = 'mln', # name
x_dim = [2,64,64], # input dimension
k_size = 3, # kernel size
c_dims = [32,64], # conv channel dimensions
p_sizes = [2,2], # pooling sizes
h_dims = [128], # hidden dimensions
y_dim = 10, # output dimension
USE_BN = True, # whether to use batch-norm
k = 5, # number of mixtures
sig_min = 1e-4, # minimum sigma
sig_max = 10, # maximum sigma
mu_min = -3, # minimum mu (init)
mu_max = +3, # maximum mu (init)
SHARE_SIG = True
):
super(MixtureLogitNetwork,self).__init__()
self.name = name
self.x_dim = x_dim
self.k_size = k_size
self.c_dims = c_dims
self.p_sizes = p_sizes
self.h_dims = h_dims
self.y_dim = y_dim
self.USE_BN = USE_BN
self.k = k
self.sig_min = sig_min
self.sig_max = sig_max
self.mu_min = mu_min
self.mu_max = mu_max
self.SHARE_SIG = SHARE_SIG
self.build_graph()
self.init_param()
def build_graph(self):
self.layers = []
# Conv layers
prev_c_dim = self.x_dim[0] # input channel
for (c_dim,p_size) in zip(self.c_dims,self.p_sizes):
self.layers.append(
nn.Conv2d(
in_channels = prev_c_dim,
out_channels = c_dim,
kernel_size = self.k_size,
stride = (1,1),
padding = self.k_size//2
) # conv
)
if self.USE_BN:
self.layers.append(
nn.BatchNorm2d(num_features=c_dim)
)
self.layers.append(nn.ReLU())
self.layers.append(
nn.MaxPool2d(kernel_size=(p_size,p_size),stride=(p_size,p_size))
)
# self.layers.append(nn.Dropout2d(p=0.1)) # p: to be zero-ed
prev_c_dim = c_dim
# Dense layers
self.layers.append(nn.Flatten())
p_prod = np.prod(self.p_sizes)
prev_h_dim = prev_c_dim*(self.x_dim[1]//p_prod)*(self.x_dim[2]//p_prod)
for h_dim in self.h_dims:
self.layers.append(
nn.Linear(
in_features = prev_h_dim,
out_features = h_dim,
bias = True
)
)
self.layers.append(nn.ReLU(True)) # activation
self.layers.append(nn.Dropout2d(p=0.1)) # p: to be zero-ed
prev_h_dim = h_dim
# Final mixture of logits layer
mol = MixtureOfLogits(
in_dim = prev_h_dim,
y_dim = self.y_dim,
k = self.k,
sig_min = self.sig_min,
sig_max = self.sig_max,
SHARE_SIG = self.SHARE_SIG
)
self.layers.append(mol)
# Concatenate all layers
self.net = nn.Sequential()
for l_idx,layer in enumerate(self.layers):
layer_name = "%s_%02d"%(type(layer).__name__.lower(),l_idx)
self.net.add_module(layer_name,layer)
def forward(self,x):
mln_out = self.net(x)
return mln_out # mu:[N x K x D] / pi:[N x K] / sigma:[N x K x D]
def init_param(self):
for m in self.modules():
if isinstance(m,nn.Conv2d): # init conv
nn.init.kaiming_normal_(m.weight)
nn.init.zeros_(m.bias)
if isinstance(m,nn.Linear): # init dense
nn.init.kaiming_normal_(m.weight)
nn.init.zeros_(m.bias)
# Heuristic: fc_mu.bias ~ Uniform(mu_min,mu_max)
self.layers[-1].fc_mu.bias.data.uniform_(self.mu_min,self.mu_max)
def mln_uncertainties(pi,mu,sigma):
"""
:param pi: [N x K]
:param mu: [N x K x D]
:param sigma: [N x K x D]
"""
# $\pi$
mu_hat = torch.softmax(mu,dim=2) # logit to prob [N x K x D]
pi_usq = torch.unsqueeze(pi,2) # [N x K x 1]
pi_exp = pi_usq.expand_as(sigma) # [N x K x D]
# softmax($\mu$) average
mu_hat_avg = torch.sum(torch.mul(pi_exp,mu_hat),dim=1).unsqueeze(1) # [N x 1 x D]
mu_hat_avg_exp = mu_hat_avg.expand_as(mu) # [N x K x D]
mu_hat_diff_sq = torch.square(mu_hat-mu_hat_avg_exp) # [N x K x D]
# Epistemic uncertainty
epis = torch.sum(torch.mul(pi_exp,mu_hat_diff_sq), dim=1) # [N x D]
epis = torch.sqrt(torch.sum(epis,dim=1)) # [N]
# Aleatoric uncertainty
alea = torch.sum(torch.mul(pi_exp,sigma), dim=1) # [N x D]
alea = torch.sqrt(torch.mean(alea,dim=1)) # [N]
# Return
unct_out = {'epis':epis, # [N]
'alea':alea # [N]
}
return unct_out
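# In equation form (matching the code above), with mu_hat = softmax(mu) over the class dimension:
# epis[n] = sqrt( sum_d sum_k pi[n,k] * (mu_hat[n,k,d] - sum_j pi[n,j]*mu_hat[n,j,d])**2 )
# alea[n] = sqrt( mean_d( sum_k pi[n,k] * sigma[n,k,d] ) )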
def mace_loss(pi,mu,sigma,target):
"""
:param pi: [N x K]
:param mu: [N x K x D]
:param sigma: [N x K x D]
:param target: [N x D]
"""
# $\mu$
mu_hat = torch.softmax(mu,dim=2) # logit to prob [N x K x D]
log_mu_hat = torch.log(mu_hat+1e-6) # [N x K x D]
# $\pi$
pi_usq = torch.unsqueeze(pi,2) # [N x K x 1]
pi_exp = pi_usq.expand_as(mu) # [N x K x D]
# target
target_usq = torch.unsqueeze(target,1) # [N x 1 x D]
target_exp = target_usq.expand_as(mu) # [N x K x D]
# CE loss
ce_exp = -target_exp*log_mu_hat # CE [N x K x D]
ace_exp = ce_exp / sigma # attenuated CE [N x K x D]
mace_exp = torch.mul(pi_exp,ace_exp) # mixtured attenuated CE [N x K x D]
mace = torch.sum(mace_exp,dim=1) # [N x D]
mace = torch.sum(mace,dim=1) # [N]
mace_avg = torch.mean(mace) # [1]
# Compute uncertainties (epis and alea)
unct_out = mln_uncertainties(pi,mu,sigma)
epis = unct_out['epis'] # [N]
alea = unct_out['alea'] # [N]
epis_avg = torch.mean(epis) # [1]
alea_avg = torch.mean(alea) # [1]
# Return
loss_out = {'mace':mace, # [N]
'mace_avg':mace_avg, # [1]
'epis':epis, # [N]
'alea':alea, # [N]
'epis_avg':epis_avg, # [1]
'alea_avg':alea_avg # [1]
}
return loss_out
def mln_gather(pi,mu,sigma):
"""
:param pi: [N x K]
:param mu: [N x K x D]
:param sigma: [N x K x D]
"""
max_idx = torch.argmax(pi,dim=1) # [N]
idx_gather = max_idx.unsqueeze(dim=-1).repeat(1,mu.shape[2]).unsqueeze(1) # [N x 1 x D]
mu_sel = torch.gather(mu,dim=1,index=idx_gather).squeeze(dim=1) # [N x D]
sigma_sel = torch.gather(sigma,dim=1,index=idx_gather).squeeze(dim=1) # [N x D]
out = {'max_idx':max_idx, # [N]
'idx_gather':idx_gather, # [N x 1 x D]
'mu_sel':mu_sel, # [N x D]
'sigma_sel':sigma_sel # [N x D]
}
return out
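# Illustrative example (added for clarity; not part of the original code, and
# the values below are made up). mln_gather returns, per sample, the mu/sigma
# of the mixture component with the largest weight pi; the unsqueeze/repeat
# call builds the [N x 1 x D] index tensor that torch.gather expects.
def _mln_gather_example():
    import torch
    pi = torch.tensor([[0.1, 0.9]])      # component 1 dominates
    mu = torch.arange(6.).view(1, 2, 3)  # mu[:,0,:]=[0,1,2], mu[:,1,:]=[3,4,5]
    sigma = torch.ones(1, 2, 3)
    print(mln_gather(pi, mu, sigma)['mu_sel'])  # tensor([[3., 4., 5.]])
# _mln_gather_example()  # uncomment to run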
def func_eval(model,data_iter,device):
with torch.no_grad():
n_total,n_correct,epis_unct_sum,alea_unct_sum = 0,0,0,0
model.eval() # evaluate (affects DropOut and BN)
for batch_in,batch_out in data_iter:
            # Forward path
y_trgt = batch_out.to(device)
mln_out = model.forward(batch_in.view(-1,2,64,64).to(device))
pi,mu,sigma = mln_out['pi'],mln_out['mu'],mln_out['sigma']
out = mln_gather(pi,mu,sigma)
model_pred = out['mu_sel']
# Compute uncertainty
unct_out = mln_uncertainties(pi,mu,sigma)
epis_unct = unct_out['epis'] # [N]
alea_unct = unct_out['alea'] # [N]
epis_unct_sum += torch.sum(epis_unct)
alea_unct_sum += torch.sum(alea_unct)
# Check predictions
_,y_pred = torch.max(model_pred,1)
n_correct += (y_pred==y_trgt).sum().item()
n_total += batch_in.size(0)
val_accr = (n_correct/n_total)
epis = (epis_unct_sum/n_total).detach().cpu().item()
alea = (alea_unct_sum/n_total).detach().cpu().item()
model.train() # back to train mode
out_eval = {'val_accr':val_accr,'epis':epis,'alea':alea}
return out_eval
# Demo forward path of MLN
M = MixtureLogitNetwork(k=32,SHARE_SIG=True).to(device)
x = torch.rand([2]+M.x_dim).to(device)
target = F.one_hot(torch.randint(low=0,high=10,size=(2,)),num_classes=10).to(device)
mln_out = M.forward(x)
pi,mu,sigma = mln_out['pi'],mln_out['mu'],mln_out['sigma']
mu_sel = mln_gather(pi,mu,sigma)['mu_sel']
loss_out = mace_loss(pi,mu,sigma,target)
loss = loss_out['mace_avg'] - loss_out['epis_avg'] # epis as a regularizer
loss.backward() # backward propagation
print ("x: %s"%(tc2np(x).shape,))
print ("=>")
print ("pi: %s\n%s"%(tc2np(pi).shape,tc2np(pi)))
print ("mu: %s\n%s"%(tc2np(mu).shape,tc2np(mu)))
print ("sigma: %s\n%s"%(tc2np(sigma).shape,tc2np(sigma)))
print ("=>")
print ("mace:[%.3f] epis:[%.3f] alea:[%.3f]"%
(loss_out['mace_avg'],loss_out['epis_avg'],loss_out['alea_avg']))
# total_dataset = transition_dataset.total_num
# training_len = int(total_dataset*0.8)
# val_len = total_dataset - training_len
# train_set, test_set = torch.utils.data.random_split(transition_dataset, [training_len, val_len])
batch_size = 128
train_iter = torch.utils.data.DataLoader(training_dataset, batch_size=batch_size, shuffle=True)
test_iter = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size)
M = MixtureLogitNetwork(k=3,SHARE_SIG=True).to(device)
M.init_param()
train_accr = func_eval(M,train_iter,device)['val_accr']
test_accr = func_eval(M,test_iter,device)['val_accr']
print ("[Initial try] train_accr:[%.3f] test_accr:[%.3f]."%
(train_accr,test_accr))
def train_wrapper(EPOCHS=50):
np.set_printoptions(formatter={'float_kind':'{:.2f}'.format})
M = MixtureLogitNetwork(name='mln', k_size=32, k=3, SHARE_SIG=True).to(device)
np.random.seed(seed=0)
torch.manual_seed(seed=0) # fix random seed
M.init_param()
optm = optim.Adam(M.parameters(), lr=1e-5, weight_decay=1e-6)
M.train() # train mode
train_iter = torch.utils.data.DataLoader(training_dataset, batch_size=128, shuffle=True)
test_iter = torch.utils.data.DataLoader(test_dataset, batch_size=128)
print_every = 1
for epoch in range(EPOCHS):
loss_sum = 0.0
for batch_in,batch_out in train_iter:
# Forward path
mln_out = M.forward(batch_in.view(-1, 2, 64, 64).to(device))
pi,mu,sigma = mln_out['pi'],mln_out['mu'],mln_out['sigma']
target = torch.eye(M.y_dim)[batch_out].to(device)
loss_out = mace_loss(pi,mu,sigma,target) # 'mace_avg','epis_avg','alea_avg'
loss = loss_out['mace_avg'] - loss_out['epis_avg'] + loss_out['alea_avg']
# Update
optm.zero_grad() # reset gradient
loss.backward() # back-propagation
optm.step() # optimizer update
# Track losses
loss_sum += loss
loss_avg = loss_sum/len(train_iter)
# Print
if ((epoch%print_every)==0) or (epoch==(EPOCHS-1)):
train_res = func_eval(M,train_iter,device)
test_res = func_eval(M,test_iter,device)
print ("epoch:[%d/%d]\n loss:[%.3f] train_accr:[%.3f] test_accr:[%.3f]."%
(epoch,EPOCHS,loss_avg,train_res['val_accr'],test_res['val_accr']))
print (" [Train] alea:[%.3f] epis:[%.3f]\n [Test] alea:[%.3f] epis:[%.3f]"%
(train_res['alea'],train_res['epis'],test_res['alea'],test_res['epis']))
out = {'M': M, 'train_iter': train_iter, 'test_iter': test_iter}
# Check the trained results
# M = out['M']
# test_iter,train_iter = out['test_iter'],out['train_iter']
# mnist_test = datasets.MNIST(root='./data/',train=False,transform=transforms.ToTensor(),download=True)
# n_sample = 25
# sample_indices = np.random.choice(len(mnist_test.targets),n_sample,replace=False)
# test_x = mnist_test.data[sample_indices]
# test_y = mnist_test.targets[sample_indices]
# x = test_x.view(-1,2,128,128).type(torch.float).to(device)/255.
# mln_out = M.forward(x)
# pi,mu,sigma = mln_out['pi'],mln_out['mu'],mln_out['sigma']
# target = torch.eye(M.y_dim)[test_y].to(device)
# loss_out = mace_loss(pi,mu,sigma,target) # 'mace_avg','epis_avg','alea_avg'
# # Get the first and second-best prediction
# y_pred = []
# y_second = []
# pi_np,mu_np,sigma_np = tc2np(pi),tc2np(mu),tc2np(sigma)
# for idx in range(n_sample):
# pi_i,mu_i = pi_np[idx,:],mu_np[idx,:]
# sort_idx = np.argsort(-pi_i)
# y_pred.append(np.argmax(mu_i[sort_idx[0]]))
# y_second.append(np.argmax(mu_i[sort_idx[1]]))
# # Plot results
# plt.figure(figsize=(10,10))
# for idx in range(n_sample):
# plt.subplot(5, 5, idx+1)
# plt.imshow(test_x[idx][0], cmap='gray')
# plt.axis('off')
# plt.title("[%d] 1st:[%d] 2nd:[%d]"%
# (test_y[idx],y_pred[idx],y_second[idx]))
# plt.show()
    # Print-out the mixture weights
    # print ('pi:\n%s'%(pi_np[:5,:])) # print up to five
return out
print ("Done.")
out = train_wrapper() | [
"torch.nn.Linear",
"torch.nn.BatchNorm2d",
"torch.nn.init.kaiming_normal_",
"torch.set_printoptions",
"torch.cuda.is_available",
"torch.eye",
"torch.exp",
"torch.reshape",
"torch.sum",
"torch.sigmoid",
"torch.mul",
"torch.gather",
"torch.nn.MaxPool2d",
"torch.unsqueeze",
"torch.manual_seed",
"torch.randint",
"torch.utils.data.DataLoader",
"torch.nn.init.zeros_",
"torch.nn.Flatten",
"torch.max",
"torch.square",
"torch.nn.Sequential",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.log",
"torch.argmax",
"torch.rand",
"torch.no_grad",
"torch.softmax",
"torch.from_numpy",
"torch.nn.Dropout2d",
"torch.mean"
] | 1.7.1 | kyungjaelee/robosuite | 0d73fcca9ed8e638632f4bd7b0f1b8ebf4640fb1 |
1.1 | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
from dataset.dataset_configs import STICKS
from tools.so3 import so3_exponential_map, rand_rot
from tools.functions import masked_kp_mean, \
argmin_translation, argmin_scale, \
avg_l2_huber
from tools.vis_utils import get_visdom_connection, \
show_projections, \
visdom_plot_pointclouds
from tools.utils import auto_init_args
import numpy as np
import torch.nn.functional as Fu
from torch import nn as nn
import torch
class C3DPO(torch.nn.Module):
def __init__(self, n_keypoints=17,
shape_basis_size=10,
n_fully_connected=1024,
n_layers=6,
keypoint_rescale=float(1),
keypoint_norm_type='to_mean',
projection_type='orthographic',
z_augment=True,
z_augment_rot_angle=float(np.pi)/8,
z_equivariance=True,
z_equivariance_rot_angle=float(np.pi)/8,
camera_translation=False,
camera_xy_translation=False,
argmin_translation=False,
camera_scale=False,
connectivity_setup='NONE',
huber_scaling=0.01,
reprojection_normalization='kp_total_count',
independent_phi_for_aug=False,
canonicalization={
'use': True,
'n_layers': 6,
'n_rand_samples': 4,
'rot_angle': float(np.pi),
'n_fully_connected': 1024,
},
perspective_depth_threshold=0.1,
depth_offset=0.,
replace_keypoints_with_input=True,
root_joint=0,
weight_init_std=0.01,
loss_weights={'l_reprojection': 1.,
'l_canonicalization': 1.},
log_vars=[
'objective',
'dist_reprojection',
'l_reprojection',
'l_canonicalization'],
**kwargs):
super(C3DPO, self).__init__()
# autoassign constructor params to self
auto_init_args(self)
# factorization net
self.phi = nn.Sequential(
*self.make_trunk(dim_in=self.n_keypoints * 3,
# 2 dim loc, 1 dim visibility
n_fully_connected=self.n_fully_connected,
n_layers=self.n_layers))
# shape coefficient predictor
self.alpha_layer = conv1x1(self.n_fully_connected,
self.shape_basis_size,
std=weight_init_std)
# 3D shape predictor
self.shape_layer = conv1x1(self.shape_basis_size, 3*n_keypoints,
std=weight_init_std)
# rotation predictor (predicts log-rotation)
self.rot_layer = conv1x1(self.n_fully_connected, 3,
std=weight_init_std)
if self.camera_translation:
# camera translation
self.translation_layer = conv1x1(self.n_fully_connected, 3,
std=weight_init_std)
if self.camera_scale:
            # camera scale (with final softplus to ensure positive outputs)
self.scale_layer = nn.Sequential(conv1x1(self.n_fully_connected, 3,
std=weight_init_std),
nn.Softplus())
if self.canonicalization['use']:
# canonicalization net:
self.psi = nn.Sequential(
*self.make_trunk(dim_in=self.n_keypoints*3,
n_fully_connected=self.canonicalization['n_fully_connected'],
n_layers=self.canonicalization['n_layers']))
self.alpha_layer_psi = conv1x1(self.n_fully_connected,
self.shape_basis_size,
std=weight_init_std)
def make_trunk(self,
n_fully_connected=None,
dim_in=None,
n_layers=None,
use_bn=True):
layer1 = ConvBNLayer(dim_in,
n_fully_connected,
use_bn=use_bn)
layers = [layer1]
for l in range(n_layers):
layers.append(ResLayer(n_fully_connected,
int(n_fully_connected/4)))
return layers
def forward(self, kp_loc=None, kp_vis=None,
class_mask=None, K=None, **kwargs):
# dictionary with outputs of the fw pass
preds = {}
# input sizes ...
ba, kp_dim, n_kp = kp_loc.shape
assert kp_dim == 2, 'bad input keypoint dim'
assert n_kp == self.n_keypoints, 'bad # of keypoints!'
if self.projection_type == 'perspective':
assert K is not None
kp_loc_cal = self.calibrate_keypoints(kp_loc, K)
else:
kp_loc_cal = kp_loc
# normalize keypoints
kp_loc_norm, kp_mean = \
self.normalize_keypoints(
kp_loc_cal, kp_vis, rescale=self.keypoint_rescale)
# save for later visualisations ...
preds['kp_loc_norm'] = kp_loc_norm
preds['kp_mean'] = kp_mean
# run the shape predictor
preds['phi'] = self.run_phi(kp_loc_norm, kp_vis, class_mask=class_mask)
if self.canonicalization['use']:
preds['l_canonicalization'], preds['psi'] = \
self.canonicalization_loss(preds['phi'],
class_mask=class_mask)
# 3D->2D project shape to camera
kp_reprojected, depth = self.camera_projection(
preds['phi']['shape_camera_coord'])
preds['kp_reprojected'] = kp_reprojected
# compute the repro loss for backpropagation
if self.reprojection_normalization == 'kp_count_per_image':
preds['l_reprojection'] = avg_l2_huber(
kp_reprojected,
kp_loc_norm,
mask=kp_vis,
squared=self.squared_reprojection_loss)
elif self.reprojection_normalization == 'kp_total_count':
def flatten_(x): return x.permute(
1, 2, 0).contiguous().view(1, 2, self.n_keypoints*ba)
preds['l_reprojection'] = avg_l2_huber(
flatten_(kp_reprojected),
flatten_(kp_loc_norm),
mask=kp_vis.permute(1, 0).contiguous().view(1, -1),
scaling=self.huber_scaling)
else:
raise ValueError('unknown loss normalization %s' %
                             self.reprojection_normalization)
# unnormalize the shape projections
kp_reprojected_image = \
self.unnormalize_keypoints(kp_reprojected, kp_mean,
rescale=self.keypoint_rescale)
# projections in the image coordinate frame
if self.replace_keypoints_with_input and not self.training:
# use the input points
kp_reprojected_image = \
(1-kp_vis[:, None, :]) * kp_reprojected_image + \
kp_vis[:, None, :] * kp_loc_cal
preds['kp_reprojected_image'] = kp_reprojected_image
# projected 3D shape in the image space
# = unprojection of kp_reprojected_image
shape_image_coord = self.camera_unprojection(
kp_reprojected_image, depth,
rescale=self.keypoint_rescale)
if self.projection_type == 'perspective':
preds['shape_image_coord_cal'] = shape_image_coord
shape_image_coord = \
self.uncalibrate_keypoints(shape_image_coord, K)
preds['kp_reprojected_image_uncal'], _ = \
self.camera_projection(shape_image_coord)
preds['shape_image_coord'] = shape_image_coord
# get the final loss
preds['objective'] = self.get_objective(preds)
assert np.isfinite(
preds['objective'].sum().data.cpu().numpy()), "nans!"
return preds
def camera_projection(self, shape):
depth = shape[:, 2:3, :]
if self.projection_type == 'perspective':
if self.perspective_depth_threshold > 0:
depth = torch.clamp(depth, self.perspective_depth_threshold)
projections = shape[:, 0:2, :] / depth
elif self.projection_type == 'orthographic':
projections = shape[:, 0:2, :]
else:
raise ValueError('no such projection type %s' %
self.projection_type)
return projections, depth
def camera_unprojection(self, kp_loc, depth, rescale=float(1)):
depth = depth / rescale
if self.projection_type == 'perspective':
shape = torch.cat((kp_loc * depth, depth), dim=1)
elif self.projection_type == 'orthographic':
shape = torch.cat((kp_loc, depth), dim=1)
else:
raise ValueError('no such projection type %s' %
self.projection_type)
return shape
def calibrate_keypoints(self, kp_loc, K):
# undo the projection matrix
assert K is not None
kp_loc = kp_loc - K[:, 0:2, 2:3]
focal = torch.stack((K[:, 0, 0], K[:, 1, 1]), dim=1)
kp_loc = kp_loc / focal[:, :, None]
return kp_loc
def uncalibrate_keypoints(self, kp_loc, K):
assert K is not None
kp_loc = torch.bmm(K, kp_loc)
return kp_loc
def normalize_keypoints(self,
kp_loc,
kp_vis,
rescale=1.,
K=None):
if self.keypoint_norm_type == 'to_root':
# center around the root joint
kp_mean = kp_loc[:, :, self.root_joint]
kp_loc_norm = kp_loc - kp_mean[:, :, None]
elif self.keypoint_norm_type == 'to_mean':
# calc the mean of visible points
kp_mean = masked_kp_mean(kp_loc, kp_vis)
# remove the mean from the keypoint locations
kp_loc_norm = kp_loc - kp_mean[:, :, None]
else:
raise ValueError('no such kp norm %s' %
self.keypoint_norm_type)
# rescale
kp_loc_norm = kp_loc_norm * rescale
return kp_loc_norm, kp_mean
def unnormalize_keypoints(self,
kp_loc_norm,
kp_mean,
rescale=1.,
K=None):
kp_loc = kp_loc_norm * (1. / rescale)
kp_loc = kp_loc + kp_mean[:, :, None]
return kp_loc
def run_phi(self,
kp_loc,
kp_vis,
class_mask=None,
):
preds = {}
# batch size
ba = kp_loc.shape[0]
dtype = kp_loc.type()
kp_loc_orig = kp_loc.clone()
if self.z_augment and self.training:
R_rand = rand_rot(ba,
dtype=dtype,
max_rot_angle=float(self.z_augment_rot_angle),
axes=(0, 0, 1))
kp_loc_in = torch.bmm(R_rand[:, 0:2, 0:2], kp_loc)
else:
R_rand = torch.eye(3).type(dtype)[None].repeat((ba, 1, 1))
kp_loc_in = kp_loc_orig
if self.z_equivariance and self.training:
# random xy rot
R_rand_eq = rand_rot(ba,
dtype=dtype,
max_rot_angle=float(
self.z_equivariance_rot_angle),
axes=(0, 0, 1))
kp_loc_in = torch.cat(
(kp_loc_in,
torch.bmm(R_rand_eq[:, 0:2, 0:2], kp_loc_in)
), dim=0)
kp_vis_in = kp_vis.repeat((2, 1))
else:
kp_vis_in = kp_vis
# mask kp_loc by kp_visibility
kp_loc_masked = kp_loc_in * kp_vis_in[:, None, :]
# vectorize
kp_loc_flatten = kp_loc_masked.view(-1, 2*self.n_keypoints)
# concatenate visibilities and kp locations
l1_input = torch.cat((kp_loc_flatten, kp_vis_in), dim=1)
# pass to network
if self.independent_phi_for_aug and l1_input.shape[0] == 2*ba:
feats = torch.cat([self.phi(l1_[:, :, None, None]) for
l1_ in l1_input.split(ba, dim=0)], dim=0)
else:
feats = self.phi(l1_input[:, :, None, None])
# coefficients into the linear basis
shape_coeff = self.alpha_layer(feats)[:, :, 0, 0]
if self.z_equivariance and self.training:
# use the shape coeff from the second set of preds
shape_coeff = shape_coeff[ba:]
# take the feats from the first set
feats = feats[:ba]
# shape prediction is just a linear layer implemented as a conv
shape_canonical = self.shape_layer(
shape_coeff[:, :, None, None])[:, :, 0, 0]
shape_canonical = shape_canonical.view(ba, 3, self.n_keypoints)
if self.keypoint_norm_type == 'to_root':
# make sure we fix the root at 0
root_j = shape_canonical[:, :, self.root_joint]
shape_canonical = shape_canonical - root_j[:, :, None]
# predict camera params
# ... log rotation (exponential representation)
R_log = self.rot_layer(feats)[:, :, 0, 0]
# convert from the 3D to 3x3 rot matrix
R = so3_exponential_map(R_log)
# T vector of the camera
if self.camera_translation:
T = self.translation_layer(feats)[:, :, 0, 0]
if self.camera_xy_translation: # kill the last z-dim
T = T * torch.tensor([1., 1., 0.]).type(dtype)[None, :]
else:
T = R_log.new_zeros(ba, 3)
# offset the translation vector of the camera
if self.depth_offset > 0.:
T[:, 2] = T[:, 2] + self.depth_offset
# scale of the camera
if self.camera_scale:
scale = self.scale_layer(feats)[:, 0, 0, 0]
else:
scale = R_log.new_ones(ba)
# rotated+scaled shape into the camera ( Y = sRX + T )
shape_camera_coord = self.apply_similarity_t(
shape_canonical, R, T, scale)
# undo equivariant transformation
if (self.z_equivariance or self.z_augment) and self.training:
R_rand_inv = R_rand.transpose(2, 1)
R = torch.bmm(R_rand_inv, R)
T = torch.bmm(R_rand_inv, T[:, :, None])[:, :, 0]
shape_camera_coord = torch.bmm(R_rand_inv, shape_camera_coord)
# estimate translation
if self.argmin_translation:
assert self.projection_type == 'orthographic'
projection, _ = self.camera_projection(shape_camera_coord)
T_amin = argmin_translation(projection, kp_loc_orig, v=kp_vis)
T_amin = Fu.pad(T_amin, (0, 1), 'constant', float(0))
shape_camera_coord = shape_camera_coord + T_amin[:, :, None]
T = T + T_amin
if class_mask is not None:
shape_camera_coord = shape_camera_coord * class_mask[:, None, :]
shape_canonical = shape_canonical * class_mask[:, None, :]
preds['R_log'] = R_log
preds['R'] = R
preds['scale'] = scale
preds['T'] = T
preds['shape_camera_coord'] = shape_camera_coord
preds['shape_coeff'] = shape_coeff
preds['shape_canonical'] = shape_canonical
return preds
def apply_similarity_t(self, S, R, T, s):
return torch.bmm(R, s[:, None, None] * S) + T[:, :, None]
def canonicalization_loss(self, phi_out, class_mask=None):
shape_canonical = phi_out['shape_canonical']
dtype = shape_canonical.type()
ba = shape_canonical.shape[0]
n_sample = self.canonicalization['n_rand_samples']
# rotate the canonical point cloud
# generate random rotation around all axes
R_rand = rand_rot(ba * n_sample,
dtype=dtype,
max_rot_angle=self.canonicalization['rot_angle'],
axes=(1, 1, 1))
unrotated = shape_canonical.repeat(n_sample, 1, 1)
rotated = torch.bmm(R_rand, unrotated)
psi_out = self.run_psi(rotated) # psi3( Rrand X )
a, b = psi_out['shape_canonical'], unrotated
l_canonicalization = avg_l2_huber(a, b,
scaling=self.huber_scaling,
mask=class_mask.repeat(n_sample, 1)
if class_mask is not None else None)
# reshape the outputs in the output list
psi_out = {k: v.view(
self.canonicalization['n_rand_samples'],
ba, *v.shape[1:]) for k, v in psi_out.items()}
return l_canonicalization, psi_out
def run_psi(self, shape_canonical):
preds = {}
# batch size
ba = shape_canonical.shape[0]
assert shape_canonical.shape[1] == 3, '3d inputs only please'
# reshape and pass to the network ...
l1_input = shape_canonical.view(ba, 3*self.n_keypoints)
# pass to network
feats = self.psi(l1_input[:, :, None, None])
# coefficients into the linear basis
shape_coeff = self.alpha_layer_psi(feats)[:, :, 0, 0]
preds['shape_coeff'] = shape_coeff
# use the shape_pred_layer from 2d predictor
shape_pred = self.shape_layer(
shape_coeff[:, :, None, None])[:, :, 0, 0]
shape_pred = shape_pred.view(ba, 3, self.n_keypoints)
preds['shape_canonical'] = shape_pred
return preds
def get_objective(self, preds):
losses_weighted = [preds[k] * float(w) for k, w in
self.loss_weights.items()
if k in preds]
if (not hasattr(self, '_loss_weights_printed') or
not self._loss_weights_printed) and self.training:
print('-------\nloss_weights:')
for k, w in self.loss_weights.items():
print('%20s: %1.2e' % (k, w))
print('-------')
self._loss_weights_printed = True
loss = torch.stack(losses_weighted).sum()
return loss
def visualize(self, visdom_env, trainmode,
preds, stats, clear_env=False):
viz = get_visdom_connection(server=stats.visdom_server,
port=stats.visdom_port)
if not viz.check_connection():
print("no visdom server! -> skipping batch vis")
return
if clear_env: # clear visualisations
print(" ... clearing visdom environment")
viz.close(env=visdom_env, win=None)
print('vis into env:\n %s' % visdom_env)
it = stats.it[trainmode]
epoch = stats.epoch
idx_image = 0
title = "e%d_it%d_im%d" % (epoch, it, idx_image)
# get the connectivity pattern
sticks = STICKS[self.connectivity_setup] if \
self.connectivity_setup in STICKS else None
var_kp = {'orthographic': 'kp_reprojected_image',
'perspective': 'kp_reprojected_image_uncal'
}[self.projection_type]
# show reprojections
p = np.stack(
[preds[k][idx_image].detach().cpu().numpy()
for k in (var_kp, 'kp_loc')])
v = preds['kp_vis'][idx_image].detach().cpu().numpy()
show_projections(p, visdom_env=visdom_env, v=v,
title='projections_'+title, cmap__='gist_ncar',
markersize=50, sticks=sticks,
stickwidth=1, plot_point_order=False,
image_path=preds['image_path'][idx_image],
visdom_win='projections')
# show 3d reconstruction
if True:
var3d = {'orthographic': 'shape_image_coord',
'perspective': 'shape_image_coord_cal'
}[self.projection_type]
pcl = {'pred': preds[var3d]
[idx_image].detach().cpu().numpy().copy()}
if 'kp_loc_3d' in preds:
pcl['gt'] = preds['kp_loc_3d'][idx_image].detach(
).cpu().numpy().copy()
if self.projection_type == 'perspective':
                    # for perspective projections, we don't know the scale
# so we estimate it here ...
scale = argmin_scale(torch.from_numpy(pcl['pred'][None]),
torch.from_numpy(pcl['gt'][None]))
pcl['pred'] = pcl['pred'] * float(scale)
elif self.projection_type == 'orthographic':
# here we depth-center gt and predictions
for k in ('pred', 'gt'):
pcl_ = pcl[k].copy()
meanz = pcl_.mean(1) * np.array([0., 0., 1.])
pcl[k] = pcl_ - meanz[:, None]
else:
raise ValueError(self.projection_type)
visdom_plot_pointclouds(viz, pcl, visdom_env, '3d_'+title,
plot_legend=False, markersize=20,
sticks=sticks, win='3d')
def pytorch_ge12():
v = torch.__version__
v = float('.'.join(v.split('.')[0:2]))
return v >= 1.2
def conv1x1(in_planes, out_planes, std=0.01):
"""1x1 convolution"""
cnv = nn.Conv2d(in_planes, out_planes, bias=True, kernel_size=1)
cnv.weight.data.normal_(0., std)
if cnv.bias is not None:
cnv.bias.data.fill_(0.)
return cnv
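# Illustrative note (added for clarity; not part of the original code, and the
# sizes below are hypothetical). C3DPO feeds features of shape [B, C, 1, 1]
# through these 1x1 convolutions (alpha_layer, rot_layer, ...), so conv1x1
# effectively acts as a per-sample linear map from C channels to out_planes.
def _conv1x1_example():
    import torch
    layer = conv1x1(in_planes=1024, out_planes=10, std=0.01)
    feats = torch.randn(2, 1024, 1, 1)
    print(layer(feats).shape)  # torch.Size([2, 10, 1, 1])
# _conv1x1_example()  # uncomment to run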
class ConvBNLayer(nn.Module):
def __init__(self, inplanes, planes, use_bn=True, stride=1, ):
super(ConvBNLayer, self).__init__()
# do a reasonable init
self.conv1 = conv1x1(inplanes, planes)
self.use_bn = use_bn
if use_bn:
self.bn1 = nn.BatchNorm2d(planes)
if pytorch_ge12():
self.bn1.weight.data.uniform_(0., 1.)
self.relu = nn.ReLU(inplace=True)
self.stride = stride
def forward(self, x):
out = self.conv1(x)
if self.use_bn:
out = self.bn1(out)
out = self.relu(out)
return out
class ResLayer(nn.Module):
def __init__(self, inplanes, planes, expansion=4):
super(ResLayer, self).__init__()
self.expansion = expansion
self.conv1 = conv1x1(inplanes, planes)
self.bn1 = nn.BatchNorm2d(planes)
if pytorch_ge12():
self.bn1.weight.data.uniform_(0., 1.)
self.conv2 = conv1x1(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
if pytorch_ge12():
self.bn2.weight.data.uniform_(0., 1.)
self.conv3 = conv1x1(planes, planes * self.expansion)
self.bn3 = nn.BatchNorm2d(planes * self.expansion)
if pytorch_ge12():
self.bn3.weight.data.uniform_(0., 1.)
self.relu = nn.ReLU(inplace=True)
self.skip = inplanes == (planes*self.expansion)
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.skip:
out += residual
out = self.relu(out)
return out
| [
"torch.cat",
"torch.stack",
"torch.nn.BatchNorm2d",
"torch.bmm",
"torch.clamp",
"torch.nn.ReLU",
"torch.from_numpy",
"torch.nn.Conv2d",
"torch.eye",
"torch.tensor",
"torch.nn.Softplus"
] | 1.1.0 | tusharc31/c3dpo_nrsfm | bffe61ddd84eb2aab8d6f18c3534107f616d0fd2 |
0.4 | #!/usr/bin/env python
# USAGE: python <this_script> <corr_file> <network_index> <layer_index> <output_path>
import sys
import torch
corr_file = sys.argv[1]
network = int(sys.argv[2])
layer = int(sys.argv[3])
out = sys.argv[4]
correlations = torch.load(corr_file)
correlations = correlations[network][layer][0]
maxs, _ = torch.max(
torch.abs(correlations) *
(1 - torch.eq(
torch.arange(correlations.shape[0]),
network
).float())[:, None],
dim=0
)
_, permutation = torch.sort(maxs, descending=True)
rank = torch.eye(correlations.shape[1])
rank = rank.index_select(0, permutation)
torch.save(rank, out)
| [
"torch.arange",
"torch.save",
"torch.abs",
"torch.eye",
"torch.load",
"torch.sort"
] | 0.4.0 | oserikov/opennmt-inspection | 0686a43aceb2620272ecda44ad08ba3bc9cb6e9b |
1.8 | """
This code is extended from Jin-Hwa Kim, Jaehyun Jun, Byoung-Tak Zhang's repository.
https://github.com/jnhwkim/ban-vqa
This code is modified from ZCYang's repository.
https://github.com/zcyang/imageqa-san
"""
import torch
import torch.nn as nn
from torch.nn.utils.weight_norm import weight_norm
from bc import BCNet
# Bilinear Attention
class BiAttention(nn.Module):
def __init__(self, x_dim, y_dim, z_dim, glimpse, dropout=[.2,.5]):
super(BiAttention, self).__init__()
self.glimpse = glimpse
self.logits = weight_norm(BCNet(x_dim, y_dim, z_dim, glimpse, dropout=dropout, k=3), \
name='h_mat', dim=None)
def forward(self, v, q, v_mask=True):
"""
v: [batch, k, vdim]
q: [batch, qdim]
"""
p, logits = self.forward_all(v, q, v_mask)
return p, logits
def forward_all(self, v, q, v_mask=True):
v_num = v.size(1)
q_num = q.size(1)
logits = self.logits(v, q) # b x g x v x q
if v_mask:
mask = (0 == v.abs().sum(2)).unsqueeze(1).unsqueeze(3).expand(logits.size())
logits.data.masked_fill_(mask.data, -float('inf'))
p = nn.functional.softmax(logits.view(-1, self.glimpse, v_num * q_num), 2)
return p.view(-1, self.glimpse, v_num, q_num), logits
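# Illustrative sketch (added for clarity; not part of the original code, and
# the tensor values are made up). The v_mask branch above treats an all-zero
# feature row as a padded box: its abs().sum(2) is 0, so its logits are set to
# -inf before the softmax and it receives zero attention.
def _padding_mask_example():
    import torch
    v = torch.tensor([[[1., 2., 3.], [0., 0., 0.]]])  # second box is padding
    pad = (0 == v.abs().sum(2))
    print(pad)  # tensor([[False, True]])
# _padding_mask_example()  # uncomment to run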
# Stacked Attention
class StackedAttention(nn.Module):
def __init__(self, num_stacks, img_feat_size, ques_feat_size, att_size, output_size, drop_ratio):
super(StackedAttention, self).__init__()
self.img_feat_size = img_feat_size
self.ques_feat_size = ques_feat_size
self.att_size = att_size
self.output_size = output_size
self.drop_ratio = drop_ratio
self.num_stacks = num_stacks
self.layers = nn.ModuleList()
self.dropout = nn.Dropout(drop_ratio)
self.tanh = nn.Tanh()
self.softmax = nn.Softmax(dim=1)
self.fc11 = nn.Linear(ques_feat_size, att_size, bias=True)
self.fc12 = nn.Linear(img_feat_size, att_size, bias=False)
self.fc13 = nn.Linear(att_size, 1, bias=True)
for stack in range(num_stacks - 1):
self.layers.append(nn.Linear(att_size, att_size, bias=True))
self.layers.append(nn.Linear(img_feat_size, att_size, bias=False))
self.layers.append(nn.Linear(att_size, 1, bias=True))
def forward(self, img_feat, ques_feat, v_mask=True):
# Batch size
B = ques_feat.size(0)
# Stack 1
ques_emb_1 = self.fc11(ques_feat)
img_emb_1 = self.fc12(img_feat)
# Compute attention distribution
h1 = self.tanh(ques_emb_1.view(B, 1, self.att_size) + img_emb_1)
h1_emb = self.fc13(self.dropout(h1))
# Mask actual bounding box sizes before calculating softmax
if v_mask:
mask = (0 == img_emb_1.abs().sum(2)).unsqueeze(2).expand(h1_emb.size())
h1_emb.data.masked_fill_(mask.data, -float('inf'))
p1 = self.softmax(h1_emb)
# Compute weighted sum
img_att_1 = img_emb_1*p1
weight_sum_1 = torch.sum(img_att_1, dim=1)
# Combine with question vector
u1 = ques_emb_1 + weight_sum_1
# Other stacks
us = []
ques_embs = []
img_embs = []
hs = []
h_embs =[]
ps = []
img_atts = []
weight_sums = []
us.append(u1)
for stack in range(self.num_stacks - 1):
ques_embs.append(self.layers[3 * stack + 0](us[-1]))
img_embs.append(self.layers[3 * stack + 1](img_feat))
# Compute attention distribution
hs.append(self.tanh(ques_embs[-1].view(B, -1, self.att_size) + img_embs[-1]))
h_embs.append(self.layers[3*stack + 2](self.dropout(hs[-1])))
# Mask actual bounding box sizes before calculating softmax
if v_mask:
mask = (0 == img_embs[-1].abs().sum(2)).unsqueeze(2).expand(h_embs[-1].size())
h_embs[-1].data.masked_fill_(mask.data, -float('inf'))
ps.append(self.softmax(h_embs[-1]))
# Compute weighted sum
img_atts.append(img_embs[-1] * ps[-1])
weight_sums.append(torch.sum(img_atts[-1], dim=1))
# Combine with previous stack
ux = us[-1] + weight_sums[-1]
            # Store the combined vector for the next stack
us.append(ux)
return us[-1]
| [
"torch.nn.Linear",
"torch.nn.Dropout",
"torch.nn.Softmax",
"torch.nn.ModuleList",
"torch.nn.Tanh",
"torch.sum"
] | 1.8.0 | gdemelo/PubMedCLIP | 1c0892af2b640e7e6c3ac7846c12a596418eda2a |
1.5 | import torch
import torch.nn as nn
from torch.autograd import Function
from losses.sigma.polynomial.divide_conquer import divide_and_conquer
from losses.sigma.polynomial.multiplication import Multiplication
from losses.sigma.polynomial.grad import d_logS_d_expX
from itertools import combinations
def CNK(n,k):
combins = [c for c in combinations(range(n), k)]
return combins
class LogSumExp(nn.Module):
def __init__(self, k, p=None, thresh=1e-5):
super(LogSumExp, self).__init__()
self.k = k
self.p = int(1 + 0.2 * k) if p is None else p
self.mul = Multiplication(self.k + self.p - 1)
self.thresh = thresh
self.register_buffer('grad_k', torch.Tensor(0))
self.register_buffer('grad_km1', torch.Tensor(0))
self.buffers = (self.grad_km1, self.grad_k)
def forward(self, x):
f = LogSumExp_F(self.k, self.p, self.thresh, self.mul, self.buffers)
return f(x)
class LogSumExp_F(Function):
def __init__(self, k, p, thresh, mul, buffers):
self.k = k
self.p = p
self.mul = mul
self.thresh = thresh
# unpack buffers
self.grad_km1, self.grad_k = buffers
def forward(self, x):
"""
Returns a matrix of size (2, n_samples) with sigma_{k-1} and sigma_{k}
for each sample of the mini-batch.
"""
self.save_for_backward(x)
# number of samples and number of coefficients to compute
n_s = x.size(0)
kp = self.k + self.p - 1
assert kp <= x.size(1)
# clone to allow in-place operations
x = x.clone()
# pre-compute normalization
x_summed = x.sum(1)
# invert in log-space
x.t_().mul_(-1)
#print(x)
# initialize polynomials (in log-space)
x = [x, x.clone().fill_(0)]
#print(x)
# polynomial multiplications
log_res = divide_and_conquer(x, kp, mul=self.mul)
# re-normalize
coeff = log_res + x_summed[None, :]
# avoid broadcasting issues (in particular if n_s = 1)
coeff = coeff.view(kp + 1, n_s)
# save all coeff for backward
self.saved_coeff = coeff
#print(coeff)
res=coeff[self.k - 1: self.k + 1]
#print(res)
#print(res[1])
return coeff[self.k - 1: self.k + 1]
def backward(self, grad_sk):
"""
Compute backward pass of LogSumExp.
Python variables with an upper case first letter are in
log-space, other are in standard space.
"""
# tensors from forward pass
X, = self.saved_tensors
S = self.saved_coeff
# extend to shape (self.k + 1, n_samples, n_classes) for backward
S = S.unsqueeze(2).expand(S.size(0), X.size(0), X.size(1))
# compute gradients for coeff of degree k and k - 1
self.grad_km1 = d_logS_d_expX(S, X, self.k - 1, self.p, self.grad_km1, self.thresh)
self.grad_k = d_logS_d_expX(S, X, self.k, self.p, self.grad_k, self.thresh)
# chain rule: combine with incoming gradients (broadcast to all classes on third dim)
grad_x = grad_sk[0, :, None] * self.grad_km1 + grad_sk[1, :, None] * self.grad_k
return grad_x
def log_sum_exp(x):
"""
Compute log(sum(exp(x), 1)) in a numerically stable way.
Assumes x is 2d.
"""
max_score, _ = x.max(1)
return max_score + torch.log(torch.sum(torch.exp(x - max_score[:, None]), 1))
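# Illustrative check (added for clarity; not part of the original code, and the
# inputs are arbitrary). Subtracting the row-wise max keeps exp() from
# overflowing, so log_sum_exp stays finite for large inputs and matches
# torch.logsumexp on the same data.
def _log_sum_exp_example():
    x = torch.tensor([[1000.0, 1000.5], [0.0, 1.0]])
    print(log_sum_exp(x))              # finite values
    print(torch.logsumexp(x, dim=1))   # reference result for comparison
# _log_sum_exp_example()  # uncomment to run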
def straight_forward_log_sum_exp(x,k):
combins=CNK(x.size()[1],k)
new_x=torch.zeros(x.size()[0],len(combins)).cuda()
for i in range(len(combins)):
for j in range(k):
new_x[:,i]+=x[:,combins[i][j]]
return log_sum_exp(new_x)
def hard_topk(x,k):
max_1, _ = x.topk(k, dim=1)
max_1 = max_1.sum(1)+2.0/40 * (1 + (40 * (max_1 - 0.5)).exp().sum(1)).log()
return max_1
class LogSumExp_new(Function):
def __init__(self, k):
self.k = k
def forward(self,x):
M=x.sum(1)
n=x.size()[1]
batch_size=x.size()[0]
tbl=torch.ones(batch_size,n,self.k+1).cuda()
tbl=tbl*(-100)
for m in range(batch_size):
tbl[m][0][0]=M[m]-x[m][0]
tbl[m][0][1]=M[m]
for i in range(n-1):
tbl[m][i+1][0]=tbl[m][i][0]-x[m][i+1]
for k in range(self.k):
temp=torch.max(tbl[m][i][k],tbl[m][i][k+1]-x[m][i+1])
#print(temp)
tbl[m][i+1][k+1]=temp+torch.log(torch.exp(tbl[m][i][k]-temp)+torch.exp(tbl[m][i][k+1]-x[m][i+1]-temp))
self.save_for_backward(x,tbl)
#print(tbl)
return tbl[:,n-1,self.k]
def backward(self,loss):
x,tbl = self.saved_tensors
n=x.size()[1]
batch_size=x.size()[0]
tbl_gradient=torch.zeros(batch_size,self.k,n).cuda()
for m in range(batch_size):
for i in range(n):
tbl_gradient[m][0][i]=1
for j in range(self.k-1):
tbl_gradient[m][j+1][i]=torch.exp(tbl[m][n-1][j+1])-x[m][i]*tbl_gradient[m][j][i]
gradient=tbl_gradient[:,self.k-1,:]
#print(gradient)
return gradient
| [
"torch.zeros",
"torch.max",
"torch.ones",
"torch.Tensor",
"torch.exp"
] | 1.5.0 | satya77/transformer_rankers | 0d2c20bd26041d887fb65102020a0b609ec967fc |
1.6 | # MIT License
#
# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import math
import torch.nn as nn
from torch import Tensor
class TransformerEmbedding(nn.Module):
r"""
    Embedding layer. Similarly to other sequence transduction models, the transformer uses learned
    embeddings to convert the input tokens and output tokens to vectors of dimension d_model.
    In the embedding layers, the transformer multiplies those weights by sqrt(d_model).
Args:
num_embeddings (int): the number of embedding size
pad_id (int): identification of pad token
d_model (int): dimension of model
Inputs:
inputs (torch.FloatTensor): input of embedding layer
Returns:
outputs (torch.FloatTensor): output of embedding layer
"""
def __init__(self, num_embeddings: int, pad_id: int, d_model: int = 512) -> None:
super(TransformerEmbedding, self).__init__()
self.sqrt_dim = math.sqrt(d_model)
self.embedding = nn.Embedding(num_embeddings, d_model, padding_idx=pad_id)
def forward(self, inputs: Tensor) -> Tensor:
r"""
Forward propagate of embedding layer.
Inputs:
inputs (torch.FloatTensor): input of embedding layer
Returns:
outputs (torch.FloatTensor): output of embedding layer
"""
return self.embedding(inputs) * self.sqrt_dim
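# Minimal usage sketch (illustrative addition; not part of the original module,
# and the values below are hypothetical). Token ids of shape [batch, seq_len]
# are mapped to embeddings scaled by sqrt(d_model), so the output has shape
# [batch, seq_len, d_model].
def _transformer_embedding_example():
    import torch
    emb = TransformerEmbedding(num_embeddings=10, pad_id=0, d_model=512)
    tokens = torch.tensor([[1, 2, 3, 0]])
    print(emb(tokens).shape)  # torch.Size([1, 4, 512])
# _transformer_embedding_example()  # uncomment to run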
| [
"torch.nn.Embedding"
] | 1.6.0 | CanYouImagine/openspeech | 10307587f08615224df5a868fb5249c68c70b12d |
1.0 | """
This script trains sentence transformers with a triplet loss function.
As corpus, we use the Wikipedia sections dataset that was described by Dor et al., 2018, Learning Thematic Similarity Metric Using Triplet Networks.
See docs/pretrained-models/wikipedia-sections-models.md for further details.
You can get the dataset by running examples/datasets/get_data.py
"""
from sentence_transformers import SentenceTransformer, SentencesDataset, LoggingHandler, losses, models
from torch.utils.data import DataLoader
from sentence_transformers.readers import TripletReader
from sentence_transformers.evaluation import TripletEvaluator
from datetime import datetime
import csv
import logging
logging.basicConfig(format='%(asctime)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=logging.INFO,
handlers=[LoggingHandler()])
### Create a torch.DataLoader that passes training batch instances to our model
train_batch_size = 16
triplet_reader = TripletReader('datasets/wikipedia-sections-triplets', s1_col_idx=1, s2_col_idx=2, s3_col_idx=3, delimiter=',', quoting=csv.QUOTE_MINIMAL, has_header=True)
output_path = "output/bert-base-wikipedia-sections-mean-tokens-"+datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
num_epochs = 1
### Configure sentence transformers for training and train on the provided dataset
# Use BERT for mapping tokens to embeddings
word_embedding_model = models.BERT('bert-base-uncased')
# Apply mean pooling to get one fixed sized sentence vector
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension(),
pooling_mode_mean_tokens=True,
pooling_mode_cls_token=False,
pooling_mode_max_tokens=False)
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
logging.info("Read Triplet train dataset")
train_data = SentencesDataset(examples=triplet_reader.get_examples('train.csv'), model=model)
train_dataloader = DataLoader(train_data, shuffle=True, batch_size=train_batch_size)
train_loss = losses.TripletLoss(model=model)
logging.info("Read Wikipedia Triplet dev dataset")
dev_data = SentencesDataset(examples=triplet_reader.get_examples('validation.csv', 1000), model=model)
dev_dataloader = DataLoader(dev_data, shuffle=False, batch_size=train_batch_size)
evaluator = TripletEvaluator(dev_dataloader)
warmup_steps = int(len(train_data)*num_epochs/train_batch_size*0.1) #10% of train data
# Train the model
model.fit(train_objectives=[(train_dataloader, train_loss)],
evaluator=evaluator,
epochs=num_epochs,
evaluation_steps=1000,
warmup_steps=warmup_steps,
output_path=output_path)
##############################################################################
#
# Load the stored model and evaluate its performance on STS benchmark dataset
#
##############################################################################
model = SentenceTransformer(output_path)
test_data = SentencesDataset(examples=triplet_reader.get_examples('test.csv'), model=model)
test_dataloader = DataLoader(test_data, shuffle=False, batch_size=train_batch_size)
evaluator = TripletEvaluator(test_dataloader)
model.evaluate(evaluator)
| [
"torch.utils.data.DataLoader"
] | 1.0.1 | fhaase2/sentence-transformers | 40f994e5e3ce3e2819833773117d788dfa0c7e7f |
1.0 | """
This example trains BERT for the STSbenchmark from scratch. It generates sentence embeddings
that can be compared using cosine-similarity to measure the similarity.
"""
from torch.utils.data import DataLoader
import math
from sentence_transformers import SentenceTransformer, SentencesDataset, LoggingHandler, losses, models
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
from sentence_transformers.readers import STSDataReader
import logging
from datetime import datetime
#### Just some code to print debug information to stdout
logging.basicConfig(format='%(asctime)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=logging.INFO,
handlers=[LoggingHandler()])
#### /print debug information to stdout
# Read the dataset
train_batch_size = 16
num_epochs = 4
model_save_path = 'output/training_stsbenchmark_bert-'+datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
sts_reader = STSDataReader('datasets/stsbenchmark', normalize_scores=True)
# Use BERT for mapping tokens to embeddings
word_embedding_model = models.BERT('bert-base-uncased')
# Apply mean pooling to get one fixed sized sentence vector
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension(),
pooling_mode_mean_tokens=True,
pooling_mode_cls_token=False,
pooling_mode_max_tokens=False)
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
# Convert the dataset to a DataLoader ready for training
logging.info("Read STSbenchmark train dataset")
train_data = SentencesDataset(sts_reader.get_examples('sts-train.csv'), model)
train_dataloader = DataLoader(train_data, shuffle=True, batch_size=train_batch_size)
train_loss = losses.CosineSimilarityLoss(model=model)
logging.info("Read STSbenchmark dev dataset")
dev_data = SentencesDataset(examples=sts_reader.get_examples('sts-dev.csv'), model=model)
dev_dataloader = DataLoader(dev_data, shuffle=False, batch_size=train_batch_size)
evaluator = EmbeddingSimilarityEvaluator(dev_dataloader)
# Configure the training. We skip evaluation in this example
warmup_steps = math.ceil(len(train_data)*num_epochs/train_batch_size*0.1) #10% of train data for warm-up
logging.info("Warmup-steps: {}".format(warmup_steps))
# Train the model
model.fit(train_objectives=[(train_dataloader, train_loss)],
evaluator=evaluator,
epochs=num_epochs,
evaluation_steps=1000,
warmup_steps=warmup_steps,
output_path=model_save_path)
##############################################################################
#
# Load the stored model and evaluate its performance on STS benchmark dataset
#
##############################################################################
model = SentenceTransformer(model_save_path)
test_data = SentencesDataset(examples=sts_reader.get_examples("sts-test.csv"), model=model)
test_dataloader = DataLoader(test_data, shuffle=False, batch_size=train_batch_size)
evaluator = EmbeddingSimilarityEvaluator(test_dataloader)
model.evaluate(evaluator)
| [
"torch.utils.data.DataLoader"
] | 1.0.1 | fhaase2/sentence-transformers | 40f994e5e3ce3e2819833773117d788dfa0c7e7f |
1.1 | import os
import cv2
import sys
import time
import scipy
import torch
import argparse
import numpy as np
import torch.optim
from formal_utils import *
from skimage.transform import resize
from PIL import ImageFilter, Image
use_cuda = torch.cuda.is_available()
# Fixing for deterministic results
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
def numpy_to_torch(img, requires_grad=True):
if len(img.shape) < 3:
output = np.float32([img])
else:
output = np.transpose(img, (2, 0, 1))
output = torch.from_numpy(output)
if use_cuda:
output = output.to('cuda') # cuda()
output.unsqueeze_(0)
output.requires_grad = requires_grad
return output
def create_blurred_circular_mask(mask_shape, radius, center=None, sigma=10):
assert (len(mask_shape) == 2)
if center is None:
x_center = int(mask_shape[1] / float(2))
y_center = int(mask_shape[0] / float(2))
center = (x_center, y_center)
y, x = np.ogrid[-y_center:mask_shape[0] - y_center, -x_center:mask_shape[1] - x_center]
mask = x * x + y * y <= radius * radius
grid = np.zeros(mask_shape)
grid[mask] = 1
if sigma is not None:
grid = scipy.ndimage.filters.gaussian_filter(grid, sigma)
return grid
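# Illustrative sketch (added for clarity; not part of the original script, and
# the mask shape/radius below are arbitrary). The helper returns an [H, W]
# array that is ~1 inside the circle and ~0 outside, with a Gaussian-smoothed
# boundary of width on the order of sigma.
def _circular_mask_example():
    m = create_blurred_circular_mask((224, 224), 50, sigma=10)
    print(m.shape, float(m.min()), float(m.max()))  # (224, 224) ~0.0 ~1.0
# _circular_mask_example()  # uncomment to run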
def create_blurred_circular_mask_pyramid(mask_shape, radii, sigma=10):
assert (len(mask_shape) == 2)
num_masks = len(radii)
masks = np.zeros((num_masks, 3, mask_shape[0], mask_shape[1]))
for i in range(num_masks):
masks[i, :, :, :] = create_blurred_circular_mask(mask_shape, radii[i], sigma=sigma)
return masks
def test_circular_masks(args, model, inpaint_model, o_img, upsample, gt_category, radii=np.arange(0, 175, 5),
thres=1e-2):
masks = create_blurred_circular_mask_pyramid((args.size, args.size), radii)
masks = 1 - masks
u_mask = upsample(torch.from_numpy(masks)).float().to('cuda')
num_masks = len(radii)
img = preprocess_image(np.float32(o_img) / 255, size)
gradient = np.zeros((1, 1000))
gradient[0][gt_category] = 1
scores = np.zeros(num_masks)
batch_masked_img = []
for i in range(num_masks):
if args.algo == 'MP':
null_img = preprocess_image(get_blurred_img(np.float32(o_img)), args.size)
masked_img = img.mul(u_mask[i]) + null_img.mul(1 - u_mask[i])
elif args.algo == 'MPG':
# Use inpainted image for optimization
temp_inpaint_img, _ = inpaint_model.generate_background(img, u_mask[i].unsqueeze(0))
if args.perturb_binary:
thresh = max(0.5, args.thresh * (torch.max(u_mask[i]).cpu().item() + torch.min(
u_mask[i]).cpu().item()))
u_mask[i].data = torch.where(u_mask[i].data > thresh,
torch.ones_like(u_mask[i].data),
torch.zeros_like(u_mask[i].data))
masked_img = img.mul(u_mask[i]) + temp_inpaint_img.mul(1 - u_mask[i])
else:
print('Invalid heatmap style!!')
exit(0)
outputs = torch.nn.Softmax(dim=1)(model(masked_img))
scores[i] = outputs[0, gt_category].cpu().detach()
batch_masked_img.append(masked_img)
img_output = torch.nn.Softmax(dim=1)(model(img)).cpu().detach()
orig_score = img_output[0, gt_category]
percs = (scores - scores[-1]) / float(orig_score - scores[-1])
try:
first_i = np.where(percs < thres)[0][0]
except:
first_i = -1
return radii[first_i]
def get_blurred_img(img, radius=10):
img = Image.fromarray(np.uint8(img))
blurred_img = img.filter(ImageFilter.GaussianBlur(radius))
return np.array(blurred_img) / float(255)
if __name__ == '__main__':
# Hyper parameters.
parser = argparse.ArgumentParser(description='Processing Meaningful Perturbation data')
parser.add_argument('--img_path', type=str,
default='/home/chirag/ILSVRC2012_img_val_bb/ILSVRC2012_img_val/',
help='filepath for the example image')
parser.add_argument('--algo', type=str,
default='MP', help='MP|MPG')
parser.add_argument('--mask_init', type=str,
default='random', help='random|circular')
parser.add_argument('--perturb_binary', type=int,
default=0,
help='flag for using binary mask just for perturbation')
parser.add_argument('--learning_rate', type=float,
default=0.1,
                    help='learning rate for optimizing the mask')
parser.add_argument('--size', type=int,
default=224, help='mask size to be optimized')
parser.add_argument('--true_class', type=int,
default=565,
help='target class of the image you want to explain')
parser.add_argument('--num_iter', type=int,
default=300, help='enter number of optimization iterations')
parser.add_argument('--jitter', type=int,
default=4, help='jitter')
parser.add_argument('--l1_coeff', type=float,
default=1e-4, help='L1 coefficient regularizer')
parser.add_argument('--tv_coeff', type=float,
default=1e-2, help='TV coefficient regularizer')
parser.add_argument('--thresh', type=float,
default=0.5, help='threshold for binarizing mask')
parser.add_argument('--dataset', type=str,
default='imagenet',
help='dataset to run on imagenet | places365')
parser.add_argument('--save_path', type=str,
default='./',
                    help='path to the output folder')
parser.add_argument('--weight_file', type=str,
default='/home/chirag/gpu3_codes/generative_inpainting_FIDO/model_logs/release_imagenet_256/',
help='path for the weight files of the inpainter model for imagenet | places365')
args = parser.parse_args()
# PyTorch random seed
torch.manual_seed(0)
tv_beta = 3
learning_rate = args.learning_rate
max_iterations = args.num_iter
l1_coeff = args.l1_coeff
tv_coeff = args.tv_coeff
size = args.size
if args.dataset == 'imagenet':
model = load_model(arch_name='resnet50')
# load the class label
label_map = load_imagenet_label_map()
elif args.dataset == 'places365':
model = load_model_places365(arch_name='resnet50')
# load the class label
label_map = load_class_label()
else:
        print('Invalid dataset!!')
exit(0)
model = torch.nn.DataParallel(model).to('cuda')
model.eval()
for p in model.parameters():
p.requires_grad = False
if args.algo == 'MPG':
# Tensorflow CA-inpainter from FIDO
sys.path.insert(0, './generative_inpainting')
from CAInpainter import CAInpainter
inpaint_model = CAInpainter(1, checkpoint_dir=args.weight_file)
if use_cuda:
upsample = torch.nn.UpsamplingNearest2d(size=(size, size)).to('cuda')
else:
upsample = torch.nn.UpsamplingNearest2d(size=(size, size))
init_time = time.time()
# Read image
original_img = cv2.imread(args.img_path, 1)
shape = original_img.shape
img = np.float32(original_img) / 255
gt_category = args.true_class
    # amount of random jitter (in pixels) applied during optimization
jitter = args.jitter
# Path to the output folder
save_path = os.path.join(args.save_path, '{}'.format(args.algo), '{}'.format(args.dataset))
mkdir_p(os.path.join(save_path))
# Compute original output
org_softmax = torch.nn.Softmax(dim=1)(model(preprocess_image(img, size)))
eval0 = org_softmax.data[0, gt_category]
pill_transf = get_pil_transform()
o_img_path = os.path.join(save_path, 'real_{}_{:.3f}_image.jpg'
.format(label_map[gt_category].split(',')[0].split(' ')[0].split('-')[0], eval0))
cv2.imwrite(os.path.abspath(o_img_path), cv2.cvtColor(np.array(pill_transf(get_image(args.img_path))), cv2.COLOR_BGR2RGB))
# Convert to torch variables
img = preprocess_image(img, size + jitter)
if use_cuda:
img = img.to('cuda')
# Modified
if args.mask_init == 'random':
np.random.seed(seed=0)
mask = np.random.rand(28, 28)
mask = numpy_to_torch(mask)
elif args.mask_init == 'circular':
# CAFFE mask_init
if args.algo == 'MP':
mask_radius = test_circular_masks(args, model, model, original_img, upsample, gt_category)
elif args.algo == 'MPG':
mask_radius = test_circular_masks(args, model, inpaint_model, original_img, upsample, gt_category)
mask = 1 - create_blurred_circular_mask((size, size), mask_radius, center=None, sigma=10)
mask = resize(mask.astype(float), (size, size))
mask = numpy_to_torch(mask)
else:
print('Invalid mask init!!')
exit(0)
if args.algo == 'MP':
null_img = preprocess_image(get_blurred_img(np.float32(original_img), radius=10), size + jitter)
optimizer = torch.optim.Adam([mask], lr=learning_rate)
for i in range(max_iterations):
if jitter != 0:
j1 = np.random.randint(jitter)
j2 = np.random.randint(jitter)
else:
j1 = 0
j2 = 0
upsampled_mask = upsample(mask)
# The single channel mask is used with an RGB image,
        # so the mask is duplicated to have 3 channels,
upsampled_mask = upsampled_mask.expand(1, 3, upsampled_mask.size(2), upsampled_mask.size(3))
if args.algo == 'MPG':
# Tensorflow CA-inpainter
inpaint_img, _ = inpaint_model.generate_background(img[:, :, j1:(size + j1), j2:(size + j2)],
upsampled_mask)
if args.perturb_binary:
thresh = max(0.5, args.thresh * (torch.max(upsampled_mask).cpu().item() + torch.min(
upsampled_mask).cpu().item()))
upsampled_mask.data = torch.where(upsampled_mask.data > thresh,
torch.ones_like(upsampled_mask.data),
torch.zeros_like(upsampled_mask.data))
perturbated_input = img[:, :, j1:(size + j1), j2:(size + j2)].mul(upsampled_mask) + \
inpaint_img.mul(1 - upsampled_mask)
else:
perturbated_input = img[:, :, j1:(size + j1), j2:(size + j2)].mul(upsampled_mask) + \
inpaint_img.mul(1 - upsampled_mask)
elif args.algo == 'MP':
if args.perturb_binary:
thresh = max(0.5, args.thresh * (torch.max(upsampled_mask).cpu().item() + torch.min(
upsampled_mask).cpu().item()))
upsampled_mask.data = torch.where(upsampled_mask.data > thresh,
torch.ones_like(upsampled_mask.data),
torch.zeros_like(upsampled_mask.data))
perturbated_input = img[:, :, j1:(size + j1), j2:(size + j2)].mul(upsampled_mask) + \
null_img[:, :, j1:(size + j1), j2:(size + j2)].mul(
1 - upsampled_mask)
else:
perturbated_input = img[:, :, j1:(size + j1), j2:(size + j2)].mul(upsampled_mask) + \
null_img[:, :, j1:(size + j1), j2:(size + j2)].mul(
1 - upsampled_mask)
else:
print('Invalid heatmap style!!')
exit(0)
optimizer.zero_grad()
outputs = torch.nn.Softmax(dim=1)(model(perturbated_input))
loss = l1_coeff * torch.sum(torch.abs(1 - mask)) + tv_coeff * tv_norm(mask, tv_beta) + \
outputs[0, gt_category]
loss.backward()
optimizer.step()
mask.data.clamp_(0, 1)
# Create save_path for storing intermediate steps
path = os.path.join(save_path, 'intermediate_steps')
mkdir_p(path)
# Save intermediate steps
amax, aind = outputs.max(dim=1)
gt_val = outputs.data[:, gt_category]
temp_intermediate = np.uint8(
255 * unnormalize(
np.moveaxis(perturbated_input[0, :].cpu().detach().numpy().transpose(), 0, 1)))
cv2.imwrite(
os.path.abspath(os.path.join(path, 'intermediate_{:05d}_{}_{:.3f}_{}_{:.3f}.jpg'
.format(i, label_map[aind.item()].split(',')[0].split(' ')[0].split('-')[0],
amax.item(), label_map[gt_category].split(',')[0].split(' ')[0].split('-')[0],
gt_val.item()))), cv2.cvtColor(temp_intermediate, cv2.COLOR_BGR2RGB))
np.save(os.path.abspath(os.path.join(save_path, "mask_{}.npy".format(args.algo))),
1 - mask.cpu().detach().numpy()[0, 0, :])
# print('Time taken: {:.3f}'.format(time.time() - init_time))
| [
"torch.cuda.is_available",
"torch.nn.DataParallel",
"torch.nn.Softmax",
"torch.manual_seed",
"torch.abs",
"torch.zeros_like",
"torch.min",
"torch.max",
"torch.optim.Adam",
"torch.from_numpy",
"torch.ones_like",
"torch.nn.UpsamplingNearest2d"
] | 1.1.0 | anguyen8/generative-attribution-methods | b533ac799d14e66f9da9123266b83f3c942653d0 |
1.4 | # encoding: utf-8
"""
@author : zhirui zhou
@contact: [email protected]
@time : 2020/4/1 17:03
"""
import os
import torch
from torch.utils.tensorboard import SummaryWriter
from deepseries.log import get_logger
import numpy as np
import time
import copy
class EarlyStopping(Exception):
pass
logger = get_logger(__name__)
class Learner:
def __init__(self, model, optimizer, root_dir, verbose=32, lr_scheduler=None):
self.model = model
self.optimizer = optimizer
self.lr_scheduler = lr_scheduler
self.root_dir = root_dir
self.log_dir = os.path.join(root_dir, 'logs')
self.model_dir = os.path.join(root_dir, 'checkpoints')
for i in [self.root_dir, self.log_dir, self.model_dir]:
if not os.path.exists(i):
os.mkdir(i)
self.epochs = 0
self.best_epoch = -1
self.best_loss = np.inf
self.global_steps = 0
self.use_patient = 0
self.losses = []
self.verbose = verbose
def finish_info(self, message=None):
if message is not None:
logger.info(message)
logger.info(f"training finished, best epoch {self.best_epoch}, best valid loss {self.best_loss:.4f}")
def eval_cycle(self, data_ld):
self.model.eval()
with torch.no_grad():
valid_loss = 0.
for x, y in data_ld:
loss = self.model.batch_loss(x, y).item()
valid_loss += loss / len(data_ld)
return valid_loss
def fit(self, max_epochs, train_dl, valid_dl, early_stopping=True, patient=10, start_save=-1):
with SummaryWriter(self.log_dir) as writer:
# writer.add_graph(self.model)
logger.info(f"start training >>>>>>>>>>> "
f"see log: tensorboard --logdir {self.log_dir}")
start_epoch = copy.copy(self.epochs)
try:
for i in range(max_epochs):
self.epochs += 1
time_start = time.time()
self.model.train()
train_loss = 0
for j, (x, y) in enumerate(train_dl):
self.optimizer.zero_grad()
loss = self.model.batch_loss(x, y)
loss.backward()
self.optimizer.step()
loss = loss.item()
writer.add_scalar("Loss/train", loss, self.global_steps)
self.global_steps += 1
train_loss += loss
if self.verbose > 0 and self.global_steps % self.verbose == 0:
logger.info(f"epoch {self.epochs} / {max_epochs + start_epoch}, "
f"batch {j / len(train_dl) * 100:3.0f}%, "
f"train loss {train_loss / (j + 1):.4f}")
valid_loss = self.eval_cycle(valid_dl)
writer.add_scalar("Loss/valid", valid_loss, self.global_steps)
epoch_use_time = (time.time() - time_start) / 60
logger.info(f"epoch {self.epochs} / {max_epochs + start_epoch}, batch 100%, "
f"train loss {train_loss / len(train_dl):.4f}, valid loss {valid_loss:.4f}, "
f"cost {epoch_use_time:.1f} min")
self.losses.append(valid_loss)
writer.add_scalar('lr', self.optimizer.param_groups[0]['lr'], self.global_steps)
if self.epochs >= start_save:
self.save()
if early_stopping:
if self.epochs > 1:
if valid_loss > self.best_loss:
self.use_patient += 1
else:
self.use_patient = 0
if self.use_patient >= patient:
raise EarlyStopping
if valid_loss <= self.best_loss:
self.best_loss = valid_loss
self.best_epoch = self.epochs
if self.lr_scheduler is not None:
self.lr_scheduler.step()
except KeyboardInterrupt:
self.finish_info("KeyboardInterrupt")
return
except EarlyStopping:
self.finish_info("EarlyStopping")
return
self.finish_info()
def load(self, epoch, checkpoint_dir=None):
if checkpoint_dir is None:
checkpoint_dir = self.model_dir
checkpoint = torch.load(os.path.join(checkpoint_dir, f"model-epoch-{epoch}.pkl"))
self.model.load_state_dict(checkpoint['model'])
self.optimizer.load_state_dict(checkpoint['optimizer'])
self.epochs = checkpoint['epochs']
self.lr_scheduler = checkpoint['lr_scheduler']
self.epochs = epoch
self.losses = checkpoint['losses']
self.best_loss = checkpoint['best_loss']
self.best_epoch = checkpoint['best_epoch']
self.global_steps = checkpoint['global_steps']
def save(self):
checkpoint = {
"model": self.model.state_dict(),
"optimizer": self.optimizer.state_dict(),
"epochs": self.epochs,
'lr_scheduler': self.lr_scheduler,
'losses': self.losses,
'best_loss': self.best_loss,
'best_epoch': self.best_epoch,
'use_patient': self.use_patient,
'global_steps': self.global_steps,
}
name = f"model-epoch-{self.epochs}.pkl"
torch.save(checkpoint, os.path.join(self.model_dir, name))
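# ---------------------------------------------------------------------------
# Illustrative sketch (added for clarity, not part of the original project):
# minimal wiring of the Learner above with a toy model. The only interface the
# Learner assumes of the model is a `batch_loss(x, y)` method returning a
# scalar loss tensor; everything below (ToyModel, the random data) is made up.
def _example_learner_usage(root_dir="./runs_example"):
    import torch
    from torch.utils.data import DataLoader, TensorDataset

    class ToyModel(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.linear = torch.nn.Linear(8, 1)

        def batch_loss(self, x, y):
            return torch.nn.functional.mse_loss(self.linear(x), y)

    x, y = torch.randn(64, 8), torch.randn(64, 1)
    dl = DataLoader(TensorDataset(x, y), batch_size=16)
    model = ToyModel()
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
    learner = Learner(model, optimizer, root_dir=root_dir, verbose=0)
    learner.fit(max_epochs=2, train_dl=dl, valid_dl=dl)
    return learner.best_loss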
| [
"torch.no_grad",
"torch.utils.tensorboard.SummaryWriter"
] | 1.4 | EvilPsyCHo/Deep-Time-Series-Prediction | f6a6da060bb3f7d07f2a61967ee6007e9821064e |
1.3 | from PIL import Image
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
# efficient gradient descents
import torch.optim as optim
# Prevent browser caching of generated image
import time
#to deep copy the models
import copy
import os
# Function to check if uploaded file has an acceptable extension
def is_file_allowed(filename):
if not "." in filename:
return False
suffix = filename.rsplit('.', 1)[1]
if suffix.lower() in ['jpeg', 'jpg', 'png']:
return True
else:
return False
# Function to replace an image name with a unique timestamp (keeps the extension;
# the changing name also prevents the browser from caching the generated image)
def add_time(filename):
    img_suffix = filename.rsplit('.', 1)[1]
    filename = str(time.time()).replace('.', '_') + '.' + img_suffix
    return filename
# Convert image to torch tensor
def image_loader(img_path, loader, device):
img = Image.open(img_path)
# If PNG file, get rid of 4th channel (alpha) by converting image to JPG
if img_path[-3:].lower() == 'png':
img = img.convert('RGB')
# Insert 1 in shape of the tensor at axis 0 (batch size)
# Extra dimension is required to fit the network's input dimensions
img = loader(img).unsqueeze(0)
return img.to(device, torch.float)
def imshow(tensor, loader, unloader, folder='', title=None, output=False):
# Clone the tensor so it's not changed in-place
image = tensor.cpu().clone()
    # Remove the extra dimension added previously
image = image.squeeze(0)
image = unloader(image)
# Now we have a normal image, let's display it
plt.imshow(image)
if title is not None:
plt.title(title)
if output:
output_name = 'result' + '?' + str(time.time()) + '.png'
plt.savefig(
folder + '/' + output_name,
bbox_inches=None,
pad_inches=0.)
plt.close()
return output_name
class ContentLoss(nn.Module):
def __init__(self, target):
# Sub-class this class
super(ContentLoss, self).__init__()
# we 'detach' the target content from the tree used
# to dynamically compute the gradient: this is a stated value,
# not a variable. Otherwise the forward method of the criterion
# will throw an error.
self.target = target.detach()
def forward(self, input):
self.loss = F.mse_loss(input, self.target)
return input
def gram_matrix(input):
# a is batch size, equal to 1
# b is the number of feature maps
# (c,d) are dimensions of feature map
a, b, c, d = input.size()
# resize matrix to [b,(c*d)] form
features = input.view(a * b, c * d)
# Compute the Gram-Matrix and normalize it
G = torch.mm(features, features.t())
return G.div(a * b * c * d)
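# Quick illustration (added for clarity): the Gram matrix of a feature map is a
# symmetric [b, b] matrix, where b is the number of feature maps. The shapes
# below are arbitrary; the point is only the expected output shape.
def _example_gram_matrix():
    feats = torch.randn(1, 64, 32, 32)   # batch=1, 64 feature maps of size 32x32
    G = gram_matrix(feats)
    assert G.shape == (64, 64)
    return G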
class StyleLoss(nn.Module):
def __init__(self, target_feature):
# Sub-class this class
super(StyleLoss, self).__init__()
self.target = gram_matrix(target_feature).detach()
def forward(self, input):
G = gram_matrix(input)
self.loss = F.mse_loss(G, self.target)
return input
class Normalization(nn.Module):
def __init__(self, mean, std):
# Sub-class this class
super(Normalization, self).__init__()
# Use .view to change the shape of the mean and std
# tensors. They take the form [num_channels x 1 x 1]
# and become compatible with the image tensors
self.mean = torch.tensor(mean).view(-1, 1, 1)
self.std = torch.tensor(std).view(-1, 1, 1)
def forward(self, img):
# normalize the image with VGG stats
return (img - self.mean) / self.std
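# Small sketch (added for clarity): Normalization reshapes the per-channel
# statistics to [C, 1, 1] so they broadcast over a [B, C, H, W] image tensor.
# The values below are the usual ImageNet/VGG statistics.
def _example_normalization():
    mean = torch.tensor([0.485, 0.456, 0.406])
    std = torch.tensor([0.229, 0.224, 0.225])
    norm = Normalization(mean, std)
    img = torch.rand(1, 3, 128, 128)
    out = norm(img)
    assert out.shape == img.shape
    return out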
# Calculate content loss at conv level 4
content_layers_default = ['conv_4']
# Calculate style loss at each level
style_layers_default = [
'conv_1',
'conv_2',
'conv_3',
'conv_4',
'conv_5'
]
def get_style_model_and_losses(cnn,
normalization_mean,
normalization_std,
device,
style_img,
content_img,
content_layers=content_layers_default,
style_layers=style_layers_default
):
cnn = copy.deepcopy(cnn)
normalization = Normalization(normalization_mean, normalization_std).to(device)
content_losses = []
style_losses = []
model = nn.Sequential(normalization)
i = 0
for layer in cnn.children():
if isinstance(layer, nn.Conv2d):
# If we see a conv layer, increment i
i += 1
name = f'conv_{i}'
elif isinstance(layer, nn.ReLU):
name = f'relu_{i}'
# Replace in-place version with out-of-place as it
# doesn't work too well with style/content losses
layer = nn.ReLU(inplace=False)
elif isinstance(layer, nn.MaxPool2d):
name = f'pool_{i}'
elif isinstance(layer, nn.BatchNorm2d):
name = f'bn_{i}'
else:
raise RuntimeError(f'Unrecognized layer: {layer.__class__.__name__}')
model.add_module(name, layer)
if name in content_layers:
# add content loss
target = model(content_img).detach()
content_loss = ContentLoss(target)
model.add_module(f'content_loss_{i}', content_loss)
content_losses.append(content_loss)
if name in style_layers:
# add style loss
target_feature = model(style_img).detach()
style_loss = StyleLoss(target_feature)
model.add_module(f'style_loss_{i}', style_loss)
style_losses.append(style_loss)
# We then trim off the layers after the last content and style losses
for i in range(len(model) - 1, -1, -1):
# As soon as we encounter the last style/content layer, break the loop
if isinstance(model[i], ContentLoss) or isinstance(model[i], StyleLoss):
break
# Grab the model until the cut-off point
model = model[:(i + 1)]
return model, style_losses, content_losses
def get_input_optimizer(input_img):
optimizer = optim.LBFGS([input_img.requires_grad_()])
return optimizer
def run_style_transfer(
cnn,
normalization_mean,
normalization_std,
content_img,
style_img,
input_img,
device,
loader,
unloader,
folder,
num_steps=300, # Kept it low for quick debugging
style_weight=1000000,
content_weight=1):
"""Run the style transfer."""
print('Building the style transfer model..')
model, style_losses, content_losses = get_style_model_and_losses(cnn,
normalization_mean, normalization_std, device, style_img, content_img)
optimizer = get_input_optimizer(input_img)
print('Optimizing...')
print()
run = [0]
while run[0] <= num_steps:
def closure():
# Clamp the image tensor to (0,1) range
input_img.data.clamp_(0, 1)
optimizer.zero_grad()
model(input_img)
style_score = 0
content_score = 0
for sl in style_losses:
style_score += sl.loss
for cl in content_losses:
content_score += cl.loss
style_score *= style_weight
content_score *= content_weight
loss = style_score + content_score
loss.backward()
run[0] += 1
if run[0] % 50 == 0:
print(f'Run {run[0]}:')
print(f'Style Loss : {style_score.item():4f}')
print(f'Content Loss: {content_score.item():4f}')
print()
plt.figure(figsize=(8,8))
title = f'Run {run[0]} Image'
imshow(input_img, loader, unloader, folder,
title=title, output=True)
return style_score + content_score
optimizer.step(closure)
# Clamp the data one last time
input_img.data.clamp_(0, 1)
return input_img | [
"torch.nn.Sequential",
"torch.nn.functional.mse_loss",
"torch.nn.ReLU",
"torch.tensor"
] | 1.3.1 | sarthakbatragatech/style_transfer | a29a43b2b161784a51dc81c75c4320abf683e67b |
1.8 | import torch
torch.backends.cudnn.benchmark = True
import torch.nn.functional as F
from torch.distributions import Normal, Categorical
import numpy as np
from .reinforce import REINFORCE
class PPO(REINFORCE):
"""Proximal Policy Optimization (PPO) agent.
Args:
batch_size (int): the number of samples in the one batch.
n_step (int): The number of steps to run for each environment per update.
n_epoch (int): Number of epoch when optimizing the surrogate.
_lambda (float): Factor for trade-off of bias vs variance for Generalized Advantage Estimator.
epsilon_clip (float): probability ratio clipping interval.
vf_coef (float): Value function coefficient for the loss calculation.
ent_coef (float): Entropy coefficient for the loss calculation.
clip_grad_norm (float): gradient clipping threshold.
        num_workers (int): the number of parallel workers (agents) in distributed learning.
"""
def __init__(
self,
network="discrete_policy_value",
batch_size=32,
n_step=128,
n_epoch=3,
_lambda=0.95,
epsilon_clip=0.1,
vf_coef=1.0,
ent_coef=0.01,
clip_grad_norm=1.0,
num_workers=1,
**kwargs,
):
super(PPO, self).__init__(network=network, **kwargs)
self.batch_size = batch_size
self.n_step = n_step
self.n_epoch = n_epoch
self._lambda = _lambda
self.epsilon_clip = epsilon_clip
self.vf_coef = vf_coef
self.ent_coef = ent_coef
self.clip_grad_norm = clip_grad_norm
self.num_workers = num_workers
self.time_t = 0
self.learn_stamp = 0
@torch.no_grad()
def act(self, state, training=True):
self.network.train(training)
if self.action_type == "continuous":
mu, std, _ = self.network(self.as_tensor(state))
z = torch.normal(mu, std) if training else mu
action = torch.tanh(z)
else:
pi, _ = self.network(self.as_tensor(state))
action = (
torch.multinomial(pi, 1)
if training
else torch.argmax(pi, dim=-1, keepdim=True)
)
return {"action": action.cpu().numpy()}
def learn(self):
transitions = self.memory.sample()
for key in transitions.keys():
transitions[key] = self.as_tensor(transitions[key])
state = transitions["state"]
action = transitions["action"]
reward = transitions["reward"]
next_state = transitions["next_state"]
done = transitions["done"]
# set prob_a_old and advantage
with torch.no_grad():
if self.action_type == "continuous":
mu, std, value = self.network(state)
m = Normal(mu, std)
z = torch.atanh(torch.clamp(action, -1 + 1e-7, 1 - 1e-7))
prob = m.log_prob(z).exp()
else:
pi, value = self.network(state)
prob = pi.gather(1, action.long())
prob_old = prob
next_value = self.network(next_state)[-1]
delta = reward + (1 - done) * self.gamma * next_value - value
adv = delta.clone()
adv, done = adv.view(-1, self.n_step), done.view(-1, self.n_step)
for t in reversed(range(self.n_step - 1)):
adv[:, t] += (
(1 - done[:, t]) * self.gamma * self._lambda * adv[:, t + 1]
)
if self.use_standardization:
adv = (adv - adv.mean(dim=1, keepdim=True)) / (
adv.std(dim=1, keepdim=True) + 1e-7
)
adv = adv.view(-1, 1)
ret = adv + value
# start train iteration
actor_losses, critic_losses, entropy_losses, ratios, probs = [], [], [], [], []
idxs = np.arange(len(reward))
for _ in range(self.n_epoch):
np.random.shuffle(idxs)
for offset in range(0, len(reward), self.batch_size):
idx = idxs[offset : offset + self.batch_size]
_state, _action, _ret, _next_state, _adv, _prob_old = map(
lambda x: [_x[idx] for _x in x] if isinstance(x, list) else x[idx],
[state, action, ret, next_state, adv, prob_old],
)
if self.action_type == "continuous":
mu, std, value = self.network(_state)
m = Normal(mu, std)
z = torch.atanh(torch.clamp(_action, -1 + 1e-7, 1 - 1e-7))
prob = m.log_prob(z).exp()
else:
pi, value = self.network(_state)
m = Categorical(pi)
prob = pi.gather(1, _action.long())
ratio = (prob / (_prob_old + 1e-7)).prod(1, keepdim=True)
surr1 = ratio * _adv
surr2 = (
torch.clamp(
ratio, min=1 - self.epsilon_clip, max=1 + self.epsilon_clip
)
* _adv
)
actor_loss = -torch.min(surr1, surr2).mean()
critic_loss = F.mse_loss(value, _ret).mean()
entropy_loss = -m.entropy().mean()
loss = (
actor_loss
+ self.vf_coef * critic_loss
+ self.ent_coef * entropy_loss
)
self.optimizer.zero_grad(set_to_none=True)
loss.backward()
torch.nn.utils.clip_grad_norm_(
self.network.parameters(), self.clip_grad_norm
)
self.optimizer.step()
probs.append(prob.min().item())
ratios.append(ratio.max().item())
actor_losses.append(actor_loss.item())
critic_losses.append(critic_loss.item())
entropy_losses.append(entropy_loss.item())
result = {
"actor_loss": np.mean(actor_losses),
"critic_loss": np.mean(critic_losses),
"entropy_loss": np.mean(entropy_losses),
"max_ratio": max(ratios),
"min_prob": min(probs),
"min_prob_old": prob_old.min().item(),
}
return result
def process(self, transitions, step):
result = {}
# Process per step
self.memory.store(transitions)
delta_t = step - self.time_t
self.time_t = step
self.learn_stamp += delta_t
# Process per epi
if self.learn_stamp >= self.n_step:
result = self.learn()
self.learn_stamp = 0
return result
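# Standalone sketch (added for clarity, independent of the class above): the PPO
# clipped surrogate objective on dummy tensors. `ratio` plays the role of
# pi_new(a|s) / pi_old(a|s) and `adv` the estimated advantage; the numbers are
# arbitrary.
def _example_clipped_surrogate(epsilon_clip=0.1):
    ratio = torch.tensor([[0.8], [1.0], [1.3]])
    adv = torch.tensor([[1.0], [-0.5], [2.0]])
    surr1 = ratio * adv
    surr2 = torch.clamp(ratio, min=1 - epsilon_clip, max=1 + epsilon_clip) * adv
    return -torch.min(surr1, surr2).mean()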
| [
"torch.distributions.Categorical",
"torch.min",
"torch.no_grad",
"torch.distributions.Normal",
"torch.normal",
"torch.clamp",
"torch.multinomial",
"torch.nn.functional.mse_loss",
"torch.tanh",
"torch.argmax"
] | 1.8.1 | zenoengine/JORLDY | 1eb867e52a03e0282a55fa612cbc5b5de701ffe7 |
1.2 | import numpy as np
import torch
def format_attention(attention):
squeezed = []
for layer_attention in attention:
# 1 x num_heads x seq_len x seq_len
if len(layer_attention.shape) != 4:
raise ValueError("The attention tensor does not have the correct number of dimensions. Make sure you set "
"output_attentions=True when initializing your model.")
squeezed.append(layer_attention.squeeze(0))
# num_layers x num_heads x seq_len x seq_len
return torch.stack(squeezed)
def find_sub_list(sl,l):
results=[]
sll=len(sl)
for ind in (i for i,e in enumerate(l) if e==sl[0]):
if l[ind:ind+sll]==sl:
results.append((ind,ind+sll-1))
return results
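# Tiny example (added for clarity): find_sub_list returns the inclusive
# (start, end) index pair of every occurrence of a sub-list.
def _example_find_sub_list():
    spans = find_sub_list([2, 3], [1, 2, 3, 4, 2, 3])
    assert spans == [(1, 2), (4, 5)]
    return spans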
def MAS(model, tokenizer, pronoun, candidate_a, candidate_b, sentence_a, sentence_b=None, layer=None, head=None):
"""
Computes the Maximum Attention Score (MAS) given a sentence, a pronoun and candidates for substitution.
Parameters
----------
model : transformers.BertModel
BERT model from BERT visualization that provides access to attention
tokenizer: transformers.tokenization.BertTokenizer
        BERT tokenizer
pronoun: string
pronoun to be replaced by a candidate
candidate_a: string
First pronoun replacement candidate
candidate_b: string
Second pronoun replacement candidate
sentence_a: string
First, or only sentence
sentence_b: string (optional)
Optional, second sentence
layer: None, int
If none, MAS will be computed over all layers, otherwise a specific layer
head: None, int
        If None, MAS will be computed over all attention heads, otherwise only at the specific head
Returns
-------
    scores : list
List of scores [score for candidate_a, score for candidate_b]
"""
inputs = tokenizer.encode_plus(sentence_a, sentence_b, return_tensors='pt', add_special_tokens=True)
input_ids = inputs['input_ids']
token_type_ids = inputs['token_type_ids']
candidate_a_ids = tokenizer.encode(candidate_a)[1:-1]
candidate_b_ids = tokenizer.encode(candidate_b)[1:-1]
pronoun_ids = tokenizer.encode(pronoun)[1:-1]
if next(model.parameters()).is_cuda:
attention = model(input_ids.cuda(), token_type_ids=token_type_ids.cuda())[-1]
else:
attention = model(input_ids, token_type_ids=token_type_ids)[-1]
attn = format_attention(attention)
if next(model.parameters()).is_cuda:
A = torch.zeros((attn.shape[0], attn.shape[1])).cuda()
B = torch.zeros((attn.shape[0], attn.shape[1])).cuda()
else:
A = torch.zeros((attn.shape[0], attn.shape[1]))
B = torch.zeros((attn.shape[0], attn.shape[1]))
if not layer is None:
assert layer<attn.shape[0], "Maximum layer number "+str(attn.shape[0])+" exceeded"
layer_slice = slice(layer,layer+1,1)
else:
layer_slice = slice(None,None,None)
if not head is None:
assert head<attn.shape[1], "Maximum head number "+str(attn.shape[1])+" exceeded"
head_slice = slice(head,head+1,1)
else:
head_slice = slice(None,None,None)
assert len(find_sub_list(pronoun_ids, input_ids[0].tolist())) > 0, "pronoun not found in sentence"
assert len(find_sub_list(candidate_a_ids, input_ids[0].tolist())) > 0, "candidate_a not found in sentence"
assert len(find_sub_list(candidate_b_ids, input_ids[0].tolist())) > 0, "candidate_b not found in sentence"
for _,src in enumerate(find_sub_list(pronoun_ids, input_ids[0].tolist())):
for _, tar_a in enumerate(find_sub_list(candidate_a_ids, input_ids[0].tolist())):
A=A+attn[layer_slice,head_slice, slice(tar_a[0],tar_a[1]+1,1), slice(src[0],src[0]+1,1)].mean(axis=2).mean(axis=2)
for _, tar_b in enumerate(find_sub_list(candidate_b_ids, input_ids[0].tolist())):
B=B+attn[layer_slice,head_slice, slice(tar_b[0],tar_b[1]+1,1),slice(src[0],src[0]+1,1)].mean(axis=2).mean(axis=2)
score = sum((A >= B).flatten()).item()/(A.shape[0]*A.shape[1])
return [score, 1.0-score] | [
"torch.zeros",
"torch.stack"
] | 1.2.0 | moinnabi/acl2019-commonsense | 0dab6ac8cb58ba724a4b091172656b922dafc740 |
1.2 | '''
Adversarial Attacks on Neural Networks for Graph Data. ICML 2018.
https://arxiv.org/abs/1806.02371
Author's Implementation
https://github.com/Hanjun-Dai/graph_adversarial_attack
This part of the code is adapted from the author's implementation (Copyright (c) 2018 Dai, Hanjun and Li, Hui and Tian, Tian and Huang, Xin and Wang, Lin and Zhu, Jun and Song, Le), modified
to be integrated into this repository.
'''
import os
import sys
import numpy as np
import torch
import networkx as nx
import random
from torch.nn.parameter import Parameter
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from tqdm import tqdm
from deeprobust.graph.rl.env import GraphNormTool
class QNetNode(nn.Module):
def __init__(self, node_features, node_labels, list_action_space, bilin_q=1, embed_dim=64, mlp_hidden=64, max_lv=1, gm='mean_field', device='cpu'):
'''
bilin_q: bilinear q or not
mlp_hidden: mlp hidden layer size
        max_lv: max rounds of message passing
'''
super(QNetNode, self).__init__()
self.node_features = node_features
self.node_labels = node_labels
self.list_action_space = list_action_space
self.total_nodes = len(list_action_space)
self.bilin_q = bilin_q
self.embed_dim = embed_dim
self.mlp_hidden = mlp_hidden
self.max_lv = max_lv
self.gm = gm
if bilin_q:
last_wout = embed_dim
else:
last_wout = 1
self.bias_target = Parameter(torch.Tensor(1, embed_dim))
if mlp_hidden:
self.linear_1 = nn.Linear(embed_dim * 2, mlp_hidden)
self.linear_out = nn.Linear(mlp_hidden, last_wout)
else:
self.linear_out = nn.Linear(embed_dim * 2, last_wout)
self.w_n2l = Parameter(torch.Tensor(node_features.size()[1], embed_dim))
self.bias_n2l = Parameter(torch.Tensor(embed_dim))
self.bias_picked = Parameter(torch.Tensor(1, embed_dim))
self.conv_params = nn.Linear(embed_dim, embed_dim)
self.norm_tool = GraphNormTool(normalize=True, gm=self.gm, device=device)
weights_init(self)
def make_spmat(self, n_rows, n_cols, row_idx, col_idx):
idxes = torch.LongTensor([[row_idx], [col_idx]])
values = torch.ones(1)
sp = torch.sparse.FloatTensor(idxes, values, torch.Size([n_rows, n_cols]))
if next(self.parameters()).is_cuda:
sp = sp.cuda()
return sp
def forward(self, time_t, states, actions, greedy_acts=False, is_inference=False):
if self.node_features.data.is_sparse:
input_node_linear = torch.spmm(self.node_features, self.w_n2l)
else:
input_node_linear = torch.mm(self.node_features, self.w_n2l)
input_node_linear += self.bias_n2l
        # TODO: the number of target nodes equals batch_size, so this loop effectively parallelizes over the batch
target_nodes, batch_graph, picked_nodes = zip(*states)
list_pred = []
prefix_sum = []
for i in range(len(batch_graph)):
region = self.list_action_space[target_nodes[i]]
node_embed = input_node_linear.clone()
if picked_nodes is not None and picked_nodes[i] is not None:
with torch.set_grad_enabled(mode=not is_inference):
picked_sp = self.make_spmat(self.total_nodes, 1, picked_nodes[i], 0)
node_embed += torch.spmm(picked_sp, self.bias_picked)
region = self.list_action_space[picked_nodes[i]]
if not self.bilin_q:
with torch.set_grad_enabled(mode=not is_inference):
# with torch.no_grad():
target_sp = self.make_spmat(self.total_nodes, 1, target_nodes[i], 0)
node_embed += torch.spmm(target_sp, self.bias_target)
with torch.set_grad_enabled(mode=not is_inference):
device = self.node_features.device
adj = self.norm_tool.norm_extra( batch_graph[i].get_extra_adj(device))
lv = 0
input_message = node_embed
node_embed = F.relu(input_message)
while lv < self.max_lv:
n2npool = torch.spmm(adj, node_embed)
node_linear = self.conv_params( n2npool )
merged_linear = node_linear + input_message
node_embed = F.relu(merged_linear)
lv += 1
target_embed = node_embed[target_nodes[i], :].view(-1, 1)
if region is not None:
node_embed = node_embed[region]
graph_embed = torch.mean(node_embed, dim=0, keepdim=True)
if actions is None:
graph_embed = graph_embed.repeat(node_embed.size()[0], 1)
else:
if region is not None:
act_idx = region.index(actions[i])
else:
act_idx = actions[i]
node_embed = node_embed[act_idx, :].view(1, -1)
embed_s_a = torch.cat((node_embed, graph_embed), dim=1)
if self.mlp_hidden:
embed_s_a = F.relu( self.linear_1(embed_s_a) )
raw_pred = self.linear_out(embed_s_a)
if self.bilin_q:
raw_pred = torch.mm(raw_pred, target_embed)
list_pred.append(raw_pred)
if greedy_acts:
actions, _ = node_greedy_actions(target_nodes, picked_nodes, list_pred, self)
return actions, list_pred
class NStepQNetNode(nn.Module):
def __init__(self, num_steps, node_features, node_labels, list_action_space, bilin_q=1, embed_dim=64, mlp_hidden=64, max_lv=1, gm='mean_field', device='cpu'):
super(NStepQNetNode, self).__init__()
self.node_features = node_features
self.node_labels = node_labels
self.list_action_space = list_action_space
self.total_nodes = len(list_action_space)
list_mod = []
for i in range(0, num_steps):
# list_mod.append(QNetNode(node_features, node_labels, list_action_space))
list_mod.append(QNetNode(node_features, node_labels, list_action_space, bilin_q, embed_dim, mlp_hidden, max_lv, gm=gm, device=device))
self.list_mod = nn.ModuleList(list_mod)
self.num_steps = num_steps
def forward(self, time_t, states, actions, greedy_acts = False, is_inference=False):
assert time_t >= 0 and time_t < self.num_steps
return self.list_mod[time_t](time_t, states, actions, greedy_acts, is_inference)
def glorot_uniform(t):
if len(t.size()) == 2:
fan_in, fan_out = t.size()
elif len(t.size()) == 3:
# out_ch, in_ch, kernel for Conv 1
fan_in = t.size()[1] * t.size()[2]
fan_out = t.size()[0] * t.size()[2]
else:
fan_in = np.prod(t.size())
fan_out = np.prod(t.size())
limit = np.sqrt(6.0 / (fan_in + fan_out))
t.uniform_(-limit, limit)
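# Small check (added for clarity): for a 2-D weight, glorot_uniform fills the
# tensor in place with values drawn from U(-limit, limit), where
# limit = sqrt(6 / (fan_in + fan_out)).
def _example_glorot_uniform():
    w = torch.empty(128, 64)
    glorot_uniform(w)
    limit = np.sqrt(6.0 / (128 + 64))
    assert w.abs().max().item() <= limit
    return w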
def _param_init(m):
if isinstance(m, Parameter):
glorot_uniform(m.data)
elif isinstance(m, nn.Linear):
m.bias.data.zero_()
glorot_uniform(m.weight.data)
def weights_init(m):
for p in m.modules():
if isinstance(p, nn.ParameterList):
for pp in p:
_param_init(pp)
else:
_param_init(p)
for name, p in m.named_parameters():
if not '.' in name: # top-level parameters
_param_init(p)
def node_greedy_actions(target_nodes, picked_nodes, list_q, net):
assert len(target_nodes) == len(list_q)
actions = []
values = []
for i in range(len(target_nodes)):
region = net.list_action_space[target_nodes[i]]
if picked_nodes is not None and picked_nodes[i] is not None:
region = net.list_action_space[picked_nodes[i]]
if region is None:
assert list_q[i].size()[0] == net.total_nodes
else:
assert len(region) == list_q[i].size()[0]
val, act = torch.max(list_q[i], dim=0)
values.append(val)
if region is not None:
act = region[act.data.cpu().numpy()[0]]
# act = Variable(torch.LongTensor([act]))
act = torch.LongTensor([act])
actions.append(act)
else:
actions.append(act)
return torch.cat(actions, dim=0).data, torch.cat(values, dim=0).data
| [
"torch.nn.Linear",
"torch.Size",
"torch.cat",
"torch.nn.ModuleList",
"torch.max",
"torch.ones",
"torch.mm",
"torch.nn.functional.relu",
"torch.mean",
"torch.LongTensor",
"torch.spmm",
"torch.Tensor",
"torch.set_grad_enabled"
] | 1.2.0 | shixiongjing/DeepRobust | 276a7048aded2cf3a190d3851ffd4587b7d1dd49 |
1.2 | """
Attack based on the spectral properties of the graph Laplacian.
"""
import numpy as np
import scipy.sparse as sp
import torch
from torch import optim
from torch.nn import functional as F
from torch.nn.parameter import Parameter
from tqdm import tqdm
from deeprobust.graph import utils
from deeprobust.graph.global_attack import BaseAttack
class SpectralAttack(BaseAttack):
"""
Spectral attack for graph data.
Parameters
"""
def __init__(self,
model=None,
nnodes=None,
loss_type='CE',
feature_shape=None,
attack_structure=True,
attack_features=False,
regularization_weight=5.0,
device='cpu'):
super(SpectralAttack, self).__init__(model,
nnodes,
attack_structure,
attack_features,
device)
assert attack_structure or attack_features, 'attack_feature or attack_structure cannot be both False'
self.loss_type = loss_type
self.modified_adj = None
self.modified_features = None
self.regularization_weight = regularization_weight
if attack_features:
            assert False, 'SpectralAttack does not support attacking features yet'
if attack_structure:
assert nnodes is not None, 'Please give nnodes='
self.adj_changes = Parameter(torch.FloatTensor(int(nnodes*(nnodes-1)/2)))
self.adj_changes.data.fill_(0)
self.complementary = None
def attack(self, ori_features, ori_adj, labels, idx_train, n_perturbations, epochs=200, **kwargs):
"""
Generate perturbations on the input graph
"""
victim_model = self.surrogate
self.sparse_features = sp.issparse(ori_features)
ori_adj, ori_features, labels = utils.to_tensor(ori_adj, ori_features, labels, device=self.device)
victim_model.eval()
for t in tqdm(range(epochs)):
modified_adj = self.get_modified_adj(ori_adj)
adj_norm = utils.normalize_adj_tensor(modified_adj, device=self.device)
output = victim_model(ori_features, adj_norm)
loss = self._loss(output[idx_train], labels[idx_train])
# New: add regularization term for spectral distance
if self.regularization_weight != 0:
ori_adj_norm = utils.normalize_adj_tensor(ori_adj, device=self.device)
ori_e, ori_v = torch.symeig(ori_adj_norm, eigenvectors=True)
e, v = torch.symeig(adj_norm, eigenvectors=True)
regularization = F.mse_loss(ori_e, e)
loss += regularization * self.regularization_weight
adj_grad = torch.autograd.grad(loss, self.adj_changes)[0]
if self.loss_type == 'CE':
lr = 200 / np.sqrt(t+1)
self.adj_changes.data.add_(lr * adj_grad)
if self.loss_type == 'CW':
lr = 0.1 / np.sqrt(t+1)
self.adj_changes.data.add_(lr * adj_grad)
self.projection(n_perturbations)
self.random_sample(ori_adj, ori_features, labels, idx_train, n_perturbations)
self.modified_adj = self.get_modified_adj(ori_adj).detach()
self.check_adj_tensor(self.modified_adj)
# for sanity check
ori_adj_norm = utils.normalize_adj_tensor(ori_adj, device=self.device)
ori_e, ori_v = torch.symeig(ori_adj_norm, eigenvectors=True)
adj_norm = utils.normalize_adj_tensor(self.modified_adj, device=self.device)
e, v = torch.symeig(adj_norm, eigenvectors=True)
self.adj = ori_adj.detach()
self.labels = labels.detach()
self.ori_e = ori_e
self.ori_v = ori_v
self.e = e
self.v = v
def random_sample(self, ori_adj, ori_features, labels, idx_train, n_perturbations):
K = 20
best_loss = -1000
victim_model = self.surrogate
with torch.no_grad():
s = self.adj_changes.cpu().detach().numpy()
for i in range(K):
sampled = np.random.binomial(1, s)
# print(sampled.sum())
if sampled.sum() > n_perturbations:
continue
self.adj_changes.data.copy_(torch.tensor(sampled))
modified_adj = self.get_modified_adj(ori_adj)
adj_norm = utils.normalize_adj_tensor(modified_adj, device=self.device)
output = victim_model(ori_features, adj_norm)
loss = self._loss(output[idx_train], labels[idx_train])
# loss = F.nll_loss(output[idx_train], labels[idx_train])
# print(loss)
if best_loss < loss:
best_loss = loss
best_s = sampled
self.adj_changes.data.copy_(torch.tensor(best_s))
def get_modified_adj(self, ori_adj):
if self.complementary is None:
self.complementary = (torch.ones_like(ori_adj) - torch.eye(self.nnodes).to(self.device) - ori_adj) - ori_adj
m = torch.zeros((self.nnodes, self.nnodes)).to(self.device)
tril_indices = torch.tril_indices(row=self.nnodes, col=self.nnodes, offset=-1)
m[tril_indices[0], tril_indices[1]] = self.adj_changes
m = m + m.t()
modified_adj = self.complementary * m + ori_adj
return modified_adj
def projection(self, n_perturbations):
if torch.clamp(self.adj_changes, 0, 1).sum() > n_perturbations:
left = (self.adj_changes - 1).min()
right = self.adj_changes.max()
miu = self.bisection(left, right, n_perturbations, epsilon=1e-5)
self.adj_changes.data.copy_(torch.clamp(self.adj_changes.data - miu, min=0, max=1))
else:
self.adj_changes.data.copy_(torch.clamp(self.adj_changes.data, min=0, max=1))
def _loss(self, output, labels):
if self.loss_type == "CE":
loss = F.nll_loss(output, labels)
if self.loss_type == "CW":
onehot = utils.tensor2onehot(labels)
best_second_class = (output - 1000*onehot).argmax(1)
margin = output[np.arange(len(output)), labels] - \
output[np.arange(len(output)), best_second_class]
k = 0
loss = -torch.clamp(margin, min=k).mean()
# loss = torch.clamp(margin.sum()+50, min=k)
return loss
def bisection(self, a, b, n_perturbations, epsilon):
def func(x):
return torch.clamp(self.adj_changes-x, 0, 1).sum() - n_perturbations
miu = a
while ((b-a) >= epsilon):
miu = (a+b)/2
# Check if middle point is root
if (func(miu) == 0.0):
break
# Decide the side to repeat the steps
if (func(miu)*func(a) < 0):
b = miu
else:
a = miu
# print("The value of root is : ","%.4f" % miu)
return miu
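# Standalone sketch (added for clarity): the bisection above searches for a shift
# `miu` such that clamping (adj_changes - miu) to [0, 1] sums to the perturbation
# budget. The same idea on a plain tensor, without the class machinery:
def _example_bisection_projection(changes, n_perturbations, epsilon=1e-5):
    def func(x):
        return torch.clamp(changes - x, 0, 1).sum() - n_perturbations
    a, b = (changes - 1).min(), changes.max()
    miu = a
    while (b - a) >= epsilon:
        miu = (a + b) / 2
        if func(miu) == 0.0:
            break
        if func(miu) * func(a) < 0:
            b = miu
        else:
            a = miu
    return torch.clamp(changes - miu, min=0, max=1)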
class MinMaxSpectral(SpectralAttack):
"""MinMax attack for graph data.
Parameters
----------
model :
model to attack. Default `None`.
nnodes : int
number of nodes in the input graph
loss_type: str
attack loss type, chosen from ['CE', 'CW']
feature_shape : tuple
shape of the input node features
attack_structure : bool
whether to attack graph structure
attack_features : bool
whether to attack node features
device: str
'cpu' or 'cuda'
Examples
--------
>>> from deeprobust.graph.data import Dataset
>>> from deeprobust.graph.defense import GCN
>>> from deeprobust.graph.global_attack import MinMax
>>> from deeprobust.graph.utils import preprocess
>>> data = Dataset(root='/tmp/', name='cora')
>>> adj, features, labels = data.adj, data.features, data.labels
>>> adj, features, labels = preprocess(adj, features, labels, preprocess_adj=False) # conver to tensor
>>> idx_train, idx_val, idx_test = data.idx_train, data.idx_val, data.idx_test
>>> # Setup Victim Model
>>> victim_model = GCN(nfeat=features.shape[1], nclass=labels.max().item()+1,
nhid=16, dropout=0.5, weight_decay=5e-4, device='cpu').to('cpu')
>>> victim_model.fit(features, adj, labels, idx_train)
>>> # Setup Attack Model
>>> model = MinMax(model=victim_model, nnodes=adj.shape[0], loss_type='CE', device='cpu').to('cpu')
>>> model.attack(features, adj, labels, idx_train, n_perturbations=10)
>>> modified_adj = model.modified_adj
"""
def __init__(self,
model=None,
nnodes=None,
loss_type='CE',
feature_shape=None,
attack_structure=True,
attack_features=False,
regularization_weight=5.0,
device='cpu'):
super(MinMaxSpectral, self).__init__(model,
nnodes,
loss_type,
feature_shape,
attack_structure,
attack_features,
regularization_weight,
device=device)
def attack(self, ori_features, ori_adj, labels, idx_train,
n_perturbations, epochs=200, **kwargs):
"""Generate perturbations on the input graph.
Parameters
----------
ori_features :
Original (unperturbed) node feature matrix
ori_adj :
Original (unperturbed) adjacency matrix
labels :
node labels
idx_train :
node training indices
n_perturbations : int
Number of perturbations on the input graph. Perturbations could
be edge removals/additions or feature removals/additions.
epochs:
number of training epochs
"""
victim_model = self.surrogate
self.sparse_features = sp.issparse(ori_features)
ori_adj, ori_features, labels = utils.to_tensor(ori_adj, ori_features, labels, device=self.device)
        # optimizer for the inner (model update) step of the min-max attack
        optimizer = optim.Adam(victim_model.parameters(), lr=0.01)
victim_model.eval()
for t in tqdm(range(epochs)):
# update victim model
victim_model.train()
modified_adj = self.get_modified_adj(ori_adj)
adj_norm = utils.normalize_adj_tensor(modified_adj)
output = victim_model(ori_features, adj_norm)
loss = self._loss(output[idx_train], labels[idx_train])
optimizer.zero_grad()
loss.backward()
optimizer.step()
# generate pgd attack
victim_model.eval()
modified_adj = self.get_modified_adj(ori_adj)
adj_norm = utils.normalize_adj_tensor(modified_adj)
output = victim_model(ori_features, adj_norm)
loss = self._loss(output[idx_train], labels[idx_train])
# New: add regularization term for spectral distance
if self.regularization_weight != 0:
ori_adj_norm = utils.normalize_adj_tensor(ori_adj, device=self.device)
ori_e, ori_v = torch.symeig(ori_adj_norm, eigenvectors=True)
e, v = torch.symeig(adj_norm, eigenvectors=True)
regularization = F.mse_loss(ori_e, e)
loss += regularization * self.regularization_weight
adj_grad = torch.autograd.grad(loss, self.adj_changes)[0]
# adj_grad = self.adj_changes.grad
if self.loss_type == 'CE':
lr = 200 / np.sqrt(t+1)
self.adj_changes.data.add_(lr * adj_grad)
if self.loss_type == 'CW':
lr = 0.1 / np.sqrt(t+1)
self.adj_changes.data.add_(lr * adj_grad)
# self.adj_changes.grad.zero_()
self.projection(n_perturbations)
self.random_sample(ori_adj, ori_features, labels, idx_train, n_perturbations)
self.modified_adj = self.get_modified_adj(ori_adj).detach()
self.check_adj_tensor(self.modified_adj)
# for sanity check
ori_adj_norm = utils.normalize_adj_tensor(ori_adj, device=self.device)
ori_e, ori_v = torch.symeig(ori_adj_norm, eigenvectors=True)
adj_norm = utils.normalize_adj_tensor(self.modified_adj, device=self.device)
e, v = torch.symeig(adj_norm, eigenvectors=True)
self.adj = ori_adj.detach()
self.labels = labels.detach()
self.ori_e = ori_e
self.ori_v = ori_v
self.e = e
self.v = v
| [
"torch.zeros",
"torch.symeig",
"torch.no_grad",
"torch.tril_indices",
"torch.clamp",
"torch.nn.functional.mse_loss",
"torch.autograd.grad",
"torch.tensor",
"torch.eye",
"torch.ones_like",
"torch.nn.functional.nll_loss"
] | 1.2.0 | Louise-LuLin/DeepRobust | a91b2d321f45cd7b24873220bd62a60911829d2c |
1.0 | # Copyright 2019 The Texar Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit tests for data iterator related operations.
"""
import copy
import tempfile
import unittest
from unittest.mock import patch
from typing import List, Tuple
import numpy as np
import torch
from texar.torch.data.data.data_base import (
DatasetBase, IterDataSource, SequenceDataSource, ZipDataSource)
from texar.torch.data.data.data_iterators import (
DataIterator, TrainTestDataIterator)
from texar.torch.data.data.dataset_utils import Batch
from texar.torch.data.data.mono_text_data import MonoTextData
from texar.torch.data.data.sampler import TokenCountBatchingStrategy
class DataIteratorTest(unittest.TestCase):
r"""Tests data iterators.
"""
def setUp(self):
# Create data
self.train_text = list(np.linspace(1, 1000, num=1000, dtype=np.int64))
self.train_text = [str(x) for x in self.train_text]
train_text_file = tempfile.NamedTemporaryFile()
train_text_file.write('\n'.join(self.train_text).encode("utf-8"))
train_text_file.flush()
self._train_text_file = train_text_file
test_text = list(np.linspace(1001, 2000, num=1000, dtype=np.int64))
test_text = [str(x) for x in test_text]
test_text_file = tempfile.NamedTemporaryFile()
test_text_file.write('\n'.join(test_text).encode("utf-8"))
test_text_file.flush()
self._test_text_file = test_text_file
vocab_list = self.train_text + test_text
vocab_file = tempfile.NamedTemporaryFile()
vocab_file.write('\n'.join(vocab_list).encode("utf-8"))
vocab_file.flush()
self._vocab_file = vocab_file
self._vocab_size = len(vocab_list)
self._train_hparams = {
"num_epochs": 1,
"batch_size": 1,
"shuffle": False,
"dataset": {
"files": self._train_text_file.name,
"vocab_file": self._vocab_file.name,
"bos_token": '',
"eos_token": ''
},
"name": "train"
}
self._test_hparams = {
"num_epochs": 1,
"batch_size": 2,
"shuffle": False,
"dataset": {
"files": self._test_text_file.name,
"vocab_file": self._vocab_file.name,
"bos_token": '',
"eos_token": ''
},
"name": "test"
}
def test_iterator_single_dataset(self):
r"""Tests iterating over a single dataset.
"""
data = MonoTextData(self._test_hparams)
data_iterator = DataIterator(data)
data_iterator.switch_to_dataset(dataset_name="data")
iterator = data_iterator.get_iterator()
i = 1001
for idx, batch in enumerate(iterator):
self.assertEqual(batch.batch_size, self._test_hparams['batch_size'])
np.testing.assert_array_equal(batch['length'], [1, 1])
for example in batch['text']:
self.assertEqual(example[0], str(i))
i += 1
self.assertEqual(i, 2001)
def test_iterator_single_dataset_parallel(self):
r"""Tests iterating over a single dataset with multiple workers.
"""
hparams = copy.deepcopy(self._test_hparams)
hparams['num_parallel_calls'] = 2
data = MonoTextData(hparams)
data_iterator = DataIterator(data)
data_iterator.switch_to_dataset(dataset_name="data")
iterator = data_iterator.get_iterator()
i = 1001
for idx, batch in enumerate(iterator):
self.assertEqual(batch.batch_size, self._test_hparams['batch_size'])
np.testing.assert_array_equal(batch['length'], [1, 1])
for example in batch['text']:
self.assertEqual(example[0], str(i))
i += 1
self.assertEqual(i, 2001)
def test_iterator_multi_datasets(self):
r"""Tests iterating over multiple datasets.
"""
train = MonoTextData(self._train_hparams)
test = MonoTextData(self._test_hparams)
train_batch_size = self._train_hparams["batch_size"]
test_batch_size = self._test_hparams["batch_size"]
data_iterator = DataIterator({"train": train, "test": test})
data_iterator.switch_to_dataset(dataset_name="train")
iterator = data_iterator.get_iterator()
for idx, val in enumerate(iterator):
self.assertEqual(len(val), train_batch_size)
number = idx * train_batch_size + 1
self.assertEqual(val.text[0], [str(number)])
# numbers: 1 - 2000, first 4 vocab entries are special tokens
self.assertEqual(val.text_ids[0], torch.tensor(number + 3))
data_iterator.switch_to_dataset(dataset_name="test")
iterator = data_iterator.get_iterator()
for idx, val in enumerate(iterator):
self.assertEqual(len(val), test_batch_size)
number = idx * test_batch_size + 1001
self.assertEqual(val.text[0], [str(number)])
self.assertEqual(val.text_ids[0], torch.tensor(number + 3))
# test `get_iterator` interface
for idx, val in enumerate(data_iterator.get_iterator('train')):
self.assertEqual(len(val), train_batch_size)
number = idx * train_batch_size + 1
self.assertEqual(val.text[0], [str(number)])
self.assertEqual(val.text_ids[0], torch.tensor(number + 3))
# test exception for invalid dataset name
with self.assertRaises(ValueError) as context:
data_iterator.switch_to_dataset('val')
self.assertTrue('not found' in str(context.exception))
def test_train_test_data_iterator(self):
r"""Tests :class:`texar.torch.data.TrainTestDataIterator`
"""
train = MonoTextData(self._train_hparams)
test = MonoTextData(self._test_hparams)
train_batch_size = self._train_hparams["batch_size"]
test_batch_size = self._test_hparams["batch_size"]
data_iterator = TrainTestDataIterator(train=train, test=test)
data_iterator.switch_to_train_data()
iterator = data_iterator.get_iterator()
for idx, val in enumerate(iterator):
self.assertEqual(len(val), train_batch_size)
number = idx * train_batch_size + 1
self.assertEqual(val.text[0], [str(number)])
# numbers: 1 - 2000, first 4 vocab entries are special tokens
self.assertEqual(val.text_ids[0], torch.tensor(number + 3))
data_iterator.switch_to_test_data()
iterator = data_iterator.get_iterator()
for idx, val in enumerate(iterator):
self.assertEqual(len(val), test_batch_size)
number = idx * test_batch_size + 1001
self.assertEqual(val.text[0], [str(number)])
self.assertEqual(val.text_ids[0], torch.tensor(number + 3))
# test `get_*_iterator` interface
for idx, val in enumerate(data_iterator.get_test_iterator()):
self.assertEqual(len(val), test_batch_size)
number = idx * test_batch_size + 1001
self.assertEqual(val.text[0], [str(number)])
self.assertEqual(val.text_ids[0], torch.tensor(number + 3))
# test exception for invalid dataset name
with self.assertRaises(ValueError) as context:
data_iterator.switch_to_val_data()
def test_dynamic_batching(self):
r"""Tests dynamic batching using :class:`texar.torch.data.BatchingStrategy`.
"""
sent_lengths = np.random.randint(10, 20, size=(100,))
sentences = [['a'] * length for length in sent_lengths]
data_source = SequenceDataSource(sentences)
class CustomData(DatasetBase):
def __init__(self, source):
super().__init__(source)
def collate(self, examples):
return Batch(len(examples), text=examples)
train_data = CustomData(data_source)
batch_size = 5
max_tokens = 75
strategy = TokenCountBatchingStrategy(
max_tokens, batch_size, len)
iterator = DataIterator(train_data, strategy)
for batch in iterator:
self.assertLessEqual(len(batch), batch_size)
self.assertLessEqual(sum(len(s) for s in batch.text), max_tokens)
@patch("torch.cuda.is_available", lambda: True)
def test_auto_storage_moving(self):
cuda_tensors = set()
def move_tensor(tensor, device, non_blocking=False):
if isinstance(device, torch.device) and device.type == "cuda":
self.assertTrue(non_blocking)
cuda_tensors.add(id(tensor))
return tensor
device = torch.device("cuda:0")
with patch.object(torch.Tensor, "to", move_tensor):
train = MonoTextData(self._train_hparams, device=device)
iterator = DataIterator(train)
for batch in iterator:
self.assertTrue(id(batch.text_ids) in cuda_tensors)
self.assertTrue(id(batch.length) in cuda_tensors)
RawExample = Tuple[List[int], str]
Example = Tuple[List[int], List[str]]
class MockDataBase(DatasetBase[RawExample, Example]):
def process(self, raw_example: RawExample) -> Example:
numbers, string = raw_example
numbers = [x + 1 for x in numbers]
strings = string.split()
return numbers, strings
def collate(self, examples: List[Example]) -> Batch:
numbers = np.asarray([ex[0] for ex in examples])
strings = np.asarray([ex[1] for ex in examples])
return Batch(len(numbers), numbers=numbers, strings=strings)
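# Tiny illustration (added for clarity): MockDataBase increments every number in
# a raw example and splits its string; collate stacks processed examples into
# numpy arrays wrapped in a Batch. Construction mirrors LazinessCachingTest
# below (a partial hparams dict is merged with the defaults by DatasetBase).
def _example_mock_data_base():
    raw = ([1, 2, 3], "a b c")
    data = MockDataBase(SequenceDataSource([raw]), hparams={'batch_size': 1})
    numbers, strings = data.process(raw)
    assert numbers == [2, 3, 4] and strings == ["a", "b", "c"]
    return data.collate([(numbers, strings)])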
class LazinessCachingTest(unittest.TestCase):
def setUp(self) -> None:
self.size = 102
self.seq_len = 10
self.batch_size = 5
self.num_workers = 3
def _test_modes_with_workers(self, lazy_mode: str, cache_mode: str,
num_workers: int,
parallelize_processing: bool = True,
support_random_access: bool = False,
shuffle: bool = False,
**kwargs):
hparams = {
'batch_size': self.batch_size,
'lazy_strategy': lazy_mode,
'cache_strategy': cache_mode,
'num_parallel_calls': num_workers,
'shuffle': shuffle,
'shuffle_buffer_size': self.size // 5,
'parallelize_processing': parallelize_processing,
'allow_smaller_final_batch': False,
**kwargs,
}
numbers_data = [[x] * self.seq_len for x in range(self.size)]
string_data = [' '.join(map(str, range(self.seq_len)))
for _ in range(self.size)]
if not support_random_access:
source = ZipDataSource( # type: ignore
IterDataSource(numbers_data),
SequenceDataSource(string_data))
else:
source = ZipDataSource(
SequenceDataSource(numbers_data),
SequenceDataSource(string_data))
data = MockDataBase(source, hparams) # type: ignore
iterator = DataIterator(data)
if data._hparams.allow_smaller_final_batch:
total_examples = self.size
total_batches = (self.size + self.batch_size - 1) // self.batch_size
else:
total_examples = self.size // self.batch_size * self.batch_size
total_batches = self.size // self.batch_size
def check_batch(idx, batch):
if idx == total_batches - 1:
batch_size = (total_examples - 1) % self.batch_size + 1
else:
batch_size = self.batch_size
self.assertEqual(batch.numbers.shape,
(batch_size, self.seq_len))
if not shuffle:
numbers = np.asarray([idx * self.batch_size + x + 1
for x in range(batch_size)])
self.assertTrue(np.all(batch.numbers == numbers[:, np.newaxis]))
# check laziness
if parallelize_processing:
if lazy_mode == 'none':
self.assertEqual(len(data._processed_cache), self.size)
else:
self.assertEqual(len(data._processed_cache), 0)
if not support_random_access:
if lazy_mode == 'process':
self.assertEqual(len(data._cached_source._cache),
self.size)
else:
self.assertEqual(len(data._cached_source._cache), 0)
# first epoch
cnt = 0
for idx, batch in enumerate(iterator):
check_batch(idx, batch)
cnt += 1
self.assertEqual(cnt, total_batches)
# check cache
if parallelize_processing:
if cache_mode == 'none':
self.assertEqual(len(data._processed_cache), 0)
elif cache_mode == 'loaded':
self.assertEqual(len(data._processed_cache), 0)
else:
self.assertEqual(len(data._processed_cache), self.size)
if lazy_mode != 'none' and not support_random_access:
if cache_mode == 'none':
self.assertEqual(len(data._cached_source._cache), 0)
elif cache_mode == 'loaded':
self.assertEqual(len(data._cached_source._cache), self.size)
else:
self.assertEqual(len(data._cached_source._cache), 0)
# second epoch
cnt = 0
for idx, batch in enumerate(iterator):
check_batch(idx, batch)
cnt += 1
self.assertEqual(cnt, total_batches)
# check again
if parallelize_processing:
if cache_mode == 'none':
self.assertEqual(len(data._processed_cache), 0)
elif cache_mode == 'loaded':
self.assertEqual(len(data._processed_cache), 0)
else:
self.assertEqual(len(data._processed_cache), self.size)
if lazy_mode != 'none' and not support_random_access:
if cache_mode == 'none':
self.assertEqual(len(data._cached_source._cache), 0)
elif cache_mode == 'loaded':
self.assertEqual(len(data._cached_source._cache), self.size)
else:
self.assertEqual(len(data._cached_source._cache), 0)
def _test_modes(self, lazy_mode: str, cache_mode: str):
self._test_modes_with_workers(lazy_mode, cache_mode, self.num_workers)
self._test_modes_with_workers(lazy_mode, cache_mode, self.num_workers,
parallelize_processing=False)
self._test_modes_with_workers(lazy_mode, cache_mode, 1)
self._test_modes_with_workers(lazy_mode, cache_mode, self.num_workers,
support_random_access=True)
self._test_modes_with_workers(lazy_mode, cache_mode, self.num_workers,
shuffle=True)
def test_none_processed(self):
self._test_modes('none', 'processed')
def test_process_loaded(self):
self._test_modes('process', 'loaded')
def test_process_processed(self):
self._test_modes('process', 'processed')
def test_all_none(self):
self._test_modes('all', 'none')
def test_all_loaded(self):
self._test_modes('all', 'loaded')
def test_all_processed(self):
self._test_modes('all', 'processed')
if __name__ == "__main__":
unittest.main()
| [
"torch.device",
"torch.tensor"
] | 1.0.0 | qinzzz/texar-pytorch | d66258a599a291418004170e62864b001b650926 |
1.1 | import numpy as np
import torch
import torch.nn as nn
from ....ops.iou3d_nms import iou3d_nms_utils
class ProposalTargetLayer(nn.Module):
def __init__(self, roi_sampler_cfg):
super().__init__()
self.roi_sampler_cfg = roi_sampler_cfg
def forward(self, batch_dict):
"""
Args:
batch_dict:
batch_size:
rois: (B, num_rois, 7 + C)
roi_scores: (B, num_rois)
gt_boxes: (B, N, 7 + C + 1)
roi_labels: (B, num_rois)
Returns:
batch_dict:
rois: (B, M, 7 + C)
gt_of_rois: (B, M, 7 + C)
gt_iou_of_rois: (B, M)
roi_scores: (B, M)
roi_labels: (B, M)
reg_valid_mask: (B, M)
rcnn_cls_labels: (B, M)
"""
batch_rois, batch_gt_of_rois, batch_roi_ious, batch_roi_scores, batch_roi_labels = self.sample_rois_for_rcnn(
batch_dict=batch_dict
)
batch_cls_of_rois = batch_gt_of_rois[:, :, -1:]
# regression valid mask
reg_valid_mask = (batch_roi_ious > self.roi_sampler_cfg.REG_FG_THRESH).long()
# classification label
if self.roi_sampler_cfg.CLS_SCORE_TYPE == 'cls':
batch_cls_labels = (batch_roi_ious > self.roi_sampler_cfg.CLS_FG_THRESH).long()
ignore_mask = (batch_roi_ious > self.roi_sampler_cfg.CLS_BG_THRESH) & \
(batch_roi_ious < self.roi_sampler_cfg.CLS_FG_THRESH)
batch_cls_labels[ignore_mask > 0] = -1
elif self.roi_sampler_cfg.CLS_SCORE_TYPE == 'roi_iou':
iou_bg_thresh = self.roi_sampler_cfg.CLS_BG_THRESH
iou_fg_thresh = self.roi_sampler_cfg.CLS_FG_THRESH
fg_mask = batch_roi_ious > iou_fg_thresh
bg_mask = batch_roi_ious < iou_bg_thresh
interval_mask = (fg_mask == 0) & (bg_mask == 0)
batch_cls_labels = (fg_mask > 0).float()
batch_cls_labels[interval_mask] = \
(batch_roi_ious[interval_mask] - iou_bg_thresh) / (iou_fg_thresh - iou_bg_thresh)
else:
raise NotImplementedError
targets_dict = {'rois': batch_rois, 'gt_of_rois': batch_gt_of_rois, 'gt_iou_of_rois': batch_roi_ious,
'roi_scores': batch_roi_scores, 'roi_labels': batch_roi_labels,
'reg_valid_mask': reg_valid_mask, 'gt_cls_of_rois':batch_cls_of_rois,
'rcnn_cls_labels': batch_cls_labels}
return targets_dict
def sample_rois_for_rcnn(self, batch_dict):
"""
Args:
batch_dict:
batch_size:
rois: (B, num_rois, 7 + C)
roi_scores: (B, num_rois)
gt_boxes: (B, N, 7 + C + 1)
roi_labels: (B, num_rois)
Returns:
"""
batch_size = batch_dict['batch_size']
rois = batch_dict['rois']
roi_scores = batch_dict['roi_scores']
roi_labels = batch_dict['roi_labels']
gt_boxes = batch_dict['gt_boxes']
code_size = rois.shape[-1]
batch_rois = rois.new_zeros(batch_size, self.roi_sampler_cfg.ROI_PER_IMAGE, code_size)
batch_gt_of_rois = rois.new_zeros(batch_size, self.roi_sampler_cfg.ROI_PER_IMAGE, code_size + 1)
batch_roi_ious = rois.new_zeros(batch_size, self.roi_sampler_cfg.ROI_PER_IMAGE)
batch_roi_scores = rois.new_zeros(batch_size, self.roi_sampler_cfg.ROI_PER_IMAGE)
batch_roi_labels = rois.new_zeros((batch_size, self.roi_sampler_cfg.ROI_PER_IMAGE), dtype=torch.long)
for index in range(batch_size):
cur_roi, cur_gt, cur_roi_labels, cur_roi_scores = \
rois[index], gt_boxes[index], roi_labels[index], roi_scores[index]
k = cur_gt.__len__() - 1
while k > 0 and cur_gt[k].sum() == 0:
k -= 1
cur_gt = cur_gt[:k + 1]
cur_gt = cur_gt.new_zeros((1, cur_gt.shape[1])) if len(cur_gt) == 0 else cur_gt
if self.roi_sampler_cfg.get('SAMPLE_ROI_BY_EACH_CLASS', False):
max_overlaps, gt_assignment = self.get_max_iou_with_same_class(
rois=cur_roi, roi_labels=cur_roi_labels,
gt_boxes=cur_gt[:, 0:7], gt_labels=cur_gt[:, -1].long()
)
else:
iou3d = iou3d_nms_utils.boxes_iou3d_gpu(cur_roi, cur_gt[:, 0:7]) # (M, N)
max_overlaps, gt_assignment = torch.max(iou3d, dim=1)
sampled_inds = self.subsample_rois(max_overlaps=max_overlaps)
batch_rois[index] = cur_roi[sampled_inds]
batch_roi_labels[index] = cur_roi_labels[sampled_inds]
batch_roi_ious[index] = max_overlaps[sampled_inds]
batch_roi_scores[index] = cur_roi_scores[sampled_inds]
batch_gt_of_rois[index] = cur_gt[gt_assignment[sampled_inds]]
return batch_rois, batch_gt_of_rois, batch_roi_ious, batch_roi_scores, batch_roi_labels
def subsample_rois(self, max_overlaps):
# sample fg, easy_bg, hard_bg
fg_rois_per_image = int(np.round(self.roi_sampler_cfg.FG_RATIO * self.roi_sampler_cfg.ROI_PER_IMAGE))
fg_thresh = min(self.roi_sampler_cfg.REG_FG_THRESH, self.roi_sampler_cfg.CLS_FG_THRESH)
fg_inds = ((max_overlaps >= fg_thresh)).nonzero().view(-1)
easy_bg_inds = ((max_overlaps < self.roi_sampler_cfg.CLS_BG_THRESH_LO)).nonzero().view(-1)
hard_bg_inds = ((max_overlaps < self.roi_sampler_cfg.REG_FG_THRESH) &
(max_overlaps >= self.roi_sampler_cfg.CLS_BG_THRESH_LO)).nonzero().view(-1)
fg_num_rois = fg_inds.numel()
bg_num_rois = hard_bg_inds.numel() + easy_bg_inds.numel()
if fg_num_rois > 0 and bg_num_rois > 0:
# sampling fg
fg_rois_per_this_image = min(fg_rois_per_image, fg_num_rois)
rand_num = torch.from_numpy(np.random.permutation(fg_num_rois)).type_as(max_overlaps).long()
fg_inds = fg_inds[rand_num[:fg_rois_per_this_image]]
# sampling bg
bg_rois_per_this_image = self.roi_sampler_cfg.ROI_PER_IMAGE - fg_rois_per_this_image
bg_inds = self.sample_bg_inds(
hard_bg_inds, easy_bg_inds, bg_rois_per_this_image, self.roi_sampler_cfg.HARD_BG_RATIO
)
elif fg_num_rois > 0 and bg_num_rois == 0:
# sampling fg
rand_num = np.floor(np.random.rand(self.roi_sampler_cfg.ROI_PER_IMAGE) * fg_num_rois)
rand_num = torch.from_numpy(rand_num).type_as(max_overlaps).long()
fg_inds = fg_inds[rand_num]
bg_inds = []
elif bg_num_rois > 0 and fg_num_rois == 0:
# sampling bg
bg_rois_per_this_image = self.roi_sampler_cfg.ROI_PER_IMAGE
bg_inds = self.sample_bg_inds(
hard_bg_inds, easy_bg_inds, bg_rois_per_this_image, self.roi_sampler_cfg.HARD_BG_RATIO
)
else:
print('maxoverlaps:(min=%f, max=%f)' % (max_overlaps.min().item(), max_overlaps.max().item()))
print('ERROR: FG=%d, BG=%d' % (fg_num_rois, bg_num_rois))
raise NotImplementedError
sampled_inds = torch.cat((fg_inds, bg_inds), dim=0)
return sampled_inds
@staticmethod
def sample_bg_inds(hard_bg_inds, easy_bg_inds, bg_rois_per_this_image, hard_bg_ratio):
if hard_bg_inds.numel() > 0 and easy_bg_inds.numel() > 0:
hard_bg_rois_num = min(int(bg_rois_per_this_image * hard_bg_ratio), len(hard_bg_inds))
easy_bg_rois_num = bg_rois_per_this_image - hard_bg_rois_num
# sampling hard bg
rand_idx = torch.randint(low=0, high=hard_bg_inds.numel(), size=(hard_bg_rois_num,)).long()
hard_bg_inds = hard_bg_inds[rand_idx]
# sampling easy bg
rand_idx = torch.randint(low=0, high=easy_bg_inds.numel(), size=(easy_bg_rois_num,)).long()
easy_bg_inds = easy_bg_inds[rand_idx]
bg_inds = torch.cat([hard_bg_inds, easy_bg_inds], dim=0)
elif hard_bg_inds.numel() > 0 and easy_bg_inds.numel() == 0:
hard_bg_rois_num = bg_rois_per_this_image
# sampling hard bg
rand_idx = torch.randint(low=0, high=hard_bg_inds.numel(), size=(hard_bg_rois_num,)).long()
bg_inds = hard_bg_inds[rand_idx]
elif hard_bg_inds.numel() == 0 and easy_bg_inds.numel() > 0:
easy_bg_rois_num = bg_rois_per_this_image
# sampling easy bg
rand_idx = torch.randint(low=0, high=easy_bg_inds.numel(), size=(easy_bg_rois_num,)).long()
bg_inds = easy_bg_inds[rand_idx]
else:
raise NotImplementedError
return bg_inds
@staticmethod
def get_max_iou_with_same_class(rois, roi_labels, gt_boxes, gt_labels):
"""
Args:
rois: (N, 7)
roi_labels: (N)
gt_boxes: (N, )
gt_labels:
Returns:
"""
"""
:param rois: (N, 7)
:param roi_labels: (N)
:param gt_boxes: (N, 8)
:return:
"""
max_overlaps = rois.new_zeros(rois.shape[0])
gt_assignment = roi_labels.new_zeros(roi_labels.shape[0])
for k in range(gt_labels.min().item(), gt_labels.max().item() + 1):
roi_mask = (roi_labels == k)
gt_mask = (gt_labels == k)
if roi_mask.sum() > 0 and gt_mask.sum() > 0:
cur_roi = rois[roi_mask]
cur_gt = gt_boxes[gt_mask]
original_gt_assignment = gt_mask.nonzero().view(-1)
iou3d = iou3d_nms_utils.boxes_iou3d_gpu(cur_roi, cur_gt) # (M, N)
cur_max_overlaps, cur_gt_assignment = torch.max(iou3d, dim=1)
max_overlaps[roi_mask] = cur_max_overlaps
gt_assignment[roi_mask] = original_gt_assignment[cur_gt_assignment]
return max_overlaps, gt_assignment
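# Small sketch (added for clarity): sample_bg_inds mixes "hard" and "easy"
# background RoIs according to hard_bg_ratio; it is a static method, so it can
# be exercised on dummy index tensors without building a full sampler config.
def _example_sample_bg_inds():
    hard_bg_inds = torch.tensor([0, 1, 2, 3])
    easy_bg_inds = torch.tensor([10, 11, 12, 13])
    bg_inds = ProposalTargetLayer.sample_bg_inds(
        hard_bg_inds, easy_bg_inds, bg_rois_per_this_image=6, hard_bg_ratio=0.5)
    assert bg_inds.numel() == 6
    return bg_inds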
| [
"torch.cat",
"torch.max",
"torch.from_numpy"
] | 1.1 | EmiyaNing/OpenPCDet | 41ff28209cb000b51626a0ed8593b0adbe3dd447 |
1.9 | import spacy
import numpy as np
import torch
from sklearn.metrics.pairwise import cosine_similarity
from transformers import AutoModel, AutoTokenizer
from sklearn.feature_extraction.text import CountVectorizer
from KinGBERT.utils import extractor_topic_rank, extractor_topical_page_rank, extractor_single_rank, extractor_multipartite_rank
class KinGBERTExtractor:
def __init__(
self,
extract_methods = ['TopicRank', "TopicalPageRank", 'SingleRank','MultipartiteRank'],
top_k = 5,
n_gram_range=(1, 2),
spacy_model="en_core_web_sm",
bert_model="sentence-transformers/distilbert-base-nli-stsb-mean-tokens",
):
self.n_gram_range = n_gram_range
self.nlp = spacy.load(spacy_model)
self.model = AutoModel.from_pretrained(bert_model)
self.tokenizer = AutoTokenizer.from_pretrained(bert_model)
self.extract_methods = extract_methods
self.top_k = top_k
self.candidates = []
def squash(self, value):
if not torch.is_tensor(value):
raise ValueError(f"unexpected `value` of type {value.__class__}")
if value.ndim == 2:
return value
return value.mean(dim=1)
def get_all_candidates(self, text):
stop_words = "english"
count = CountVectorizer(ngram_range=self.n_gram_range, stop_words=stop_words).fit([text])
self.candidates = count.get_feature_names()
if 'TopicRank' in self.extract_methods:
self.candidates += extractor_topic_rank(text)
if 'TopicalPageRank' in self.extract_methods:
self.candidates += extractor_topical_page_rank(text)
if 'SingleRank' in self.extract_methods:
self.candidates += extractor_single_rank(text)
if 'MultipartiteRank' in self.extract_methods:
self.candidates += extractor_multipartite_rank(text)
self.candidates = np.unique(self.candidates).tolist()
def generate(self, text):
text = text[:1000].lower()
candidates = self.get_candidates(text)
text_embedding = self.get_embedding(text)
candidate_embeddings = self.get_embedding(candidates)
distances = cosine_similarity(text_embedding, candidate_embeddings)
keywords = [candidates[index] for index in distances.argsort()[0]][::-1]
keywords_list = []
for keyphrase in keywords:
if (len([el.lower() for el in keyphrase.split(' ') if el.lower() in ' '.join(keywords_list).lower()])==0)&(len(keywords_list)<self.top_k):
keywords_list.append(keyphrase)
return keywords_list
def get_candidates(self, text):
nouns = self.get_nouns(text)
self.get_all_candidates(text)
candidates = list(filter(lambda candidate: candidate in nouns, self.candidates))
return candidates
def get_nouns(self, text):
doc = self.nlp(text)
nouns = set()
for token in doc:
if token.pos_ == "NOUN":
nouns.add(token.text)
noun_phrases = set(chunk.text.strip() for chunk in doc.noun_chunks)
return nouns.union(noun_phrases)
@torch.no_grad()
def get_embedding(self, source):
if isinstance(source, str):
source = [source]
tokens = self.tokenizer(source, padding=True, return_tensors="pt")
outputs = self.model(**tokens, return_dict=True)
embedding = self.parse_outputs(outputs)
embedding = embedding.detach().numpy()
return embedding
def parse_outputs(self, outputs):
value = None
outputs_keys = outputs.keys()
if len(outputs_keys) == 1:
value = tuple(outputs.values())[0]
else:
for key in ["pooler_output", "last_hidden_state"]:
                if key in outputs_keys:
value = outputs[key]
break
if value is None:
raise RuntimeError("no matching BERT keys found for `outputs`")
return self.squash(value)
if __name__ == '__main__':
doc = """What is data science?
Data science is a multidisciplinary approach to extracting actionable insights from the large and ever-increasing volumes of data collected and created by today’s organizations.
Data science encompasses preparing data for analysis and processing, performing advanced data analysis, and presenting the results to reveal patterns and enable stakeholders to draw informed conclusions.
Data preparation can involve cleansing, aggregating, and manipulating it to be ready for specific types of processing. Analysis requires the development and use of algorithms, analysis and AI models. It’s driven by software that combs through data to find patterns within to transform these patterns into predictions that support business decision-making. The accuracy of these predictions must be validated through scientifically designed tests and experiments. And the results should be shared through the skillful use of data visualization tools that make it possible for anyone to see the patterns and understand trends."""
extractor = KinGBERTExtractor()
keywords = extractor.generate(doc)
print(keywords)
| [
"torch.is_tensor",
"torch.no_grad"
] | 1.9.0 | sokolheavy/KinGBERT | c7c91d92bd7d6cc725375d8c3d541040e8a22184 |
1.5 | """Mathematical functions."""
# [email protected]
# [email protected]
import torch
from .constants import inf, ninf
def round(t, decimals=0):
""" Round a tensor to the given number of decimals.
Args:
t (torch.tensor) Tensor.
decimals (int, optional): Round to this decimal, defaults to zero.
Returns:
t (torch.tensor): Rounded tensor.
"""
return torch.round(t * 10 ** decimals) / (10 ** decimals)
def nansum(input, *args, inplace=False, **kwargs):
"""Compute the sum of a tensor, excluding nans.
Parameters
----------
input : tensor
Input tensor.
dim : int or list[int], optional
Dimensions to reduce.
keepdim : bool, default=False
Keep reduced dimensions.
inplace : bool, default=False
Authorize working inplace.
dtype : dtype, default=input.dtype
Accumulator data type
out : tensor, optional
Output placeholder.
Returns
-------
out : tensor
Output tensor
"""
input = torch.as_tensor(input)
if not inplace:
input = input.clone()
mask = torch.isnan(input)
if input.requires_grad:
zero = torch.as_tensor(0, dtype=input.dtype, device=input.device)
input = torch.where(mask, zero, input)
else:
input[mask] = 0
return torch.sum(input, *args, **kwargs)
def nanmean(input, *args, inplace=False, **kwargs):
"""Compute the mean of a tensor, excluding nans.
Parameters
----------
input : tensor
Input tensor.
dim : int or list[int], optional
Dimensions to reduce.
keepdim : bool, default=False
Keep reduced dimensions.
inplace : bool, default=False
Authorize working inplace.
dtype : dtype, default=input.dtype
Accumulator data type
out : tensor, optional
Output placeholder.
Returns
-------
out : tensor
Output tensor
"""
input = torch.as_tensor(input)
if not inplace:
input = input.clone()
mask = torch.isnan(input)
if input.requires_grad:
zero = torch.as_tensor(0, dtype=input.dtype, device=input.device)
input = torch.where(mask, zero, input)
else:
input[mask] = 0
mask = ~mask
weights = mask.sum(*args, **kwargs).to(kwargs.get('dtype', input.dtype))
return torch.sum(input, *args, **kwargs) / weights
def nanvar(input, *args, unbiased=True, inplace=False, **kwargs):
"""Compute the variance of a tensor, excluding nans.
Parameters
----------
input : tensor
Input tensor.
dim : int or list[int], optional
Dimensions to reduce.
keepdim : bool, default=False
Keep reduced dimensions.
unbiased : bool, default=True
Whether to use the unbiased estimation or not.
inplace : bool, default=False
Authorize working inplace.
dtype : dtype, default=input.dtype
Accumulator data type
Returns
-------
out : tensor
Output tensor
"""
input = torch.as_tensor(input)
requires_grad = input.requires_grad
if not inplace:
input = input.clone()
mask = torch.isnan(input)
if requires_grad:
zero = torch.as_tensor(0, dtype=input.dtype, device=input.device)
input = torch.where(mask, zero, input)
else:
input[mask] = 0
mask = ~mask
weights = mask.sum(*args, **kwargs).to(kwargs.get('dtype', input.dtype))
mean = torch.sum(input, *args, **kwargs) / weights
input = input.square() if requires_grad else input.square_()
var = torch.sum(input, *args, **kwargs) / weights
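    # Var[x] = E[x^2] - E[x]^2, with an optional n / (n - 1) unbiased correction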
if requires_grad:
        var = var - mean.square()
if unbiased:
var = var * weights / (weights - 1)
else:
        var -= mean.square()
if unbiased:
weights /= (weights - 1)
var *= weights
return var
def nanstd(input, *args, unbiased=True, inplace=False, **kwargs):
"""Compute the standard deviation of a tensor, excluding nans.
Parameters
----------
input : tensor
Input tensor.
dim : int or list[int], optional
Dimensions to reduce.
keepdim : bool, default=False
Keep reduced dimensions.
unbiased : bool, default=True
Whether to use the unbiased estimation or not.
inplace : bool, default=False
Authorize working inplace.
dtype : dtype, default=input.dtype
Accumulator data type
Returns
-------
out : tensor
Output tensor
"""
input = nanvar(input, *args, unbiased=unbiased, inplace=inplace, **kwargs)
input = input.sqrt_() if not input.requires_grad else input.sqrt()
return input
def nanmin(input, *args, inplace=False, **kwargs):
"""Compute the minimum of a tensor, excluding nans.
Notes
-----
.. This function cannot compute the minimum of two tensors, it only
computes the minimum of one tensor (along a dimension).
.. If all values (across a dimension) are nans, the output value
will be inf.
Parameters
----------
input : tensor
Input tensor.
dim : int or list[int], optional
Dimensions to reduce.
keepdim : bool, default=False
Keep reduced dimensions.
inplace : bool, default=False
Authorize working inplace.
out : tensor, optional
Output placeholder.
Returns
-------
values : tensor
Output tensor
indices : tensor[long], if `dim is not None`
Index location of each minimum value found
"""
# TODO: minimum of two tensors
input = torch.as_tensor(input)
mask = torch.isnan(input)
if inplace and not input.requires_grad:
input[mask] = inf
else:
val_inf = torch.as_tensor(inf, dtype=input.dtype, device=input.device)
input = torch.where(mask, val_inf, input)
return torch.min(input, *args, **kwargs)
def nanmax(input, *args, inplace=False, **kwargs):
"""Compute the maximum of a tensor, excluding nans.
Notes
-----
.. This function cannot compute the maximum of two tensors, it only
computes the maximum of one tensor (along a dimension).
.. If all values (across a dimension) are nans, the output value
will be -inf.
Parameters
----------
input : tensor
Input tensor.
dim : int or list[int], optional
Dimensions to reduce.
keepdim : bool, default=False
Keep reduced dimensions.
inplace : bool, default=False
Authorize working inplace.
out : tensor, optional
Output placeholder.
Returns
-------
values : tensor
Output tensor
indices : tensor[long], if `dim is not None`
Index location of each maximum value found
"""
    # TODO: maximum of two tensors
input = torch.as_tensor(input)
mask = torch.isnan(input)
if inplace and not input.requires_grad:
input[mask] = ninf
else:
val_ninf = torch.as_tensor(ninf, dtype=input.dtype, device=input.device)
input = torch.where(mask, val_ninf, input)
return torch.max(input, *args, **kwargs)
def softmax(Z, dim=-1, get_ll=False, W=None):
""" SoftMax (safe).
Parameters
----------
Z : torch.tensor
Tensor with values.
dim : int, default=-1
Dimension to take softmax, defaults to last dimensions.
get_ll : bool, default=False
Compute log-likelihood, defaults to False.
W : torch.tensor, optional:
Observation weights.
Returns
-------
Z : torch.tensor
Soft-maxed tensor with values.
"""
Z_max, _ = torch.max(Z, dim=dim)
Z = torch.exp(Z - Z_max[:, None])
Z_sum = torch.sum(Z, dim=dim)
if get_ll:
# Compute log-likelihood
if W is None:
ll = torch.sum(torch.log(Z_sum) + Z_max, dtype=torch.float64)
else:
ll = torch.sum((torch.log(Z_sum) + Z_max)*W.squeeze(), dtype=torch.float64)
else:
ll = None
Z = Z / Z_sum[:, None]
return Z, ll
# TODO:
# The following functions should be replaced by tensor-compatible
# equivalents in linalg
from numpy import real
from scipy.linalg import expm as expm_scipy
from scipy.linalg import logm as logm_scipy
def expm(M):
""" Computes the matrix exponential of M.
Args:
M (torch.tensor): Square matrix (N, N)
Returns:
M (torch.tensor): Matrix exponential (N, N)
"""
device = M.device
dtype = M.dtype
M = M.detach().cpu().numpy()
M = expm_scipy(M)
M = torch.from_numpy(M).type(dtype).to(device)
return M
def logm(M):
""" Computes the real matrix logarithm of M.
Args:
M (torch.tensor): Square matrix (N, N)
Returns:
M (torch.tensor): Matrix logarithm (N, N)
"""
device = M.device
dtype = M.dtype
M = M.detach().cpu().numpy()
M = logm_scipy(M)
M = real(M)
M = torch.from_numpy(M).type(dtype).to(device)
return M
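# Illustrative round-trip check (not part of the original module): for a matrix with
# real, positive eigenvalues, expm(logm(M)) should recover M up to numerical error:
#   M = 2.0 * torch.eye(3, dtype=torch.float64)
#   assert torch.allclose(expm(logm(M)), M)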
def besseli(X, order=0, Nk=50):
""" Approximates the modified Bessel function of the first kind,
of either order zero or one.
    OBS: Inputting float32 can lead to numerical issues.
Args:
X (torch.tensor): Input (N, 1).
order (int, optional): 0 or 1, defaults to 0.
Nk (int, optional): Terms in summation, higher number, better approximation.
Defaults to 50.
Returns:
I (torch.tensor): Modified Bessel function of the first kind (N, 1).
See also:
https://mathworld.wolfram.com/ModifiedBesselFunctionoftheFirstKind.html
"""
device = X.device
dtype = X.dtype
if len(X.shape) == 1:
X = X[:, None]
N = X.shape[0]
else:
N = 1
# Compute factorial term
X = X.repeat(1, Nk)
K = torch.arange(0, Nk, dtype=dtype, device=device)
K = K.repeat(N, 1)
K_factorial = (K + 1).lgamma().exp()
if order == 0:
# ..0th order
i = torch.sum((0.25 * X ** 2) ** K / (K_factorial ** 2), dim=1, dtype=torch.float64)
else:
# ..1st order
i = torch.sum(
0.5 * X * ((0.25 * X ** 2) ** K /
(K_factorial * torch.exp(torch.lgamma(K + 2)))), dim=1, dtype=torch.float64)
return i
| [
"torch.round",
"torch.min",
"torch.isnan",
"torch.max",
"torch.arange",
"torch.lgamma",
"torch.from_numpy",
"torch.as_tensor",
"torch.log",
"torch.exp",
"torch.where",
"torch.sum"
] | 1.5 | wyli/nitorch | 3ecd18944cf45fb9193c4c6ffc32953c4d1c71ac |
1.0 | # Borrowed from https://github.com/meetshah1995/pytorch-semseg
import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
from torch.autograd import Variable
class conv2DBatchNorm(nn.Module):
def __init__(
self,
in_channels,
n_filters,
k_size,
stride,
padding,
bias=True,
dilation=1,
is_batchnorm=True,
):
super(conv2DBatchNorm, self).__init__()
conv_mod = nn.Conv2d(
int(in_channels),
int(n_filters),
kernel_size=k_size,
padding=padding,
stride=stride,
bias=bias,
dilation=dilation,
)
if is_batchnorm:
self.cb_unit = nn.Sequential(conv_mod, nn.BatchNorm2d(int(n_filters)))
else:
self.cb_unit = nn.Sequential(conv_mod)
def forward(self, inputs):
outputs = self.cb_unit(inputs)
return outputs
class conv2DGroupNorm(nn.Module):
def __init__(
self, in_channels, n_filters, k_size, stride, padding, bias=True, dilation=1, n_groups=16
):
super(conv2DGroupNorm, self).__init__()
conv_mod = nn.Conv2d(
int(in_channels),
int(n_filters),
kernel_size=k_size,
padding=padding,
stride=stride,
bias=bias,
dilation=dilation,
)
self.cg_unit = nn.Sequential(conv_mod, nn.GroupNorm(n_groups, int(n_filters)))
def forward(self, inputs):
outputs = self.cg_unit(inputs)
return outputs
class deconv2DBatchNorm(nn.Module):
def __init__(self, in_channels, n_filters, k_size, stride, padding, bias=True):
super(deconv2DBatchNorm, self).__init__()
self.dcb_unit = nn.Sequential(
nn.ConvTranspose2d(
int(in_channels),
int(n_filters),
kernel_size=k_size,
padding=padding,
stride=stride,
bias=bias,
),
nn.BatchNorm2d(int(n_filters)),
)
def forward(self, inputs):
outputs = self.dcb_unit(inputs)
return outputs
class conv2DBatchNormRelu(nn.Module):
def __init__(
self,
in_channels,
n_filters,
k_size,
stride,
padding,
bias=True,
dilation=1,
is_batchnorm=True,
):
super(conv2DBatchNormRelu, self).__init__()
conv_mod = nn.Conv2d(
int(in_channels),
int(n_filters),
kernel_size=k_size,
padding=padding,
stride=stride,
bias=bias,
dilation=dilation,
)
if is_batchnorm:
self.cbr_unit = nn.Sequential(
conv_mod, nn.BatchNorm2d(int(n_filters)), nn.ReLU(inplace=True)
)
else:
self.cbr_unit = nn.Sequential(conv_mod, nn.ReLU(inplace=True))
def forward(self, inputs):
outputs = self.cbr_unit(inputs)
return outputs
class conv2DGroupNormRelu(nn.Module):
def __init__(
self, in_channels, n_filters, k_size, stride, padding, bias=True, dilation=1, n_groups=16
):
super(conv2DGroupNormRelu, self).__init__()
conv_mod = nn.Conv2d(
int(in_channels),
int(n_filters),
kernel_size=k_size,
padding=padding,
stride=stride,
bias=bias,
dilation=dilation,
)
self.cgr_unit = nn.Sequential(
conv_mod, nn.GroupNorm(n_groups, int(n_filters)), nn.ReLU(inplace=True)
)
def forward(self, inputs):
outputs = self.cgr_unit(inputs)
return outputs
class deconv2DBatchNormRelu(nn.Module):
def __init__(self, in_channels, n_filters, k_size, stride, padding, bias=True):
super(deconv2DBatchNormRelu, self).__init__()
self.dcbr_unit = nn.Sequential(
nn.ConvTranspose2d(
int(in_channels),
int(n_filters),
kernel_size=k_size,
padding=padding,
stride=stride,
bias=bias,
),
nn.BatchNorm2d(int(n_filters)),
nn.ReLU(inplace=True),
)
def forward(self, inputs):
outputs = self.dcbr_unit(inputs)
return outputs
class segnetDown2(nn.Module):
def __init__(self, in_size, out_size, withFeatureMap=False):
super(segnetDown2, self).__init__()
self.conv1 = conv2DBatchNormRelu(in_size, out_size, 3, 1, 1)
self.conv2 = conv2DBatchNormRelu(out_size, out_size, 3, 1, 1)
self.maxpool_with_argmax = nn.MaxPool2d(2, 2, return_indices=True)
self.withFeatureMap = withFeatureMap
def forward(self, inputs):
outputs = self.conv1(inputs)
outputs = self.conv2(outputs)
FeatureMap = outputs
outputs, indices = self.maxpool_with_argmax(outputs)
if self.withFeatureMap:
return outputs, indices, FeatureMap
return outputs, indices, None
class segnetDown3(nn.Module):
def __init__(self, in_size, out_size, withFeatureMap=False):
super(segnetDown3, self).__init__()
self.conv1 = conv2DBatchNormRelu(in_size, out_size, 3, 1, 1)
self.conv2 = conv2DBatchNormRelu(out_size, out_size, 3, 1, 1)
self.conv3 = conv2DBatchNormRelu(out_size, out_size, 3, 1, 1)
self.maxpool_with_argmax = nn.MaxPool2d(2, 2, return_indices=True)
self.withFeatureMap = withFeatureMap
def forward(self, inputs):
outputs = self.conv1(inputs)
outputs = self.conv2(outputs)
outputs = self.conv3(outputs)
FeatureMap = outputs
outputs, indices = self.maxpool_with_argmax(outputs)
if self.withFeatureMap:
return outputs, indices, FeatureMap
return outputs, indices, None
class segnetUp2(nn.Module):
def __init__(self, in_size, out_size, withSkipConnections=False):
super().__init__()
self.withSkipConnections = withSkipConnections
self.unpool = nn.MaxUnpool2d(2, 2)
if self.withSkipConnections:
self.conv1 = conv2DBatchNormRelu(2*in_size, 2*in_size, 3, 1, 1)
self.conv2 = conv2DBatchNormRelu(2*in_size, out_size, 3, 1, 1)
else:
self.conv1 = conv2DBatchNormRelu(in_size, in_size, 3, 1, 1)
self.conv2 = conv2DBatchNormRelu(in_size, out_size, 3, 1, 1)
def forward(self, inputs, indices, output_shape, SkipFeatureMap=None):
if self.withSkipConnections and SkipFeatureMap is None:
raise RuntimeError('Created SegNet with skip connections. But no feature map is passed.')
outputs = self.unpool(input=inputs, indices=indices, output_size=output_shape)
if self.withSkipConnections:
outputs = torch.cat((SkipFeatureMap, outputs), 1)
outputs = self.conv1(outputs)
outputs = self.conv2(outputs)
return outputs
class segnetUp3(nn.Module):
def __init__(self, in_size, out_size, withSkipConnections=False):
super().__init__()
self.withSkipConnections = withSkipConnections
self.unpool = nn.MaxUnpool2d(2, 2)
if self.withSkipConnections:
self.conv1 = conv2DBatchNormRelu(2*in_size, 2*in_size, 3, 1, 1)
self.conv2 = conv2DBatchNormRelu(2*in_size, 2*in_size, 3, 1, 1)
self.conv3 = conv2DBatchNormRelu(2*in_size, out_size, 3, 1, 1)
else:
self.conv1 = conv2DBatchNormRelu(in_size, in_size, 3, 1, 1)
self.conv2 = conv2DBatchNormRelu(in_size, in_size, 3, 1, 1)
self.conv3 = conv2DBatchNormRelu(in_size, out_size, 3, 1, 1)
def forward(self, inputs, indices, output_shape, SkipFeatureMap=None):
if self.withSkipConnections and SkipFeatureMap is None:
raise RuntimeError('Created SegNet with skip connections. But no feature map is passed.')
outputs = self.unpool(input=inputs, indices=indices, output_size=output_shape)
if self.withSkipConnections:
outputs = torch.cat((SkipFeatureMap, outputs), 1)
outputs = self.conv1(outputs)
outputs = self.conv2(outputs)
outputs = self.conv3(outputs)
return outputs
# ------- UNet
class UNet_ConvBlock(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0):
super().__init__()
self.Conv = nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=padding)
self.BN = nn.BatchNorm2d(out_channels)
self.ReLU = nn.ReLU()
def forward(self, x):
x = self.Conv(x)
x = self.BN(x)
x = self.ReLU(x)
return x
class UNet_DownBlock(nn.Module):
def __init__(self, in_channels, out_channels):
super().__init__()
self.Block1 = UNet_ConvBlock(in_channels, out_channels, kernel_size=(3, 3), stride=1, padding=0) # Fixed kernel sizes
self.Block2 = UNet_ConvBlock(out_channels, out_channels, kernel_size=(3, 3), stride=1, padding=0)
self.Pool = nn.MaxPool2d(kernel_size=(2, 2), stride=2)
def forward(self, x):
x = self.Block1(x)
x = self.Block2(x)
FeatureMap = x
x = self.Pool(x)
return x, FeatureMap
class UNet_UpBlock(nn.Module):
def __init__(self, in_channels, out_channels, up_size):
super().__init__()
# Doing what's in the original paper: Upsample the feature map and then a 2x2 conv (and another upsample to match feature sizes)
self.UpSample = nn.Upsample(size=up_size, mode='bilinear')
self.Conv2 = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=(2, 2), stride=1, padding=0)
self.UpSample2 = nn.Upsample(size=up_size, mode='bilinear')
self.Block1 = UNet_ConvBlock(in_channels, out_channels, kernel_size=(3, 3), stride=1, padding=0)
self.Block2 = UNet_ConvBlock(out_channels, out_channels, kernel_size=(3, 3), stride=1, padding=0)
def CopyCropConcat(self, Upsampled, CopiedFeatureMap):
PadHalfSize = (CopiedFeatureMap.size()[2] - Upsampled.size()[2]) // 2 # Floor division //
# print('PadHalfSize:', PadHalfSize)
# Crop copied feature map
# Remove PadHalfSize from both sides for both dimensions (starting from the last: width, then height)
CopiedFeatureMap = F.pad(CopiedFeatureMap, (-PadHalfSize, -PadHalfSize, -PadHalfSize, -PadHalfSize))
# print('CopiedFeatureMap:', CopiedFeatureMap.size())
# print('Upsampled:', Upsampled.size())
# Concat the features
Concated = torch.cat((CopiedFeatureMap, Upsampled), 1) # Is this correct?
# print('Concated:', Concated.size())
return Concated
def forward(self, x, CopiedFeatureMap):
# print('-----------------------')
# print('Input:', x.size())
# Doing what's in the original paper: Upsample the feature map and then a 2x2 conv
x = self.UpSample(x)
x = self.Conv2(x)
x = self.UpSample2(x)
# Copy and crop here
x = self.CopyCropConcat(x, CopiedFeatureMap)
# print('After copycropconcat:', x.size())
x = self.Block1(x)
x = self.Block2(x)
# print('Output:', x.size())
# print('-----------------------')
return x
| [
"torch.cat",
"torch.nn.MaxPool2d",
"torch.nn.Sequential",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.nn.Upsample",
"torch.nn.Conv2d",
"torch.nn.MaxUnpool2d",
"torch.nn.functional.pad"
] | 1.0.1 | vikasTmz/tk3dv | 48430cbc80113ed9c51bdcd3fb577da22af66473 |
1.1 | import configparser
import os
import re
import string
import pickle
import copy
import random
import numpy as np
import torch
from torch.utils.data import DataLoader
from fastNLP import Vocabulary
from dataset_my import Dataset
from dataloader import TrainDataLoader
from utils import padding, batch_padding
def _parse_list(data_path, list_name):
domain = set()
with open(os.path.join(data_path, list_name), 'r', encoding='utf-8') as f:
for line in f:
domain.add(line.strip('\n'))
return domain
def get_domains(data_path, filtered_name, target_name):
all_domains = _parse_list(data_path, filtered_name)
test_domains = _parse_list(data_path, target_name)
train_domains = all_domains - test_domains
print('train domains', len(train_domains), 'test_domains', len(test_domains))
return sorted(list(train_domains)), sorted(list(test_domains))
def _parse_data(data_path, filename):
neg = {
'filename': filename,
'data': [],
'target': []
}
pos = {
'filename': filename,
'data': [],
'target': []
}
with open(os.path.join(data_path, filename), 'r', encoding='utf-8') as f:
for line in f:
line = line.strip('\n')
if line[-2:] == '-1':
neg['data'].append(line[:-2])
neg['target'].append(0)
else:
pos['data'].append(line[:-1])
pos['target'].append(1)
# check
print(filename, 'neg', len(neg['data']), 'pos', len(pos['data']))
return neg, pos
def _process_data(data_dict):
for i in range(len(data_dict['data'])):
text = data_dict['data'][i]
# ignore string.punctuation
text = re.sub('[%s]' % re.escape(string.punctuation), ' ', text)
# string.whitespace -> space
text = re.sub('[%s]' % re.escape(string.whitespace), ' ', text)
# lower case
text = text.lower()
# split by whitespace
text = text.split()
# replace
data_dict['data'][i] = text
return data_dict
def _get_data(data_path, domains, usage):
# usage in ['train', 'dev', 'test']
data = {}
for domain in domains:
for t in ['t2', 't4', 't5']:
filename = '.'.join([domain, t, usage])
neg, pos = _parse_data(data_path, filename)
neg = _process_data(neg)
pos = _process_data(pos)
data[filename] = {'neg': neg, 'pos': pos}
return data
def get_train_data(data_path, domains):
train_data = _get_data(data_path, domains, 'train')
print('train data', len(train_data))
return train_data
def _combine_data(support_data, data):
# support -> dev, test
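    # For every dev/test file, attach the train split of the same domain/task as
    # its support set, so evaluation episodes can be assembled later.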
for key in data:
key_split = key.split('.')[0:-1] + ['train']
support_key = '.'.join(key_split)
for value in data[key]:
data[key][value]['support_data'] = copy.deepcopy(support_data[support_key][value]['data'])
data[key][value]['support_target'] = copy.deepcopy(support_data[support_key][value]['target'])
return data
def get_test_data(data_path, domains):
# get dev, test data
support_data = _get_data(data_path, domains, 'train')
dev_data = _get_data(data_path, domains, 'dev')
test_data = _get_data(data_path, domains, 'test')
# support -> dev, test
dev_data = _combine_data(support_data, dev_data)
test_data = _combine_data(support_data, test_data)
print('dev data', len(dev_data), 'test data', len(test_data))
return dev_data, test_data
def get_vocabulary(data, min_freq):
# train data -> vocabulary
vocabulary = Vocabulary(min_freq=min_freq, padding='<pad>', unknown='<unk>')
for filename in data:
for value in data[filename]:
for word_list in data[filename][value]['data']:
vocabulary.add_word_lst(word_list)
vocabulary.build_vocab()
print('vocab size', len(vocabulary), 'pad', vocabulary.padding_idx, 'unk', vocabulary.unknown_idx)
return vocabulary
def _idx_text(text_list, vocabulary):
for i in range(len(text_list)):
for j in range(len(text_list[i])):
text_list[i][j] = vocabulary.to_index(text_list[i][j])
return text_list
def idx_all_data(data, vocabulary):
for filename in data:
for value in data[filename]:
for key in data[filename][value]:
if key in ['data', 'support_data']:
data[filename][value][key] = _idx_text(data[filename][value][key], vocabulary)
return data
def get_train_loader(train_data, support, query, pad_idx):
batch_size = support + query
train_loaders = {}
for filename in train_data:
neg_dl = DataLoader(Dataset(train_data[filename]['neg'], pad_idx), batch_size=batch_size, shuffle=True, drop_last=False, **kwargs)
pos_dl = DataLoader(Dataset(train_data[filename]['pos'], pad_idx), batch_size=batch_size, shuffle=True, drop_last=False, **kwargs)
if min(len(neg_dl), len(pos_dl)) > 0:
train_loaders[filename] = {
'neg': neg_dl,
'pos': pos_dl
}
print('train loaders', len(train_loaders))
return TrainDataLoader(train_loaders, support=support, query=query, pad_idx=pad_idx)
def get_test_loader(full_data, support, query, pad_idx):
loader = []
for filename in full_data:
# support
support_data = full_data[filename]['neg']['support_data'][0:support] + full_data[filename]['pos']['support_data'][0:support]
support_data = batch_padding(support_data, pad_idx)
support_target = full_data[filename]['neg']['support_target'][0:support] + full_data[filename]['pos']['support_target'][0:support]
support_target = torch.tensor(support_target)
# query
neg_dl = DataLoader(Dataset(full_data[filename]['neg'], pad_idx), batch_size=query * 2, shuffle=False, drop_last=False, **kwargs)
pos_dl = DataLoader(Dataset(full_data[filename]['pos'], pad_idx), batch_size=query * 2, shuffle=False, drop_last=False, **kwargs)
# combine
for dl in [neg_dl, pos_dl]:
for batch_data, batch_target in dl:
support_data_cp, support_target_cp = copy.deepcopy(support_data), copy.deepcopy(support_target)
support_data_cp, batch_data = padding(support_data_cp, batch_data, pad_idx)
data = torch.cat([support_data_cp, batch_data], dim=0)
target = torch.cat([support_target_cp, batch_target], dim=0)
loader.append((data, target))
print('test loader length', len(loader))
return loader
def main():
train_domains, test_domains = get_domains(data_path, config['data']['filtered_list'], config['data']['target_list'])
train_data = get_train_data(data_path, train_domains)
dev_data, test_data = get_test_data(data_path, test_domains)
# print(dev_data['books.t2.dev']['neg']['support_data'])
# print(dev_data['books.t2.dev']['neg']['support_target'])
vocabulary = get_vocabulary(train_data, min_freq=int(config['data']['min_freq']))
pad_idx = vocabulary.padding_idx
pickle.dump(vocabulary, open(os.path.join(config['data']['path'], config['data']['vocabulary']), 'wb'))
train_data = idx_all_data(train_data, vocabulary)
dev_data = idx_all_data(dev_data, vocabulary)
test_data = idx_all_data(test_data, vocabulary)
# print(dev_data['books.t2.dev']['neg']['support_data'])
# print(dev_data['books.t2.dev']['neg']['support_target'])
support = int(config['model']['support'])
query = int(config['model']['query'])
train_loader = get_train_loader(train_data, support, query, pad_idx)
dev_loader = get_test_loader(dev_data, support, query, pad_idx)
test_loader = get_test_loader(test_data, support, query, pad_idx)
pickle.dump(train_loader, open(os.path.join(config['data']['path'], config['data']['train_loader']), 'wb'))
pickle.dump(dev_loader, open(os.path.join(config['data']['path'], config['data']['dev_loader']), 'wb'))
pickle.dump(test_loader, open(os.path.join(config['data']['path'], config['data']['test_loader']), 'wb'))
if __name__ == "__main__":
# config
config = configparser.ConfigParser()
config.read("config.ini")
# seed
seed = int(config['data']['seed'])
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
kwargs = {'num_workers': 0, 'pin_memory': True} if torch.cuda.is_available() else {}
data_path = config['data']['path']
main()
| [
"torch.cat",
"torch.cuda.manual_seed_all",
"torch.manual_seed",
"torch.cuda.is_available",
"torch.tensor"
] | 1.1.0 | ChenZhannnnn/chenzhan | b26a9512bbd1efe86c35c91a625da40b6f94dfc7 |
1.1 | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from mmskeleton.ops.st_gcn import ConvTemporalGraphicalBatchA, Graph
"""
changed from variant 21
symmetric version of the A matrix
A is 3*25*25
a.triu !!!!
besides the max link, the other links are not set to 1
"""
def zero(x):
return 0
def iden(x):
return x
class ANet(torch.nn.Module):  # inherits torch's nn.Module
def __init__(self, n_feature, n_hidden, n_output,dropout_value=0.3):
        super(ANet, self).__init__()  # inherit the parent __init__
        # define the form of each layer
self.conv1 = nn.Conv1d(in_channels=300, out_channels=5, kernel_size=1)
self.anet = nn.Sequential(
nn.BatchNorm1d(n_feature),
nn.ReLU(inplace=True),
nn.Linear(n_feature, n_hidden),
nn.ReLU(inplace=True),
nn.Dropout(dropout_value),
nn.Linear(n_hidden, n_hidden),
nn.ReLU(inplace=True),
nn.Dropout(dropout_value),
nn.Linear(n_hidden, n_output),
)
        # linear output layer
    def forward(self, x):  # this is also the Module's forward pass
        # forward-propagate the input; the network computes the output
N, T, F = x.size()
x = self.conv1(x)
x = x.view(N ,-1)
x = self.anet(x).view(N,4,25, 25)
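        # Keep only the strongest link per node: softmax over the last dim, take
        # its argmax, and scatter a fixed weight (0.3) at that position so each
        # node in each of the 4 subsets gets exactly one nonzero outgoing edge.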
x=torch.softmax(x, dim = 3)
x = torch.argmax(x,dim = 3).view(N,4,25,1).cuda()
x = torch.zeros((N,4,25,25)).cuda().scatter(3 ,x,0.3)
# torch.set_printoptions(precision=None, threshold=10000, edgeitems=None, linewidth=None, profile=None)
# print(x)
return x
class ST_GCN_ALN38(nn.Module):
r"""Spatial temporal graph convolutional networks.
Args:
in_channels (int): Number of channels in the input data
num_class (int): Number of classes for the classification task
graph_cfg (dict): The arguments for building the graph
edge_importance_weighting (bool): If ``True``, adds a learnable
importance weighting to the edges of the graph
**kwargs (optional): Other parameters for graph convolution units
Shape:
- Input: :math:`(N, in_channels, T_{in}, V_{in}, M_{in})`
- Output: :math:`(N, num_class)` where
:math:`N` is a batch size,
:math:`T_{in}` is a length of input sequence,
:math:`V_{in}` is the number of graph nodes,
:math:`M_{in}` is the number of instance in a frame.
"""
def __init__(self,
in_channels,
num_class,
graph_cfg,
edge_importance_weighting=True,
data_bn=True,
**kwargs):
super().__init__()
# load graph
self.graph = Graph(**graph_cfg)
A = torch.tensor(self.graph.A,
dtype=torch.float32,
requires_grad=False)
self.register_buffer('A', A)
# build networks
spatial_kernel_size = 4
temporal_kernel_size = 9
kernel_size = (temporal_kernel_size, spatial_kernel_size)
self.data_bn = nn.BatchNorm1d(in_channels *
A.size(1)) if data_bn else iden
kwargs0 = {k: v for k, v in kwargs.items() if k != 'dropout'}
self.st_gcn_networks = nn.ModuleList((
st_gcn_block(in_channels,
64,
kernel_size,
1,
residual=False,
**kwargs0),
st_gcn_block(64, 64, kernel_size, 1, **kwargs),
st_gcn_block(64, 64, kernel_size, 1, **kwargs),
st_gcn_block(64, 64, kernel_size, 1, **kwargs),
st_gcn_block(64, 128, kernel_size, 2, **kwargs),
st_gcn_block(128, 128, kernel_size, 1, **kwargs),
st_gcn_block(128, 128, kernel_size, 1, **kwargs),
st_gcn_block(128, 256, kernel_size, 2, **kwargs),
st_gcn_block(256, 256, kernel_size, 1, **kwargs),
st_gcn_block(256, 256, kernel_size, 1, **kwargs),
))
# initialize parameters for edge importance weighting
# fcn for prediction
self.fcn = nn.Conv2d(256, num_class, kernel_size=1)
# self.ALN = ANet(150,800, 625)
self.ALN = ANet(375,1500, 625*4)
def forward(self, x):
# data normalization
N, C, T, V, M = x.size()
x = x.permute(0, 4, 3, 1, 2).contiguous()
x = x.view(N * M, V * C, T)
x = self.data_bn(x)
x = x.view(N, M, V, C, T)
x = x.permute(0, 1, 3, 4, 2).contiguous()
x = x.view(N * M, C, T, V)
# input_ILN = x.mean(dim=2).view(N*M, -1)
input_ILN = x.permute(0, 2, 1, 3).contiguous()
input_ILN=input_ILN.view(N*M,T,C*V)
ALN_out = self.ALN(input_ILN)
# ALN_out = ALN_out.view(N,-1).cuda()
A = ALN_out.cuda()
# index = 0
# for i in range(25):
# for j in range(i + 1):
# for n in range(N*M):
# A[n][i][j] = ALN_out[n][index]
# if (i != j): A[n][j][i] = ALN_out[n][index]
# index += 1
# A=A.view(-1, 1, 25, 25).cuda()
# forward
for gcn in self.st_gcn_networks:
x, _ = gcn(x, A)
# global pooling
x = F.avg_pool2d(x, x.size()[2:])
x = x.view(N, M, -1, 1, 1).mean(dim=1)
# prediction
x = self.fcn(x)
x = x.view(x.size(0), -1)
return x
def extract_feature(self, x):
# data normalization
N, C, T, V, M = x.size()
x = x.permute(0, 4, 3, 1, 2).contiguous()
x = x.view(N * M, V * C, T)
x = self.data_bn(x)
x = x.view(N, M, V, C, T)
x = x.permute(0, 1, 3, 4, 2).contiguous()
x = x.view(N * M, C, T, V)
        # forward
for gcn, importance in zip(self.st_gcn_networks, self.edge_importance):
x, _ = gcn(x, self.A * importance)
_, c, t, v = x.size()
feature = x.view(N, M, c, t, v).permute(0, 2, 3, 4, 1)
# prediction
x = self.fcn(x)
output = x.view(N, M, -1, t, v).permute(0, 2, 3, 4, 1)
return output, feature
class st_gcn_block(nn.Module):
r"""Applies a spatial temporal graph convolution over an input graph sequence.
Args:
in_channels (int): Number of channels in the input sequence data
out_channels (int): Number of channels produced by the convolution
kernel_size (tuple): Size of the temporal convolving kernel and graph convolving kernel
stride (int, optional): Stride of the temporal convolution. Default: 1
dropout (int, optional): Dropout rate of the final output. Default: 0
residual (bool, optional): If ``True``, applies a residual mechanism. Default: ``True``
Shape:
- Input[0]: Input graph sequence in :math:`(N, in_channels, T_{in}, V)` format
- Input[1]: Input graph adjacency matrix in :math:`(K, V, V)` format
        - Output[0]: Output graph sequence in :math:`(N, out_channels, T_{out}, V)` format
- Output[1]: Graph adjacency matrix for output data in :math:`(K, V, V)` format
where
:math:`N` is a batch size,
:math:`K` is the spatial kernel size, as :math:`K == kernel_size[1]`,
:math:`T_{in}/T_{out}` is a length of input/output sequence,
:math:`V` is the number of graph nodes.
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride=1,
dropout=0,
residual=True):
super().__init__()
assert len(kernel_size) == 2
assert kernel_size[0] % 2 == 1
padding = ((kernel_size[0] - 1) // 2, 0)
self.gcn = ConvTemporalGraphicalBatchA(in_channels, out_channels,
kernel_size[1])
self.tcn = nn.Sequential(
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True),
nn.Conv2d(
out_channels,
out_channels,
(kernel_size[0], 1),
(stride, 1),
padding,
),
nn.BatchNorm2d(out_channels),
nn.Dropout(dropout, inplace=True),
)
if not residual:
self.residual = zero
elif (in_channels == out_channels) and (stride == 1):
self.residual = iden
else:
self.residual = nn.Sequential(
nn.Conv2d(in_channels,
out_channels,
kernel_size=1,
stride=(stride, 1)),
nn.BatchNorm2d(out_channels),
)
self.relu = nn.ReLU(inplace=True)
def forward(self, x, A):
res = self.residual(x)
x, A = self.gcn(x, A)
x = self.tcn(x) + res
return self.relu(x), A
| [
"torch.nn.Linear",
"torch.zeros",
"torch.nn.Dropout",
"torch.nn.Conv1d",
"torch.nn.BatchNorm2d",
"torch.softmax",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.BatchNorm1d",
"torch.tensor",
"torch.argmax"
] | 1.1 | niujunyu/mmskeleton | 0936b610f0c44e87f6886d34a9f43eda872ed6d8 |
1.7 | import torch
from face_detection.Config import cig
def smooth_l1_loss(x, t, in_weight, sigma : float):
sigma2 = sigma ** 2
diff = in_weight * (x - t)
abs_diff = diff.abs()
flag = (abs_diff.data < (1. / sigma2)).float()
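    # Smooth L1: 0.5 * sigma^2 * d^2 when |d| < 1 / sigma^2, otherwise |d| - 0.5 / sigma^2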
y = (flag * (sigma2 / 2.) * (diff ** 2) +
(1 - flag) * (abs_diff - 0.5 / sigma2))
return y.sum()
def fast_rcnn_loc_loss(pred_loc, gt_loc, gt_label, sigma : float):
in_weight = torch.zeros(gt_loc.shape)
if cig.use_cuda:
in_weight = in_weight.cuda()
# Localization loss is calculated only for positive rois.
# NOTE: unlike origin implementation,
# we don't need inside_weight and outside_weight, they can calculate by gt_label
indices : torch.Tensor = (gt_label > 0).view(-1, 1).expand_as(in_weight)
in_weight[indices.cuda() if cig.use_cuda else indices.cpu()] = 1
loc_loss = smooth_l1_loss(pred_loc, gt_loc, in_weight.detach(), sigma)
    # Normalize by the total number of negative and positive rois.
loc_loss /= ((gt_label >= 0).sum().float()) # ignore gt_label==-1 for rpn_loss
return loc_loss | [
"torch.zeros"
] | 1.7.0 | We-Union/FaceDetection | 5f0d0010fc24d801101cea0a03c95cf5a91f8f90 |
1.0 | import unittest
import torch
import bert_score
from transformers import __version__ as ht_version
EPS = 1e-5
cands = [
"28-year-old chef found dead in San Francisco mall",
"A 28-year-old chef who recently moved to San Francisco was found dead in the staircase of a local shopping center.",
"The victim's brother said he cannot imagine anyone who would want to harm him,\"Finally, it went uphill again at him.\"",
]
refs = [
"28-Year-Old Chef Found Dead at San Francisco Mall",
"A 28-year-old chef who had recently moved to San Francisco was found dead in the stairwell of a local mall this week.",
"But the victim's brother says he can't think of anyone who would want to hurt him, saying, \"Things were finally going well for him.\""
]
class TestScore(unittest.TestCase):
def test_score(self):
(P, R, F), hash_code = bert_score.score(
cands, refs, model_type='roberta-large', num_layers=17,
idf=False, batch_size=3, return_hash=True
)
# print(P.tolist(), R.tolist(), F.tolist())
self.assertTrue(torch.is_tensor(P))
self.assertTrue(torch.is_tensor(R))
self.assertTrue(torch.is_tensor(F))
self.assertEqual(hash_code, f'roberta-large_L17_no-idf_version={bert_score.__version__}(hug_trans={ht_version})')
self.assertTrue((P - torch.tensor([0.9843302369117737, 0.9832239747047424, 0.9120386242866516])).abs_().max() < EPS)
self.assertTrue((R - torch.tensor([0.9823839068412781, 0.9732863903045654, 0.920428991317749])).abs_().max() < EPS)
self.assertTrue((F - torch.tensor([0.9833561182022095, 0.9782299995422363, 0.916214644908905])).abs_().max() < EPS)
def test_idf_score(self):
(P, R, F), hash_code = bert_score.score(
cands, refs, model_type='roberta-large', num_layers=17,
idf=True, batch_size=3, return_hash=True
)
# print(P.tolist(), R.tolist(), F.tolist())
self.assertTrue(torch.is_tensor(P))
self.assertTrue(torch.is_tensor(R))
self.assertTrue(torch.is_tensor(F))
self.assertEqual(hash_code, f'roberta-large_L17_idf_version={bert_score.__version__}(hug_trans={ht_version})')
self.assertTrue((P - torch.tensor([0.9837872385978699, 0.9754738807678223, 0.8947395086288452])).abs_().max() < EPS)
self.assertTrue((R - torch.tensor([0.9827190637588501, 0.9697767496109009, 0.9172918796539307])).abs_().max() < EPS)
self.assertTrue((F - torch.tensor([0.9832529425621033, 0.972616970539093, 0.9058753848075867])).abs_().max() < EPS)
def test_score_rescale(self):
(P, R, F), hash_code = bert_score.score(
cands, refs, model_type='roberta-large', num_layers=17,
idf=False, batch_size=3, return_hash=True,
lang="en", rescale_with_baseline=True
)
# print(P.tolist(), R.tolist(), F.tolist())
self.assertTrue(torch.is_tensor(P))
self.assertTrue(torch.is_tensor(R))
self.assertTrue(torch.is_tensor(F))
self.assertEqual(hash_code, f'roberta-large_L17_no-idf_version={bert_score.__version__}(hug_trans={ht_version})-rescaled')
self.assertTrue((P - torch.tensor([0.907000780105591,0.900435566902161,0.477955609560013])).abs_().max() < EPS)
self.assertTrue((R - torch.tensor([0.895456790924072,0.841467440128326,0.527785062789917])).abs_().max() < EPS)
self.assertTrue((F - torch.tensor([0.901383399963379,0.871010780334473,0.503565192222595])).abs_().max() < EPS)
def test_idf_score_rescale(self):
(P, R, F), hash_code = bert_score.score(
cands, refs, model_type='roberta-large', num_layers=17,
idf=True, batch_size=3, return_hash=True,
lang="en", rescale_with_baseline=True
)
# print(P.tolist(), R.tolist(), F.tolist())
self.assertTrue(torch.is_tensor(P))
self.assertTrue(torch.is_tensor(R))
self.assertTrue(torch.is_tensor(F))
self.assertEqual(hash_code, f'roberta-large_L17_idf_version={bert_score.__version__}(hug_trans={ht_version})-rescaled')
self.assertTrue((P - torch.tensor([0.903778135776520,0.854439020156860,0.375287383794785])).abs_().max() < EPS)
self.assertTrue((R - torch.tensor([0.897446095943451,0.820639789104462,0.509167850017548])).abs_().max() < EPS)
self.assertTrue((F - torch.tensor([0.900772094726562,0.837753534317017,0.442304641008377])).abs_().max() < EPS)
if __name__ == '__main__':
unittest.main()
| [
"torch.is_tensor",
"torch.tensor"
] | 1.0.0 | karinseve/bert_score | 926c516ac516411c560918dddce7755e6ea9aa70 |
1.9 | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import mmcv
import os
import torch
import warnings
from mmcv import Config, DictAction
from mmcv.cnn import fuse_conv_bn
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import (get_dist_info, init_dist, load_checkpoint,
wrap_fp16_model)
from mmdet3d.apis import single_gpu_test
from mmdet3d.datasets import build_dataloader, build_dataset
from mmdet3d.models import build_model
from mmdet.apis import multi_gpu_test, set_random_seed
from mmdet.datasets import replace_ImageToTensor
from projects.mmdet3d_plugin.models.backbones import *
from projects.mmdet3d_plugin.models.dense_heads import *
from projects.mmdet3d_plugin.models.detectors import *
from projects.mmdet3d_plugin.core.bbox import *
from projects.mmdet3d_plugin.datasets import *
def parse_args():
parser = argparse.ArgumentParser(
description='MMDet test (and eval) a model')
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint', help='checkpoint file')
parser.add_argument('--out', help='output result file in pickle format')
parser.add_argument(
'--fuse-conv-bn',
action='store_true',
        help='Whether to fuse conv and bn, this will slightly increase '
'the inference speed')
parser.add_argument(
'--format-only',
action='store_true',
        help='Format the output results without performing evaluation. It is '
'useful when you want to format the result to a specific format and '
'submit it to the test server')
parser.add_argument(
'--eval',
type=str,
nargs='+',
help='evaluation metrics, which depends on the dataset, e.g., "bbox",'
' "segm", "proposal" for COCO, and "mAP", "recall" for PASCAL VOC')
parser.add_argument('--show', action='store_true', help='show results')
parser.add_argument(
'--show-dir', help='directory where results will be saved')
parser.add_argument(
'--gpu-collect',
action='store_true',
help='whether to use gpu to collect results.')
parser.add_argument(
'--tmpdir',
help='tmp directory used for collecting results from multiple '
'workers, available when gpu-collect is not specified')
parser.add_argument('--seed', type=int, default=0, help='random seed')
parser.add_argument(
'--deterministic',
action='store_true',
help='whether to set deterministic options for CUDNN backend.')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--options',
nargs='+',
action=DictAction,
help='custom options for evaluation, the key-value pair in xxx=yyy '
'format will be kwargs for dataset.evaluate() function (deprecate), '
'change to --eval-options instead.')
parser.add_argument(
'--eval-options',
nargs='+',
action=DictAction,
help='custom options for evaluation, the key-value pair in xxx=yyy '
'format will be kwargs for dataset.evaluate() function')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
if args.options and args.eval_options:
raise ValueError(
'--options and --eval-options cannot be both specified, '
'--options is deprecated in favor of --eval-options')
if args.options:
warnings.warn('--options is deprecated in favor of --eval-options')
args.eval_options = args.options
return args
def main():
args = parse_args()
assert args.out or args.eval or args.format_only or args.show \
or args.show_dir, \
('Please specify at least one operation (save/eval/format/show the '
'results / save the results) with the argument "--out", "--eval"'
', "--format-only", "--show" or "--show-dir"')
if args.eval and args.format_only:
raise ValueError('--eval and --format_only cannot be both specified')
if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
raise ValueError('The output file must be a pkl file.')
cfg = Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# import modules from string list.
if cfg.get('custom_imports', None):
from mmcv.utils import import_modules_from_strings
import_modules_from_strings(**cfg['custom_imports'])
## import modules from plguin/xx, registry will be updated
#if hasattr(cfg, 'plugin'):
# if cfg.plugin:
# import importlib
# if hasattr(cfg, 'plugin_dir'):
# plugin_dir = cfg.plugin_dir
# _module_dir = os.path.dirname(plugin_dir)
# _module_dir = _module_dir.split('/')
# _module_path = _module_dir[0]
#
# for m in _module_dir[1:]:
# _module_path = _module_path + '.' + m
# print(_module_path)
# plg_lib = importlib.import_module(_module_path)
# else:
# # import dir is the dirpath for the config file
# _module_dir = os.path.dirname(args.config)
# _module_dir = _module_dir.split('/')
# _module_path = _module_dir[0]
# for m in _module_dir[1:]:
# _module_path = _module_path + '.' + m
# print(_module_path)
# plg_lib = importlib.import_module(_module_path)
# set cudnn_benchmark
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
cfg.model.pretrained = None
# in case the test dataset is concatenated
samples_per_gpu = 1
if isinstance(cfg.data.test, dict):
cfg.data.test.test_mode = True
samples_per_gpu = cfg.data.test.pop('samples_per_gpu', 1)
if samples_per_gpu > 1:
# Replace 'ImageToTensor' to 'DefaultFormatBundle'
cfg.data.test.pipeline = replace_ImageToTensor(
cfg.data.test.pipeline)
elif isinstance(cfg.data.test, list):
for ds_cfg in cfg.data.test:
ds_cfg.test_mode = True
samples_per_gpu = max(
[ds_cfg.pop('samples_per_gpu', 1) for ds_cfg in cfg.data.test])
if samples_per_gpu > 1:
for ds_cfg in cfg.data.test:
ds_cfg.pipeline = replace_ImageToTensor(ds_cfg.pipeline)
# init distributed env first, since logger depends on the dist info.
if args.launcher == 'none':
distributed = False
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
# set random seeds
if args.seed is not None:
set_random_seed(args.seed, deterministic=args.deterministic)
# build the dataloader
dataset = build_dataset(cfg.data.test)
data_loader = build_dataloader(
dataset,
samples_per_gpu=samples_per_gpu,
workers_per_gpu=cfg.data.workers_per_gpu,
dist=distributed,
shuffle=False)
# build the model and load checkpoint
cfg.model.train_cfg = None
model = build_model(cfg.model, test_cfg=cfg.get('test_cfg'))
fp16_cfg = cfg.get('fp16', None)
if fp16_cfg is not None:
wrap_fp16_model(model)
checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
if args.fuse_conv_bn:
model = fuse_conv_bn(model)
    # old versions did not save class info in checkpoints, this workaround is
# for backward compatibility
if 'CLASSES' in checkpoint.get('meta', {}):
model.CLASSES = checkpoint['meta']['CLASSES']
else:
model.CLASSES = dataset.CLASSES
# palette for visualization in segmentation tasks
if 'PALETTE' in checkpoint.get('meta', {}):
model.PALETTE = checkpoint['meta']['PALETTE']
elif hasattr(dataset, 'PALETTE'):
# segmentation dataset has `PALETTE` attribute
model.PALETTE = dataset.PALETTE
if not distributed:
model = MMDataParallel(model, device_ids=[0])
outputs = single_gpu_test(model, data_loader, args.show, args.show_dir)
else:
model = MMDistributedDataParallel(
model.cuda(),
device_ids=[torch.cuda.current_device()],
broadcast_buffers=False)
outputs = multi_gpu_test(model, data_loader, args.tmpdir,
args.gpu_collect)
rank, _ = get_dist_info()
if rank == 0:
if args.out:
print(f'\nwriting results to {args.out}')
mmcv.dump(outputs, args.out)
kwargs = {} if args.eval_options is None else args.eval_options
if args.format_only:
dataset.format_results(outputs, **kwargs)
if args.eval:
eval_kwargs = cfg.get('evaluation', {}).copy()
# hard-code way to remove EvalHook args
for key in [
'interval', 'tmpdir', 'start', 'gpu_collect', 'save_best',
'rule'
]:
eval_kwargs.pop(key, None)
eval_kwargs.update(dict(metric=args.eval, **kwargs))
print(dataset.evaluate(outputs, **eval_kwargs))
if __name__ == '__main__':
main()
| [
"torch.cuda.current_device"
] | 1.9.0 | zmehdiz97/Object-DGCNN | b1561db745575ffa0528402d66511f64670f8d7f |
1.2 | import copy
import argparse
import torch
import torch.nn.functional as F
from torch.nn import Parameter, ModuleDict, ModuleList, Linear, ParameterDict
from torch_sparse import SparseTensor
from ogb.nodeproppred import PygNodePropPredDataset, Evaluator
from logger import Logger
class RGCNConv(torch.nn.Module):
def __init__(self, in_channels, out_channels, node_types, edge_types):
super(RGCNConv, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
# `ModuleDict` does not allow tuples :(
self.rel_lins = ModuleDict({
f'{key[0]}_{key[1]}_{key[2]}': Linear(in_channels, out_channels,
bias=False)
for key in edge_types
})
self.root_lins = ModuleDict({
key: Linear(in_channels, out_channels, bias=True)
for key in node_types
})
self.reset_parameters()
def reset_parameters(self):
for lin in self.rel_lins.values():
lin.reset_parameters()
for lin in self.root_lins.values():
lin.reset_parameters()
def forward(self, x_dict, adj_t_dict):
out_dict = {}
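        # Each node type starts from its own root (self) transform; for every
        # relation, source features are mean-aggregated along edges, linearly
        # transformed, and added into the destination node type.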
for key, x in x_dict.items():
out_dict[key] = self.root_lins[key](x)
for key, adj_t in adj_t_dict.items():
key_str = f'{key[0]}_{key[1]}_{key[2]}'
x = x_dict[key[0]]
out = self.rel_lins[key_str](adj_t.matmul(x, reduce='mean'))
out_dict[key[2]].add_(out)
return out_dict
class RGCN(torch.nn.Module):
def __init__(self, in_channels, hidden_channels, out_channels, num_layers,
dropout, num_nodes_dict, x_types, edge_types):
super(RGCN, self).__init__()
node_types = list(num_nodes_dict.keys())
self.embs = ParameterDict({
key: Parameter(torch.Tensor(num_nodes_dict[key], in_channels))
for key in set(node_types).difference(set(x_types))
})
self.convs = ModuleList()
self.convs.append(
RGCNConv(in_channels, hidden_channels, node_types, edge_types))
for _ in range(num_layers - 2):
self.convs.append(
RGCNConv(hidden_channels, hidden_channels, node_types,
edge_types))
self.convs.append(
RGCNConv(hidden_channels, out_channels, node_types, edge_types))
self.dropout = dropout
self.reset_parameters()
def reset_parameters(self):
for emb in self.embs.values():
torch.nn.init.xavier_uniform_(emb)
for conv in self.convs:
conv.reset_parameters()
def forward(self, x_dict, adj_t_dict):
x_dict = copy.copy(x_dict)
for key, emb in self.embs.items():
x_dict[key] = emb
for conv in self.convs[:-1]:
x_dict = conv(x_dict, adj_t_dict)
for key, x in x_dict.items():
x_dict[key] = F.relu(x)
x_dict[key] = F.dropout(x, p=self.dropout,
training=self.training)
return self.convs[-1](x_dict, adj_t_dict)
def train(model, x_dict, adj_t_dict, y_true, train_idx, optimizer):
model.train()
optimizer.zero_grad()
out = model(x_dict, adj_t_dict)['paper'].log_softmax(dim=-1)
loss = F.nll_loss(out[train_idx], y_true[train_idx].squeeze())
loss.backward()
optimizer.step()
return loss.item()
@torch.no_grad()
def test(model, x_dict, adj_t_dict, y_true, split_idx, evaluator):
model.eval()
out = model(x_dict, adj_t_dict)['paper']
y_pred = out.argmax(dim=-1, keepdim=True)
train_acc = evaluator.eval({
'y_true': y_true[split_idx['train']['paper']],
'y_pred': y_pred[split_idx['train']['paper']],
})['acc']
valid_acc = evaluator.eval({
'y_true': y_true[split_idx['valid']['paper']],
'y_pred': y_pred[split_idx['valid']['paper']],
})['acc']
test_acc = evaluator.eval({
'y_true': y_true[split_idx['test']['paper']],
'y_pred': y_pred[split_idx['test']['paper']],
})['acc']
return train_acc, valid_acc, test_acc
def main():
parser = argparse.ArgumentParser(description='OGBN-MAG (Full-Batch)')
parser.add_argument('--device', type=int, default=0)
parser.add_argument('--log_steps', type=int, default=1)
parser.add_argument('--num_layers', type=int, default=2)
parser.add_argument('--hidden_channels', type=int, default=64)
parser.add_argument('--dropout', type=float, default=0.5)
parser.add_argument('--lr', type=float, default=0.01)
parser.add_argument('--epochs', type=int, default=50)
parser.add_argument('--runs', type=int, default=10)
args = parser.parse_args()
print(args)
device = f'cuda:{args.device}' if torch.cuda.is_available() else 'cpu'
device = torch.device(device)
dataset = PygNodePropPredDataset(name='ogbn-mag')
split_idx = dataset.get_idx_split()
data = dataset[0]
# We do not consider those attributes for now.
data.node_year_dict = None
data.edge_reltype_dict = None
print(data)
# Convert to new transposed `SparseTensor` format and add reverse edges.
data.adj_t_dict = {}
for keys, (row, col) in data.edge_index_dict.items():
sizes = (data.num_nodes_dict[keys[0]], data.num_nodes_dict[keys[2]])
adj = SparseTensor(row=row, col=col, sparse_sizes=sizes)
# adj = SparseTensor(row=row, col=col)[:sizes[0], :sizes[1]] # TEST
if keys[0] != keys[2]:
data.adj_t_dict[keys] = adj.t()
data.adj_t_dict[(keys[2], 'to', keys[0])] = adj
else:
data.adj_t_dict[keys] = adj.to_symmetric()
data.edge_index_dict = None
x_types = list(data.x_dict.keys())
edge_types = list(data.adj_t_dict.keys())
model = RGCN(data.x_dict['paper'].size(-1), args.hidden_channels,
dataset.num_classes, args.num_layers, args.dropout,
data.num_nodes_dict, x_types, edge_types)
data = data.to(device)
model = model.to(device)
train_idx = split_idx['train']['paper'].to(device)
evaluator = Evaluator(name='ogbn-mag')
logger = Logger(args.runs, args)
for run in range(args.runs):
model.reset_parameters()
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
for epoch in range(1, 1 + args.epochs):
loss = train(model, data.x_dict, data.adj_t_dict,
data.y_dict['paper'], train_idx, optimizer)
result = test(model, data.x_dict, data.adj_t_dict,
data.y_dict['paper'], split_idx, evaluator)
logger.add_result(run, result)
if epoch % args.log_steps == 0:
train_acc, valid_acc, test_acc = result
print(f'Run: {run + 1:02d}, '
f'Epoch: {epoch:02d}, '
f'Loss: {loss:.4f}, '
f'Train: {100 * train_acc:.2f}%, '
f'Valid: {100 * valid_acc:.2f}% '
f'Test: {100 * test_acc:.2f}%')
logger.print_statistics(run)
logger.print_statistics()
if __name__ == "__main__":
main()
| [
"torch.nn.Linear",
"torch.device",
"torch.nn.ModuleList",
"torch.no_grad",
"torch.nn.functional.dropout",
"torch.nn.init.xavier_uniform_",
"torch.cuda.is_available",
"torch.nn.functional.relu",
"torch.Tensor"
] | 1.2.0 | rryoung98/ogb | 34a8ac53dc6dab215938b5b54caeff37a06300d6 |
1.6 | # Copyright 2021 One Theta. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
FeaturesDataReader is basically PredictionDataReader without the sentence embeddings.
The idea is to only relay these raw features and text without any neural features,
to be used for the full pass through ReadabilityTransformers.fit().
In contrast, PredictionDataReader was for rp_model.fit() and it served that
purpose by having the sentence embeddings be part of the dataset, since rp_model.fit()
only trains the prediction layer, separate from the transformer neural features.
"""
from typing import List
import torch
import numpy as np
import pandas as pd
from torch.utils.data import Dataset, DataLoader
from . import DataReader
class FeaturesDataset(Dataset):
def __init__(self, inputs, targets, standard_err=None, data_ids=None):
self.inputs = inputs
self.targets = targets
self.standard_err = standard_err if standard_err is not None else None
self.data_ids = data_ids if data_ids is not None else None
def __len__(self):
return len(self.inputs)
def __getitem__(self, idx):
return_dict = {
"inputs": self.inputs[idx],
}
if self.targets is not None:
return_dict["target"] = self.targets[idx]
if self.standard_err is not None:
return_dict["standard_err"] = self.standard_err[idx]
if self.data_ids is not None:
return_dict["data_ids"] = self.data_ids[idx]
return return_dict
class FeaturesDataReader(DataReader):
def __init__(
self,
feature_df: pd.DataFrame,
features: List[str],
text_column: str = "excerpt",
target_column: str = "target",
id_column: str = None,
classification: bool = False,
labels: List[str] = None,
no_targets: bool = False,
double: bool = True
):
"""Takes a pandas dataframe of features and text and an embedding matrix made from a SentenceTransformer
to create a general purpose datareader that the model code can utilize.
Args:
feature_df (pd.DataFrame): DF object with columns [excerpt, target, standard_error, feature_*]
embedding_matrix (np.ndarray): ST embedding matrix with same size.
features (List[str]): list of features in order to pull from df
no_targets (bool): for inference, we don't have a targets column. defaults to True.
"""
super(FeaturesDataReader, self).__init__()
self.standard_err = None
self.targets = None
self.classification = classification
self.labels = labels
if "standard_error" in feature_df.columns.values:
self.standard_err = feature_df["standard_error"].values
if not no_targets:
if target_column not in feature_df.columns.values:
raise Exception("Target column not found. If this is for inference, use no_targets=True.")
self.targets = feature_df[target_column].tolist()
if self.classification:
                # this is a datareader for a classification task.
if self.labels is None:
raise Exception("Target labels not given for a classification task.")
else:
target_index_list = []
for target in self.targets:
try:
index = self.labels.index(target)
target_index_list.append(index)
                        except ValueError:
raise Exception(f"target column has value {target} not found in labels={self.labels}")
self.targets = target_index_list
self.features = features
if id_column is not None:
self.data_ids = feature_df[id_column].values
else:
self.data_ids = None
self.inputs_features = feature_df[self.features].values
        if not double:  # since by default the above values are float64/double.
            self.inputs_features = self.inputs_features.astype(np.float32)
            if self.standard_err is not None:
                self.standard_err = self.standard_err.astype(np.float32)
            if self.targets is not None and not self.classification:
                self.targets = np.asarray(self.targets, dtype=np.float32)
N = len(feature_df)
F = len(self.inputs_features[0])
torch_dtype = torch.double if double else torch.float32
self.inputs_features = torch.tensor(self.inputs_features, dtype=torch_dtype)
self.texts = feature_df[text_column].values
# What was i thinking here?? inputs_features is supposed to come in ALREADY normalized with special configurations.
# self.inputs_features = (self.inputs_features - self.inputs_features.min()) / (self.inputs_features.max() - self.inputs_features.min()) # normalize data
self.inputs = []
for (passage, extracted_features) in zip(self.texts, self.inputs_features):
one_input = {
"text": passage,
"features": extracted_features
}
self.inputs.append(one_input)
self.dataset = FeaturesDataset(self.inputs, self.targets, self.standard_err, self.data_ids)
def get_input_features_in_order(self):
        return self.features
def get_standard_err_stats(self):
standard_error = self.standard_err
return {
"min": standard_error.min(),
"max": standard_error.max()
}
def get_dataset(self):
return self.dataset
def get_dataloader(self, batch_size: int, shuffle: bool = True):
dataloader = DataLoader(
self.dataset,
batch_size=batch_size,
shuffle=shuffle
)
return dataloader
def __len__(self):
        return len(self.inputs)
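# Hedged usage sketch (not from the readability-transformers repo): shows how FeaturesDataReader
# might be constructed from a feature DataFrame and turned into a DataLoader. The column names
# ("excerpt", "target", "feature_a", "feature_b") and the values below are illustrative assumptions.
if __name__ == "__main__":
    demo_df = pd.DataFrame({
        "excerpt": ["a short passage", "another passage"],
        "target": [0.5, -1.2],
        "feature_a": [0.1, 0.9],
        "feature_b": [0.3, 0.7],
    })
    reader = FeaturesDataReader(demo_df, features=["feature_a", "feature_b"])
    for batch in reader.get_dataloader(batch_size=2):
        print(batch["inputs"]["text"])            # list of raw passages
        print(batch["inputs"]["features"].shape)  # (batch_size, n_features)
        print(batch["target"])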
| [
"torch.tensor",
"torch.utils.data.DataLoader"
] | 1.6.0 | OneTheta/readability-transformers | 3c122c98a90c67add8eafad16563b269d5e3124a |
1.7 | import numpy as np
import torch
import logging
import losses
import json
from tqdm import tqdm
import torch.nn.functional as F
import math
def l2_norm(input):
input_size = input.size()
buffer = torch.pow(input, 2)
normp = torch.sum(buffer, 1).add_(1e-12)
norm = torch.sqrt(normp)
_output = torch.div(input, norm.view(-1, 1).expand_as(input))
output = _output.view(input_size)
return output
def calc_recall_at_k(T, Y, k):
"""
T : [nb_samples] (target labels)
Y : [nb_samples x k] (k predicted labels/neighbours)
"""
s = 0
for t,y in zip(T,Y):
if t in torch.Tensor(y).long()[:k]:
s += 1
return s / (1. * len(T))
def predict_batchwise(model, dataloader):
device = "cuda"
model_is_training = model.training
model.eval()
ds = dataloader.dataset
A = [[] for i in range(len(ds[0]))]
with torch.no_grad():
# extract batches (A becomes list of samples)
for batch in tqdm(dataloader):
for i, J in enumerate(batch):
# i = 0: sz_batch * images
# i = 1: sz_batch * labels
# i = 2: sz_batch * indices
if i == 0:
# move images to device of model (approximate device)
J = model(J.cuda())
for j in J:
A[i].append(j)
    model.train(model_is_training) # revert to previous training state
return [torch.stack(A[i]) for i in range(len(A))]
def proxy_init_calc(model, dataloader):
nb_classes = dataloader.dataset.nb_classes()
X, T, *_ = predict_batchwise(model, dataloader)
proxy_mean = torch.stack([X[T==class_idx].mean(0) for class_idx in range(nb_classes)])
return proxy_mean
def evaluate_cos(model, dataloader):
nb_classes = dataloader.dataset.nb_classes()
# calculate embeddings with model and get targets
X, T = predict_batchwise(model, dataloader)
X2 = l2_norm(X)
    # get predictions by assigning the K=32 nearest neighbors with cosine similarity
K = 32
Y = []
xs = []
cos_sim = F.linear(X2, X2)
Y = T[cos_sim.topk(1 + K)[1][:,1:]]
Y = Y.float().cpu()
recall = []
for k in [1, 2, 4, 8, 16, 32]:
r_at_k = calc_recall_at_k(T, Y, k)
recall.append(r_at_k)
print("R@{} : {:.3f}".format(k, 100 * r_at_k))
return recall
def evaluate_norm(model, dataloader):
nb_classes = dataloader.dataset.nb_classes()
# calculate embeddings with model and get targets
X, T = predict_batchwise(model, dataloader)
# X2 = l2_norm(X)
    # get predictions by assigning the K=32 nearest neighbors by Euclidean distance
K = 32
Y = []
xs = []
norm_sim = torch.cdist(X, X, p=2)
    Y = T[norm_sim.topk(1 + K, largest=False)[1][:, 1:]]
Y = Y.float().cpu()
recall = []
for k in [1, 2, 4, 8, 16, 32]:
r_at_k = calc_recall_at_k(T, Y, k)
recall.append(r_at_k)
print("R@{} : {:.3f}".format(k, 100 * r_at_k))
return recall
def evaluate_cos_Inshop(model, query_dataloader, gallery_dataloader):
nb_classes = query_dataloader.dataset.nb_classes()
# calculate embeddings with model and get targets
query_X, query_T = predict_batchwise(model, query_dataloader)
gallery_X, gallery_T = predict_batchwise(model, gallery_dataloader)
query_X = l2_norm(query_X)
gallery_X = l2_norm(gallery_X)
    # get predictions by assigning the K=50 nearest neighbors with cosine similarity
K = 50
Y = []
xs = []
cos_sim = F.linear(query_X, gallery_X)
def recall_k(cos_sim, query_T, gallery_T, k):
m = len(cos_sim)
match_counter = 0
for i in range(m):
pos_sim = cos_sim[i][gallery_T == query_T[i]]
neg_sim = cos_sim[i][gallery_T != query_T[i]]
thresh = torch.max(pos_sim).item()
if torch.sum(neg_sim > thresh) < k:
match_counter += 1
return match_counter / m
    # calculate recall @ 1, 10, 20, 30, 40, 50
recall = []
for k in [1, 10, 20, 30, 40, 50]:
r_at_k = recall_k(cos_sim, query_T, gallery_T, k)
recall.append(r_at_k)
print("R@{} : {:.3f}".format(k, 100 * r_at_k))
return recall
def evaluate_cos_SOP(model, dataloader):
nb_classes = dataloader.dataset.nb_classes()
# calculate embeddings with model and get targets
X, T = predict_batchwise(model, dataloader)
X = l2_norm(X)
    # get predictions by assigning the K=1000 nearest neighbors with cosine similarity
K = 1000
Y = []
xs = []
for x in X:
if len(xs)<10000:
xs.append(x)
else:
xs.append(x)
xs = torch.stack(xs,dim=0)
cos_sim = F.linear(xs,X)
y = T[cos_sim.topk(1 + K)[1][:,1:]]
Y.append(y.float().cpu())
xs = []
# Last Loop
xs = torch.stack(xs,dim=0)
cos_sim = F.linear(xs,X)
y = T[cos_sim.topk(1 + K)[1][:,1:]]
Y.append(y.float().cpu())
Y = torch.cat(Y, dim=0)
    # calculate recall @ 1, 10, 100, 1000
recall = []
for k in [1, 10, 100, 1000]:
r_at_k = calc_recall_at_k(T, Y, k)
recall.append(r_at_k)
print("R@{} : {:.3f}".format(k, 100 * r_at_k))
return recall
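# Hedged usage sketch (illustrative only): exercises l2_norm and calc_recall_at_k on toy
# tensors. In a real run, X and T would come from predict_batchwise(model, dataloader);
# the embeddings and labels below are made up.
if __name__ == "__main__":
    X = l2_norm(torch.randn(6, 4))        # 6 embeddings, unit l2 norm
    T = torch.tensor([0, 0, 1, 1, 2, 2])  # ground-truth labels
    cos_sim = F.linear(X, X)              # pairwise cosine similarities
    Y = T[cos_sim.topk(3)[1][:, 1:]]      # 2 nearest neighbors, self excluded
    print("R@1 : {:.3f}".format(100 * calc_recall_at_k(T, Y, 1)))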
| [
"torch.cat",
"torch.sqrt",
"torch.stack",
"torch.max",
"torch.no_grad",
"torch.sum",
"torch.nn.functional.linear",
"torch.Tensor",
"torch.cdist",
"torch.pow"
] | 1.7.1 | i92run/my_Proxy_Anchor_Loss | 42b008b1878f33cc276cda13752f1a57b29466c8 |
1.1 | import torch
from torch.autograd import Function
import torch.nn as nn
import sys
from typing import Optional, Any, Tuple
import torch_points_kernels.points_cpu as tpcpu
from .knn import knn
if torch.cuda.is_available():
import torch_points_kernels.points_cuda as tpcuda
def furthest_point_sample(xyz, npoint):
# type: (Any, torch.Tensor, int) -> torch.Tensor
r"""
Uses iterative furthest point sampling to select a set of npoint features that have the largest
minimum distance
Parameters
----------
xyz : torch.Tensor
(B, N, 3) tensor where N > npoint
npoint : int32
number of features in the sampled set
Returns
-------
torch.Tensor
(B, npoint) tensor containing the set
"""
if npoint > xyz.shape[1]:
raise ValueError("caanot sample %i points from an input set of %i points" % (npoint, xyz.shape[1]))
if xyz.is_cuda:
return tpcuda.furthest_point_sampling(xyz, npoint)
else:
return tpcpu.fps(xyz, npoint, True)
def three_nn(unknown, known):
r"""
Find the three nearest neighbors of unknown in known
Parameters
----------
unknown : torch.Tensor
(B, n, 3) tensor of known features
known : torch.Tensor
(B, m, 3) tensor of unknown features
Returns
-------
dist : torch.Tensor
(B, n, 3) l2 distance to the three nearest neighbors
idx : torch.Tensor
(B, n, 3) index of 3 nearest neighbors
"""
if unknown.shape[1] < 3:
raise ValueError("Not enough points. unknown should ahve at least 3 points.")
if unknown.is_cuda:
dist2, idx = tpcuda.three_nn(unknown, known)
else:
idx, dist2 = knn(known, unknown, 3)
return torch.sqrt(dist2), idx
class ThreeInterpolate(Function):
@staticmethod
def forward(ctx, features, idx, weight):
        # type: (Any, torch.Tensor, torch.Tensor, torch.Tensor) -> torch.Tensor
B, c, m = features.size()
n = idx.size(1)
ctx.three_interpolate_for_backward = (idx, weight, m)
if features.is_cuda:
return tpcuda.three_interpolate(features, idx, weight)
else:
return tpcpu.knn_interpolate(features, idx, weight)
@staticmethod
def backward(ctx, grad_out):
# type: (Any, torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]
r"""
Parameters
----------
grad_out : torch.Tensor
            (B, c, n) tensor with gradients of outputs
Returns
-------
grad_features : torch.Tensor
(B, c, m) tensor with gradients of features
None
None
"""
idx, weight, m = ctx.three_interpolate_for_backward
if grad_out.is_cuda:
grad_features = tpcuda.three_interpolate_grad(grad_out.contiguous(), idx, weight, m)
else:
grad_features = tpcpu.knn_interpolate_grad(grad_out.contiguous(), idx, weight, m)
return grad_features, None, None
def three_interpolate(features, idx, weight):
r"""
    Performs weighted linear interpolation on 3 features
Parameters
----------
features : torch.Tensor
(B, c, m) Features descriptors to be interpolated from
idx : torch.Tensor
(B, n, 3) three nearest neighbors of the target features in features
weight : torch.Tensor
(B, n, 3) weights
Returns
-------
torch.Tensor
(B, c, n) tensor of the interpolated features
"""
return ThreeInterpolate.apply(features, idx, weight)
def grouping_operation(features, idx):
r"""
Parameters
----------
features : torch.Tensor
(B, C, N) tensor of features to group
idx : torch.Tensor
        (B, npoint, nsample) tensor containing the indices of features to group with
Returns
-------
torch.Tensor
(B, C, npoint, nsample) tensor
"""
all_idx = idx.reshape(idx.shape[0], -1)
all_idx = all_idx.unsqueeze(1).repeat(1, features.shape[1], 1)
grouped_features = features.gather(2, all_idx)
return grouped_features.reshape(idx.shape[0], features.shape[1], idx.shape[1], idx.shape[2])
def ball_query_dense(radius, nsample, xyz, new_xyz, batch_xyz=None, batch_new_xyz=None, sort=False):
# type: (Any, float, int, torch.Tensor, torch.Tensor) -> torch.Tensor
if new_xyz.is_cuda:
if sort:
raise NotImplementedError("CUDA version does not sort the neighbors")
ind, dist = tpcuda.ball_query_dense(new_xyz, xyz, radius, nsample)
else:
ind, dist = tpcpu.dense_ball_query(new_xyz, xyz, radius, nsample, mode=0, sorted=sort)
return ind, dist
def ball_query_partial_dense(radius, nsample, x, y, batch_x, batch_y, sort=False):
# type: (Any, float, int, torch.Tensor, torch.Tensor) -> torch.Tensor
if x.is_cuda:
if sort:
raise NotImplementedError("CUDA version does not sort the neighbors")
ind, dist = tpcuda.ball_query_partial_dense(x, y, batch_x, batch_y, radius, nsample)
else:
ind, dist = tpcpu.batch_ball_query(x, y, batch_x, batch_y, radius, nsample, mode=0, sorted=sort)
return ind, dist
def ball_query(
radius: float,
nsample: int,
x: torch.Tensor,
y: torch.Tensor,
mode: Optional[str] = "dense",
batch_x: Optional[torch.tensor] = None,
batch_y: Optional[torch.tensor] = None,
sort: Optional[bool] = False,
) -> torch.Tensor:
"""
Arguments:
radius {float} -- radius of the balls
nsample {int} -- maximum number of features in the balls
x {torch.Tensor} --
(M, 3) [partial_dense] or (B, M, 3) [dense] xyz coordinates of the features
y {torch.Tensor} --
(npoint, 3) [partial_dense] or or (B, npoint, 3) [dense] centers of the ball query
mode {str} -- switch between "dense" or "partial_dense" data layout
Keyword Arguments:
        batch_x -- (M, ) [partial_dense] or (B, M, 3) [dense] Contains indexes indicating which batch each point of x belongs to.
        batch_y -- (N, ) Contains indexes indicating which batch each point of y belongs to
        sort -- bool, whether the neighbors are sorted or not (closest first)
    Returns:
        idx: (npoint, nsample) or (B, npoint, nsample) [dense] It contains the indexes of the elements of x within radius distance of y
        dist: (N, nsample) or (B, npoint, nsample) Default value: -1.
            It contains the squared distances of the elements of x within radius distance of y
"""
if mode is None:
raise Exception('The mode should be defined within ["partial_dense | dense"]')
if mode.lower() == "partial_dense":
if (batch_x is None) or (batch_y is None):
raise Exception("batch_x and batch_y should be provided")
assert x.size(0) == batch_x.size(0)
assert y.size(0) == batch_y.size(0)
assert x.dim() == 2
return ball_query_partial_dense(radius, nsample, x, y, batch_x, batch_y, sort=sort)
elif mode.lower() == "dense":
if (batch_x is not None) or (batch_y is not None):
raise Exception("batch_x and batch_y should not be provided")
assert x.dim() == 3
return ball_query_dense(radius, nsample, x, y, sort=sort)
else:
raise Exception("unrecognized mode {}".format(mode))
| [
"torch.cuda.is_available",
"torch.sqrt"
] | 1.1.0 | duducheng/torch-points-kernels | aed9cf56ca61fe34b4880159951760e5dcb3a1db |
1.8 | import sys
sys.path.append('..')
from utilities import labels_from_ids
from dataset import BalancedBatchSampler, make_dataset
from nets import EmbeddingNet
from plots import extract_embeddings, plot_embeddings
from losses import OnlineTripletLoss, AverageNonzeroTripletsMetric
from deep_ranking_utils import HardestNegativeTripletSelector, \
SemihardNegativeTripletSelector, \
RandomNegativeTripletSelector, Experiment
from torch.optim import lr_scheduler
from torch.utils.tensorboard import SummaryWriter # to print to tensorboard
from sklearn import preprocessing
from utilities import dict_from_json
from sklearn.model_selection import ParameterGrid
import torch
import torch.optim as optim # For all Optimization algorithms, SGD, Adam, etc.
from torch.utils.data import (
DataLoader,
) # Gives easier dataset management and creates mini batches
from datetime import date
cuda = torch.cuda.is_available()
#print('device:', str(torch.cuda.get_device_name()))
# PARAMETERS TO SEARCH:
param_grid = {'n_epochs': [1, 10, 15, 25], 'lr': [0.0001, 0.005, 0.1],'margin':[1]}
# PARAMETERS THAT CAN BE MANUALLY ADJUSTED:
# datasets:
n_test_products = 200 # the number of products that go into the test dataset
n_train_classes = 40 # the number of classes per batch for the BalancedBatchSampler in the train dataloader
n_test_classes = 40 # the number of classes per batch for the BalancedBatchSampler in the test dataloader
n_samples = 20
# model training:
#margin = 1 # can't go into search?
# MAKING THE DATASETS
# fit the encoder:
label_encoder = preprocessing.LabelEncoder()
catalog = dict_from_json('../catalog.json')
label_encoder.fit(list(catalog.keys()))
# make the 'normal' datasets:
train_dataset, test_dataset = make_dataset(label_encoder, n_val_products=n_test_products)
# make the batch samplers:
train_batch_sampler = BalancedBatchSampler(train_dataset, n_classes=n_train_classes, n_samples=n_samples)
test_batch_sampler = BalancedBatchSampler(test_dataset, n_classes=n_test_classes, n_samples=n_samples)
# make the dataloaders:
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=n_train_classes*n_samples, shuffle=True)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=n_test_classes*n_samples, shuffle=False)
# load the dataset:
kwargs = {'num_workers': 1, 'pin_memory': True} if cuda else {}
online_train_loader = torch.utils.data.DataLoader(train_dataset, batch_sampler=train_batch_sampler, **kwargs)
online_test_loader = torch.utils.data.DataLoader(test_dataset, batch_sampler=test_batch_sampler, **kwargs)
# run experiments:
experiments = []
for experiment in list(ParameterGrid(param_grid)):
# make the model:
embedding_net = EmbeddingNet()
model = embedding_net
# make the sampling methods:
random = RandomNegativeTripletSelector(experiment['margin'])
semi_hard = SemihardNegativeTripletSelector(experiment['margin'])
hard = HardestNegativeTripletSelector(experiment['margin'])
for sampling_method in [random, semi_hard, hard]:
# HardestNegativeTripletSelector, RandomNegativeTripletSelector, SemihardNegativeTripletSelector
loss_fn = OnlineTripletLoss(experiment['margin'], sampling_method)
optimizer = optim.Adam(model.parameters(), lr=experiment['lr'], weight_decay=1e-4)
scheduler = lr_scheduler.StepLR(optimizer, 8, gamma=0.1, last_epoch=-1)
if cuda:
model.cuda()
# make the whole grid thing here
run = Experiment(train_loader=online_train_loader, val_loader=online_test_loader, model=model, label_encoder=label_encoder, loss_fn=loss_fn,
optimizer=optimizer, scheduler=scheduler, cuda=cuda, kind=sampling_method.name,
to_tensorboard=True, metrics=[AverageNonzeroTripletsMetric()], start_epoch=0, margin=experiment['margin'], lr=experiment['lr'],
n_epochs=experiment['n_epochs'])
experiments.append(run)
torch.save(run.model.state_dict(), 'models/online_{}_model_ep_{}_lr_{}_margin_{}_date_{}_loss_{}.pth'.format(sampling_method.name, experiment['n_epochs'], experiment['lr'], experiment['margin'], date.today(),round(run.val_loss, 4)))
| [
"torch.optim.lr_scheduler.StepLR",
"torch.cuda.is_available",
"torch.utils.data.DataLoader"
] | 1.8 | natashanorsker/fagprojekt | ef9a8cc2128c43d891c8a7a47e14916af2b9c602 |
1.7 | #!/usr/bin/env python3
import os
import copy
import torch
import os.path
import argparse
import numpy as np
import torch.nn as nn
import torch.optim as optim
from torchvision import models
import torch.utils.data as Data
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision.models.vgg import VGG
import torchvision.transforms as transforms
from torchvision.datasets import CocoDetection
from memory import Memory
from torchutil import Split2d, Merge2d
class Interestingness(nn.Module):
def __init__(self, autoencoder, N, C, H, W, h, w):
super().__init__()
self.ae = autoencoder
self.memory = Memory(N, C, h, w)
self.split2d = Split2d(kernel_size=(h, w))
self.merge2d = Merge2d(output_size=(H, W), kernel_size=(h, w))
self.set_parameters()
self.set_train(False)
def forward(self, x):
coding = self.ae.encoder(x)
coding = self.split2d(coding)
if self.train:
self.memory.write(coding)
states = self.memory.read(coding)
states = self.merge2d(states)
output = self.ae.decoder(states)
return output
else:
# self.coding, self.states, saved for human interaction package
# Go https://github.com/wang-chen/interaction.git
self.states, self.coding = self.memory.read(coding), coding
self.memory.write(coding)
self.reads = self.merge2d(self.states)
return 1-F.cosine_similarity(coding.view(coding.size(1),-1), self.reads.view(self.reads.size(1),-1),dim=-1).mean()
def output(self):
return self.ae.decoder(self.reads)
def listen(self, x):
coding = self.ae.encoder(x)
coding = self.split2d(coding)
states = self.memory.read(coding)
states = self.merge2d(states)
return self.ae.decoder(states)
def set_parameters(self):
for param in self.ae.parameters():
param.requires_grad = False
for param in self.memory.parameters():
param.requires_grad = True
def set_train(self, train):
self.train = train
if __name__ == "__main__":
from autoencoder import AutoEncoder
x = torch.rand(15, 3, 320, 320)
ae = AutoEncoder()
net = Interestingness(ae, 200, 512, 10, 10, 10, 10)
y = net(x)
| [
"torch.rand"
] | 1.7.0 | wang-chen/interestingness | 505ed5af22cf92fe3af095b3a56cdc78078c86c6 |
1.8 | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from LibMTL._record import _PerformanceMeter
from LibMTL.utils import count_parameters
class Trainer(nn.Module):
r'''A Multi-Task Learning Trainer.
This is a unified and extensible training framework for multi-task learning.
Args:
task_dict (dict): A dictionary of name-information pairs of type (:class:`str`, :class:`dict`). \
The sub-dictionary for each task has four entries whose keywords are named **metrics**, \
**metrics_fn**, **loss_fn**, **weight** and each of them corresponds to a :class:`list`.
            The list of **metrics** has ``m`` strings, representing the names of the ``m`` metrics \
            for this task. The list of **metrics_fn** has two elements, i.e., the updating and score \
            functions, specifying how to update those objectives in the training process and obtain the final \
scores, respectively. The list of **loss_fn** has ``m`` loss functions corresponding to each \
metric. The list of **weight** has ``m`` binary integers corresponding to each \
metric, where ``1`` means the higher the score is, the better the performance, \
``0`` means the opposite.
weighting (class): A weighting strategy class based on :class:`LibMTL.weighting.abstract_weighting.AbsWeighting`.
architecture (class): An architecture class based on :class:`LibMTL.architecture.abstract_arch.AbsArchitecture`.
encoder_class (class): A neural network class.
decoders (dict): A dictionary of name-decoder pairs of type (:class:`str`, :class:`torch.nn.Module`).
rep_grad (bool): If ``True``, the gradient of the representation for each task can be computed.
multi_input (bool): Is ``True`` if each task has its own input data, otherwise is ``False``.
optim_param (dict): A dictionary of configurations for the optimizier.
scheduler_param (dict): A dictionary of configurations for learning rate scheduler. \
Set it to ``None`` if you do not use a learning rate scheduler.
kwargs (dict): A dictionary of hyperparameters of weighting and architecture methods.
.. note::
It is recommended to use :func:`LibMTL.config.prepare_args` to return the dictionaries of ``optim_param``, \
``scheduler_param``, and ``kwargs``.
Examples::
import torch.nn as nn
from LibMTL import Trainer
from LibMTL.loss import CE_loss_fn
from LibMTL.metrics import acc_update_fun, acc_score_fun
from LibMTL.weighting import EW
from LibMTL.architecture import HPS
from LibMTL.model import ResNet18
from LibMTL.config import prepare_args
task_dict = {'A': {'metrics': ['Acc'],
'metrics_fn': [acc_update_fun, acc_score_fun],
'loss_fn': [CE_loss_fn],
'weight': [1]}}
decoders = {'A': nn.Linear(512, 31)}
# You can use command-line arguments and return configurations by ``prepare_args``.
# kwargs, optim_param, scheduler_param = prepare_args(params)
optim_param = {'optim': 'adam', 'lr': 1e-3, 'weight_decay': 1e-4}
scheduler_param = {'scheduler': 'step'}
kwargs = {'weight_args': {}, 'arch_args': {}}
trainer = Trainer(task_dict=task_dict,
weighting=EW,
architecture=HPS,
encoder_class=ResNet18,
decoders=decoders,
rep_grad=False,
multi_input=False,
optim_param=optim_param,
scheduler_param=scheduler_param,
**kwargs)
'''
def __init__(self, task_dict, weighting, architecture, encoder_class, decoders,
rep_grad, multi_input, optim_param, scheduler_param, **kwargs):
super(Trainer, self).__init__()
self.device = torch.device('cuda:0')
self.kwargs = kwargs
self.task_dict = task_dict
self.task_num = len(task_dict)
self.task_name = list(task_dict.keys())
self.rep_grad = rep_grad
self.multi_input = multi_input
self._prepare_model(weighting, architecture, encoder_class, decoders)
self._prepare_optimizer(optim_param, scheduler_param)
self.meter = _PerformanceMeter(self.task_dict, self.multi_input)
def _prepare_model(self, weighting, architecture, encoder_class, decoders):
class MTLmodel(architecture, weighting):
def __init__(self, task_name, encoder_class, decoders, rep_grad, multi_input, device, kwargs):
super(MTLmodel, self).__init__(task_name, encoder_class, decoders, rep_grad, multi_input, device, **kwargs)
self.init_param()
self.model = MTLmodel(task_name=self.task_name,
encoder_class=encoder_class,
decoders=decoders,
rep_grad=self.rep_grad,
multi_input=self.multi_input,
device=self.device,
kwargs=self.kwargs['arch_args']).to(self.device)
count_parameters(self.model)
def _prepare_optimizer(self, optim_param, scheduler_param):
optim_dict = {
'sgd': torch.optim.SGD,
'adam': torch.optim.Adam,
'adagrad': torch.optim.Adagrad,
'rmsprop': torch.optim.RMSprop,
}
scheduler_dict = {
'exp': torch.optim.lr_scheduler.ExponentialLR,
'step': torch.optim.lr_scheduler.StepLR,
'cos': torch.optim.lr_scheduler.CosineAnnealingLR,
}
optim_arg = {k: v for k, v in optim_param.items() if k != 'optim'}
self.optimizer = optim_dict[optim_param['optim']](self.model.parameters(), **optim_arg)
if scheduler_param is not None:
scheduler_arg = {k: v for k, v in scheduler_param.items() if k != 'scheduler'}
self.scheduler = scheduler_dict[scheduler_param['scheduler']](self.optimizer, **scheduler_arg)
else:
self.scheduler = None
def _process_data(self, loader):
        try:
            data, label = next(loader[1])
        except StopIteration:
            loader[1] = iter(loader[0])
            data, label = next(loader[1])
data = data.to(self.device, non_blocking=True)
if not self.multi_input:
for task in self.task_name:
label[task] = label[task].to(self.device, non_blocking=True)
else:
label = label.to(self.device, non_blocking=True)
return data, label
def process_preds(self, preds, task_name=None):
r'''The processing of prediction for each task.
- The default is no processing. If necessary, you can rewrite this function.
- If ``multi_input`` is ``True``, ``task_name`` is valid and ``preds`` with type :class:`torch.Tensor` is the prediction of this task.
- otherwise, ``task_name`` is invalid and ``preds`` is a :class:`dict` of name-prediction pairs of all tasks.
Args:
preds (dict or torch.Tensor): The prediction of ``task_name`` or all tasks.
task_name (str): The string of task name.
'''
return preds
def _compute_loss(self, preds, gts, task_name=None):
if not self.multi_input:
train_losses = torch.zeros(self.task_num).to(self.device)
for tn, task in enumerate(self.task_name):
train_losses[tn] = self.meter.losses[task]._update_loss(preds[task], gts[task])
else:
train_losses = self.meter.losses[task_name]._update_loss(preds, gts)
return train_losses
def _prepare_dataloaders(self, dataloaders):
if not self.multi_input:
loader = [dataloaders, iter(dataloaders)]
return loader, len(dataloaders)
else:
loader = {}
batch_num = []
for task in self.task_name:
loader[task] = [dataloaders[task], iter(dataloaders[task])]
batch_num.append(len(dataloaders[task]))
return loader, batch_num
def train(self, train_dataloaders, test_dataloaders, epochs,
val_dataloaders=None, return_weight=False):
r'''The training process of multi-task learning.
Args:
train_dataloaders (dict or torch.utils.data.DataLoader): The dataloaders used for training. \
If ``multi_input`` is ``True``, it is a dictionary of name-dataloader pairs. \
Otherwise, it is a single dataloader which returns data and a dictionary \
of name-label pairs in each iteration.
test_dataloaders (dict or torch.utils.data.DataLoader): The dataloaders used for the validation or testing. \
The same structure with ``train_dataloaders``.
epochs (int): The total training epochs.
return_weight (bool): if ``True``, the loss weights will be returned.
'''
train_loader, train_batch = self._prepare_dataloaders(train_dataloaders)
train_batch = max(train_batch) if self.multi_input else train_batch
self.batch_weight = np.zeros([self.task_num, epochs, train_batch])
self.model.train_loss_buffer = np.zeros([self.task_num, epochs])
for epoch in range(epochs):
self.model.epoch = epoch
self.model.train()
self.meter.record_time('begin')
for batch_index in range(train_batch):
if not self.multi_input:
train_inputs, train_gts = self._process_data(train_loader)
train_preds = self.model(train_inputs)
train_preds = self.process_preds(train_preds)
train_losses = self._compute_loss(train_preds, train_gts)
self.meter.update(train_preds, train_gts)
else:
train_losses = torch.zeros(self.task_num).to(self.device)
for tn, task in enumerate(self.task_name):
train_input, train_gt = self._process_data(train_loader[task])
train_pred = self.model(train_input, task)
train_pred = train_pred[task]
train_pred = self.process_preds(train_pred, task)
train_losses[tn] = self._compute_loss(train_pred, train_gt, task)
self.meter.update(train_pred, train_gt, task)
self.optimizer.zero_grad()
w = self.model.backward(train_losses, **self.kwargs['weight_args'])
if w is not None:
self.batch_weight[:, epoch, batch_index] = w
self.optimizer.step()
self.meter.record_time('end')
self.meter.get_score()
self.model.train_loss_buffer[:, epoch] = self.meter.loss_item
self.meter.display(epoch=epoch, mode='train')
self.meter.reinit()
if val_dataloaders is not None:
self.meter.has_val = True
self.test(val_dataloaders, epoch, mode='val')
self.test(test_dataloaders, epoch, mode='test')
if self.scheduler is not None:
self.scheduler.step()
self.meter.display_best_result()
if return_weight:
return self.batch_weight
def test(self, test_dataloaders, epoch=None, mode='test'):
r'''The test process of multi-task learning.
Args:
test_dataloaders (dict or torch.utils.data.DataLoader): If ``multi_input`` is ``True``, \
it is a dictionary of name-dataloader pairs. Otherwise, it is a single \
dataloader which returns data and a dictionary of name-label pairs in each iteration.
epoch (int, default=None): The current epoch.
'''
test_loader, test_batch = self._prepare_dataloaders(test_dataloaders)
self.model.eval()
self.meter.record_time('begin')
with torch.no_grad():
if not self.multi_input:
for batch_index in range(test_batch):
test_inputs, test_gts = self._process_data(test_loader)
test_preds = self.model(test_inputs)
test_preds = self.process_preds(test_preds)
test_losses = self._compute_loss(test_preds, test_gts)
self.meter.update(test_preds, test_gts)
else:
for tn, task in enumerate(self.task_name):
for batch_index in range(test_batch[tn]):
test_input, test_gt = self._process_data(test_loader[task])
test_pred = self.model(test_input, task)
test_pred = test_pred[task]
test_pred = self.process_preds(test_pred)
test_loss = self._compute_loss(test_pred, test_gt, task)
self.meter.update(test_pred, test_gt, task)
self.meter.record_time('end')
self.meter.get_score()
self.meter.display(epoch=epoch, mode=mode)
self.meter.reinit()
| [
"torch.zeros",
"torch.device",
"torch.no_grad"
] | 1.8.0 | median-research-group/LibMTL | b0937ff59429a7cfbca66fe6bc3385258c2d6461 |
1.8 | from torch.utils.data import DataLoader, Dataset
import os
import torch
import torch.nn.functional as F
import torchvision.transforms as transforms
from PIL import Image
class office_Dataset(Dataset):
def __init__(self, dataset, root_path, task, mode):
self.transform = transforms.Compose([
transforms.Resize((224, 224)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],std=[0.229, 0.224, 0.225]),
])
        with open('./data_txt/{}/{}_{}.txt'.format(dataset, task, mode), 'r') as f:
            self.img_list = f.readlines()
self.root_path = root_path
def __getitem__(self, i):
img_path = self.img_list[i][:-1].split(' ')[0]
y = int(self.img_list[i][:-1].split(' ')[1])
img = Image.open(os.path.join(self.root_path, img_path)).convert('RGB')
return self.transform(img), y
def __len__(self):
return len(self.img_list)
def office_dataloader(dataset, batchsize, root_path):
if dataset == 'office-31':
tasks = ['amazon', 'dslr', 'webcam']
elif dataset == 'office-home':
tasks = ['Art', 'Clipart', 'Product', 'Real_World']
data_loader = {}
iter_data_loader = {}
for k, d in enumerate(tasks):
data_loader[d] = {}
iter_data_loader[d] = {}
for mode in ['train', 'val', 'test']:
            shuffle = (mode == 'train')
            drop_last = (mode == 'train')
txt_dataset = office_Dataset(dataset, root_path, d, mode)
# print(d, mode, len(txt_dataset))
data_loader[d][mode] = DataLoader(txt_dataset,
num_workers=0,
pin_memory=True,
batch_size=batchsize,
shuffle=shuffle,
drop_last=drop_last)
iter_data_loader[d][mode] = iter(data_loader[d][mode])
return data_loader, iter_data_loader
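# Hedged usage sketch (illustrative only): the dataset root below is a placeholder path;
# running it requires the Office-31 images and the ./data_txt split files on disk.
if __name__ == "__main__":
    data_loader, iter_data_loader = office_dataloader('office-31', batchsize=64,
                                                      root_path='/path/to/office-31')
    x, y = next(iter_data_loader['amazon']['train'])
    print(x.shape, y.shape)  # expected: torch.Size([64, 3, 224, 224]) torch.Size([64])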
| [
"torch.utils.data.DataLoader"
] | 1.8.0 | median-research-group/LibMTL | b0937ff59429a7cfbca66fe6bc3385258c2d6461 |
1.6 | import json
from glob import glob
import numpy as np
import pytorch_lightning as pl
import torch
from audio_processing import random_crop
from prepare_data import get_id_from_path
from pytorch_lightning.loggers import TensorBoardLogger
from sklearn.model_selection import train_test_split
from torch.nn import functional as F
from torch.utils.data import DataLoader
from pytorch_lightning.callbacks import ModelCheckpoint
class AudioDataset(torch.utils.data.Dataset):
def __init__(self, data, max_len=512):
self.data = data
self.max_len = max_len
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
npy_path = self.data[idx][0]
label = self.data[idx][1]
array = np.load(npy_path)
array = random_crop(array, crop_size=self.max_len)
tokens = torch.tensor(array, dtype=torch.float)
label = torch.tensor(label, dtype=torch.long)
return tokens, label
class AudioClassifier(pl.LightningModule):
def __init__(self, classes=8, input_size=128, reconstruction_weight=0.1, p=0.3):
super().__init__()
self.save_hyperparameters()
self.reconstruction_weight = reconstruction_weight
self.input_size = input_size
self.p = p
self.do = torch.nn.Dropout(p=self.p)
self.lstm1 = torch.nn.LSTM(
input_size=self.input_size,
hidden_size=self.input_size,
bidirectional=True,
batch_first=True,
)
self.lstm2 = torch.nn.LSTM(
input_size=2 * self.input_size,
hidden_size=self.input_size,
bidirectional=True,
batch_first=True,
)
self.fc1 = torch.nn.Linear(self.input_size * 2, self.input_size)
self.fy = torch.nn.Linear(self.input_size, classes)
self.fc2 = torch.nn.Linear(self.input_size * 2, input_size)
def forward(self, x):
x = self.do(x)
x, _ = self.lstm1(x)
x_seq, _ = self.lstm2(x)
x, _ = torch.max(self.do(x_seq), dim=1)
x = F.relu(self.do(self.fc1(x)))
y_hat = self.fy(x)
x_reconstruction = torch.clamp(self.fc2(self.do(x_seq)), -1.0, 1.0)
return y_hat, x_reconstruction
def training_step(self, batch, batch_idx):
x, y = batch
y_hat, x_reconstruction = self(x)
loss_y = F.cross_entropy(y_hat, y)
loss_x = F.l1_loss(x, x_reconstruction)
return loss_y + self.reconstruction_weight * loss_x
def validation_step(self, batch, batch_idx):
x, y = batch
y_hat, x_reconstruction = self(x)
loss_y = F.cross_entropy(y_hat, y)
loss_x = F.l1_loss(x, x_reconstruction)
loss = loss_y + self.reconstruction_weight * loss_x
_, predicted = torch.max(y_hat, 1)
acc = (predicted == y).double().mean()
self.log("valid_loss", loss)
self.log("valid_loss_y", loss_y)
self.log("valid_loss_x", loss_x)
self.log("valid_acc", acc)
def test_step(self, batch, batch_idx):
x, y = batch
y_hat, x_reconstruction = self(x)
loss_y = F.cross_entropy(y_hat, y)
loss_x = F.l1_loss(x, x_reconstruction)
loss = loss_y + self.reconstruction_weight * loss_x
_, predicted = torch.max(y_hat, 1)
acc = (predicted == y).double().mean()
self.log("test_loss", loss)
self.log("test_loss_y", loss_y)
self.log("test_loss_x", loss_x)
self.log("test_acc", acc)
def configure_optimizers(self):
return torch.optim.Adam(self.parameters(), lr=1e-4)
class DecayLearningRate(pl.Callback):
def __init__(self):
self.old_lrs = []
def on_train_start(self, trainer, pl_module):
# track the initial learning rates
for opt_idx, optimizer in enumerate(trainer.optimizers):
group = []
for param_group in optimizer.param_groups:
group.append(param_group["lr"])
self.old_lrs.append(group)
def on_train_epoch_end(self, trainer, pl_module, outputs):
for opt_idx, optimizer in enumerate(trainer.optimizers):
old_lr_group = self.old_lrs[opt_idx]
new_lr_group = []
for p_idx, param_group in enumerate(optimizer.param_groups):
old_lr = old_lr_group[p_idx]
new_lr = old_lr * 0.97
new_lr_group.append(new_lr)
param_group["lr"] = new_lr
self.old_lrs[opt_idx] = new_lr_group
if __name__ == "__main__":
import argparse
from pathlib import Path
parser = argparse.ArgumentParser()
parser.add_argument("--metadata_path")
parser.add_argument("--mp3_path")
parser.add_argument("--reconstruction_weight", type=float)
args = parser.parse_args()
metadata_path = Path(args.metadata_path)
mp3_path = Path(args.mp3_path)
batch_size = 32
epochs = 256
reconstruction_weight = args.reconstruction_weight
CLASS_MAPPING = json.load(open(metadata_path / "mapping.json"))
id_to_genres = json.load(open(metadata_path / "tracks_genre.json"))
id_to_genres = {int(k): v for k, v in id_to_genres.items()}
files = sorted(list(glob(str(mp3_path / "*/*.npy"))))
labels = [CLASS_MAPPING[id_to_genres[int(get_id_from_path(x))]] for x in files]
print(len(labels))
samples = list(zip(files, labels))
_train, test = train_test_split(
samples, test_size=0.2, random_state=1337, stratify=[a[1] for a in samples]
)
train, val = train_test_split(
_train, test_size=0.1, random_state=1337, stratify=[a[1] for a in _train]
)
train_data = AudioDataset(train)
test_data = AudioDataset(test)
val_data = AudioDataset(val)
train_loader = DataLoader(train_data, batch_size=batch_size, num_workers=8, shuffle=True)
val_loader = DataLoader(val_data, batch_size=batch_size, num_workers=8, shuffle=True)
test_loader = DataLoader(
test_data, batch_size=batch_size, shuffle=False, num_workers=8
)
model = AudioClassifier(reconstruction_weight=reconstruction_weight)
logger = TensorBoardLogger(
save_dir="../",
version="Lambda=%s" % reconstruction_weight,
name="lightning_logs",
)
checkpoint_callback = ModelCheckpoint(
monitor="valid_acc",
mode="max",
filepath="../models/",
prefix="model_%s" % reconstruction_weight,
)
trainer = pl.Trainer(
max_epochs=epochs,
gpus=1,
logger=logger,
checkpoint_callback=checkpoint_callback,
callbacks=[DecayLearningRate()],
)
trainer.fit(model, train_loader, val_loader)
trainer.test(test_dataloaders=test_loader)
| [
"torch.nn.Linear",
"torch.nn.Dropout",
"torch.nn.LSTM",
"torch.max",
"torch.nn.functional.l1_loss",
"torch.nn.functional.cross_entropy",
"torch.tensor",
"torch.utils.data.DataLoader"
] | 1.6.0 | CVxTz/ReconstructionAuxLoss | 0d3b6651bd72aace2f10a698ae2d907a1b4f1bd5 |
0.4 | from __future__ import absolute_import, division, print_function
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import grad
class BaseFlow(nn.Module):
""" """
def __init__(self, n_inputs, **kwargs):
super(BaseFlow, self).__init__()
self.n_inputs = n_inputs
def forward(self, x, **kwargs):
raise NotImplementedError
def generate_samples(self, n_samples=1, u=None, **kwargs):
raise NotImplementedError
def log_likelihood(self, x, **kwargs):
""" Calculates log p(x) with a Gaussian base density """
u, logdet_dudx = self.forward(x, **kwargs)
constant = float(- 0.5 * self.n_inputs * np.log(2. * np.pi))
log_likelihood = constant - 0.5 * torch.sum(u ** 2, dim=1) + logdet_dudx
return u, log_likelihood
def log_likelihood_and_score(self, x, **kwargs):
""" Calculates log p(x) and t(x) with a Gaussian base density """
u, log_likelihood = self.log_likelihood(x, **kwargs)
return u, log_likelihood, None
class BaseConditionalFlow(nn.Module):
def __init__(self, n_conditionals, n_inputs, **kwargs):
super(BaseConditionalFlow, self).__init__()
self.n_conditionals = n_conditionals
self.n_inputs = n_inputs
def forward(self, theta, x, **kwargs):
raise NotImplementedError
def generate_samples(self, theta, u=None, **kwargs):
raise NotImplementedError
def log_likelihood(self, theta, x, **kwargs):
""" Calculates u(x) and log p(x) with a Gaussian base density """
u, logdet_dudx = self.forward(theta, x, **kwargs)
constant = float(- 0.5 * self.n_inputs * np.log(2. * np.pi))
log_likelihood = constant - 0.5 * torch.sum(u ** 2, dim=1) + logdet_dudx
return u, log_likelihood
def log_likelihood_and_score(self, theta, x, **kwargs):
""" Calculates u(x), log p(x), and the score t(x) with a Gaussian base density """
if theta.shape[0] == 1:
theta = theta.expand(x.shape[0], -1)
if not theta.requires_grad:
theta.requires_grad = True
u, log_likelihood = self.log_likelihood(theta, x, **kwargs)
score = grad(log_likelihood, theta,
grad_outputs=torch.ones_like(log_likelihood.data),
only_inputs=True, create_graph=True)[0]
return u, log_likelihood, score
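# Hedged usage sketch (illustrative only, not part of madminer): a trivial identity flow where
# u = x and log|det du/dx| = 0, so log_likelihood reduces to a standard-normal log density.
# It only exists to show how the BaseFlow interface is meant to be called.
class _IdentityFlow(BaseFlow):
    def forward(self, x, **kwargs):
        return x, torch.zeros(x.shape[0])
if __name__ == "__main__":
    flow = _IdentityFlow(n_inputs=3)
    x = torch.randn(5, 3)
    u, log_p = flow.log_likelihood(x)
    print(log_p.shape)  # expected: torch.Size([5])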
| [
"torch.ones_like",
"torch.sum"
] | 0.4.0 | vischia/madminer | 98c2bcfb93d0fd84ff1872b344c4d89adf51217f |
0.4 | import torch
import numpy as np
import torch.nn as nn
from math import ceil
from torch.autograd import Variable
from semseg import caffe_pb2
from semseg.models.utils import *
from semseg.loss import *
icnet_specs = {
"cityscapes": {
"n_classes": 19,
"input_size": (1025, 2049),
"block_config": [3, 4, 6, 3],
}
}
class icnet(nn.Module):
"""
Image Cascade Network
URL: https://arxiv.org/abs/1704.08545
References:
1) Original Author's code: https://github.com/hszhao/ICNet
2) Chainer implementation by @mitmul: https://github.com/mitmul/chainer-pspnet
3) TensorFlow implementation by @hellochick: https://github.com/hellochick/ICNet-tensorflow
"""
def __init__(
self,
n_classes=19,
block_config=[3, 4, 6, 3],
input_size=(1025, 2049),
version=None,
with_bn=True,
):
super(icnet, self).__init__()
bias = not with_bn
self.block_config = (
icnet_specs[version]["block_config"]
if version is not None
else block_config
)
self.n_classes = (
icnet_specs[version]["n_classes"] if version is not None else n_classes
)
self.input_size = (
icnet_specs[version]["input_size"] if version is not None else input_size
)
# Encoder
self.convbnrelu1_1 = conv2DBatchNormRelu(
in_channels=3,
k_size=3,
n_filters=32,
padding=1,
stride=2,
bias=bias,
with_bn=with_bn,
)
self.convbnrelu1_2 = conv2DBatchNormRelu(
in_channels=32,
k_size=3,
n_filters=32,
padding=1,
stride=1,
bias=bias,
with_bn=with_bn,
)
self.convbnrelu1_3 = conv2DBatchNormRelu(
in_channels=32,
k_size=3,
n_filters=64,
padding=1,
stride=1,
bias=bias,
with_bn=with_bn,
)
# Vanilla Residual Blocks
self.res_block2 = residualBlockPSP(
self.block_config[0], 64, 32, 128, 1, 1, with_bn=with_bn
)
self.res_block3_conv = residualBlockPSP(
self.block_config[1],
128,
64,
256,
2,
1,
include_range="conv",
with_bn=with_bn,
)
self.res_block3_identity = residualBlockPSP(
self.block_config[1],
128,
64,
256,
2,
1,
include_range="identity",
with_bn=with_bn,
)
# Dilated Residual Blocks
self.res_block4 = residualBlockPSP(
self.block_config[2], 256, 128, 512, 1, 2, with_bn=with_bn
)
self.res_block5 = residualBlockPSP(
self.block_config[3], 512, 256, 1024, 1, 4, with_bn=with_bn
)
# Pyramid Pooling Module
self.pyramid_pooling = pyramidPooling(
1024, [6, 3, 2, 1], model_name="icnet", fusion_mode="sum", with_bn=with_bn
)
# Final conv layer with kernel 1 in sub4 branch
self.conv5_4_k1 = conv2DBatchNormRelu(
in_channels=1024,
k_size=1,
n_filters=256,
padding=0,
stride=1,
bias=bias,
with_bn=with_bn,
)
# High-resolution (sub1) branch
self.convbnrelu1_sub1 = conv2DBatchNormRelu(
in_channels=3,
k_size=3,
n_filters=32,
padding=1,
stride=2,
bias=bias,
with_bn=with_bn,
)
self.convbnrelu2_sub1 = conv2DBatchNormRelu(
in_channels=32,
k_size=3,
n_filters=32,
padding=1,
stride=2,
bias=bias,
with_bn=with_bn,
)
self.convbnrelu3_sub1 = conv2DBatchNormRelu(
in_channels=32,
k_size=3,
n_filters=64,
padding=1,
stride=2,
bias=bias,
with_bn=with_bn,
)
self.classification = nn.Conv2d(128, self.n_classes, 1, 1, 0)
# Cascade Feature Fusion Units
self.cff_sub24 = cascadeFeatureFusion(
self.n_classes, 256, 256, 128, with_bn=with_bn
)
self.cff_sub12 = cascadeFeatureFusion(
self.n_classes, 128, 64, 128, with_bn=with_bn
)
# Define auxiliary loss function
self.loss = multi_scale_cross_entropy2d
def forward(self, x):
h, w = x.shape[2:]
# H, W -> H/2, W/2
x_sub2 = interp(x, output_size=get_interp_size(x, s_factor=2))
# H/2, W/2 -> H/4, W/4
x_sub2 = self.convbnrelu1_1(x_sub2)
x_sub2 = self.convbnrelu1_2(x_sub2)
x_sub2 = self.convbnrelu1_3(x_sub2)
# H/4, W/4 -> H/8, W/8
x_sub2 = F.max_pool2d(x_sub2, 3, 2, 1)
# H/8, W/8 -> H/16, W/16
x_sub2 = self.res_block2(x_sub2)
x_sub2 = self.res_block3_conv(x_sub2)
# H/16, W/16 -> H/32, W/32
x_sub4 = interp(x_sub2, output_size=get_interp_size(x_sub2, s_factor=2))
x_sub4 = self.res_block3_identity(x_sub4)
x_sub4 = self.res_block4(x_sub4)
x_sub4 = self.res_block5(x_sub4)
x_sub4 = self.pyramid_pooling(x_sub4)
x_sub4 = self.conv5_4_k1(x_sub4)
x_sub1 = self.convbnrelu1_sub1(x)
x_sub1 = self.convbnrelu2_sub1(x_sub1)
x_sub1 = self.convbnrelu3_sub1(x_sub1)
x_sub24, sub4_cls = self.cff_sub24(x_sub4, x_sub2)
x_sub12, sub24_cls = self.cff_sub12(x_sub24, x_sub1)
x_sub12 = F.upsample(
x_sub12, size=get_interp_size(x_sub12, z_factor=2), mode="bilinear"
)
sub124_cls = self.classification(x_sub12)
if self.training:
return sub4_cls, sub24_cls, sub124_cls
else: # eval mode
sub124_cls = F.upsample(
sub124_cls,
size=get_interp_size(sub124_cls, z_factor=4),
mode="bilinear",
) # Test only
return sub124_cls
def load_pretrained_model(self, model_path):
"""
Load weights from caffemodel w/o caffe dependency
and plug them in corresponding modules
"""
# My eyes and my heart both hurt when writing this method
# Only care about layer_types that have trainable parameters
ltypes = [
"BNData",
"ConvolutionData",
"HoleConvolutionData",
"Convolution",
] # Convolution type for conv3_sub1_proj
def _get_layer_params(layer, ltype):
if ltype == "BNData":
gamma = np.array(layer.blobs[0].data)
beta = np.array(layer.blobs[1].data)
mean = np.array(layer.blobs[2].data)
var = np.array(layer.blobs[3].data)
return [mean, var, gamma, beta]
elif ltype in ["ConvolutionData", "HoleConvolutionData", "Convolution"]:
is_bias = layer.convolution_param.bias_term
weights = np.array(layer.blobs[0].data)
bias = []
if is_bias:
bias = np.array(layer.blobs[1].data)
return [weights, bias]
elif ltype == "InnerProduct":
raise Exception(
"Fully connected layers {}, not supported".format(ltype)
)
else:
raise Exception("Unkown layer type {}".format(ltype))
net = caffe_pb2.NetParameter()
with open(model_path, "rb") as model_file:
net.MergeFromString(model_file.read())
# dict formatted as -> key:<layer_name> :: value:<layer_type>
layer_types = {}
# dict formatted as -> key:<layer_name> :: value:[<list_of_params>]
layer_params = {}
for l in net.layer:
lname = l.name
ltype = l.type
lbottom = l.bottom
ltop = l.top
if ltype in ltypes:
print("Processing layer {} | {}, {}".format(lname, lbottom, ltop))
layer_types[lname] = ltype
layer_params[lname] = _get_layer_params(l, ltype)
# if len(l.blobs) > 0:
# print(lname, ltype, lbottom, ltop, len(l.blobs))
# Set affine=False for all batchnorm modules
def _no_affine_bn(module=None):
if isinstance(module, nn.BatchNorm2d):
module.affine = False
if len([m for m in module.children()]) > 0:
for child in module.children():
_no_affine_bn(child)
# _no_affine_bn(self)
def _transfer_conv(layer_name, module):
weights, bias = layer_params[layer_name]
w_shape = np.array(module.weight.size())
print(
"CONV {}: Original {} and trans weights {}".format(
layer_name, w_shape, weights.shape
)
)
module.weight.data.copy_(torch.from_numpy(weights).view_as(module.weight))
if len(bias) != 0:
b_shape = np.array(module.bias.size())
print(
"CONV {}: Original {} and trans bias {}".format(
layer_name, b_shape, bias.shape
)
)
module.bias.data.copy_(torch.from_numpy(bias).view_as(module.bias))
def _transfer_bn(conv_layer_name, bn_module):
mean, var, gamma, beta = layer_params[conv_layer_name + "/bn"]
print(
"BN {}: Original {} and trans weights {}".format(
conv_layer_name, bn_module.running_mean.size(), mean.shape
)
)
bn_module.running_mean.copy_(
torch.from_numpy(mean).view_as(bn_module.running_mean)
)
bn_module.running_var.copy_(
torch.from_numpy(var).view_as(bn_module.running_var)
)
bn_module.weight.data.copy_(
torch.from_numpy(gamma).view_as(bn_module.weight)
)
bn_module.bias.data.copy_(torch.from_numpy(beta).view_as(bn_module.bias))
def _transfer_conv_bn(conv_layer_name, mother_module):
conv_module = mother_module[0]
_transfer_conv(conv_layer_name, conv_module)
if conv_layer_name + "/bn" in layer_params.keys():
bn_module = mother_module[1]
_transfer_bn(conv_layer_name, bn_module)
def _transfer_residual(block_name, block):
block_module, n_layers = block[0], block[1]
prefix = block_name[:5]
if ("bottleneck" in block_name) or (
"identity" not in block_name
): # Conv block
bottleneck = block_module.layers[0]
bottleneck_conv_bn_dic = {
prefix + "_1_1x1_reduce": bottleneck.cbr1.cbr_unit,
prefix + "_1_3x3": bottleneck.cbr2.cbr_unit,
prefix + "_1_1x1_proj": bottleneck.cb4.cb_unit,
prefix + "_1_1x1_increase": bottleneck.cb3.cb_unit,
}
for k, v in bottleneck_conv_bn_dic.items():
_transfer_conv_bn(k, v)
if ("identity" in block_name) or (
"bottleneck" not in block_name
): # Identity blocks
base_idx = 2 if "identity" in block_name else 1
for layer_idx in range(2, n_layers + 1):
residual_layer = block_module.layers[layer_idx - base_idx]
residual_conv_bn_dic = {
"_".join(
map(str, [prefix, layer_idx, "1x1_reduce"])
): residual_layer.cbr1.cbr_unit,
"_".join(
map(str, [prefix, layer_idx, "3x3"])
): residual_layer.cbr2.cbr_unit,
"_".join(
map(str, [prefix, layer_idx, "1x1_increase"])
): residual_layer.cb3.cb_unit,
}
for k, v in residual_conv_bn_dic.items():
_transfer_conv_bn(k, v)
convbn_layer_mapping = {
"conv1_1_3x3_s2": self.convbnrelu1_1.cbr_unit,
"conv1_2_3x3": self.convbnrelu1_2.cbr_unit,
"conv1_3_3x3": self.convbnrelu1_3.cbr_unit,
"conv1_sub1": self.convbnrelu1_sub1.cbr_unit,
"conv2_sub1": self.convbnrelu2_sub1.cbr_unit,
"conv3_sub1": self.convbnrelu3_sub1.cbr_unit,
# 'conv5_3_pool6_conv': self.pyramid_pooling.paths[0].cbr_unit,
# 'conv5_3_pool3_conv': self.pyramid_pooling.paths[1].cbr_unit,
# 'conv5_3_pool2_conv': self.pyramid_pooling.paths[2].cbr_unit,
# 'conv5_3_pool1_conv': self.pyramid_pooling.paths[3].cbr_unit,
"conv5_4_k1": self.conv5_4_k1.cbr_unit,
"conv_sub4": self.cff_sub24.low_dilated_conv_bn.cb_unit,
"conv3_1_sub2_proj": self.cff_sub24.high_proj_conv_bn.cb_unit,
"conv_sub2": self.cff_sub12.low_dilated_conv_bn.cb_unit,
"conv3_sub1_proj": self.cff_sub12.high_proj_conv_bn.cb_unit,
}
residual_layers = {
"conv2": [self.res_block2, self.block_config[0]],
"conv3_bottleneck": [self.res_block3_conv, self.block_config[1]],
"conv3_identity": [self.res_block3_identity, self.block_config[1]],
"conv4": [self.res_block4, self.block_config[2]],
"conv5": [self.res_block5, self.block_config[3]],
}
# Transfer weights for all non-residual conv+bn layers
for k, v in convbn_layer_mapping.items():
_transfer_conv_bn(k, v)
# Transfer weights for final non-bn conv layer
_transfer_conv("conv6_cls", self.classification)
_transfer_conv("conv6_sub4", self.cff_sub24.low_classifier_conv)
_transfer_conv("conv6_sub2", self.cff_sub12.low_classifier_conv)
# Transfer weights for all residual layers
for k, v in residual_layers.items():
_transfer_residual(k, v)
def tile_predict(self, imgs, include_flip_mode=True):
"""
Predict by takin overlapping tiles from the image.
Strides are adaptively computed from the imgs shape
and input size
:param imgs: torch.Tensor with shape [N, C, H, W] in BGR format
:param side: int with side length of model input
:param n_classes: int with number of classes in seg output.
"""
side_x, side_y = self.input_size
n_classes = self.n_classes
n_samples, c, h, w = imgs.shape
# n = int(max(h,w) / float(side) + 1)
n_x = int(h / float(side_x) + 1)
n_y = int(w / float(side_y) + 1)
stride_x = (h - side_x) / float(n_x)
stride_y = (w - side_y) / float(n_y)
x_ends = [
[int(i * stride_x), int(i * stride_x) + side_x] for i in range(n_x + 1)
]
y_ends = [
[int(i * stride_y), int(i * stride_y) + side_y] for i in range(n_y + 1)
]
pred = np.zeros([n_samples, n_classes, h, w])
count = np.zeros([h, w])
slice_count = 0
for sx, ex in x_ends:
for sy, ey in y_ends:
slice_count += 1
imgs_slice = imgs[:, :, sx:ex, sy:ey]
if include_flip_mode:
imgs_slice_flip = torch.from_numpy(
np.copy(imgs_slice.cpu().numpy()[:, :, :, ::-1])
).float()
is_model_on_cuda = next(self.parameters()).is_cuda
inp = Variable(imgs_slice, volatile=True)
if include_flip_mode:
flp = Variable(imgs_slice_flip, volatile=True)
if is_model_on_cuda:
inp = inp.cuda()
if include_flip_mode:
flp = flp.cuda()
psub1 = F.softmax(self.forward(inp), dim=1).data.cpu().numpy()
if include_flip_mode:
psub2 = F.softmax(self.forward(flp), dim=1).data.cpu().numpy()
psub = (psub1 + psub2[:, :, :, ::-1]) / 2.0
else:
psub = psub1
pred[:, :, sx:ex, sy:ey] = psub
count[sx:ex, sy:ey] += 1.0
score = (pred / count[None, None, ...]).astype(np.float32)
return score / np.expand_dims(score.sum(axis=1), axis=1)
# For Testing Purposes only
if __name__ == "__main__":
cd = 0
import os
from torch.autograd import Variable
import matplotlib.pyplot as plt
import scipy.misc as m
from semseg.loader.cityscapes_loader import cityscapesLoader as cl
ic = icnet(version="cityscapes", with_bn=False)
# Just need to do this one time
caffemodel_dir_path = "PATH_TO_ICNET_DIR/evaluation/model"
ic.load_pretrained_model(
model_path=os.path.join(
caffemodel_dir_path, "icnet_cityscapes_train_30k.caffemodel"
)
)
# ic.load_pretrained_model(model_path=os.path.join(caffemodel_dir_path, 'icnet_cityscapes_train_30k_bnnomerge.caffemodel'))
# ic.load_pretrained_model(model_path=os.path.join(caffemodel_dir_path, 'icnet_cityscapes_trainval_90k.caffemodel'))
# ic.load_pretrained_model(model_path=os.path.join(caffemodel_dir_path, 'icnet_cityscapes_trainval_90k_bnnomerge.caffemodel'))
# ic.load_state_dict(torch.load('ic.pth'))
ic.float()
ic.cuda(cd)
ic.eval()
dataset_root_dir = "PATH_TO_CITYSCAPES_DIR"
dst = cl(root=dataset_root_dir)
img = m.imread(
os.path.join(
dataset_root_dir,
"leftImg8bit/demoVideo/stuttgart_00/stuttgart_00_000000_000010_leftImg8bit.png",
)
)
m.imsave("test_input.png", img)
orig_size = img.shape[:-1]
img = m.imresize(img, ic.input_size) # uint8 with RGB mode
img = img.transpose(2, 0, 1)
img = img.astype(np.float64)
img -= np.array([123.68, 116.779, 103.939])[:, None, None]
img = np.copy(img[::-1, :, :])
img = torch.from_numpy(img).float()
img = img.unsqueeze(0)
out = ic.tile_predict(img)
pred = np.argmax(out, axis=1)[0]
pred = pred.astype(np.float32)
pred = m.imresize(pred, orig_size, "nearest", mode="F") # float32 with F mode
decoded = dst.decode_segmap(pred)
m.imsave("test_output.png", decoded)
# m.imsave('test_output.png', pred)
checkpoints_dir_path = "checkpoints"
if not os.path.exists(checkpoints_dir_path):
os.mkdir(checkpoints_dir_path)
ic = torch.nn.DataParallel(ic, device_ids=range(torch.cuda.device_count()))
state = {"model_state": ic.state_dict()}
torch.save(
state, os.path.join(checkpoints_dir_path, "icnet_cityscapes_train_30k.pth")
)
# torch.save(state, os.path.join(checkpoints_dir_path, "icnetBN_cityscapes_train_30k.pth"))
# torch.save(state, os.path.join(checkpoints_dir_path, "icnet_cityscapes_trainval_90k.pth"))
# torch.save(state, os.path.join(checkpoints_dir_path, "icnetBN_cityscapes_trainval_90k.pth"))
print("Output Shape {} \t Input Shape {}".format(out.shape, img.shape))
| [
"torch.autograd.Variable",
"torch.from_numpy",
"torch.cuda.device_count",
"torch.nn.Conv2d"
] | 0.4.1 | ManuelFritsche/flow-consistency | 90625fe25855aa11c6245ca242ab8d66c41f4726 |
0.4 | import numpy as np
import torch
from torch.autograd import Variable
from .get_nets import PNet, RNet, ONet
from .box_utils import nms, calibrate_box, get_image_boxes, convert_to_square
from .first_stage import run_first_stage
def detect_faces(image, min_face_size=20.0,
thresholds=[0.6, 0.7, 0.8],
nms_thresholds=[0.7, 0.7, 0.7]):
"""
Arguments:
image: an instance of PIL.Image.
min_face_size: a float number.
thresholds: a list of length 3.
nms_thresholds: a list of length 3.
Returns:
two float numpy arrays of shapes [n_boxes, 4] and [n_boxes, 10],
bounding boxes and facial landmarks.
"""
# LOAD MODELS
pnet = PNet()
rnet = RNet()
onet = ONet()
onet.eval()
# BUILD AN IMAGE PYRAMID
width, height = image.size
min_length = min(height, width)
min_detection_size = 12
factor = 0.707 # sqrt(0.5)
# scales for scaling the image
scales = []
    # scale the image so that the
    # minimum size that we can detect equals
    # the minimum face size that we want to detect
m = min_detection_size/min_face_size
min_length *= m
factor_count = 0
while min_length > min_detection_size:
scales.append(m*factor**factor_count)
min_length *= factor
factor_count += 1
# STAGE 1
# it will be returned
bounding_boxes = []
# run P-Net on different scales
for s in scales:
boxes = run_first_stage(image, pnet, scale=s, threshold=thresholds[0])
bounding_boxes.append(boxes)
# collect boxes (and offsets, and scores) from different scales
bounding_boxes = [i for i in bounding_boxes if i is not None]
bounding_boxes = np.vstack(bounding_boxes)
keep = nms(bounding_boxes[:, 0:5], nms_thresholds[0])
bounding_boxes = bounding_boxes[keep]
# use offsets predicted by pnet to transform bounding boxes
bounding_boxes = calibrate_box(bounding_boxes[:, 0:5], bounding_boxes[:, 5:])
# shape [n_boxes, 5]
bounding_boxes = convert_to_square(bounding_boxes)
bounding_boxes[:, 0:4] = np.round(bounding_boxes[:, 0:4])
# STAGE 2
img_boxes = get_image_boxes(bounding_boxes, image, size=24)
img_boxes = Variable(torch.FloatTensor(img_boxes), volatile=True)
output = rnet(img_boxes)
offsets = output[0].data.numpy() # shape [n_boxes, 4]
probs = output[1].data.numpy() # shape [n_boxes, 2]
keep = np.where(probs[:, 1] > thresholds[1])[0]
bounding_boxes = bounding_boxes[keep]
bounding_boxes[:, 4] = probs[keep, 1].reshape((-1,))
offsets = offsets[keep]
keep = nms(bounding_boxes, nms_thresholds[1])
bounding_boxes = bounding_boxes[keep]
bounding_boxes = calibrate_box(bounding_boxes, offsets[keep])
bounding_boxes = convert_to_square(bounding_boxes)
bounding_boxes[:, 0:4] = np.round(bounding_boxes[:, 0:4])
# STAGE 3
img_boxes = get_image_boxes(bounding_boxes, image, size=48)
if len(img_boxes) == 0:
return [], []
img_boxes = Variable(torch.FloatTensor(img_boxes), volatile=True)
output = onet(img_boxes)
landmarks = output[0].data.numpy() # shape [n_boxes, 10]
offsets = output[1].data.numpy() # shape [n_boxes, 4]
probs = output[2].data.numpy() # shape [n_boxes, 2]
keep = np.where(probs[:, 1] > thresholds[2])[0]
bounding_boxes = bounding_boxes[keep]
bounding_boxes[:, 4] = probs[keep, 1].reshape((-1,))
offsets = offsets[keep]
landmarks = landmarks[keep]
# compute landmark points
width = bounding_boxes[:, 2] - bounding_boxes[:, 0] + 1.0
height = bounding_boxes[:, 3] - bounding_boxes[:, 1] + 1.0
xmin, ymin = bounding_boxes[:, 0], bounding_boxes[:, 1]
landmarks[:, 0:5] = np.expand_dims(xmin, 1) + np.expand_dims(width, 1)*landmarks[:, 0:5]
landmarks[:, 5:10] = np.expand_dims(ymin, 1) + np.expand_dims(height, 1)*landmarks[:, 5:10]
bounding_boxes = calibrate_box(bounding_boxes, offsets)
keep = nms(bounding_boxes, nms_thresholds[2], mode='min')
bounding_boxes = bounding_boxes[keep]
landmarks = landmarks[keep]
return bounding_boxes, landmarks
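# Usage sketch added for illustration (not part of the original module):
# "face.jpg" is a made-up path; any RGB PIL image works here.
def _detect_faces_example():
    from PIL import Image
    image = Image.open("face.jpg").convert("RGB")
    bounding_boxes, landmarks = detect_faces(image, min_face_size=20.0)
    # each box row is [x0, y0, x1, y1, score]; each landmark row holds 5 x's then 5 y's
    return bounding_boxes, landmarks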
| [
"torch.FloatTensor"
] | 0.4.0 | prography/ddeep_KYJ_JSY | 2da506cfd9e792a2d391de6f390b8b3b509b6c54 |
1.2 | #! /usr/bin/python
# -*- encoding: utf-8 -*-
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Parameter
from models.ResNetBlocks import *
class ResNetSE(nn.Module):
def __init__(self, block, layers, num_filters, nOut, encoder_type='SAP', **kwargs):
print('Embedding size is %d, encoder %s.'%(nOut, encoder_type))
self.inplanes = num_filters[0]
self.encoder_type = encoder_type
super(ResNetSE, self).__init__()
self.conv1 = nn.Conv2d(1, num_filters[0] , kernel_size=7, stride=(2, 1), padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(num_filters[0])
self.relu = nn.ReLU(inplace=True)
self.maxPool = nn.MaxPool2d(kernel_size=(3, 3), stride=(2, 1), padding=1)
self.layer1 = self._make_layer(block, num_filters[0], layers[0])
self.layer2 = self._make_layer(block, num_filters[1], layers[1], stride=(2, 2))
self.layer3 = self._make_layer(block, num_filters[2], layers[2], stride=(2, 2))
self.layer4 = self._make_layer(block, num_filters[3], layers[3], stride=(2, 2))
self.avgpool = nn.AvgPool2d((9, 1), stride=1)
self.instancenorm = nn.InstanceNorm1d(257)
if self.encoder_type == "SAP":
self.sap_linear = nn.Linear(num_filters[3] * block.expansion, num_filters[3] * block.expansion)
self.attention = self.new_parameter(num_filters[3] * block.expansion, 1)
out_dim = num_filters[3] * block.expansion
else:
raise ValueError('Undefined encoder')
self.fc = nn.Linear(out_dim, nOut)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def new_parameter(self, *size):
out = nn.Parameter(torch.FloatTensor(*size))
nn.init.xavier_normal_(out)
return out
def forward(self, x):
stft = torch.stft(x, 512, hop_length=int(0.01*16000), win_length=int(0.025*16000), window=torch.hann_window(int(0.025*16000)), center=False, normalized=False, onesided=True)
stft = (stft[:,:,:,0].pow(2)+stft[:,:,:,1].pow(2)).pow(0.5)
x = self.instancenorm(stft).unsqueeze(1).detach()
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxPool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
if self.encoder_type == "SAP":
x = x.permute(0, 2, 1, 3)
x = x.squeeze(dim=1).permute(0, 2, 1) # batch * L * D
h = torch.tanh(self.sap_linear(x))
w = torch.matmul(h, self.attention).squeeze(dim=2)
w = F.softmax(w, dim=1).view(x.size(0), x.size(1), 1)
x = torch.sum(x * w, dim=1)
else:
raise ValueError('Undefined encoder')
x = x.view(x.size()[0], -1)
x = self.fc(x)
return x
def ResNetSE34(nOut=256, **kwargs):
# Number of filters
num_filters = [16, 32, 64, 128]
model = ResNetSE(SEBasicBlock, [3, 4, 6, 3], num_filters, nOut, **kwargs)
return model
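# Minimal usage sketch (an addition for illustration, not from the original file):
# the model takes a raw waveform batch; the 16000-sample clip below is an assumed
# one second of 16 kHz audio.
def _resnetse34_example():
    model = ResNetSE34(nOut=256, encoder_type='SAP')
    wav = torch.randn(2, 16000)      # [batch, samples]
    emb = model(wav)                 # [batch, nOut] speaker embeddings
    return emb.shape                 # torch.Size([2, 256])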
| [
"torch.nn.InstanceNorm1d",
"torch.nn.Linear",
"torch.nn.MaxPool2d",
"torch.nn.Sequential",
"torch.nn.AvgPool2d",
"torch.nn.BatchNorm2d",
"torch.FloatTensor",
"torch.nn.init.kaiming_normal_",
"torch.nn.init.constant_",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.functional.softmax",
"torch.nn.init.xavier_normal_",
"torch.matmul",
"torch.sum"
] | 1.2.0 | entn-at/voxceleb_trainer | b288f2a2175ff772647343567395db3b645a2124 |
1.2 | import requests
from torch import Tensor, device
from typing import Tuple, List
from tqdm import tqdm
import sys
import importlib
import os
import torch
import numpy as np
import queue
def pytorch_cos_sim(a: Tensor, b: Tensor):
"""
Computes the cosine similarity cos_sim(a[i], b[j]) for all i and j.
This function can be used as a faster replacement for 1-scipy.spatial.distance.cdist(a,b)
:return: Matrix with res[i][j] = cos_sim(a[i], b[j])
"""
if len(a.shape) == 1:
a = a.unsqueeze(0)
if len(b.shape) == 1:
b = b.unsqueeze(0)
a_norm = a / a.norm(dim=1)[:, None]
b_norm = b / b.norm(dim=1)[:, None]
return torch.mm(a_norm, b_norm.transpose(0, 1))
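# Small worked example (an illustrative addition, not from the original source):
# cosine similarity of two toy embedding matrices with made-up values.
def _cos_sim_example():
    a = torch.tensor([[1.0, 0.0], [0.0, 1.0]])
    b = torch.tensor([[1.0, 1.0]])
    sim = pytorch_cos_sim(a, b)      # shape [2, 1]
    return sim                       # both entries ~0.7071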
def paraphrase_mining(model,
sentences: List[str],
show_progress_bar=False,
batch_size=32,
query_chunk_size: int = 5000,
corpus_chunk_size: int = 100000,
max_pairs: int = 500000,
top_k: int = 100):
"""
Given a list of sentences / texts, this function performs paraphrase mining. It compares all sentences against all
other sentences and returns a list with the pairs that have the highest cosine similarity score.
:param model: SentenceTransformer model for embedding computation
:param sentences: A list of strings (texts or sentences)
:param show_progress_bar: Plotting of a progress bar
:param batch_size: Number of texts that are encoded simultaneously by the model
:param query_chunk_size: Search for most similar pairs for #query_chunk_size at the same time. Decrease, to lower memory footprint (increases run-time).
:param corpus_chunk_size: Compare a sentence simultaneously against #corpus_chunk_size other sentences. Decrease, to lower memory footprint (increases run-time).
:param max_pairs: Maximal number of text pairs returned.
:param top_k: For each sentence, we retrieve up to top_k other sentences
:return: Returns a list of triplets with the format [score, id1, id2]
"""
# Compute embedding for the sentences
embeddings = model.encode(sentences, show_progress_bar=show_progress_bar, batch_size=batch_size,
convert_to_tensor=True)
# Mine for duplicates
pairs = queue.PriorityQueue()
min_score = -1
num_added = 0
for corpus_start_idx in range(0, len(embeddings), corpus_chunk_size):
corpus_end_idx = min(corpus_start_idx + corpus_chunk_size, len(embeddings))
for query_start_idx in range(0, len(embeddings), query_chunk_size):
query_end_idx = min(query_start_idx + query_chunk_size, len(embeddings))
# logging.info("Compute cosine similarities")
cos_scores = pytorch_cos_sim(embeddings[query_start_idx:query_end_idx],
embeddings[corpus_start_idx:corpus_end_idx]).cpu().numpy()
cos_scores = np.nan_to_num(cos_scores)
# logging.info("Sort scores")
cos_score_argpartition = np.argpartition(-cos_scores, min(len(cos_scores)-1, top_k))
# logging.info("Find most similar pairs out of {} queries".format(len(cos_scores)))
for query_itr in range(len(cos_scores)):
for corpus_itr in cos_score_argpartition[query_itr][0:top_k]:
i = query_start_idx + query_itr
j = corpus_start_idx + corpus_itr
if i != j and cos_scores[query_itr][corpus_itr] > min_score:
pairs.put((cos_scores[query_itr][corpus_itr], i, j))
num_added += 1
if num_added >= max_pairs:
entry = pairs.get()
min_score = entry[0]
# Get the pairs
added_pairs = set() # Used for duplicate detection
pairs_list = []
while not pairs.empty():
score, i, j = pairs.get()
id1, id2 = sorted([i, j])
if id1 != id2 and (id1, id2) not in added_pairs:
added_pairs.add((id1, id2))
pairs_list.append([score, id1, id2])
# Highest scores first
pairs_list = sorted(pairs_list, key=lambda x: x[0], reverse=True)
return pairs_list
def information_retrieval(query_embeddings: Tensor,
corpus_embeddings: Tensor,
query_chunk_size: int = 100,
corpus_chunk_size: int = 100000,
top_k: int = 10):
"""
This function performs a cosine similarity search between a list of query embeddings and a list of corpus embeddings.
It can be used for Information Retrieval / Semantic Search for corpora up to about 1 Million entries.
:param query_embeddings: A 2 dimensional tensor with the query embeddings.
:param corpus_embeddings: A 2 dimensional tensor with the corpus embeddings.
:param query_chunk_size: Process 100 queries simultaneously. Increasing that value increases the speed, but requires more memory.
:param corpus_chunk_size: Scans the corpus 100k entries at a time. Increasing that value increases the speed, but requires more memory.
    :param top_k: Retrieve top k matching entries. Note, if your corpus is larger than corpus_chunk_size, |Chunks|*top_k are returned
:return: Returns a sorted list with decreasing cosine similarity scores. Entries are dictionaries with the keys 'corpus_id' and 'score'
"""
if isinstance(query_embeddings, list):
query_embeddings = torch.stack(query_embeddings)
if len(query_embeddings.shape) == 1:
query_embeddings = query_embeddings.unsqueeze(0)
if isinstance(corpus_embeddings, list):
corpus_embeddings = torch.stack(corpus_embeddings)
#Normalize scores, so that the dot-product is equivalent to cosine similarity
query_embeddings = query_embeddings / query_embeddings.norm(dim=1)[:, None]
corpus_embeddings = corpus_embeddings / corpus_embeddings.norm(dim=1)[:, None]
queries_result_list = [[] for _ in range(len(query_embeddings))]
for query_start_idx in range(0, len(query_embeddings), query_chunk_size):
query_end_idx = min(query_start_idx + query_chunk_size, len(query_embeddings))
# Iterate over chunks of the corpus
for corpus_start_idx in range(0, len(corpus_embeddings), corpus_chunk_size):
corpus_end_idx = min(corpus_start_idx + corpus_chunk_size, len(corpus_embeddings))
            # Compute cosine similarities
cos_scores = torch.mm(query_embeddings[query_start_idx:query_end_idx], corpus_embeddings[corpus_start_idx:corpus_end_idx].transpose(0, 1)).cpu().numpy()
cos_scores = np.nan_to_num(cos_scores)
# Partial sort scores
cos_score_argpartition = np.argpartition(-cos_scores, top_k)[:, 0:top_k]
for query_itr in range(len(cos_scores)):
for sub_corpus_id in cos_score_argpartition[query_itr]:
corpus_id = corpus_start_idx + sub_corpus_id
query_id = query_start_idx + query_itr
score = cos_scores[query_itr][sub_corpus_id]
queries_result_list[query_id].append({'corpus_id': corpus_id, 'score': score})
#Sort
for idx in range(len(queries_result_list)):
queries_result_list[idx] = sorted(queries_result_list[idx], key=lambda x: x['score'], reverse=True)
return queries_result_list
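# Usage sketch added for illustration; the random embeddings below stand in for
# real encoder output and are not part of the original module.
def _information_retrieval_example():
    corpus_embeddings = torch.randn(1000, 64)
    query_embeddings = torch.randn(3, 64)
    hits = information_retrieval(query_embeddings, corpus_embeddings, top_k=5)
    # hits[i] is a list of {'corpus_id': ..., 'score': ...} sorted by decreasing score
    return [h[0]['corpus_id'] for h in hits]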
def http_get(url, path):
"""
Downloads a URL to a given path on disc
"""
req = requests.get(url, stream=True)
if req.status_code != 200:
print("Exception when trying to download {}. Response {}".format(url, req.status_code), file=sys.stderr)
req.raise_for_status()
return
download_filepath = path+"_part"
with open(download_filepath, "wb") as file_binary:
content_length = req.headers.get('Content-Length')
total = int(content_length) if content_length is not None else None
progress = tqdm(unit="B", total=total, unit_scale=True)
for chunk in req.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
progress.update(len(chunk))
file_binary.write(chunk)
os.rename(download_filepath, path)
progress.close()
def batch_to_device(batch, target_device: device):
"""
send a pytorch batch to a device (CPU/GPU)
"""
features = batch['features']
for paired_sentence_idx in range(len(features)):
for feature_name in features[paired_sentence_idx]:
features[paired_sentence_idx][feature_name] = features[paired_sentence_idx][feature_name].to(target_device)
labels = batch['labels'].to(target_device)
return features, labels
def fullname(o):
"""
Gives a full name (package_name.class_name) for a class / object in Python. Will
be used to load the correct classes from JSON files
"""
module = o.__class__.__module__
if module is None or module == str.__class__.__module__:
return o.__class__.__name__ # Avoid reporting __builtin__
else:
return module + '.' + o.__class__.__name__
def import_from_string(dotted_path):
"""
Import a dotted module path and return the attribute/class designated by the
last name in the path. Raise ImportError if the import failed.
"""
try:
module_path, class_name = dotted_path.rsplit('.', 1)
except ValueError:
msg = "%s doesn't look like a module path" % dotted_path
raise ImportError(msg)
module = importlib.import_module(module_path)
try:
return getattr(module, class_name)
except AttributeError:
msg = 'Module "%s" does not define a "%s" attribute/class' % (module_path, class_name)
raise ImportError(msg) | [
"torch.stack"
] | 1.2.0 | azdaly/sentence-transformers | d365d14e6eb3a79b7589c6404020833d5bda7322 |
1.0 | import torch
from torch.autograd import Variable
import time
import numpy as np
from tqdm import tqdm
import matplotlib.pyplot as plt
def compute_test_time(network_class, input_size, max_batch_size, step_size=1, is_cuda=False):
backend = "cpu"
if is_cuda:
backend = "cuda"
model = network_class()
if is_cuda:
model = model.cuda()
model.eval()
time_log = []
    # make sure that everything is in memory before the actual tests
batch = Variable(torch.FloatTensor(1, *input_size))
if is_cuda:
batch = batch.cuda()
model(batch)
print("Compute {} test time".format(backend))
for i in tqdm(range(0, max_batch_size, step_size)):
batch = Variable(torch.FloatTensor(i+1, *input_size))
if is_cuda:
batch = batch.cuda()
time_start = time.time()
model(batch)
time_log.append(time.time() - time_start)
plt.plot(np.arange(1, max_batch_size + 1, step_size), time_log)
plt.title("{} test time w.r.t minibatch size".format(backend))
plt.ylabel("Time (s)")
plt.xlabel("Batch size")
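# Example call (an illustrative sketch, not part of the original file): SimpleNet
# below is a made-up stand-in for any nn.Module with a zero-argument constructor.
def _timing_example():
    import torch.nn as nn

    class SimpleNet(nn.Module):
        def __init__(self):
            super(SimpleNet, self).__init__()
            self.fc = nn.Linear(3 * 32 * 32, 10)

        def forward(self, x):
            return self.fc(x.view(x.size(0), -1))

    compute_test_time(SimpleNet, input_size=(3, 32, 32), max_batch_size=16,
                      step_size=4, is_cuda=False)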
def compute_train_time(network_class, input_size, max_batch_size, step_size=1, is_cuda=False, backward_only=False):
backend = "cpu"
if is_cuda:
backend = "cuda"
model = network_class()
if is_cuda:
model = model.cuda()
model.train()
time_log = []
    # make sure that everything is in memory before the actual tests
batch = Variable(torch.FloatTensor(1, *input_size))
if is_cuda:
batch = batch.cuda()
model(batch)
    print("Compute {} train time".format(backend))
for i in tqdm(range(0, max_batch_size, step_size)):
batch = Variable(torch.FloatTensor(i+1, *input_size))
if is_cuda:
batch = batch.cuda()
time_start = time.time()
prediction = model(batch)
out = torch.sum(prediction)
if backward_only:
time_start = time.time()
out.backward()
time_log.append(time.time() - time_start)
plt.plot(np.arange(1, max_batch_size + 1, step_size), time_log)
plt.title("{} train time w.r.t minibatch size".format(backend))
plt.ylabel("Time (s)")
plt.xlabel("Batch size") | [
"torch.FloatTensor",
"torch.sum"
] | 1.0.1 | MathGaron/pytorch_toolbox | 2afd13e50ba71dfce66467a4b070d9b922668502 |
1.1 | import torch
from overrides import overrides
from reclib.training.metrics.average import Average
from reclib.training.metrics.metric import Metric
@Metric.register("perplexity")
class Perplexity(Average):
"""
Perplexity is a common metric used for evaluating how well a language model
predicts a sample.
Notes
-----
Assumes negative log likelihood loss of each batch (base e). Provides the
average perplexity of the batches.
"""
@overrides
def get_metric(self, reset: bool = False) -> float:
"""
Returns
-------
The accumulated perplexity.
"""
average_loss = super().get_metric(reset)
if average_loss == 0:
return 0.
# Exponentiate the loss to compute perplexity
return float(torch.exp(average_loss))
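# Worked example (an illustrative addition): perplexity is just exp of the
# average negative log likelihood, so a mean loss of ln(50) maps to a
# perplexity of 50.
def _perplexity_example():
    average_loss = torch.log(torch.tensor(50.0))
    return float(torch.exp(average_loss))    # ~50.0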
| [
"torch.exp"
] | 1.1.0 | tingkai-zhang/reclib | 3c56dd7f811ab4d4f9f692efd0ee5e171a5f818b |
0.4 | # coding: utf-8
import torch
from joeynmt.constants import PAD_TOKEN
from joeynmt.helpers import load_data, arrays_to_sentences, bpe_postprocess, \
load_config, get_latest_checkpoint, make_data_iter, \
load_model_from_checkpoint, store_attention_plots
from joeynmt.metrics import bleu, chrf, token_accuracy, sequence_accuracy
from joeynmt.model import build_model
from joeynmt.batch import Batch
def validate_on_data(model, data, batch_size, use_cuda, max_output_length,
level, eval_metric, criterion, beam_size=0, beam_alpha=-1):
"""
Generate translations for the given data.
If `criterion` is not None and references are given, also compute the loss.
:param model:
:param data:
:param batch_size:
:param use_cuda:
:param max_output_length:
:param level:
:param eval_metric:
:param criterion:
:param beam_size:
:param beam_alpha:
:return:
"""
valid_iter = make_data_iter(dataset=data, batch_size=batch_size,
shuffle=False, train=False)
valid_sources_raw = [s for s in data.src]
pad_index = model.src_vocab.stoi[PAD_TOKEN]
# disable dropout
model.eval()
# don't track gradients during validation
with torch.no_grad():
all_outputs = []
valid_attention_scores = []
total_loss = 0
total_ntokens = 0
for valid_i, valid_batch in enumerate(iter(valid_iter), 1):
# run as during training to get validation loss (e.g. xent)
batch = Batch(valid_batch, pad_index, use_cuda=use_cuda)
# sort batch now by src length and keep track of order
sort_reverse_index = batch.sort_by_src_lengths()
# TODO save computation: forward pass is computed twice
# run as during training with teacher forcing
if criterion is not None and batch.trg is not None:
batch_loss = model.get_loss_for_batch(
batch, criterion=criterion)
total_loss += batch_loss
total_ntokens += batch.ntokens
# run as during inference to produce translations
output, attention_scores = model.run_batch(
batch=batch, beam_size=beam_size, beam_alpha=beam_alpha,
max_output_length=max_output_length)
# sort outputs back to original order
all_outputs.extend(output[sort_reverse_index])
valid_attention_scores.extend(
attention_scores[sort_reverse_index]
if attention_scores is not None else [])
assert len(all_outputs) == len(data)
if criterion is not None and total_ntokens > 0:
# total validation loss
valid_loss = total_loss
# exponent of token-level negative log prob
valid_ppl = torch.exp(total_loss / total_ntokens)
else:
valid_loss = -1
valid_ppl = -1
# decode back to symbols
decoded_valid = arrays_to_sentences(arrays=all_outputs,
vocabulary=model.trg_vocab,
cut_at_eos=True)
# evaluate with metric on full dataset
join_char = " " if level in ["word", "bpe"] else ""
valid_sources = [join_char.join(s) for s in data.src]
valid_references = [join_char.join(t) for t in data.trg]
valid_hypotheses = [join_char.join(t) for t in decoded_valid]
# post-process
if level == "bpe":
valid_sources = [bpe_postprocess(s) for s in valid_sources]
valid_references = [bpe_postprocess(v)
for v in valid_references]
valid_hypotheses = [bpe_postprocess(v) for
v in valid_hypotheses]
# if references are given, evaluate against them
if len(valid_references) > 0:
assert len(valid_hypotheses) == len(valid_references)
current_valid_score = 0
if eval_metric.lower() == 'bleu':
# this version does not use any tokenization
current_valid_score = bleu(valid_hypotheses, valid_references)
elif eval_metric.lower() == 'chrf':
current_valid_score = chrf(valid_hypotheses, valid_references)
elif eval_metric.lower() == 'token_accuracy':
current_valid_score = token_accuracy(valid_hypotheses,
valid_references, level=level)
elif eval_metric.lower() == 'sequence_accuracy':
current_valid_score = sequence_accuracy(valid_hypotheses,
valid_references)
else:
current_valid_score = -1
return current_valid_score, valid_loss, valid_ppl, valid_sources, \
valid_sources_raw, valid_references, valid_hypotheses, \
decoded_valid, \
valid_attention_scores
def test(cfg_file,
ckpt: str = None,
output_path: str = None,
save_attention: bool = False):
"""
Main test function. Handles loading a model from checkpoint, generating
translations and storing them and attention plots.
:param cfg_file:
:param ckpt:
:param output_path:
:param save_attention:
:return:
"""
cfg = load_config(cfg_file)
if "test" not in cfg["data"].keys():
raise ValueError("Test data must be specified in config.")
    # when checkpoint is not specified, take latest from model dir
if ckpt is None:
dir = cfg["training"]["model_dir"]
ckpt = get_latest_checkpoint(dir)
try:
step = ckpt.split(dir+"/")[1].split(".ckpt")[0]
except IndexError:
step = "best"
batch_size = cfg["training"]["batch_size"]
use_cuda = cfg["training"].get("use_cuda", False)
level = cfg["data"]["level"]
eval_metric = cfg["training"]["eval_metric"]
max_output_length = cfg["training"].get("max_output_length", None)
# load the data
# TODO load only test data
train_data, dev_data, test_data, src_vocab, trg_vocab = \
load_data(cfg=cfg)
# TODO specify this differently
data_to_predict = {"dev": dev_data, "test": test_data}
# load model state from disk
model_checkpoint = load_model_from_checkpoint(ckpt, use_cuda=use_cuda)
# build model and load parameters into it
model = build_model(cfg["model"], src_vocab=src_vocab, trg_vocab=trg_vocab)
model.load_state_dict(model_checkpoint["model_state"])
if use_cuda:
model.cuda()
# whether to use beam search for decoding, 0: greedy decoding
if "testing" in cfg.keys():
beam_size = cfg["testing"].get("beam_size", 0)
beam_alpha = cfg["testing"].get("alpha", -1)
else:
beam_size = 0
beam_alpha = -1
for data_set_name, data_set in data_to_predict.items():
score, loss, ppl, sources, sources_raw, references, hypotheses, \
hypotheses_raw, attention_scores = validate_on_data(
model, data=data_set, batch_size=batch_size, level=level,
max_output_length=max_output_length, eval_metric=eval_metric,
use_cuda=use_cuda, criterion=None, beam_size=beam_size,
beam_alpha=beam_alpha)
if "trg" in data_set.fields:
decoding_description = "Greedy decoding" if beam_size == 0 else \
"Beam search decoding with beam size = {} and alpha = {}".format(
beam_size, beam_alpha)
print("{:4s} {}: {} [{}]".format(
data_set_name, eval_metric, score, decoding_description))
else:
print("No references given for {} -> no evaluation.".format(
data_set_name))
if attention_scores is not None and save_attention:
attention_path = "{}/{}.{}.att".format(dir, data_set_name, step)
print("Attention plots saved to: {}.xx".format(attention_path))
store_attention_plots(attentions=attention_scores,
targets=hypotheses_raw,
sources=[s for s in data_set.src],
idx=range(len(hypotheses)),
output_prefix=attention_path)
if output_path is not None:
output_path_set = "{}.{}".format(output_path, data_set_name)
with open(output_path_set, mode="w", encoding="utf-8") as f:
for h in hypotheses:
f.write(h + "\n")
print("Translations saved to: {}".format(output_path_set)) | [
"torch.no_grad",
"torch.exp"
] | 0.4.1 | MStaniek/joeynmt | a3151cec04ace0921bb36f44abf6ea17dbe3bde6 |
1.4 | import torch
import sys
sys.path.append('..')
# def collate(batch):
# """A custom collate function for dealing with batches of features that have a different number of associated targets
# (action instances).
# """
# max_len = max([len(feat) for feat,_,_ in batch])
#
# features = []
# targets = []
# idxs = []
#
# for feat, label, idx in batch:
# features.append(feat)
# targets.append(label)
# idxs.append(idx)
#
# return torch.stack(features, 0), targets, idxs
def pad_sequence(sequences, max_len, batch_first=False, padding_value=0.0):
# type: (List[Tensor], bool, float) -> Tensor
    # assuming trailing dimensions and type of all the Tensors
    # in sequences are the same and fetching those from sequences[0]
max_size = sequences[0].size()
# print('max_size is {}'.format(max_size))
trailing_dims = max_size[1:]
if max_len == 0:
max_len = max([s.size(0) for s in sequences])
if batch_first:
out_dims = (len(sequences), max_len) + trailing_dims
else:
out_dims = (max_len, len(sequences)) + trailing_dims
# print('out_dims {}'.format(out_dims))
out_tensor = sequences[0].new_full(out_dims, padding_value)
# print('out_tensor.size() {}'.format(out_tensor.size()))
for i, tensor in enumerate(sequences):
length = min(tensor.size(0), max_len)
# use index notation to prevent duplicate references to the tensor
if batch_first:
out_tensor[i, :length, ...] = tensor[:length]
else:
out_tensor[:length, i, ...] = tensor[:length]
return out_tensor
def collate_fn_padd(batch, max_len=0):
'''
    Pads a batch of variable-length sequences.
    note: it converts things to tensors manually here since the ToTensor transform
    assumes it takes in images rather than arbitrary tensors.
'''
## padd
features = []
targets = []
idxs = []
lengths = []
# max_len = 0
# print(batch)
for l, t, label, idx in batch:
features.append(torch.Tensor(t))
targets.append(label)
idxs.append(idx)
if max_len == 0:
lengths.append(l)
else:
lengths.append(min(l, max_len))
batch = pad_sequence(features, max_len, True)
## compute mask
mask = (batch != 0).float()
return batch, torch.tensor(targets), idxs, torch.tensor(lengths), mask[..., 0:1] | [
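# Usage sketch (an addition for illustration; the toy batch below is made up):
# variable-length [T, D] features are padded into one [B, max_T, D] batch.
def _collate_example():
    batch = [
        (5, torch.randn(5, 8).numpy(), 0, 'vid_a'),
        (3, torch.randn(3, 8).numpy(), 1, 'vid_b'),
    ]
    feats, targets, idxs, lengths, mask = collate_fn_padd(batch, max_len=0)
    return feats.shape, lengths.tolist()     # (torch.Size([2, 5, 8]), [5, 3])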
"torch.Tensor",
"torch.tensor"
] | 1.4.0 | June01/WFSAL-icmr21 | 86fd6e9e34483ea17e088e4c1ee8f66edf3aecce |
1.6 | from math import pi, log
from functools import wraps
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange, repeat
# helpers
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def cache_fn(f):
cache = None
@wraps(f)
def cached_fn(*args, _cache = True, **kwargs):
if not _cache:
return f(*args, **kwargs)
nonlocal cache
if cache is not None:
return cache
cache = f(*args, **kwargs)
return cache
return cached_fn
def fourier_encode(x, max_freq, num_bands = 4, base = 2):
x = x.unsqueeze(-1)
device, dtype, orig_x = x.device, x.dtype, x
scales = torch.logspace(0., log(max_freq / 2) / log(base), num_bands, base = base, device = device, dtype = dtype)
scales = scales[(*((None,) * (len(x.shape) - 1)), Ellipsis)]
x = x * scales * pi
x = torch.cat([x.sin(), x.cos()], dim=-1)
x = torch.cat((x, orig_x), dim = -1)
return x
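# Shape sketch (an illustrative addition): each coordinate expands into
# 2*num_bands + 1 values (sines, cosines, and the raw coordinate).
def _fourier_encode_example():
    pos = torch.linspace(-1., 1., steps=4)              # [4]
    enc = fourier_encode(pos, max_freq=10., num_bands=6)
    return enc.shape                                     # torch.Size([4, 13])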
# helper classes
class PreNorm(nn.Module):
def __init__(self, dim, fn, context_dim = None):
super().__init__()
self.fn = fn
self.norm = nn.LayerNorm(dim)
self.norm_context = nn.LayerNorm(context_dim) if exists(context_dim) else None
def forward(self, x, **kwargs):
x = self.norm(x)
if exists(self.norm_context):
context = kwargs['context']
normed_context = self.norm_context(context)
kwargs.update(context = normed_context)
return self.fn(x, **kwargs)
class GEGLU(nn.Module):
def forward(self, x):
x, gates = x.chunk(2, dim = -1)
return x * F.gelu(gates)
class FeedForward(nn.Module):
def __init__(self, dim, mult = 4, dropout = 0.):
super().__init__()
self.net = nn.Sequential(
nn.Linear(dim, dim * mult * 2),
GEGLU(),
nn.Dropout(dropout),
nn.Linear(dim * mult, dim)
)
def forward(self, x):
return self.net(x)
class Attention(nn.Module):
def __init__(self, query_dim, context_dim = None, heads = 8, dim_head = 64, dropout = 0.):
super().__init__()
inner_dim = dim_head * heads
context_dim = default(context_dim, query_dim)
self.scale = dim_head ** -0.5
self.heads = heads
self.to_q = nn.Linear(query_dim, inner_dim, bias = False)
self.to_kv = nn.Linear(context_dim, inner_dim * 2, bias = False)
self.to_out = nn.Sequential(
nn.Linear(inner_dim, query_dim),
nn.Dropout(dropout)
)
def forward(self, x, context = None, mask = None):
h = self.heads
q = self.to_q(x)
context = default(context, x)
k, v = self.to_kv(context).chunk(2, dim = -1)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h = h), (q, k, v))
sim = einsum('b i d, b j d -> b i j', q, k) * self.scale
if exists(mask):
mask = rearrange(mask, 'b ... -> b (...)')
max_neg_value = -torch.finfo(sim.dtype).max
mask = repeat(mask, 'b j -> (b h) () j', h = h)
sim.masked_fill_(~mask, max_neg_value)
# attention, what we cannot get enough of
attn = sim.softmax(dim = -1)
out = einsum('b i j, b j d -> b i d', attn, v)
out = rearrange(out, '(b h) n d -> b n (h d)', h = h)
return self.to_out(out)
# main class
class Perceiver(nn.Module):
def __init__(
self,
*,
num_freq_bands,
depth,
max_freq,
freq_base = 2,
input_channels = 3,
input_axis = 2,
num_latents = 512,
latent_dim = 512,
cross_heads = 1,
latent_heads = 8,
cross_dim_head = 64,
latent_dim_head = 64,
num_classes = 1000,
attn_dropout = 0.,
ff_dropout = 0.,
weight_tie_layers = False,
fourier_encode_data = True,
self_per_cross_attn = 1
):
"""The shape of the final attention mechanism will be:
depth * (cross attention -> self_per_cross_attn * self attention)
Args:
num_freq_bands: Number of freq bands, with original value (2 * K + 1)
depth: Depth of net.
max_freq: Maximum frequency, hyperparameter depending on how
fine the data is.
freq_base: Base for the frequency
input_channels: Number of channels for each token of the input.
input_axis: Number of axes for input data (2 for images, 3 for video)
num_latents: Number of latents, or induced set points, or centroids.
Different papers giving it different names.
latent_dim: Latent dimension.
cross_heads: Number of heads for cross attention. Paper said 1.
latent_heads: Number of heads for latent self attention, 8.
cross_dim_head: Number of dimensions per cross attention head.
latent_dim_head: Number of dimensions per latent self attention head.
num_classes: Output number of classes.
attn_dropout: Attention dropout
ff_dropout: Feedforward dropout
weight_tie_layers: Whether to weight tie layers (optional).
fourier_encode_data: Whether to auto-fourier encode the data, using
the input_axis given. defaults to True, but can be turned off
if you are fourier encoding the data yourself.
self_per_cross_attn: Number of self attention blocks per cross attn.
"""
super().__init__()
self.input_axis = input_axis
self.max_freq = max_freq
self.num_freq_bands = num_freq_bands
self.freq_base = freq_base
self.fourier_encode_data = fourier_encode_data
fourier_channels = (input_axis * ((num_freq_bands * 2) + 1)) if fourier_encode_data else 0
input_dim = fourier_channels + input_channels
self.latents = nn.Parameter(torch.randn(num_latents, latent_dim))
get_cross_attn = lambda: PreNorm(latent_dim, Attention(latent_dim, input_dim, heads = cross_heads, dim_head = cross_dim_head, dropout = attn_dropout), context_dim = input_dim)
get_cross_ff = lambda: PreNorm(latent_dim, FeedForward(latent_dim, dropout = ff_dropout))
get_latent_attn = lambda: PreNorm(latent_dim, Attention(latent_dim, heads = latent_heads, dim_head = latent_dim_head, dropout = attn_dropout))
get_latent_ff = lambda: PreNorm(latent_dim, FeedForward(latent_dim, dropout = ff_dropout))
get_cross_attn, get_cross_ff, get_latent_attn, get_latent_ff = map(cache_fn, (get_cross_attn, get_cross_ff, get_latent_attn, get_latent_ff))
self.layers = nn.ModuleList([])
for i in range(depth):
should_cache = i > 0 and weight_tie_layers
cache_args = {'_cache': should_cache}
self_attns = nn.ModuleList([])
for _ in range(self_per_cross_attn):
self_attns.append(nn.ModuleList([
get_latent_attn(**cache_args),
get_latent_ff(**cache_args)
]))
self.layers.append(nn.ModuleList([
get_cross_attn(**cache_args),
get_cross_ff(**cache_args),
self_attns
]))
self.to_logits = nn.Sequential(
nn.LayerNorm(latent_dim),
nn.Linear(latent_dim, num_classes)
)
def forward(self, data, mask = None):
b, *axis, _, device = *data.shape, data.device
assert len(axis) == self.input_axis, 'input data must have the right number of axis'
if self.fourier_encode_data:
# calculate fourier encoded positions in the range of [-1, 1], for all axis
axis_pos = list(map(lambda size: torch.linspace(-1., 1., steps = size, device = device), axis))
pos = torch.stack(torch.meshgrid(*axis_pos), dim = -1)
enc_pos = fourier_encode(pos, self.max_freq, self.num_freq_bands, base = self.freq_base)
enc_pos = rearrange(enc_pos, '... n d -> ... (n d)')
enc_pos = repeat(enc_pos, '... -> b ...', b = b)
data = torch.cat((data, enc_pos), dim = -1)
# concat to channels of data and flatten axis
data = rearrange(data, 'b ... d -> b (...) d')
x = repeat(self.latents, 'n d -> b n d', b = b)
# layers
for cross_attn, cross_ff, self_attns in self.layers:
x = cross_attn(x, context = data, mask = mask) + x
x = cross_ff(x) + x
for self_attn, self_ff in self_attns:
x = self_attn(x) + x
x = self_ff(x) + x
x = x.mean(dim = -2)
return self.to_logits(x)
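# Usage sketch (an illustrative addition): a tiny Perceiver over 32x32 RGB
# images; every hyperparameter below is a made-up small value.
def _perceiver_example():
    model = Perceiver(
        input_channels=3, input_axis=2,
        num_freq_bands=6, max_freq=10., depth=2,
        num_latents=32, latent_dim=64,
        cross_heads=1, latent_heads=4,
        cross_dim_head=32, latent_dim_head=32,
        num_classes=10,
    )
    img = torch.randn(1, 32, 32, 3)      # channels-last, as the forward pass expects
    return model(img).shape              # torch.Size([1, 10])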
| [
"torch.nn.Linear",
"torch.cat",
"torch.nn.LayerNorm",
"torch.nn.Dropout",
"torch.nn.ModuleList",
"torch.einsum",
"torch.finfo",
"torch.nn.functional.gelu",
"torch.linspace",
"torch.meshgrid",
"torch.randn"
] | 1.6 | mesur-io/perceiver-pytorch | 188158299f90b4e8874614dbc2cb050336e1c4df |
1.5 | import torch
import torch.nn as nn
import torch.nn.functional as F
from subnet import ImageConvNet, AudioConvNet
class AVENet(nn.Module):
def __init__(self):
super(AVENet, self).__init__()
# image subnetwork
self.icn = ImageConvNet()
self.img_pool = nn.AdaptiveMaxPool2d(1)
self.img_fc1 = nn.Linear(512, 128)
self.img_fc2 = nn.Linear(128, 128)
# audio subnetwork
self.acn = AudioConvNet()
self.aud_pool = nn.AdaptiveMaxPool2d(1)
self.aud_fc1 = nn.Linear(512, 128)
self.aud_fc2 = nn.Linear(128, 128)
# fusion network
self.fc3 = nn.Linear(1, 2)
def forward(self, img, aud):
# image subnetwork
img = self.icn(img)
img = self.img_pool(img)
img = img.squeeze(2).squeeze(2) # [N, 512, 1, 1] to [N, 512]
img = F.relu(self.img_fc1(img))
img_emb = F.normalize(self.img_fc2(img), p=2, dim=1) # L2 normalization
# audio subnetwork
aud = self.acn(aud)
aud = self.aud_pool(aud)
aud = aud.squeeze(2).squeeze(2) # [N, 512, 1, 1] to [N, 512]
aud = F.relu(self.aud_fc1(aud))
aud_emb = F.normalize(self.aud_fc2(aud), p=2, dim=1) # L2 normalization
# fusion network
euc_dist = ((img_emb - aud_emb) ** 2).sum(dim=1, keepdim=True).sqrt() # Euclidean distance
out = self.fc3(euc_dist)
return out, img_emb, aud_emb
# forward propagation test
if __name__ == "__main__":
img = torch.rand((16, 3, 224, 224))
aud = torch.rand((16, 1, 257, 200))
avenet = AVENet()
    out, img_emb, aud_emb = avenet(img, aud)
    print(out.shape, img_emb.shape, aud_emb.shape)
| [
"torch.nn.Linear",
"torch.rand",
"torch.nn.AdaptiveMaxPool2d"
] | 1.5.0 | kyuyeonpooh/objects-that-sound | 962031567f7e5657637d5518dff4f9a44af1c7eb |
1.7 | import torch, torchvision
import torch.nn as nn
import torch.nn.functional as F
from torchvision.models.resnet import Bottleneck
import torch.backends.cudnn as cudnn
import torch.utils.model_zoo as model_zoo
from typing import List
from collections import defaultdict
import os
from backbone import construct_backbone
from refineNet import RefineNet
from layers.discritizer import Discritizer
from utils.functions import MovingAverage, make_net
from utils import timer
from matplotlib import pyplot as plt
# This is required for Pytorch 1.0.1 on Windows to initialize Cuda on some driver versions.
# See the bug report here: https://github.com/pytorch/pytorch/issues/17108
torch.cuda.current_device()
# As of March 10, 2019, Pytorch DataParallel still doesn't support JIT Script Modules
use_jit = torch.cuda.device_count() <= 1
if not use_jit:
print('Multiple GPUs detected! Turning off JIT.')
ScriptModuleWrapper = torch.jit.ScriptModule if use_jit else nn.Module
script_method_wrapper = torch.jit.script_method if use_jit else lambda fn, _rcn=None: fn
prior_cache = defaultdict(lambda: None)
class FPN(ScriptModuleWrapper):
"""
Implements a general version of the FPN introduced in
https://arxiv.org/pdf/1612.03144.pdf
Parameters (in cfg.fpn):
- num_features (int): The number of output features in the fpn layers.
- interpolation_mode (str): The mode to pass to F.interpolate.
- num_downsample (int): The number of downsampled layers to add onto the selected layers.
These extra layers are downsampled from the last selected layer.
Args:
- in_channels (list): For each conv layer you supply in the forward pass,
how many features will it have?
"""
__constants__ = ['interpolation_mode', 'num_downsample', 'use_conv_downsample', 'relu_pred_layers',
'lat_layers', 'pred_layers', 'downsample_layers', 'relu_downsample_layers']
def __init__(self, cfg, in_channels):
super().__init__()
self.lat_layers = nn.ModuleList([
nn.Conv2d(x, cfg.fpn.num_features, kernel_size=1)
for x in reversed(in_channels)
])
        # This is here for backwards compatibility
padding = 1 if cfg.fpn.pad else 0
self.pred_layers = nn.ModuleList([
nn.Conv2d(cfg.fpn.num_features, cfg.fpn.num_features, kernel_size=3, padding=padding)
for _ in in_channels
])
if cfg.fpn.use_conv_downsample:
self.downsample_layers = nn.ModuleList([
nn.Conv2d(cfg.fpn.num_features, cfg.fpn.num_features, kernel_size=3, padding=1, stride=2)
for _ in range(cfg.fpn.num_downsample)
])
self.interpolation_mode = cfg.fpn.interpolation_mode
self.num_downsample = cfg.fpn.num_downsample
self.use_conv_downsample = cfg.fpn.use_conv_downsample
self.relu_downsample_layers = cfg.fpn.relu_downsample_layers
self.relu_pred_layers = cfg.fpn.relu_pred_layers
@script_method_wrapper
def forward(self, convouts:List[torch.Tensor]):
"""
Args:
- convouts (list): A list of convouts for the corresponding layers in in_channels.
Returns:
- A list of FPN convouts in the same order as x with extra downsample layers if requested.
"""
out = []
x = torch.zeros(1, device=convouts[0].device)
for i in range(len(convouts)):
out.append(x)
        # For backward compatibility, the conv layers are stored in reverse but the input and output are
        # given in the correct order. Thus, use j=-i-1 for the input and output and i for the conv layers.
j = len(convouts)
for lat_layer in self.lat_layers:
j -= 1
if j < len(convouts) - 1:
_, _, h, w = convouts[j].size()
x = F.interpolate(x, size=(h, w), mode=self.interpolation_mode, align_corners=False)
x = x + lat_layer(convouts[j])
out[j] = x
# This janky second loop is here because TorchScript.
j = len(convouts)
for pred_layer in self.pred_layers:
j -= 1
out[j] = pred_layer(out[j])
if self.relu_pred_layers:
F.relu(out[j], inplace=True)
cur_idx = len(out)
# In the original paper, this takes care of P6
if self.use_conv_downsample:
for downsample_layer in self.downsample_layers:
out.append(downsample_layer(out[-1]))
else:
for idx in range(self.num_downsample):
# Note: this is an untested alternative to out.append(out[-1][:, :, ::2, ::2]). Thanks TorchScript.
out.append(nn.functional.max_pool2d(out[-1], 1, stride=2))
if self.relu_downsample_layers:
for idx in range(len(out) - cur_idx):
out[idx] = F.relu(out[idx + cur_idx], inplace=False)
return out
class DVIS(nn.Module):
"""
████████║ ██ ██ ██████████ █████████
██ █║ █ █ ║██║ █║
██ █║ █ █ ║██║ █████████
██ █║ █ █ ║██║ ║██
███████║ █████ ██████████ █████████
You can set the arguments by changing them in the backbone config object in config.py.
Parameters (in cfg.backbone):
- selected_layers: The indices of the conv layers to use for prediction.
"""
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
self.backbone = construct_backbone(cfg.backbone, cfg.net_in_channels)
if cfg.freeze_bn:
self.freeze_bn()
# Compute mask_dim here and add it back to the config. Make sure DVIS's constructor is called early!
if cfg.fpn is not None:
in_channels = cfg.fpn.num_features
else:
in_channels = self.backbone.channels[0]
src_channels = self.backbone.channels
self.selected_layers = cfg.backbone.selected_layers
if cfg.fpn is not None:
            # Some hacky rewiring to accommodate the FPN
self.fpn = FPN(cfg, [src_channels[i] for i in self.selected_layers])
self.selected_layers = list(range(len(self.selected_layers) + cfg.fpn.num_downsample))
src_channels = [cfg.fpn.num_features] * len(self.selected_layers)
# The include_last_relu=false here is because we might want to change it to another function
self.proto_net, cfg.mask_dim = make_net(in_channels, cfg.mask_proto_net, include_last_relu=False)
        # the mask branch output is concatenated with the backbone features for classification
if cfg.classify_en:
self.classifyModule_SC(fea_layers=[in_channels])
return
def classifyModule_SC(self, fea_layers=[256]):
candidate_params = [{'mf_sradius': self.cfg.mf_spatial_radius[k],
'mf_rradius': self.cfg.mf_range_radius[k],
'mf_num_keep': self.cfg.mf_num_keep[k],
'mf_size_thr': self.cfg.mf_size_thr[k]
} for k in range(len(self.cfg.mf_spatial_radius))]
self.discritizers = []
for cand_param in candidate_params:
self.discritizers.append(Discritizer(cand_param))
num_classes = self.cfg.num_fg_classes+1
self.refine_net = RefineNet(self.cfg.roi_size, fea_layers, num_classes)
return
def forward_classify_SC(self, mask_logits, net_fea):
''''
@Param: mask_logits -- instance mask logits, in size [bs, 1, ht, wd]
net_fea -- backbone feature, in size [bs', ch', ht', wd']
@Output:
labelI -- list (len= # of mf bandwidth) of list (# batch size),
for each element is a onehot label tensor in size [N, ht, wd]
obj_bboxes -- list of tensor object boxes with size [N, 7],
(bs_idx, x0, y0, x1, y1,label_idx, real_label)
boxes coordinate depends on pred_label
cls_logits -- classify_logits with size [N, num_classes]
iou_scores -- iou score with size [N, 1]
obj_masks -- object mask with size [N, 1, msize, msize]
'''
labelImgs, obj_bboxes = [], []
for discritizer in self.discritizers:
dsc_out = discritizer(mask_logits)
labelImgs.append(dsc_out['mask'])
obj_bboxes.append(dsc_out['bboxes'])
rois = torch.cat(obj_bboxes, dim=0)
rfn_out = self.refine_net(mask_logits, net_fea, rois)
return {'labelI': labelImgs,
'obj_bboxes': obj_bboxes,
'cls_logits': rfn_out['cls'],
'iou_scores': rfn_out['iou'],
'obj_masks': rfn_out['mask']}
def save_weights(self, path):
""" Saves the model's weights using compression because the file sizes were getting too big. """
torch.save(self.state_dict(), path)
def load_weights(self, path, load_firstLayer=True, load_lastLayer=True, load_clsLayer=True):
""" Loads weights from a compressed save file. """
map_device = torch.device(0) if torch.cuda.is_available() else 'cpu'
state_dict = torch.load(path, map_location=map_device)
        # For backward compatibility, remove these (the new variable is called layers)
for key in list(state_dict.keys()):
if key.startswith('backbone.layer') and not key.startswith('backbone.layers'):
del state_dict[key]
if (not load_firstLayer) and \
(key.startswith('backbone.layers.0.0.conv1') or key.startswith('backbone.conv1')):
del state_dict[key]
if (not load_lastLayer) and key.startswith('proto_net.10'):
del state_dict[key]
if (not load_clsLayer) and key.startswith('refine'):
del state_dict[key]
# Also for backward compatibility with v1.0 weights, do this check
if key.startswith('fpn.downsample_layers.'):
if self.cfg.fpn is not None and \
int(key.split('.')[2]) >= self.cfg.fpn.num_downsample:
del state_dict[key]
self.load_state_dict(state_dict, strict=False)
def init_weights(self, backbone_path):
""" Initialize weights for training. """
# Initialize the backbone with the pretrained weights.
parent_path = os.path.dirname(backbone_path)
if not os.path.exists(parent_path):
os.makedirs(parent_path)
if not os.path.exists(backbone_path):
model_zoo.load_url('https://download.pytorch.org/models/resnet50-19c8e357.pth', model_dir=parent_path)
self.backbone.init_backbone(backbone_path)
conv_constants = getattr(nn.Conv2d(1, 1, 1), '__constants__')
# Quick lambda to test if one list contains the other
def all_in(x, y):
for _x in x:
if _x not in y:
return False
return True
# Initialize the rest of the conv layers with xavier
for name, module in self.named_modules():
# See issue #127 for why we need such a complicated condition if the module is a WeakScriptModuleProxy
# Broke in 1.3 (see issue #175), WeakScriptModuleProxy was turned into just ScriptModule.
# Broke in 1.4 (see issue #292), where RecursiveScriptModule is the new star of the show.
# Note that this might break with future pytorch updates, so let me know if it does
is_script_conv = False
if 'Script' in type(module).__name__:
# 1.4 workaround: now there's an original_name member so just use that
if hasattr(module, 'original_name'):
is_script_conv = 'Conv' in module.original_name
# 1.3 workaround: check if this has the same constants as a conv module
else:
is_script_conv = (
all_in(module.__dict__['_constants_set'], conv_constants)
and all_in(conv_constants, module.__dict__['_constants_set']))
is_conv_layer = isinstance(module, nn.Conv2d) or is_script_conv
is_linear_layer = isinstance(module, nn.Linear)
if (is_conv_layer or is_linear_layer) and module not in self.backbone.backbone_modules:
nn.init.xavier_uniform_(module.weight.data)
if module.bias is not None:
module.bias.data.zero_()
def train(self, mode=True):
super().train(mode)
if self.cfg.freeze_bn:
self.freeze_bn()
def freeze_bn(self, enable=False):
""" Adapted from https://discuss.pytorch.org/t/how-to-train-with-frozen-batchnorm/12106/8 """
for module in self.modules():
if isinstance(module, nn.BatchNorm2d):
module.train() if enable else module.eval()
module.weight.requires_grad = enable
module.bias.requires_grad = enable
def forward(self, x):
""" The input should be of size [batch_size, 3, img_h, img_w] """
""" output: proto -- in shape [bs, ch, ht, wd],
fea -- list of features in different size: [[bs, ch, ht, wd], ...]
"""
bs, _, img_h, img_w = x.size()
self.cfg._tmp_img_h = img_h
self.cfg._tmp_img_w = img_w
with timer.env('backbone'):
outs = self.backbone(x)
if self.cfg.fpn is not None:
with timer.env('fpn'):
# Use backbone.selected_layers
outs = [outs[i] for i in self.cfg.backbone.selected_layers]
fpn_outs = self.fpn(outs)
else:
fpn_outs = outs
with timer.env('proto'):
proto_out = self.proto_net(fpn_outs[0])
# base return components
ret_dict = {'proto': proto_out, 'fea':outs, 'cls_logits': None}
# if enable classify
if self.cfg.classify_en:
with timer.env('classify'):
ret_dict['cls_logits'] = self.forward_classify_SC(proto_out, fpn_outs[0])
return ret_dict
# Some testing code
if __name__ == '__main__':
from utils.functions import init_console
init_console()
# Use the first argument to set the config if you want
import sys
from data.config import cfg
if len(sys.argv) > 1:
from data.config import set_cfg
set_cfg(sys.argv[1])
    net = DVIS(cfg)
net.train()
net.init_weights(backbone_path='weights/' + cfg.backbone.path)
# GPU
if torch.cuda.is_available():
net = net.cuda()
torch.set_default_tensor_type('torch.cuda.FloatTensor')
x = torch.zeros((1, 3, cfg.max_size, cfg.max_size))
y = net(x)
    # DVIS returns a dict of outputs; only the tensor-valued entries have a size
    for k, a in y.items():
        if torch.is_tensor(a):
            print(k + ': ', a.size(), torch.sum(a))
exit()
net(x)
# timer.disable('pass2')
avg = MovingAverage()
try:
while True:
timer.reset()
with timer.env('everything else'):
net(x)
avg.add(timer.total_time())
print('\033[2J') # Moves console cursor to 0,0
timer.print_stats()
print('Avg fps: %.2f\tAvg ms: %.2f ' % (1/avg.get_avg(), avg.get_avg()*1000))
except KeyboardInterrupt:
pass
| [
"torch.zeros",
"torch.device",
"torch.cat",
"torch.set_default_tensor_type",
"torch.nn.functional.interpolate",
"torch.utils.model_zoo.load_url",
"torch.nn.init.xavier_uniform_",
"torch.cuda.device_count",
"torch.cuda.current_device",
"torch.nn.Conv2d",
"torch.cuda.is_available",
"torch.load",
"torch.nn.functional.relu",
"torch.nn.functional.max_pool2d",
"torch.sum"
] | 1.7.1 | DerekRay/2020-instanceSeg | a08ad95e64726db53cc32a5f90aaa13ae3cdb6a3 |
1.4 | import torch
import numpy as np
from misc.replay_memory import ReplayMemory
def collect_trajectory(agents, actors, env, args):
"""Collect batch of trajectories
Args:
agents (list): Contains agents that act in the environment
actors (list): Contains parameters that agents use to select action
env (gym): OpenAI Gym environment that agents execute actions
args (argparse): Python argparse that contains arguments
Returns:
memory (ReplayMemory): Class that includes trajectories
scores (list): Contains scores for each agent
"""
# Initialize memory
memory = ReplayMemory(args)
# Initialize LSTM state
for agent in agents:
agent.reset_lstm_state()
# Begin to collect trajectory
obs = env.reset()
scores = [0. for _ in range(args.n_agent)]
for timestep in range(args.ep_horizon):
# Get actions
actions, logprobs, entropies, values = [], [], [], []
for agent, actor in zip(agents, actors):
action, logprob, entropy, value = agent.act(obs, actor)
actions.append(action)
logprobs.append(logprob)
entropies.append(entropy)
values.append(value)
# Take step in the environment
next_obs, rewards, _, _ = env.step(actions)
# Add to memory
memory.add(
obs=obs,
logprobs=logprobs,
entropies=entropies,
values=values,
rewards=rewards)
# Update scores
for i_agent in range(args.n_agent):
if isinstance(rewards, list):
reward = np.mean(rewards[i_agent]) / float(args.ep_horizon)
else:
reward = np.mean(rewards[:, i_agent]) / float(args.ep_horizon)
scores[i_agent] += reward
# For next timestep
obs = next_obs
return memory, scores
def get_return(reward, args):
"""Compute episodic return given trajectory
Args:
reward (list): Contains rewards across trajectories for specific agent
args (argparse): Python argparse that contains arguments
Returns:
return_ (torch.Tensor): Episodic return with shape: (batch, ep_horizon)
"""
reward = torch.stack(reward, dim=1)
assert reward.shape == (args.traj_batch_size, args.ep_horizon), \
"Shape must be: (batch, ep_horizon)"
R, return_ = 0., []
for timestep in reversed(range(args.ep_horizon)):
R = reward[:, timestep] + args.discount * R
return_.insert(0, R)
return_ = torch.stack(return_, dim=1)
return return_
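# Tiny worked example (added for illustration): with discount 0.9 and a reward
# of 1 at both steps, the returns are [1 + 0.9 * 1, 1] = [1.9, 1.0].
def _return_example(args):
    # assumes args.ep_horizon == 2, args.discount == 0.9, args.traj_batch_size == 1
    reward = [torch.ones(1), torch.ones(1)]    # list over timesteps
    return get_return(reward, args)            # tensor([[1.9, 1.0]])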
def get_entropy_loss(memory, args, i_agent):
"""Compute entropy loss for exploration
Args:
memory (ReplayMemory): Class that includes trajectories
args (argparse): Python argparse that contains arguments
i_agent (int): Index of agent to compute entropy loss
Returns:
entropy_loss (torch.Tensor): Entropy loss for encouraging exploration
"""
_, _, entropies, _, _ = memory.sample()
entropy = torch.stack(entropies[i_agent], dim=1)
assert entropy.shape == (args.traj_batch_size, args.ep_horizon), \
"Shape must be: (batch, ep_horizon)"
entropy_loss = -args.entropy_weight * torch.mean(torch.sum(entropy, dim=1))
return entropy_loss
def get_gae(value, reward, args, is_normalize=False, eps=1e-8):
"""Compute generalized advantage estimator
Args:
value (list): Contains value function across trajectories
reward (list): Contains rewards across trajectories for specific agent
args (argparse): Python argparse that contains arguments
is_normalize (bool): Normalize baseline if flag is True. Default: False
eps (float): Epsilon for numerical stability. Default: 1e-8
Returns:
GAE (torch.Tensor): Estimated generalized advantage function
References:
https://github.com/dgriff777/rl_a3c_pytorch/blob/master/train.py
"""
value = torch.stack(value, dim=1)
assert value.shape == (args.traj_batch_size, args.ep_horizon), \
"Shape must be: (batch, ep_horizon)"
value = torch.cat((value, torch.zeros(value.shape[0], 1)), dim=1)
reward = torch.stack(reward, dim=1)
assert reward.shape == (args.traj_batch_size, args.ep_horizon), \
"Shape must be: (batch, ep_horizon)"
gae, advantage = 0., []
for timestep in reversed(range(args.ep_horizon)):
delta = (reward[:, timestep] + args.discount * value[:, timestep + 1]) - value[:, timestep]
gae = gae * args.discount * args.lambda_ + delta
advantage.insert(0, gae)
advantage = torch.stack(advantage, dim=1)
assert reward.shape == advantage.shape
if is_normalize:
advantage = advantage - torch.mean(advantage)
std = torch.sqrt(torch.mean(advantage ** 2))
advantage.div_(std + eps)
return advantage
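# Usage sketch (an addition for illustration): value baselines and rewards of the
# right batch/horizon shape yield a per-step advantage of the same shape.
def _gae_example(args):
    # assumes args.traj_batch_size, args.ep_horizon, args.discount and args.lambda_ are set
    value = [torch.zeros(args.traj_batch_size) for _ in range(args.ep_horizon)]
    reward = [torch.ones(args.traj_batch_size) for _ in range(args.ep_horizon)]
    advantage = get_gae(value, reward, args, is_normalize=True)
    return advantage.shape                     # (traj_batch_size, ep_horizon)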
| [
"torch.zeros",
"torch.stack",
"torch.mean",
"torch.sum"
] | 1.4.0 | dkkim93/meta-mapg | 98d4d1be3f5fd628b42e451bed99004f02f553d0 |
1.5 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import argparse
import datetime
import json
import random
import time
from pathlib import Path
import numpy as np
import torch
from torch.utils.data import DataLoader, DistributedSampler
import datasets
import util.misc as utils
from datasets import build_dataset, get_coco_api_from_dataset
from engine import evaluate, train_one_epoch
from models import build_model
def get_args_parser():
parser = argparse.ArgumentParser('Set transformer detector', add_help=False)
parser.add_argument('--lr', default=1e-4, type=float)
parser.add_argument('--lr_backbone', default=1e-5, type=float)
parser.add_argument('--batch_size', default=2, type=int)
parser.add_argument('--weight_decay', default=1e-4, type=float)
parser.add_argument('--epochs', default=300, type=int)
parser.add_argument('--lr_drop', default=200, type=int)
parser.add_argument('--clip_max_norm', default=0.1, type=float,
help='gradient clipping max norm')
# Model parameters
parser.add_argument('--frozen_weights', type=str, default=None,
help="Path to the pretrained model. If set, only the mask head will be trained")
parser.add_argument('--train_only_head', action='store_true',
help="Train only classification head for the detection model")
# * Backbone
parser.add_argument('--backbone', default='resnet50', type=str,
help="Name of the convolutional backbone to use")
parser.add_argument('--dilation', action='store_true',
help="If true, we replace stride with dilation in the last convolutional block (DC5)")
parser.add_argument('--position_embedding', default='sine', type=str, choices=('sine', 'learned'),
help="Type of positional embedding to use on top of the image features")
# * Transformer
parser.add_argument('--enc_layers', default=6, type=int,
help="Number of encoding layers in the transformer")
parser.add_argument('--dec_layers', default=6, type=int,
help="Number of decoding layers in the transformer")
parser.add_argument('--dim_feedforward', default=2048, type=int,
help="Intermediate size of the feedforward layers in the transformer blocks")
parser.add_argument('--hidden_dim', default=256, type=int,
help="Size of the embeddings (dimension of the transformer)")
parser.add_argument('--dropout', default=0.1, type=float,
help="Dropout applied in the transformer")
parser.add_argument('--nheads', default=8, type=int,
help="Number of attention heads inside the transformer's attentions")
parser.add_argument('--num_queries', default=100, type=int,
help="Number of query slots")
parser.add_argument('--pre_norm', action='store_true')
# * Segmentation
parser.add_argument('--masks', action='store_true',
help="Train segmentation head if the flag is provided")
# Loss
parser.add_argument('--no_aux_loss', dest='aux_loss', action='store_false',
help="Disables auxiliary decoding losses (loss at each layer)")
# * Matcher
parser.add_argument('--set_cost_class', default=1, type=float,
help="Class coefficient in the matching cost")
parser.add_argument('--set_cost_bbox', default=5, type=float,
help="L1 box coefficient in the matching cost")
parser.add_argument('--set_cost_giou', default=2, type=float,
help="giou box coefficient in the matching cost")
# * Loss coefficients
parser.add_argument('--mask_loss_coef', default=1, type=float)
parser.add_argument('--dice_loss_coef', default=1, type=float)
parser.add_argument('--bbox_loss_coef', default=5, type=float)
parser.add_argument('--giou_loss_coef', default=2, type=float)
parser.add_argument('--eos_coef', default=0.1, type=float,
help="Relative classification weight of the no-object class")
# dataset parameters
parser.add_argument('--dataset_file', default='coco')
parser.add_argument('--coco_path', type=str)
parser.add_argument('--coco_panoptic_path', type=str)
parser.add_argument('--remove_difficult', action='store_true')
parser.add_argument('--output_dir', default='',
help='path where to save, empty for no saving')
parser.add_argument('--device', default='cuda',
help='device to use for training / testing')
parser.add_argument('--seed', default=42, type=int)
parser.add_argument('--resume', default='', help='resume from checkpoint')
parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
help='start epoch')
parser.add_argument('--eval', action='store_true')
parser.add_argument('--num_workers', default=2, type=int)
# distributed training parameters
parser.add_argument('--world_size', default=1, type=int,
help='number of distributed processes')
parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
return parser
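# Example invocation (paths and values below are placeholders, not from the original repo;
# assumes this file is the repo's main.py):
#   python main.py --coco_path /path/to/coco --output_dir ./outputs --epochs 300
#   python main.py --coco_path /path/to/coco --resume checkpoint.pth --eval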
def main(args):
utils.init_distributed_mode(args)
print("git:\n {}\n".format(utils.get_sha()))
if args.frozen_weights is not None:
assert args.masks, "Frozen training is meant for segmentation only"
if args.train_only_head:
assert not args.masks, "Head training is only available for detection"
print(args)
device = torch.device(args.device)
# fix the seed for reproducibility
seed = args.seed + utils.get_rank()
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
model, criterion, postprocessors = build_model(args)
model.to(device)
model_without_ddp = model
if args.distributed:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
model_without_ddp = model.module
n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
print('number of params:', n_parameters)
param_dicts = [
{"params": [p for n, p in model_without_ddp.named_parameters() if "backbone" not in n and p.requires_grad]},
{
"params": [p for n, p in model_without_ddp.named_parameters() if "backbone" in n and p.requires_grad],
"lr": args.lr_backbone,
},
]
optimizer = torch.optim.AdamW(param_dicts, lr=args.lr,
weight_decay=args.weight_decay)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, args.lr_drop)
dataset_train = build_dataset(image_set='train', args=args)
dataset_val = build_dataset(image_set='val', args=args)
if args.distributed:
sampler_train = DistributedSampler(dataset_train)
sampler_val = DistributedSampler(dataset_val, shuffle=False)
else:
sampler_train = torch.utils.data.RandomSampler(dataset_train)
sampler_val = torch.utils.data.SequentialSampler(dataset_val)
batch_sampler_train = torch.utils.data.BatchSampler(
sampler_train, args.batch_size, drop_last=True)
data_loader_train = DataLoader(dataset_train, batch_sampler=batch_sampler_train,
collate_fn=utils.collate_fn, num_workers=args.num_workers)
data_loader_val = DataLoader(dataset_val, args.batch_size, sampler=sampler_val,
drop_last=False, collate_fn=utils.collate_fn, num_workers=args.num_workers)
if args.dataset_file == "coco_panoptic":
# We also evaluate AP during panoptic training, on original coco DS
coco_val = datasets.coco.build("val", args)
base_ds = get_coco_api_from_dataset(coco_val)
else:
base_ds = get_coco_api_from_dataset(dataset_val)
if args.frozen_weights is not None:
checkpoint = torch.load(args.frozen_weights, map_location='cpu')
model_without_ddp.detr.load_state_dict(checkpoint['model'])
output_dir = Path(args.output_dir)
if args.resume:
if args.resume.startswith('https'):
checkpoint = torch.hub.load_state_dict_from_url(
args.resume, map_location='cpu', check_hash=True)
else:
checkpoint = torch.load(args.resume, map_location='cpu')
model_without_ddp.load_state_dict(checkpoint['model'], strict=False)
if not args.eval and 'optimizer' in checkpoint and 'lr_scheduler' in checkpoint and 'epoch' in checkpoint:
optimizer.load_state_dict(checkpoint['optimizer'])
lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
args.start_epoch = checkpoint['epoch'] + 1
if args.eval:
test_stats, coco_evaluator = evaluate(model, criterion, postprocessors,
data_loader_val, base_ds, device, args.output_dir)
if args.output_dir:
utils.save_on_master(coco_evaluator.coco_eval["bbox"].eval, output_dir / "eval.pth")
return
if args.train_only_head:
for param in model_without_ddp.parameters():
param.requires_grad = False
model_without_ddp.class_embed.weight.requires_grad = True
model_without_ddp.class_embed.bias.requires_grad = True
print("Start training")
start_time = time.time()
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
sampler_train.set_epoch(epoch)
train_stats = train_one_epoch(
model, criterion, data_loader_train, optimizer, device, epoch,
args.clip_max_norm)
lr_scheduler.step()
if args.output_dir:
checkpoint_paths = [output_dir / 'checkpoint.pth']
# extra checkpoint before LR drop and every 100 epochs
if (epoch + 1) % args.lr_drop == 0 or (epoch + 1) % 100 == 0:
checkpoint_paths.append(output_dir / f'checkpoint{epoch:04}.pth')
for checkpoint_path in checkpoint_paths:
utils.save_on_master({
'model': model_without_ddp.state_dict(),
'optimizer': optimizer.state_dict(),
'lr_scheduler': lr_scheduler.state_dict(),
'epoch': epoch,
'args': args,
}, checkpoint_path)
test_stats, coco_evaluator = evaluate(
model, criterion, postprocessors, data_loader_val, base_ds, device, args.output_dir
)
log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
**{f'test_{k}': v for k, v in test_stats.items()},
'epoch': epoch,
'n_parameters': n_parameters}
if args.output_dir and utils.is_main_process():
with (output_dir / "log.txt").open("a") as f:
f.write(json.dumps(log_stats) + "\n")
# for evaluation logs
if coco_evaluator is not None:
(output_dir / 'eval').mkdir(exist_ok=True)
if "bbox" in coco_evaluator.coco_eval:
filenames = ['latest.pth']
if epoch % 50 == 0:
filenames.append(f'{epoch:03}.pth')
for name in filenames:
torch.save(coco_evaluator.coco_eval["bbox"].eval,
output_dir / "eval" / name)
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print('Training time {}'.format(total_time_str))
if __name__ == '__main__':
parser = argparse.ArgumentParser('DETR training and evaluation script', parents=[get_args_parser()])
args = parser.parse_args()
if args.output_dir:
Path(args.output_dir).mkdir(parents=True, exist_ok=True)
main(args)
| [
"torch.device",
"torch.optim.AdamW",
"torch.optim.lr_scheduler.StepLR",
"torch.utils.data.RandomSampler",
"torch.utils.data.DistributedSampler",
"torch.save",
"torch.utils.data.SequentialSampler",
"torch.nn.parallel.DistributedDataParallel",
"torch.manual_seed",
"torch.utils.data.BatchSampler",
"torch.utils.data.DataLoader",
"torch.load",
"torch.hub.load_state_dict_from_url"
] | 1.5.0 | cadgip/detr | a032506f145d0f5e7951e002b3100b30de6a7d58 |
1.7 | import torch
import gym
import random
import numpy as np
torch.backends.cudnn.deterministic=True
class Environment:
def __init__(self, render=False, seed=None):
self.render = render
self.env_seed = seed
def set_seed(self):
if self.env_seed is not None:
self.env.seed(self.env_seed)
self.env.action_space.seed(self.env_seed)
torch.manual_seed(0)
torch.cuda.manual_seed(0)
torch.random.manual_seed(0)
np.random.seed(0)
random.seed(0)
def simulate(self, N, policy=None, verbose=False):
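        # Runs N episodes and records one trajectory per episode.
        # With policy=None, actions are sampled randomly from the action space; otherwise the
        # policy distribution is conditioned on the current observation and an action is drawn from it.
        # Returns {"states": [...], "actions": [...], "rewards": [...]}, each holding one list per episode.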
states_n = []
actions_n = []
rewards_n = []
tot_reward = 0
for episode in range(N):
if verbose:
print("episode {} of {}\n".format(episode+1, N))
states = []
actions = []
rewards = []
done = False
observation = self.env.reset()
while not done:
if self.render:
self.env.render()
states.append(observation.tolist())
                if policy is None:
action = self.env.action_space.sample()
action = np.asarray(action)
else:
policy.distribution(torch.tensor([observation], dtype=torch.float32))
action = policy.sample()[0].numpy()
observation, reward, done, _ = self.env.step(action)
tot_reward += reward
rewards.append(reward)
actions.append(action.tolist())
states_n.append(states)
actions_n.append(actions)
rewards_n.append(rewards)
tot_reward = tot_reward/N
self.env.close()
return {"states": states_n, "actions": actions_n, "rewards": rewards_n}
#CartPole
class CartPole(Environment):
def __init__(self, render=False, seed=None):
super().__init__(render, seed)
self.env = gym.make('CartPole-v1')
self.state_space = ("Continuous", 4)
self.action_space = ("Discrete", 2, [0,1])
#Pendulum
class Pendulum(Environment):
def __init__(self, render=False, seed=None):
super().__init__(render, seed)
self.env = gym.make('Pendulum-v0')
self.state_space = ("Continuous", 3)
self.action_space = ("Continuous", 1)
#MountainCar
class MountainCarContinous(Environment):
def __init__(self, render=False, seed=None):
super().__init__(render, seed)
self.env = gym.make('MountainCarContinuous-v0')
self.state_space = ("Continuous", 2)
self.action_space = ("Continuous", 1)
#MountainCar-Discrete
class MountainCar(Environment):
def __init__(self, render=False, seed=None):
super().__init__(render, seed)
self.env = gym.make('MountainCar-v0')
self.state_space = ("Continuous", 2)
self.action_space = ("Discrete", 3, [0,1,2])
#Acrobot-Discrete
class Acrobot(Environment):
def __init__(self, render=False, seed=None):
super().__init__(render, seed)
self.env = gym.make('Acrobot-v1')
self.state_space = ("Continuous", 6)
self.action_space = ("Discrete", 3, [0,1,2])
#BipedalWalker
class BipedalWalker(Environment):
def __init__(self, render=False, seed=None):
super().__init__(render, seed)
self.env = gym.make('BipedalWalker-v3')
self.state_space = ("Continuous", 24)
self.action_space = ("Continuous", 4)
#LunarLanderContinuous
class LunarLanderContinuous(Environment):
def __init__(self, render=False, seed=None):
super().__init__(render, seed)
self.env = gym.make('LunarLanderContinuous-v2')
self.state_space = ("Continuous", 8)
self.action_space = ("Continuous", 2)
#LunarLander
class LunarLander(Environment):
def __init__(self, render=False, seed=None):
super().__init__(render, seed)
self.env = gym.make('LunarLander-v2')
self.state_space = ("Continuous", 8)
self.action_space = ("Discrete", 4, [0, 1, 2, 3])
if __name__ == "__main__":
#cartpole
cartpole = CartPole(render=True)
cartpole.simulate(6)
#pendulum
pendulum = Pendulum(render=True)
pendulum.flag_terminal_reward = True
pendulum.simulate(3)
#mountaincar
mountaincar = MountainCar(render=True)
mountaincar.simulate(3)
#acrobot
acrobot = Acrobot(render=True)
acrobot.simulate(3)
#bipedalwalker
bipedalwalker = BipedalWalker(render=True)
bipedalwalker.simulate(3)
#lunarlander
lunarlander = LunarLander(render=True)
lunarlander.simulate(3) | [
"torch.cuda.manual_seed",
"torch.manual_seed",
"torch.random.manual_seed",
"torch.tensor"
] | 1.7.0 | gargiani/VRPG | 429fe58b089df2f4cdedab01b05564230e2317ac |
1.8 | #!/usr/bin/env python3
import io
import logging
import multiprocessing
import os
import time
import imghdr
from pathlib import Path
from typing import Union
import cv2
import torch
import numpy as np
from loguru import logger
from lama_cleaner.model_manager import ModelManager
from lama_cleaner.schema import Config
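# Best effort: disable the TorchScript fusers (CPU/GPU fusion, NNC, nvFuser), which can
# misbehave with some traced models. These are private torch._C APIs, so failures are ignored.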
try:
torch._C._jit_override_can_fuse_on_cpu(False)
torch._C._jit_override_can_fuse_on_gpu(False)
torch._C._jit_set_texpr_fuser_enabled(False)
torch._C._jit_set_nvfuser_enabled(False)
except:
pass
from flask import Flask, request, send_file, cli
# Prevent Flask from displaying the warning about using a development server in a production environment.
# https://gist.github.com/jerblack/735b9953ba1ab6234abb43174210d356
cli.show_server_banner = lambda *_: None
from flask_cors import CORS
from lama_cleaner.helper import (
load_img,
numpy_to_bytes,
resize_max_size,
)
NUM_THREADS = str(multiprocessing.cpu_count())
os.environ["OMP_NUM_THREADS"] = NUM_THREADS
os.environ["OPENBLAS_NUM_THREADS"] = NUM_THREADS
os.environ["MKL_NUM_THREADS"] = NUM_THREADS
os.environ["VECLIB_MAXIMUM_THREADS"] = NUM_THREADS
os.environ["NUMEXPR_NUM_THREADS"] = NUM_THREADS
if os.environ.get("CACHE_DIR"):
os.environ["TORCH_HOME"] = os.environ["CACHE_DIR"]
BUILD_DIR = os.environ.get("LAMA_CLEANER_BUILD_DIR", "app/build")
class NoFlaskwebgui(logging.Filter):
def filter(self, record):
return "GET //flaskwebgui-keep-server-alive" not in record.getMessage()
logging.getLogger("werkzeug").addFilter(NoFlaskwebgui())
app = Flask(__name__, static_folder=os.path.join(BUILD_DIR, "static"))
app.config["JSON_AS_ASCII"] = False
CORS(app, expose_headers=["Content-Disposition"])
model: ModelManager = None
device = None
input_image_path: str = None
def get_image_ext(img_bytes):
w = imghdr.what("", img_bytes)
if w is None:
w = "jpeg"
return w
@app.route("/inpaint", methods=["POST"])
def process():
input = request.files
# RGB
origin_image_bytes = input["image"].read()
image, alpha_channel = load_img(origin_image_bytes)
original_shape = image.shape
interpolation = cv2.INTER_CUBIC
form = request.form
size_limit: Union[int, str] = form.get("sizeLimit", "1080")
if size_limit == "Original":
size_limit = max(image.shape)
else:
size_limit = int(size_limit)
config = Config(
ldm_steps=form["ldmSteps"],
ldm_sampler=form["ldmSampler"],
hd_strategy=form["hdStrategy"],
hd_strategy_crop_margin=form["hdStrategyCropMargin"],
hd_strategy_crop_trigger_size=form["hdStrategyCropTrigerSize"],
hd_strategy_resize_limit=form["hdStrategyResizeLimit"],
)
logger.info(f"Origin image shape: {original_shape}")
image = resize_max_size(image, size_limit=size_limit, interpolation=interpolation)
logger.info(f"Resized image shape: {image.shape}")
mask, _ = load_img(input["mask"].read(), gray=True)
mask = resize_max_size(mask, size_limit=size_limit, interpolation=interpolation)
start = time.time()
res_np_img = model(image, mask, config)
logger.info(f"process time: {(time.time() - start) * 1000}ms")
torch.cuda.empty_cache()
if alpha_channel is not None:
if alpha_channel.shape[:2] != res_np_img.shape[:2]:
alpha_channel = cv2.resize(
alpha_channel, dsize=(res_np_img.shape[1], res_np_img.shape[0])
)
res_np_img = np.concatenate(
(res_np_img, alpha_channel[:, :, np.newaxis]), axis=-1
)
ext = get_image_ext(origin_image_bytes)
return send_file(
io.BytesIO(numpy_to_bytes(res_np_img, ext)),
mimetype=f"image/{ext}",
)
@app.route("/model")
def current_model():
return model.name, 200
@app.route("/model_downloaded/<name>")
def model_downloaded(name):
return str(model.is_downloaded(name)), 200
@app.route("/model", methods=["POST"])
def switch_model():
new_name = request.form.get("name")
if new_name == model.name:
return "Same model", 200
try:
model.switch(new_name)
except NotImplementedError:
return f"{new_name} not implemented", 403
return f"ok, switch to {new_name}", 200
@app.route("/")
def index():
return send_file(os.path.join(BUILD_DIR, "index.html"))
@app.route("/inputimage")
def set_input_photo():
if input_image_path:
with open(input_image_path, "rb") as f:
image_in_bytes = f.read()
return send_file(
input_image_path,
as_attachment=True,
attachment_filename=Path(input_image_path).name,
mimetype=f"image/{get_image_ext(image_in_bytes)}",
)
else:
return "No Input Image"
def main(args):
global model
global device
global input_image_path
device = torch.device(args.device)
input_image_path = args.input
model = ModelManager(name=args.model, device=device)
if args.gui:
app_width, app_height = args.gui_size
from flaskwebgui import FlaskUI
ui = FlaskUI(
app, width=app_width, height=app_height, host=args.host, port=args.port
)
ui.run()
else:
app.run(host=args.host, port=args.port, debug=args.debug)
| [
"torch.device",
"torch._C._jit_override_can_fuse_on_gpu",
"torch.cuda.empty_cache",
"torch._C._jit_set_nvfuser_enabled",
"torch._C._jit_set_texpr_fuser_enabled",
"torch._C._jit_override_can_fuse_on_cpu"
] | 1.8.2 | Sanster/lama-cleaner | 20acc58435639d6438a962e392ca1cf89b9755f6 |
1.5 | import os
import re
import json
import math
import logging
import pickle
import random
import importlib
import functools
import itertools
from collections import defaultdict
from typing import List, Dict, Optional
import numpy as np
import networkx as nx
import matplotlib as mpl
mpl.use("Agg")
import matplotlib.pyplot as plt # noqa: E402
from loguru import logger # noqa: E402
from transformers import BertTokenizer # noqa: E402
from torch.optim.optimizer import Optimizer # noqa: E402
from torch.optim.lr_scheduler import LambdaLR # noqa: E402
mpl.rcParams["font.sans-serif"] = ["SimHei"] # 指定默认字体
mpl.rcParams["axes.unicode_minus"] = False # 解决保存图像是负号'-'显示为方块的问题
mpl.rcParams["axes.titlesize"] = 20
EPS = 1e-10
def get_cosine_schedule_with_warmup(
optimizer: Optimizer,
num_warmup_steps: int,
num_training_steps: int,
num_cycles: float = 0.5,
last_epoch: int = -1,
):
"""
Create a schedule with a learning rate that decreases following the values of the cosine function between the
initial lr set in the optimizer to 0, after a warmup period during which it increases linearly between 0 and the
initial lr set in the optimizer.
Args:
optimizer (:class:`~torch.optim.Optimizer`):
The optimizer for which to schedule the learning rate.
num_warmup_steps (:obj:`int`):
The number of steps for the warmup phase.
num_training_steps (:obj:`int`):
The total number of training steps.
num_cycles (:obj:`float`, `optional`, defaults to 0.5):
            The number of waves in the cosine schedule (the default is to just decrease from the max value to 0
following a half-cosine).
last_epoch (:obj:`int`, `optional`, defaults to -1):
The index of the last epoch when resuming training.
Return:
:obj:`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
"""
def lr_lambda(current_step):
if current_step < num_warmup_steps:
return float(current_step) / float(max(1, num_warmup_steps))
progress = float(current_step - num_warmup_steps) / float(
max(1, num_training_steps - num_warmup_steps)
)
return max(
0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress))
)
return LambdaLR(optimizer, lr_lambda, last_epoch)
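# Usage sketch for the scheduler above (hyper-parameter values are illustrative only):
#   optimizer = torch.optim.AdamW(model.parameters(), lr=5e-5)
#   scheduler = get_cosine_schedule_with_warmup(optimizer, num_warmup_steps=500, num_training_steps=10000)
#   for step in range(10000):
#       ...  # forward/backward
#       optimizer.step()
#       scheduler.step()  # linear warm-up, then half-cosine decay towards 0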
def default_load_json(json_file_path, encoding="utf-8", **kwargs):
with open(json_file_path, "r", encoding=encoding) as fin:
tmp_json = json.load(fin, **kwargs)
return tmp_json
def default_dump_json(
obj, json_file_path, encoding="utf-8", ensure_ascii=False, indent=2, **kwargs
):
with open(json_file_path, "w", encoding=encoding) as fout:
json.dump(obj, fout, ensure_ascii=ensure_ascii, indent=indent, **kwargs)
def default_load_pkl(pkl_file_path, **kwargs):
with open(pkl_file_path, "rb") as fin:
obj = pickle.load(fin, **kwargs)
return obj
def default_dump_pkl(obj, pkl_file_path, **kwargs):
with open(pkl_file_path, "wb") as fout:
pickle.dump(obj, fout, **kwargs)
def set_basic_log_config():
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=logging.INFO,
)
class BertTokenizerForDocEE(BertTokenizer):
"""Customized tokenizer"""
def __init__(
self,
vocab_file,
doc_lang="zh",
do_lower_case=True,
do_basic_tokenize=True,
never_split=None,
unk_token="[UNK]",
sep_token="[SEP]",
pad_token="[PAD]",
cls_token="[CLS]",
mask_token="[MASK]",
tokenize_chinese_chars=True,
strip_accents=None,
**kwargs,
):
super().__init__(
vocab_file,
do_lower_case=do_lower_case,
do_basic_tokenize=do_basic_tokenize,
never_split=never_split,
unk_token=unk_token,
sep_token=sep_token,
pad_token=pad_token,
cls_token=cls_token,
mask_token=mask_token,
tokenize_chinese_chars=tokenize_chinese_chars,
strip_accents=strip_accents,
**kwargs,
)
self.doc_lang = doc_lang
if self.doc_lang == "zh":
self.dee_tokenize = self.dee_char_tokenize
elif self.doc_lang == "en":
self.dee_tokenize = self.dee_space_tokenize
def dee_space_tokenize(self, text):
"""perform space tokenization"""
tokens = text.split()
out_tokens = []
for token in tokens:
if token in self.vocab:
out_tokens.append(token)
else:
out_tokens.append(self.unk_token)
return out_tokens
def dee_wordpiece_tokenize(self, text):
"""perform wordpiece tokenization"""
tokens = text.split()
out_tokens = []
for token in tokens:
pieces = self.tokenize(token)
if len(pieces) < 1:
pieces = [self.unk_token]
out_tokens += pieces
return out_tokens
def dee_char_tokenize(self, text):
"""perform pure character-based tokenization"""
tokens = list(text)
out_tokens = []
for token in tokens:
if token in self.vocab:
out_tokens.append(token)
else:
out_tokens.append(self.unk_token)
return out_tokens
def recursive_print_grad_fn(grad_fn, prefix="", depth=0, max_depth=50):
if depth > max_depth:
return
    logger.info("{}{} {}".format(prefix, depth, grad_fn.__class__.__name__))
if hasattr(grad_fn, "next_functions"):
for nf in grad_fn.next_functions:
ngfn = nf[0]
recursive_print_grad_fn(
ngfn, prefix=prefix + " ", depth=depth + 1, max_depth=max_depth
)
def strtobool(str_val):
"""Convert a string representation of truth to true (1) or false (0).
True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values
are 'n', 'no', 'f', 'false', 'off', and '0'. Raises ValueError if
'val' is anything else.
"""
str_val = str_val.lower()
if str_val in ("y", "yes", "t", "true", "on", "1"):
return True
elif str_val in ("n", "no", "f", "false", "off", "0"):
return False
else:
raise ValueError("invalid truth value %r" % (str_val,))
def plot_graph_from_adj_mat(adj_mat, directory=".", title="No Title"):
fig = plt.figure(figsize=(16, 9), dpi=350)
adj_mat = np.array(adj_mat)
np.fill_diagonal(adj_mat, 0)
rows, cols = np.where(adj_mat == 1)
edges = zip(rows.tolist(), cols.tolist())
G = nx.Graph()
G.add_edges_from(edges)
options = {
"font_size": 36,
"node_size": 3000,
"node_color": "white",
"edgecolors": "black",
"linewidths": 5,
"width": 5,
}
nx.draw_networkx(G, **options)
# Set margins for the axes so that nodes aren't clipped
ax = plt.gca()
ax.margins(0.20)
plt.axis("off")
path = os.path.join(directory, f"{title}.png")
fig.savefig(path, format="png")
plt.close()
def extract_combinations_from_event_objs(event_objs):
combinations = set()
for events in event_objs:
if events is not None:
for instance in events:
combination = set()
for arg in instance:
if arg is not None:
combination.add(arg)
if len(combination) > 0:
combinations.add(tuple(sorted(list(combination))))
return combinations
def extract_instances_from_event_objs(event_objs):
"""has a role type in the final combination compared with `extract_combinations_from_event_objs`"""
instances = set()
for events in event_objs:
if events is not None:
for instance in events:
combination = set()
for role, arg in enumerate(instance):
if arg is not None:
combination.add((arg, role))
if len(combination) > 0:
instances.add(tuple(sorted(list(combination))))
return instances
def remove_combination_roles(combinations):
ret_combs = set()
for comb in combinations:
if isinstance(comb[0], int):
ret_combs.add(comb)
continue
new_comb = set()
for arg_role in comb:
if len(arg_role) == 2:
arg, _ = arg_role
else:
arg = arg_role
if arg is not None:
new_comb.add(arg)
new_comb = sorted(list(new_comb))
ret_combs.add(tuple(new_comb))
return ret_combs
def contain_role_type(event_instance):
return any(arg is not None and isinstance(arg, tuple) for arg in event_instance)
def remove_event_obj_roles(event_objs_list, event_type_fields_pairs):
result_event_arg_idxs_objs_list = []
for event_idx, events in enumerate(event_objs_list):
if events is None:
result_event_arg_idxs_objs_list.append(None)
continue
tmp_events = []
for event in events:
# if the event_arg_idxs_objs_list has already been fixed, then pass
if not contain_role_type(event):
tmp_events.append(event)
continue
tmp_span_idxs = [
None for _ in range(len(event_type_fields_pairs[event_idx][1]))
]
for span, field in event:
tmp_span_idxs[field] = span
tmp_events.append(tuple(tmp_span_idxs))
result_event_arg_idxs_objs_list.append(tmp_events)
return result_event_arg_idxs_objs_list
def negative_sampling(gold_combinations, len_spans):
negative_combination = set(list(range(len_spans)))
for gc in gold_combinations:
negative_combination = negative_combination - set(gc)
if len(negative_combination) > 0:
return tuple(sorted(list(negative_combination)))
else:
return None
def random_sampling(
whole_arg_rel_mat, len_spans, min_num_span=2, num_samp=5, max_samp_times=20
):
"""
random sampling part of the whole combination graph
Returns:
[[combination (List), adj matrix (List[List])], [...], ...]
"""
ret_pairs = []
combinations = []
for _ in range(max_samp_times):
if len(combinations) >= num_samp:
break
tmp_combination = []
for i in range(len_spans):
if random.random() >= 0.5:
tmp_combination.append(i)
if len(tmp_combination) >= min_num_span:
combinations.append(tmp_combination)
for combination in combinations[:num_samp]:
adj_mat = whole_arg_rel_mat.get_sub_graph_adj_mat(combination)
ret_pairs.append([combination, adj_mat])
return ret_pairs
def fill_diag(mat, num):
for i in range(len(mat)):
mat[i][i] = num
return mat
def fold_and(mat):
r"""
mat[j, i] = mat[i, j] = 1 iff mat[i, j] == mat[j, i] == 1
"""
new_mat = [[0] * len(mat[0]) for _ in range(len(mat))]
for i in range(len(mat)):
for j in range(len(mat[0])):
if mat[i][j] == mat[j][i] == 1:
                new_mat[i][j] = new_mat[j][i] = 1  # qy: only record edges that are present in both directions
return new_mat
def left_tril(mat):
new_mat = np.array(mat)
new_mat = np.tril(new_mat, k=-1)
return new_mat.tolist()
def tril_fold_or(mat):
new_mat = np.array(mat)
new_mat = np.tril(new_mat, k=-1)
new_mat = np.bitwise_or(new_mat, new_mat.T)
return new_mat.tolist()
def assign_role_from_gold_to_comb(comb, gold_comb):
r"""
pass the roles in gold combination to pred combination
role will be `None` if there's no such mapping
Returns:
[(0, {1, 2}), (1, None)]
"""
span_idx2roles = defaultdict(set)
for span_idx, role in gold_comb:
span_idx2roles[span_idx].add(role)
new_comb = []
for span_idx in comb:
new_comb.append((span_idx, span_idx2roles.get(span_idx, None)))
return new_comb
def reveal_event_obj_from_comb_instance(comb_instance, num_fields):
r"""
    from role-assigned comb to event obj
"""
ret_results = [None] * num_fields
for span_idx, roles in comb_instance:
if roles is None:
continue
for role_idx in roles:
ret_results[role_idx] = span_idx
return ret_results
def closest_match(comb, gold_combs):
r"""
get the closest combination with intersection match
Returns:
combination
similarity: 0 if there's no matched span
"""
idx2match = []
for gold_comb_idx, gold_comb in enumerate(gold_combs):
num_match = 0
if isinstance(gold_comb[0], tuple):
num_match = len(set(comb) & set(span_idx[0] for span_idx in gold_comb))
else:
num_match = len(set(comb) & set(span_idx for span_idx in gold_comb))
idx2match.append((gold_comb_idx, num_match))
idx2match.sort(key=lambda x: x[1], reverse=True)
return gold_combs[idx2match[0][0]], idx2match[0][1]
def recover_ins(event_type_fields_list, convert_ids_to_tokens_func, record_mat):
inses = []
for event_idx, events in enumerate(record_mat):
if events is not None:
for ins in events:
tmp_ins = {
"EventType": event_type_fields_list[event_idx][0],
"Arguments": {
event_type_fields_list[event_idx][1][field_idx]: "".join(
convert_ids_to_tokens_func(arg)
)
if arg is not None
else None
for field_idx, arg in enumerate(ins)
},
}
inses.append(tmp_ins)
return inses
def convert_role_fea_event_obj_to_standard(event_type_fields_list, event_objs):
new_event_objs = []
for event_idx, events in enumerate(event_objs):
if events is None:
new_event_objs.append(None)
continue
num_fields = len(event_type_fields_list[event_idx][1])
new_inses = []
for ins in events:
tmp_ins = [None for _ in range(num_fields)]
for arg in ins:
span_idx, field_idx = arg
tmp_ins[field_idx] = span_idx
new_inses.append(tmp_ins)
new_event_objs.append(new_inses)
return new_event_objs
def list_models():
models = dir(importlib.import_module("dee.models"))
models = list(filter(lambda x: x[0].upper() == x[0] and x[0] != "_", models))
return models
def merge_non_conflicting_ins_objs(instances, min_coo=1):
final = []
final_merged = []
merged = set()
for ins1, ins2 in itertools.combinations(instances, 2):
mergable_values = []
coo = 0
for field1, field2 in zip(ins1, ins2):
if field1 is None or field2 is None:
mergable_values.append(True)
continue
if field1 == field2:
coo += 1
mergable_values.append(True)
continue
mergable_values.append(False)
if all(mergable_values) and coo >= min_coo:
# mergable
new_obj = []
for field1, field2 in zip(ins1, ins2):
merged_field = None
if field1 is None:
merged_field = field2
elif field2 is None:
merged_field = field1
else:
# or field2 (here, field1 == field2)
merged_field = field1
new_obj.append(merged_field)
final_merged.append(new_obj)
merged.add(tuple(ins1))
merged.add(tuple(ins2))
for ins in instances:
if tuple(ins) not in merged:
final.append(ins)
return final + final_merged
def list_flatten(lst):
total_list = []
len_mapping = []
for idx, elements in enumerate(lst):
len_mapping += [[idx, i] for i in range(len(elements))]
total_list += elements
return total_list, len_mapping
class RegexEntExtractor(object):
def __init__(self) -> None:
self.field2type = {
# shares
"TotalHoldingShares": "share",
"TotalPledgedShares": "share",
"PledgedShares": "share",
"FrozeShares": "share",
"RepurchasedShares": "share",
"TradedShares": "share",
"LaterHoldingShares": "share",
# ratio
"TotalHoldingRatio": "ratio",
# date
"StartDate": "date",
"ReleasedDate": "date",
"EndDate": "date",
"ClosingDate": "date",
"UnfrozeDate": "date",
# money
"RepurchaseAmount": "money",
"HighestTradingPrice": "money",
"LowestTradingPrice": "money",
"AveragePrice": "money",
# Duee-fin
# shares
"质押股票/股份数量": "share",
"回购股份数量": "share",
"交易股票/股份数量": "share",
# ratio
"质押物占总股比": "ratio",
"质押物占持股比": "ratio",
"占公司总股本比例": "ratio",
"增持部分占总股本比例": "ratio",
"增持部分占所持比例": "ratio",
"减持部分占总股本比例": "ratio",
"减持部分占所持比例": "ratio",
# date
"披露时间": "date",
"披露日期": "date",
"中标日期": "date",
"事件时间": "date",
"回购完成时间": "date",
"被约谈时间": "date",
"收购完成时间": "date",
"交易完成时间": "date",
"破产时间": "date",
# money
"每股交易价格": "money",
"交易金额": "money",
"募资金额": "money",
"发行价格": "money",
"市值": "money",
"融资金额": "money",
"净亏损": "money",
# ccks
# shares
# ratio
# date
"案发时间": "date",
"裁定时间": "date",
"公告时间": "date",
"减持开始日期": "date",
"质押结束日期": "date",
"质押开始日期": "date",
"增持开始日期": "date",
"冻结结束日期": "date",
"冻结开始日期": "date",
"死亡/失联时间": "date",
# money
"资损金额": "money",
"损失金额": "money",
"减持金额": "money",
"质押金额": "money",
"增持金额": "money",
"冻结金额": "money",
"赔付金额": "money",
}
self.field_id2field_name = {}
self.basic_type_id = None # id of `O` label
self.type2func = {
"share": self.extract_share,
"ratio": self.extract_ratio,
"date": self.extract_date,
"money": self.extract_money,
}
@classmethod
def _extract(cls, regex, text, group=0):
results = []
matches = re.finditer(regex, text)
for match in matches:
results.append([match.group(group), match.span(group)])
return results
@classmethod
def extract_share(cls, text):
regex = r"(\d+股)[^票]"
results = cls._extract(regex, text, group=1)
return results
@classmethod
def extract_ratio(cls, text):
regex = r"\d+(\.\d+)?%"
results = cls._extract(regex, text)
return results
@classmethod
def extract_date(cls, text):
regex = r"\d{4}年\d{1,2}月\d{1,2}日"
results = cls._extract(regex, text)
return results
@classmethod
def extract_money(cls, text):
regex = r"\d+(\.\d+)?元"
results = cls._extract(regex, text)
return results
def extract(self, text):
r"""
extract ents from one sentence
Returns:
{
"ratio": [[ent, (start pos, end pos)], ...],
...
}
"""
field2results = defaultdict(list)
for field, func in self.type2func.items():
results = func(text)
if len(results) > 0:
field2results[field].extend(results)
return field2results
def extract_doc(
self, doc: List[str], exclude_ents: Optional[List[str]] = []
) -> Dict[str, List]:
r"""
extract ents from the whole document (multiple lines)
Returns:
{
"ratio": [[ent, (sentence idx, start pos, end pos)], ...],
...
}
"""
field2results = defaultdict(list)
for sent_idx, line in enumerate(doc):
results = self.extract(line)
            for field, fr in results.items():  # qy: field is one of share / ratio / date / money
for match_text, match_span in fr:
if match_text not in exclude_ents:
field2results[field].append(
[match_text, [sent_idx, match_span[0], match_span[1]]]
)
return field2results
regex_extractor = RegexEntExtractor()  # qy: regex-based entity matching
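# Illustrative use of the module-level extractor (the sentence below is made up):
#   regex_extractor.extract_doc(["于2020年1月2日质押1000股,占总股本比例5.5%"])
# returns a dict with "date", "share" and "ratio" entries, each item shaped as
# [matched text, [sentence idx, start pos, end pos]].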
def chain_prod(num_list: List):
return functools.reduce(lambda x, y: x * y, num_list)
| [
"torch.optim.lr_scheduler.LambdaLR"
] | 1.5.1 | qinyan-li/DocEE | e8d2202a44907df5f12f9a67180d849a54421ab7 |
1.5 | # pip install pytorch-lightning
# pip install neptune-client
#%%
from __future__ import print_function
from utils.hessian_penalty.hessian_penalty_pytorch import hessian_penalty
from pytorch_lightning.loggers import WandbLogger
from pytorch_lightning.callbacks.progress import ProgressBar
from sklearn.model_selection import train_test_split
from pytorch_lightning.callbacks.early_stopping import EarlyStopping
from collections import defaultdict
import torch
import torch.utils.data
from torch import nn, optim
from torch.nn import functional as F
import math
import pytorch_lightning as pl
# import utils.plot_utils as utils
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pickle
import wandb
import time
import os
from utils.dsprites.datasets import get_dataloaders
from utils import run_utils, data_utils, utils, metric_utils, settings, disentangle_utils, latent_space_utils # ,morpho_utils
import utils.morphomnist.io as morpho_io
#ToDo EDA:
# - Long Tail graphics
# - Remove user who had less than a threshold of seen items
# - Create Markdown with EDA results
#ToDo input_params:
# Parameter that should be tweakable by invoking the routine:
# - epochs
# - learning_rate
# - batch_size
# - simplified_rating
#ToDo metrics:
# Add https://towardsdatascience.com/evaluation-metrics-for-recommender-systems-df56c6611093
from torchvision import datasets, transforms
seed = 42
torch.manual_seed(seed)
class VAE(pl.LightningModule):
def __init__(self, conf:dict, *args, **kwargs):
super().__init__()
# self.kwargs = kwargs
self.save_hyperparameters(conf)
self.ls_predicted_movies = []
self.is_hessian_penalty_activated = self.hparams["is_hessian_penalty_activated"]
self.used_data = self.hparams["used_data"]
self.expanded_user_item = self.hparams["expanded_user_item"]
self.generative_factors = self.hparams["generative_factors"]
self.mixup = self.hparams["mixup"]
self.np_synthetic_data = self.hparams["synthetic_data"]
self.ls_syn_y = self.hparams["syn_y"]
self.experiment_path_train = conf["experiment_path"]
self.experiment_path_test = self.experiment_path_train
self.beta = self.hparams["beta"]
self.avg_mce = 0.0
self.train_dataset = None
self.test_dataset = None
self.test_size = self.hparams["test_size"]
# self.test_size = 0.005
self.no_latent_factors = self.hparams["latent_dim"]
self.max_unique_movies = 0
self.unique_movies =0
self.np_user_item = None
self.small_dataset = self.hparams["small_dataset"]
self.simplified_rating = self.hparams["simplified_rating"]
self.max_epochs = self.hparams["max_epochs"]
self.dct_index2itemId = None
self.test_y_bin = None
self.df_movies_z_combined =None
# if(self.np_synthetic_data is None):
# self.load_dataset() #additionaly assigns self.unique_movies and self.np_user_item
# self.df_movies = pd.read_csv('../data/generated/df_movies_cleaned3.csv')
# self.dct_attribute_distribution = utils.load_json_as_dict(
# 'attribute_distribution.json') # load relative frequency distributioon from dictionary (pickle it)
#
# else:
# self.train_dataset, self.test_dataset = train_test_split(self.np_synthetic_data, test_size=self.test_size, random_state=42)
# self.train_y, self.test_y = train_test_split(self.ls_syn_y, test_size=self.test_size, random_state=42)
# self.test_y_bin = np.asarray(pd.get_dummies(pd.DataFrame(data=self.test_y)))
# self.unique_movies = self.np_synthetic_data.shape[1]
# self.df_movies = pd.read_csv('../data/generated/syn.csv')
# self.dct_attribute_distribution = utils.load_json_as_dict(
# 'syn_attribute_distribution.json') # load relative frequency distributioon from dictionary (pickle it)
self.bs = 100
        # nn.Linear layer creates a linear function (Wx + b), with its parameters initialized
self.input_dimension = int(self.unique_movies *math.pow(4, self.generative_factors)) if self.expanded_user_item == True else self.unique_movies
if(self.used_data=='morpho'):
self.input_dimension = 28*28
elif(self.used_data=='dsprites'):
self.input_dimension = 64 * 64
self.fc11 = nn.Linear(in_features=self.input_dimension, out_features=1200) #input
self.fc12 = nn.Linear(in_features=1200, out_features=1200) #input
self.fc13 = nn.Linear(in_features=1200, out_features=1200) #input
self.encoder = nn.Sequential(self.fc11, nn.ReLU(),
self.fc12, nn.ReLU(),
self.fc13#, nn.ReLU(),
)
self.fc21 = nn.Linear(in_features=1200, out_features=self.no_latent_factors) #encoder mean
self.fc22 = nn.Linear(in_features=1200, out_features=self.no_latent_factors) #encoder variance
self.fc31 = nn.Linear(in_features=self.no_latent_factors, out_features=1200)
self.fc32 = nn.Linear(in_features=1200, out_features=1200)
self.fc33 = nn.Linear(in_features=1200, out_features=1200)
self.fc331 = nn.Linear(in_features=1200, out_features=1200)
self.fc34 = nn.Linear(in_features=1200, out_features=self.input_dimension)
self.decoder = nn.Sequential(self.fc31, nn.Tanh(),
self.fc32, nn.Tanh(),
self.fc33, nn.Tanh(),
self.fc331, nn.Tanh(),
self.fc34)
self.KLD = None
self.ls_kld = []
self.dis_KLD = None
self.z = None
self.kld_matrix = np.empty((0, self.no_latent_factors))
self.np_z_test = np.empty((0, self.no_latent_factors))#self.test_dataset.shape[0]
self.np_mu_test = np.empty((0, self.no_latent_factors))
self.np_logvar_test = np.empty((0, self.no_latent_factors))
self.np_z_train = np.empty((0, self.no_latent_factors)) # self.test_dataset.shape[0]
self.np_mu_train = np.empty((0, self.no_latent_factors))
self.np_logvar_train = np.empty((0, self.no_latent_factors))
self.sigmoid_annealing_threshold = self.hparams['sigmoid_annealing_threshold']
self.mce_batch_train = None
self.mce_batch_test = None
self.z_mean_train = []
self.z_min_train = []
self.z_max_train = []
# Initialize weights
self.encoder.apply(run_utils.weight_init)
self.decoder.apply(run_utils.weight_init)
self.batch_size =512
# np_user_item, ls_y = sklearn.utils.shuffle(np_user_item, ls_y)
if (self.used_data == 'dsprites'):
self.batch_size =4096
loader = get_dataloaders('dsprites', batch_size=512, shuffle=False)
dsprites_data = loader.dataset.imgs#[:5000]
self.dsprites_lat_names = loader.dataset.lat_names
dsprites_gen_fac_values = loader.dataset.lat_values#[:5000]
# self.train_dataset, self.test_dataset = train_test_split(dsprites_data, test_size=self.test_size, shuffle=False,random_state=42)
#dsprites_data[:int(dsprites_data.shape[0]*0.1)]
self.train_dataset, self.test_dataset = train_test_split(dsprites_data,
test_size=self.test_size, shuffle=True,random_state=42)
self.train_y, self.test_y = train_test_split(dsprites_gen_fac_values,
test_size=self.test_size, shuffle=True,random_state=42)
def encode(self, x):
h1 = F.relu(self.encoder(x))
return self.fc21(h1), self.fc22(h1)
def reparameterize(self, mu, logvar):
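        # Reparameterization trick: z = mu + std * eps with eps ~ N(0, I),
        # keeping the sampling step differentiable w.r.t. mu and logvar.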
std = torch.exp(0.5 * logvar)
eps = torch.randn_like(std)
return mu + eps * std
def decode(self, z):
# return torch.bernoulli(self.decoder(z))
return torch.bernoulli(torch.sigmoid(self.decoder(z)))
def compute_z(self, mu, logvar):
z = self.reparameterize(mu, logvar)
return z
def sample(self, mu, log_var):
std = torch.exp(log_var / 2)
p = torch.distributions.Normal(torch.zeros_like(mu), torch.ones_like(std))
q = torch.distributions.Normal(mu, std)
z = q.rsample()
return p, q, z
def forward(self, x, **kwargs):
if(kwargs):
z = kwargs['z']
mu = kwargs['mu']
logvar = kwargs['logvar']
p = None
q = None
else:
# print(x.view(-1, self.unique_movies)[0])
# print(x[0])
mu, logvar = self.encode(x) #40960/512 (Batchsize) results in 512,80
# z = self.compute_z(mu, logvar)
p, q, z = self.sample(mu, logvar)
self.z = z
return self.decode(z), mu, logvar, p, q
def train_dataloader(self):
# MNIST Dataset
if(self.used_data == 'morpho'):
# MORPHO_MNIST_FILE_TRAIN_Y = "/Users/d069735/workspace/Study/decoding-latent-space-rs/data/morpho-mnist/global/train-pert-idx1-ubyte.gz"
MORPHO_MNIST_FILE_TRAIN_Y = "/models/_output_dir/pm-pert-idx1-ubyte.gz"
MORPHO_MNIST_FILE_TRAIN_X = "/models/_output_dir/pm-images-idx3-ubyte.gz"
self.train_dataset = morpho_io.load_idx(MORPHO_MNIST_FILE_TRAIN_X)[:59000]
self.train_y = morpho_io.load_idx(MORPHO_MNIST_FILE_TRAIN_Y)[:59000]
elif(self.used_data == 'dsprites'):
# train_loader = get_dataloaders('dsprites', batch_size=512, shuffle=False)
train_loader = torch.utils.data.DataLoader(dataset=self.train_dataset, num_workers=0,batch_size=self.batch_size, shuffle=True)
return train_loader
#regular mnist
else:
self.train_dataset = datasets.MNIST(root='../data/mnist_data/', train=True, transform=transforms.ToTensor(), download=True)
self.train_y = self.train_dataset.targets.tolist()
train_loader = torch.utils.data.DataLoader(dataset=self.train_dataset, batch_size=self.batch_size, shuffle=False)
return train_loader
def test_dataloader(self):
#bar = datasets.MNIST(root='../data/mnist_data/', train=False, transform=transforms.ToTensor(),download=False)
if(self.used_data == 'morpho'):
# MORPHO_MNIST_FILE_TEST_Y = "/Users/d069735/workspace/Study/decoding-latent-space-rs/data/morpho-mnist/global/t10k-pert-idx1-ubyte.gz"
MORPHO_MNIST_FILE_TEST_Y = "/models/_output_dir/pm-pert-idx1-ubyte.gz"
# MORPHO_MNIST_FILE_TEST_X = "/Users/d069735/workspace/Study/decoding-latent-space-rs/data/morpho-mnist/global/t10k-images-idx3-ubyte.gz"
MORPHO_MNIST_FILE_TEST_X = "/models/_output_dir/pm-images-idx3-ubyte.gz"
self.test_dataset = morpho_io.load_idx(MORPHO_MNIST_FILE_TEST_X)[59000:60000]
self.test_y = morpho_io.load_idx(MORPHO_MNIST_FILE_TEST_Y)[59000:60000]
elif(self.used_data == 'dsprites'):
# self.test_dataset =
test_loader = torch.utils.data.DataLoader(dataset=self.test_dataset, batch_size=int(self.batch_size/2), shuffle=False)
return test_loader
#regular mnist
else:
self.test_y = self.test_dataset.targets.tolist()
test_loader = torch.utils.data.DataLoader(dataset=self.test_dataset, batch_size=self.bs, shuffle=False)
return test_loader
def configure_optimizers(self):
optimizer = optim.Adagrad(self.parameters(), lr=1e-2) #Adam
# criterion = nn.MSELoss() # mean-squared error loss
# scheduler = StepLR(optimizer, step_size=1)
return optimizer#, scheduler
def collect_z_values(self, ts_mu_chunk, ts_logvar_chunk):#, ls_y
start = time.time()
ls_grad_z = self.compute_z(ts_mu_chunk, ts_logvar_chunk)
self.np_z_train = np.append(self.np_z_train, np.asarray(ls_grad_z.tolist()),
axis=0) # TODO Describe in thesis that I get back a grad object instead of a pure tensor as it is in the test method since we are in the training method.
self.np_mu_train = np.append(self.np_mu_train, np.asarray(ts_mu_chunk.tolist()), axis=0)
self.np_logvar_train = np.append(self.np_logvar_train, np.asarray(ts_logvar_chunk.tolist()), axis=0)
# print('Shape np_z_train: {}'.format(self.np_z_train.shape))
z_mean = self.np_z_train.mean(axis=0)
z_min = self.np_z_train.min(axis=0)
z_max = self.np_z_train.max(axis=0)
if(len(self.z_mean_train) == 0):
self.z_mean_train = z_mean
self.z_min_train = z_min
self.z_max_train = z_max
else:
self.z_mean_train = (z_mean + self.z_mean_train) / 2
self.z_max_train = np.amax(np.vstack((self.z_max_train, z_max)), axis=0) #Stack old and new together and find the max
self.z_min_train = np.amin(np.vstack((self.z_min_train, z_min)), axis=0)
# print('collect_z_values in seconds: {}'.format(time.time() - start))
def average_mce_batch(self, mce_batch, mce_mini_batch):
if (mce_batch == None):
mce_batch = mce_mini_batch
else:
for key_lf, mce_lf in mce_batch.items():
for key, val in mce_lf.items():
new_val = mce_mini_batch[key_lf].get(key)
if(new_val):
mce_batch[key_lf][key] = (new_val + val)/2
return mce_batch
#taken from https://github.com/facebookresearch/mixup-cifar10
def mixup_data(self, x, y, alpha=1.0, use_cuda=True):
'''Returns mixed inputs, pairs of targets, and lambda'''
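        # lam is drawn from Beta(alpha, alpha); the batch is convexly combined with a shuffled
        # copy of itself, and both target sets are returned so the loss can be mixed with the same lam.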
if alpha > 0:
lam = np.random.beta(alpha, alpha)
else:
lam = 1
batch_size = x.size()[0]
if use_cuda:
index = torch.randperm(batch_size).cuda()
else:
index = torch.randperm(batch_size)
mixed_x = lam * x + (1 - lam) * x[index, :]
y_a, y_b = y, y[index]
return mixed_x, y_a, y_b, lam
def training_step(self, batch, batch_idx):
mce_minibatch=None
# print('train step')
# (data, _) = batch
if(self.used_data=='morpho'):
ts_batch_user_features = batch.view(-1, 784).float() / 255. #.unsqueeze(1)
elif(self.used_data=='dsprites'):
ts_batch_user_features = batch.view(-1, 64*64).float() / 255. #.unsqueeze(1)
recon_batch, ts_mu_chunk, ts_logvar_chunk, p, q = self.forward(ts_batch_user_features) # sample data
if(self.current_epoch == self.max_epochs-1):
# print("Last round..")
self.collect_z_values(ts_mu_chunk, ts_logvar_chunk)#, ls_preference
if (self.current_epoch == self.sigmoid_annealing_threshold ):
self.collect_z_values(ts_mu_chunk, ts_logvar_chunk)
# mce_minibatch = metric_utils.mce_batch(self, ts_batch_user_features, self.dct_index2itemId, k=3)
# self.mce_batch_train = self.average_mce_batch(self.mce_batch_train, mce_minibatch)
batch_mse, batch_kld = self.loss_function(recon_batch.view(-1, 64*64),
ts_batch_user_features, #ts_batch_user_features,
ts_mu_chunk,
ts_logvar_chunk,
self.beta,
self.unique_movies,
p,
q,
new_kld_function = False)
hp_loss =0
batch_mse = batch_mse / len(ts_batch_user_features)
if(self.is_hessian_penalty_activated and self.current_epoch > int(1/3*self.max_epochs-1)):
print('<---- Applying Hessian Penalty ---->')
np_z = self.compute_z(ts_mu_chunk, ts_logvar_chunk)
hp_loss = hessian_penalty(G=self.decode, z=np_z)
print('Hessian Penalty:{}'.format(hp_loss))
batch_loss = hp_loss
else:
batch_loss = batch_mse + batch_kld
self.ls_kld.append(self.KLD.tolist())
#Additional logs go into tensorboard_logs
tensorboard_logs = {'train_loss': batch_loss,
'KLD-Train': batch_kld,
'MSE-Train': batch_mse} #
return {'loss': batch_loss,
'log': tensorboard_logs,
'logvar': np.asarray(ts_mu_chunk.tolist()).mean(axis=0),
'mu': np.asarray(ts_mu_chunk.tolist()).mean(axis=0)}
def training_epoch_end(self, outputs):
print("Saving MCE before KLD is applied...")
avg_logvar = np.array([x['logvar'] for x in outputs]).mean(axis=0)
# avg_var = np.array([x['var'] for x in outputs]).mean(axis=0)
avg_mu = np.array([x['mu'] for x in outputs]).mean(axis=0)
self.np_logvar_train = np.append(self.np_logvar_train, [avg_logvar], axis=0)
self.np_mu_train = np.append(self.np_mu_train, [avg_mu], axis=0)
if(self.current_epoch == self.sigmoid_annealing_threshold ):
utils.save_dict_as_json(self.mce_batch_train, 'mce_results_wo_kld.json', self.experiment_path_train)
return {}
# def validation_step(self, batch, batch_idx):
# return 0
def test_step(self, batch, batch_idx):
# print('test step')
batch_mce =0
test_loss = 0
# self.eval()
# ts_batch_user_features = batch.view(-1, self.input_dimension)
# (data, _) = batch
if (self.used_data == 'morpho'):
ts_batch_user_features = batch.view(-1, 784).float() / 255.
elif (self.used_data == 'dsprites'):
ts_batch_user_features = batch.view(-1, 64*64).float() / 255.
if (self.mixup):
ts_batch_user_features, y_a, y_b, lam = self.mixup_data(ts_batch_user_features, self.test_y_bin,
alpha=1.0, use_cuda=False)
recon_batch, ts_mu_chunk, ts_logvar_chunk, p, q = self.forward(ts_batch_user_features)
ls_z = self.compute_z(ts_mu_chunk, ts_logvar_chunk)
self.ls_predicted_movies.extend((-recon_batch).argsort()[:,0].tolist())
self.np_z_test = np.append(self.np_z_test, np.asarray(ls_z), axis=0) #TODO get rid of np_z_chunk and use np.asarray(mu_chunk)
self.np_mu_test = np.append(self.np_mu_test, np.asarray(ts_mu_chunk), axis =0)
self.np_logvar_test = np.append(self.np_logvar_test, np.asarray(ts_logvar_chunk), axis =0)
# self.np_z = np.vstack((self.np_z, np_z_chunk))
# batch_rmse_w_zeros, batch_mse_w_zeros, batch_rmse, batch_mse = self.calculate_batch_metrics(recon_batch=recon_batch, ts_batch_user_features =ts_batch_user_features)
batch_mse, kld = self.loss_function(recon_batch,
ts_batch_user_features, #ts_batch_user_features,
ts_mu_chunk,
ts_logvar_chunk,
self.beta,
self.unique_movies,
p,
q,
new_kld_function=False)
batch_loss = batch_mse + kld
# mce_minibatch = metric_utils.mce_batch(self, ts_batch_user_features, self.dct_index2itemId, k=3)
# self.mce_batch_test = self.average_mce_batch(self.mce_batch_test, mce_minibatch)
#to be rermoved mean_mce = { for single_mce in batch_mce}
loss = batch_loss.item() / len(ts_batch_user_features)
# bce = batch_bce/len(ts_batch_user_features)
tensorboard_logs = {'KLD-Test': kld,
'MSE-test': batch_mse}
return {'test_loss': loss,
# 'rmse': batch_rmse,
'mse': batch_mse,
# 'rmse_w_zeros': batch_rmse_w_zeros,
# 'mse_w_zeros': batch_mse_w_zeros,
'log':tensorboard_logs,
'KLD-Test': kld,
'MSE-Test': batch_mse
}
# test_loss /= len(test_loader.dataset)
# print('====> Test set loss: {:.4f}'.format(test_loss))
def test_epoch_end(self, outputs):
avg_loss = np.array([x['test_loss'] for x in outputs]).mean()
mse_test = np.array([x['MSE-Test'] for x in outputs])
kld_test =np.array([x['KLD-Test'] for x in outputs])
# ls_mce = {x['mce'] for x in outputs}
# utils.save_dict_as_json(self.mce_batch_test, 'mce_results.json', self.experiment_path_test)
# avg_mce = dict(utils.calculate_mean_of_ls_dict(ls_mce))
# avg_rmse = np.array([x['rmse'] for x in outputs]).mean()
# avg_rmse_w_zeros = np.array([x['rmse_w_zeros'] for x in outputs]).mean()
# avg_mse = np.array([x['mse'] for x in outputs]).mean()
# avg_mse_w_zeros = np.array([x['mse_w_zeros'] for x in outputs]).mean()
tensorboard_logs = {'test_loss': avg_loss, 'MSE-Test':mse_test,'KLD-Test': kld_test }
assert len(mse_test)==len(kld_test)
for i in range(0, len(mse_test)):
wandb_logger.log_metrics({'MSE-Test': mse_test[i],'KLD-Test': kld_test[i]} )
# wandb_logger.log_metrics({'rmse': avg_rmse,
# 'rmse_w_zeros':avg_rmse_w_zeros,
# 'mse': avg_mse,
# 'mse_w_zeros': avg_mse_w_zeros})#, 'kld_matrix':self.kld_matrix
return {'test_loss': avg_loss,
'log': tensorboard_logs,
# 'rmse': avg_rmse,
'MSE-Test':mse_test,
'KLD-test': kld_test }#, , 'mce':avg_mce
def sigmoid_annealing(self, beta, epoch):
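        # KL annealing: the KLD weight stays at 0 until `sigmoid_annealing_threshold` epochs have
        # passed, then ramps up towards `beta` along a sigmoid controlled by `stretch_factor`.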
stretch_factor = 0.5
if(epoch < self.sigmoid_annealing_threshold):
return 0
else:
kld_weight = beta/(1+ math.exp(-epoch * stretch_factor + self.sigmoid_annealing_threshold)) #epoch_threshold moves e function along the x-axis
return kld_weight
def kl_divergence(self,p, q):
return np.sum(np.where(p != 0, p * np.log(p / q), 0))
def new_kld_func(self, p, q):
log_qz = q.log_prob(self.z)
log_pz = p.log_prob(self.z)
kl = log_qz - log_pz
return kl
# Reconstruction + KL divergence losses summed over all elements and batch
def loss_function(self, recon_x, x, mu, logvar, beta, unique_movies, p, q, new_kld_function=False):
# MSE = F.binary_cross_entropy(recon_x, x,reduction='sum')
MSE = F.binary_cross_entropy(recon_x, torch.bernoulli(x),reduction='sum')
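        # Note: despite the name, `MSE` holds a summed binary cross-entropy here, computed against
        # a Bernoulli sample of the input rather than the raw input.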
kld_latent_factors = torch.exp(logvar) + mu ** 2 - 1. - logvar
kld_mean = -0.5 * torch.mean(
torch.sum(-kld_latent_factors, dim=1))
self.kld_matrix = np.append(self.kld_matrix, np.asarray(kld_latent_factors.tolist()), axis=0)
if (self.training):
kld_weight = self.sigmoid_annealing(beta, self.current_epoch)
else:
kld_weight = beta
self.KLD = kld_mean * kld_weight
return MSE, self.KLD
def calculate_batch_metrics(self, recon_batch, ts_batch_user_features):
# Compute MSE
# TODO MOre generic ...
# mask = generate_mask(ts_batch_user_features, tsls_yhat_user, user_based_items_filter=loss_user_items_only)
# tsls_yhat_user_filtered = tsls_yhat_user[~mask] # Predicted: Filter out unseen+unrecommended items
# ts_user_features_seen = ts_batch_user_features[~mask] # Ground Truth: Filter out unseen+unrecommended items
# TODO ...than this approach
batch_rmse = 0
batch_mse = 0
batch_rmse_wo_zeros = 0
batch_mse_wo_zeros = 0
ls_yhat_user = recon_batch * ts_batch_user_features # Set all items to zero that are of no interest and haven't been seen
for idx, tensor in enumerate(ls_yhat_user):
np_y = ts_batch_user_features[idx].data.numpy()
np_y_wo_zeros = np_y[np.nonzero(np_y)] # inner returns the index
np_yhat = tensor.data.numpy()
np_yhat_wo_zeros = np_yhat[np.nonzero(np_y)] #This must be np_y
rmse, mse = metric_utils.calculate_metrics(np_y, np_yhat)
batch_mse += mse
batch_rmse += rmse
if(len(np_yhat_wo_zeros)>0):
rmse_wo_zeros, mse_wo_zeros = metric_utils.calculate_metrics(np_y_wo_zeros, np_yhat_wo_zeros)
batch_rmse_wo_zeros += rmse_wo_zeros
batch_mse_wo_zeros += mse_wo_zeros
# batch_rmse, batch_mse = utils.calculate_metrics(ts_batch_user_features,ls_yhat_user)
avg_rmse = batch_rmse / ls_yhat_user.shape[0]
avg_rmse_wo_zeros = batch_rmse_wo_zeros / ls_yhat_user.shape[0]
avg_mse = batch_mse / ls_yhat_user.shape[0]
avg_mse_wo_zeros = batch_mse_wo_zeros / ls_yhat_user.shape[0]
return avg_rmse, avg_mse, avg_rmse_wo_zeros, avg_mse_wo_zeros
def load_attributes_and_files(self, path): #'filename.pickle'
with open(path, 'rb') as handle:
dct_attributes = pickle.load(handle)
self.np_z_train = dct_attributes['np_z_train']
self.np_logvar_train = dct_attributes['np_logvar_train']
self.np_mu_train = dct_attributes['np_mu_train']
self.train_y = dct_attributes['train_y']
self.ls_kld = dct_attributes['ls_kld']
# self.dct_attribute_distribution = utils.load_json_as_dict(
# 'attribute_distribution.json') # load relative frequency distributioon from dictionary (pickle it)
self.z_max_train = dct_attributes['z_max_train']
self.z_min_train = dct_attributes['z_min_train']
self.z_mean_train = dct_attributes['z_mean_train']
print('Attributes loaded')
def save_attributes(self, path):
dct_attributes = {'np_z_train':self.np_z_train,
'np_logvar_train': self.np_logvar_train,
'np_mu_train': self.np_mu_train,
'train_y': self.train_y,
'ls_kld':self.ls_kld,
'z_max_train': self.z_max_train,
'z_min_train': self.z_min_train,
'z_mean_train': self.z_mean_train}
with open(path, 'wb') as handle:
pickle.dump(dct_attributes, handle)
print('Attributes saved')
def generate_distribution_df():
dct_attribute_distribution = utils.compute_relative_frequency(
pd.read_csv('../data/generated/syn.csv'))
utils.save_dict_as_json(dct_attribute_distribution, 'syn_attribute_distribution.json')
if __name__ == '__main__':
torch.manual_seed(100)
args = run_utils.create_training_args()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # use gpu if available
settings.init()
#%%
train = False
synthetic_data = True
expanded_user_item = False
mixup = False
is_hessian_penalty_activated = False
continous_data=False
normalvariate = False
# morpho = True
# used_data = "morpho"
used_data = "dsprites"
base_path = 'results/models/vae/'
    ls_epochs = [70]  # 5 with new data, 70 was trained w/ old mnist
    # Note: disentanglement becomes stronger as the number of epochs increases
#
ls_latent_factors = [10]
ls_betas = [4] #disentangle_factors .0003
no_generative_factors = 3
for epoch in ls_epochs:
for lf in ls_latent_factors:
if(len(ls_betas)==0):
if(expanded_user_item):
beta_normalized = lf/(800)
else:
beta_normalized = lf / (20 * no_generative_factors) #lf/input_size, e.g. 2/10000 = 0.0002
ls_betas.append(beta_normalized)
for beta in ls_betas:
train_tag = "train"
if(not train):
train_tag = "test"
print("Processing model with: {} epochs, {} latent factors, {} beta".format(epoch, lf, beta))
exp_name = "dsprites_mse_{}_data_{}_beta_{}_epochs_{}_lf_synt_{}_normal_{}_continous_{}_hessian_{}".format(used_data, beta, epoch, lf, synthetic_data, normalvariate, continous_data, is_hessian_penalty_activated)
wandb_name = exp_name + "_" + train_tag
model_name = exp_name + ".ckpt"
attribute_name = exp_name + "_attributes.pickle"
model_path = base_path + model_name
attribute_path = base_path + attribute_name
experiment_path = utils.create_experiment_directory()
model_params = run_utils.create_model_params(experiment_path, epoch, lf, beta, int(epoch / 4), expanded_user_item, mixup,
no_generative_factors, epoch, is_hessian_penalty_activated, used_data)
args.max_epochs = epoch
wandb_logger = WandbLogger(project='recommender-xai', tags=['morpho', train_tag], name=wandb_name)
trainer = pl.Trainer.from_argparse_args(args,
logger=wandb_logger, #False
gpus=0,
weights_summary='full',
checkpoint_callback = False,
callbacks = [ProgressBar(), EarlyStopping(monitor='train_loss')]
)
if(train):
print('<---------------------------------- VAE Training ---------------------------------->')
print("Running with the following configuration: \n{}".format(args))
if (synthetic_data):
model_params['synthetic_data'], model_params['syn_y'] = data_utils.create_synthetic_data(no_generative_factors,
experiment_path,
expanded_user_item,
continous_data,
normalvariate,
False)
generate_distribution_df()
model = VAE(model_params)
wandb_logger.watch(model, log='gradients', log_freq=100)
# utils.print_nn_summary(model, size =200)
print('------ Start Training ------')
trainer.fit(model)
kld_matrix = model.KLD
# print('% altering has provided information gain:{}'.format(
# int(settings.ig_m_hat_cnt) / (int(settings.ig_m_cnt) + int(settings.ig_m_hat_cnt))))
# model.dis_KLD
print('------ Saving model ------')
trainer.save_checkpoint(model_path)
model.save_attributes(attribute_path)
print('------ Load model -------')
#Sample
# z = torch.randn(64, lf)
# sample = model.decode(z)
# save_image(sample.view(64, 1, 28, 28), './results/mnist_imgs/sample_morpho_20' + '.png')
# base_curated_test_path = "results/models/curated_vae/"
# model_path = base_curated_test_path + "4_beta_10_epochs_10_lf_synt_True.ckpt"
# model_path = base_curated_test_path + "4_beta_16_epochs_10_lf_synt_True.ckpt"
# model_path = base_curated_test_path + "4_beta_4_epochs_10_lf_synt_True.ckpt"
# model_path = base_curated_test_path + "4_beta_20_epochs_10_lf_synt_True.ckpt"
# attribute_path = base_curated_test_path + "4_beta_10_epochs_10_lf_synt_True_attributes.pickle"
# attribute_path = base_curated_test_path + "4_beta_16_epochs_10_lf_synt_True_attributes.pickle"
# attribute_path = base_curated_test_path + "4_beta_4_epochs_10_lf_synt_True_attributes.pickle"
# attribute_path = base_curated_test_path + "4_beta_20_epochs_10_lf_synt_True_attributes.pickle"
test_model = VAE.load_from_checkpoint(model_path)#, load_saved_attributes=True, saved_attributes_path='attributes.pickle'
# test_model.test_size = model_params['test_size']
test_model.load_attributes_and_files(attribute_path)
test_model.experiment_path_test = experiment_path
# print("show np_z_train mean:{}, min:{}, max:{}".format(z_mean_train, z_min_train, z_max_train ))
print('------ Start Test ------')
start = time.time()
trainer.test(test_model)  # the test loop only runs when trainer.test() is called explicitly
# print('Test time in seconds: {}'.format(time.time() - start))
# print('% altering has provided information gain:{}'.format( int(settings.ig_m_hat_cnt)/(int(settings.ig_m_cnt)+int(settings.ig_m_hat_cnt) )))
# print(results)
dct_param = {'epochs': epoch, 'lf': lf, 'beta': beta}
# plot_utils.plot_results(test_model,
# test_model.experiment_path_test,
# test_model.experiment_path_train,
# dct_param )
disentangle_utils.run_disentanglement_eval(test_model, experiment_path, dct_param)
artifact = wandb.Artifact('Plots', type='result')
artifact.add_dir(experiment_path)#, name='images'
wandb_logger.experiment.log_artifact(artifact)
working_directory = os.path.abspath(os.getcwd())
absolute_path = working_directory + "/" + experiment_path + "images/"
ls_path_images = [absolute_path + file_name for file_name in os.listdir(absolute_path)]
# wandb.log({"images": [wandb.Image(plt.imread(img_path)) for img_path in ls_path_images]})
dct_images = {img_path.split(sep='_')[2].split(sep='/')[-1]: wandb.Image(plt.imread(img_path)) for img_path in ls_path_images}
wandb.log(dct_images)
# wandb.log({"example_1": wandb.Image(...), "example_2",: wandb.Image(...)})
#TODO Bring back in
# neptune_logger.experiment.log_image('MCEs',"./results/images/mce_epochs_"+str(max_epochs)+".png")
# neptune_logger.experiment.log_artifact("./results/images/mce_epochs_"+str(max_epochs)+".png")
print('Test done')
exit()
| [
"torch.nn.Linear",
"torch.randperm",
"torch.cuda.is_available",
"torch.exp",
"torch.sum",
"torch.manual_seed",
"torch.randn_like",
"torch.utils.data.DataLoader",
"torch.zeros_like",
"torch.nn.Tanh",
"torch.nn.ReLU",
"torch.distributions.Normal",
"torch.bernoulli",
"torch.ones_like"
] | 1.5.1 | sneakyPad/decoding-latent-space-rs | bc7bfba5d6cf5a9d72f5c5393f394dee1025441a |
1.1 | import torch
import torch.nn as nn
import numpy as np
from ...utils import box_utils
from .point_head_template import PointHeadTemplate
from ...ops.roiaware_pool3d import roiaware_pool3d_utils
class SoftmaxFocalClassificationLoss(nn.Module):
"""
Softmax focal cross entropy loss.
"""
def __init__(self, gamma: float = 2.0, alpha: float = 0.25):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super(SoftmaxFocalClassificationLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
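# For reference: the standard softmax focal loss (Lin et al., 2017) scales the
# per-element cross entropy by alpha * (1 - pt) ** gamma with pt = exp(-CE),
# down-weighting well-classified examples. In forward() below the focal term is
# currently commented out, so plain weighted cross entropy is returned.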
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor):
"""
Args:
input: (N, num_class + 1) float tensor.
Predicted logits per point (background + classes).
target: (N,) long tensor.
Class-index targets as expected by nn.functional.cross_entropy
(get_cls_layer_loss passes the flattened point_cls_labels here).
weights: (N,) float tensor.
Point-wise weights.
Returns:
loss: scalar tensor, the weighted cross entropy averaged over all points.
"""
ce_loss = nn.functional.cross_entropy(input, target, reduction='none')  # target must be int64 class indices
pt = torch.exp(-ce_loss)
#loss = self.alpha * (1-pt)**self.gamma * ce_loss
loss = ce_loss
print('loss shape:')
print(loss.shape, weights.shape)
if weights.shape.__len__() == 2 or \
(weights.shape.__len__() == 1 and target.shape.__len__() == 2):
weights = weights.unsqueeze(-1)
assert weights.shape.__len__() == loss.shape.__len__()
loss *= weights
loss = loss.mean()
return loss
class PointSegHead(PointHeadTemplate):
"""
A point segmentation head.
Reference Paper: https://arxiv.org/abs/1706.02413
"""
def __init__(self, num_class, input_channels, model_cfg, **kwargs):
super().__init__(model_cfg=model_cfg, num_class=num_class)
self.cls_layers = self.make_fc_layers(
fc_cfg=self.model_cfg.CLS_FC,
input_channels=input_channels,
output_channels=num_class+1
)
def assign_targets(self, input_dict):
"""
Args:
input_dict:
point_features: (N1 + N2 + N3 + ..., C)
batch_size:
point_coords: (N1 + N2 + N3 + ..., 4) [bs_idx, x, y, z]
gt_boxes (optional): (B, M, 8)
Returns:
point_cls_labels: (N1 + N2 + N3 + ...), long type, 0:background, -1:ignored
"""
point_coords = input_dict['point_coords']
gt_boxes = input_dict['gt_boxes']
assert gt_boxes.shape.__len__() == 3, 'gt_boxes.shape=%s' % str(gt_boxes.shape)
assert point_coords.shape.__len__() in [2], 'points.shape=%s' % str(point_coords.shape)
batch_size = gt_boxes.shape[0]
extend_gt_boxes = box_utils.enlarge_box3d(
gt_boxes.view(-1, gt_boxes.shape[-1]) # extra_width=self.model_cfg.TARGET_CONFIG.GT_EXTRA_WIDTH
).view(batch_size, -1, gt_boxes.shape[-1])
targets_dict = self.assign_stack_targets(
points=point_coords, gt_boxes=gt_boxes, extend_gt_boxes=extend_gt_boxes,
set_ignore_flag=True, use_ball_constraint=False,
ret_part_labels=False
)
return targets_dict
def forward(self, batch_dict):
"""
Args:
batch_dict:
batch_size:
point_features: (N1 + N2 + N3 + ..., C) or (B, N, C)
point_features_before_fusion: (N1 + N2 + N3 + ..., C)
point_coords: (N1 + N2 + N3 + ..., 4) [bs_idx, x, y, z]
point_labels (optional): (N1 + N2 + N3 + ...)
gt_boxes (optional): (B, M, 8)
Returns:
batch_dict:
point_cls_scores: (N1 + N2 + N3 + ..., 1)
"""
if self.model_cfg.get('USE_POINT_FEATURES_BEFORE_FUSION', False):
point_features = batch_dict['point_features_before_fusion']
else:
point_features = batch_dict['point_features']
point_cls_preds = self.cls_layers(point_features) # (total_points, num_class)
ret_dict = {
'point_cls_preds': point_cls_preds
}
softmax = nn.Softmax(dim=1)
point_cls_scores = softmax(point_cls_preds)
_, batch_dict['point_cls_scores'] = torch.max(point_cls_scores, 1)
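# torch.max over dim 1 returns (values, indices); despite its name,
# batch_dict['point_cls_scores'] therefore holds the argmax class index per point,
# not the softmax probability.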
# print('detected scores:')
# print(batch_dict['point_cls_scores'][:200])
if self.training:
targets_dict = self.assign_targets(batch_dict)
ret_dict['point_cls_labels'] = targets_dict['point_cls_labels']
else:  # in eval mode, targets are assigned only to expose debugging info
targets_dict = self.assign_targets(batch_dict)
ret_dict['point_cls_labels'] = targets_dict['point_cls_labels']
self.forward_ret_dict = ret_dict
return batch_dict
def build_losses(self, losses_cfg):
self.add_module(
'cls_loss_func',
SoftmaxFocalClassificationLoss(alpha=0.25, gamma=2.0)
)
def get_loss(self, tb_dict=None):
tb_dict = {} if tb_dict is None else tb_dict
point_loss_cls, tb_dict_1 = self.get_cls_layer_loss()
point_loss = point_loss_cls
tb_dict.update(tb_dict_1)
return point_loss, tb_dict
def get_cls_layer_loss(self, tb_dict=None):
point_cls_labels = self.forward_ret_dict['point_cls_labels'].view(-1)
point_cls_preds = self.forward_ret_dict['point_cls_preds'].view(-1, self.num_class+1)
positives = (point_cls_labels > 0)
negative_cls_weights = (point_cls_labels == 0) * 1.0
cls_weights = (negative_cls_weights + 10.0 * positives).float()
pos_normalizer = positives.sum(dim=0).float()
cls_weights /= torch.clamp(pos_normalizer, min=1.0)
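# Background points get weight 1.0 and foreground points weight 10.0; dividing by
# the (clamped) number of positives keeps the loss scale stable when foreground
# points are rare.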
####################################### Weights Info
# print('Weights Info')
# print(cls_weights.shape)
# #print(point_cls_labels[:200])
# #print(point_cls_preds[:200])
# label = point_cls_labels.cpu().numpy()
# u,count = np.unique(label, return_counts=True)
# print(u)
# print(count)
# #print(cls_weights[:200])
# weight = cls_weights.tolist()
# print(len(set(weight)))
########################################
# one_hot_targets = point_cls_preds.new_zeros(*list(point_cls_labels.shape), self.num_class + 2)
# one_hot_targets.scatter_(-1, (point_cls_labels * (point_cls_labels >= 0).long()).unsqueeze(dim=-1).long(), 1.0)
# one_hot_targets = one_hot_targets[..., 1:]
# print(one_hot_targets.shape)
# print(one_hot_targets[:3, :])
# cls_loss_src = self.cls_loss_func(point_cls_preds, one_hot_targets, weights=cls_weights)
# point_loss_cls = cls_loss_src.sum()
point_loss_cls = self.cls_loss_func(point_cls_preds, point_cls_labels, cls_weights)
# loss_weights_dict = self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS
# point_loss_cls = point_loss_cls * loss_weights_dict['point_cls_weight']
if tb_dict is None:
tb_dict = {}
tb_dict.update({
'point_loss_cls': point_loss_cls.item(),
'point_pos_num': pos_normalizer.item()
})
return point_loss_cls, tb_dict
| [
"torch.nn.Softmax",
"torch.max",
"torch.clamp",
"torch.nn.functional.cross_entropy",
"torch.exp"
] | 1.1 | kathy-lee/astyx-pcdet | 355bfd88c37e1b717482f651778c1d4cb2f647d2 |
0.4 | # --------------------------------------------------------
# SiamMask
# Licensed under The MIT License
# Written by Qiang Wang (wangqiang2015 at ia.ac.cn)
# --------------------------------------------------------
from __future__ import division
import argparse
import logging
import numpy as np
import cv2
from PIL import Image
from os import makedirs
from os.path import join, isdir, isfile
from utils.log_helper import init_log, add_file_handler
from utils.load_helper import load_pretrain
from utils.bbox_helper import get_axis_aligned_bbox, cxy_wh_2_rect
from utils.benchmark_helper import load_dataset, dataset_zoo
import torch
from torch.autograd import Variable
import torch.nn.functional as F
from utils.anchors import Anchors
from utils.tracker_config import TrackerConfig
from utils.config_helper import load_config
from utils.pyvotkit.region import vot_overlap, vot_float2str
thrs = np.arange(0.3, 0.5, 0.05)
parser = argparse.ArgumentParser(description='Test SiamMask')
parser.add_argument('--arch', dest='arch', default='', choices=['Custom',],
help='architecture of pretrained model')
parser.add_argument('--config', dest='config', required=True, help='hyper-parameter for SiamMask')
parser.add_argument('--resume', default='', type=str, required=True,
metavar='PATH', help='path to latest checkpoint (default: none)')
parser.add_argument('--mask', action='store_true', help='whether use mask output')
parser.add_argument('--refine', action='store_true', help='whether use mask refine output')
parser.add_argument('--dataset', dest='dataset', default='VOT2018', choices=dataset_zoo,
help='datasets')
parser.add_argument('-l', '--log', default="log_test.txt", type=str, help='log file')
parser.add_argument('-v', '--visualization', dest='visualization', action='store_true',
help='whether visualize result')
parser.add_argument('--save_mask', action='store_true', help='whether use save mask for davis')
parser.add_argument('--gt', action='store_true', help='whether use gt rect for davis (Oracle)')
parser.add_argument('--video', default='', type=str, help='test special video')
parser.add_argument('--cpu', action='store_true', help='cpu mode')
parser.add_argument('--debug', action='store_true', help='debug mode')
def to_torch(ndarray):
if type(ndarray).__module__ == 'numpy':
return torch.from_numpy(ndarray)
elif not torch.is_tensor(ndarray):
raise ValueError("Cannot convert {} to torch tensor"
.format(type(ndarray)))
return ndarray
def im_to_torch(img):
img = np.transpose(img, (2, 0, 1)) # C*H*W
img = to_torch(img).float()
return img
def get_subwindow_tracking(im, pos, model_sz, original_sz, avg_chans, out_mode='torch'):
if isinstance(pos, float):
pos = [pos, pos]
sz = original_sz
im_sz = im.shape
c = (original_sz + 1) / 2
context_xmin = round(pos[0] - c)
context_xmax = context_xmin + sz - 1
context_ymin = round(pos[1] - c)
context_ymax = context_ymin + sz - 1
left_pad = int(max(0., -context_xmin))
top_pad = int(max(0., -context_ymin))
right_pad = int(max(0., context_xmax - im_sz[1] + 1))
bottom_pad = int(max(0., context_ymax - im_sz[0] + 1))
context_xmin = context_xmin + left_pad
context_xmax = context_xmax + left_pad
context_ymin = context_ymin + top_pad
context_ymax = context_ymax + top_pad
# zzp: a simpler and faster version
r, c, k = im.shape
if any([top_pad, bottom_pad, left_pad, right_pad]):
te_im = np.zeros((r + top_pad + bottom_pad, c + left_pad + right_pad, k), np.uint8)
te_im[top_pad:top_pad + r, left_pad:left_pad + c, :] = im
if top_pad:
te_im[0:top_pad, left_pad:left_pad + c, :] = avg_chans
if bottom_pad:
te_im[r + top_pad:, left_pad:left_pad + c, :] = avg_chans
if left_pad:
te_im[:, 0:left_pad, :] = avg_chans
if right_pad:
te_im[:, c + left_pad:, :] = avg_chans
im_patch_original = te_im[int(context_ymin):int(context_ymax + 1), int(context_xmin):int(context_xmax + 1), :]
else:
im_patch_original = im[int(context_ymin):int(context_ymax + 1), int(context_xmin):int(context_xmax + 1), :]
if not np.array_equal(model_sz, original_sz):
im_patch = cv2.resize(im_patch_original, (model_sz, model_sz))
else:
im_patch = im_patch_original
# cv2.imshow('crop', im_patch)
# cv2.waitKey(0)
return im_to_torch(im_patch) if out_mode in 'torch' else im_patch
def generate_anchor(cfg, score_size):
anchors = Anchors(cfg)
anchor = anchors.anchors
x1, y1, x2, y2 = anchor[:, 0], anchor[:, 1], anchor[:, 2], anchor[:, 3]
anchor = np.stack([(x1+x2)*0.5, (y1+y2)*0.5, x2-x1, y2-y1], 1)
total_stride = anchors.stride
anchor_num = anchor.shape[0]
anchor = np.tile(anchor, score_size * score_size).reshape((-1, 4))
ori = - (score_size // 2) * total_stride
xx, yy = np.meshgrid([ori + total_stride * dx for dx in range(score_size)],
[ori + total_stride * dy for dy in range(score_size)])
xx, yy = np.tile(xx.flatten(), (anchor_num, 1)).flatten(), \
np.tile(yy.flatten(), (anchor_num, 1)).flatten()
anchor[:, 0], anchor[:, 1] = xx.astype(np.float32), yy.astype(np.float32)
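# anchor now has shape (anchor_num * score_size**2, 4) in (cx, cy, w, h) form: each
# base anchor is replicated over a score_size x score_size grid centred on the
# search region, with grid spacing equal to the network stride.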
return anchor
def siamese_init(im, target_pos, target_sz, model, hp=None, device='cpu'):
state = dict()
state['im_h'] = im.shape[0]
state['im_w'] = im.shape[1]
p = TrackerConfig()
p.update(hp, model.anchors)
p.renew()
net = model
p.scales = model.anchors['scales']
p.ratios = model.anchors['ratios']
p.anchor_num = model.anchor_num
p.anchor = generate_anchor(model.anchors, p.score_size)
avg_chans = np.mean(im, axis=(0, 1))
wc_z = target_sz[0] + p.context_amount * sum(target_sz)
hc_z = target_sz[1] + p.context_amount * sum(target_sz)
s_z = round(np.sqrt(wc_z * hc_z))
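# Exemplar crop size as in SiamFC-style trackers: pad width and height by
# context_amount * (w + h) and take the geometric mean; the square crop of side s_z
# around target_pos is later resized to p.exemplar_size (typically 127 px).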
# initialize the exemplar
z_crop = get_subwindow_tracking(im, target_pos, p.exemplar_size, s_z, avg_chans)
z = Variable(z_crop.unsqueeze(0))
net.template(z.to(device))
if p.windowing == 'cosine':
window = np.outer(np.hanning(p.score_size), np.hanning(p.score_size))
elif p.windowing == 'uniform':
window = np.ones((p.score_size, p.score_size))
window = np.tile(window.flatten(), p.anchor_num)
state['p'] = p
state['net'] = net
state['avg_chans'] = avg_chans
state['window'] = window
state['target_pos'] = target_pos
state['target_sz'] = target_sz
return state
def siamese_track(state, im, mask_enable=False, refine_enable=False, device='cpu', debug=False):
p = state['p']
net = state['net']
avg_chans = state['avg_chans']
window = state['window']
target_pos = state['target_pos']
target_sz = state['target_sz']
wc_x = target_sz[1] + p.context_amount * sum(target_sz)
hc_x = target_sz[0] + p.context_amount * sum(target_sz)
s_x = np.sqrt(wc_x * hc_x)
scale_x = p.exemplar_size / s_x
d_search = (p.instance_size - p.exemplar_size) / 2
pad = d_search / scale_x
s_x = s_x + 2 * pad
crop_box = [target_pos[0] - round(s_x) / 2, target_pos[1] - round(s_x) / 2, round(s_x), round(s_x)]
if debug:
im_debug = im.copy()
crop_box_int = np.int0(crop_box)
cv2.rectangle(im_debug, (crop_box_int[0], crop_box_int[1]),
(crop_box_int[0] + crop_box_int[2], crop_box_int[1] + crop_box_int[3]), (255, 0, 0), 2)
cv2.imshow('search area', im_debug)
cv2.waitKey(0)
# extract scaled crops for search region x at previous target position
x_crop = Variable(get_subwindow_tracking(im, target_pos, p.instance_size, round(s_x), avg_chans).unsqueeze(0))
if mask_enable:
score, delta, mask = net.track_mask(x_crop.to(device))
else:
score, delta = net.track(x_crop.to(device))
delta = delta.permute(1, 2, 3, 0).contiguous().view(4, -1).data.cpu().numpy()
score = F.softmax(score.permute(1, 2, 3, 0).contiguous().view(2, -1).permute(1, 0), dim=1).data[:,
1].cpu().numpy()
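# The four lines below decode the RPN regression output into (cx, cy, w, h) in
# search-crop pixels: centre offsets are scaled by the anchor size and added to the
# anchor centre, while width/height are exponentiated log-scale factors applied to
# the anchor dimensions.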
delta[0, :] = delta[0, :] * p.anchor[:, 2] + p.anchor[:, 0]
delta[1, :] = delta[1, :] * p.anchor[:, 3] + p.anchor[:, 1]
delta[2, :] = np.exp(delta[2, :]) * p.anchor[:, 2]
delta[3, :] = np.exp(delta[3, :]) * p.anchor[:, 3]
def change(r):
return np.maximum(r, 1. / r)
def sz(w, h):
pad = (w + h) * 0.5
sz2 = (w + pad) * (h + pad)
return np.sqrt(sz2)
def sz_wh(wh):
pad = (wh[0] + wh[1]) * 0.5
sz2 = (wh[0] + pad) * (wh[1] + pad)
return np.sqrt(sz2)
# size penalty
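# SiamRPN-style penalties: s_c penalises scale changes and r_c penalises
# aspect-ratio changes relative to the previous target size; p.penalty_k controls
# how sharply such changes suppress the score before the cosine window is mixed in.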
target_sz_in_crop = target_sz*scale_x
s_c = change(sz(delta[2, :], delta[3, :]) / (sz_wh(target_sz_in_crop))) # scale penalty
r_c = change((target_sz_in_crop[0] / target_sz_in_crop[1]) / (delta[2, :] / delta[3, :])) # ratio penalty
penalty = np.exp(-(r_c * s_c - 1) * p.penalty_k)
pscore = penalty * score
# cos window (motion model)
pscore = pscore * (1 - p.window_influence) + window * p.window_influence
best_pscore_id = np.argmax(pscore)
pred_in_crop = delta[:, best_pscore_id] / scale_x
lr = penalty[best_pscore_id] * score[best_pscore_id] * p.lr # lr for OTB
res_x = pred_in_crop[0] + target_pos[0]
res_y = pred_in_crop[1] + target_pos[1]
res_w = target_sz[0] * (1 - lr) + pred_in_crop[2] * lr
res_h = target_sz[1] * (1 - lr) + pred_in_crop[3] * lr
target_pos = np.array([res_x, res_y])
target_sz = np.array([res_w, res_h])
# for Mask Branch
if mask_enable:
best_pscore_id_mask = np.unravel_index(best_pscore_id, (5, p.score_size, p.score_size))
delta_x, delta_y = best_pscore_id_mask[2], best_pscore_id_mask[1]
if refine_enable:
mask = net.track_refine((delta_y, delta_x)).to(device).sigmoid().squeeze().view(
p.out_size, p.out_size).cpu().data.numpy()
else:
mask = mask[0, :, delta_y, delta_x].sigmoid(). \
squeeze().view(p.out_size, p.out_size).cpu().data.numpy()
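# crop_back below resamples the bbox (x, y, w, h) region of `image` onto an out_sz
# canvas with an affine warp ([[a, 0, c], [0, b, d]] scales by out/w and out/h and
# shifts by the box origin); here it pastes the predicted out_size x out_size mask
# back into full-image coordinates.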
def crop_back(image, bbox, out_sz, padding=-1):
a = (out_sz[0] - 1) / bbox[2]
b = (out_sz[1] - 1) / bbox[3]
c = -a * bbox[0]
d = -b * bbox[1]
mapping = np.array([[a, 0, c],
[0, b, d]]).astype(np.float)
crop = cv2.warpAffine(image, mapping, (out_sz[0], out_sz[1]),
flags=cv2.INTER_LINEAR,
borderMode=cv2.BORDER_CONSTANT,
borderValue=padding)
return crop
s = crop_box[2] / p.instance_size
sub_box = [crop_box[0] + (delta_x - p.base_size / 2) * p.total_stride * s,
crop_box[1] + (delta_y - p.base_size / 2) * p.total_stride * s,
s * p.exemplar_size, s * p.exemplar_size]
s = p.out_size / sub_box[2]
back_box = [-sub_box[0] * s, -sub_box[1] * s, state['im_w'] * s, state['im_h'] * s]
mask_in_img = crop_back(mask, back_box, (state['im_w'], state['im_h']))
target_mask = (mask_in_img > p.seg_thr).astype(np.uint8)
if cv2.__version__[0] == '4':
contours, _ = cv2.findContours(target_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
else:
_, contours, _ = cv2.findContours(target_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
cnt_area = [cv2.contourArea(cnt) for cnt in contours]
if len(contours) != 0 and np.max(cnt_area) > 100:
contour = contours[np.argmax(cnt_area)] # use max area polygon
polygon = contour.reshape(-1, 2)
# pbox = cv2.boundingRect(polygon) # Min Max Rectangle
prbox = cv2.boxPoints(cv2.minAreaRect(polygon)) # Rotated Rectangle
# box_in_img = pbox
rbox_in_img = prbox
else: # empty mask
location = cxy_wh_2_rect(target_pos, target_sz)
rbox_in_img = np.array([[location[0], location[1]],
[location[0] + location[2], location[1]],
[location[0] + location[2], location[1] + location[3]],
[location[0], location[1] + location[3]]])
target_pos[0] = max(0, min(state['im_w'], target_pos[0]))
target_pos[1] = max(0, min(state['im_h'], target_pos[1]))
target_sz[0] = max(10, min(state['im_w'], target_sz[0]))
target_sz[1] = max(10, min(state['im_h'], target_sz[1]))
state['target_pos'] = target_pos
state['target_sz'] = target_sz
state['score'] = score[best_pscore_id]
state['mask'] = mask_in_img if mask_enable else []
state['ploygon'] = rbox_in_img if mask_enable else []
return state
def track_vot(model, video, hp=None, mask_enable=False, refine_enable=False, device='cpu'):
regions = []  # results and states (1: init, 2: lost, 0: skip)
image_files, gt = video['image_files'], video['gt']
start_frame, end_frame, lost_times, toc = 0, len(image_files), 0, 0
for f, image_file in enumerate(image_files):
im = cv2.imread(image_file)
tic = cv2.getTickCount()
if f == start_frame: # init
cx, cy, w, h = get_axis_aligned_bbox(gt[f])
target_pos = np.array([cx, cy])
target_sz = np.array([w, h])
state = siamese_init(im, target_pos, target_sz, model, hp, device) # init tracker
location = cxy_wh_2_rect(state['target_pos'], state['target_sz'])
regions.append(1 if 'VOT' in args.dataset else gt[f])
elif f > start_frame: # tracking
state = siamese_track(state, im, mask_enable, refine_enable, device, args.debug) # track
if mask_enable:
location = state['ploygon'].flatten()
mask = state['mask']
else:
location = cxy_wh_2_rect(state['target_pos'], state['target_sz'])
mask = []
if 'VOT' in args.dataset:
gt_polygon = ((gt[f][0], gt[f][1]), (gt[f][2], gt[f][3]),
(gt[f][4], gt[f][5]), (gt[f][6], gt[f][7]))
if mask_enable:
pred_polygon = ((location[0], location[1]), (location[2], location[3]),
(location[4], location[5]), (location[6], location[7]))
else:
pred_polygon = ((location[0], location[1]),
(location[0] + location[2], location[1]),
(location[0] + location[2], location[1] + location[3]),
(location[0], location[1] + location[3]))
b_overlap = vot_overlap(gt_polygon, pred_polygon, (im.shape[1], im.shape[0]))
else:
b_overlap = 1
if b_overlap:
regions.append(location)
else: # lost
regions.append(2)
lost_times += 1
start_frame = f + 5 # skip 5 frames
else: # skip
regions.append(0)
toc += cv2.getTickCount() - tic
if args.visualization and f >= start_frame: # visualization (skip lost frame)
im_show = im.copy()
if f == 0: cv2.destroyAllWindows()
if gt.shape[0] > f:
if len(gt[f]) == 8:
cv2.polylines(im_show, [np.array(gt[f], np.int).reshape((-1, 1, 2))], True, (0, 255, 0), 3)
else:
cv2.rectangle(im_show, (gt[f, 0], gt[f, 1]), (gt[f, 0] + gt[f, 2], gt[f, 1] + gt[f, 3]), (0, 255, 0), 3)
if len(location) == 8:
if mask_enable:
mask = mask > state['p'].seg_thr
im_show[:, :, 2] = mask * 255 + (1 - mask) * im_show[:, :, 2]
location_int = np.int0(location)
cv2.polylines(im_show, [location_int.reshape((-1, 1, 2))], True, (0, 255, 255), 3)
else:
location = [int(l) for l in location]
cv2.rectangle(im_show, (location[0], location[1]),
(location[0] + location[2], location[1] + location[3]), (0, 255, 255), 3)
cv2.putText(im_show, str(f), (40, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 2)
cv2.putText(im_show, str(lost_times), (40, 80), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
cv2.putText(im_show, str(state['score']) if 'score' in state else '', (40, 120), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
cv2.imshow(video['name'], im_show)
cv2.waitKey(1)
toc /= cv2.getTickFrequency()
# save result
name = args.arch.split('.')[0] + '_' + ('mask_' if mask_enable else '') + ('refine_' if refine_enable else '') +\
args.resume.split('/')[-1].split('.')[0]
if 'VOT' in args.dataset:
video_path = join('test', args.dataset, name,
'baseline', video['name'])
if not isdir(video_path): makedirs(video_path)
result_path = join(video_path, '{:s}_001.txt'.format(video['name']))
with open(result_path, "w") as fin:
for x in regions:
fin.write("{:d}\n".format(x)) if isinstance(x, int) else \
fin.write(','.join([vot_float2str("%.4f", i) for i in x]) + '\n')
else: # OTB
video_path = join('test', args.dataset, name)
if not isdir(video_path): makedirs(video_path)
result_path = join(video_path, '{:s}.txt'.format(video['name']))
with open(result_path, "w") as fin:
for x in regions:
fin.write(','.join([str(i) for i in x])+'\n')
logger.info('({:d}) Video: {:12s} Time: {:02.1f}s Speed: {:3.1f}fps Lost: {:d}'.format(
v_id, video['name'], toc, f / toc, lost_times))
return lost_times, f / toc
def MultiBatchIouMeter(thrs, outputs, targets, start=None, end=None):
targets = np.array(targets)
outputs = np.array(outputs)
num_frame = targets.shape[0]
if start is None:
object_ids = np.array(list(range(outputs.shape[0]))) + 1
else:
object_ids = [int(id) for id in start]
num_object = len(object_ids)
res = np.zeros((num_object, len(thrs)), dtype=np.float32)
output_max_id = np.argmax(outputs, axis=0).astype('uint8')+1
outputs_max = np.max(outputs, axis=0)
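# Fuse the per-object probability maps: output_max_id holds the 1-based id of the
# highest-scoring object at each pixel and outputs_max its score; a pixel counts as
# object j only if it exceeds the threshold and also wins the argmax.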
for k, thr in enumerate(thrs):
output_thr = outputs_max > thr
for j in range(num_object):
target_j = targets == object_ids[j]
if start is None:
start_frame, end_frame = 1, num_frame - 1
else:
start_frame, end_frame = start[str(object_ids[j])] + 1, end[str(object_ids[j])] - 1
iou = []
for i in range(start_frame, end_frame):
pred = (output_thr[i] * output_max_id[i]) == (j+1)
mask_sum = (pred == 1).astype(np.uint8) + (target_j[i] > 0).astype(np.uint8)
intxn = np.sum(mask_sum == 2)
union = np.sum(mask_sum > 0)
if union > 0:
iou.append(intxn / union)
elif union == 0 and intxn == 0:
iou.append(1)
res[j, k] = np.mean(iou)
return res
def track_vos(model, video, hp=None, mask_enable=False, refine_enable=False, mot_enable=False, device='cpu'):
image_files = video['image_files']
annos = [np.array(Image.open(x)) for x in video['anno_files']]
if 'anno_init_files' in video:
annos_init = [np.array(Image.open(x)) for x in video['anno_init_files']]
else:
annos_init = [annos[0]]
if not mot_enable:
annos = [(anno > 0).astype(np.uint8) for anno in annos]
annos_init = [(anno_init > 0).astype(np.uint8) for anno_init in annos_init]
if 'start_frame' in video:
object_ids = [int(id) for id in video['start_frame']]
else:
object_ids = [o_id for o_id in np.unique(annos[0]) if o_id != 0]
if len(object_ids) != len(annos_init):
annos_init = annos_init*len(object_ids)
object_num = len(object_ids)
toc = 0
pred_masks = np.zeros((object_num, len(image_files), annos[0].shape[0], annos[0].shape[1]))-1
for obj_id, o_id in enumerate(object_ids):
if 'start_frame' in video:
start_frame = video['start_frame'][str(o_id)]
end_frame = video['end_frame'][str(o_id)]
else:
start_frame, end_frame = 0, len(image_files)
for f, image_file in enumerate(image_files):
im = cv2.imread(image_file)
tic = cv2.getTickCount()
if f == start_frame: # init
mask = annos_init[obj_id] == o_id
x, y, w, h = cv2.boundingRect((mask).astype(np.uint8))
cx, cy = x + w/2, y + h/2
target_pos = np.array([cx, cy])
target_sz = np.array([w, h])
state = siamese_init(im, target_pos, target_sz, model, hp, device=device) # init tracker
elif end_frame >= f > start_frame: # tracking
state = siamese_track(state, im, mask_enable, refine_enable, device=device) # track
mask = state['mask']
toc += cv2.getTickCount() - tic
if end_frame >= f >= start_frame:
pred_masks[obj_id, f, :, :] = mask
toc /= cv2.getTickFrequency()
if len(annos) == len(image_files):
multi_mean_iou = MultiBatchIouMeter(thrs, pred_masks, annos,
start=video['start_frame'] if 'start_frame' in video else None,
end=video['end_frame'] if 'end_frame' in video else None)
for i in range(object_num):
for j, thr in enumerate(thrs):
logger.info('Fusion Multi Object{:20s} IOU at {:.2f}: {:.4f}'.format(video['name'] + '_' + str(i + 1), thr,
multi_mean_iou[i, j]))
else:
multi_mean_iou = []
if args.save_mask:
video_path = join('test', args.dataset, 'SiamMask', video['name'])
if not isdir(video_path): makedirs(video_path)
pred_mask_final = np.array(pred_masks)
pred_mask_final = (np.argmax(pred_mask_final, axis=0).astype('uint8') + 1) * (
np.max(pred_mask_final, axis=0) > state['p'].seg_thr).astype('uint8')
for i in range(pred_mask_final.shape[0]):
cv2.imwrite(join(video_path, image_files[i].split('/')[-1].split('.')[0] + '.png'), pred_mask_final[i].astype(np.uint8))
if args.visualization:
pred_mask_final = np.array(pred_masks)
pred_mask_final = (np.argmax(pred_mask_final, axis=0).astype('uint8') + 1) * (
np.max(pred_mask_final, axis=0) > state['p'].seg_thr).astype('uint8')
COLORS = np.random.randint(128, 255, size=(object_num, 3), dtype="uint8")
COLORS = np.vstack([[0, 0, 0], COLORS]).astype("uint8")
mask = COLORS[pred_mask_final]
for f, image_file in enumerate(image_files):
output = ((0.4 * cv2.imread(image_file)) + (0.6 * mask[f,:,:,:])).astype("uint8")
cv2.imshow("mask", output)
cv2.waitKey(1)
logger.info('({:d}) Video: {:12s} Time: {:02.1f}s Speed: {:3.1f}fps'.format(
v_id, video['name'], toc, f*len(object_ids) / toc))
return multi_mean_iou, f*len(object_ids) / toc
def main():
global args, logger, v_id
args = parser.parse_args()
cfg = load_config(args)
init_log('global', logging.INFO)
if args.log != "":
add_file_handler('global', args.log, logging.INFO)
logger = logging.getLogger('global')
logger.info(args)
# setup model
if args.arch == 'Custom':
from custom import Custom
model = Custom(anchors=cfg['anchors'])
else:
parser.error('invalid architecture: {}'.format(args.arch))
if args.resume:
assert isfile(args.resume), '{} is not a valid file'.format(args.resume)
model = load_pretrain(model, args.resume)
model.eval()
device = torch.device('cuda' if (torch.cuda.is_available() and not args.cpu) else 'cpu')
model = model.to(device)
# setup dataset
dataset = load_dataset(args.dataset)
# VOS or VOT?
if args.dataset in ['DAVIS2016', 'DAVIS2017', 'ytb_vos'] and args.mask:
vos_enable = True # enable Mask output
else:
vos_enable = False
total_lost = 0 # VOT
iou_lists = [] # VOS
speed_list = []
for v_id, video in enumerate(dataset.keys(), start=1):
if args.video != '' and video != args.video:
continue
if vos_enable:
iou_list, speed = track_vos(model, dataset[video], cfg['hp'] if 'hp' in cfg.keys() else None,
args.mask, args.refine, args.dataset in ['DAVIS2017', 'ytb_vos'], device=device)
iou_lists.append(iou_list)
else:
lost, speed = track_vot(model, dataset[video], cfg['hp'] if 'hp' in cfg.keys() else None,
args.mask, args.refine, device=device)
total_lost += lost
speed_list.append(speed)
# report final result
if vos_enable:
for thr, iou in zip(thrs, np.mean(np.concatenate(iou_lists), axis=0)):
logger.info('Segmentation Threshold {:.2f} mIoU: {:.3f}'.format(thr, iou))
else:
logger.info('Total Lost: {:d}'.format(total_lost))
logger.info('Mean Speed: {:.2f} FPS'.format(np.mean(speed_list)))
if __name__ == '__main__':
main()
| [
"torch.cuda.is_available",
"torch.is_tensor",
"torch.from_numpy"
] | 0.4.1 | culdo/SiamMask | 8eb80eb95c255124e7732c31f22795c7d65dd66b |