repo (stringlengths 1-99) | file (stringlengths 13-215) | code (stringlengths 12-59.2M) | file_length (int64 12-59.2M) | avg_line_length (float64 3.82-1.48M) | max_line_length (int64 12-2.51M) | extension_type (stringclasses, 1 value) |
---|---|---|---|---|---|---|
RegularizedBN | RegularizedBN-main/fairseq/data/raw_label_dataset.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from . import FairseqDataset
class RawLabelDataset(FairseqDataset):
def __init__(self, labels):
super().__init__()
self.labels = labels
def __getitem__(self, index):
return self.labels[index]
def __len__(self):
return len(self.labels)
def collater(self, samples):
return torch.tensor(samples)
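# Illustrative usage sketch (the toy labels below are assumptions, not from the
# original code): wrap a plain list of labels and collate a few items into a tensor.
_labels = RawLabelDataset([0, 1, 1, 0])
assert len(_labels) == 4 and _labels[2] == 1
_label_batch = _labels.collater([_labels[i] for i in (0, 1)])  # tensor([0, 1])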
| 547 | 20.92 | 65 | py |
RegularizedBN | RegularizedBN-main/fairseq/data/resampling_dataset.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import numpy as np
from fairseq.data import BaseWrapperDataset, plasma_utils
logger = logging.getLogger(__name__)
class ResamplingDataset(BaseWrapperDataset):
"""Randomly samples from a given dataset at each epoch.
Sampling is done with or without replacement, depending on the "replace"
parameter.
Optionally, the epoch size can be rescaled. This is potentially desirable
to increase per-epoch coverage of the base dataset (since sampling with
replacement means that many items in the dataset will be left out). In the
case of sampling without replacement, size_ratio should be strictly less
than 1.
Args:
dataset (~torch.utils.data.Dataset): dataset on which to sample.
weights (List[float]): list of probability weights
(default: None, which corresponds to uniform sampling).
replace (bool): sampling mode; True for "with replacement", or False
for "without replacement" (default: True)
size_ratio (float): the ratio to subsample to; must be positive
(default: 1.0).
batch_by_size (bool): whether or not to batch by sequence length
(default: True).
seed (int): RNG seed to use (default: 0).
epoch (int): starting epoch number (default: 1).
"""
def __init__(
self,
dataset,
weights=None,
replace=True,
size_ratio=1.0,
batch_by_size=True,
seed=0,
epoch=1,
):
super().__init__(dataset)
if weights is None:
self.weights = None
else:
assert len(weights) == len(dataset)
weights_arr = np.array(weights, dtype=np.float64)
weights_arr /= weights_arr.sum()
self.weights = plasma_utils.PlasmaArray(weights_arr)
self.replace = replace
assert size_ratio > 0.0
if not self.replace:
assert size_ratio < 1.0
self.size_ratio = float(size_ratio)
self.actual_size = np.ceil(len(dataset) * self.size_ratio).astype(int)
self.batch_by_size = batch_by_size
self.seed = seed
self._cur_epoch = None
self._cur_indices = None
self.set_epoch(epoch)
def __getitem__(self, index):
return self.dataset[self._cur_indices.array[index]]
def __len__(self):
return self.actual_size
@property
def sizes(self):
if isinstance(self.dataset.sizes, list):
return [s[self._cur_indices.array] for s in self.dataset.sizes]
return self.dataset.sizes[self._cur_indices.array]
def num_tokens(self, index):
return self.dataset.num_tokens(self._cur_indices.array[index])
def size(self, index):
return self.dataset.size(self._cur_indices.array[index])
def ordered_indices(self):
if self.batch_by_size:
order = [
np.arange(len(self)),
self.sizes,
] # No need to handle `self.shuffle == True`
return np.lexsort(order)
else:
return np.arange(len(self))
def prefetch(self, indices):
self.dataset.prefetch(self._cur_indices.array[indices])
def set_epoch(self, epoch):
logger.debug('ResamplingDataset.set_epoch: {}'.format(epoch))
super().set_epoch(epoch)
if epoch == self._cur_epoch:
return
self._cur_epoch = epoch
# Generate a weighted sample of indices as a function of the
# random seed and the current epoch.
rng = np.random.RandomState(
[
42, # magic number
self.seed % (2 ** 32), # global seed
self._cur_epoch, # epoch index
]
)
self._cur_indices = plasma_utils.PlasmaArray(
rng.choice(
len(self.dataset),
self.actual_size,
replace=self.replace,
p=(None if self.weights is None else self.weights.array),
)
)
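# Hedged usage sketch: resample half of a toy dataset without replacement; the
# subset is re-drawn deterministically per epoch from (42, seed, epoch). The toy
# dataset below is an illustrative assumption, and the sketch assumes
# plasma_utils.PlasmaArray keeps small arrays in memory, as the wrapper above relies on.
class _ToyDataset:
    def __init__(self, data):
        self.data = data
        self.sizes = np.array([1] * len(data))
    def __getitem__(self, index):
        return self.data[index]
    def __len__(self):
        return len(self.data)
_base = _ToyDataset(list(range(10)))
_resampled = ResamplingDataset(_base, replace=False, size_ratio=0.5, seed=0, epoch=1)
assert len(_resampled) == 5
_epoch1_items = [_resampled[i] for i in range(len(_resampled))]
_resampled.set_epoch(2)  # re-draws a (generally) different subset of the same size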
| 4,232 | 29.89781 | 78 | py |
RegularizedBN | RegularizedBN-main/fairseq/data/dictionary.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
from collections import Counter
from multiprocessing import Pool
import torch
from fairseq import utils
from fairseq.binarizer import safe_readline
from fairseq.data import data_utils
from fairseq.file_io import PathManager
from fairseq.tokenizer import tokenize_line
class Dictionary(object):
"""A mapping from symbols to consecutive integers"""
def __init__(
self,
*, # begin keyword-only arguments
bos="<s>",
pad="<pad>",
eos="</s>",
unk="<unk>",
extra_special_symbols=None,
):
self.unk_word, self.pad_word, self.eos_word = unk, pad, eos
self.symbols = []
self.count = []
self.indices = {}
self.bos_index = self.add_symbol(bos)
self.pad_index = self.add_symbol(pad)
self.eos_index = self.add_symbol(eos)
self.unk_index = self.add_symbol(unk)
if extra_special_symbols:
for s in extra_special_symbols:
self.add_symbol(s)
self.nspecial = len(self.symbols)
def __eq__(self, other):
return self.indices == other.indices
def __getitem__(self, idx):
if idx < len(self.symbols):
return self.symbols[idx]
return self.unk_word
def __len__(self):
"""Returns the number of symbols in the dictionary"""
return len(self.symbols)
def __contains__(self, sym):
return sym in self.indices
def index(self, sym):
"""Returns the index of the specified symbol"""
assert isinstance(sym, str)
if sym in self.indices:
return self.indices[sym]
return self.unk_index
def string(
self,
tensor,
bpe_symbol=None,
escape_unk=False,
extra_symbols_to_ignore=None,
unk_string=None,
):
"""Helper for converting a tensor of token indices to a string.
Can optionally remove BPE symbols or escape <unk> words.
"""
if torch.is_tensor(tensor) and tensor.dim() == 2:
return "\n".join(
self.string(t, bpe_symbol, escape_unk, extra_symbols_to_ignore)
for t in tensor
)
extra_symbols_to_ignore = set(extra_symbols_to_ignore or [])
extra_symbols_to_ignore.add(self.eos())
def token_string(i):
if i == self.unk():
if unk_string is not None:
return unk_string
else:
return self.unk_string(escape_unk)
else:
return self[i]
if hasattr(self, "bos_index"):
extra_symbols_to_ignore.add(self.bos())
sent = " ".join(
token_string(i)
for i in tensor
if utils.item(i) not in extra_symbols_to_ignore
)
return data_utils.post_process(sent, bpe_symbol)
def unk_string(self, escape=False):
"""Return unknown string, optionally escaped as: <<unk>>"""
if escape:
return "<{}>".format(self.unk_word)
else:
return self.unk_word
def add_symbol(self, word, n=1, overwrite=False):
"""Adds a word to the dictionary"""
if word in self.indices and not overwrite:
idx = self.indices[word]
self.count[idx] = self.count[idx] + n
return idx
else:
idx = len(self.symbols)
self.indices[word] = idx
self.symbols.append(word)
self.count.append(n)
return idx
def update(self, new_dict):
"""Updates counts from new dictionary."""
for word in new_dict.symbols:
idx2 = new_dict.indices[word]
if word in self.indices:
idx = self.indices[word]
self.count[idx] = self.count[idx] + new_dict.count[idx2]
else:
idx = len(self.symbols)
self.indices[word] = idx
self.symbols.append(word)
self.count.append(new_dict.count[idx2])
def finalize(self, threshold=-1, nwords=-1, padding_factor=8):
"""Sort symbols by frequency in descending order, ignoring special ones.
Args:
- threshold defines the minimum word count
- nwords defines the total number of words in the final dictionary,
including special symbols
- padding_factor can be used to pad the dictionary size to be a
multiple of 8, which is important on some hardware (e.g., Nvidia
Tensor Cores).
"""
if nwords <= 0:
nwords = len(self)
new_indices = dict(zip(self.symbols[: self.nspecial], range(self.nspecial)))
new_symbols = self.symbols[: self.nspecial]
new_count = self.count[: self.nspecial]
c = Counter(
dict(
sorted(zip(self.symbols[self.nspecial :], self.count[self.nspecial :]))
)
)
for symbol, count in c.most_common(nwords - self.nspecial):
if count >= threshold:
new_indices[symbol] = len(new_symbols)
new_symbols.append(symbol)
new_count.append(count)
else:
break
assert len(new_symbols) == len(new_indices)
self.count = list(new_count)
self.symbols = list(new_symbols)
self.indices = new_indices
self.pad_to_multiple_(padding_factor)
def pad_to_multiple_(self, padding_factor):
"""Pad Dictionary size to be a multiple of *padding_factor*."""
if padding_factor > 1:
i = 0
while len(self) % padding_factor != 0:
symbol = "madeupword{:04d}".format(i)
self.add_symbol(symbol, n=0)
i += 1
def bos(self):
"""Helper to get index of beginning-of-sentence symbol"""
return self.bos_index
def pad(self):
"""Helper to get index of pad symbol"""
return self.pad_index
def eos(self):
"""Helper to get index of end-of-sentence symbol"""
return self.eos_index
def unk(self):
"""Helper to get index of unk symbol"""
return self.unk_index
@classmethod
def load(cls, f):
"""Loads the dictionary from a text file with the format:
```
<symbol0> <count0>
<symbol1> <count1>
...
```
"""
d = cls()
d.add_from_file(f)
return d
def add_from_file(self, f):
"""
Loads a pre-existing dictionary from a text file and adds its symbols
to this instance.
"""
if isinstance(f, str):
try:
with PathManager.open(f, "r", encoding="utf-8") as fd:
self.add_from_file(fd)
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception(
"Incorrect encoding detected in {}, please "
"rebuild the dataset".format(f)
)
return
lines = f.readlines()
indices_start_line = self._load_meta(lines)
for line in lines[indices_start_line:]:
try:
line, field = line.rstrip().rsplit(" ", 1)
if field == "#fairseq:overwrite":
overwrite = True
line, field = line.rsplit(" ", 1)
else:
overwrite = False
count = int(field)
word = line
if word in self and not overwrite:
raise RuntimeError(
"Duplicate word found when loading Dictionary: '{}'. "
"Duplicate words can overwrite earlier ones by adding the "
"#fairseq:overwrite flag at the end of the corresponding row "
"in the dictionary file. If using the Camembert model, please "
"download an updated copy of the model file."
.format(word)
)
self.add_symbol(word, n=count, overwrite=overwrite)
except ValueError:
raise ValueError(
"Incorrect dictionary format, expected '<token> <cnt> [flags]'"
)
def _save(self, f, kv_iterator):
if isinstance(f, str):
PathManager.mkdirs(os.path.dirname(f))
with PathManager.open(f, "w", encoding="utf-8") as fd:
return self.save(fd)
for k, v in kv_iterator:
print("{} {}".format(k, v), file=f)
def _get_meta(self):
return [], []
def _load_meta(self, lines):
return 0
def save(self, f):
"""Stores dictionary into a text file"""
ex_keys, ex_vals = self._get_meta()
self._save(
f,
zip(
ex_keys + self.symbols[self.nspecial :],
ex_vals + self.count[self.nspecial :],
),
)
def dummy_sentence(self, length):
t = torch.Tensor(length).uniform_(self.nspecial + 1, len(self)).long()
t[-1] = self.eos()
return t
def encode_line(
self,
line,
line_tokenizer=tokenize_line,
add_if_not_exist=True,
consumer=None,
append_eos=True,
reverse_order=False,
):
words = line_tokenizer(line)
if reverse_order:
words = list(reversed(words))
nwords = len(words)
ids = torch.IntTensor(nwords + 1 if append_eos else nwords)
for i, word in enumerate(words):
if add_if_not_exist:
idx = self.add_symbol(word)
else:
idx = self.index(word)
if consumer is not None:
consumer(word, idx)
ids[i] = idx
if append_eos:
ids[nwords] = self.eos_index
return ids
@staticmethod
def _add_file_to_dictionary_single_worker(
filename, tokenize, eos_word, worker_id=0, num_workers=1
):
counter = Counter()
with open(PathManager.get_local_path(filename), "r", encoding="utf-8") as f:
size = os.fstat(f.fileno()).st_size
chunk_size = size // num_workers
offset = worker_id * chunk_size
end = offset + chunk_size
f.seek(offset)
if offset > 0:
safe_readline(f) # drop first incomplete line
line = f.readline()
while line:
for word in tokenize(line):
counter.update([word])
counter.update([eos_word])
if f.tell() > end:
break
line = f.readline()
return counter
@staticmethod
def add_file_to_dictionary(filename, dict, tokenize, num_workers):
def merge_result(counter):
for w, c in sorted(counter.items()):
dict.add_symbol(w, c)
if num_workers > 1:
pool = Pool(processes=num_workers)
results = []
for worker_id in range(num_workers):
results.append(
pool.apply_async(
Dictionary._add_file_to_dictionary_single_worker,
(filename, tokenize, dict.eos_word, worker_id, num_workers),
)
)
pool.close()
pool.join()
for r in results:
merge_result(r.get())
else:
merge_result(
Dictionary._add_file_to_dictionary_single_worker(
filename, tokenize, dict.eos_word
)
)
class TruncatedDictionary(object):
def __init__(self, wrapped_dict, length):
self.__class__ = type(
wrapped_dict.__class__.__name__,
(self.__class__, wrapped_dict.__class__),
{},
)
self.__dict__ = wrapped_dict.__dict__
self.wrapped_dict = wrapped_dict
self.length = min(len(self.wrapped_dict), length)
def __len__(self):
return self.length
def __getitem__(self, i):
if i < self.length:
return self.wrapped_dict[i]
return self.wrapped_dict.unk()
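# Illustrative usage sketch (the toy sentence is an assumption): build a small
# dictionary, encode a line to indices, and decode it back to text.
_d = Dictionary()
_ids = _d.encode_line("hello world", add_if_not_exist=True)  # IntTensor([4, 5, 2]): two new symbols + eos
assert _d.string(_ids) == "hello world"
_truncated = TruncatedDictionary(_d, length=5)
assert len(_truncated) == 5 and _truncated[4] == "hello"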
| 12,579 | 31.339332 | 87 | py |
RegularizedBN | RegularizedBN-main/fairseq/data/append_token_dataset.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
from . import BaseWrapperDataset
class AppendTokenDataset(BaseWrapperDataset):
def __init__(self, dataset, token=None):
super().__init__(dataset)
self.token = token
if token is not None:
self._sizes = np.array(dataset.sizes) + 1
else:
self._sizes = dataset.sizes
def __getitem__(self, idx):
item = self.dataset[idx]
if self.token is not None:
item = torch.cat([item, item.new([self.token])])
return item
@property
def sizes(self):
return self._sizes
def num_tokens(self, index):
n = self.dataset.num_tokens(index)
if self.token is not None:
n += 1
return n
def size(self, index):
n = self.dataset.size(index)
if self.token is not None:
n += 1
return n
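# Illustrative usage sketch: append an eos-like token id (2 here, an assumption)
# to every item of a toy base dataset; the reported sizes grow by one.
class _ToyDataset:
    def __init__(self, tensors):
        self.tensors = tensors
        self.sizes = np.array([len(t) for t in tensors])
    def __getitem__(self, index):
        return self.tensors[index]
    def __len__(self):
        return len(self.tensors)
_base = _ToyDataset([torch.LongTensor([4, 5]), torch.LongTensor([6])])
_appended = AppendTokenDataset(_base, token=2)
assert _appended[0].tolist() == [4, 5, 2]
assert list(_appended.sizes) == [3, 2]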
| 1,066 | 23.813953 | 65 | py |
RegularizedBN | RegularizedBN-main/fairseq/data/fasta_dataset.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import subprocess
import threading
from pathlib import Path
import numpy as np
import torch
def fasta_file_path(prefix_path):
return prefix_path + ".fasta"
class FastaDataset(torch.utils.data.Dataset):
"""
For loading protein sequence datasets in the common FASTA data format
"""
def __init__(self, path: str, cache_indices=False):
self.fn = fasta_file_path(path)
self.threadlocal = threading.local()
self.cache = Path(f"{path}.fasta.idx.npy")
if cache_indices:
if self.cache.exists():
self.offsets, self.sizes = np.load(self.cache)
else:
self.offsets, self.sizes = self._build_index(path)
np.save(self.cache, np.stack([self.offsets, self.sizes]))
else:
self.offsets, self.sizes = self._build_index(path)
def _get_file(self):
if not hasattr(self.threadlocal, "f"):
self.threadlocal.f = open(self.fn, "r")
return self.threadlocal.f
def __getitem__(self, idx):
f = self._get_file()
f.seek(self.offsets[idx])
desc = f.readline().strip()
line = f.readline()
seq = ""
while line != "" and line[0] != ">":
seq += line.strip()
line = f.readline()
return desc, seq
def __len__(self):
return self.offsets.size
def _build_index(self, path: str):
# Use grep and awk to get 100M/s on local SSD.
# Should process your enormous 100G fasta in ~10 min single core...
path = fasta_file_path(path)
bytes_offsets = subprocess.check_output(
f"cat {path} | tqdm --bytes --total $(wc -c < {path})"
"| grep --byte-offset '^>' -o | cut -d: -f1",
shell=True,
)
fasta_lengths = subprocess.check_output(
f"cat {path} | tqdm --bytes --total $(wc -c < {path})"
"| awk '/^>/ {print \"\";next;} { printf(\"%s\",$0);}' | tail -n+2 | awk '{print length($1)}'",
shell=True,
)
bytes_np = np.fromstring(bytes_offsets, dtype=np.int64, sep=" ")
sizes_np = np.fromstring(fasta_lengths, dtype=np.int64, sep=" ")
return bytes_np, sizes_np
def __setstate__(self, state):
self.__dict__ = state
self.threadlocal = threading.local()
def __getstate__(self):
d = {}
for i, v in self.__dict__.items():
if i != "threadlocal":
d[i] = v
return d
def __del__(self):
if hasattr(self.threadlocal, "f"):
self.threadlocal.f.close()
del self.threadlocal.f
@staticmethod
def exists(path):
return os.path.exists(fasta_file_path(path))
class EncodedFastaDataset(FastaDataset):
"""
The FastaDataset returns raw sequences - this allows us to return
indices with a dictionary instead.
"""
def __init__(self, path, dictionary):
super().__init__(path, cache_indices=True)
self.dictionary = dictionary
def __getitem__(self, idx):
desc, seq = super().__getitem__(idx)
return self.dictionary.encode_line(seq, line_tokenizer=list).long()
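# Hedged usage sketch: write a two-record FASTA file and read it back. Index
# building shells out to grep/awk/cut and the tqdm CLI (see _build_index above),
# so this assumes a Unix environment with those tools installed; the temporary
# path and toy sequences are illustrative assumptions.
import tempfile
_prefix = os.path.join(tempfile.mkdtemp(), "toy")
with open(fasta_file_path(_prefix), "w") as _f:
    _f.write(">seq1 first record\nMKTAYIAKQR\n>seq2 second record\nGGSSGG\n")
_ds = FastaDataset(_prefix, cache_indices=False)
_desc, _seq = _ds[0]  # _desc keeps the '>'-prefixed header line; _seq == "MKTAYIAKQR"
assert len(_ds) == 2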
| 3,387 | 30.37037 | 107 | py |
RegularizedBN | RegularizedBN-main/fairseq/data/mask_tokens_dataset.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from functools import lru_cache
import numpy as np
import torch
from fairseq.data import data_utils, Dictionary
from . import BaseWrapperDataset, LRUCacheDataset
class MaskTokensDataset(BaseWrapperDataset):
"""
A wrapper Dataset for masked language modeling.
Input items are masked according to the specified masking probability.
Args:
dataset: Dataset to wrap.
sizes: Sentence lengths
vocab: Dictionary with the vocabulary and special tokens.
pad_idx: Id of pad token in vocab
mask_idx: Id of mask token in vocab
return_masked_tokens: controls whether to return the non-masked tokens
(the default) or to return a tensor with the original masked token
IDs (and *pad_idx* elsewhere). The latter is useful as targets for
masked LM training.
seed: Seed for random number generator for reproducibility.
mask_prob: probability of replacing a token with *mask_idx*.
leave_unmasked_prob: probability that a masked token is unmasked.
random_token_prob: probability of replacing a masked token with a
random token from the vocabulary.
freq_weighted_replacement: sample random replacement words based on
word frequencies in the vocab.
mask_whole_words: only mask whole words. This should be a byte mask
over vocab indices, indicating whether it is the beginning of a
word. We will extend any mask to encompass the whole word.
bpe: BPE to use for whole-word masking.
"""
@classmethod
def apply_mask(cls, dataset: torch.utils.data.Dataset, *args, **kwargs):
"""Return the source and target datasets for masked LM training."""
dataset = LRUCacheDataset(dataset)
return (
LRUCacheDataset(cls(dataset, *args, **kwargs, return_masked_tokens=False)),
LRUCacheDataset(cls(dataset, *args, **kwargs, return_masked_tokens=True)),
)
def __init__(
self,
dataset: torch.utils.data.Dataset,
vocab: Dictionary,
pad_idx: int,
mask_idx: int,
return_masked_tokens: bool = False,
seed: int = 1,
mask_prob: float = 0.15,
leave_unmasked_prob: float = 0.1,
random_token_prob: float = 0.1,
freq_weighted_replacement: bool = False,
mask_whole_words: torch.Tensor = None,
):
assert 0.0 < mask_prob < 1.0
assert 0.0 <= random_token_prob <= 1.0
assert 0.0 <= leave_unmasked_prob <= 1.0
assert random_token_prob + leave_unmasked_prob <= 1.0
self.dataset = dataset
self.vocab = vocab
self.pad_idx = pad_idx
self.mask_idx = mask_idx
self.return_masked_tokens = return_masked_tokens
self.seed = seed
self.mask_prob = mask_prob
self.leave_unmasked_prob = leave_unmasked_prob
self.random_token_prob = random_token_prob
self.mask_whole_words = mask_whole_words
if random_token_prob > 0.0:
if freq_weighted_replacement:
weights = np.array(self.vocab.count)
else:
weights = np.ones(len(self.vocab))
weights[:self.vocab.nspecial] = 0
self.weights = weights / weights.sum()
self.epoch = 0
def set_epoch(self, epoch, **unused):
super().set_epoch(epoch)
self.epoch = epoch
@lru_cache(maxsize=8)
def __getitem__(self, index: int):
with data_utils.numpy_seed(self.seed, self.epoch, index):
item = self.dataset[index]
sz = len(item)
assert self.mask_idx not in item, \
'Dataset contains mask_idx (={}), this is not expected!'.format(
self.mask_idx,
)
if self.mask_whole_words is not None:
word_begins_mask = self.mask_whole_words.gather(0, item)
word_begins_idx = word_begins_mask.nonzero().view(-1)
sz = len(word_begins_idx)
words = np.split(word_begins_mask, word_begins_idx)[1:]
assert len(words) == sz
word_lens = list(map(len, words))
# decide elements to mask
mask = np.full(sz, False)
num_mask = int(
# add a random number for probabilistic rounding
self.mask_prob * sz + np.random.rand()
)
mask[np.random.choice(sz, num_mask, replace=False)] = True
if self.return_masked_tokens:
# exit early if we're just returning the masked tokens
# (i.e., the targets for masked LM training)
if self.mask_whole_words is not None:
mask = np.repeat(mask, word_lens)
new_item = np.full(len(mask), self.pad_idx)
new_item[mask] = item[torch.from_numpy(mask.astype(np.uint8)) == 1]
return torch.from_numpy(new_item)
# decide unmasking and random replacement
rand_or_unmask_prob = self.random_token_prob + self.leave_unmasked_prob
if rand_or_unmask_prob > 0.0:
rand_or_unmask = mask & (np.random.rand(sz) < rand_or_unmask_prob)
if self.random_token_prob == 0.0:
unmask = rand_or_unmask
rand_mask = None
elif self.leave_unmasked_prob == 0.0:
unmask = None
rand_mask = rand_or_unmask
else:
unmask_prob = self.leave_unmasked_prob / rand_or_unmask_prob
decision = np.random.rand(sz) < unmask_prob
unmask = rand_or_unmask & decision
rand_mask = rand_or_unmask & (~decision)
else:
unmask = rand_mask = None
if unmask is not None:
mask = mask ^ unmask
if self.mask_whole_words is not None:
mask = np.repeat(mask, word_lens)
new_item = np.copy(item)
new_item[mask] = self.mask_idx
if rand_mask is not None:
num_rand = rand_mask.sum()
if num_rand > 0:
if self.mask_whole_words is not None:
rand_mask = np.repeat(rand_mask, word_lens)
num_rand = rand_mask.sum()
new_item[rand_mask] = np.random.choice(
len(self.vocab),
num_rand,
p=self.weights,
)
return torch.from_numpy(new_item)
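# Hedged usage sketch: build (masked source, MLM target) views over a toy token
# dataset. The vocabulary symbols, the "<mask>" token and the 0.5 mask probability
# are illustrative assumptions.
_vocab = Dictionary()
for _w in ("a", "b", "c"):
    _vocab.add_symbol(_w)
_mask_idx = _vocab.add_symbol("<mask>")
class _ToyTokenDataset(torch.utils.data.Dataset):
    def __init__(self, items):
        self.items = items
    def __getitem__(self, index):
        return self.items[index]
    def __len__(self):
        return len(self.items)
_toy = _ToyTokenDataset([torch.LongTensor([4, 5, 6, _vocab.eos()])])
_src, _tgt = MaskTokensDataset.apply_mask(
    _toy, _vocab, pad_idx=_vocab.pad(), mask_idx=_mask_idx, seed=1, mask_prob=0.5,
)
_masked_input = _src[0]  # original length, with some ids replaced by <mask> (or random tokens)
_mlm_target = _tgt[0]    # original ids at masked positions, pad_idx everywhere else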
| 6,847 | 38.356322 | 87 | py |
RegularizedBN | RegularizedBN-main/fairseq/data/concat_dataset.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import bisect
import numpy as np
from torch.utils.data.dataloader import default_collate
from . import FairseqDataset
class ConcatDataset(FairseqDataset):
@staticmethod
def cumsum(sequence, sample_ratios):
r, s = [], 0
for e, ratio in zip(sequence, sample_ratios):
curr_len = int(ratio * len(e))
r.append(curr_len + s)
s += curr_len
return r
def __init__(self, datasets, sample_ratios=1):
super(ConcatDataset, self).__init__()
assert len(datasets) > 0, "datasets should not be an empty iterable"
self.datasets = list(datasets)
if isinstance(sample_ratios, int):
sample_ratios = [sample_ratios] * len(self.datasets)
self.sample_ratios = sample_ratios
self.cumulative_sizes = self.cumsum(self.datasets, sample_ratios)
self.real_sizes = [len(d) for d in self.datasets]
def __len__(self):
return self.cumulative_sizes[-1]
def __getitem__(self, idx):
dataset_idx, sample_idx = self._get_dataset_and_sample_index(idx)
return self.datasets[dataset_idx][sample_idx]
def _get_dataset_and_sample_index(self, idx: int):
dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx)
if dataset_idx == 0:
sample_idx = idx
else:
sample_idx = idx - self.cumulative_sizes[dataset_idx - 1]
sample_idx = sample_idx % self.real_sizes[dataset_idx]
return dataset_idx, sample_idx
def collater(self, samples, **extra_args):
# For now only supports datasets with same underlying collater implementations
if hasattr(self.datasets[0], 'collater'):
return self.datasets[0].collater(samples, **extra_args)
else:
return default_collate(samples, **extra_args)
def size(self, idx: int):
"""
Return an example's size as a float or tuple.
"""
dataset_idx, sample_idx = self._get_dataset_and_sample_index(idx)
return self.datasets[dataset_idx].size(sample_idx)
def num_tokens(self, index: int):
return np.max(self.size(index))
def attr(self, attr: str, index: int):
dataset_idx = bisect.bisect_right(self.cumulative_sizes, index)
return getattr(self.datasets[dataset_idx], attr, None)
@property
def sizes(self):
_dataset_sizes = []
for ds, sr in zip(self.datasets, self.sample_ratios):
if isinstance(ds.sizes, np.ndarray):
_dataset_sizes.append(np.tile(ds.sizes, sr))
else:
# Only support underlying dataset with single size array.
assert isinstance(ds.sizes, list)
_dataset_sizes.append(np.tile(ds.sizes[0], sr))
return np.concatenate(_dataset_sizes)
@property
def supports_prefetch(self):
return all(d.supports_prefetch for d in self.datasets)
def ordered_indices(self):
"""
Returns indices sorted by length. So less padding is needed.
"""
return np.argsort(self.sizes)
def prefetch(self, indices):
frm = 0
for to, ds in zip(self.cumulative_sizes, self.datasets):
real_size = len(ds)
if getattr(ds, 'supports_prefetch', False):
ds.prefetch([(i - frm) % real_size for i in indices if frm <= i < to])
frm = to
def set_epoch(self, epoch):
super().set_epoch(epoch)
for ds in self.datasets:
if hasattr(ds, 'set_epoch'):
ds.set_epoch(epoch)
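# Illustrative usage sketch (toy lists are assumptions): concatenate two datasets,
# upsampling the second one 2x. Plain lists are enough for indexing; `sizes`,
# `collater` and prefetching would require real FairseqDataset members.
_a = [[1, 2], [3]]
_b = [[4, 5, 6]]
_concat = ConcatDataset([_a, _b], sample_ratios=[1, 2])
assert len(_concat) == 4        # 2 + 2 * 1
assert _concat[3] == [4, 5, 6]  # index 3 maps back into _b via modulo indexing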
| 3,759 | 34.471698 | 86 | py |
RegularizedBN | RegularizedBN-main/fairseq/data/data_utils.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
try:
from collections.abc import Iterable
except ImportError:
from collections import Iterable
import contextlib
import itertools
import logging
import os
import warnings
from typing import Tuple, Optional
import numpy as np
import torch
logger = logging.getLogger(__name__)
def infer_language_pair(path):
"""Infer language pair from filename: <split>.<lang1>-<lang2>.(...).idx"""
src, dst = None, None
for filename in os.listdir(path):
parts = filename.split('.')
if len(parts) >= 3 and len(parts[1].split('-')) == 2:
return parts[1].split('-')
return src, dst
def collate_tokens(values, pad_idx, eos_idx=None, left_pad=False, move_eos_to_beginning=False, pad_to_length=None):
"""Convert a list of 1d tensors into a padded 2d tensor."""
size = max(v.size(0) for v in values)
size = size if pad_to_length is None else max(size, pad_to_length)
res = values[0].new(len(values), size).fill_(pad_idx)
def copy_tensor(src, dst):
assert dst.numel() == src.numel()
if move_eos_to_beginning: #false
if eos_idx is None:
# if no eos_idx is specified, then use the last token in src
dst[0] = src[-1]
else:
dst[0] = eos_idx
dst[1:] = src[:-1]
else:
            dst.copy_(src)  # src has an eos symbol appended but no bos symbol; a bos can be added via args
for i, v in enumerate(values):
copy_tensor(v, res[i][size - len(v):] if left_pad else res[i][:len(v)])
#print(res[:2]);exit()
return res
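# Illustrative sketch (toy token ids are assumptions): pad two sequences of
# different lengths into one 2-D batch with pad_idx=1, padding on the left.
_batch = collate_tokens(
    [torch.LongTensor([4, 5, 2]), torch.LongTensor([6, 2])], pad_idx=1, left_pad=True,
)
# _batch == tensor([[4, 5, 2],
#                   [1, 6, 2]])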
def load_indexed_dataset(path, dictionary=None, dataset_impl=None, combine=False, default='cached'):
"""A helper function for loading indexed datasets.
Args:
path (str): path to indexed dataset (e.g., 'data-bin/train')
dictionary (~fairseq.data.Dictionary): data dictionary
dataset_impl (str, optional): which dataset implementation to use. If
not provided, it will be inferred automatically. For legacy indexed
data we use the 'cached' implementation by default.
combine (bool, optional): automatically load and combine multiple
datasets. For example, if *path* is 'data-bin/train', then we will
combine 'data-bin/train', 'data-bin/train1', ... and return a
single ConcatDataset instance.
"""
from fairseq.data.concat_dataset import ConcatDataset
import fairseq.data.indexed_dataset as indexed_dataset
datasets = []
for k in itertools.count():
path_k = path + (str(k) if k > 0 else '')
dataset_impl_k = dataset_impl
if dataset_impl_k is None:
dataset_impl_k = indexed_dataset.infer_dataset_impl(path_k) #mmap
dataset = indexed_dataset.make_dataset(
path_k,
impl=dataset_impl_k or default,
fix_lua_indexing=True,
dictionary=dictionary,
)
if dataset is None:
break
logger.info('loaded {} examples from: {}'.format(len(dataset), path_k))
datasets.append(dataset)
if not combine:
break
if len(datasets) == 0:
return None
elif len(datasets) == 1:
return datasets[0]
else:
return ConcatDataset(datasets)
@contextlib.contextmanager
def numpy_seed(seed, *addl_seeds):
"""Context manager which seeds the NumPy PRNG with the specified seed and
restores the state afterward"""
if seed is None:
yield
return
if len(addl_seeds) > 0:
seed = int(hash((seed, *addl_seeds)) % 1e6)
state = np.random.get_state()
np.random.seed(seed)
try:
yield
finally:
np.random.set_state(state)
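# Illustrative sketch: draws inside the context are reproducible for a given
# (seed, *addl_seeds) tuple, and the global NumPy RNG state is restored afterwards.
with numpy_seed(7, 3):
    _first = np.random.randint(10, size=4)
with numpy_seed(7, 3):
    _second = np.random.randint(10, size=4)
assert (_first == _second).all()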
def collect_filtered(function, iterable, filtered):
"""
Similar to :func:`filter` but collects filtered elements in ``filtered``.
Args:
function (callable): function that returns ``False`` for elements that
should be filtered
iterable (iterable): iterable to filter
filtered (list): list to store filtered elements
"""
for el in iterable:
if function(el):
yield el
else:
filtered.append(el)
def _filter_by_size_dynamic(indices, size_fn, max_positions, raise_exception=False):
def compare_leq(a, b):
return a <= b if not isinstance(a, tuple) else max(a) <= b
def check_size(idx):
if isinstance(max_positions, float) or isinstance(max_positions, int):
return size_fn(idx) <= max_positions
elif isinstance(max_positions, dict):
idx_size = size_fn(idx)
assert isinstance(idx_size, dict)
intersect_keys = set(max_positions.keys()) & set(idx_size.keys())
return all(
all(a is None or b is None or a <= b
for a, b in zip(idx_size[key], max_positions[key]))
for key in intersect_keys
)
else:
# Hacky as heck, for the specific case of multilingual training with RoundRobin.
if isinstance(size_fn(idx), dict) and isinstance(max_positions, tuple):
return all(
a is None or b is None or compare_leq(a, b)
for a, b in zip(size_fn(idx).values(), max_positions)
)
# For MultiCorpusSampledDataset, will generalize it later
if not isinstance(size_fn(idx), Iterable):
return all(size_fn(idx) <= b for b in max_positions)
return all(
a is None or b is None or a <= b
for a, b in zip(size_fn(idx), max_positions)
)
ignored = []
itr = collect_filtered(check_size, indices, ignored)
indices = np.fromiter(itr, dtype=np.int64, count=-1)
return indices, ignored
def filter_by_size(indices, dataset, max_positions, raise_exception=False):
"""
[deprecated] Filter indices based on their size.
Use `FairseqDataset::filter_indices_by_size` instead.
Args:
indices (List[int]): ordered list of dataset indices
dataset (FairseqDataset): fairseq dataset instance
max_positions (tuple): filter elements larger than this size.
Comparisons are done component-wise.
raise_exception (bool, optional): if ``True``, raise an exception if
any elements are filtered (default: False).
"""
warnings.warn(
'data_utils.filter_by_size is deprecated. '
'Use `FairseqDataset::filter_indices_by_size` instead.',
stacklevel=2
)
if isinstance(max_positions, float) or isinstance(max_positions, int):
if hasattr(dataset, 'sizes') and isinstance(dataset.sizes, np.ndarray):
ignored = indices[dataset.sizes[indices] > max_positions].tolist()
indices = indices[dataset.sizes[indices] <= max_positions]
elif hasattr(dataset, 'sizes') and isinstance(dataset.sizes, list) and len(dataset.sizes) == 1:
ignored = indices[dataset.sizes[0][indices] > max_positions].tolist()
indices = indices[dataset.sizes[0][indices] <= max_positions]
else:
indices, ignored = _filter_by_size_dynamic(indices, dataset.size, max_positions)
else:
indices, ignored = _filter_by_size_dynamic(indices, dataset.size, max_positions)
if len(ignored) > 0 and raise_exception:
raise Exception((
'Size of sample #{} is invalid (={}) since max_positions={}, '
'skip this example with --skip-invalid-size-inputs-valid-test'
).format(ignored[0], dataset.size(ignored[0]), max_positions))
if len(ignored) > 0:
logger.warning((
'{} samples have invalid sizes and will be skipped, '
'max_positions={}, first few sample ids={}'
).format(len(ignored), max_positions, ignored[:10]))
return indices
def my_batch_by_size(indices, num_tokens, max_tokens, bsmul=8, tol=0):
class Int(object):
def __init__(self, index):
self.index = index
self.tol = tol
def __eq__(self, other):
return abs(num_tokens(self.index)-num_tokens(other.index)) <= tol
def __lt__(self, other):
return num_tokens(self.index)+tol < num_tokens(other.index)
new_indices = [Int(indices[i]) for i in range(len(indices))]
new_indices.sort()
batches = []
num_tokens_list = []
max_pad_list = []
cur_batch = [new_indices[k].index for k in range(bsmul)]
cur_batch_size = bsmul
length = [num_tokens(new_indices[k].index) for k in range(0,bsmul)]
cur_maxlen = max(length)
cur_minlen = min(length)
cur_num_tokens = cur_maxlen*bsmul
#assert len(new_indices)==(len(new_indices)//bsmul)*bsmul, "require batch size multiple"
for i in range(1,len(new_indices)//bsmul+1):
interval = bsmul
if i==len(new_indices)//bsmul:
interval = len(new_indices)-len(new_indices)//bsmul*bsmul
if interval==0:
continue
length = [num_tokens(new_indices[k].index) for k in range(bsmul*i,bsmul*i+interval)]
max_len = max(length)
min_len = min(length)
tem_cur_maxlen = max(max_len,cur_maxlen)
tem_cur_minlen = min(min_len,cur_minlen)
#tem_cur_maxlen = max(cur_maxlen, num_tokens(new_indices[i].index))
#tem_cur_minlen = min(cur_maxlen, num_tokens(new_indices[i].index))
if (cur_batch_size+interval)*tem_cur_maxlen<=max_tokens:
cur_batch += [new_indices[k].index for k in range(bsmul*i,bsmul*i+interval)]
cur_maxlen = tem_cur_maxlen
cur_minlen = tem_cur_minlen
cur_batch_size += interval
else:
batches.append(cur_batch)
max_pad_list.append(cur_maxlen-cur_minlen)
num_tokens_list.append(cur_batch_size*cur_maxlen)
cur_batch = [new_indices[k].index for k in range(bsmul*i,bsmul*i+interval)]
cur_maxlen = max_len
cur_minlen = min_len
cur_batch_size = interval
assert cur_batch != [], "wrong logic of cur_batch!"
print("last num tokens: {}".format(cur_batch_size*cur_maxlen))
batches.append(cur_batch)
max_pad_list.append(cur_maxlen-cur_minlen)
num_tokens_list.append(cur_batch_size*cur_maxlen)
return batches, num_tokens_list, max_pad_list
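# Illustrative sketch (toy lengths are assumptions): sort indices by length and
# greedily pack groups of `bsmul` sentences while the padded batch stays within
# max_tokens; the final group is flushed as-is.
_lengths = [2, 3, 4, 5, 6, 7]
_batches, _tok_counts, _max_pads = my_batch_by_size(
    list(range(6)), num_tokens=lambda i: _lengths[i], max_tokens=14, bsmul=2,
)
# _batches == [[0, 1], [2, 3], [4, 5]]; _tok_counts == [6, 10, 14]; _max_pads == [1, 1, 1]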
def batch_by_size(
indices, num_tokens_fn, max_tokens=None, max_sentences=None,
required_batch_size_multiple=1, fixed_shapes=None,my_batching=0,tol=0,
):
"""
Yield mini-batches of indices bucketed by size. Batches may contain
sequences of different lengths.
Args:
indices (List[int]): ordered list of dataset indices
num_tokens_fn (callable): function that returns the number of tokens at
a given index
max_tokens (int, optional): max number of tokens in each batch
(default: None).
max_sentences (int, optional): max number of sentences in each
batch (default: None).
required_batch_size_multiple (int, optional): require batch size to
be less than N or a multiple of N (default: 1).
fixed_shapes (List[Tuple[int, int]], optional): if given, batches will
only be created with the given shapes. *max_sentences* and
*required_batch_size_multiple* will be ignored (default: None).
"""
try:
from fairseq.data.data_utils_fast import (
batch_by_size_fast, batch_fixed_shapes_fast,
)
except ImportError:
raise ImportError(
'Please build Cython components with: `pip install --editable .` '
'or `python setup.py build_ext --inplace`'
)
max_tokens = max_tokens if max_tokens is not None else -1
max_sentences = max_sentences if max_sentences is not None else -1
bsz_mult = required_batch_size_multiple
if not isinstance(indices, np.ndarray): #false
indices = np.fromiter(indices, dtype=np.int64, count=-1)
if fixed_shapes is None:
if my_batching and len(indices)>1e5:
print("my batching!")
my_batches, num_tokens_list, max_pad_list = my_batch_by_size(indices, num_tokens_fn,max_tokens, bsmul=required_batch_size_multiple,tol=tol)
return my_batches
else:
return batch_by_size_fast(
indices, num_tokens_fn, max_tokens, max_sentences, bsz_mult,
)
else:
fixed_shapes = np.array(fixed_shapes, dtype=np.int64)
sort_order = np.lexsort([
fixed_shapes[:, 1].argsort(), # length
fixed_shapes[:, 0].argsort(), # bsz
])
fixed_shapes_sorted = fixed_shapes[sort_order]
return batch_fixed_shapes_fast(indices, num_tokens_fn, fixed_shapes_sorted)
def post_process(sentence: str, symbol: str):
if symbol == "sentencepiece":
sentence = sentence.replace(" ", "").replace("\u2581", " ").strip()
elif symbol == 'wordpiece':
sentence = sentence.replace(" ", "").replace("_", " ").strip()
elif symbol == 'letter':
sentence = sentence.replace(" ", "").replace("|", " ").strip()
elif symbol == "_EOW":
sentence = sentence.replace(" ", "").replace("_EOW", " ").strip()
elif symbol is not None and symbol != 'none':
sentence = (sentence + " ").replace(symbol, "").rstrip()
return sentence
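# Illustrative sketch: undo sentencepiece segmentation (the sample string is an
# assumption; "\u2581" is the sentencepiece word-boundary marker).
assert post_process("\u2581He llo \u2581world", "sentencepiece") == "Hello world"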
def compute_mask_indices(
shape: Tuple[int, int],
padding_mask: Optional[torch.Tensor],
mask_prob: float,
mask_length: int,
mask_type: str = "static",
mask_other: float = 0.0,
min_masks: int = 0,
no_overlap: bool = False,
min_space: int = 0,
) -> np.ndarray:
"""
Computes random mask spans for a given shape
Args:
        shape: the shape for which to compute masks.
should be of size 2 where first element is batch size and 2nd is timesteps
padding_mask: optional padding mask of the same size as shape, which will prevent masking padded elements
mask_prob: probability for each token to be chosen as start of the span to be masked. this will be multiplied by
number of timesteps divided by length of mask span to mask approximately this percentage of all elements.
however due to overlaps, the actual number will be smaller (unless no_overlap is True)
mask_type: how to compute mask lengths
static = fixed size
uniform = sample from uniform distribution [mask_other, mask_length*2]
normal = sample from normal distribution with mean mask_length and stdev mask_other. mask is min 1 element
            poisson = sample from Poisson distribution with lambda = mask length
min_masks: minimum number of masked spans
        no_overlap: if true, will switch to an alternative recursive algorithm that prevents spans from overlapping
min_space: only used if no_overlap is True, this is how many elements to keep unmasked between spans
"""
bsz, all_sz = shape
mask = np.full((bsz, all_sz), False)
all_num_mask = int(
# add a random number for probabilistic rounding
mask_prob * all_sz / float(mask_length)
+ np.random.rand()
)
all_num_mask = max(min_masks, all_num_mask)
mask_idcs = []
for i in range(bsz):
if padding_mask is not None:
sz = all_sz - padding_mask[i].long().sum().item()
num_mask = int(
# add a random number for probabilistic rounding
mask_prob * sz / float(mask_length)
+ np.random.rand()
)
num_mask = max(min_masks, num_mask)
else:
sz = all_sz
num_mask = all_num_mask
if mask_type == "static":
lengths = np.full(num_mask, mask_length)
elif mask_type == "uniform":
lengths = np.random.randint(mask_other, mask_length * 2 + 1, size=num_mask)
elif mask_type == "normal":
lengths = np.random.normal(mask_length, mask_other, size=num_mask)
lengths = [max(1, int(round(x))) for x in lengths]
elif mask_type == "poisson":
lengths = np.random.poisson(mask_length, size=num_mask)
lengths = [int(round(x)) for x in lengths]
else:
raise Exception("unknown mask selection " + mask_type)
if sum(lengths) == 0:
lengths[0] = min(mask_length, sz - 1)
if no_overlap:
mask_idc = []
def arrange(s, e, length, keep_length):
span_start = np.random.randint(s, e-length)
mask_idc.extend(span_start + i for i in range(length))
new_parts = []
if span_start - s - min_space >= keep_length:
new_parts.append((s, span_start-min_space+1))
if e - span_start - keep_length - min_space > keep_length:
new_parts.append((span_start + length + min_space, e))
return new_parts
parts = [(0, sz)]
min_length = min(lengths)
for length in sorted(lengths, reverse=True):
lens = np.fromiter((e - s if e-s >= length+min_space else 0 for s, e in parts), np.int)
l_sum = np.sum(lens)
if l_sum == 0:
break
probs = lens / np.sum(lens)
c = np.random.choice(len(parts), p=probs)
s, e = parts.pop(c)
parts.extend(arrange(s, e, length, min_length))
mask_idc = np.asarray(mask_idc)
else:
min_len = min(lengths)
if sz - min_len <= num_mask:
min_len = sz - num_mask - 1
mask_idc = np.random.choice(sz - min_len, num_mask, replace=False)
mask_idc = np.asarray(
[
mask_idc[j] + offset
for j in range(len(mask_idc))
for offset in range(lengths[j])
]
)
mask_idcs.append(np.unique(mask_idc[mask_idc < sz]))
min_len = min([len(m) for m in mask_idcs])
for i, mask_idc in enumerate(mask_idcs):
if len(mask_idc) > min_len:
mask_idc = np.random.choice(mask_idc, min_len, replace=False)
mask[i, mask_idc] = True
return mask
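# Illustrative sketch (toy shape and probabilities are assumptions): mask ~50% of
# 10 timesteps in fixed spans of length 2 for a batch of 2 unpadded sequences.
_mask = compute_mask_indices((2, 10), None, mask_prob=0.5, mask_length=2)
assert _mask.shape == (2, 10) and _mask.dtype == np.bool_
# every row ends up with the same number of True entries (rows are trimmed to the minimum)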
| 18,575 | 39.12095 | 151 | py |
RegularizedBN | RegularizedBN-main/fairseq/data/nested_dictionary_dataset.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections import OrderedDict
import torch
from torch.utils.data.dataloader import default_collate
from . import FairseqDataset
def _flatten(dico, prefix=None):
"""Flatten a nested dictionary."""
new_dico = OrderedDict()
if isinstance(dico, dict):
prefix = prefix + '.' if prefix is not None else ''
for k, v in dico.items():
if v is None:
continue
new_dico.update(_flatten(v, prefix + k))
elif isinstance(dico, list):
for i, v in enumerate(dico):
new_dico.update(_flatten(v, prefix + '.[' + str(i) + ']'))
else:
new_dico = OrderedDict({prefix: dico})
return new_dico
def _unflatten(dico):
"""Unflatten a flattened dictionary into a nested dictionary."""
new_dico = OrderedDict()
for full_k, v in dico.items():
full_k = full_k.split('.')
node = new_dico
for k in full_k[:-1]:
if k.startswith('[') and k.endswith(']'):
k = int(k[1:-1])
if k not in node:
node[k] = OrderedDict()
node = node[k]
node[full_k[-1]] = v
return new_dico
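# Illustrative sketch (key names are assumptions): flattening joins nested keys
# with dots and unflattening reverses it, which is how collater() rebuilds batches.
_nested = OrderedDict([("net_input", OrderedDict([("src_tokens", 1)])), ("target", 2)])
_flat = _flatten(_nested)  # OrderedDict([('net_input.src_tokens', 1), ('target', 2)])
assert _unflatten(_flat) == _nested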
class NestedDictionaryDataset(FairseqDataset):
def __init__(self, defn, sizes=None):
super().__init__()
self.defn = _flatten(defn)
self.sizes = [sizes] if not isinstance(sizes, (list, tuple)) else sizes
first = None
for v in self.defn.values():
if not isinstance(v, (FairseqDataset, torch.utils.data.Dataset, )):
raise ValueError('Expected Dataset but found: {}'.format(v.__class__))
first = first or v
if len(v) > 0:
assert len(v) == len(first), 'dataset lengths must match'
self._len = len(first)
def __getitem__(self, index):
return OrderedDict((k, ds[index]) for k, ds in self.defn.items())
def __len__(self):
return self._len
def collater(self, samples):
"""Merge a list of samples to form a mini-batch.
Args:
samples (List[dict]): samples to collate
Returns:
dict: a mini-batch suitable for forwarding with a Model
"""
if len(samples) == 0:
return {}
sample = OrderedDict()
for k, ds in self.defn.items():
try:
sample[k] = ds.collater([s[k] for s in samples])
except NotImplementedError:
sample[k] = default_collate([s[k] for s in samples])
return _unflatten(sample)
def num_tokens(self, index):
"""Return the number of tokens in a sample. This value is used to
enforce ``--max-tokens`` during batching."""
return max(s[index] for s in self.sizes)
def size(self, index):
"""Return an example's size as a float or tuple. This value is used when
filtering a dataset with ``--max-positions``."""
if len(self.sizes) == 1:
return self.sizes[0][index]
else:
return (s[index] for s in self.sizes)
@property
def supports_prefetch(self):
"""Whether this dataset supports prefetching."""
return any(ds.supports_prefetch for ds in self.defn.values())
def prefetch(self, indices):
"""Prefetch the data required for this epoch."""
for ds in self.defn.values():
if getattr(ds, 'supports_prefetch', False):
ds.prefetch(indices)
def set_epoch(self, epoch):
super().set_epoch(epoch)
for ds in self.defn.values():
ds.set_epoch(epoch)
| 3,776 | 31.282051 | 86 | py |
RegularizedBN | RegularizedBN-main/fairseq/data/add_target_dataset.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from . import BaseWrapperDataset
from . import data_utils
class AddTargetDataset(BaseWrapperDataset):
def __init__(self, dataset, labels, pad, eos, batch_targets, process_label=None, add_to_input=False):
super().__init__(dataset)
self.labels = labels
self.batch_targets = batch_targets
self.pad = pad
self.eos = eos
self.process_label = process_label
self.add_to_input = add_to_input
def get_label(self, index):
return self.labels[index] if self.process_label is None else self.process_label(self.labels[index])
def __getitem__(self, index):
item = self.dataset[index]
item["label"] = self.get_label(index)
return item
def size(self, index):
sz = self.dataset.size(index)
own_sz = len(self.get_label(index))
return (sz, own_sz)
def collater(self, samples):
collated = self.dataset.collater(samples)
if len(collated) == 0:
return collated
indices = set(collated["id"].tolist())
target = [s["label"] for s in samples if s["id"] in indices]
if self.batch_targets:
collated["target_lengths"] = torch.LongTensor([len(t) for t in target])
target = data_utils.collate_tokens(target, pad_idx=self.pad, left_pad=False)
collated["ntokens"] = collated["target_lengths"].sum().item()
else:
collated["ntokens"] = sum([len(t) for t in target])
collated["target"] = target
if self.add_to_input:
eos = target.new_full((target.size(0), 1), self.eos)
collated["target"] = torch.cat([target, eos], dim=-1).long()
collated["net_input"]["prev_output_tokens"] = torch.cat([eos, target], dim=-1).long()
collated["ntokens"] += target.size(0)
        return collated
| 2,046 | 35.553571 | 107 | py |
RegularizedBN | RegularizedBN-main/fairseq/data/transform_eos_lang_pair_dataset.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from . import FairseqDataset
import torch
from typing import Optional
class TransformEosLangPairDataset(FairseqDataset):
"""A :class:`~fairseq.data.FairseqDataset` wrapper that transform bos on
collated samples of language pair dataset.
Note that the transformation is applied in :func:`collater`.
Args:
        dataset (~fairseq.data.FairseqDataset): dataset that collates samples into
LanguagePairDataset schema
src_eos (int): original source end-of-sentence symbol index to be replaced
new_src_eos (int, optional): new end-of-sentence symbol index to replace source eos symbol
tgt_bos (int, optional): original target beginning-of-sentence symbol index to be replaced
new_tgt_bos (int, optional): new beginning-of-sentence symbol index to replace at the
beginning of 'prev_output_tokens'
"""
def __init__(
self,
dataset: FairseqDataset,
src_eos: int,
new_src_eos: Optional[int] = None,
tgt_bos: Optional[int] = None,
new_tgt_bos: Optional[int] = None,
):
self.dataset = dataset
self.src_eos = src_eos
self.new_src_eos = new_src_eos
self.tgt_bos = tgt_bos
self.new_tgt_bos = new_tgt_bos
def __getitem__(self, index):
return self.dataset[index]
def __len__(self):
return len(self.dataset)
def collater(self, samples, **extra_args):
samples = self.dataset.collater(samples, **extra_args)
if self.new_src_eos is not None:
if self.dataset.left_pad_source:
assert(samples['net_input']['src_tokens'][:, -1] != self.src_eos).sum() == 0
samples['net_input']['src_tokens'][:, -1] = self.new_src_eos
else:
eos_idx = samples['net_input']['src_lengths'] - 1
assert(
samples['net_input']['src_tokens'][torch.arange(eos_idx.size(0)), eos_idx] != self.src_eos
).sum() == 0
eos_idx = eos_idx.resize_(len(samples['net_input']['src_lengths']), 1)
samples['net_input']['src_tokens'].scatter_(1, eos_idx, self.new_src_eos)
if self.new_tgt_bos is not None and 'prev_output_tokens' in samples['net_input']:
if self.dataset.left_pad_target:
# TODO: support different padding direction on target side
raise NotImplementedError(
'TransformEosLangPairDataset does not implement --left-pad-target True option'
)
else:
assert (samples['net_input']['prev_output_tokens'][:, 0] != self.tgt_bos).sum() == 0
samples['net_input']['prev_output_tokens'][:, 0] = self.new_tgt_bos
return samples
def num_tokens(self, index):
return self.dataset.num_tokens(index)
def size(self, index):
return self.dataset.size(index)
def ordered_indices(self):
return self.dataset.ordered_indices()
@property
def supports_prefetch(self):
return getattr(self.dataset, 'supports_prefetch', False)
def prefetch(self, indices):
return self.dataset.prefetch(indices)
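# Hedged usage sketch: a minimal stand-in dataset, just enough to show collater()
# replacing the source eos (id 2) with a new eos (id 9). The field names follow the
# LanguagePairDataset schema mentioned in the docstring; all ids are assumptions.
class _ToyPairDataset(FairseqDataset):
    left_pad_source = True
    left_pad_target = False
    def __getitem__(self, index):
        return {"id": index}
    def __len__(self):
        return 1
    def collater(self, samples, **extra_args):
        return {
            "net_input": {
                "src_tokens": torch.LongTensor([[4, 5, 2]]),
                "src_lengths": torch.LongTensor([3]),
            },
        }
_wrapped = TransformEosLangPairDataset(_ToyPairDataset(), src_eos=2, new_src_eos=9)
_batch = _wrapped.collater([_wrapped[0]])
assert _batch["net_input"]["src_tokens"][0, -1].item() == 9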
| 3,381 | 36.577778 | 110 | py |
RegularizedBN | RegularizedBN-main/fairseq/data/lm_context_window_dataset.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
from fairseq.data.monolingual_dataset import MonolingualDataset
from . import FairseqDataset
class LMContextWindowDataset(FairseqDataset):
"""Wraps a MonolingualDataset and provides more context for evaluation."""
def __init__(self, dataset, tokens_per_sample, context_window, pad_idx):
assert isinstance(dataset, MonolingualDataset)
assert context_window > 0
self.dataset = dataset
self.tokens_per_sample = tokens_per_sample
self.context_window = context_window
self.pad_idx = pad_idx
self.prev_tokens = np.empty([0])
def __getitem__(self, index):
return self.dataset[index]
def __len__(self):
return len(self.dataset)
def collater(self, samples):
sample = self.dataset.collater(samples)
pad = self.pad_idx
max_sample_len = self.tokens_per_sample + self.context_window
bsz, tsz = sample['net_input']['src_tokens'].shape
start_idxs = [0] * bsz
toks = sample['net_input']['src_tokens']
lengths = sample['net_input']['src_lengths']
tgt = sample['target']
new_toks = np.empty([bsz, tsz + self.context_window], dtype=np.int64)
new_tgt = np.full([bsz, tsz + self.context_window], pad, dtype=np.int64)
sample_lens = toks.ne(pad).long().sum(dim=1).cpu()
for i in range(bsz):
sample_len = sample_lens[i]
extra = len(self.prev_tokens) + sample_len - max_sample_len
if extra > 0:
self.prev_tokens = self.prev_tokens[extra:]
pads = np.full(self.context_window - len(self.prev_tokens), pad)
new_toks[i] = np.concatenate([self.prev_tokens, toks[i].numpy(), pads])
new_tgt[i, len(self.prev_tokens):len(self.prev_tokens) + len(tgt[i])] = tgt[i]
start_idxs[i] = len(self.prev_tokens)
lengths[i] += len(self.prev_tokens)
self.prev_tokens = new_toks[i][new_toks[i] != pad][-self.context_window:]
sample['net_input']['src_tokens'] = torch.from_numpy(new_toks)
sample['target'] = torch.from_numpy(new_tgt)
sample['start_indices'] = start_idxs
return sample
def num_tokens(self, index):
return self.dataset.num_tokens(index)
def size(self, index):
return self.dataset.size(index)
def ordered_indices(self):
# NOTE we don't shuffle the data to retain access to the previous dataset elements
return np.arange(len(self.dataset))
@property
def supports_prefetch(self):
return getattr(self.dataset, 'supports_prefetch', False)
def prefetch(self, indices):
return self.dataset.prefetch(indices)
| 2,910 | 35.848101 | 90 | py |
RegularizedBN | RegularizedBN-main/fairseq/data/colorize_dataset.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from . import BaseWrapperDataset
class ColorizeDataset(BaseWrapperDataset):
""" Adds 'colors' property to net input that is obtained from the provided color getter for use by models """
def __init__(self, dataset, color_getter):
super().__init__(dataset)
self.color_getter = color_getter
def collater(self, samples):
base_collate = super().collater(samples)
if len(base_collate) > 0:
base_collate["net_input"]["colors"] = torch.tensor(
list(self.color_getter(self.dataset, s["id"]) for s in samples),
dtype=torch.long,
)
return base_collate
| 844 | 32.8 | 113 | py |
RegularizedBN | RegularizedBN-main/fairseq/data/iterators.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import itertools
import logging
import math
import operator
import os
import queue
import time
from threading import Thread
import numpy as np
import torch
from fairseq.data import data_utils
logger = logging.getLogger(__name__)
# Object used by _background_consumer to signal the source is exhausted
# to the main thread.
_sentinel = object()
class CountingIterator(object):
"""Wrapper around an iterable that maintains the iteration count.
Args:
iterable (iterable): iterable to wrap
start (int): starting iteration count. Note that this doesn't
actually advance the iterator.
total (int): override the iterator length returned by
``__len__``. This can be used to truncate *iterator*.
Attributes:
n (int): number of elements consumed from this iterator
"""
def __init__(self, iterable, start=None, total=None):
self.iterable = iterable
self.itr = iter(self)
if start is None:
self.n = getattr(iterable, 'n', 0)
else:
self.n = start
if total is None:
self.total = self.n + len(iterable)
else:
self.total = total
def __len__(self):
return self.total
def __iter__(self):
for x in self.iterable:
if self.n >= self.total:
raise RuntimeError(
'Mismatch between actual and expected iterable length. '
'Please report this to the fairseq developers.'
)
self.n += 1
yield x
def __next__(self):
return next(self.itr)
def has_next(self):
"""Whether the iterator has been exhausted."""
return self.n < len(self)
def skip(self, num_to_skip):
"""Fast-forward the iterator by skipping *num_to_skip* elements."""
next(itertools.islice(self.itr, num_to_skip, num_to_skip), None)
return self
def take(self, n):
"""
Truncates the iterator to n elements at most.
"""
self.total = min(self.total, n)
# Propagate this change to the underlying iterator
if hasattr(self.iterable, "take"):
self.iterable.take(n)
else:
self.iterable = itertools.islice(self.iterable, n)
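# Illustrative sketch (the toy list is an assumption): the wrapper tracks how many
# elements have been consumed and supports skipping ahead.
_itr = CountingIterator([10, 20, 30])
assert next(_itr) == 10 and _itr.n == 1
_itr.skip(1)  # fast-forward past 20
assert next(_itr) == 30 and not _itr.has_next()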
class EpochBatchIterating(object):
def __len__(self) -> int:
raise NotImplementedError
@property
def next_epoch_idx(self):
raise NotImplementedError
def next_epoch_itr(self, shuffle=True, fix_batches_to_gpus=False):
"""Return a new iterator over the dataset.
Args:
shuffle (bool, optional): shuffle batches before returning the
iterator (default: True).
fix_batches_to_gpus: ensure that batches are always
allocated to the same shards across epochs. Requires
that :attr:`dataset` supports prefetching (default: False).
"""
raise NotImplementedError
def end_of_epoch(self) -> bool:
"""Returns whether the most recent epoch iterator has been exhausted"""
raise NotImplementedError
@property
def iterations_in_epoch(self) -> int:
"""The number of consumed batches in the current epoch."""
raise NotImplementedError
def state_dict(self):
"""Returns a dictionary containing a whole state of the iterator."""
raise NotImplementedError
def load_state_dict(self, state_dict):
"""Copies the state of the iterator from the given *state_dict*."""
raise NotImplementedError
class StreamingEpochBatchIterator(EpochBatchIterating):
def __init__(
self, dataset, epoch=1, num_shards=1, shard_id=0,
):
assert isinstance(dataset, torch.utils.data.IterableDataset)
self.dataset = dataset
self.epoch = max(epoch, 1) # we use 1-based indexing for epochs
self._current_epoch_iterator = None
self.num_shards = num_shards
self.shard_id = shard_id
@property
def next_epoch_idx(self):
"""Return the epoch index after *next_epoch_itr* is called."""
if self._current_epoch_iterator is not None and self.end_of_epoch():
return self.epoch + 1
else:
return self.epoch
def next_epoch_itr(self, shuffle=True, fix_batches_to_gpus=False):
self.epoch = self.next_epoch_idx
self.dataset.set_epoch(self.epoch)
self._current_epoch_iterator = CountingIterator(
iterable=ShardedIterator(
iterable=self.dataset,
num_shards=self.num_shards,
shard_id=self.shard_id,
),
)
return self._current_epoch_iterator
def end_of_epoch(self) -> bool:
return not self._current_epoch_iterator.has_next()
@property
def iterations_in_epoch(self) -> int:
if self._current_epoch_iterator is not None:
return self._current_epoch_iterator.n
return 0
def state_dict(self):
return {
'epoch': self.epoch,
}
def load_state_dict(self, state_dict):
self.epoch = state_dict['epoch']
class EpochBatchIterator(EpochBatchIterating):
"""A multi-epoch iterator over a :class:`torch.utils.data.Dataset`.
Compared to :class:`torch.utils.data.DataLoader`, this iterator:
- can be reused across multiple epochs with the :func:`next_epoch_itr`
method (optionally shuffled between epochs)
- can be serialized/deserialized with the :func:`state_dict` and
:func:`load_state_dict` methods
- supports sharding with the *num_shards* and *shard_id* arguments
Args:
dataset (~torch.utils.data.Dataset): dataset from which to load the data
collate_fn (callable): merges a list of samples to form a mini-batch
batch_sampler (~torch.utils.data.Sampler or a callable): an iterator over batches of
indices, or a callable to create such an iterator (~torch.utils.data.Sampler).
A callable batch_sampler will be called for each epoch to enable per epoch dynamic
batch iterators defined by this callable batch_sampler.
seed (int, optional): seed for random number generator for
reproducibility (default: 1).
num_shards (int, optional): shard the data iterator into N
shards (default: 1).
shard_id (int, optional): which shard of the data iterator to
return (default: 0).
num_workers (int, optional): how many subprocesses to use for data
loading. 0 means the data will be loaded in the main process
(default: 0).
epoch (int, optional): the epoch to start the iterator from
(default: 1).
buffer_size (int, optional): the number of batches to keep ready in the
queue. Helps speeding up dataloading. When buffer_size is zero, the
default torch.utils.data.DataLoader preloading is used.
timeout (int, optional): if positive, the timeout value for collecting a batch
from workers. Should always be non-negative. (default: ``0``)
"""
def __init__(
self, dataset, collate_fn, batch_sampler, seed=1, num_shards=1, shard_id=0,
num_workers=0, epoch=1, buffer_size=0, timeout=0,
):
assert isinstance(dataset, torch.utils.data.Dataset)
self.dataset = dataset
self.collate_fn = collate_fn
self.batch_sampler = batch_sampler
self._frozen_batches = tuple(batch_sampler) if not callable(batch_sampler) else None
self.seed = seed
self.num_shards = num_shards
self.shard_id = shard_id
self.num_workers = num_workers
# This upper limit here is to prevent people from abusing this feature
# in a shared computing environment.
self.buffer_size = min(buffer_size, 20)
self.timeout = timeout
self.epoch = max(epoch, 1) # we use 1-based indexing for epochs
self.shuffle = True
self._cur_epoch_itr = None
self._next_epoch_itr = None
self._supports_prefetch = getattr(dataset, 'supports_prefetch', False)
@property
def frozen_batches(self):
if self._frozen_batches is None:
self._frozen_batches = tuple(self.batch_sampler(self.dataset, self.epoch))
return self._frozen_batches
def __len__(self):
return int(math.ceil(len(self.frozen_batches) / float(self.num_shards)))
@property
def n(self):
return self.iterations_in_epoch
@property
def next_epoch_idx(self):
"""Return the epoch index after *next_epoch_itr* is called."""
if self._next_epoch_itr is not None:
return self.epoch
elif self._cur_epoch_itr is not None and self.end_of_epoch():
return self.epoch + 1
else:
return self.epoch
def next_epoch_itr(self, shuffle=True, fix_batches_to_gpus=False):
"""Return a new iterator over the dataset.
Args:
shuffle (bool, optional): shuffle batches before returning the
iterator (default: True).
fix_batches_to_gpus: ensure that batches are always
allocated to the same shards across epochs. Requires
that :attr:`dataset` supports prefetching (default: False).
"""
self.epoch = self.next_epoch_idx
self.dataset.set_epoch(self.epoch) #nothing done
if self._next_epoch_itr is not None: #false
self._cur_epoch_itr = self._next_epoch_itr
self._next_epoch_itr = None
else:
if callable(self.batch_sampler): #false
# reset _frozen_batches to refresh the next epoch
self._frozen_batches = None
self._cur_epoch_itr = self._get_iterator_for_epoch(
self.epoch, shuffle, fix_batches_to_gpus=fix_batches_to_gpus,
)
self.shuffle = shuffle
return self._cur_epoch_itr
def end_of_epoch(self) -> bool:
"""Returns whether the most recent epoch iterator has been exhausted"""
return not self._cur_epoch_itr.has_next()
@property
def iterations_in_epoch(self):
"""The number of consumed batches in the current epoch."""
if self._cur_epoch_itr is not None:
return self._cur_epoch_itr.n
elif self._next_epoch_itr is not None:
return self._next_epoch_itr.n
return 0
def state_dict(self):
"""Returns a dictionary containing a whole state of the iterator."""
if self.end_of_epoch():
epoch = self.epoch + 1
iter_in_epoch = 0
else:
epoch = self.epoch
iter_in_epoch = self.iterations_in_epoch
return {
'version': 2,
'epoch': epoch,
'iterations_in_epoch': iter_in_epoch,
'shuffle': self.shuffle,
}
def load_state_dict(self, state_dict):
"""Copies the state of the iterator from the given *state_dict*."""
self.epoch = state_dict['epoch']
itr_pos = state_dict.get('iterations_in_epoch', 0)
version = state_dict.get('version', 1)
if itr_pos > 0:
# fast-forward epoch iterator
self._next_epoch_itr = self._get_iterator_for_epoch(
self.epoch,
shuffle=state_dict.get('shuffle', True),
offset=itr_pos,
)
if self._next_epoch_itr is None:
if version == 1:
# legacy behavior: we finished the epoch, increment epoch counter
self.epoch += 1
else:
raise RuntimeError(
'Cannot resume training due to dataloader mismatch, please '
'report this to the fairseq developers. You can relaunch '
'training with `--reset-dataloader` and it should work.'
)
else:
self._next_epoch_itr = None
def _get_iterator_for_epoch(self, epoch, shuffle, fix_batches_to_gpus=False, offset=0):
#print("get_iterator_for_epoch")
#print(self._supports_prefetch); #true
def shuffle_batches(batches, seed):
with data_utils.numpy_seed(seed):
np.random.shuffle(batches)
return batches
def refresh_batch_sampler(seed):
with data_utils.numpy_seed(seed):
indices = self.dataset.ordered_indices()
batch_sampler = self.dataset.batch_by_size( #c++ code, can't change, but I have reproduced it in python
indices,
max_tokens=4096,
max_sentences=None,
required_batch_size_multiple=8,
my_batching=0,
tol=0
)
return shuffle_batches(batch_sampler,seed)
#print("shuffle", shuffle, "fix batches", fix_batches_to_gpus) #true; false
if self._supports_prefetch: #true on #50; false on #42, data may be different(e.g. dictionary)
batches = self.frozen_batches
if shuffle and not fix_batches_to_gpus:
batches = shuffle_batches(list(batches), self.seed + epoch)
#print("my refreshing!")
#batches = refresh_batch_sampler(self.seed + epoch)
batches = list(ShardedIterator(
batches, self.num_shards, self.shard_id, fill_value=[]
))
self.dataset.prefetch([i for s in batches for i in s])
if shuffle and fix_batches_to_gpus:
batches = shuffle_batches(batches, self.seed + epoch + self.shard_id)
else:
print("shuffle or not", shuffle);exit()
if shuffle: #true
#shuffle is not enough, we need a new batch sampler
batches = shuffle_batches(list(self.frozen_batches), self.seed + epoch)
#print("my refreshing!"); exit()
#batches = refresh_batch_sampler(self.seed + epoch)
else:
batches = self.frozen_batches
batches = list(ShardedIterator(
batches, self.num_shards, self.shard_id, fill_value=[]
))
if offset > 0 and offset >= len(batches):
return None
if self.num_workers > 0: #true
os.environ['PYTHONWARNINGS'] = 'ignore:semaphore_tracker:UserWarning'
# Create data loader
itr = torch.utils.data.DataLoader(
self.dataset,
collate_fn=self.collate_fn,
batch_sampler=batches[offset:],
num_workers=self.num_workers,
timeout=self.timeout,
)
# Wrap with a BufferedIterator if needed
if self.buffer_size > 0: #=10
itr = BufferedIterator(self.buffer_size, itr)
        # Wrap with CountingIterator
itr = CountingIterator(itr, start=offset)
return itr
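# A minimal end-to-end sketch of EpochBatchIterator (illustrative only). The toy dataset
# below is an assumption: any map-style torch Dataset works as long as it also exposes
# set_epoch(), because next_epoch_itr() calls it before building the epoch iterator.
class _ToyMapDataset(torch.utils.data.Dataset):
    def __init__(self, values):
        self.values = values
    def __getitem__(self, index):
        return self.values[index]
    def __len__(self):
        return len(self.values)
    def set_epoch(self, epoch):
        pass  # no epoch-dependent behaviour in this sketch
def _epoch_batch_iterator_sketch():
    dataset = _ToyMapDataset(list(range(10)))
    batch_sampler = [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
    epoch_itr = EpochBatchIterator(
        dataset=dataset,
        collate_fn=lambda items: items,   # identity collater, enough for the sketch
        batch_sampler=batch_sampler,
    )
    itr = epoch_itr.next_epoch_itr(shuffle=False)
    batches = list(itr)                   # four batches, one per entry in batch_sampler
    state = epoch_itr.state_dict()        # serializable; restore later with load_state_dict()
    return batches, state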
class GroupedIterator(CountingIterator):
"""Wrapper around an iterable that returns groups (chunks) of items.
Args:
iterable (iterable): iterable to wrap
chunk_size (int): size of each chunk
Attributes:
n (int): number of elements consumed from this iterator
"""
def __init__(self, iterable, chunk_size):
#chunk_size=1
itr = _chunk_iterator(iterable, chunk_size)
super().__init__(
itr,
start=int(math.ceil(getattr(iterable, 'n', 0) / float(chunk_size))),
total=int(math.ceil(len(iterable) / float(chunk_size))),
)
self.chunk_size = chunk_size
def _chunk_iterator(itr, chunk_size):
chunk = []
for x in itr:
chunk.append(x)
if len(chunk) == chunk_size:
yield chunk
chunk = []
if len(chunk) > 0:
yield chunk
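# A minimal sketch of GroupedIterator (illustrative only); a plain list is enough
# because the CountingIterator base class only needs len() on the wrapped iterable.
def _grouped_iterator_sketch():
    grouped = GroupedIterator([1, 2, 3, 4, 5], chunk_size=2)
    return list(grouped)   # -> [[1, 2], [3, 4], [5]]; len(grouped) == 3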
class ShardedIterator(CountingIterator):
"""A sharded wrapper around an iterable, padded to length.
Args:
iterable (iterable): iterable to wrap
num_shards (int): number of shards to split the iterable into
shard_id (int): which shard to iterator over
fill_value (Any, optional): padding value when the iterable doesn't
evenly divide *num_shards* (default: None).
Attributes:
n (int): number of elements consumed from this iterator
"""
def __init__(self, iterable, num_shards, shard_id, fill_value=None):
if shard_id < 0 or shard_id >= num_shards:
raise ValueError('shard_id must be between 0 and num_shards')
sharded_len = int(math.ceil(len(iterable) / float(num_shards)))
itr = map(
operator.itemgetter(1),
itertools.zip_longest(
range(sharded_len),
itertools.islice(iterable, shard_id, len(iterable), num_shards),
fillvalue=fill_value,
),
)
super().__init__(
itr,
start=int(math.ceil(getattr(iterable, 'n', 0) / float(num_shards))),
total=sharded_len,
)
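# A minimal sketch of ShardedIterator (illustrative only): five items split across two
# shards, with the shorter shard padded by fill_value so both shards have equal length.
def _sharded_iterator_sketch():
    shard0 = list(ShardedIterator([0, 1, 2, 3, 4], num_shards=2, shard_id=0))
    shard1 = list(ShardedIterator([0, 1, 2, 3, 4], num_shards=2, shard_id=1, fill_value=-1))
    return shard0, shard1   # -> ([0, 2, 4], [1, 3, -1])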
class BackgroundConsumer(Thread):
def __init__(self, queue, source, max_len):
Thread.__init__(self)
self._queue = queue
self._source = source
self._max_len = max_len
self.count = 0
def run(self):
try:
self._source_iter = iter(self._source)
for _ in range(len(self._source)):
item = next(self._source_iter)
self._queue.put(item)
# Stop if we reached the maximum length
self.count += 1
if self._max_len is not None and self.count >= self._max_len:
break
# Signal the consumer we are done.
self._queue.put(_sentinel)
except Exception as e:
self._queue.put(e)
del self._source_iter
class BufferedIterator(object):
def __init__(self, size, iterable):
self._queue = queue.Queue(size)
self._iterable = iterable
self.max_len = None
self._consumer = None
self.start_time = time.time()
self.warning_time = None
def _create_consumer(self):
self._consumer = BackgroundConsumer(
self._queue,
self._iterable,
self.max_len
)
self._consumer.daemon = True
self._consumer.start()
def __iter__(self):
return self
def __len__(self):
return len(self._iterable)
def take(self, n):
self.max_len = n
def __next__(self):
# Create consumer if not created yet
if self._consumer is None:
self._create_consumer()
# Notify the user if there is a data loading bottleneck
if self._queue.qsize() < min(2, max(1, self._queue.maxsize // 2)):
if time.time() - self.start_time > 5 * 60:
if self.warning_time is None or time.time() - self.warning_time > 15 * 60:
logger.debug(
"Data loading buffer is empty or nearly empty. This may "
"indicate a data loading bottleneck, and increasing the "
"number of workers (--num-workers) may help."
)
self.warning_time = time.time()
# Get next example
item = self._queue.get(True)
if isinstance(item, Exception):
raise item
if item is _sentinel:
raise StopIteration()
return item
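# A minimal sketch of BufferedIterator (illustrative only): a plain list stands in for
# the torch DataLoader used above; items are produced by the daemon BackgroundConsumer.
def _buffered_iterator_sketch():
    buffered = BufferedIterator(size=2, iterable=[1, 2, 3])
    return list(buffered)   # -> [1, 2, 3]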
| 19,798 | 34.292335 | 117 | py |
RegularizedBN | RegularizedBN-main/fairseq/data/backtranslation_dataset.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from fairseq import utils
from . import FairseqDataset
def backtranslate_samples(samples, collate_fn, generate_fn, cuda=True):
"""Backtranslate a list of samples.
Given an input (*samples*) of the form:
[{'id': 1, 'source': 'hallo welt'}]
this will return:
[{'id': 1, 'source': 'hello world', 'target': 'hallo welt'}]
Args:
samples (List[dict]): samples to backtranslate. Individual samples are
expected to have a 'source' key, which will become the 'target'
after backtranslation.
collate_fn (callable): function to collate samples into a mini-batch
generate_fn (callable): function to generate backtranslations
cuda (bool): use GPU for generation (default: ``True``)
Returns:
List[dict]: an updated list of samples with a backtranslated source
"""
collated_samples = collate_fn(samples)
s = utils.move_to_cuda(collated_samples) if cuda else collated_samples
generated_sources = generate_fn(s)
id_to_src = {
sample['id']: sample['source'] for sample in samples
}
# Go through each tgt sentence in batch and its corresponding best
# generated hypothesis and create a backtranslation data pair
# {id: id, source: generated backtranslation, target: original tgt}
return [
{'id': id.item(), 'target': id_to_src[id.item()], 'source': hypos[0]['tokens'].cpu()}
for id, hypos in zip(collated_samples['id'], generated_sources)
]
class BacktranslationDataset(FairseqDataset):
"""
Sets up a backtranslation dataset which takes a tgt batch, generates
a src using a tgt-src backtranslation function (*backtranslation_fn*),
and returns the corresponding `{generated src, input tgt}` batch.
Args:
tgt_dataset (~fairseq.data.FairseqDataset): the dataset to be
backtranslated. Only the source side of this dataset will be used.
After backtranslation, the source sentences in this dataset will be
returned as the targets.
src_dict (~fairseq.data.Dictionary): the dictionary of backtranslated
sentences.
tgt_dict (~fairseq.data.Dictionary, optional): the dictionary of
sentences to be backtranslated.
backtranslation_fn (callable, optional): function to call to generate
backtranslations. This is typically the `generate` method of a
:class:`~fairseq.sequence_generator.SequenceGenerator` object.
Pass in None when it is not available at initialization time, and
use set_backtranslation_fn function to set it when available.
output_collater (callable, optional): function to call on the
backtranslated samples to create the final batch
(default: ``tgt_dataset.collater``).
cuda: use GPU for generation
"""
def __init__(
self,
tgt_dataset,
src_dict,
tgt_dict=None,
backtranslation_fn=None,
output_collater=None,
cuda=True,
**kwargs
):
self.tgt_dataset = tgt_dataset
self.backtranslation_fn = backtranslation_fn
self.output_collater = output_collater if output_collater is not None \
else tgt_dataset.collater
self.cuda = cuda if torch.cuda.is_available() else False
self.src_dict = src_dict
self.tgt_dict = tgt_dict
def __getitem__(self, index):
"""
Returns a single sample from *tgt_dataset*. Note that backtranslation is
not applied in this step; use :func:`collater` instead to backtranslate
a batch of samples.
"""
return self.tgt_dataset[index]
def __len__(self):
return len(self.tgt_dataset)
def set_backtranslation_fn(self, backtranslation_fn):
self.backtranslation_fn = backtranslation_fn
def collater(self, samples):
"""Merge and backtranslate a list of samples to form a mini-batch.
Using the samples from *tgt_dataset*, load a collated target sample to
feed to the backtranslation model. Then take the backtranslation with
the best score as the source and the original input as the target.
Note: we expect *tgt_dataset* to provide a function `collater()` that
will collate samples into the format expected by *backtranslation_fn*.
After backtranslation, we will feed the new list of samples (i.e., the
`(backtranslated source, original source)` pairs) to *output_collater*
and return the result.
Args:
samples (List[dict]): samples to backtranslate and collate
Returns:
dict: a mini-batch with keys coming from *output_collater*
"""
if samples[0].get('is_dummy', False):
return samples
samples = backtranslate_samples(
samples=samples,
collate_fn=self.tgt_dataset.collater,
generate_fn=(
lambda net_input: self.backtranslation_fn(net_input)
),
cuda=self.cuda,
)
return self.output_collater(samples)
def num_tokens(self, index):
"""Just use the tgt dataset num_tokens"""
return self.tgt_dataset.num_tokens(index)
def ordered_indices(self):
"""Just use the tgt dataset ordered_indices"""
return self.tgt_dataset.ordered_indices()
def size(self, index):
"""Return an example's size as a float or tuple. This value is used
when filtering a dataset with ``--max-positions``.
Note: we use *tgt_dataset* to approximate the length of the source
sentence, since we do not know the actual length until after
backtranslation.
"""
tgt_size = self.tgt_dataset.size(index)[0]
return (tgt_size, tgt_size)
@property
def supports_prefetch(self):
return getattr(self.tgt_dataset, 'supports_prefetch', False)
def prefetch(self, indices):
return self.tgt_dataset.prefetch(indices)
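# A minimal wiring sketch (illustrative only; all names below are hypothetical):
# `tgt_dataset` would be a LanguagePairDataset over monolingual target text, `generator`
# a tgt->src SequenceGenerator and `tgt_src_model` the backward model, built elsewhere.
def _build_backtranslation_dataset(tgt_dataset, src_dict, tgt_dict, generator, tgt_src_model):
    bt_dataset = BacktranslationDataset(
        tgt_dataset=tgt_dataset,
        src_dict=src_dict,
        tgt_dict=tgt_dict,
    )
    # generation is often only available after the model is built, hence the setter
    bt_dataset.set_backtranslation_fn(
        lambda sample: generator.generate([tgt_src_model], sample)
    )
    return bt_dataset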
| 6,235 | 36.566265 | 93 | py |
RegularizedBN | RegularizedBN-main/fairseq/data/monolingual_dataset.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
from . import data_utils, FairseqDataset
def collate(samples, pad_idx, eos_idx):
if len(samples) == 0:
return {}
def merge(key, is_list=False):
if is_list:
res = []
for i in range(len(samples[0][key])):
res.append(data_utils.collate_tokens(
[s[key][i] for s in samples], pad_idx, eos_idx, left_pad=False,
))
return res
else:
return data_utils.collate_tokens(
[s[key] for s in samples], pad_idx, eos_idx, left_pad=False,
)
src_tokens = merge('source')
if samples[0]['target'] is not None:
is_target_list = isinstance(samples[0]['target'], list)
target = merge('target', is_target_list)
else:
target = src_tokens
return {
'id': torch.LongTensor([s['id'] for s in samples]),
'nsentences': len(samples),
'ntokens': sum(len(s['source']) for s in samples),
'net_input': {
'src_tokens': src_tokens,
'src_lengths': torch.LongTensor([
s['source'].numel() for s in samples
]),
},
'target': target,
}
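# A minimal sketch of what the collater produces (illustrative only), using hand-made
# samples and the usual fairseq special indices pad_idx=1 and eos_idx=2.
def _collate_sketch():
    samples = [
        {'id': 0, 'source': torch.LongTensor([5, 6, 2]), 'target': torch.LongTensor([6, 7, 2])},
        {'id': 1, 'source': torch.LongTensor([8, 2]), 'target': torch.LongTensor([9, 2])},
    ]
    batch = collate(samples, pad_idx=1, eos_idx=2)
    # batch['net_input']['src_tokens'] is right-padded to shape (2, 3), batch['ntokens'] == 5,
    # and batch['target'] is padded the same way as the sources.
    return batch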
class MonolingualDataset(FairseqDataset):
"""
A wrapper around torch.utils.data.Dataset for monolingual data.
Args:
dataset (torch.utils.data.Dataset): dataset to wrap
sizes (List[int]): sentence lengths
vocab (~fairseq.data.Dictionary): vocabulary
shuffle (bool, optional): shuffle the elements before batching
(default: True).
"""
def __init__(self, dataset, sizes, src_vocab, tgt_vocab, add_eos_for_other_targets, shuffle,
targets=None, add_bos_token=False):
self.dataset = dataset
self.sizes = np.array(sizes)
self.vocab = src_vocab
self.tgt_vocab = tgt_vocab
self.add_eos_for_other_targets = add_eos_for_other_targets
self.shuffle = shuffle
self.add_bos_token = add_bos_token
assert targets is None or all(t in {'self', 'future', 'past'} for t in targets), \
"targets must be none or one of 'self', 'future', 'past'"
if targets is not None and len(targets) == 0:
targets = None
self.targets = targets
def __getitem__(self, index):
if self.targets is not None:
# *future_target* is the original sentence
# *source* is shifted right by 1 (maybe left-padded with eos)
# *past_target* is shifted right by 2 (left-padded as needed)
#
# Left-to-right language models should condition on *source* and
# predict *future_target*.
# Right-to-left language models should condition on *source* and
# predict *past_target*.
source, future_target, past_target = self.dataset[index]
source, target = self._make_source_target(source, future_target, past_target)
else:
source = self.dataset[index]
target = None
source, target = self._maybe_add_bos(source, target)
return {'id': index, 'source': source, 'target': target}
def __len__(self):
return len(self.dataset)
def _make_source_target(self, source, future_target, past_target):
if self.targets is not None:
target = []
if self.add_eos_for_other_targets and (('self' in self.targets) or ('past' in self.targets)) \
and source[-1] != self.vocab.eos():
# append eos at the end of source
source = torch.cat([source, source.new([self.vocab.eos()])])
if 'future' in self.targets:
future_target = torch.cat([future_target, future_target.new([self.vocab.pad()])])
if 'past' in self.targets:
# first token is before the start of sentence which is only used in "none" break mode when
# add_eos_for_other_targets is False
past_target = torch.cat([past_target.new([self.vocab.pad()]), past_target[1:], source[-2, None]])
for t in self.targets:
if t == 'self':
target.append(source)
elif t == 'future':
target.append(future_target)
elif t == 'past':
target.append(past_target)
else:
raise Exception('invalid target ' + t)
if len(target) == 1:
target = target[0]
else:
target = future_target
return source, self._filter_vocab(target)
def _maybe_add_bos(self, source, target):
if self.add_bos_token:
source = torch.cat([source.new([self.vocab.bos()]), source])
if target is not None:
target = torch.cat([target.new([self.tgt_vocab.bos()]), target])
return source, target
def _filter_vocab(self, target):
if len(self.tgt_vocab) != len(self.vocab):
def _filter(target):
mask = target.ge(len(self.tgt_vocab))
if mask.any():
target[mask] = self.tgt_vocab.unk()
return target
if isinstance(target, list):
return [_filter(t) for t in target]
return _filter(target)
return target
def collater(self, samples):
"""Merge a list of samples to form a mini-batch.
Args:
samples (List[dict]): samples to collate
Returns:
dict: a mini-batch with the following keys:
- `id` (LongTensor): example IDs in the original input order
- `ntokens` (int): total number of tokens in the batch
- `net_input` (dict): the input to the Model, containing keys:
- `src_tokens` (LongTensor): a padded 2D Tensor of tokens in
the source sentence of shape `(bsz, src_len)`. Padding will
appear on the right.
- `target` (LongTensor): a padded 2D Tensor of tokens in the
target sentence of shape `(bsz, tgt_len)`. Padding will appear
on the right.
"""
return collate(samples, self.vocab.pad(), self.vocab.eos())
def num_tokens(self, index):
"""Return the number of tokens in a sample. This value is used to
enforce ``--max-tokens`` during batching."""
return self.sizes[index]
def size(self, index):
"""Return an example's size as a float or tuple. This value is used when
filtering a dataset with ``--max-positions``."""
return self.sizes[index]
def ordered_indices(self):
"""Return an ordered list of indices. Batches will be constructed based
on this order."""
if self.shuffle:
order = [np.random.permutation(len(self))]
else:
order = [np.arange(len(self))]
order.append(self.sizes)
return np.lexsort(order)
@property
def supports_prefetch(self):
return getattr(self.dataset, 'supports_prefetch', False)
def prefetch(self, indices):
self.dataset.prefetch(indices)
| 7,469 | 36.164179 | 117 | py |
RegularizedBN | RegularizedBN-main/fairseq/data/roll_dataset.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from . import BaseWrapperDataset
class RollDataset(BaseWrapperDataset):
def __init__(self, dataset, shifts):
super().__init__(dataset)
self.shifts = shifts
def __getitem__(self, index):
item = self.dataset[index]
return torch.roll(item, self.shifts)
| 486 | 23.35 | 65 | py |
RegularizedBN | RegularizedBN-main/fairseq/data/replace_dataset.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from . import BaseWrapperDataset
class ReplaceDataset(BaseWrapperDataset):
"""Replaces tokens found in the dataset by a specified replacement token
Args:
dataset (~torch.utils.data.Dataset): dataset to replace tokens in
replace_map(Dictionary[int,int]): map of token to replace -> replacement token
offsets (List[int]): do not replace tokens before (from left if pos, right if neg) this offset. should be
as many as the number of objects returned by the underlying dataset __getitem__ method.
"""
def __init__(self, dataset, replace_map, offsets):
super().__init__(dataset)
assert len(replace_map) > 0
self.replace_map = replace_map
self.offsets = offsets
def __getitem__(self, index):
item = self.dataset[index]
is_tuple = isinstance(item, tuple)
srcs = item if is_tuple else [item]
for offset, src in zip(self.offsets, srcs):
for k, v in self.replace_map.items():
src_off = src[offset:] if offset >= 0 else src[:offset]
src_off.masked_fill_(src_off == k, v)
item = srcs if is_tuple else srcs[0]
return item
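# A minimal sketch of the replacement logic (illustrative only; `base` is a hypothetical
# wrapped dataset whose items are 1-D LongTensors). Note that masked_fill_ edits in place.
#
# base[0] -> tensor([5, 4, 5])
# ds = ReplaceDataset(base, replace_map={5: 7}, offsets=[1])
# ds[0] -> tensor([5, 4, 7])   # the first position is protected by offset=1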
| 1,394 | 36.702703 | 117 | py |
RegularizedBN | RegularizedBN-main/fairseq/data/id_dataset.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from . import FairseqDataset
class IdDataset(FairseqDataset):
def __getitem__(self, index):
return index
def __len__(self):
return 0
def collater(self, samples):
return torch.tensor(samples)
| 424 | 19.238095 | 65 | py |
RegularizedBN | RegularizedBN-main/fairseq/data/indexed_dataset.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from functools import lru_cache
import os
import shutil
import struct
import numpy as np
import torch
from . import FairseqDataset
from fairseq.data.fasta_dataset import FastaDataset
def __best_fitting_dtype(vocab_size=None):
if vocab_size is not None and vocab_size < 65500:
return np.uint16
else:
return np.int32
def get_available_dataset_impl():
return ['raw', 'lazy', 'cached', 'mmap', 'fasta']
def infer_dataset_impl(path):
if IndexedRawTextDataset.exists(path):
return 'raw'
elif IndexedDataset.exists(path):
with open(index_file_path(path), 'rb') as f:
magic = f.read(8)
if magic == IndexedDataset._HDR_MAGIC:
return 'cached'
elif magic == MMapIndexedDataset.Index._HDR_MAGIC[:8]:
return 'mmap'
else:
return None
elif FastaDataset.exists(path):
return 'fasta'
else:
return None
def make_builder(out_file, impl, vocab_size=None):
if impl == 'mmap':
return MMapIndexedDatasetBuilder(out_file, dtype=__best_fitting_dtype(vocab_size))
elif impl == 'fasta':
raise NotImplementedError
else:
return IndexedDatasetBuilder(out_file)
def make_dataset(path, impl, fix_lua_indexing=False, dictionary=None):
if impl == 'raw' and IndexedRawTextDataset.exists(path):
assert dictionary is not None
return IndexedRawTextDataset(path, dictionary)
elif impl == 'lazy' and IndexedDataset.exists(path):
return IndexedDataset(path, fix_lua_indexing=fix_lua_indexing)
elif impl == 'cached' and IndexedDataset.exists(path):
return IndexedCachedDataset(path, fix_lua_indexing=fix_lua_indexing)
elif impl == 'mmap' and MMapIndexedDataset.exists(path):
return MMapIndexedDataset(path)
elif impl == 'fasta' and FastaDataset.exists(path):
from fairseq.data.fasta_dataset import EncodedFastaDataset
return EncodedFastaDataset(path, dictionary)
return None
def dataset_exists(path, impl):
if impl == 'raw':
return IndexedRawTextDataset.exists(path)
elif impl == 'mmap':
return MMapIndexedDataset.exists(path)
else:
return IndexedDataset.exists(path)
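# A minimal loading sketch (illustrative only): `path` is assumed to be a binarized-split
# prefix such as 'data-bin/train.en-de.en' and `dictionary` the matching fairseq Dictionary.
def _load_binarized_split(path, dictionary):
    impl = infer_dataset_impl(path)   # one of 'raw', 'cached', 'mmap', 'fasta' or None
    if impl is None:
        raise FileNotFoundError('no indexed dataset found at {}'.format(path))
    return make_dataset(path, impl, fix_lua_indexing=True, dictionary=dictionary)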
def read_longs(f, n):
a = np.empty(n, dtype=np.int64)
f.readinto(a)
return a
def write_longs(f, a):
f.write(np.array(a, dtype=np.int64))
dtypes = {
1: np.uint8,
2: np.int8,
3: np.int16,
4: np.int32,
5: np.int64,
6: np.float,
7: np.double,
8: np.uint16
}
def code(dtype):
for k in dtypes.keys():
if dtypes[k] == dtype:
return k
raise ValueError(dtype)
def index_file_path(prefix_path):
return prefix_path + '.idx'
def data_file_path(prefix_path):
return prefix_path + '.bin'
class IndexedDataset(FairseqDataset):
"""Loader for TorchNet IndexedDataset"""
_HDR_MAGIC = b'TNTIDX\x00\x00'
def __init__(self, path, fix_lua_indexing=False):
super().__init__()
self.path = path
self.fix_lua_indexing = fix_lua_indexing
self.data_file = None
self.read_index(path)
def read_index(self, path):
with open(index_file_path(path), 'rb') as f:
magic = f.read(8)
assert magic == self._HDR_MAGIC, (
'Index file doesn\'t match expected format. '
'Make sure that --dataset-impl is configured properly.'
)
version = f.read(8)
assert struct.unpack('<Q', version) == (1,)
code, self.element_size = struct.unpack('<QQ', f.read(16))
self.dtype = dtypes[code]
self._len, self.s = struct.unpack('<QQ', f.read(16))
self.dim_offsets = read_longs(f, self._len + 1)
self.data_offsets = read_longs(f, self._len + 1)
self.sizes = read_longs(f, self.s)
def read_data(self, path):
self.data_file = open(data_file_path(path), 'rb', buffering=0)
def check_index(self, i):
if i < 0 or i >= self._len:
raise IndexError('index out of range')
def __del__(self):
if self.data_file:
self.data_file.close()
@lru_cache(maxsize=8)
def __getitem__(self, i):
if not self.data_file:
self.read_data(self.path)
self.check_index(i)
tensor_size = self.sizes[self.dim_offsets[i]:self.dim_offsets[i + 1]]
a = np.empty(tensor_size, dtype=self.dtype)
self.data_file.seek(self.data_offsets[i] * self.element_size)
self.data_file.readinto(a)
item = torch.from_numpy(a).long()
if self.fix_lua_indexing:
item -= 1 # subtract 1 for 0-based indexing
return item
def __len__(self):
return self._len
def num_tokens(self, index):
return self.sizes[index]
def size(self, index):
return self.sizes[index]
@staticmethod
def exists(path):
return (
os.path.exists(index_file_path(path)) and os.path.exists(data_file_path(path))
)
@property
def supports_prefetch(self):
return False # avoid prefetching to save memory
class IndexedCachedDataset(IndexedDataset):
def __init__(self, path, fix_lua_indexing=False):
super().__init__(path, fix_lua_indexing=fix_lua_indexing)
self.cache = None
self.cache_index = {}
@property
def supports_prefetch(self):
return True
def prefetch(self, indices):
if all(i in self.cache_index for i in indices):
return
if not self.data_file:
self.read_data(self.path)
indices = sorted(set(indices))
total_size = 0
for i in indices:
total_size += self.data_offsets[i + 1] - self.data_offsets[i]
self.cache = np.empty(total_size, dtype=self.dtype)
ptx = 0
self.cache_index.clear()
for i in indices:
self.cache_index[i] = ptx
size = self.data_offsets[i + 1] - self.data_offsets[i]
a = self.cache[ptx: ptx + size]
self.data_file.seek(self.data_offsets[i] * self.element_size)
self.data_file.readinto(a)
ptx += size
if self.data_file:
# close and delete data file after prefetch so we can pickle
self.data_file.close()
self.data_file = None
@lru_cache(maxsize=8)
def __getitem__(self, i):
self.check_index(i)
tensor_size = self.sizes[self.dim_offsets[i]:self.dim_offsets[i + 1]]
a = np.empty(tensor_size, dtype=self.dtype)
ptx = self.cache_index[i]
np.copyto(a, self.cache[ptx: ptx + a.size])
item = torch.from_numpy(a).long()
if self.fix_lua_indexing:
item -= 1 # subtract 1 for 0-based indexing
return item
class IndexedRawTextDataset(FairseqDataset):
"""Takes a text file as input and binarizes it in memory at instantiation.
Original lines are also kept in memory"""
def __init__(self, path, dictionary, append_eos=True, reverse_order=False):
self.tokens_list = []
self.lines = []
self.sizes = []
self.append_eos = append_eos
self.reverse_order = reverse_order
self.read_data(path, dictionary)
self.size = len(self.tokens_list)
def read_data(self, path, dictionary):
with open(path, 'r', encoding='utf-8') as f:
for line in f:
self.lines.append(line.strip('\n'))
tokens = dictionary.encode_line(
line, add_if_not_exist=False,
append_eos=self.append_eos, reverse_order=self.reverse_order,
).long()
self.tokens_list.append(tokens)
self.sizes.append(len(tokens))
self.sizes = np.array(self.sizes)
def check_index(self, i):
if i < 0 or i >= self.size:
raise IndexError('index out of range')
@lru_cache(maxsize=8)
def __getitem__(self, i):
self.check_index(i)
return self.tokens_list[i]
def get_original_text(self, i):
self.check_index(i)
return self.lines[i]
def __del__(self):
pass
def __len__(self):
return self.size
def num_tokens(self, index):
return self.sizes[index]
def size(self, index):
return self.sizes[index]
@staticmethod
def exists(path):
return os.path.exists(path)
class IndexedDatasetBuilder(object):
element_sizes = {
np.uint8: 1,
np.int8: 1,
np.int16: 2,
np.int32: 4,
np.int64: 8,
np.float: 4,
np.double: 8
}
def __init__(self, out_file, dtype=np.int32):
self.out_file = open(out_file, 'wb')
self.dtype = dtype
self.data_offsets = [0]
self.dim_offsets = [0]
self.sizes = []
self.element_size = self.element_sizes[self.dtype]
def add_item(self, tensor):
# +1 for Lua compatibility
bytes = self.out_file.write(np.array(tensor.numpy() + 1, dtype=self.dtype))
self.data_offsets.append(self.data_offsets[-1] + bytes / self.element_size)
for s in tensor.size():
self.sizes.append(s)
self.dim_offsets.append(self.dim_offsets[-1] + len(tensor.size()))
def merge_file_(self, another_file):
index = IndexedDataset(another_file)
assert index.dtype == self.dtype
begin = self.data_offsets[-1]
for offset in index.data_offsets[1:]:
self.data_offsets.append(begin + offset)
self.sizes.extend(index.sizes)
begin = self.dim_offsets[-1]
for dim_offset in index.dim_offsets[1:]:
self.dim_offsets.append(begin + dim_offset)
with open(data_file_path(another_file), 'rb') as f:
while True:
data = f.read(1024)
if data:
self.out_file.write(data)
else:
break
def finalize(self, index_file):
self.out_file.close()
index = open(index_file, 'wb')
index.write(b'TNTIDX\x00\x00')
index.write(struct.pack('<Q', 1))
index.write(struct.pack('<QQ', code(self.dtype), self.element_size))
index.write(struct.pack('<QQ', len(self.data_offsets) - 1, len(self.sizes)))
write_longs(index, self.dim_offsets)
write_longs(index, self.data_offsets)
write_longs(index, self.sizes)
index.close()
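# A minimal round-trip sketch (illustrative only): `tensors` is assumed to be a list of
# 1-D LongTensors of token ids; fix_lua_indexing=True undoes the +1 shift of add_item().
def _indexed_builder_roundtrip(tensors, prefix='/tmp/toy_indexed'):
    builder = IndexedDatasetBuilder(data_file_path(prefix))
    for t in tensors:
        builder.add_item(t)
    builder.finalize(index_file_path(prefix))
    return IndexedDataset(prefix, fix_lua_indexing=True)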
def _warmup_mmap_file(path):
with open(path, 'rb') as stream:
while stream.read(100 * 1024 * 1024):
pass
# this is the dataset class that is actually used in the end
class MMapIndexedDataset(torch.utils.data.Dataset):
class Index(object):
_HDR_MAGIC = b'MMIDIDX\x00\x00'
@classmethod
def writer(cls, path, dtype):
class _Writer(object):
def __enter__(self):
self._file = open(path, 'wb')
self._file.write(cls._HDR_MAGIC)
self._file.write(struct.pack('<Q', 1))
self._file.write(struct.pack('<B', code(dtype)))
return self
@staticmethod
def _get_pointers(sizes):
dtype_size = dtype().itemsize
address = 0
pointers = []
for size in sizes:
pointers.append(address)
address += size * dtype_size
return pointers
def write(self, sizes):
pointers = self._get_pointers(sizes)
self._file.write(struct.pack('<Q', len(sizes)))
sizes = np.array(sizes, dtype=np.int32)
self._file.write(sizes.tobytes(order='C'))
del sizes
pointers = np.array(pointers, dtype=np.int64)
self._file.write(pointers.tobytes(order='C'))
del pointers
def __exit__(self, exc_type, exc_val, exc_tb):
self._file.close()
return _Writer()
def __init__(self, path):
with open(path, 'rb') as stream:
magic_test = stream.read(9)
assert self._HDR_MAGIC == magic_test, (
'Index file doesn\'t match expected format. '
'Make sure that --dataset-impl is configured properly.'
)
version = struct.unpack('<Q', stream.read(8))
assert (1,) == version
dtype_code, = struct.unpack('<B', stream.read(1))
self._dtype = dtypes[dtype_code]
self._dtype_size = self._dtype().itemsize
self._len = struct.unpack('<Q', stream.read(8))[0]
offset = stream.tell()
_warmup_mmap_file(path)
self._bin_buffer_mmap = np.memmap(path, mode='r', order='C')
self._bin_buffer = memoryview(self._bin_buffer_mmap)
self._sizes = np.frombuffer(self._bin_buffer, dtype=np.int32, count=self._len, offset=offset)
self._pointers = np.frombuffer(self._bin_buffer, dtype=np.int64, count=self._len,
offset=offset + self._sizes.nbytes)
def __del__(self):
self._bin_buffer_mmap._mmap.close()
del self._bin_buffer_mmap
@property
def dtype(self):
return self._dtype
@property
def sizes(self):
return self._sizes
@lru_cache(maxsize=8)
def __getitem__(self, i):
return self._pointers[i], self._sizes[i]
def __len__(self):
return self._len
def __init__(self, path):
super().__init__()
self._path = None
self._index = None
self._bin_buffer = None
self._do_init(path)
def __getstate__(self):
return self._path
def __setstate__(self, state):
self._do_init(state)
def _do_init(self, path):
self._path = path
self._index = self.Index(index_file_path(self._path))
_warmup_mmap_file(data_file_path(self._path))
self._bin_buffer_mmap = np.memmap(data_file_path(self._path), mode='r', order='C')
self._bin_buffer = memoryview(self._bin_buffer_mmap)
def __del__(self):
self._bin_buffer_mmap._mmap.close()
del self._bin_buffer_mmap
del self._index
def __len__(self):
return len(self._index)
    @lru_cache(maxsize=8)  # repeated calls with the same index return the cached result, saving time
    def __getitem__(self, i):
        ptr, size = self._index[i]
np_array = np.frombuffer(self._bin_buffer, dtype=self._index.dtype, count=size, offset=ptr)
if self._index.dtype != np.int64:
np_array = np_array.astype(np.int64)
return torch.from_numpy(np_array)
@property
def sizes(self):
return self._index.sizes
@property
def supports_prefetch(self):
return False
@staticmethod
def exists(path):
return (
os.path.exists(index_file_path(path)) and os.path.exists(data_file_path(path))
)
class MMapIndexedDatasetBuilder(object):
def __init__(self, out_file, dtype=np.int64):
self._data_file = open(out_file, 'wb')
self._dtype = dtype
self._sizes = []
def add_item(self, tensor):
np_array = np.array(tensor.numpy(), dtype=self._dtype)
self._data_file.write(np_array.tobytes(order='C'))
self._sizes.append(np_array.size)
def merge_file_(self, another_file):
# Concatenate index
index = MMapIndexedDataset.Index(index_file_path(another_file))
assert index.dtype == self._dtype
for size in index.sizes:
self._sizes.append(size)
# Concatenate data
with open(data_file_path(another_file), 'rb') as f:
shutil.copyfileobj(f, self._data_file)
def finalize(self, index_file):
self._data_file.close()
with MMapIndexedDataset.Index.writer(index_file, self._dtype) as index:
index.write(self._sizes)
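# A minimal round-trip sketch for the mmap backend (illustrative only); as above,
# `tensors` is assumed to be a list of 1-D LongTensors of token ids.
def _mmap_builder_roundtrip(tensors, prefix='/tmp/toy_mmap'):
    builder = MMapIndexedDatasetBuilder(data_file_path(prefix), dtype=np.int64)
    for t in tensors:
        builder.add_item(t)
    builder.finalize(index_file_path(prefix))
    dataset = MMapIndexedDataset(prefix)
    return [dataset[i] for i in range(len(dataset))]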
| 16,431 | 29.887218 | 105 | py |
RegularizedBN | RegularizedBN-main/fairseq/data/denoising_dataset.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
import math
from . import data_utils, FairseqDataset
def collate(
samples,
pad_idx,
eos_idx,
vocab,
left_pad_source=False,
left_pad_target=False,
input_feeding=True,
pad_to_length=None,
):
assert input_feeding
if len(samples) == 0:
return {}
def merge(key, left_pad, move_eos_to_beginning=False, pad_to_length=None):
return data_utils.collate_tokens(
[s[key] for s in samples],
pad_idx,
eos_idx=None, # use eos_idx of each sample instead of vocab.eos()
left_pad=left_pad,
move_eos_to_beginning=move_eos_to_beginning,
pad_to_length=pad_to_length,
)
id = torch.LongTensor([s['id'] for s in samples])
src_tokens = merge(
'source', left_pad=left_pad_source,
pad_to_length=pad_to_length['source'] if pad_to_length is not None else None,
)
# sort by descending source length
src_lengths = torch.LongTensor([s['source'].numel() for s in samples])
src_lengths, sort_order = src_lengths.sort(descending=True)
id = id.index_select(0, sort_order)
src_tokens = src_tokens.index_select(0, sort_order)
prev_output_tokens = None
target = None
if samples[0].get('target', None) is not None:
target = merge(
'target', left_pad=left_pad_target,
pad_to_length=pad_to_length['target'] if pad_to_length is not None else None,
)
target = target.index_select(0, sort_order)
ntokens = sum(len(s['target']) for s in samples)
if input_feeding:
# we create a shifted version of targets for feeding the
# previous output token(s) into the next decoder step
prev_output_tokens = merge(
'target',
left_pad=left_pad_target,
move_eos_to_beginning=True,
pad_to_length=pad_to_length['target'] if pad_to_length is not None else None,
)
prev_output_tokens = prev_output_tokens.index_select(0, sort_order)
else:
ntokens = sum(len(s['source']) for s in samples)
batch = {
'id': id,
'ntokens': ntokens,
'net_input': {
'src_tokens': src_tokens,
'src_lengths': src_lengths,
},
'target': target,
'nsentences': samples[0]['source'].size(0),
'sort_order': sort_order,
}
if prev_output_tokens is not None:
batch['net_input']['prev_output_tokens'] = prev_output_tokens
return batch
class DenoisingDataset(FairseqDataset):
"""
A wrapper around TokenBlockDataset for BART dataset.
Args:
dataset (TokenBlockDataset): dataset to wrap
sizes (List[int]): sentence lengths
vocab (~fairseq.data.Dictionary): vocabulary
mask_idx (int): dictionary index used for masked token
mask_whole_words: only mask whole words. This should be a byte mask
over vocab indices, indicating whether it is the beginning of a
word. We will extend any mask to encompass the whole word.
shuffle (bool, optional): shuffle the elements before batching.
Default: ``True``
seed: Seed for random number generator for reproducibility.
args: argparse arguments.
"""
def __init__(
self,
dataset,
sizes,
vocab,
mask_idx,
mask_whole_words,
shuffle,
seed,
args,
eos=None,
item_transform_func=None,
):
self.dataset = dataset
self.sizes = sizes
self.vocab = vocab
self.shuffle = shuffle
self.seed = seed
self.mask_idx = mask_idx
self.mask_whole_word = mask_whole_words
self.mask_ratio = args.mask
self.random_ratio = args.mask_random
self.insert_ratio = args.insert
self.rotate_ratio = args.rotate
self.permute_sentence_ratio = args.permute_sentences
self.eos = (eos if eos is not None else vocab.eos())
self.item_transform_func = item_transform_func
if args.bpe != 'gpt2':
self.full_stop_index = self.vocab.eos()
else:
assert args.bpe == 'gpt2'
self.full_stop_index = self.vocab.index('13')
self.replace_length = args.replace_length
if self.replace_length not in [-1, 0, 1]:
raise ValueError(f'invalid arg: replace_length={self.replace_length}')
if args.mask_length not in ['subword', 'word', 'span-poisson']:
raise ValueError(f'invalid arg: mask-length={args.mask_length}')
if args.mask_length == 'subword' and args.replace_length not in [0, 1]:
            raise ValueError('if using subwords, use replace-length=1 or 0')
self.mask_span_distribution = None
if args.mask_length == 'span-poisson':
_lambda = args.poisson_lambda
lambda_to_the_k = 1
e_to_the_minus_lambda = math.exp(-_lambda)
k_factorial = 1
ps = []
for k in range(0, 128):
ps.append(e_to_the_minus_lambda * lambda_to_the_k / k_factorial)
lambda_to_the_k *= _lambda
k_factorial *= (k + 1)
if ps[-1] < 0.0000001:
break
ps = torch.FloatTensor(ps)
self.mask_span_distribution = torch.distributions.Categorical(ps)
self.epoch = 0
def set_epoch(self, epoch, **unused):
self.epoch = epoch
def __getitem__(self, index):
with data_utils.numpy_seed(self.seed, self.epoch, index):
tokens = self.dataset[index]
assert tokens[-1] == self.eos
source, target = tokens, tokens.clone()
if self.permute_sentence_ratio > 0.0:
source = self.permute_sentences(source, self.permute_sentence_ratio)
if self.mask_ratio > 0:
source = self.add_whole_word_mask(source, self.mask_ratio)
if self.insert_ratio > 0:
source = self.add_insertion_noise(source, self.insert_ratio)
if self.rotate_ratio > 0.0 and np.random.random() < self.rotate_ratio:
source = self.add_rolling_noise(source)
# there can additional changes to make:
if self.item_transform_func is not None:
source, target = self.item_transform_func(source, target)
assert (source >= 0).all()
assert (source[1:-1] >= 1).all()
assert (source <= len(self.vocab)).all()
assert source[0] == self.vocab.bos()
assert source[-1] == self.eos
return {
'id': index,
'source': source,
'target': target,
}
def __len__(self):
return len(self.dataset)
def permute_sentences(self, source, p=1.0):
full_stops = (source == self.full_stop_index)
# Pretend it ends with a full stop so last span is a sentence
full_stops[-2] = 1
# Tokens that are full stops, where the previous token is not
sentence_ends = (full_stops[1:] * ~full_stops[:-1]).nonzero() + 2
result = source.clone()
num_sentences = sentence_ends.size(0)
num_to_permute = math.ceil((num_sentences * 2 * p) / 2.0)
substitutions = torch.randperm(num_sentences)[:num_to_permute]
ordering = torch.arange(0, num_sentences)
ordering[substitutions] = substitutions[torch.randperm(num_to_permute)]
# Ignore <bos> at start
index = 1
for i in ordering:
sentence = source[(sentence_ends[i - 1] if i > 0 else 1):sentence_ends[i]]
result[index:index + sentence.size(0)] = sentence
index += sentence.size(0)
return result
def word_starts(self, source):
if self.mask_whole_word is not None:
is_word_start = self.mask_whole_word.gather(0, source)
else:
is_word_start = torch.ones(source.size())
is_word_start[0] = 0
is_word_start[-1] = 0
return is_word_start
def add_whole_word_mask(self, source, p):
is_word_start = self.word_starts(source)
num_to_mask = int(math.ceil(is_word_start.float().sum() * p))
num_inserts = 0
if num_to_mask == 0:
return source
if self.mask_span_distribution is not None:
lengths = self.mask_span_distribution.sample(sample_shape=(num_to_mask,))
# Make sure we have enough to mask
cum_length = torch.cumsum(lengths, 0)
while cum_length[-1] < num_to_mask:
lengths = torch.cat([lengths, self.mask_span_distribution.sample(sample_shape=(num_to_mask,))], dim=0)
cum_length = torch.cumsum(lengths, 0)
# Trim to masking budget
i = 0
while cum_length[i] < num_to_mask:
i += 1
lengths[i] = num_to_mask - (0 if i == 0 else cum_length[i - 1])
num_to_mask = i + 1
lengths = lengths[:num_to_mask]
# Handle 0-length mask (inserts) separately
lengths = lengths[lengths > 0]
num_inserts = num_to_mask - lengths.size(0)
num_to_mask -= num_inserts
if num_to_mask == 0:
return self.add_insertion_noise(source, num_inserts / source.size(0))
assert (lengths > 0).all()
else:
lengths = torch.ones((num_to_mask,)).long()
assert is_word_start[-1] == 0
word_starts = is_word_start.nonzero()
indices = word_starts[torch.randperm(word_starts.size(0))[:num_to_mask]].squeeze(1)
mask_random = torch.FloatTensor(num_to_mask).uniform_() < self.random_ratio
source_length = source.size(0)
assert source_length - 1 not in indices
to_keep = torch.ones(source_length, dtype=torch.bool)
is_word_start[-1] = 255 # acts as a long length, so spans don't go over the end of doc
if self.replace_length == 0:
to_keep[indices] = 0
else:
# keep index, but replace it with [MASK]
source[indices] = self.mask_idx
source[indices[mask_random]] = torch.randint(1, len(self.vocab), size=(mask_random.sum(),))
if self.mask_span_distribution is not None:
assert len(lengths.size()) == 1
assert lengths.size() == indices.size()
lengths -= 1
while indices.size(0) > 0:
assert lengths.size() == indices.size()
lengths -= is_word_start[indices + 1].long()
uncompleted = lengths >= 0
indices = indices[uncompleted] + 1
mask_random = mask_random[uncompleted]
lengths = lengths[uncompleted]
if self.replace_length != -1:
# delete token
to_keep[indices] = 0
else:
# keep index, but replace it with [MASK]
source[indices] = self.mask_idx
source[indices[mask_random]] = torch.randint(1, len(self.vocab), size=(mask_random.sum(),))
else:
# A bit faster when all lengths are 1
while indices.size(0) > 0:
uncompleted = is_word_start[indices + 1] == 0
indices = indices[uncompleted] + 1
mask_random = mask_random[uncompleted]
if self.replace_length != -1:
# delete token
to_keep[indices] = 0
else:
# keep index, but replace it with [MASK]
source[indices] = self.mask_idx
source[indices[mask_random]] = torch.randint(1, len(self.vocab), size=(mask_random.sum(),))
assert source_length - 1 not in indices
source = source[to_keep]
if num_inserts > 0:
source = self.add_insertion_noise(source, num_inserts / source.size(0))
return source
def add_permuted_noise(self, tokens, p):
num_words = len(tokens)
num_to_permute = math.ceil(((num_words * 2) * p) / 2.0)
substitutions = torch.randperm(num_words - 2)[:num_to_permute] + 1
tokens[substitutions] = tokens[substitutions[torch.randperm(num_to_permute)]]
return tokens
def add_rolling_noise(self, tokens):
offset = np.random.randint(1, max(1, tokens.size(-1) - 1) + 1)
tokens = torch.cat(
(tokens[0:1], tokens[offset:-1], tokens[1:offset], tokens[-1:]),
dim=0,
)
return tokens
def add_insertion_noise(self, tokens, p):
if p == 0.0:
return tokens
num_tokens = len(tokens)
n = int(math.ceil(num_tokens * p))
noise_indices = torch.randperm(num_tokens + n - 2)[:n] + 1
noise_mask = torch.zeros(size=(num_tokens + n,), dtype=torch.bool)
noise_mask[noise_indices] = 1
result = torch.LongTensor(n + len(tokens)).fill_(-1)
num_random = int(math.ceil(n * self.random_ratio))
result[noise_indices[num_random:]] = self.mask_idx
result[noise_indices[:num_random]] = torch.randint(low=1, high=len(self.vocab), size=(num_random,))
result[~noise_mask] = tokens
assert (result >= 0).all()
return result
def collater(self, samples, pad_to_length=None):
"""Merge a list of samples to form a mini-batch.
Args:
samples (List[dict]): samples to collate
Returns:
dict: a mini-batch of data
"""
return collate(
samples, self.vocab.pad(), self.eos, self.vocab,
pad_to_length=pad_to_length)
def num_tokens(self, index):
"""Return the number of tokens in a sample. This value is used to
enforce ``--max-tokens`` during batching."""
return self.sizes[index]
def size(self, index):
"""Return an example's size as a float or tuple. This value is used when
filtering a dataset with ``--max-positions``."""
return self.sizes[index]
def ordered_indices(self):
"""Return an ordered list of indices. Batches will be constructed based
on this order."""
if self.shuffle:
indices = np.random.permutation(len(self))
else:
indices = np.arange(len(self))
return indices[np.argsort(self.sizes[indices], kind='mergesort')]
    def prefetch(self, indices):
        # the wrapped dataset is self.dataset (there is no separate src/tgt here)
        self.dataset.prefetch(indices)
    @property
    def supports_prefetch(self):
        return (
            hasattr(self.dataset, 'supports_prefetch')
            and self.dataset.supports_prefetch
        )
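# A minimal configuration sketch (illustrative only). DenoisingDataset reads a handful of
# argparse fields; the namespace below is hypothetical (BART-like values) and in practice
# the task supplies `args`, `token_block_dataset` and `vocab`.
#
# noising_args = argparse.Namespace(
#     mask=0.3, mask_random=0.1, insert=0.0, rotate=0.0, permute_sentences=1.0,
#     bpe='gpt2', replace_length=1, mask_length='span-poisson', poisson_lambda=3.5,
# )
# dataset = DenoisingDataset(
#     token_block_dataset, token_block_dataset.sizes, vocab,
#     mask_idx=vocab.index('<mask>'), mask_whole_words=None,
#     shuffle=True, seed=1, args=noising_args,
# )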
| 15,082 | 35.968137 | 118 | py |
RegularizedBN | RegularizedBN-main/fairseq/data/prepend_token_dataset.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
from . import BaseWrapperDataset
class PrependTokenDataset(BaseWrapperDataset):
def __init__(self, dataset, token=None):
super().__init__(dataset)
self.token = token
if token is not None:
self._sizes = np.array(dataset.sizes) + 1
else:
self._sizes = dataset.sizes
def __getitem__(self, idx):
item = self.dataset[idx]
if self.token is not None:
item = torch.cat([item.new([self.token]), item])
return item
@property
def sizes(self):
return self._sizes
def num_tokens(self, index):
n = self.dataset.num_tokens(index)
if self.token is not None:
n += 1
return n
def size(self, index):
n = self.dataset.size(index)
if self.token is not None:
n += 1
return n
| 1,067 | 23.837209 | 65 | py |
RegularizedBN | RegularizedBN-main/fairseq/data/numel_dataset.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
from . import BaseWrapperDataset
class NumelDataset(BaseWrapperDataset):
def __init__(self, dataset, reduce=False):
super().__init__(dataset)
self.reduce = reduce
def __getitem__(self, index):
item = self.dataset[index]
if torch.is_tensor(item):
return torch.numel(item)
else:
return np.size(item)
def __len__(self):
return len(self.dataset)
def collater(self, samples):
if self.reduce:
return sum(samples)
else:
return torch.tensor(samples)
| 787 | 22.878788 | 65 | py |
RegularizedBN | RegularizedBN-main/fairseq/data/noising.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import numpy as np
from fairseq.data import data_utils
class WordNoising(object):
"""Generate a noisy version of a sentence, without changing words themselves."""
def __init__(self, dictionary, bpe_cont_marker="@@", bpe_end_marker=None):
self.dictionary = dictionary
self.bpe_end = None
if bpe_cont_marker:
self.bpe_end = np.array([
not self.dictionary[i].endswith(bpe_cont_marker)
for i in range(len(self.dictionary))
])
elif bpe_end_marker:
self.bpe_end = np.array([
self.dictionary[i].endswith(bpe_end_marker)
for i in range(len(self.dictionary))
])
self.get_word_idx = (
self._get_bpe_word_idx
if self.bpe_end is not None
else self._get_token_idx
)
def noising(self, x, lengths, noising_prob=0.0):
raise NotImplementedError()
def _get_bpe_word_idx(self, x):
"""
Given a list of BPE tokens, for every index in the tokens list,
return the index of the word grouping that it belongs to.
For example, for input x corresponding to ["how", "are", "y@@", "ou"],
return [[0], [1], [2], [2]].
"""
# x: (T x B)
bpe_end = self.bpe_end[x]
if (x.size(0) == 1 and x.size(1) == 1):
# Special case when we only have one word in x. If x = [[N]],
# bpe_end is a scalar (bool) instead of a 2-dim array of bools,
# which makes the sum operation below fail.
return np.array([[0]])
# do a reduce front sum to generate word ids
word_idx = bpe_end[::-1].cumsum(0)[::-1]
word_idx = word_idx.max(0)[None, :] - word_idx
return word_idx
def _get_token_idx(self, x):
"""
This is to extend noising functions to be able to apply to non-bpe
tokens, e.g. word or characters.
"""
x = torch.t(x)
word_idx = np.array([range(len(x_i)) for x_i in x])
return np.transpose(word_idx)
class WordDropout(WordNoising):
"""Randomly drop input words. If not passing blank_idx (default is None),
then dropped words will be removed. Otherwise, it will be replaced by the
blank_idx."""
def __init__(self, dictionary, default_dropout_prob=0.1, bpe_cont_marker="@@", bpe_end_marker=None):
super().__init__(dictionary, bpe_cont_marker, bpe_end_marker)
self.default_dropout_prob = default_dropout_prob
def noising(self, x, lengths, dropout_prob=None, blank_idx=None):
if dropout_prob is None:
dropout_prob = self.default_dropout_prob
# x: (T x B), lengths: B
if dropout_prob == 0:
return x, lengths
assert 0 < dropout_prob < 1
# be sure to drop entire words
word_idx = self.get_word_idx(x)
sentences = []
modified_lengths = []
for i in range(lengths.size(0)):
# Since dropout probabilities need to apply over non-pad tokens,
# it is not trivial to generate the keep mask without consider
# input lengths; otherwise, this could be done outside the loop
# We want to drop whole words based on word_idx grouping
num_words = max(word_idx[:, i]) + 1
# ith example: [x0, x1, ..., eos, pad, ..., pad]
# We should only generate keep probs for non-EOS tokens. Thus if the
# input sentence ends in EOS, the last word idx is not included in
# the dropout mask generation and we append True to always keep EOS.
# Otherwise, just generate the dropout mask for all word idx
# positions.
has_eos = x[lengths[i] - 1, i] == self.dictionary.eos()
if has_eos: # has eos?
keep = np.random.rand(num_words - 1) >= dropout_prob
keep = np.append(keep, [True]) # keep EOS symbol
else:
keep = np.random.rand(num_words) >= dropout_prob
words = x[:lengths[i], i].tolist()
# TODO: speed up the following loop
# drop words from the input according to keep
new_s = [
w if keep[word_idx[j, i]] else blank_idx
for j, w in enumerate(words)
]
new_s = [w for w in new_s if w is not None]
# we need to have at least one word in the sentence (more than the
# start / end sentence symbols)
if len(new_s) <= 1:
# insert at beginning in case the only token left is EOS
# EOS should be at end of list.
new_s.insert(0, words[np.random.randint(0, len(words))])
assert len(new_s) >= 1 and (
not has_eos # Either don't have EOS at end or last token is EOS
or (len(new_s) >= 2 and new_s[-1] == self.dictionary.eos())
), "New sentence is invalid."
sentences.append(new_s)
modified_lengths.append(len(new_s))
# re-construct input
modified_lengths = torch.LongTensor(modified_lengths)
modified_x = torch.LongTensor(
modified_lengths.max(),
modified_lengths.size(0)
).fill_(self.dictionary.pad())
for i in range(modified_lengths.size(0)):
modified_x[:modified_lengths[i], i].copy_(torch.LongTensor(sentences[i]))
return modified_x, modified_lengths
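# A minimal sketch of the two WordDropout modes (illustrative only; `dictionary`, `x` and
# `lengths` are hypothetical: x is a (T x B) LongTensor, lengths its per-sentence lengths).
#
# dropper = WordDropout(dictionary)
# pruned_x, pruned_lengths = dropper.noising(x, lengths, dropout_prob=0.2)       # drop words
# blanked_x, _ = dropper.noising(x, lengths, 0.2, blank_idx=dictionary.unk())    # blank instead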
class WordShuffle(WordNoising):
"""Shuffle words by no more than k positions."""
def __init__(self, dictionary, default_max_shuffle_distance=3, bpe_cont_marker="@@", bpe_end_marker=None):
super().__init__(dictionary, bpe_cont_marker, bpe_end_marker)
        self.default_max_shuffle_distance = default_max_shuffle_distance
def noising(self, x, lengths, max_shuffle_distance=None):
if max_shuffle_distance is None:
max_shuffle_distance = self.default_max_shuffle_distance
# x: (T x B), lengths: B
if max_shuffle_distance == 0:
return x, lengths
# max_shuffle_distance < 1 will return the same sequence
assert max_shuffle_distance > 1
# define noise word scores
noise = np.random.uniform(
0,
max_shuffle_distance,
size=(x.size(0), x.size(1)),
)
noise[0] = -1 # do not move start sentence symbol
# be sure to shuffle entire words
word_idx = self.get_word_idx(x)
x2 = x.clone()
for i in range(lengths.size(0)):
length_no_eos = lengths[i]
if x[lengths[i] - 1, i] == self.dictionary.eos():
length_no_eos = lengths[i] - 1
# generate a random permutation
scores = word_idx[:length_no_eos, i] + noise[word_idx[:length_no_eos, i], i]
# ensure no reordering inside a word
scores += 1e-6 * np.arange(length_no_eos.item())
permutation = scores.argsort()
# shuffle words
x2[:length_no_eos, i].copy_(
x2[:length_no_eos, i][torch.from_numpy(permutation)]
)
return x2, lengths
class UnsupervisedMTNoising(WordNoising):
"""
Implements the default configuration for noising in UnsupervisedMT
(github.com/facebookresearch/UnsupervisedMT)
"""
def __init__(
self,
dictionary,
max_word_shuffle_distance,
word_dropout_prob,
word_blanking_prob,
bpe_cont_marker="@@",
bpe_end_marker=None,
):
super().__init__(dictionary)
self.max_word_shuffle_distance = max_word_shuffle_distance
self.word_dropout_prob = word_dropout_prob
self.word_blanking_prob = word_blanking_prob
self.word_dropout = WordDropout(
dictionary=dictionary,
bpe_cont_marker=bpe_cont_marker,
bpe_end_marker=bpe_end_marker,
)
self.word_shuffle = WordShuffle(
dictionary=dictionary,
bpe_cont_marker=bpe_cont_marker,
bpe_end_marker=bpe_end_marker,
)
def noising(self, x, lengths):
# 1. Word Shuffle
noisy_src_tokens, noisy_src_lengths = self.word_shuffle.noising(
x=x,
lengths=lengths,
max_shuffle_distance=self.max_word_shuffle_distance,
)
# 2. Word Dropout
noisy_src_tokens, noisy_src_lengths = self.word_dropout.noising(
x=noisy_src_tokens,
lengths=noisy_src_lengths,
dropout_prob=self.word_dropout_prob,
)
# 3. Word Blanking
noisy_src_tokens, noisy_src_lengths = self.word_dropout.noising(
x=noisy_src_tokens,
lengths=noisy_src_lengths,
dropout_prob=self.word_blanking_prob,
blank_idx=self.dictionary.unk(),
)
return noisy_src_tokens
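# A minimal usage sketch of the three-step pipeline above; the dictionary `d`
# and the (T x B) batch `x`/`lengths` are assumed to come from the caller:
#
#   noiser = UnsupervisedMTNoising(
#       dictionary=d,
#       max_word_shuffle_distance=3,
#       word_dropout_prob=0.1,
#       word_blanking_prob=0.1,
#   )
#   noisy_x = noiser.noising(x, lengths)
#
# Note that only the noised tokens are returned; callers that need updated
# lengths must recompute them from the result.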
class NoisingDataset(torch.utils.data.Dataset):
def __init__(
self,
src_dataset,
src_dict,
seed,
noiser=None,
noising_class=UnsupervisedMTNoising,
**kwargs
):
"""
Wrap a :class:`~torch.utils.data.Dataset` and apply noise to the
samples based on the supplied noising configuration.
Args:
            src_dataset (~torch.utils.data.Dataset): dataset to wrap, used to
                build self.src_dataset -- a LanguagePairDataset with the src
                dataset as the source dataset and None as the target dataset.
                It should NOT have padding, so that src_lengths are accurately
                calculated by the language_pair_dataset collate function.
                We use language_pair_dataset here to encapsulate the tgt_dataset
                so we can re-use the LanguagePairDataset collater to format the
                batches in the structure that SequenceGenerator expects.
src_dict (~fairseq.data.Dictionary): source dictionary
seed (int): seed to use when generating random noise
noiser (WordNoising): a pre-initialized :class:`WordNoising`
instance. If this is None, a new instance will be created using
*noising_class* and *kwargs*.
noising_class (class, optional): class to use to initialize a
default :class:`WordNoising` instance.
kwargs (dict, optional): arguments to initialize the default
:class:`WordNoising` instance given by *noiser*.
"""
self.src_dataset = src_dataset
self.src_dict = src_dict
self.seed = seed
self.noiser = noiser if noiser is not None else noising_class(
dictionary=src_dict, **kwargs,
)
def __getitem__(self, index):
"""
Returns a single noisy sample. Multiple samples are fed to the collater
        to create a noising dataset batch.
"""
src_tokens = self.src_dataset[index]
src_lengths = torch.LongTensor([len(src_tokens)])
src_tokens = src_tokens.unsqueeze(0)
# Transpose src tokens to fit expected shape of x in noising function
# (batch size, sequence length) -> (sequence length, batch size)
src_tokens_t = torch.t(src_tokens)
with data_utils.numpy_seed(self.seed + index):
noisy_src_tokens = self.noiser.noising(src_tokens_t, src_lengths)
# Transpose back to expected src_tokens format
# (sequence length, 1) -> (1, sequence length)
noisy_src_tokens = torch.t(noisy_src_tokens)
return noisy_src_tokens[0]
def __len__(self):
"""
The length of the noising dataset is the length of src.
"""
return len(self.src_dataset)
@property
def supports_prefetch(self):
return self.src_dataset.supports_prefetch
def prefetch(self, indices):
if self.src_dataset.supports_prefetch:
self.src_dataset.prefetch(indices)
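# A minimal construction sketch; every name other than the classes defined in
# this module (plain_token_dataset, src_dict) is an assumption:
#
#   noisy = NoisingDataset(
#       src_dataset=plain_token_dataset,   # any dataset of 1-D LongTensors
#       src_dict=src_dict,
#       seed=42,
#       noiser=None,                       # falls back to UnsupervisedMTNoising
#       max_word_shuffle_distance=3,
#       word_dropout_prob=0.1,
#       word_blanking_prob=0.2,
#   )
#   sample = noisy[0]                      # noised copy of plain_token_dataset[0]
#
# Because the RNG is seeded with `seed + index`, a given index always yields
# the same noised sample, regardless of the epoch.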
| 12,184 | 37.560127 | 110 | py |
RegularizedBN | RegularizedBN-main/fairseq/data/bucket_pad_length_dataset.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch.nn.functional as F
from fairseq.data import BaseWrapperDataset
class BucketPadLengthDataset(BaseWrapperDataset):
"""
Bucket and pad item lengths to the nearest bucket size. This can be used to
reduce the number of unique batch shapes, which is important on TPUs since
each new batch shape requires a recompilation.
Args:
        dataset (FairseqDataset): dataset to bucket
sizes (List[int]): all item sizes
num_buckets (int): number of buckets to create
pad_idx (int): padding symbol
left_pad (bool): if True, pad on the left; otherwise right pad
"""
def __init__(
self,
dataset,
sizes,
num_buckets,
pad_idx,
left_pad,
):
super().__init__(dataset)
self.pad_idx = pad_idx
self.left_pad = left_pad
assert num_buckets > 0
self.buckets = np.unique(
np.percentile(
sizes,
np.linspace(0, 100, num_buckets + 1),
interpolation='lower',
)[1:]
)
def get_bucketed_sizes(orig_sizes, buckets):
sizes = np.copy(orig_sizes)
assert np.min(sizes) >= 0
start_val = -1
for end_val in buckets:
mask = (sizes > start_val) & (sizes <= end_val)
sizes[mask] = end_val
start_val = end_val
return sizes
self._bucketed_sizes = get_bucketed_sizes(sizes, self.buckets)
def __getitem__(self, index):
item = self.dataset[index]
bucket_size = self._bucketed_sizes[index]
num_pad = bucket_size - item.size(-1)
return F.pad(
item,
(num_pad if self.left_pad else 0, 0 if self.left_pad else num_pad),
value=self.pad_idx,
)
@property
def sizes(self):
return self._bucketed_sizes
def num_tokens(self, index):
return self._bucketed_sizes[index]
def size(self, index):
return self._bucketed_sizes[index]
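# Worked example with illustrative numbers: for sizes [3, 5, 8, 13, 21] and
# num_buckets=2, np.percentile(..., np.linspace(0, 100, 3))[1:] yields the
# bucket upper bounds [8, 21]; each item is then padded up to its bucket, so
# the reported (bucketed) sizes become [8, 8, 8, 21, 21] and only two batch
# widths can ever occur.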
| 2,261 | 28 | 79 | py |
RegularizedBN | RegularizedBN-main/fairseq/data/concat_sentences_dataset.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from . import FairseqDataset
class ConcatSentencesDataset(FairseqDataset):
def __init__(self, *datasets):
super().__init__()
self.datasets = datasets
assert all(len(ds) == len(datasets[0]) for ds in datasets), \
'datasets must have the same length'
def __getitem__(self, index):
return torch.cat([ds[index] for ds in self.datasets])
def __len__(self):
return len(self.datasets[0])
def collater(self, samples):
return self.datasets[0].collater(samples)
@property
def sizes(self):
return sum(ds.sizes for ds in self.datasets)
def num_tokens(self, index):
return sum(ds.num_tokens(index) for ds in self.datasets)
def size(self, index):
return sum(ds.size(index) for ds in self.datasets)
def ordered_indices(self):
return self.datasets[0].ordered_indices()
@property
def supports_prefetch(self):
return any(
getattr(ds, 'supports_prefetch', False) for ds in self.datasets
)
def prefetch(self, indices):
for ds in self.datasets:
if getattr(ds, 'supports_prefetch', False):
ds.prefetch(indices)
def set_epoch(self, epoch):
super().set_epoch(epoch)
for ds in self.datasets:
if hasattr(ds, 'set_epoch'):
ds.set_epoch(epoch)
| 1,573 | 26.614035 | 75 | py |
RegularizedBN | RegularizedBN-main/fairseq/data/fairseq_dataset.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch.utils.data
from fairseq.data import data_utils
class EpochListening:
"""Mixin for receiving updates whenever the epoch increments."""
def set_epoch(self, epoch):
"""Will receive the updated epoch number at the beginning of the epoch."""
pass
class FairseqDataset(torch.utils.data.Dataset, EpochListening):
"""A dataset that provides helpers for batching."""
def __getitem__(self, index):
raise NotImplementedError
def __len__(self):
raise NotImplementedError
def collater(self, samples):
"""Merge a list of samples to form a mini-batch.
Args:
samples (List[dict]): samples to collate
Returns:
dict: a mini-batch suitable for forwarding with a Model
"""
raise NotImplementedError
def num_tokens(self, index):
"""Return the number of tokens in a sample. This value is used to
enforce ``--max-tokens`` during batching."""
raise NotImplementedError
    def src_num_tokens(self, index):
        """Return the number of source-side tokens in a sample."""
        raise NotImplementedError
    def tgt_num_tokens(self, index):
        """Return the number of target-side tokens in a sample."""
        raise NotImplementedError
def size(self, index):
"""Return an example's size as a float or tuple. This value is used when
filtering a dataset with ``--max-positions``."""
raise NotImplementedError
def ordered_indices(self):
"""Return an ordered list of indices. Batches will be constructed based
on this order."""
return np.arange(len(self), dtype=np.int64)
@property
def supports_prefetch(self):
"""Whether this dataset supports prefetching."""
return False
def attr(self, attr: str, index: int):
return getattr(self, attr, None)
def prefetch(self, indices):
"""Prefetch the data required for this epoch."""
raise NotImplementedError
def get_batch_shapes(self):
"""
Return a list of valid batch shapes, for example::
[(8, 512), (16, 256), (32, 128)]
The first dimension of each tuple is the batch size and can be ``None``
to automatically infer the max batch size based on ``--max-tokens``.
The second dimension of each tuple is the max supported length as given
by :func:`fairseq.data.FairseqDataset.num_tokens`.
This will be used by :func:`fairseq.data.FairseqDataset.batch_by_size`
to restrict batch shapes. This is useful on TPUs to avoid too many
dynamic shapes (and recompilations).
"""
return None
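    # An overriding subclass might, for instance, return a small fixed menu of
    # shapes (illustrative values only):
    #
    #   def get_batch_shapes(self):
    #       return [(None, 512), (64, 256), (128, 128)]
    #
    # where a batch size of None lets batch_by_size derive it from --max-tokens
    # for that length bucket.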
def batch_by_size(
self,
indices,
max_tokens=None,
max_sentences=None,
required_batch_size_multiple=1,
my_batching=0,
tol=0
):
"""
Given an ordered set of indices, return batches according to
*max_tokens*, *max_sentences* and *required_batch_size_multiple*.
"""
from fairseq.data import data_utils
fixed_shapes = self.get_batch_shapes()
if fixed_shapes is not None:
def adjust_bsz(bsz, num_tokens):
if bsz is None:
assert max_tokens is not None, 'Must specify --max-tokens'
bsz = max_tokens // num_tokens
if max_sentences is not None:
bsz = min(bsz, max_sentences)
elif (
bsz >= required_batch_size_multiple
and bsz % required_batch_size_multiple != 0
):
bsz -= (bsz % required_batch_size_multiple)
return bsz
fixed_shapes = np.array([
[adjust_bsz(bsz, num_tokens), num_tokens]
for (bsz, num_tokens) in fixed_shapes
])
        # Disabled debugging/filtering code kept for reference: it restricts
        # batching to examples whose source length falls in a fixed range, e.g.
        #
        #   indices = np.array([idx for idx in indices
        #                       if 25 <= self.src_num_tokens(idx) <= 35])
        #
        # Counts observed on the full set of 4,500,966 examples:
        #   max(src, tgt) length in [20, 40]: 2,248,033; [25, 35]: 1,209,236; [27, 32]: 673,342
        #   src length in [20, 40]: 2,243,518; [25, 35]: 1,188,508; [27, 32]: 663,084
return data_utils.batch_by_size(
indices,
num_tokens_fn=self.num_tokens,
max_tokens=max_tokens,
max_sentences=max_sentences,
required_batch_size_multiple=required_batch_size_multiple,
fixed_shapes=fixed_shapes,
my_batching=my_batching,
tol=tol,
)
def filter_indices_by_size(self, indices, max_sizes):
"""
Filter a list of sample indices. Remove those that are longer than
specified in *max_sizes*.
WARNING: don't update, override method in child classes
Args:
indices (np.array): original array of sample indices
max_sizes (int or list[int] or tuple[int]): max sample size,
can be defined separately for src and tgt (then list or tuple)
Returns:
np.array: filtered sample array
list: list of removed indices
"""
if isinstance(max_sizes, float) or isinstance(max_sizes, int):
if hasattr(self, 'sizes') and isinstance(self.sizes, np.ndarray):
ignored = indices[self.sizes[indices] > max_sizes].tolist()
indices = indices[self.sizes[indices] <= max_sizes]
elif hasattr(self, 'sizes') and isinstance(self.sizes, list) and len(self.sizes) == 1:
ignored = indices[self.sizes[0][indices] > max_sizes].tolist()
indices = indices[self.sizes[0][indices] <= max_sizes]
else:
indices, ignored = data_utils._filter_by_size_dynamic(indices, self.size, max_sizes)
else:
indices, ignored = data_utils._filter_by_size_dynamic(indices, self.size, max_sizes)
return indices, ignored
class FairseqIterableDataset(torch.utils.data.IterableDataset, EpochListening):
"""
For datasets that need to be read sequentially, usually because the data is
being streamed or otherwise can't be manipulated on a single machine.
"""
def __iter__(self):
raise NotImplementedError
| 6,723 | 33.482051 | 100 | py |
RegularizedBN | RegularizedBN-main/fairseq/data/transform_eos_dataset.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from . import FairseqDataset
class TransformEosDataset(FairseqDataset):
"""A :class:`~fairseq.data.FairseqDataset` wrapper that appends/prepends/strips EOS.
Note that the transformation is applied in :func:`collater`.
Args:
dataset (~fairseq.data.FairseqDataset): dataset to wrap
eos (int): index of the end-of-sentence symbol
append_eos_to_src (bool, optional): append EOS to the end of src
remove_eos_from_src (bool, optional): remove EOS from the end of src
append_eos_to_tgt (bool, optional): append EOS to the end of tgt
remove_eos_from_tgt (bool, optional): remove EOS from the end of tgt
"""
def __init__(
self,
dataset,
eos,
append_eos_to_src=False,
remove_eos_from_src=False,
append_eos_to_tgt=False,
remove_eos_from_tgt=False,
has_target=True,
):
if not isinstance(dataset, FairseqDataset):
raise ValueError('dataset must be an instance of FairseqDataset')
if append_eos_to_src and remove_eos_from_src:
raise ValueError('cannot combine append_eos_to_src and remove_eos_from_src')
if append_eos_to_tgt and remove_eos_from_tgt:
raise ValueError('cannot combine append_eos_to_tgt and remove_eos_from_tgt')
self.dataset = dataset
self.eos = torch.LongTensor([eos])
self.append_eos_to_src = append_eos_to_src
self.remove_eos_from_src = remove_eos_from_src
self.append_eos_to_tgt = append_eos_to_tgt
self.remove_eos_from_tgt = remove_eos_from_tgt
self.has_target = has_target
# precompute how we should adjust the reported sizes
self._src_delta = 0
self._src_delta += 1 if append_eos_to_src else 0
self._src_delta -= 1 if remove_eos_from_src else 0
self._tgt_delta = 0
self._tgt_delta += 1 if append_eos_to_tgt else 0
self._tgt_delta -= 1 if remove_eos_from_tgt else 0
self._checked_src = False
self._checked_tgt = False
def _check_src(self, src, expect_eos):
if not self._checked_src:
assert (src[-1] == self.eos[0]) == expect_eos
self._checked_src = True
def _check_tgt(self, tgt, expect_eos):
if self.has_target and not self._checked_tgt:
assert (tgt[-1] == self.eos[0]) == expect_eos
self._checked_tgt = True
def __getitem__(self, index):
return self.dataset[index]
def __len__(self):
return len(self.dataset)
def collater(self, samples):
def transform(item):
if self.append_eos_to_src:
self.eos = self.eos.to(device=item['source'].device)
self._check_src(item['source'], expect_eos=False)
item['source'] = torch.cat([item['source'], self.eos])
if self.remove_eos_from_src:
self.eos = self.eos.to(device=item['source'].device)
self._check_src(item['source'], expect_eos=True)
item['source'] = item['source'][:-1]
if self.append_eos_to_tgt:
self.eos = self.eos.to(device=item['target'].device)
self._check_tgt(item['target'], expect_eos=False)
item['target'] = torch.cat([item['target'], self.eos])
if self.remove_eos_from_tgt:
self.eos = self.eos.to(device=item['target'].device)
self._check_tgt(item['target'], expect_eos=True)
item['target'] = item['target'][:-1]
return item
samples = list(map(transform, samples))
return self.dataset.collater(samples)
def num_tokens(self, index):
return self.dataset.num_tokens(index)
def size(self, index):
if self.has_target:
src_len, tgt_len = self.dataset.size(index)
return (src_len + self._src_delta, tgt_len + self._tgt_delta)
else:
return self.dataset.size(index)
def ordered_indices(self):
# NOTE: we assume that the ordering does not change based on the
# addition or removal of eos
return self.dataset.ordered_indices()
@property
def supports_prefetch(self):
return getattr(self.dataset, 'supports_prefetch', False)
def prefetch(self, indices):
return self.dataset.prefetch(indices)
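# A typical usage sketch; `lang_pair_dataset` and `tgt_dict` are assumptions:
#
#   wrapped = TransformEosDataset(
#       lang_pair_dataset,
#       eos=tgt_dict.eos(),
#       remove_eos_from_src=True,   # strip EOS from sources in collated batches
#       append_eos_to_tgt=True,     # ensure targets end with EOS
#   )
#
# Only collater() output is transformed; __getitem__ still returns the
# untouched underlying samples.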
| 4,576 | 36.516393 | 88 | py |
RegularizedBN | RegularizedBN-main/fairseq/data/multilingual/sampled_multi_dataset.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import List
from enum import Enum
from collections import OrderedDict
from collections import defaultdict
from bisect import bisect_right
import hashlib
import logging
import datetime
import time
import numpy as np
import torch
from fairseq import distributed_utils
from fairseq.data import plasma_utils, FairseqDataset
def get_time_gap(s, e):
return (datetime.datetime.fromtimestamp(e) - datetime.datetime.fromtimestamp(s)).__str__()
logger = logging.getLogger(__name__)
def default_virtual_size_func(datasets, ratios, max_scale_up=1.5):
sizes = [len(d) for d in datasets]
if ratios is None:
return sum(sizes)
largest_idx = np.argmax(sizes)
largest_r = ratios[largest_idx]
largest_s = sizes[largest_idx]
# set virtual sizes relative to the largest dataset
virtual_sizes = [(r / largest_r) * largest_s for r in ratios]
vsize = sum(virtual_sizes)
max_size = sum(sizes) * max_scale_up
return int(vsize if vsize < max_size else max_size)
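# Worked example with illustrative numbers: for dataset sizes [100, 10] and
# ratios [1.0, 0.5], the largest dataset (size 100, ratio 1.0) anchors the
# scale, giving virtual sizes [100, 50] and a virtual total of 150.  The cap
# sum(sizes) * max_scale_up = 110 * 1.5 = 165 is not reached, so 150 is
# returned.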
class CollateFormat(Enum):
single = 1
ordered_dict = 2
class SampledMultiDataset(FairseqDataset):
"""Samples from multiple sub-datasets according to given sampling ratios.
Args:
datasets (
List[~torch.utils.data.Dataset]
or OrderedDict[str, ~torch.utils.data.Dataset]
): datasets
        sampling_ratios (List[float]): list of probabilities for each dataset to be sampled
            (default: None, which corresponds to concatenating all datasets together).
batch_by_size (bool): whether or not to batch by sequence length
(default: True).
seed (int): RNG seed to use (default: 2).
epoch (int): starting epoch number (default: 1).
eval_key (str, optional): a key used at evaluation time that causes
this instance to pass-through batches from *datasets[eval_key]*.
collate_format (CollateFormat): collater output format, either CollateFormat.ordered_dict or
CollateFormat.single (default: CollateFormat.single) where CollateFormat.single configures
the collater to output batches of data mixed from all sub-datasets,
and CollateFormat.ordered_dict configures the collater to output a dictionary of batches indexed by keys
of sub-datasets.
            Note that not all sub-datasets will be present in a single batch in both formats.
virtual_size (int, or callable): the expected virtual size of the dataset (default: default_virtual_size_func).
split (str): the split of the data, e.g. 'train', 'valid' or 'test'.
        shared_collater (bool): whether or not all sub-datasets have the same collater.
"""
def __init__(
self,
datasets,
sampling_ratios=None,
batch_by_size=False,
seed=2,
epoch=1,
eval_key=None,
collate_format=CollateFormat.single,
virtual_size=default_virtual_size_func,
split='',
shared_collater=False,
):
super().__init__()
self.batch_by_size = batch_by_size
self.shared_collater = shared_collater
if isinstance(datasets, OrderedDict):
self.keys = list(datasets.keys())
datasets = list(datasets.values())
elif isinstance(datasets, List):
self.keys = list(range(len(datasets)))
else:
raise AssertionError()
self.datasets = datasets
self.split = split
self.eval_key = eval_key
if self.eval_key is not None:
self.collate_format = CollateFormat.single
else:
self.collate_format = collate_format
self.seed = seed
self._cur_epoch = None
self._cur_indices = None
self._sizes = None
self._ordered_indices = None
self.virtual_size_per_dataset = None
# caching properties
self._reset_cached_properties()
self.setup_sampling(sampling_ratios, virtual_size)
self.cumulated_sizes = None
self.virtual_size_per_dataset = None
self._size_cache = {}
self.set_epoch(epoch)
def _clean_if_not_none(self, var_list):
for v in var_list:
if v is not None:
del v
def _reset_cached_properties(self):
self._clean_if_not_none([
self._sizes, self._ordered_indices, self._cur_indices
])
self._sizes = None
self._ordered_indices = None
self._cur_indices = None
def setup_sampling(self, sample_ratios, virtual_size):
sizes = [len(d) for d in self.datasets]
if sample_ratios is None:
# default back to concating datasets
self.sample_ratios = None
self.virtual_size = sum(sizes)
else:
if not isinstance(sample_ratios, np.ndarray):
sample_ratios = np.array(sample_ratios)
self.sample_ratios = plasma_utils.PlasmaArray(sample_ratios)
virtual_size = default_virtual_size_func if virtual_size is None else virtual_size
self.virtual_size = (
virtual_size(self.datasets, self.sample_ratios.array) if callable(virtual_size)
else virtual_size)
def adjust_sampling(self, epoch, sampling_ratios, virtual_size):
if sampling_ratios is not None:
sampling_ratios = self._sync_sample_ratios(sampling_ratios)
self.setup_sampling(sampling_ratios, virtual_size)
def _sync_sample_ratios(self, ratios):
# in case the ratios are not precisely the same across processes
        # also to ensure every process updates the ratios at the same pace
ratios = torch.DoubleTensor(ratios)
if torch.distributed.is_initialized():
if torch.cuda.is_available():
distributed_utils.all_reduce(ratios.cuda())
else:
distributed_utils.all_reduce(ratios)
ret = ratios.cpu()
ret = ret.numpy()
return ret
def random_choice_in_dataset(self, rng, dataset, choice_size):
if hasattr(dataset, 'random_choice_in_dataset'):
return dataset.random_choice_in_dataset(rng, choice_size)
dataset_size = len(dataset)
return rng.choice(dataset_size, choice_size, replace=(choice_size > dataset_size))
def get_virtual_indices(self, rng, datasets, sample_ratios, virtual_size):
def get_counts(sample_ratios):
counts = np.array([virtual_size * r for r in sample_ratios], dtype=np.int64)
diff = virtual_size - counts.sum()
assert diff >= 0
# due to round-offs, the size might not match the desired sizes
if diff > 0:
dataset_indices = rng.choice(len(sample_ratios), size=diff, p=sample_ratios)
for i in dataset_indices:
counts[i] += 1
return counts
def get_in_dataset_indices(datasets, sizes, sample_ratios):
counts = get_counts(sample_ratios)
            # uniformly sample desired counts for each dataset
# if the desired counts are large, sample with replacement:
indices = [
self.random_choice_in_dataset(rng, d, c)
for c, d in zip(counts, datasets)]
return indices
sizes = [len(d) for d in datasets]
if sample_ratios is None:
# default back to concating datasets
in_dataset_indices = [list(range(s)) for s in sizes]
virtual_sizes_per_dataset = sizes
else:
sample_ratios = sample_ratios.array
ratios = sample_ratios / sample_ratios.sum()
in_dataset_indices = get_in_dataset_indices(datasets, sizes, ratios)
virtual_sizes_per_dataset = [len(d) for d in in_dataset_indices]
virtual_sizes_per_dataset = np.array(virtual_sizes_per_dataset, np.int64)
cumulative_sizes = np.cumsum(virtual_sizes_per_dataset)
assert sum(virtual_sizes_per_dataset) == virtual_size
assert cumulative_sizes[-1] == virtual_size
if virtual_size < sum(sizes):
logger.warning(
f'virtual data size ({virtual_size}) is less than real data size ({sum(sizes)}).'
' If virtual size << real data size, there could be data coverage issue.'
)
in_dataset_indices = np.hstack(in_dataset_indices)
return in_dataset_indices, cumulative_sizes, virtual_sizes_per_dataset
def _get_dataset_and_index(self, index):
i = bisect_right(self.cumulated_sizes.array, index)
return i, self._cur_indices.array[index]
def __getitem__(self, index):
ds_idx, ds_sample_idx = self._get_dataset_and_index(index)
ret = (ds_idx, self.datasets[ds_idx][ds_sample_idx])
return ret
def num_tokens(self, index):
ds_idx, ds_sample_idx = self._get_dataset_and_index(index)
return self.datasets[ds_idx].num_tokens(ds_sample_idx)
def size(self, index):
if self._sizes is not None:
return self._sizes[index]
ds_idx, ds_sample_idx = self._get_dataset_and_index(index)
return self.datasets[ds_idx].size(ds_sample_idx)
def __len__(self):
return self.virtual_size
def collater(self, samples, **extra_args):
"""Merge a list of samples to form a mini-batch."""
if len(samples) == 0:
return None
        if self.collate_format == CollateFormat.ordered_dict:
collect_samples = [[] for _ in range(len(self.datasets))]
for (i, sample) in samples:
collect_samples[i].append(sample)
return OrderedDict([
(self.keys[i], dataset.collater(collect_samples[i]))
for i, (key, dataset) in enumerate(zip(self.keys, self.datasets))
if len(collect_samples[i]) > 0
])
elif self.shared_collater:
return self.datasets[0].collater(
[s for _, s in samples]
)
else:
samples_dict = defaultdict(list)
pad_to_length = defaultdict(int) if 'pad_to_length' not in extra_args else extra_args['pad_to_length']
for ds_idx, s in samples:
pad_to_length['source'] = max(pad_to_length['source'], s['source'].size(0))
if s['target'] is not None:
pad_to_length['target'] = max(pad_to_length['target'], s['target'].size(0))
samples_dict[ds_idx].append(s)
batches = [
self.datasets[i].collater(samples_dict[i], pad_to_length=pad_to_length)
for i in range(len(self.datasets))
if len(samples_dict[i]) > 0
]
def straight_data(tensors):
batch = torch.cat(tensors, dim=0)
return batch
src_lengths = straight_data([b['net_input']['src_lengths'] for b in batches])
src_lengths, sort_order = src_lengths.sort(descending=True)
def straight_order(tensors):
batch = straight_data(tensors)
return batch.index_select(0, sort_order)
batch = {
'id': straight_order([b['id'] for b in batches]),
'nsentences': sum(b['nsentences'] for b in batches),
'ntokens': sum(b['ntokens'] for b in batches),
'net_input': {
'src_tokens': straight_order([b['net_input']['src_tokens'] for b in batches]),
'src_lengths': src_lengths,
},
'target': straight_order([b['target'] for b in batches]) if batches[0]['target'] is not None else None,
}
if 'prev_output_tokens' in batches[0]['net_input']:
batch['net_input']['prev_output_tokens'] = straight_order(
[b['net_input']['prev_output_tokens'] for b in batches])
if 'src_lang_id' in batches[0]['net_input']:
batch['net_input']['src_lang_id'] = straight_order([b['net_input']['src_lang_id'] for b in batches])
if 'tgt_lang_id' in batches[0]:
batch['tgt_lang_id'] = straight_order([b['tgt_lang_id'] for b in batches])
return batch
@property
def sizes(self):
if self._sizes is not None:
return self._sizes
start_time = time.time()
size_cache = self._size_cache
ret = []
for i in range(len(self)):
ds_idx, ds_sample_idx = self._get_dataset_and_index(i)
if (ds_idx, ds_sample_idx) in size_cache:
ret.append(size_cache[(ds_idx, ds_sample_idx)])
else:
s = self.datasets[ds_idx].size(ds_sample_idx)
size_cache[(ds_idx, ds_sample_idx)] = s
ret.append(s)
logger.debug(f'sizes() calling time: {get_time_gap(start_time, time.time())}')
self._sizes = np.array(ret, np.int64)
return self._sizes
def ordered_indices(self):
if self._ordered_indices is not None:
return self._ordered_indices
if self.batch_by_size:
# No need to do shuffle as the data items are already randomized
indices = np.arange(len(self))
sizes = self.sizes
            tgt_sizes = sizes[:, 1] if len(sizes.shape) > 1 and sizes.shape[1] > 1 else None
            src_sizes = sizes[:, 0] if len(sizes.shape) > 1 and sizes.shape[1] > 1 else sizes
# sort by target length, then source length
if tgt_sizes is not None:
indices = indices[
np.argsort(tgt_sizes[indices], kind='mergesort')
]
sort_indices = indices[np.argsort(src_sizes[indices], kind='mergesort')]
else:
sort_indices = np.arange(len(self))
self._ordered_indices = sort_indices
return sort_indices
def prefetch(self, indices):
prefetch_indices = [[] for _ in range(len(self.datasets))]
for i in indices:
ds_idx, ds_sample_idx = self._get_dataset_and_index(i)
prefetch_indices[ds_idx].append(ds_sample_idx)
for i in range(len(prefetch_indices)):
self.datasets[i].prefetch(prefetch_indices[i])
def set_epoch(self, epoch):
super().set_epoch(epoch)
if epoch == self._cur_epoch:
# re-enter so return
return
for d in self.datasets:
if hasattr(d, 'set_epoch'):
d.set_epoch(epoch)
self._cur_epoch = epoch
self._establish_virtual_datasets()
def _establish_virtual_datasets(self):
if self.sample_ratios is None and self._cur_indices is not None:
            # not a sampling dataset, no need to resample if indices are already established
return
self._reset_cached_properties()
start_time = time.time()
# Generate a weighted sample of indices as a function of the
# random seed and the current epoch.
rng = np.random.RandomState(
[
int(hashlib.sha1(str(self.__class__.__name__).encode('utf-8')).hexdigest(), 16) % (2 ** 32),
self.seed % (2 ** 32), # global seed
self._cur_epoch, # epoch index,
]
)
indices, cumulated_sizes, virtual_size_per_dataset = self.get_virtual_indices(
rng, self.datasets, self.sample_ratios, self.virtual_size)
self._clean_if_not_none([
self.cumulated_sizes, self.virtual_size_per_dataset
])
self._cur_indices = plasma_utils.PlasmaArray(indices)
self.cumulated_sizes = plasma_utils.PlasmaArray(cumulated_sizes)
self.virtual_size_per_dataset = plasma_utils.PlasmaArray(virtual_size_per_dataset)
raw_sizes = [len(d) for d in self.datasets]
sampled_sizes = self.virtual_size_per_dataset.array
logger.info(f'[{self.split}] Raw sizes: {str(dict(zip(self.keys, raw_sizes)))}; '
f'raw total size: {sum(raw_sizes)}')
logger.info(f'[{self.split}] Resampled sizes: {str(dict(zip(self.keys, sampled_sizes)))}; '
f'resampled total size: {sum(sampled_sizes)}')
if self.sample_ratios is not None:
logger.info(f'[{self.split}] Upsampling ratios: {str(dict(zip(self.keys, self.sample_ratios.array)))}')
else:
logger.info(f'[{self.split}] A concat dataset')
logger.debug(f'[{self.split}] virtual dataset established time: {get_time_gap(start_time, time.time())}')
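# A minimal construction sketch; the sub-dataset names are assumptions:
#
#   dataset = SampledMultiDataset(
#       OrderedDict([('en-de', en_de_dataset), ('en-fr', en_fr_dataset)]),
#       sampling_ratios=[0.7, 0.3],
#       seed=2,
#       epoch=1,
#       collate_format=CollateFormat.single,
#       virtual_size=default_virtual_size_func,
#       split='train',
#   )
#
# Each item is a (dataset_index, sample) pair; collater() regroups the samples
# per sub-dataset before merging them into a single batch.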
| 16,841 | 41.423174 | 119 | py |
RegularizedBN | RegularizedBN-main/fairseq/data/multilingual/sampled_multi_epoch_dataset.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import hashlib
import math
import logging
import time
import numpy as np
import torch
from fairseq import distributed_utils
from fairseq.data import plasma_utils, SampledMultiDataset
from .sampled_multi_dataset import default_virtual_size_func, get_time_gap, CollateFormat
logger = logging.getLogger(__name__)
class SampledMultiEpochDataset(SampledMultiDataset):
"""Samples from multiple sub-datasets according to sampling ratios
using virtual epoch sizes to speed up dataloading.
Args:
datasets (
List[~torch.utils.data.Dataset]
or OrderedDict[str, ~torch.utils.data.Dataset]
): datasets
        sampling_ratios (List[float]): list of probabilities for each dataset to be sampled
            (default: None, which corresponds to concatenating all datasets together).
batch_by_size (bool): whether or not to batch by sequence length
(default: True).
seed (int): RNG seed to use (default: 2).
epoch (int): starting epoch number (default: 1).
eval_key (str, optional): a key used at evaluation time that causes
this instance to pass-through batches from *datasets[eval_key]*.
collate_format (CollateFormat): collater output format, either CollateFormat.ordered_dict or
CollateFormat.single (default: CollateFormat.single) where CollateFormat.single configures
the collater to output batches of data mixed from all sub-datasets,
and CollateFormat.ordered_dict configures the collater to output a dictionary of batches indexed by keys
of sub-datasets.
            Note that not all sub-datasets will be present in a single batch in both formats.
virtual_size (int, or callable): the expected virtual size of the dataset (default: default_virtual_size_func).
split (str): the split of the data, e.g. 'train', 'valid' or 'test'.
        virtual_epoch_size (int): virtual epoch size; the dataset iterates through the data in
            chunks of this size to speed up data loading, e.g. indexing and filtering
            can be performed whenever a virtual epoch is loaded without waiting for the whole dataset to be loaded.
        shared_collater (bool): whether or not all sub-datasets have the same collater.
shard_epoch (int): the real epoch number for shard selection.
"""
def __init__(
self,
datasets,
sampling_ratios=None,
batch_by_size=False,
seed=2,
epoch=1,
eval_key=None,
collate_format=CollateFormat.single,
virtual_size=default_virtual_size_func,
split='',
virtual_epoch_size=None,
shared_collater=False,
shard_epoch=1,
):
self.virtual_epoch_size = virtual_epoch_size
self._current_epoch_start_index = None
self._epoch_sizes = None
self._epoch_ordered_indices = None
self._random_globa_indices = None
self.shard_epoch = shard_epoch if shard_epoch is not None else 1
self.load_next_shard = None
super().__init__(
datasets=datasets,
sampling_ratios=sampling_ratios,
batch_by_size=batch_by_size,
seed=seed,
epoch=epoch,
eval_key=eval_key,
collate_format=collate_format,
virtual_size=virtual_size,
split=split,
shared_collater=shared_collater,
)
def _setup(self, epoch):
self.virtual_epoch_size = self.virtual_epoch_size if self.virtual_epoch_size is not None else self.virtual_size
if self.virtual_epoch_size > self.virtual_size:
logger.warning(f'virtual epoch size {self.virtual_epoch_size} '
f'is greater than virtual dataset size {self.virtual_size}')
self.virtual_epoch_size = self.virtual_size
self.num_virtual_epochs = math.ceil(self.virtual_size / self.virtual_epoch_size)
self._current_epoch_start_index = self._get_epoch_start_index(epoch)
logger.info(f'virtual epoch size {self.virtual_epoch_size}; virtual dataset size {self.virtual_size}')
def _map_epoch_index_to_global(self, index):
index = self._current_epoch_start_index + index
# add randomness
return self._random_globa_indices.array[index]
def __getitem__(self, index):
i = self._map_epoch_index_to_global(index)
return super().__getitem__(i)
def num_tokens(self, index):
i = self._map_epoch_index_to_global(index)
return super().num_tokens(i)
def size(self, index):
if self._epoch_sizes is not None:
return self._epoch_sizes.array[index]
index = self._map_epoch_index_to_global(index)
ds_idx, ds_sample_idx = self._get_dataset_and_index(index)
return self.datasets[ds_idx].size(ds_sample_idx)
def __len__(self):
return (
self.virtual_epoch_size
if self._current_epoch_start_index + self.virtual_epoch_size < self.virtual_size
else self.virtual_size - self._current_epoch_start_index
)
@property
def sizes(self):
if self._epoch_sizes is not None:
return self._epoch_sizes.array
start_time = time.time()
size_cache = self._size_cache
ret = []
for i in range(len(self)):
index = self._map_epoch_index_to_global(i)
ds_idx, ds_sample_idx = self._get_dataset_and_index(index)
if (ds_idx, ds_sample_idx) in size_cache:
ret.append(size_cache[(ds_idx, ds_sample_idx)])
else:
s = self.datasets[ds_idx].size(ds_sample_idx)
s = (s, s) if not isinstance(s, tuple) else s
size_cache[(ds_idx, ds_sample_idx)] = s
ret.append(s)
self._epoch_sizes = plasma_utils.PlasmaArray(np.array(ret, np.int64))
logger.info(f'sizes() calling time: {get_time_gap(start_time, time.time())}')
return self._epoch_sizes.array
def ordered_indices(self):
if self._epoch_ordered_indices is not None:
return self._epoch_ordered_indices.array
if self.batch_by_size:
# No need to do shuffle as the data items are already randomized
indices = np.arange(len(self))
sizes = self.sizes
            tgt_sizes = sizes[:, 1] if len(sizes.shape) > 1 and sizes.shape[1] > 1 else None
            src_sizes = sizes[:, 0] if len(sizes.shape) > 1 and sizes.shape[1] > 1 else sizes
# sort by target length, then source length
if tgt_sizes is not None:
indices = indices[
np.argsort(tgt_sizes[indices], kind='mergesort')
]
sort_indices = indices[np.argsort(src_sizes[indices], kind='mergesort')]
else:
sort_indices = np.arange(len(self))
self._epoch_ordered_indices = plasma_utils.PlasmaArray(sort_indices)
return self._epoch_ordered_indices.array
def prefetch(self, indices):
prefetch_indices = [[] for _ in range(len(self.datasets))]
for i in indices:
index = self._map_epoch_index_to_global(i)
ds_idx, ds_sample_idx = self._get_dataset_and_index(index)
prefetch_indices[ds_idx].append(ds_sample_idx)
for i in range(len(prefetch_indices)):
self.datasets[i].prefetch(prefetch_indices[i])
    def set_epoch(self, epoch):
        if self._current_epoch_start_index is None:
            # first call: initialize the virtual epoch bookkeeping
            self._setup(epoch)
            self._next_virtual_epoch(epoch)
        else:
            if epoch == self._cur_epoch:
                # re-entered with the same epoch, so nothing to do
                return
            self._next_virtual_epoch(epoch)
def _get_epoch_start_index(self, epoch):
assert epoch >= 1 # fairseq is using 1-based epoch everywhere
return ((epoch - 1) % self.num_virtual_epochs) * self.virtual_epoch_size
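    # For example, with virtual_size=1000 and virtual_epoch_size=300 there are
    # ceil(1000 / 300) = 4 virtual epochs starting at indices 0, 300, 600 and
    # 900 (the last one holding only 100 items); epoch 5 wraps around to 0.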
def _next_global_indices(self, epoch):
rng = np.random.RandomState(
[
int(hashlib.sha1(str(self.__class__.__name__).encode('utf-8')).hexdigest(), 16) % (2 ** 32),
self.seed % (2 ** 32), # global seed
epoch, # epoch index,
]
)
del self._random_globa_indices
self._random_globa_indices = plasma_utils.PlasmaArray(
rng.choice(self.virtual_size, self.virtual_size, replace=False))
if self.load_next_shard is None:
self.load_next_shard = False
else:
# increase shard epoch for next loading
self.shard_epoch += 1
self.load_next_shard = True
# a hack to avoid possible out of sync of shard epoch number
        # TODO: to confirm whether this is needed; without it, CUDA event error is occasionally observed
synced_shard_epoch = self._sync_shard_epoch(self.shard_epoch)
logger.info('to load next epoch/shard in next load_dataset: '
f'epoch={epoch}/shard_epoch={self.shard_epoch}[synced={synced_shard_epoch}]')
def _sync_shard_epoch(self, shard_epoch):
        # in case the shard epoch is not precisely the same across processes
        # also to ensure every process updates it at the same pace
shard_epoch = torch.DoubleTensor([shard_epoch])
if torch.distributed.is_initialized():
if torch.cuda.is_available():
distributed_utils.all_reduce(shard_epoch.cuda())
else:
distributed_utils.all_reduce(shard_epoch)
ret = shard_epoch.cpu()
ret = ret.numpy()
return ret
def _sync_epoch(self, epoch):
        # in case the epoch is not precisely the same across processes
        # also to ensure every process updates it at the same pace
epoch = torch.DoubleTensor([epoch])
if torch.distributed.is_initialized():
if torch.cuda.is_available():
distributed_utils.all_reduce(epoch.cuda())
else:
distributed_utils.all_reduce(epoch)
ret = epoch.cpu()
ret = ret.numpy()
return ret
def _next_virtual_epoch(self, epoch):
index = self._get_epoch_start_index(epoch)
if index == 0 or self._random_globa_indices is None:
# need to start from the beginning,
# so call super().set_epoch(epoch) to establish the global virtual indices
logger.info('establishing a new set of global virtual indices for '
f'epoch={epoch}/shard_epoch={self.shard_epoch}')
super().set_epoch(epoch)
self._next_global_indices(epoch)
else:
self._cur_epoch = epoch
# reset cache sizes and ordered_indices for the epoch after moving to a new epoch
self._clean_if_not_none([
self._epoch_sizes, self._epoch_ordered_indices, self._size_cache
])
self._epoch_sizes = None
self._epoch_ordered_indices = None
self._current_epoch_start_index = index
self._size_cache = {}
| 11,332 | 42.756757 | 119 | py |
RegularizedBN | RegularizedBN-main/fairseq/data/audio/raw_audio_dataset.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import logging
import numpy as np
import sys
import torch
import torch.nn.functional as F
from .. import FairseqDataset
logger = logging.getLogger(__name__)
class RawAudioDataset(FairseqDataset):
def __init__(
self,
sample_rate,
max_sample_size=None,
min_sample_size=None,
shuffle=True,
min_length=0,
pad=False,
normalize=False,
):
super().__init__()
self.sample_rate = sample_rate
self.sizes = []
self.max_sample_size = (
max_sample_size if max_sample_size is not None else sys.maxsize
)
self.min_sample_size = min_sample_size
self.min_length = min_length
self.pad = pad
self.shuffle = shuffle
self.normalize = normalize
def __getitem__(self, index):
raise NotImplementedError()
def __len__(self):
return len(self.sizes)
def postprocess(self, feats, curr_sample_rate):
if feats.dim() == 2:
feats = feats.mean(-1)
if curr_sample_rate != self.sample_rate:
raise Exception(f"sample rate: {curr_sample_rate}, need {self.sample_rate}")
assert feats.dim() == 1, feats.dim()
if self.normalize:
with torch.no_grad():
feats = F.layer_norm(feats, feats.shape)
return feats
def crop_to_max_size(self, wav, target_size):
size = len(wav)
diff = size - target_size
if diff <= 0:
return wav
start = np.random.randint(0, diff + 1)
end = size - diff + start
return wav[start:end]
def collater(self, samples):
samples = [
s
for s in samples
if s["source"] is not None
]
if len(samples) == 0:
return {}
sources = [s["source"] for s in samples]
sizes = [len(s) for s in sources]
if self.pad:
target_size = min(max(sizes), self.max_sample_size)
else:
target_size = min(min(sizes), self.max_sample_size)
collated_sources = sources[0].new_zeros(len(sources), target_size)
padding_mask = (
torch.BoolTensor(collated_sources.shape).fill_(False) if self.pad else None
)
for i, (source, size) in enumerate(zip(sources, sizes)):
diff = size - target_size
if diff == 0:
collated_sources[i] = source
elif diff < 0:
assert self.pad
collated_sources[i] = torch.cat(
[source, source.new_full((-diff,), 0.0)]
)
padding_mask[i, diff:] = True
else:
collated_sources[i] = self.crop_to_max_size(source, target_size)
input = {"source": collated_sources}
if self.pad:
input["padding_mask"] = padding_mask
return {"id": torch.LongTensor([s["id"] for s in samples]), "net_input": input}
def num_tokens(self, index):
return self.size(index)
def size(self, index):
"""Return an example's size as a float or tuple. This value is used when
filtering a dataset with ``--max-positions``."""
if self.pad:
return self.sizes[index]
return min(self.sizes[index], self.max_sample_size)
def ordered_indices(self):
"""Return an ordered list of indices. Batches will be constructed based
on this order."""
if self.shuffle:
order = [np.random.permutation(len(self))]
else:
order = [np.arange(len(self))]
order.append(self.sizes)
return np.lexsort(order)[::-1]
class FileAudioDataset(RawAudioDataset):
def __init__(
self,
manifest_path,
sample_rate,
max_sample_size=None,
min_sample_size=None,
shuffle=True,
min_length=0,
pad=False,
normalize=False,
):
super().__init__(
sample_rate=sample_rate,
max_sample_size=max_sample_size,
min_sample_size=min_sample_size,
shuffle=shuffle,
min_length=min_length,
pad=pad,
normalize=normalize,
)
self.fnames = []
skipped = 0
with open(manifest_path, "r") as f:
self.root_dir = f.readline().strip()
for line in f:
items = line.strip().split("\t")
assert len(items) == 2, line
sz = int(items[1])
if min_length is not None and sz < min_length:
skipped += 1
continue
self.fnames.append(items[0])
self.sizes.append(sz)
logger.info(f"loaded {len(self.fnames)}, skipped {skipped} samples")
def __getitem__(self, index):
import soundfile as sf
fname = os.path.join(self.root_dir, self.fnames[index])
wav, curr_sample_rate = sf.read(fname)
feats = torch.from_numpy(wav).float()
feats = self.postprocess(feats, curr_sample_rate)
return {"id": index, "source": feats}
| 5,341 | 28.351648 | 88 | py |
RegularizedBN | RegularizedBN-main/fairseq/data/encoders/utils.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from fairseq.data import encoders
def get_whole_word_mask(args, dictionary):
bpe = encoders.build_bpe(args)
if bpe is not None:
def is_beginning_of_word(i):
if i < dictionary.nspecial:
# special elements are always considered beginnings
return True
tok = dictionary[i]
if tok.startswith('madeupword'):
return True
try:
return bpe.is_beginning_of_word(tok)
except ValueError:
return True
mask_whole_words = torch.ByteTensor(list(
map(is_beginning_of_word, range(len(dictionary)))
))
return mask_whole_words
return None
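# Usage sketch; `args` and the task's dictionary are assumed to come from the
# surrounding task setup:
#
#   mask_whole_words = get_whole_word_mask(args, task.source_dictionary)
#
# The result (or None when no BPE is configured) is a ByteTensor with one
# entry per dictionary symbol, set to 1 wherever that symbol can begin a new
# word, which lets masking datasets treat BPE continuation pieces and the
# token they follow as a single unit.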
| 907 | 30.310345 | 67 | py |
RegularizedBN | RegularizedBN-main/fairseq/data/legacy/block_pair_dataset.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import numpy as np
import torch
from fairseq.data import FairseqDataset
class BlockPairDataset(FairseqDataset):
"""Break a Dataset of tokens into sentence pair blocks for next sentence
prediction as well as masked language model.
High-level logics are:
1. break input tensor to tensor blocks
2. pair the blocks with 50% next sentence and 50% random sentence
3. return paired blocks as well as related segment labels
Args:
dataset (~torch.utils.data.Dataset): dataset to break into blocks
sizes: array of sentence lengths
dictionary: dictionary for the task
block_size: maximum block size
        break_mode: mode for breaking the corpus into block pairs. Currently we
            support 2 modes
            doc: respect document boundaries and each part of the pair should belong to one document
none: don't respect any boundary and cut tokens evenly
short_seq_prob: probability for generating shorter block pairs
doc_break_size: Size for empty line separating documents. Typically 1 if
the sentences have eos, 0 otherwise.
"""
def __init__(
self,
dataset,
dictionary,
sizes,
block_size,
break_mode="doc",
short_seq_prob=0.1,
doc_break_size=1,
):
super().__init__()
self.dataset = dataset
self.pad = dictionary.pad()
self.eos = dictionary.eos()
self.cls = dictionary.cls()
self.mask = dictionary.mask()
self.sep = dictionary.sep()
self.break_mode = break_mode
self.dictionary = dictionary
self.short_seq_prob = short_seq_prob
self.block_indices = []
assert len(dataset) == len(sizes)
if break_mode == "doc":
cur_doc = []
for sent_id, sz in enumerate(sizes):
assert doc_break_size == 0 or sz != 0, (
"when doc_break_size is non-zero, we expect documents to be"
"separated by a blank line with a single eos."
)
# empty line as document separator
if sz == doc_break_size:
if len(cur_doc) == 0:
continue
self.block_indices.append(cur_doc)
cur_doc = []
else:
cur_doc.append(sent_id)
max_num_tokens = block_size - 3 # Account for [CLS], [SEP], [SEP]
self.sent_pairs = []
self.sizes = []
for doc_id, doc in enumerate(self.block_indices):
self._generate_sentence_pair(doc, doc_id, max_num_tokens, sizes)
elif break_mode is None or break_mode == "none":
# each block should have half of the block size since we are constructing block pair
sent_length = (block_size - 3) // 2
total_len = sum(dataset.sizes)
length = math.ceil(total_len / sent_length)
def block_at(i):
start = i * sent_length
end = min(start + sent_length, total_len)
return (start, end)
sent_indices = np.array([block_at(i) for i in range(length)])
sent_sizes = np.array([e - s for s, e in sent_indices])
dataset_index = self._sent_to_dataset_index(sent_sizes)
# pair sentences
self._pair_sentences(dataset_index)
else:
raise ValueError("Invalid break_mode: " + break_mode)
def _pair_sentences(self, dataset_index):
"""
        Given a list of evenly cut blocks/sentences, pair these sentences with 50%
        consecutive sentences and 50% random sentences.
        This is used for the "none" break mode.
"""
# pair sentences
for sent_id, sent in enumerate(dataset_index):
next_sent_label = (
1 if np.random.rand() > 0.5 and sent_id != len(dataset_index) - 1 else 0
)
if next_sent_label:
next_sent = dataset_index[sent_id + 1]
else:
next_sent = dataset_index[
self._skip_sampling(len(dataset_index), [sent_id, sent_id + 1])
]
self.sent_pairs.append((sent, next_sent, next_sent_label))
# The current blocks don't include the special tokens but the
# sizes already account for this
self.sizes.append(3 + sent[3] + next_sent[3])
def _sent_to_dataset_index(self, sent_sizes):
"""
Build index mapping block indices to the underlying dataset indices
"""
dataset_index = []
ds_idx, ds_remaining = -1, 0
for to_consume in sent_sizes:
sent_size = to_consume
if ds_remaining == 0:
ds_idx += 1
ds_remaining = sent_sizes[ds_idx]
start_ds_idx = ds_idx
start_offset = sent_sizes[ds_idx] - ds_remaining
while to_consume > ds_remaining:
to_consume -= ds_remaining
ds_idx += 1
ds_remaining = sent_sizes[ds_idx]
ds_remaining -= to_consume
dataset_index.append(
(
start_ds_idx, # starting index in dataset
start_offset, # starting offset within starting index
ds_idx, # ending index in dataset
sent_size, # sentence length
)
)
assert ds_remaining == 0
assert ds_idx == len(self.dataset) - 1
return dataset_index
def _generate_sentence_pair(self, doc, doc_id, max_num_tokens, sizes):
"""
        Go through a single document and generate sentence pairs from it
"""
current_chunk = []
current_length = 0
curr = 0
# To provide more randomness, we decrease target seq length for parts of
# samples (10% by default). Note that max_num_tokens is the hard threshold
# for batching and will never be changed.
target_seq_length = max_num_tokens
if np.random.random() < self.short_seq_prob:
target_seq_length = np.random.randint(2, max_num_tokens)
# loop through all sentences in document
while curr < len(doc):
sent_id = doc[curr]
current_chunk.append(sent_id)
current_length = sum(sizes[current_chunk])
# split chunk and generate pair when exceed target_seq_length or
# finish the loop
if curr == len(doc) - 1 or current_length >= target_seq_length:
# split the chunk into 2 parts
a_end = 1
if len(current_chunk) > 2:
a_end = np.random.randint(1, len(current_chunk) - 1)
sent_a = current_chunk[:a_end]
len_a = sum(sizes[sent_a])
# generate next sentence label, note that if there is only 1 sentence
# in current chunk, label is always 0
next_sent_label = (
1 if np.random.rand() > 0.5 and len(current_chunk) != 1 else 0
)
if not next_sent_label:
# if next sentence label is 0, sample sent_b from a random doc
target_b_length = target_seq_length - len_a
rand_doc_id = self._skip_sampling(len(self.block_indices), [doc_id])
random_doc = self.block_indices[rand_doc_id]
random_start = np.random.randint(0, len(random_doc))
sent_b = []
len_b = 0
for j in range(random_start, len(random_doc)):
sent_b.append(random_doc[j])
len_b = sum(sizes[sent_b])
if len_b >= target_b_length:
break
# return the second part of the chunk since it's not used
num_unused_segments = len(current_chunk) - a_end
curr -= num_unused_segments
else:
                    # if next sentence label is 1, use the second part of the chunk as sent_b
sent_b = current_chunk[a_end:]
len_b = sum(sizes[sent_b])
                # currently sent_a and sent_b may be longer than max_num_tokens,
# truncate them and return block idx and offsets for them
sent_a, sent_b = self._truncate_sentences(
sent_a, sent_b, max_num_tokens
)
self.sent_pairs.append((sent_a, sent_b, next_sent_label))
self.sizes.append(3 + sent_a[3] + sent_b[3])
current_chunk = []
curr += 1
def _skip_sampling(self, total, skip_ids):
"""
Generate a random integer which is not in skip_ids. Sample range is [0, total)
TODO: ids in skip_ids should be consecutive, we can extend it to more generic version later
"""
rand_id = np.random.randint(total - len(skip_ids))
return rand_id if rand_id < min(skip_ids) else rand_id + len(skip_ids)
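    # For example, with total=10 and skip_ids=[3, 4] the draw is from [0, 8):
    # draws 0..2 are returned unchanged, while draws 3..7 are shifted up by
    # len(skip_ids)=2 to 5..9, so 3 and 4 can never be produced.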
def _truncate_sentences(self, sent_a, sent_b, max_num_tokens):
"""
        Truncate a pair of sentences to limit the total length under max_num_tokens
        Logic:
            1. Truncate the longer sentence
            2. Tokens to be truncated could be at the beginning or the end of the sentence
Returns:
Truncated sentences represented by dataset idx
"""
len_a, len_b = sum(self.dataset.sizes[sent_a]), sum(self.dataset.sizes[sent_b])
front_cut_a = front_cut_b = end_cut_a = end_cut_b = 0
while True:
total_length = (
len_a + len_b - front_cut_a - front_cut_b - end_cut_a - end_cut_b
)
if total_length <= max_num_tokens:
break
if len_a - front_cut_a - end_cut_a > len_b - front_cut_b - end_cut_b:
if np.random.rand() < 0.5:
front_cut_a += 1
else:
end_cut_a += 1
else:
if np.random.rand() < 0.5:
front_cut_b += 1
else:
end_cut_b += 1
# calculate ds indices as well as offsets and return
truncated_sent_a = self._cut_sentence(sent_a, front_cut_a, end_cut_a)
truncated_sent_b = self._cut_sentence(sent_b, front_cut_b, end_cut_b)
return truncated_sent_a, truncated_sent_b
def _cut_sentence(self, sent, front_cut, end_cut):
"""
Cut a sentence based on the numbers of tokens to be cut from beginning and end
Represent the sentence as dataset idx and return
"""
start_ds_idx, end_ds_idx, offset = sent[0], sent[-1], 0
target_len = sum(self.dataset.sizes[sent]) - front_cut - end_cut
while front_cut > 0:
if self.dataset.sizes[start_ds_idx] > front_cut:
offset += front_cut
break
else:
front_cut -= self.dataset.sizes[start_ds_idx]
start_ds_idx += 1
while end_cut > 0:
if self.dataset.sizes[end_ds_idx] > end_cut:
break
else:
end_cut -= self.dataset.sizes[end_ds_idx]
end_ds_idx -= 1
return start_ds_idx, offset, end_ds_idx, target_len
def _fetch_block(self, start_ds_idx, offset, end_ds_idx, length):
"""
Fetch a block of tokens based on its dataset idx
"""
buffer = torch.cat(
[self.dataset[idx] for idx in range(start_ds_idx, end_ds_idx + 1)]
)
s, e = offset, offset + length
return buffer[s:e]
def __getitem__(self, index):
block1, block2, next_sent_label = self.sent_pairs[index]
block1 = self._fetch_block(*block1)
block2 = self._fetch_block(*block2)
return block1, block2, next_sent_label
def __len__(self):
return len(self.sizes)
@property
def supports_prefetch(self):
return getattr(self.dataset, "supports_prefetch", False)
def prefetch(self, indices):
prefetch_idx = set()
for index in indices:
for block1, block2, _ in [self.sent_pairs[index]]:
for ds_idx in range(block1[0], block1[2] + 1):
prefetch_idx.add(ds_idx)
for ds_idx in range(block2[0], block2[2] + 1):
prefetch_idx.add(ds_idx)
self.dataset.prefetch(prefetch_idx)
| 12,878 | 40.146965 | 99 | py |
RegularizedBN | RegularizedBN-main/fairseq/data/legacy/masked_lm_dataset.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import numpy as np
import torch
from typing import Dict, List, Tuple
from fairseq.data import FairseqDataset, data_utils
from fairseq.data import Dictionary
from fairseq.data.legacy.block_pair_dataset import BlockPairDataset
from fairseq.data.token_block_dataset import TokenBlockDataset
from fairseq.data.concat_dataset import ConcatDataset
class MaskedLMDataset(FairseqDataset):
"""
A wrapper Dataset for masked language modelling. The dataset
wraps around TokenBlockDataset or BlockedPairDataset and creates a batch
where the input blocks are masked according to the specified masking
probability. Additionally the batch can also contain sentence level targets
if this is specified.
Args:
dataset: Dataset which generates blocks of data. Only BlockPairDataset
and TokenBlockDataset are supported.
sizes: Sentence lengths
vocab: Dictionary with the vocabulary and special tokens.
pad_idx: Id of padding token in dictionary
mask_idx: Id of mask token in dictionary
classif_token_idx: Id of classification token in dictionary. This is the
token associated with the sentence embedding (Eg: CLS for BERT)
sep_token_idx: Id of separator token in dictionary
(Eg: SEP in BERT)
seed: Seed for random number generator for reproducibility.
shuffle: Shuffle the elements before batching.
has_pairs: Specifies whether the underlying dataset
generates a pair of blocks along with a sentence_target or not.
Setting it to True assumes that the underlying dataset generates a
label for the pair of sentences which is surfaced as
sentence_target. The default value assumes a single block with no
sentence target.
segment_id: An optional segment id for filling in the segment labels
when we are in the single block setting (Eg: XLM). Default is 0.
masking_ratio: specifies what percentage of the blocks should be masked.
masking_prob: specifies the probability of a given token being
replaced with the "MASK" token.
random_token_prob: specifies the probability of a given token being
replaced by a random token from the vocabulary.
"""
def __init__(
self,
dataset: FairseqDataset,
sizes: np.ndarray,
vocab: Dictionary,
pad_idx: int,
mask_idx: int,
classif_token_idx: int,
sep_token_idx: int,
seed: int = 1,
shuffle: bool = True,
has_pairs: bool = True,
segment_id: int = 0,
masking_ratio: float = 0.15,
masking_prob: float = 0.8,
random_token_prob: float = 0.1
):
# Make sure the input datasets are the ones supported
assert (
isinstance(dataset, TokenBlockDataset) or
isinstance(dataset, BlockPairDataset) or
isinstance(dataset, ConcatDataset)
), "MaskedLMDataset only wraps TokenBlockDataset or BlockPairDataset or " \
"ConcatDataset"
self.dataset = dataset
self.sizes = np.array(sizes)
self.vocab = vocab
self.pad_idx = pad_idx
self.mask_idx = mask_idx
self.classif_token_idx = classif_token_idx
self.sep_token_idx = sep_token_idx
self.shuffle = shuffle
self.seed = seed
self.has_pairs = has_pairs
self.segment_id = segment_id
self.masking_ratio = masking_ratio
self.masking_prob = masking_prob
self.random_token_prob = random_token_prob
# If we have only one block then sizes needs to be updated to include
# the classification token
if not has_pairs:
self.sizes = self.sizes + 1
def __getitem__(
self,
index: int
):
# if has_pairs, then expect 2 blocks and a sentence target
if self.has_pairs:
(block_one, block_two, sentence_target) = self.dataset[index]
else:
block_one = self.dataset[index]
return {
"id": index,
"block_one": block_one,
"block_two": block_two if self.has_pairs else None,
"sentence_target": sentence_target if self.has_pairs else None,
}
def __len__(self):
return len(self.dataset)
def _mask_block(
self,
sentence: np.ndarray,
mask_idx: int,
pad_idx: int,
dictionary_token_range: Tuple,
):
"""
Mask tokens for Masked Language Model training
Samples mask_ratio tokens that will be predicted by LM.
        Note: This function may not be efficient enough since we have multiple
        conversions between np and torch; we can replace them with torch
        operators later.
Args:
sentence: 1d tensor to be masked
mask_idx: index to use for masking the sentence
pad_idx: index to use for masking the target for tokens we aren't
predicting
dictionary_token_range: range of indices in dictionary which can
be used for random word replacement
(e.g. without special characters)
Return:
masked_sent: masked sentence
target: target with words which we are not predicting replaced
by pad_idx
"""
masked_sent = np.copy(sentence)
sent_length = len(sentence)
mask_num = math.ceil(sent_length * self.masking_ratio)
mask = np.random.choice(sent_length, mask_num, replace=False)
target = np.copy(sentence)
for i in range(sent_length):
if i in mask:
rand = np.random.random()
# replace with mask if probability is less than masking_prob
# (Eg: 0.8)
if rand < self.masking_prob:
masked_sent[i] = mask_idx
# replace with random token if probability is less than
# masking_prob + random_token_prob (Eg: 0.9)
elif rand < (self.masking_prob + self.random_token_prob):
# sample random token from dictionary
masked_sent[i] = (
np.random.randint(
dictionary_token_range[0], dictionary_token_range[1]
)
)
else:
target[i] = pad_idx
return masked_sent, target
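    # Worked illustration of _mask_block above (not an exact trace, since the
    # draws are random): with masking_ratio=0.15, masking_prob=0.8 and
    # random_token_prob=0.1, roughly 15% of positions are selected; a selected
    # position becomes mask_idx with probability 0.8, a random vocabulary token
    # with probability 0.1, and is otherwise left unchanged, while every
    # unselected position has its target overwritten with pad_idx so it is
    # ignored by the loss.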
def _collate(
self,
samples: List[Dict],
pad_idx: int,
eos_idx: int
):
"""
Does the heavy lifting for creating a batch from the input list of
examples. The logic is as follows:
            1. Mask the input blocks. In case has_pairs is True then we have 2
blocks to mask.
2. Prepend the first masked block tensor with the special token
used as sentence embedding. Eg: CLS in BERT. This happens
               irrespective of the value of has_pairs.
            3. If has_pairs is True, then append the first masked block with the
special separator token (eg: SEP for BERT) and compute segment
label accordingly. In this case, also append the second masked
block with this special separator token and compute its segment
label.
4. For the targets tensor, prepend and append with padding index
accordingly.
5. Concatenate all tensors.
"""
if len(samples) == 0:
return {}
# To ensure determinism, we reset the state of the PRNG after every
# batch based on the seed and the first id of the batch. This ensures
# that across epochs we get the same mask for the same example. This
# is needed for reproducibility and is how BERT does masking
        # TODO: Can we add determinism without this constraint?
with data_utils.numpy_seed(self.seed + samples[0]["id"]):
for s in samples:
# token range is needed for replacing with random token during
# masking
token_range = (self.vocab.nspecial, len(self.vocab))
# mask according to specified probabilities.
masked_blk_one, masked_tgt_one = self._mask_block(
s["block_one"], self.mask_idx, self.pad_idx, token_range,
)
tokens = np.concatenate([
[self.classif_token_idx], masked_blk_one
])
targets = np.concatenate([[self.pad_idx], masked_tgt_one])
segments = np.ones(len(tokens)) * self.segment_id
# if has_pairs is True then we need to add the SEP token to both
# the blocks after masking and re-compute segments based on the new
# lengths.
if self.has_pairs:
tokens_one = np.concatenate([tokens, [self.sep_token_idx]])
targets_one = np.concatenate([targets, [self.pad_idx]])
masked_blk_two, masked_tgt_two = self._mask_block(
s["block_two"], self.mask_idx, self.pad_idx, token_range)
tokens_two = np.concatenate(
[masked_blk_two, [self.sep_token_idx]])
targets_two = np.concatenate([masked_tgt_two, [self.pad_idx]])
# block + 1 sep + 1 special (CLS)
segments_one = np.zeros(len(tokens_one))
# block + 1 sep
segments_two = np.ones(len(tokens_two))
tokens = np.concatenate([tokens_one, tokens_two])
targets = np.concatenate([targets_one, targets_two])
segments = np.concatenate([segments_one, segments_two])
s["source"] = torch.LongTensor(tokens)
s["segment_labels"] = torch.LongTensor(segments)
s["lm_target"] = torch.LongTensor(targets)
def merge(key):
return data_utils.collate_tokens(
[s[key] for s in samples], pad_idx, eos_idx, left_pad=False
)
return {
"id": torch.LongTensor([s["id"] for s in samples]),
"ntokens": sum(len(s["source"]) for s in samples),
"net_input": {
"src_tokens": merge("source"),
"segment_labels": merge("segment_labels"),
},
"lm_target": merge("lm_target"),
"sentence_target": torch.LongTensor(
[s["sentence_target"] for s in samples]
) if self.has_pairs else None,
"nsentences": len(samples),
}
def collater(
self,
samples: List[Dict]
):
"""Merge a list of samples to form a mini-batch.
Args:
samples (List[dict]): samples to collate
Returns:
dict: a mini-batch of data
"""
return self._collate(samples, self.vocab.pad(), self.vocab.eos())
def num_tokens(
self,
index: int
):
"""
Return the number of tokens in a sample. This value is used to
enforce max-tokens during batching.
"""
return self.sizes[index]
def size(
self,
index: int
):
"""
Return an example's size as a float or tuple. This value is used when
filtering a dataset with max-positions.
"""
return self.sizes[index]
def ordered_indices(self):
"""
Return an ordered list of indices. Batches will be constructed based
on this order.
"""
if self.shuffle:
return np.random.permutation(len(self))
else:
order = [np.arange(len(self))]
order.append(self.sizes)
return np.lexsort(order)
@property
def supports_prefetch(self):
return getattr(self.dataset, "supports_prefetch", False)
def prefetch(self, indices):
self.dataset.prefetch(indices)
| 12,468 | 37.603715 | 83 | py |
RegularizedBN | RegularizedBN-main/fairseq/tasks/translation_from_pretrained_bart.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from fairseq.data import LanguagePairDataset
from fairseq import utils
from .translation import load_langpair_dataset, TranslationTask
from . import register_task
@register_task('translation_from_pretrained_bart')
class TranslationFromPretrainedBARTTask(TranslationTask):
"""
Translate from source language to target language with a model initialized with a multilingual pretrain.
Args:
src_dict (~fairseq.data.Dictionary): dictionary for the source language
tgt_dict (~fairseq.data.Dictionary): dictionary for the target language
.. note::
The translation task is compatible with :mod:`fairseq-train`,
:mod:`fairseq-generate` and :mod:`fairseq-interactive`.
The translation task provides the following additional command-line
arguments:
.. argparse::
:ref: fairseq.tasks.translation_parser
:prog:
"""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
# fmt: off
TranslationTask.add_args(parser)
parser.add_argument('--langs', required=True, metavar='LANG',
                            help='comma-separated list of monolingual languages, '
'for example, "en,de,fr". These should match the '
'langs from pretraining (and be in the same order). '
'You should always add all pretraining language idx '
'during finetuning.')
parser.add_argument('--prepend-bos', action='store_true',
help='prepend bos token to each sentence, which matches '
'mBART pretraining')
# fmt: on
def __init__(self, args, src_dict, tgt_dict):
super().__init__(args, src_dict, tgt_dict)
self.langs = args.langs.split(',')
for d in [src_dict, tgt_dict]:
for l in self.langs:
d.add_symbol('[{}]'.format(l))
d.add_symbol('<mask>')
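        # For example, with --langs en_XX,ro_RO (the naming style used by the
        # mBART checkpoints) the loop above appends '[en_XX]', '[ro_RO]' and
        # '<mask>' to both dictionaries, so the fine-tuning vocabulary lines up
        # with the pretrained one.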
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
paths = utils.split_paths(self.args.data)
assert len(paths) > 0
data_path = paths[(epoch - 1) % len(paths)]
# infer langcode
src, tgt = self.args.source_lang, self.args.target_lang
self.datasets[split] = load_langpair_dataset(
data_path, split, src, self.src_dict, tgt, self.tgt_dict,
combine=combine, dataset_impl=self.args.dataset_impl,
upsample_primary=self.args.upsample_primary,
left_pad_source=self.args.left_pad_source,
left_pad_target=self.args.left_pad_target,
max_source_positions=getattr(self.args, 'max_source_positions', 1024),
max_target_positions=getattr(self.args, 'max_target_positions', 1024),
load_alignments=self.args.load_alignments,
prepend_bos=getattr(self.args, 'prepend_bos', False),
append_source_id=True
)
def build_generator(self, models, args):
if getattr(args, 'score_reference', False):
from fairseq.sequence_scorer import SequenceScorer
return SequenceScorer(
self.target_dictionary,
eos=self.tgt_dict.index('[{}]'.format(self.args.target_lang))
)
else:
from fairseq.sequence_generator import SequenceGenerator
return SequenceGenerator(
models,
self.target_dictionary,
beam_size=getattr(args, 'beam', 5),
max_len_a=getattr(args, 'max_len_a', 0),
max_len_b=getattr(args, 'max_len_b', 200),
min_len=getattr(args, 'min_len', 1),
normalize_scores=(not getattr(args, 'unnormalized', False)),
len_penalty=getattr(args, 'lenpen', 1),
unk_penalty=getattr(args, 'unkpen', 0),
temperature=getattr(args, 'temperature', 1.),
match_source_len=getattr(args, 'match_source_len', False),
no_repeat_ngram_size=getattr(args, 'no_repeat_ngram_size', 0),
eos=self.tgt_dict.index('[{}]'.format(self.args.target_lang))
)
def build_dataset_for_inference(self, src_tokens, src_lengths, constraints=None):
src_lang_id = self.source_dictionary.index('[{}]'.format(self.args.source_lang))
source_tokens = []
for s_t in src_tokens:
s_t = torch.cat([s_t, s_t.new(1).fill_(src_lang_id)])
source_tokens.append(s_t)
dataset = LanguagePairDataset(source_tokens, src_lengths, self.source_dictionary,
tgt_dict=self.target_dictionary,
constraints=constraints)
return dataset
| 5,169 | 41.377049 | 108 | py |
RegularizedBN | RegularizedBN-main/fairseq/tasks/language_modeling.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import numpy as np
import torch
from fairseq import utils
from fairseq.data import (
AppendTokenDataset,
data_utils,
Dictionary,
IdDataset,
MonolingualDataset,
NestedDictionaryDataset,
NumelDataset,
PadDataset,
PrependTokenDataset,
StripTokenDataset,
TokenBlockDataset,
TransformEosDataset,
TruncatedDictionary,
)
from fairseq.data.shorten_dataset import maybe_shorten_dataset
from fairseq.tasks import FairseqTask, register_task
logger = logging.getLogger(__name__)
@register_task("language_modeling")
class LanguageModelingTask(FairseqTask):
"""
Train a language model.
Args:
dictionary (~fairseq.data.Dictionary): the dictionary for the input of
the language model
output_dictionary (~fairseq.data.Dictionary): the dictionary for the
output of the language model. In most cases it will be the same as
*dictionary*, but could possibly be a more limited version of the
dictionary (if ``--output-dictionary-size`` is used).
targets (List[str]): list of the target types that the language model
should predict. Can be one of "self", "future", and "past".
Defaults to "future".
.. note::
The language modeling task is compatible with :mod:`fairseq-train`,
:mod:`fairseq-generate`, :mod:`fairseq-interactive` and
:mod:`fairseq-eval-lm`.
The language modeling task provides the following additional command-line
arguments:
.. argparse::
:ref: fairseq.tasks.language_modeling_parser
:prog:
"""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
# fmt: off
parser.add_argument('data', help='path to data directory')
parser.add_argument('--sample-break-mode', default='none',
choices=['none', 'complete', 'complete_doc', 'eos'],
help='If omitted or "none", fills each sample with tokens-per-sample '
'tokens. If set to "complete", splits samples only at the end '
'of sentence, but may include multiple sentences per sample. '
'"complete_doc" is similar but respects doc boundaries. '
'If set to "eos", includes only one sentence per sample.')
parser.add_argument('--tokens-per-sample', default=1024, type=int,
help='max number of tokens per sample for LM dataset')
parser.add_argument('--output-dictionary-size', default=-1, type=int,
help='limit the size of output dictionary')
parser.add_argument('--self-target', action='store_true',
help='include self target')
parser.add_argument('--future-target', action='store_true',
help='include future target')
parser.add_argument('--past-target', action='store_true',
help='include past target')
parser.add_argument('--add-bos-token', action='store_true',
help='prepend beginning of sentence token (<s>)')
parser.add_argument('--max-target-positions', type=int, metavar='N',
help='max number of tokens in the target sequence')
parser.add_argument('--shorten-method', default='none',
choices=['none', 'truncate', 'random_crop'],
help='if not none, shorten sequences that exceed --tokens-per-sample')
parser.add_argument('--shorten-data-split-list', default='',
help='comma-separated list of dataset splits to apply shortening to, '
'e.g., "train,valid" (default: all dataset splits)')
# fmt: on
def __init__(self, args, dictionary, output_dictionary=None, targets=None):
super().__init__(args)
self.dictionary = dictionary
self.output_dictionary = output_dictionary or dictionary
if targets is None:
targets = ["future"]
self.targets = targets
@classmethod
def setup_dictionary(cls, args, **kwargs):
dictionary = None
output_dictionary = None
if args.data:
paths = utils.split_paths(args.data)
assert len(paths) > 0
dictionary = Dictionary.load(os.path.join(paths[0], "dict.txt"))
logger.info("dictionary: {} types".format(len(dictionary)))
output_dictionary = dictionary
if args.output_dictionary_size >= 0:
output_dictionary = TruncatedDictionary(
dictionary, args.output_dictionary_size
)
return (dictionary, output_dictionary)
@classmethod
def setup_task(cls, args, **kwargs):
"""Setup the task (e.g., load dictionaries).
Args:
args (argparse.Namespace): parsed command-line arguments
"""
dictionary, output_dictionary = cls.setup_dictionary(args, **kwargs)
# upgrade old checkpoints
if hasattr(args, "exclude_self_target"):
args.self_target = not args.exclude_self_target
targets = []
if getattr(args, "self_target", False):
targets.append("self")
if getattr(args, "future_target", False):
targets.append("future")
if getattr(args, "past_target", False):
targets.append("past")
if len(targets) == 0:
# standard language modeling
targets = ["future"]
return cls(args, dictionary, output_dictionary, targets=targets)
def build_model(self, args):
model = super().build_model(args)
for target in self.targets:
if target not in model.supported_targets:
raise ValueError(
"Unsupported language modeling target: {}".format(target)
)
return model
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
paths = utils.split_paths(self.args.data)
assert len(paths) > 0
data_path = paths[(epoch - 1) % len(paths)]
split_path = os.path.join(data_path, split)
dataset = data_utils.load_indexed_dataset(
split_path, self.dictionary, self.args.dataset_impl, combine=combine
)
if dataset is None:
raise FileNotFoundError(
"Dataset not found: {} ({})".format(split, split_path)
)
dataset = maybe_shorten_dataset(
dataset,
split,
self.args.shorten_data_split_list,
self.args.shorten_method,
self.args.tokens_per_sample,
self.args.seed,
)
dataset = TokenBlockDataset(
dataset,
dataset.sizes,
self.args.tokens_per_sample,
pad=self.dictionary.pad(),
eos=self.dictionary.eos(),
break_mode=self.args.sample_break_mode,
include_targets=True,
)
add_eos_for_other_targets = (
self.args.sample_break_mode is not None
and self.args.sample_break_mode != "none"
)
self.datasets[split] = self._initialize_dataset(
dataset=dataset,
sizes=dataset.sizes,
src_vocab=self.dictionary,
tgt_vocab=self.output_dictionary,
add_eos_for_other_targets=add_eos_for_other_targets,
shuffle=True,
targets=self.targets,
add_bos_token=self.args.add_bos_token,
)
def _initialize_dataset(self, **kwargs):
return MonolingualDataset(**kwargs)
def build_dataset_for_inference(self, src_tokens, src_lengths, **kwargs):
"""
Generate batches for inference. We prepend an eos token to src_tokens
(or bos if `--add-bos-token` is set) and we append a <pad> to target.
This is convenient both for generation with a prefix and LM scoring.
"""
dataset = StripTokenDataset(
TokenBlockDataset(
src_tokens,
src_lengths,
block_size=None, # ignored for "eos" break mode
pad=self.source_dictionary.pad(),
eos=self.source_dictionary.eos(),
break_mode="eos",
),
# remove eos from (end of) target sequence
self.source_dictionary.eos(),
)
src_dataset = PrependTokenDataset(
dataset,
token=(
self.source_dictionary.bos()
if getattr(self.args, "add_bos_token", False)
else self.source_dictionary.eos()
),
)
tgt_dataset = AppendTokenDataset(
dataset,
token=self.source_dictionary.pad()
)
return NestedDictionaryDataset(
{
"id": IdDataset(),
"net_input": {
"src_tokens": PadDataset(src_dataset, pad_idx=self.source_dictionary.pad(), left_pad=False),
"src_lengths": NumelDataset(src_dataset, reduce=False),
},
"target": PadDataset(tgt_dataset, pad_idx=self.source_dictionary.pad(), left_pad=False),
},
sizes=[np.array(src_lengths)],
)
def inference_step(self, generator, models, sample, prefix_tokens=None, constraints=None):
with torch.no_grad():
# Generation will always be conditioned on bos_token
if getattr(self.args, "add_bos_token", False):
bos_token = self.source_dictionary.bos()
else:
bos_token = self.source_dictionary.eos()
if constraints is not None:
raise NotImplementedError("Constrained decoding with the language_modeling task is not supported")
# SequenceGenerator doesn't use src_tokens directly, we need to
# pass the `prefix_tokens` argument instead
if prefix_tokens is None and sample["net_input"]["src_tokens"].nelement():
prefix_tokens = sample["net_input"]["src_tokens"]
if prefix_tokens[:, 0].eq(bos_token).all():
prefix_tokens = prefix_tokens[:, 1:]
return generator.generate(
models, sample, prefix_tokens=prefix_tokens, bos_token=bos_token,
)
@property
def source_dictionary(self):
"""Return the :class:`~fairseq.data.Dictionary` for the language
model."""
return self.dictionary
@property
def target_dictionary(self):
"""Return the :class:`~fairseq.data.Dictionary` for the language
model."""
return self.output_dictionary
| 11,244 | 37.248299 | 114 | py |
RegularizedBN | RegularizedBN-main/fairseq/tasks/multilingual_masked_lm.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import numpy as np
import torch
from fairseq.data import (
data_utils,
Dictionary,
encoders,
ConcatDataset,
IdDataset,
MaskTokensDataset,
NestedDictionaryDataset,
NumelDataset,
NumSamplesDataset,
PadDataset,
PrependTokenDataset,
RawLabelDataset,
ResamplingDataset,
SortDataset,
TokenBlockDataset,
)
from fairseq.tasks import FairseqTask, register_task
from fairseq import utils
logger = logging.getLogger(__name__)
@register_task('multilingual_masked_lm')
class MultiLingualMaskedLMTask(FairseqTask):
"""Task for training masked language models (e.g., BERT, RoBERTa)."""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
parser.add_argument('data', help='colon separated path to data directories list, \
will be iterated upon during epochs in round-robin manner')
parser.add_argument('--sample-break-mode', default='complete',
choices=['none', 'complete', 'complete_doc', 'eos'],
help='If omitted or "none", fills each sample with tokens-per-sample '
'tokens. If set to "complete", splits samples only at the end '
'of sentence, but may include multiple sentences per sample. '
'"complete_doc" is similar but respects doc boundaries. '
'If set to "eos", includes only one sentence per sample.')
parser.add_argument('--tokens-per-sample', default=512, type=int,
help='max number of total tokens over all segments '
'per sample for BERT dataset')
parser.add_argument('--mask-prob', default=0.15, type=float,
help='probability of replacing a token with mask')
parser.add_argument('--leave-unmasked-prob', default=0.1, type=float,
help='probability that a masked token is unmasked')
parser.add_argument('--random-token-prob', default=0.1, type=float,
help='probability of replacing a token with a random token')
parser.add_argument('--freq-weighted-replacement', action='store_true',
help='sample random replacement words based on word frequencies')
parser.add_argument('--mask-whole-words', default=False, action='store_true',
help='mask whole words; you may also want to set --bpe')
parser.add_argument('--multilang-sampling-alpha', type=float, default=1.0,
                            help='smoothing alpha for sample ratios across multiple datasets')
def __init__(self, args, dictionary):
super().__init__(args)
self.dictionary = dictionary
self.seed = args.seed
# add mask token
self.mask_idx = dictionary.add_symbol('<mask>')
@classmethod
def setup_task(cls, args, **kwargs):
paths = utils.split_paths(args.data)
assert len(paths) > 0
dictionary = Dictionary.load(os.path.join(paths[0], 'dict.txt'))
logger.info('dictionary: {} types'.format(len(dictionary)))
return cls(args, dictionary)
def _get_whole_word_mask(self):
        # build a per-vocabulary-index mask marking beginning-of-word tokens,
        # used later when applying whole-word masking
if self.args.mask_whole_words:
bpe = encoders.build_bpe(self.args)
if bpe is not None:
def is_beginning_of_word(i):
if i < self.source_dictionary.nspecial:
# special elements are always considered beginnings
return True
tok = self.source_dictionary[i]
if tok.startswith('madeupword'):
return True
try:
return bpe.is_beginning_of_word(tok)
except ValueError:
return True
mask_whole_words = torch.ByteTensor(list(
map(is_beginning_of_word, range(len(self.source_dictionary)))
))
else:
mask_whole_words = None
return mask_whole_words
def _get_sample_prob(self, dataset_lens):
"""
        Get smoothed sampling probability by language. This helps low-resource
languages by upsampling them.
"""
prob = dataset_lens / dataset_lens.sum()
smoothed_prob = prob ** self.args.multilang_sampling_alpha
smoothed_prob = smoothed_prob / smoothed_prob.sum()
return smoothed_prob
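    # Worked example for _get_sample_prob above: with dataset sizes [100, 900]
    # and --multilang-sampling-alpha 0.7, the raw probabilities [0.1, 0.9] are
    # smoothed to roughly [0.18, 0.82], and load_dataset() below turns this
    # into size_ratio of roughly [1.77, 0.91], i.e. the low-resource language
    # is upsampled and the large one slightly downsampled.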
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
paths = utils.split_paths(self.args.data)
assert len(paths) > 0
data_path = paths[(epoch - 1) % len(paths)]
languages = sorted(
name for name in os.listdir(data_path)
if os.path.isdir(os.path.join(data_path, name))
)
logger.info("Training on {0} languages: {1}".format(len(languages), languages))
logger.info("Language to id mapping: ", {
lang: id for id, lang in enumerate(languages)
}
)
mask_whole_words = self._get_whole_word_mask()
lang_datasets = []
for lang_id, language in enumerate(languages):
split_path = os.path.join(data_path, language, split)
dataset = data_utils.load_indexed_dataset(
split_path,
self.source_dictionary,
self.args.dataset_impl,
combine=combine,
)
if dataset is None:
raise FileNotFoundError('Dataset not found: {} ({})'.format(split, split_path))
# create continuous blocks of tokens
dataset = TokenBlockDataset(
dataset,
dataset.sizes,
self.args.tokens_per_sample - 1, # one less for <s>
pad=self.source_dictionary.pad(),
eos=self.source_dictionary.eos(),
break_mode=self.args.sample_break_mode,
)
logger.info('loaded {} blocks from: {}'.format(len(dataset), split_path))
# prepend beginning-of-sentence token (<s>, equiv. to [CLS] in BERT)
dataset = PrependTokenDataset(dataset, self.source_dictionary.bos())
src_dataset, tgt_dataset = MaskTokensDataset.apply_mask(
dataset,
self.source_dictionary,
pad_idx=self.source_dictionary.pad(),
mask_idx=self.mask_idx,
seed=self.args.seed,
mask_prob=self.args.mask_prob,
leave_unmasked_prob=self.args.leave_unmasked_prob,
random_token_prob=self.args.random_token_prob,
freq_weighted_replacement=self.args.freq_weighted_replacement,
mask_whole_words=mask_whole_words,
)
lang_dataset = NestedDictionaryDataset(
{
'net_input': {
'src_tokens': PadDataset(
src_dataset,
pad_idx=self.source_dictionary.pad(),
left_pad=False,
),
'src_lengths': NumelDataset(src_dataset, reduce=False),
},
'target': PadDataset(
tgt_dataset,
pad_idx=self.source_dictionary.pad(),
left_pad=False,
),
'nsentences': NumSamplesDataset(),
'ntokens': NumelDataset(src_dataset, reduce=True),
'lang_id': RawLabelDataset([lang_id] * src_dataset.sizes.shape[0]),
},
sizes=[src_dataset.sizes],
)
lang_datasets.append(lang_dataset)
dataset_lengths = np.array(
[len(d) for d in lang_datasets],
dtype=float,
)
logger.info(
'loaded total {} blocks for all languages'.format(
dataset_lengths.sum(),
)
)
if split == self.args.train_subset:
# For train subset, additionally up or down sample languages.
sample_probs = self._get_sample_prob(dataset_lengths)
logger.info("Sample probability by language: ", {
lang: "{0:.4f}".format(sample_probs[id])
for id, lang in enumerate(languages)
}
)
size_ratio = (sample_probs * dataset_lengths.sum()) / dataset_lengths
logger.info("Up/Down Sampling ratio by language: ", {
lang: "{0:.2f}".format(size_ratio[id])
for id, lang in enumerate(languages)
}
)
resampled_lang_datasets = [
ResamplingDataset(
lang_datasets[i],
size_ratio=size_ratio[i],
seed=self.args.seed,
epoch=epoch,
replace=size_ratio[i] >= 1.0,
)
for i, d in enumerate(lang_datasets)
]
dataset = ConcatDataset(resampled_lang_datasets)
else:
dataset = ConcatDataset(lang_datasets)
lang_splits = [split]
for lang_id, lang_dataset in enumerate(lang_datasets):
split_name = split + '_' + languages[lang_id]
lang_splits.append(split_name)
self.datasets[split_name] = lang_dataset
# [TODO]: This is hacky for now to print validation ppl for each
# language individually. Maybe need task API changes to allow it
# in more generic ways.
if split in self.args.valid_subset:
self.args.valid_subset = self.args.valid_subset.replace(
split, ','.join(lang_splits)
)
with data_utils.numpy_seed(self.args.seed + epoch):
shuffle = np.random.permutation(len(dataset))
self.datasets[split] = SortDataset(
dataset,
sort_order=[
shuffle,
dataset.sizes,
],
)
def build_dataset_for_inference(self, src_tokens, src_lengths, sort=True):
src_dataset = PadDataset(
TokenBlockDataset(
src_tokens,
src_lengths,
self.args.tokens_per_sample - 1, # one less for <s>
pad=self.source_dictionary.pad(),
eos=self.source_dictionary.eos(),
break_mode='eos',
),
pad_idx=self.source_dictionary.pad(),
left_pad=False,
)
src_dataset = PrependTokenDataset(src_dataset, self.source_dictionary.bos())
src_dataset = NestedDictionaryDataset(
{
'id': IdDataset(),
'net_input': {
'src_tokens': src_dataset,
'src_lengths': NumelDataset(src_dataset, reduce=False),
},
},
sizes=src_lengths,
)
if sort:
src_dataset = SortDataset(src_dataset, sort_order=[src_lengths])
return src_dataset
def get_batch_iterator(
self, dataset, max_tokens=None, max_sentences=None, max_positions=None,
ignore_invalid_inputs=False, required_batch_size_multiple=1,
seed=1, num_shards=1, shard_id=0, num_workers=0, epoch=1,
):
# Recreate epoch iterator every epoch cause the underlying
# datasets are dynamic due to sampling.
self.dataset_to_epoch_iter = {}
epoch_iter = super().get_batch_iterator(
dataset, max_tokens, max_sentences, max_positions,
ignore_invalid_inputs, required_batch_size_multiple,
seed, num_shards, shard_id, num_workers, epoch,
)
self.dataset_to_epoch_iter = {}
return epoch_iter
@property
def source_dictionary(self):
return self.dictionary
@property
def target_dictionary(self):
return self.dictionary
| 12,616 | 38.676101 | 98 | py |
RegularizedBN | RegularizedBN-main/fairseq/tasks/multilingual_translation.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections import OrderedDict
import logging
import os
import contextlib
import torch
from fairseq import metrics, options
from fairseq.data import (
Dictionary,
LanguagePairDataset,
RoundRobinZipDatasets,
TransformEosLangPairDataset,
)
from fairseq.models import FairseqMultiModel
from fairseq.tasks.translation import load_langpair_dataset
from . import FairseqTask, register_task
from fairseq import utils
logger = logging.getLogger(__name__)
def _lang_token(lang: str):
return '__{}__'.format(lang)
def _lang_token_index(dic: Dictionary, lang: str):
"""Return language token index."""
idx = dic.index(_lang_token(lang))
assert idx != dic.unk_index, \
'cannot find language token for lang {}'.format(lang)
return idx
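# For example, _lang_token('de') == '__de__', and _lang_token_index(d, 'de')
# returns the id of that symbol; prepare() below adds these symbols to every
# dictionary when --encoder-langtok or --decoder-langtok is used.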
@register_task('multilingual_translation')
class MultilingualTranslationTask(FairseqTask):
"""A task for training multiple translation models simultaneously.
We iterate round-robin over batches from multiple language pairs, ordered
according to the `--lang-pairs` argument.
The training loop is roughly:
for i in range(len(epoch)):
for lang_pair in args.lang_pairs:
batch = next_batch_for_lang_pair(lang_pair)
loss = criterion(model_for_lang_pair(lang_pair), batch)
loss.backward()
optimizer.step()
In practice, `next_batch_for_lang_pair` is abstracted in a FairseqDataset
(e.g., `RoundRobinZipDatasets`) and `model_for_lang_pair` is a model that
implements the `FairseqMultiModel` interface.
During inference it is required to specify a single `--source-lang` and
    `--target-lang`, which indicates the inference language direction.
`--lang-pairs`, `--encoder-langtok`, `--decoder-langtok` have to be set to
the same value as training.
"""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
# fmt: off
parser.add_argument('data', metavar='DIR', help='path to data directory')
parser.add_argument('--lang-pairs', default=None, metavar='PAIRS',
help='comma-separated list of language pairs (in training order): en-de,en-fr,de-fr')
parser.add_argument('-s', '--source-lang', default=None, metavar='SRC',
help='source language (only needed for inference)')
parser.add_argument('-t', '--target-lang', default=None, metavar='TARGET',
help='target language (only needed for inference)')
parser.add_argument('--left-pad-source', default='True', type=str, metavar='BOOL',
help='pad the source on the left (default: True)')
parser.add_argument('--left-pad-target', default='False', type=str, metavar='BOOL',
help='pad the target on the left (default: False)')
parser.add_argument('--max-source-positions', default=1024, type=int, metavar='N',
help='max number of tokens in the source sequence')
parser.add_argument('--max-target-positions', default=1024, type=int, metavar='N',
help='max number of tokens in the target sequence')
parser.add_argument('--upsample-primary', default=1, type=int,
help='amount to upsample primary dataset')
parser.add_argument('--encoder-langtok', default=None, type=str, choices=['src', 'tgt'],
metavar='SRCTGT',
help='replace beginning-of-sentence in source sentence with source or target '
'language token. (src/tgt)')
parser.add_argument('--decoder-langtok', action='store_true',
help='replace beginning-of-sentence in target sentence with target language token')
# fmt: on
def __init__(self, args, dicts, training):
super().__init__(args)
self.dicts = dicts
self.training = training
if training:
self.lang_pairs = args.lang_pairs
else:
self.lang_pairs = ['{}-{}'.format(args.source_lang, args.target_lang)]
# eval_lang_pairs for multilingual translation is usually all of the
        # lang_pairs. However, for other multitask settings, or when we want to
        # optimize for certain languages, we may want to use a different subset. Thus
# the eval_lang_pairs class variable is provided for classes that extend
# this class.
self.eval_lang_pairs = self.lang_pairs
# model_lang_pairs will be used to build encoder-decoder model pairs in
        # models.build_model(). This allows multitask sub-classes to build
        # models for language pairs other than the input lang_pairs.
self.model_lang_pairs = self.lang_pairs
self.langs = list(dicts.keys())
@classmethod
def setup_task(cls, args, **kwargs):
dicts, training = cls.prepare(args, **kwargs)
return cls(args, dicts, training)
@classmethod
def prepare(cls, args, **kargs):
args.left_pad_source = options.eval_bool(args.left_pad_source)
args.left_pad_target = options.eval_bool(args.left_pad_target)
if args.lang_pairs is None:
raise ValueError('--lang-pairs is required. List all the language pairs in the training objective.')
if isinstance(args.lang_pairs, str):
args.lang_pairs = args.lang_pairs.split(',')
sorted_langs = sorted(list({x for lang_pair in args.lang_pairs for x in lang_pair.split('-')}))
if args.source_lang is not None or args.target_lang is not None:
training = False
else:
training = True
# load dictionaries
dicts = OrderedDict()
for lang in sorted_langs:
paths = utils.split_paths(args.data)
assert len(paths) > 0
dicts[lang] = cls.load_dictionary(os.path.join(paths[0], 'dict.{}.txt'.format(lang)))
if len(dicts) > 0:
assert dicts[lang].pad() == dicts[sorted_langs[0]].pad()
assert dicts[lang].eos() == dicts[sorted_langs[0]].eos()
assert dicts[lang].unk() == dicts[sorted_langs[0]].unk()
if args.encoder_langtok is not None or args.decoder_langtok:
for lang_to_add in sorted_langs:
dicts[lang].add_symbol(_lang_token(lang_to_add))
logger.info('[{}] dictionary: {} types'.format(lang, len(dicts[lang])))
return dicts, training
def get_encoder_langtok(self, src_lang, tgt_lang):
if self.args.encoder_langtok is None:
return self.dicts[src_lang].eos()
if self.args.encoder_langtok == 'src':
return _lang_token_index(self.dicts[src_lang], src_lang)
else:
return _lang_token_index(self.dicts[src_lang], tgt_lang)
def get_decoder_langtok(self, tgt_lang):
if not self.args.decoder_langtok:
return self.dicts[tgt_lang].eos()
return _lang_token_index(self.dicts[tgt_lang], tgt_lang)
def alter_dataset_langtok(self, lang_pair_dataset,
src_eos=None, src_lang=None, tgt_eos=None, tgt_lang=None):
if self.args.encoder_langtok is None and not self.args.decoder_langtok:
return lang_pair_dataset
new_src_eos = None
if self.args.encoder_langtok is not None and src_eos is not None \
and src_lang is not None and tgt_lang is not None:
new_src_eos = self.get_encoder_langtok(src_lang, tgt_lang)
else:
src_eos = None
new_tgt_bos = None
if self.args.decoder_langtok and tgt_eos is not None and tgt_lang is not None:
new_tgt_bos = self.get_decoder_langtok(tgt_lang)
else:
tgt_eos = None
return TransformEosLangPairDataset(
lang_pair_dataset,
src_eos=src_eos,
new_src_eos=new_src_eos,
tgt_bos=tgt_eos,
new_tgt_bos=new_tgt_bos,
)
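    # Hedged reading of the wrapper above: with --encoder-langtok set, the
    # source-side <eos> is swapped for the chosen language token (e.g.
    # '__en__'); with --decoder-langtok, the token that starts the decoder
    # input is replaced by the target-language token; with neither flag the
    # dataset is returned unchanged.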
def load_dataset(self, split, epoch=1, **kwargs):
"""Load a dataset split."""
paths = utils.split_paths(self.args.data)
assert len(paths) > 0
data_path = paths[(epoch - 1) % len(paths)]
def language_pair_dataset(lang_pair):
src, tgt = lang_pair.split('-')
langpair_dataset = load_langpair_dataset(
data_path, split, src, self.dicts[src], tgt, self.dicts[tgt],
combine=True, dataset_impl=self.args.dataset_impl,
upsample_primary=self.args.upsample_primary,
left_pad_source=self.args.left_pad_source,
left_pad_target=self.args.left_pad_target,
max_source_positions=self.args.max_source_positions,
max_target_positions=self.args.max_target_positions,
)
return self.alter_dataset_langtok(
langpair_dataset,
src_eos=self.dicts[src].eos(),
src_lang=src,
tgt_eos=self.dicts[tgt].eos(),
tgt_lang=tgt,
)
self.datasets[split] = RoundRobinZipDatasets(
OrderedDict([
(lang_pair, language_pair_dataset(lang_pair))
for lang_pair in self.lang_pairs
]),
eval_key=None if self.training else "%s-%s" % (self.args.source_lang, self.args.target_lang),
)
def build_dataset_for_inference(self, src_tokens, src_lengths, constraints=None):
if constraints is not None:
raise NotImplementedError("Constrained decoding with the multilingual_translation task is not supported")
lang_pair = "%s-%s" % (self.args.source_lang, self.args.target_lang)
return RoundRobinZipDatasets(
OrderedDict([(
lang_pair,
self.alter_dataset_langtok(
LanguagePairDataset(
src_tokens, src_lengths,
self.source_dictionary
),
src_eos=self.source_dictionary.eos(),
src_lang=self.args.source_lang,
tgt_eos=self.target_dictionary.eos(),
tgt_lang=self.args.target_lang,
),
)]),
eval_key=lang_pair,
)
def build_model(self, args):
def check_args():
messages = []
if len(set(self.args.lang_pairs).symmetric_difference(args.lang_pairs)) != 0:
messages.append('--lang-pairs should include all the language pairs {}.'.format(args.lang_pairs))
if self.args.encoder_langtok != args.encoder_langtok:
messages.append('--encoder-langtok should be {}.'.format(args.encoder_langtok))
if self.args.decoder_langtok != args.decoder_langtok:
messages.append('--decoder-langtok should {} be set.'.format("" if args.decoder_langtok else "not"))
if len(messages) > 0:
raise ValueError(' '.join(messages))
        # Check if task args are consistent with model args
check_args()
from fairseq import models
model = models.build_model(args, self)
if not isinstance(model, FairseqMultiModel):
raise ValueError('MultilingualTranslationTask requires a FairseqMultiModel architecture')
return model
def train_step(self, sample, model, criterion, optimizer, update_num, ignore_grad=False):
model.train()
from collections import defaultdict
agg_loss, agg_sample_size, agg_logging_output = 0., 0., defaultdict(float)
curr_lang_pairs = [
lang_pair
for lang_pair in self.model_lang_pairs
if sample[lang_pair] is not None and len(sample[lang_pair]) != 0
]
for idx, lang_pair in enumerate(curr_lang_pairs):
def maybe_no_sync():
if (
self.args.distributed_world_size > 1
and hasattr(model, 'no_sync')
and idx < len(curr_lang_pairs) - 1
):
return model.no_sync()
else:
return contextlib.ExitStack() # dummy contextmanager
with maybe_no_sync():
loss, sample_size, logging_output = criterion(model.models[lang_pair], sample[lang_pair])
if ignore_grad:
loss *= 0
optimizer.backward(loss)
agg_loss += loss.detach().item()
# TODO make summing of the sample sizes configurable
agg_sample_size += sample_size
for k in logging_output:
agg_logging_output[k] += logging_output[k]
agg_logging_output[f"{lang_pair}:{k}"] += logging_output[k]
return agg_loss, agg_sample_size, agg_logging_output
def valid_step(self, sample, model, criterion):
model.eval()
with torch.no_grad():
from collections import defaultdict
agg_loss, agg_sample_size, agg_logging_output = 0., 0., defaultdict(float)
for lang_pair in self.eval_lang_pairs:
if lang_pair not in sample or sample[lang_pair] is None or len(sample[lang_pair]) == 0:
continue
loss, sample_size, logging_output = criterion(model.models[lang_pair], sample[lang_pair])
agg_loss += loss.data.item()
# TODO make summing of the sample sizes configurable
agg_sample_size += sample_size
for k in logging_output:
agg_logging_output[k] += logging_output[k]
agg_logging_output[f"{lang_pair}:{k}"] += logging_output[k]
return agg_loss, agg_sample_size, agg_logging_output
def inference_step(self, generator, models, sample, prefix_tokens=None, constraints=None):
with torch.no_grad():
if self.args.decoder_langtok:
bos_token = _lang_token_index(self.target_dictionary, self.args.target_lang)
else:
bos_token = self.target_dictionary.eos()
return generator.generate(
models,
sample,
prefix_tokens=prefix_tokens,
constraints=constraints,
bos_token=bos_token,
)
def reduce_metrics(self, logging_outputs, criterion):
with metrics.aggregate():
# pass 'sample_size', 'nsentences', 'ntokens' stats to fairseq_task
super().reduce_metrics(logging_outputs, criterion)
for k in ['sample_size', 'nsentences', 'ntokens']:
metrics.log_scalar(k, sum(l[k] for l in logging_outputs))
@property
def source_dictionary(self):
if self.training:
return next(iter(self.dicts.values()))
else:
return self.dicts[self.args.source_lang]
@property
def target_dictionary(self):
if self.training:
return next(iter(self.dicts.values()))
else:
return self.dicts[self.args.target_lang]
def max_positions(self):
"""Return the max sentence length allowed by the task."""
if len(self.datasets.values()) == 0:
return {'%s-%s' % (self.args.source_lang, self.args.target_lang):
(self.args.max_source_positions, self.args.max_target_positions)}
return OrderedDict([
(key, (self.args.max_source_positions, self.args.max_target_positions))
for split in self.datasets.keys()
for key in self.datasets[split].datasets.keys()
])
| 15,948 | 42.936639 | 117 | py |
RegularizedBN | RegularizedBN-main/fairseq/tasks/translation_lev.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import torch
from fairseq.data import LanguagePairDataset
from fairseq.utils import new_arange
from fairseq.tasks import register_task
from fairseq.tasks.translation import TranslationTask, load_langpair_dataset
from fairseq import utils
@register_task('translation_lev')
class TranslationLevenshteinTask(TranslationTask):
"""
Translation (Sequence Generation) task for Levenshtein Transformer
See `"Levenshtein Transformer" <https://arxiv.org/abs/1905.11006>`_.
"""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
# fmt: off
TranslationTask.add_args(parser)
parser.add_argument(
'--noise',
default='random_delete',
choices=['random_delete', 'random_mask', 'no_noise', 'full_mask'])
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
paths = utils.split_paths(self.args.data)
assert len(paths) > 0
data_path = paths[(epoch - 1) % len(paths)]
# infer langcode
src, tgt = self.args.source_lang, self.args.target_lang
self.datasets[split] = load_langpair_dataset(
data_path, split, src, self.src_dict, tgt, self.tgt_dict,
combine=combine, dataset_impl=self.args.dataset_impl,
upsample_primary=self.args.upsample_primary,
left_pad_source=self.args.left_pad_source,
left_pad_target=self.args.left_pad_target,
max_source_positions=self.args.max_source_positions,
max_target_positions=self.args.max_target_positions,
prepend_bos=True,
)
def inject_noise(self, target_tokens):
def _random_delete(target_tokens):
pad = self.tgt_dict.pad()
bos = self.tgt_dict.bos()
eos = self.tgt_dict.eos()
max_len = target_tokens.size(1)
target_mask = target_tokens.eq(pad)
target_score = target_tokens.clone().float().uniform_()
target_score.masked_fill_(
target_tokens.eq(bos) | target_tokens.eq(eos), 0.0)
target_score.masked_fill_(target_mask, 1)
target_score, target_rank = target_score.sort(1)
target_length = target_mask.size(1) - target_mask.float().sum(
1, keepdim=True)
# do not delete <bos> and <eos> (we assign 0 score for them)
target_cutoff = 2 + ((target_length - 2) * target_score.new_zeros(
target_score.size(0), 1).uniform_()).long()
target_cutoff = target_score.sort(1)[1] >= target_cutoff
prev_target_tokens = target_tokens.gather(
1, target_rank).masked_fill_(target_cutoff, pad).gather(
1,
target_rank.masked_fill_(target_cutoff,
max_len).sort(1)[1])
prev_target_tokens = prev_target_tokens[:, :prev_target_tokens.
ne(pad).sum(1).max()]
return prev_target_tokens
def _random_mask(target_tokens):
pad = self.tgt_dict.pad()
bos = self.tgt_dict.bos()
eos = self.tgt_dict.eos()
unk = self.tgt_dict.unk()
target_masks = target_tokens.ne(pad) & \
target_tokens.ne(bos) & \
target_tokens.ne(eos)
target_score = target_tokens.clone().float().uniform_()
target_score.masked_fill_(~target_masks, 2.0)
target_length = target_masks.sum(1).float()
target_length = target_length * target_length.clone().uniform_()
target_length = target_length + 1 # make sure to mask at least one token.
_, target_rank = target_score.sort(1)
target_cutoff = new_arange(target_rank) < target_length[:, None].long()
prev_target_tokens = target_tokens.masked_fill(
target_cutoff.scatter(1, target_rank, target_cutoff), unk)
return prev_target_tokens
def _full_mask(target_tokens):
pad = self.tgt_dict.pad()
bos = self.tgt_dict.bos()
eos = self.tgt_dict.eos()
unk = self.tgt_dict.unk()
target_mask = target_tokens.eq(bos) | target_tokens.eq(
eos) | target_tokens.eq(pad)
return target_tokens.masked_fill(~target_mask, unk)
if self.args.noise == 'random_delete':
return _random_delete(target_tokens)
elif self.args.noise == 'random_mask':
return _random_mask(target_tokens)
elif self.args.noise == 'full_mask':
return _full_mask(target_tokens)
elif self.args.noise == 'no_noise':
return target_tokens
else:
raise NotImplementedError
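    # Rough effect of the three noise types on a toy target
    # "<bos> a b c <eos>" (exact positions are drawn at random):
    #   random_delete: drops a random subset of {a, b, c}, never <bos>/<eos>
    #   random_mask:   replaces a random, non-empty subset of {a, b, c}
    #                  with <unk>
    #   full_mask:     replaces all of a, b, c with <unk>, keeping only
    #                  <bos>, <eos> and padding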
def build_generator(self, models, args):
# add models input to match the API for SequenceGenerator
from fairseq.iterative_refinement_generator import IterativeRefinementGenerator
return IterativeRefinementGenerator(
self.target_dictionary,
eos_penalty=getattr(args, 'iter_decode_eos_penalty', 0.0),
max_iter=getattr(args, 'iter_decode_max_iter', 10),
beam_size=getattr(args, 'iter_decode_with_beam', 1),
reranking=getattr(args, 'iter_decode_with_external_reranker', False),
decoding_format=getattr(args, 'decoding_format', None),
adaptive=not getattr(args, 'iter_decode_force_max_iter', False),
retain_history=getattr(args, 'retain_iter_history', False))
def build_dataset_for_inference(self, src_tokens, src_lengths, constraints=None):
if constraints is not None:
# Though see Susanto et al. (ACL 2020): https://www.aclweb.org/anthology/2020.acl-main.325/
raise NotImplementedError("Constrained decoding with the translation_lev task is not supported")
return LanguagePairDataset(
src_tokens, src_lengths, self.source_dictionary, append_bos=True
)
def train_step(self,
sample,
model,
criterion,
optimizer,
update_num,
ignore_grad=False):
model.train()
sample['prev_target'] = self.inject_noise(sample['target'])
loss, sample_size, logging_output = criterion(model, sample)
if ignore_grad:
loss *= 0
optimizer.backward(loss)
return loss, sample_size, logging_output
def valid_step(self, sample, model, criterion):
model.eval()
with torch.no_grad():
sample['prev_target'] = self.inject_noise(sample['target'])
loss, sample_size, logging_output = criterion(model, sample)
return loss, sample_size, logging_output
| 7,220 | 40.5 | 108 | py |
RegularizedBN | RegularizedBN-main/fairseq/tasks/fairseq_task.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import warnings
import torch
from fairseq import metrics, search, tokenizer, utils
from fairseq.data import data_utils, FairseqDataset, iterators, Dictionary
from fairseq.models.LSUV import LSUVinit
logger = logging.getLogger(__name__)
class FairseqTask(object):
"""
Tasks store dictionaries and provide helpers for loading/iterating over
Datasets, initializing the Model/Criterion and calculating the loss.
"""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
pass
@staticmethod
def logging_outputs_can_be_summed(criterion) -> bool:
"""
Whether the logging outputs returned by `train_step` and `valid_step` can
be summed across workers prior to calling `aggregate_logging_outputs`.
        Setting this to True will improve distributed training speed.
"""
return criterion.logging_outputs_can_be_summed()
def __init__(self, args):
self.args = args
self.datasets = {}
self.dataset_to_epoch_iter = {}
self.lsuv = 0
@classmethod
def load_dictionary(cls, filename):
"""Load the dictionary from the filename
Args:
filename (str): the filename
"""
return Dictionary.load(filename)
@classmethod
def build_dictionary(
cls, filenames, workers=1, threshold=-1, nwords=-1, padding_factor=8
):
"""Build the dictionary
Args:
filenames (list): list of filenames
workers (int): number of concurrent workers
threshold (int): defines the minimum word count
nwords (int): defines the total number of words in the final dictionary,
including special symbols
padding_factor (int): can be used to pad the dictionary size to be a
multiple of 8, which is important on some hardware (e.g., Nvidia
Tensor Cores).
"""
d = Dictionary()
for filename in filenames:
Dictionary.add_file_to_dictionary(
filename, d, tokenizer.tokenize_line, workers
)
d.finalize(threshold=threshold, nwords=nwords, padding_factor=padding_factor)
return d
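    # Hedged usage sketch (file names are placeholders):
    #
    #   d = FairseqTask.build_dictionary(['train.en', 'valid.en'],
    #                                    workers=4, threshold=5)
    #   d.save('dict.en.txt')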
@classmethod
def setup_task(cls, args, **kwargs):
"""Setup the task (e.g., load dictionaries).
Args:
args (argparse.Namespace): parsed command-line arguments
"""
return cls(args, **kwargs)
def has_sharded_data(self, split):
return (os.pathsep in getattr(self.args, 'data', ''))
def load_dataset(self, split, combine=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
raise NotImplementedError
def dataset(self, split):
"""
Return a loaded dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
Returns:
a :class:`~fairseq.data.FairseqDataset` corresponding to *split*
"""
from fairseq.data import FairseqDataset
if split not in self.datasets:
raise KeyError("Dataset not loaded: " + split)
if not isinstance(self.datasets[split], FairseqDataset):
raise TypeError("Datasets are expected to be of type FairseqDataset")
return self.datasets[split]
def filter_indices_by_size(
self,
indices,
dataset,
max_positions=None,
ignore_invalid_inputs=False,
):
"""
Filter examples that are too large
Args:
indices (np.array): original array of sample indices
dataset (~fairseq.data.FairseqDataset): dataset to batch
max_positions (optional): max sentence length supported by the
model (default: None).
ignore_invalid_inputs (bool, optional): don't raise Exception for
sentences that are too long (default: False).
Returns:
np.array: array of filtered sample indices
"""
indices, ignored = dataset.filter_indices_by_size(indices, max_positions)
if len(ignored) > 0:
if not ignore_invalid_inputs:
raise Exception((
'Size of sample #{} is invalid (={}) since max_positions={}, '
'skip this example with --skip-invalid-size-inputs-valid-test'
).format(ignored[0], dataset.size(ignored[0]), max_positions))
logger.warning((
'{} samples have invalid sizes and will be skipped, '
'max_positions={}, first few sample ids={}'
).format(len(ignored), max_positions, ignored[:10]))
return indices
def get_batch_iterator(
self,
dataset,
max_tokens=None,
max_sentences=None,
max_positions=None,
ignore_invalid_inputs=False,
required_batch_size_multiple=1,
seed=1,
num_shards=1,
shard_id=0,
num_workers=0,
epoch=1,
):
"""
Get an iterator that yields batches of data from the given dataset.
Args:
dataset (~fairseq.data.FairseqDataset): dataset to batch
max_tokens (int, optional): max number of tokens in each batch
(default: None).
max_sentences (int, optional): max number of sentences in each
batch (default: None).
max_positions (optional): max sentence length supported by the
model (default: None).
ignore_invalid_inputs (bool, optional): don't raise Exception for
sentences that are too long (default: False).
required_batch_size_multiple (int, optional): require batch size to
be a multiple of N (default: 1).
seed (int, optional): seed for random number generator for
reproducibility (default: 1).
num_shards (int, optional): shard the data iterator into N
shards (default: 1).
shard_id (int, optional): which shard of the data iterator to
return (default: 0).
num_workers (int, optional): how many subprocesses to use for data
loading. 0 means the data will be loaded in the main process
(default: 0).
epoch (int, optional): the epoch to start the iterator from
(default: 1).
Returns:
~fairseq.iterators.EpochBatchIterator: a batched iterator over the
given dataset split
"""
# For default fairseq task, return same iterator across epochs
        # as datasets are not dynamic; this can be overridden in task-specific
        # settings.
if dataset in self.dataset_to_epoch_iter:
return self.dataset_to_epoch_iter[dataset]
assert isinstance(dataset, FairseqDataset)
# initialize the dataset with the correct starting epoch
dataset.set_epoch(epoch) #pass, nothing
# get indices ordered by example size
with data_utils.numpy_seed(seed):
indices = dataset.ordered_indices()
# filter examples that are too large
if max_positions is not None:
indices = self.filter_indices_by_size(
indices, dataset, max_positions, ignore_invalid_inputs
)
# create mini-batches with given size constraints
batch_sampler = dataset.batch_by_size( #c++ code, can't change, but I have reproduced it in python
indices,
max_tokens=max_tokens,
max_sentences=max_sentences,
required_batch_size_multiple=required_batch_size_multiple,
my_batching=self.args.my_batching,
tol=self.args.batching_tol
)
# return a reusable, sharded iterator
epoch_iter = iterators.EpochBatchIterator(
dataset=dataset,
collate_fn=dataset.collater,
batch_sampler=batch_sampler,
seed=seed,
num_shards=num_shards,
shard_id=shard_id,
num_workers=num_workers,
epoch=epoch,
buffer_size=getattr(self.args, 'data_buffer_size', 0),
)
self.dataset_to_epoch_iter[dataset] = epoch_iter
return epoch_iter
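    # Hedged sketch of a typical call from a training loop (all values are
    # placeholders; next_epoch_itr comes from EpochBatchIterator):
    #
    #   itr = task.get_batch_iterator(
    #       dataset=task.dataset('train'),
    #       max_tokens=4096,
    #       max_positions=(1024, 1024),
    #       seed=1,
    #   ).next_epoch_itr(shuffle=True)
    #   for sample in itr:
    #       ...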
def build_model(self, args):
"""
Build the :class:`~fairseq.models.BaseFairseqModel` instance for this
task.
Args:
args (argparse.Namespace): parsed command-line arguments
Returns:
a :class:`~fairseq.models.BaseFairseqModel` instance
"""
from fairseq import models, quantization_utils
model = models.build_model(args, self) #<class 'fairseq.models.transformer.TransformerModel'>
if getattr(args, 'tpu', False):
model.prepare_for_tpu_()
model = quantization_utils.quantize_model_scalar(model, args)
return model
def build_criterion(self, args):
"""
Build the :class:`~fairseq.criterions.FairseqCriterion` instance for
this task.
Args:
args (argparse.Namespace): parsed command-line arguments
Returns:
a :class:`~fairseq.criterions.FairseqCriterion` instance
"""
from fairseq import criterions
return criterions.build_criterion(args, self)
def build_generator(
self, models, args,
seq_gen_cls=None, extra_gen_cls_kwargs=None
):
if getattr(args, "score_reference", False): #false
from fairseq.sequence_scorer import SequenceScorer
return SequenceScorer(
self.target_dictionary,
compute_alignment=getattr(args, "print_alignment", False),
)
from fairseq.sequence_generator import (
SequenceGenerator,
SequenceGeneratorWithAlignment,
)
# Choose search strategy. Defaults to Beam Search.
sampling = getattr(args, "sampling", False) #default
sampling_topk = getattr(args, "sampling_topk", -1) #default
sampling_topp = getattr(args, "sampling_topp", -1.0) #default
diverse_beam_groups = getattr(args, "diverse_beam_groups", -1) #default
diverse_beam_strength = getattr(args, "diverse_beam_strength", 0.5) #default
match_source_len = getattr(args, "match_source_len", False) #default
diversity_rate = getattr(args, "diversity_rate", -1) #default
constrained = getattr(args, "constraints", False) #default
if (
sum(
int(cond)
for cond in [
sampling,
diverse_beam_groups > 0,
match_source_len,
diversity_rate > 0,
]
)
> 1
): #false
raise ValueError("Provided Search parameters are mutually exclusive.")
assert sampling_topk < 0 or sampling, "--sampling-topk requires --sampling"
assert sampling_topp < 0 or sampling, "--sampling-topp requires --sampling"
if sampling: #false
search_strategy = search.Sampling(
self.target_dictionary, sampling_topk, sampling_topp
)
elif diverse_beam_groups > 0: #false
search_strategy = search.DiverseBeamSearch(
self.target_dictionary, diverse_beam_groups, diverse_beam_strength
)
elif match_source_len: #false
# this is useful for tagging applications where the output
# length should match the input length, so we hardcode the
# length constraints for simplicity
search_strategy = search.LengthConstrainedBeamSearch(
self.target_dictionary,
min_len_a=1,
min_len_b=0,
max_len_a=1,
max_len_b=0,
)
elif diversity_rate > -1: #false
search_strategy = search.DiverseSiblingsSearch(
self.target_dictionary, diversity_rate
)
elif constrained: #false
search_strategy = search.LexicallyConstrainedBeamSearch(self.target_dictionary, args.constraints)
else:
search_strategy = search.BeamSearch(self.target_dictionary)
if seq_gen_cls is None: #true
if getattr(args, "print_alignment", False): #false
seq_gen_cls = SequenceGeneratorWithAlignment
else:
seq_gen_cls = SequenceGenerator
extra_gen_cls_kwargs = extra_gen_cls_kwargs or {}
return seq_gen_cls(
models,
self.target_dictionary,
beam_size=getattr(args, "beam", 5),
max_len_a=getattr(args, "max_len_a", 0),
max_len_b=getattr(args, "max_len_b", 200),
min_len=getattr(args, "min_len", 1),
normalize_scores=(not getattr(args, "unnormalized", False)),
len_penalty=getattr(args, "lenpen", 1),
unk_penalty=getattr(args, "unkpen", 0),
temperature=getattr(args, "temperature", 1.0),
match_source_len=getattr(args, "match_source_len", False),
no_repeat_ngram_size=getattr(args, "no_repeat_ngram_size", 0),
search_strategy=search_strategy,
**extra_gen_cls_kwargs,
)
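    # Hedged note (added for illustration): the getattr defaults above mean a
    # plain `fairseq-generate` run falls through to search.BeamSearch, while
    # e.g. `--sampling --sampling-topk 10` selects search.Sampling. A typical
    # call site is sketched below; `models` is a list of loaded models and
    # `sample` a batch from this task's dataset:
    #
    #     generator = task.build_generator(models, args)
    #     hypos = task.inference_step(generator, models, sample)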
def train_step(
self, sample, model, criterion, optimizer, update_num, ignore_grad=False
):
"""
Do forward and backward, and return the loss as computed by *criterion*
for the given *model* and *sample*.
Args:
sample (dict): the mini-batch. The format is defined by the
:class:`~fairseq.data.FairseqDataset`.
model (~fairseq.models.BaseFairseqModel): the model
criterion (~fairseq.criterions.FairseqCriterion): the criterion
optimizer (~fairseq.optim.FairseqOptimizer): the optimizer
update_num (int): the current update
ignore_grad (bool): multiply loss by 0 if this is set to True
Returns:
tuple:
- the loss
- the sample size, which is used as the denominator for the
gradient
- logging outputs to display while training
"""
model.train()
model.set_num_updates(update_num)
#if self.lsuv%1101==0 and self.lsuv==0:
# #print(self.encoder);exit()
# model.encoder = LSUVinit(model.encoder, sample['net_input']['src_tokens'], needed_std = 1.0, std_tol = 0.1, max_attempts = 10, needed_mean = 0., do_orthonorm = False)
#self.lsuv += 1
with torch.autograd.profiler.record_function("forward"):
loss, sample_size, logging_output = criterion(model, sample)
if ignore_grad:
loss *= 0
with torch.autograd.profiler.record_function("backward"):
optimizer.backward(loss)
return loss, sample_size, logging_output
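    # Hedged sketch (added comment): assuming the caller is fairseq's Trainer,
    # a single update roughly amounts to
    #
    #     loss, sample_size, logging_output = task.train_step(
    #         sample, model, criterion, optimizer, update_num
    #     )
    #     optimizer.step()
    #     optimizer.zero_grad()
    #
    # since the backward pass already happens inside train_step via
    # optimizer.backward(loss); gradient scaling and clipping are the trainer's job.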
def valid_step(self, sample, model, criterion):
model.eval()
with torch.no_grad():
loss, sample_size, logging_output = criterion(model, sample)
return loss, sample_size, logging_output
def inference_step(self, generator, models, sample, prefix_tokens=None, constraints=None):
with torch.no_grad():
return generator.generate(models, sample, prefix_tokens=prefix_tokens, constraints=constraints)
def begin_epoch(self, epoch, model):
"""Hook function called before the start of each epoch."""
pass
def aggregate_logging_outputs(self, logging_outputs, criterion):
"""[deprecated] Aggregate logging outputs from data parallel training."""
utils.deprecation_warning(
"The aggregate_logging_outputs API is deprecated. "
"Please use the reduce_metrics API instead."
)
with metrics.aggregate() as agg:
self.reduce_metrics(logging_outputs, criterion)
return agg.get_smoothed_values()
def reduce_metrics(self, logging_outputs, criterion):
"""Aggregate logging outputs from data parallel training."""
# backward compatibility for tasks that override aggregate_logging_outputs
base_func = FairseqTask.aggregate_logging_outputs
self_func = getattr(self, "aggregate_logging_outputs").__func__
if self_func is not base_func:
utils.deprecation_warning(
"Tasks should implement the reduce_metrics API. "
"Falling back to deprecated aggregate_logging_outputs API."
)
agg_logging_outputs = self.aggregate_logging_outputs(
logging_outputs, criterion
)
for k, v in agg_logging_outputs.items():
metrics.log_scalar(k, v)
return
if not any("ntokens" in log for log in logging_outputs):
warnings.warn(
"ntokens not found in Criterion logging outputs, cannot log wpb or wps"
)
else:
ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
metrics.log_scalar("wpb", ntokens, priority=180, round=1)
metrics.log_speed("wps", ntokens, priority=90, round=1)
if not any("nsentences" in log for log in logging_outputs):
warnings.warn(
"nsentences not found in Criterion logging outputs, cannot log bsz"
)
else:
nsentences = sum(log.get("nsentences", 0) for log in logging_outputs)
metrics.log_scalar("bsz", nsentences, priority=190, round=1)
criterion.__class__.reduce_metrics(logging_outputs)
def max_positions(self):
"""Return the max input length allowed by the task."""
return None
@property
def source_dictionary(self):
"""Return the source :class:`~fairseq.data.Dictionary` (if applicable
for this task)."""
raise NotImplementedError
@property
def target_dictionary(self):
"""Return the target :class:`~fairseq.data.Dictionary` (if applicable
for this task)."""
raise NotImplementedError
| 18,544 | 37.635417 | 179 | py |
RegularizedBN | RegularizedBN-main/fairseq/tasks/translation_multi_simple_epoch.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import datetime
import time
import torch
from fairseq.data import (
data_utils,
FairseqDataset,
iterators,
LanguagePairDataset,
ListDataset,
)
from fairseq.tasks import FairseqTask, register_task
from fairseq.data.multilingual.sampling_method import SamplingMethod
from fairseq.data.multilingual.multilingual_data_manager import MultilingualDatasetManager
###
def get_time_gap(s, e):
return (datetime.datetime.fromtimestamp(e) - datetime.datetime.fromtimestamp(s)).__str__()
###
logger = logging.getLogger(__name__)
@register_task('translation_multi_simple_epoch')
class TranslationMultiSimpleEpochTask(FairseqTask):
"""
Translate from one (source) language to another (target) language.
Args:
langs (List[str]): a list of languages that are being supported
dicts (Dict[str, fairseq.data.Dictionary]): mapping from supported languages to their dictionaries
training (bool): whether the task should be configured for training or not
.. note::
The translation task is compatible with :mod:`fairseq-train`,
:mod:`fairseq-generate` and :mod:`fairseq-interactive`.
The translation task provides the following additional command-line
arguments:
.. argparse::
:ref: fairseq.tasks.translation_parser
:prog:
"""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
# fmt: off
parser.add_argument('-s', '--source-lang', default=None, metavar='SRC',
help='inference source language')
parser.add_argument('-t', '--target-lang', default=None, metavar='TARGET',
help='inference target language')
parser.add_argument('--lang-pairs', default=None, metavar='PAIRS',
help='comma-separated list of language pairs (in training order): en-de,en-fr,de-fr')
parser.add_argument('--keep-inference-langtok', action='store_true',
help='keep language tokens in inference output (e.g. for analysis or debugging)')
SamplingMethod.add_arguments(parser)
MultilingualDatasetManager.add_args(parser)
# fmt: on
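    # Hedged example (added comment): with the arguments above, a multilingual
    # run is typically launched along the lines of
    #
    #     fairseq-train <data-dir> \
    #         --task translation_multi_simple_epoch \
    #         --lang-pairs en-de,en-fr \
    #         --sampling-method temperature --sampling-temperature 1.5
    #
    # <data-dir> is a placeholder and the sampling flags are defined by
    # SamplingMethod.add_arguments(); consult that class for the exact names.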
def __init__(self, args, langs, dicts, training):
super().__init__(args)
self.langs = langs
self.dicts = dicts
self.training = training
if training:
self.lang_pairs = args.lang_pairs
else:
self.lang_pairs = ['{}-{}'.format(args.source_lang, args.target_lang)]
        # eval_lang_pairs for multilingual translation is usually all of the
        # lang_pairs. However, for other multitask settings, or when we want to
        # optimize for certain languages, we may want to use a different subset.
        # The eval_lang_pairs class variable is therefore provided for classes
        # that extend this class.
self.eval_lang_pairs = self.lang_pairs
        # model_lang_pairs will be used to build encoder-decoder model pairs in
        # models.build_model(). This allows a multitask sub-class to build
        # models for language pairs other than the input lang_pairs.
self.model_lang_pairs = self.lang_pairs
self.sampling_method = SamplingMethod.build_sampler(args, self)
self.data_manager = MultilingualDatasetManager.setup_data_manager(
args, self.lang_pairs, langs, dicts, self.sampling_method)
@classmethod
def setup_task(cls, args, **kwargs):
langs, dicts, training = MultilingualDatasetManager.prepare(
cls.load_dictionary, args, **kwargs
)
return cls(args, langs, dicts, training)
def has_sharded_data(self, split):
return self.data_manager.has_sharded_data(split)
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
if split in self.datasets:
dataset = self.datasets[split]
if self.has_sharded_data(split) and dataset.load_next_shard:
shard_epoch = dataset.shard_epoch
else:
                # no need to load the next shard, so skip loading;
                # this also avoids always loading from the beginning of the data
return
else:
shard_epoch = None
logger.info(f'loading data for {split} epoch={epoch}/{shard_epoch}')
self.datasets[split] = self.data_manager.load_sampled_multi_epoch_dataset(
split,
self.training,
epoch=epoch, combine=combine, shard_epoch=shard_epoch, **kwargs
)
def build_dataset_for_inference(self, src_tokens, src_lengths, constraints=None):
if constraints is not None:
raise NotImplementedError("Constrained decoding with the multilingual_translation task is not supported")
src_data = ListDataset(src_tokens, src_lengths)
dataset = LanguagePairDataset(src_data, src_lengths, self.source_dictionary)
src_langtok_spec, tgt_langtok_spec = self.args.langtoks['main']
if self.args.lang_tok_replacing_bos_eos:
dataset = self.data_manager.alter_dataset_langtok(
dataset,
src_eos=self.source_dictionary.eos(),
src_lang=self.args.source_lang,
tgt_eos=self.target_dictionary.eos(),
tgt_lang=self.args.target_lang,
src_langtok_spec=src_langtok_spec,
tgt_langtok_spec=tgt_langtok_spec,
)
else:
dataset.src = self.data_manager.src_dataset_tranform_func(
self.args.source_lang,
self.args.target_lang,
dataset=dataset.src,
spec=src_langtok_spec,
)
return dataset
def build_generator(
self, models, args,
seq_gen_cls=None, extra_gen_cls_kwargs=None,
):
if not getattr(args, 'keep_inference_langtok', False):
_, tgt_langtok_spec = self.args.langtoks['main']
if tgt_langtok_spec:
tgt_lang_tok = self.data_manager.get_decoder_langtok(self.args.target_lang, tgt_langtok_spec)
extra_gen_cls_kwargs = extra_gen_cls_kwargs or {}
extra_gen_cls_kwargs['symbols_to_strip_from_output'] = {tgt_lang_tok}
return super().build_generator(
models, args,
seq_gen_cls=None,
extra_gen_cls_kwargs=extra_gen_cls_kwargs
)
def build_model(self, args):
return super().build_model(args)
def valid_step(self, sample, model, criterion):
loss, sample_size, logging_output = super().valid_step(sample, model, criterion)
return loss, sample_size, logging_output
def inference_step(self, generator, models, sample, prefix_tokens=None, constraints=None):
with torch.no_grad():
_, tgt_langtok_spec = self.args.langtoks['main']
if not self.args.lang_tok_replacing_bos_eos:
if prefix_tokens is None and tgt_langtok_spec:
tgt_lang_tok = self.data_manager.get_decoder_langtok(self.args.target_lang, tgt_langtok_spec)
src_tokens = sample['net_input']['src_tokens']
bsz = src_tokens.size(0)
prefix_tokens = torch.LongTensor(
[[tgt_lang_tok]]
).expand(bsz, 1).to(src_tokens)
return generator.generate(
models,
sample,
prefix_tokens=prefix_tokens,
constraints=constraints,
)
else:
return generator.generate(
models,
sample,
prefix_tokens=prefix_tokens,
bos_token=self.data_manager.get_decoder_langtok(self.args.target_lang, tgt_langtok_spec)
if tgt_langtok_spec else self.target_dictionary.eos(),
)
def reduce_metrics(self, logging_outputs, criterion):
super().reduce_metrics(logging_outputs, criterion)
def max_positions(self):
"""Return the max sentence length allowed by the task."""
return (self.args.max_source_positions, self.args.max_target_positions)
@property
def source_dictionary(self):
if self.training:
return next(iter(self.dicts.values()))
else:
return self.dicts[self.args.source_lang]
@property
def target_dictionary(self):
if self.training:
return next(iter(self.dicts.values()))
else:
return self.dicts[self.args.target_lang]
def create_batch_sampler_func(
self, max_positions, ignore_invalid_inputs,
max_tokens, max_sentences
):
def construct_batch_sampler(
dataset, epoch
):
splits = [s for s, _ in self.datasets.items() if self.datasets[s] == dataset]
split = splits[0] if len(splits) > 0 else None
if epoch is not None:
dataset.set_epoch(epoch)
start_time = time.time()
# get indices ordered by example size
indices = dataset.ordered_indices()
logger.debug(f'[{split}] @batch_sampler order indices time: {get_time_gap(start_time, time.time())}')
# filter examples that are too large
if max_positions is not None:
my_time = time.time()
indices = self.filter_indices_by_size(
indices,
dataset,
max_positions,
ignore_invalid_inputs=ignore_invalid_inputs,
)
logger.debug(f'[{split}] @batch_sampler filter_by_size time: {get_time_gap(my_time, time.time())}')
# create mini-batches with given size constraints
my_time = time.time()
batch_sampler = data_utils.batch_by_size(
indices, dataset.num_tokens, max_tokens=max_tokens, max_sentences=max_sentences,
)
logger.debug(f'[{split}] @batch_sampler batch_by_size time: {get_time_gap(my_time, time.time())}')
logger.debug(f'[{split}] per epoch batch_sampler set-up time: {get_time_gap(start_time, time.time())}')
return batch_sampler
return construct_batch_sampler
# we need to override get_batch_iterator because we want to reset the epoch iterator each time
def get_batch_iterator(
self, dataset, max_tokens=None, max_sentences=None, max_positions=None,
ignore_invalid_inputs=False, required_batch_size_multiple=1,
seed=1, num_shards=1, shard_id=0, num_workers=0, epoch=1,
):
"""
Get an iterator that yields batches of data from the given dataset.
Args:
dataset (~fairseq.data.FairseqDataset): dataset to batch
max_tokens (int, optional): max number of tokens in each batch
(default: None).
max_sentences (int, optional): max number of sentences in each
batch (default: None).
max_positions (optional): max sentence length supported by the
model (default: None).
ignore_invalid_inputs (bool, optional): don't raise Exception for
sentences that are too long (default: False).
required_batch_size_multiple (int, optional): require batch size to
be a multiple of N (default: 1).
seed (int, optional): seed for random number generator for
reproducibility (default: 1).
num_shards (int, optional): shard the data iterator into N
shards (default: 1).
shard_id (int, optional): which shard of the data iterator to
return (default: 0).
num_workers (int, optional): how many subprocesses to use for data
loading. 0 means the data will be loaded in the main process
(default: 0).
epoch (int, optional): the epoch to start the iterator from
                (default: 1).
Returns:
~fairseq.iterators.EpochBatchIterator: a batched iterator over the
given dataset split
"""
# initialize the dataset with the correct starting epoch
assert isinstance(dataset, FairseqDataset)
if dataset in self.dataset_to_epoch_iter:
return self.dataset_to_epoch_iter[dataset]
if (
self.args.sampling_method == 'RoundRobin'
):
batch_iter = super().get_batch_iterator(
dataset, max_tokens=max_tokens, max_sentences=max_sentences, max_positions=max_positions,
ignore_invalid_inputs=ignore_invalid_inputs, required_batch_size_multiple=required_batch_size_multiple,
seed=seed, num_shards=num_shards, shard_id=shard_id, num_workers=num_workers, epoch=epoch,
)
self.dataset_to_epoch_iter[dataset] = batch_iter
return batch_iter
construct_batch_sampler = self.create_batch_sampler_func(
max_positions, ignore_invalid_inputs,
max_tokens, max_sentences)
epoch_iter = iterators.EpochBatchIterator(
dataset=dataset,
collate_fn=dataset.collater,
batch_sampler=construct_batch_sampler,
seed=seed,
num_shards=num_shards,
shard_id=shard_id,
num_workers=num_workers,
epoch=epoch,
)
return epoch_iter
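    # Hedged usage sketch (added comment): the returned iterator is consumed the
    # same way as the base task's, e.g.
    #
    #     epoch_itr = task.get_batch_iterator(
    #         dataset=task.dataset('train'),
    #         max_tokens=4096,                      # illustrative value
    #         max_positions=(1024, 1024),           # illustrative value
    #     )
    #     for sample in epoch_itr.next_epoch_itr(shuffle=True):
    #         ...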
| 13,953 | 41.284848 | 119 | py |
RegularizedBN | RegularizedBN-main/docs/conf.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# fairseq documentation build configuration file, created by
# sphinx-quickstart on Fri Aug 17 21:45:30 2018.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
import os
import sys
# source code directory, relative to this file, for sphinx-autobuild
sys.path.insert(0, os.path.abspath('..'))
source_suffix = ['.rst']
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon',
'sphinxarg.ext',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'fairseq'
copyright = '2019, Facebook AI Research (FAIR)'
author = 'Facebook AI Research (FAIR)'
github_doc_root = 'https://github.com/pytorch/fairseq/tree/master/docs/'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.9.0'
# The full version, including alpha/beta/rc tags.
release = '0.9.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
highlight_language = 'python'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_context = {
'css_files': [
'_static/theme_overrides.css', # override wide tables in RTD theme
],
}
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
#html_sidebars = {
# '**': [
# 'about.html',
# 'navigation.html',
# 'relations.html', # needs 'show_related': True theme option to display
# 'searchbox.html',
# 'donate.html',
# ]
#}
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'numpy': ('http://docs.scipy.org/doc/numpy/', None),
'python': ('https://docs.python.org/', None),
'torch': ('https://pytorch.org/docs/master/', None),
}
| 4,235 | 30.849624 | 80 | py |
RegularizedBN | RegularizedBN-main/fairseq_cli/train_bn.py | #!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Train a new model on one or across multiple GPUs.
"""
#****************
#for testing BN
#****************
import argparse
import logging
import math
import random
import sys
import numpy as np
from scipy import io
import torch
from fairseq import (
checkpoint_utils,
distributed_utils,
options,
quantization_utils,
tasks,
utils,
)
from fairseq.data import iterators
from fairseq.logging import meters, metrics, progress_bar
from fairseq.model_parallel.megatron_trainer import MegatronTrainer
from fairseq.trainer import Trainer
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=logging.INFO,
stream=sys.stdout,
)
logger = logging.getLogger("fairseq_cli.train")
def main(args):
utils.import_user_module(args)
assert (
args.max_tokens is not None or args.max_sentences is not None
), "Must specify batch size either with --max-tokens or --max-sentences"
metrics.reset()
np.random.seed(args.seed)
utils.set_torch_seed(args.seed)
if distributed_utils.is_master(args):
checkpoint_utils.verify_checkpoint_directory(args.save_dir)
# Print args
logger.info(args)
#print('haha111')
#print(tasks)
# Setup task, e.g., translation, language modeling, etc.
task = tasks.setup_task(args) #fairseq.tasks.translation.TranslationTask
#print(task)
#print(args.valid_subset): 'valid'
# Load valid dataset (we load training data below, based on the latest checkpoint)
for valid_sub_split in args.valid_subset.split(","):
task.load_dataset(valid_sub_split, combine=False, epoch=1)
# Build model and criterion
model = task.build_model(args)
#exit(1)
criterion = task.build_criterion(args)
logger.info(model)
logger.info("task: {} ({})".format(args.task, task.__class__.__name__))
logger.info("model: {} ({})".format(args.arch, model.__class__.__name__))
logger.info(
"criterion: {} ({})".format(args.criterion, criterion.__class__.__name__)
)
logger.info(
"num. model params: {} (num. trained: {})".format(
sum(p.numel() for p in model.parameters()),
sum(p.numel() for p in model.parameters() if p.requires_grad),
)
)
# (optionally) Configure quantization
if args.quantization_config_path is not None:
quantizer = quantization_utils.Quantizer(
config_path=args.quantization_config_path,
max_epoch=args.max_epoch,
max_update=args.max_update,
)
else:
quantizer = None
# Build trainer
if args.model_parallel_size == 1:
trainer = Trainer(args, task, model, criterion, quantizer)
else:
trainer = MegatronTrainer(args, task, model, criterion)
logger.info(
"training on {} devices (GPUs/TPUs)".format(args.distributed_world_size)
)
logger.info(
"max tokens per GPU = {} and max sentences per GPU = {}".format(
args.max_tokens, args.max_sentences
)
)
# Load the latest checkpoint if one is available and restore the
# corresponding train iterator
extra_state, epoch_itr = checkpoint_utils.load_checkpoint(args, trainer)
#print(epoch_itr);fairseq.data.iterators.EpochBatchIterator object at 0x7f7fe00380f0
#
#epoch_itr is the train iterator
# Train until the learning rate gets too small
max_epoch = args.max_epoch or math.inf
lr = trainer.get_lr()
train_meter = meters.StopwatchMeter()
train_meter.start()
while lr > args.min_lr and epoch_itr.next_epoch_idx <= max_epoch:
# train for one epoch
valid_losses, should_stop = train(args, trainer, task, epoch_itr)
if should_stop:
break
# only use first validation loss to update the learning rate
lr = trainer.lr_step(epoch_itr.epoch, valid_losses[0])
epoch_itr = trainer.get_train_iterator(
epoch_itr.next_epoch_idx,
# sharded data: get train iterator for next epoch
load_dataset=task.has_sharded_data("train"),
)
train_meter.stop()
logger.info("done training in {:.1f} seconds".format(train_meter.sum))
def should_stop_early(args, valid_loss):
# skip check if no validation was done in the current epoch
if valid_loss is None:
return False
if args.patience <= 0:
return False
def is_better(a, b):
return a > b if args.maximize_best_checkpoint_metric else a < b
prev_best = getattr(should_stop_early, "best", None)
if prev_best is None or is_better(valid_loss, prev_best):
should_stop_early.best = valid_loss
should_stop_early.num_runs = 0
return False
else:
should_stop_early.num_runs += 1
if should_stop_early.num_runs >= args.patience:
logger.info(
"early stop since valid performance hasn't improved for last {} runs".format(
args.patience
)
)
return True
else:
return False
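# Illustration (added comment): with --patience 5, training stops once validation
# has failed to improve for 5 consecutive validations. The best value and the
# counter live on the function object itself (should_stop_early.best / .num_runs):
#
#     should_stop_early(args, 4.10)   # new best              -> False
#     should_stop_early(args, 4.20)   # worse, num_runs = 1   -> False
#     ...                             # 5th non-improving call -> True
#
# The loss values above are made-up placeholders.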
@metrics.aggregate("train")
def train(args, trainer, task, epoch_itr):
"""Train the model for one epoch and return validation losses."""
# Initialize data iterator
itr = epoch_itr.next_epoch_itr(
fix_batches_to_gpus=args.fix_batches_to_gpus,
shuffle=(epoch_itr.next_epoch_idx > args.curriculum),
)
update_freq = (
args.update_freq[epoch_itr.epoch - 1]
if epoch_itr.epoch <= len(args.update_freq)
else args.update_freq[-1]
)
itr = iterators.GroupedIterator(itr, update_freq)
if getattr(args, "tpu", False):
itr = utils.tpu_data_loader(itr)
#print(itr);exit()
progress = progress_bar.progress_bar(
itr,
log_format=args.log_format,
log_interval=args.log_interval,
epoch=epoch_itr.epoch,
tensorboard_logdir=(
args.tensorboard_logdir if distributed_utils.is_master(args) else None
),
default_log_format=("tqdm" if not args.no_progress_bar else "simple"),
)
trainer.begin_epoch(epoch_itr.epoch)
valid_subsets = args.valid_subset.split(",")
should_stop = False
num_updates = trainer.get_num_updates()
#print(num_updates);28626
train_nll_loss = []
train_loss = []
#print(progress);exit()
for dummy in range(12):
print(dummy+1)
end_of_epoch = True
valid_losses, should_stop = validate_and_save(
            args, trainer, task, epoch_itr, valid_subsets, end_of_epoch, dummy
)
print('finish validation and save checkpoint');exit()
for i, samples in enumerate(progress):
#print('samples',samples[0]['net_input']['src_tokens']);exit()
#sample is a list len=1
#sample[0] is a dict keys='id', 'nsentences', 'ntokens', 'net_input', 'target'
#id: a 1D tensor (192)
#nsentences: 192
#ntokens: 2931
#net_input: a dict, keys='src_tokens', 'src_lengths', 'prev_output_tokens'
            # 'src_tokens': a 2D tensor (192, 21), which appears to be (B, T)
            # 'src_lengths': a 1D tensor (192), all values equal to 21
#'prev_output_tokens': a 2D tensor(192,16)
#target: a 2D tensor(192,16)
            # Validate only:
            # change the valid subset via --valid_subset='train'
            # and modify the checkpoint / save_checkpoint code accordingly
with metrics.aggregate("train_inner"), torch.autograd.profiler.record_function(
"train_step-%d" % i
):
log_output = trainer.train_step(samples) #training
train_nll_loss.append(log_output['nll_loss'])
train_loss.append(log_output['loss'])
if log_output is not None: # not OOM, overflow, ...
# log mid-epoch stats
num_updates = trainer.get_num_updates()
if num_updates % args.log_interval == 0:
stats = get_training_stats(metrics.get_smoothed_values("train_inner"))
progress.log(stats, tag="train_inner", step=num_updates)
# reset mid-epoch stats after each log interval
# the end-of-epoch stats will still be preserved
metrics.reset_meters("train_inner")
end_of_epoch = not itr.has_next()
valid_losses, should_stop = validate_and_save(
args, trainer, task, epoch_itr, valid_subsets, end_of_epoch
)
if should_stop:
break
d = {}
d['train_nll_loss_list'] = np.array(train_nll_loss)
d['train_loss_list'] = np.array(train_loss)
d['train_nll_loss'] = stats['nll_loss']
d['train_loss'] = stats['loss']
d['train_ppl'] = stats['ppl']
file = 'statistics/batch_train_loss_{}.mat'.format(epoch_itr.epoch)
io.savemat(file,d)
# log end-of-epoch stats
logger.info("end of epoch {} (average epoch stats below)".format(epoch_itr.epoch))
stats = get_training_stats(metrics.get_smoothed_values("train"))
progress.print(stats, tag="train", step=num_updates)
#print('valid',valid_losses) #bleu值
#print(stats);
#OrderedDict([('loss', 9.738), ('nll_loss', 9.141), ('ppl', 564.38),
#('wps', 15393.4), ('ups', 4.29), ('wpb', 3586.8), ('bsz', 145.5), ('num_updates', 1101),
#('lr', 0.000137625), ('gnorm', 1.788), ('train_wall', 176.0), ('wall', 261.0)])
# reset epoch-level meters
metrics.reset_meters("train")
return valid_losses, should_stop
def validate_and_save(args, trainer, task, epoch_itr, valid_subsets, end_of_epoch, dummy=0):
num_updates = trainer.get_num_updates()
do_save = (
args.save_interval_updates > 0
and num_updates > 0
and num_updates % args.save_interval_updates == 0
and num_updates >= args.validate_after_updates
) or (end_of_epoch and epoch_itr.epoch % args.save_interval == 0)
do_validate = (
(not end_of_epoch and do_save) # validate during mid-epoch saves
or (end_of_epoch and epoch_itr.epoch % args.validate_interval == 0)
or (args.validate_interval_updates > 0 and num_updates > 0 and num_updates % args.validate_interval_updates == 0)
) and not args.disable_validation
# Validate
valid_losses = [None]
do_validate, do_save = True, True
if do_validate:
valid_losses = validate(args, trainer, task, epoch_itr, valid_subsets)
# Stopping conditions
max_update = args.max_update or math.inf
should_stop = (
should_stop_early(args, valid_losses[0])
or trainer.get_num_updates() >= max_update
or (
args.stop_time_hours > 0
and trainer.cumulative_training_time() / (60 * 60) > args.stop_time_hours
)
)
do_save, should_stop = True, False
# Save checkpoint
if do_save or should_stop:
logger.info("begin save checkpoint")
        checkpoint_utils.save_checkpoint(args, trainer, epoch_itr, valid_losses[0], dummy)
return valid_losses, should_stop
def get_training_stats(stats):
stats["wall"] = round(metrics.get_meter("default", "wall").elapsed_time, 0)
return stats
def validate(args, trainer, task, epoch_itr, subsets):
"""Evaluate the model on the validation set(s) and return the losses."""
if args.fixed_validation_seed is not None:
# set fixed seed for every validation
utils.set_torch_seed(args.fixed_validation_seed)
valid_losses = []
for subset in subsets:
logger.info('begin validation on "{}" subset'.format(subset))
# Initialize data iterator
itr = trainer.get_valid_iterator(subset).next_epoch_itr(shuffle=False)
if getattr(args, "tpu", False):
itr = utils.tpu_data_loader(itr)
progress = progress_bar.progress_bar(
itr,
log_format=args.log_format,
log_interval=args.log_interval,
epoch=epoch_itr.epoch,
prefix=f"valid on '{subset}' subset",
tensorboard_logdir=(
args.tensorboard_logdir if distributed_utils.is_master(args) else None
),
default_log_format=("tqdm" if not args.no_progress_bar else "simple"),
)
# create a new root metrics aggregator so validation metrics
# don't pollute other aggregators (e.g., train meters)
with metrics.aggregate(new_root=True) as agg:
for sample in progress:
trainer.valid_step(sample)
# log validation stats
stats = get_valid_stats(args, trainer, agg.get_smoothed_values())
progress.print(stats, tag=subset, step=trainer.get_num_updates())
d = {}
d['valid_nll_loss'] = stats['nll_loss']
d['valid_loss'] = stats['loss']
d['valid_bleu'] = stats['bleu']
d['valid_ppl'] = stats['ppl']
file = 'statistics/batch_valid_loss_{}.mat'.format(epoch_itr.epoch)
io.savemat(file,d)
valid_losses.append(stats[args.best_checkpoint_metric])
return valid_losses
def get_valid_stats(args, trainer, stats):
stats["num_updates"] = trainer.get_num_updates()
if hasattr(checkpoint_utils.save_checkpoint, "best"):
key = "best_{0}".format(args.best_checkpoint_metric)
best_function = max if args.maximize_best_checkpoint_metric else min
stats[key] = best_function(
checkpoint_utils.save_checkpoint.best, stats[args.best_checkpoint_metric]
)
return stats
def cli_main(modify_parser=None):
parser = options.get_training_parser()
args = options.parse_args_and_arch(parser, modify_parser=modify_parser)
if args.profile:
with torch.cuda.profiler.profile():
with torch.autograd.profiler.emit_nvtx():
distributed_utils.call_main(args, main)
else:
distributed_utils.call_main(args, main)
if __name__ == "__main__":
cli_main()
| 14,061 | 34.420655 | 121 | py |
RegularizedBN | RegularizedBN-main/fairseq_cli/generate.py | #!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Translate pre-processed data with a trained model.
"""
import logging
import math
import os
import sys
import numpy as np
import torch
from fairseq import checkpoint_utils, options, scoring, tasks, utils
from fairseq.logging import progress_bar
from fairseq.logging.meters import StopwatchMeter, TimeMeter
from fairseq.data import encoders
def main(args):
#print(args);exit()
assert args.path is not None, '--path required for generation!'
assert not args.sampling or args.nbest == args.beam, \
'--sampling requires --nbest to be equal to --beam'
assert args.replace_unk is None or args.dataset_impl == 'raw', \
'--replace-unk requires a raw text dataset (--dataset-impl=raw)'
if args.results_path is not None:
os.makedirs(args.results_path, exist_ok=True)
output_path = os.path.join(args.results_path, 'generate-{}.txt'.format(args.gen_subset))
with open(output_path, 'w', buffering=1, encoding='utf-8') as h:
return _main(args, h)
else:
return _main(args, sys.stdout)
def get_symbols_to_strip_from_output(generator):
if hasattr(generator, 'symbols_to_strip_from_output'):
return generator.symbols_to_strip_from_output
else:
return {generator.eos}
def _main(args, output_file):
logging.basicConfig(
format='%(asctime)s | %(levelname)s | %(name)s | %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=logging.INFO,
stream=output_file,
)
logger = logging.getLogger('fairseq_cli.generate')
utils.import_user_module(args)
if args.max_tokens is None and args.max_sentences is None:
args.max_tokens = 12000
    #logger.info(args)  # prints the full args namespace
# Fix seed for stochastic decoding
if args.seed is not None and not args.no_seed_provided:
np.random.seed(args.seed)
utils.set_torch_seed(args.seed)
use_cuda = torch.cuda.is_available() and not args.cpu
# Load dataset splits
task = tasks.setup_task(args)
task.load_dataset(args.gen_subset)
# Set dictionaries
try:
src_dict = getattr(task, 'source_dictionary', None)
except NotImplementedError:
src_dict = None
tgt_dict = task.target_dictionary
# Load ensemble
logger.info('loading model(s) from {}'.format(args.path))
models, _model_args = checkpoint_utils.load_model_ensemble(
utils.split_paths(args.path),
arg_overrides=eval(args.model_overrides),
task=task,
suffix=getattr(args, "checkpoint_suffix", ""),
)
# Optimize ensemble for generation
for model in models:
model.prepare_for_inference_(args)
if args.fp16:
model.half()
if use_cuda:
model.cuda()
# Load alignment dictionary for unknown word replacement
# (None if no unknown word replacement, empty if no path to align dictionary)
align_dict = utils.load_align_dict(args.replace_unk)
# Load dataset (possibly sharded)
itr = task.get_batch_iterator(
dataset=task.dataset(args.gen_subset),
max_tokens=args.max_tokens,
max_sentences=args.max_sentences,
max_positions=utils.resolve_max_positions(
task.max_positions(),
*[model.max_positions() for model in models]
),
ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test,
required_batch_size_multiple=args.required_batch_size_multiple,
num_shards=args.num_shards,
shard_id=args.shard_id,
num_workers=args.num_workers,
).next_epoch_itr(shuffle=False)
progress = progress_bar.progress_bar(
itr,
log_format=args.log_format,
log_interval=args.log_interval,
default_log_format=('tqdm' if not args.no_progress_bar else 'none'),
)
# Initialize generator
gen_timer = StopwatchMeter()
generator = task.build_generator(models, args)
# Handle tokenization and BPE
tokenizer = encoders.build_tokenizer(args)
bpe = encoders.build_bpe(args)
def decode_fn(x):
if bpe is not None:
x = bpe.decode(x)
if tokenizer is not None:
x = tokenizer.decode(x)
return x
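    # Hedged illustration (added comment): decode_fn undoes preprocessing in the
    # reverse order, BPE first and then detokenization. With subword-nmt BPE and
    # a Moses tokenizer a hypothesis would be post-processed roughly as
    #
    #     "ein klei@@ nes Haus ." -> bpe.decode -> "ein kleines Haus ." -> tokenizer.decode -> "ein kleines Haus."
    #
    # The German string is a made-up placeholder.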
scorer = scoring.build_scorer(args, tgt_dict)
num_sentences = 0
has_target = True
wps_meter = TimeMeter()
for sample in progress:
sample = utils.move_to_cuda(sample) if use_cuda else sample
if 'net_input' not in sample:
continue
prefix_tokens = None
if args.prefix_size > 0:
prefix_tokens = sample['target'][:, :args.prefix_size]
constraints = None
if "constraints" in sample:
constraints = sample["constraints"]
gen_timer.start()
hypos = task.inference_step(generator, models, sample, prefix_tokens=prefix_tokens, constraints=constraints)
num_generated_tokens = sum(len(h[0]['tokens']) for h in hypos)
gen_timer.stop(num_generated_tokens)
for i, sample_id in enumerate(sample['id'].tolist()):
has_target = sample['target'] is not None
# Remove padding
if 'src_tokens' in sample['net_input']:
src_tokens = utils.strip_pad(sample['net_input']['src_tokens'][i, :], tgt_dict.pad())
else:
src_tokens = None
target_tokens = None
if has_target:
target_tokens = utils.strip_pad(sample['target'][i, :], tgt_dict.pad()).int().cpu()
# Either retrieve the original sentences or regenerate them from tokens.
if align_dict is not None:
src_str = task.dataset(args.gen_subset).src.get_original_text(sample_id)
target_str = task.dataset(args.gen_subset).tgt.get_original_text(sample_id)
else:
if src_dict is not None:
src_str = src_dict.string(src_tokens, args.remove_bpe)
else:
src_str = ""
if has_target:
target_str = tgt_dict.string(
target_tokens,
args.remove_bpe,
escape_unk=True,
extra_symbols_to_ignore=get_symbols_to_strip_from_output(generator),
)
src_str = decode_fn(src_str)
if has_target:
target_str = decode_fn(target_str)
if not args.quiet:
if src_dict is not None:
print('S-{}\t{}'.format(sample_id, src_str), file=output_file)
if has_target:
print('T-{}\t{}'.format(sample_id, target_str), file=output_file)
# Process top predictions
for j, hypo in enumerate(hypos[i][:args.nbest]):
hypo_tokens, hypo_str, alignment = utils.post_process_prediction(
hypo_tokens=hypo['tokens'].int().cpu(),
src_str=src_str,
alignment=hypo['alignment'],
align_dict=align_dict,
tgt_dict=tgt_dict,
remove_bpe=args.remove_bpe,
extra_symbols_to_ignore=get_symbols_to_strip_from_output(generator),
)
detok_hypo_str = decode_fn(hypo_str)
if not args.quiet:
score = hypo['score'] / math.log(2) # convert to base 2
# original hypothesis (after tokenization and BPE)
print('H-{}\t{}\t{}'.format(sample_id, score, hypo_str), file=output_file)
# detokenized hypothesis
print('D-{}\t{}\t{}'.format(sample_id, score, detok_hypo_str), file=output_file)
print('P-{}\t{}'.format(
sample_id,
' '.join(map(
lambda x: '{:.4f}'.format(x),
# convert from base e to base 2
hypo['positional_scores'].div_(math.log(2)).tolist(),
))
), file=output_file)
if args.print_alignment:
print('A-{}\t{}'.format(
sample_id,
' '.join(['{}-{}'.format(src_idx, tgt_idx) for src_idx, tgt_idx in alignment])
), file=output_file)
if args.print_step:
print('I-{}\t{}'.format(sample_id, hypo['steps']), file=output_file)
if getattr(args, 'retain_iter_history', False):
for step, h in enumerate(hypo['history']):
_, h_str, _ = utils.post_process_prediction(
hypo_tokens=h['tokens'].int().cpu(),
src_str=src_str,
alignment=None,
align_dict=None,
tgt_dict=tgt_dict,
remove_bpe=None,
)
print('E-{}_{}\t{}'.format(sample_id, step, h_str), file=output_file)
# Score only the top hypothesis
if has_target and j == 0:
if align_dict is not None or args.remove_bpe is not None:
# Convert back to tokens for evaluation with unk replacement and/or without BPE
target_tokens = tgt_dict.encode_line(target_str, add_if_not_exist=True)
hypo_tokens = tgt_dict.encode_line(detok_hypo_str, add_if_not_exist=True)
if hasattr(scorer, 'add_string'):
scorer.add_string(target_str, detok_hypo_str)
else:
scorer.add(target_tokens, hypo_tokens)
wps_meter.update(num_generated_tokens)
progress.log({'wps': round(wps_meter.avg)})
num_sentences += sample["nsentences"] if "nsentences" in sample else sample['id'].numel()
logger.info('NOTE: hypothesis and token scores are output in base 2')
logger.info('Translated {} sentences ({} tokens) in {:.1f}s ({:.2f} sentences/s, {:.2f} tokens/s)'.format(
num_sentences, gen_timer.n, gen_timer.sum, num_sentences / gen_timer.sum, 1. / gen_timer.avg))
if has_target:
if args.bpe and not args.sacrebleu:
if args.remove_bpe:
logger.warning("BLEU score is being computed by splitting detokenized string on spaces, this is probably not what you want. Use --sacrebleu for standard 13a BLEU tokenization")
else:
logger.warning("If you are using BPE on the target side, the BLEU score is computed on BPE tokens, not on proper words. Use --sacrebleu for standard 13a BLEU tokenization")
# use print to be consistent with other main outputs: S-, H-, T-, D- and so on
print(
'Generate {} with beam={}: {}'.format(args.gen_subset, args.beam, scorer.result_string()),
file=output_file)
return scorer
def cli_main():
parser = options.get_generation_parser()
args = options.parse_args_and_arch(parser)
main(args)
if __name__ == '__main__':
cli_main()
| 11,494 | 38.501718 | 192 | py |
RegularizedBN | RegularizedBN-main/fairseq_cli/validate.py | #!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from itertools import chain
import logging
import sys
import torch
from fairseq import checkpoint_utils, distributed_utils, options, utils
from fairseq.logging import metrics, progress_bar
logging.basicConfig(
format='%(asctime)s | %(levelname)s | %(name)s | %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=logging.INFO,
stream=sys.stdout,
)
logger = logging.getLogger('fairseq_cli.validate')
def main(args, override_args=None):
utils.import_user_module(args)
assert args.max_tokens is not None or args.max_sentences is not None, \
'Must specify batch size either with --max-tokens or --max-sentences'
use_fp16 = args.fp16
use_cuda = torch.cuda.is_available() and not args.cpu
if use_cuda:
torch.cuda.set_device(args.device_id)
if override_args is not None:
overrides = vars(override_args)
overrides.update(eval(getattr(override_args, 'model_overrides', '{}')))
else:
overrides = None
# Load ensemble
logger.info('loading model(s) from {}'.format(args.path))
models, model_args, task = checkpoint_utils.load_model_ensemble_and_task(
[args.path],
arg_overrides=overrides,
suffix=getattr(args, "checkpoint_suffix", ""),
)
model = models[0]
# Move models to GPU
for model in models:
if use_fp16:
model.half()
if use_cuda:
model.cuda()
# Print args
logger.info(model_args)
# Build criterion
criterion = task.build_criterion(model_args)
criterion.eval()
for subset in args.valid_subset.split(','):
try:
task.load_dataset(subset, combine=False, epoch=1)
dataset = task.dataset(subset)
except KeyError:
raise Exception('Cannot find dataset: ' + subset)
# Initialize data iterator
itr = task.get_batch_iterator(
dataset=dataset,
max_tokens=args.max_tokens,
max_sentences=args.max_sentences,
max_positions=utils.resolve_max_positions(
task.max_positions(),
*[m.max_positions() for m in models],
),
ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test,
required_batch_size_multiple=args.required_batch_size_multiple,
seed=args.seed,
num_shards=args.distributed_world_size,
shard_id=args.distributed_rank,
num_workers=args.num_workers,
).next_epoch_itr(shuffle=False)
progress = progress_bar.progress_bar(
itr,
log_format=args.log_format,
log_interval=args.log_interval,
prefix=f"valid on '{subset}' subset",
default_log_format=('tqdm' if not args.no_progress_bar else 'simple'),
)
log_outputs = []
for i, sample in enumerate(progress):
sample = utils.move_to_cuda(sample) if use_cuda else sample
_loss, _sample_size, log_output = task.valid_step(sample, model, criterion)
progress.log(log_output, step=i)
log_outputs.append(log_output)
if args.distributed_world_size > 1:
log_outputs = distributed_utils.all_gather_list(
log_outputs,
max_size=getattr(args, 'all_gather_list_size', 16384),
)
log_outputs = list(chain.from_iterable(log_outputs))
with metrics.aggregate() as agg:
task.reduce_metrics(log_outputs, criterion)
log_output = agg.get_smoothed_values()
progress.print(log_output, tag=subset, step=i)
def cli_main():
parser = options.get_validation_parser()
args = options.parse_args_and_arch(parser)
# only override args that are explicitly given on the command line
override_parser = options.get_validation_parser()
override_args = options.parse_args_and_arch(override_parser, suppress_defaults=True)
distributed_utils.call_main(args, main, override_args=override_args)
if __name__ == '__main__':
cli_main()
| 4,297 | 31.315789 | 88 | py |
RegularizedBN | RegularizedBN-main/fairseq_cli/eval_lm.py | #!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Evaluate the perplexity of a trained language model.
"""
import logging
import math
import os
import torch
from fairseq import checkpoint_utils, options, tasks, utils
from fairseq.data import LMContextWindowDataset
from fairseq.logging import progress_bar
from fairseq.logging.meters import StopwatchMeter, TimeMeter
from fairseq.sequence_scorer import SequenceScorer
from fairseq import distributed_utils
logging.basicConfig(
format='%(asctime)s | %(levelname)s | %(name)s | %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=logging.INFO,
)
logger = logging.getLogger('fairseq_cli.eval_lm')
class WordStat(object):
def __init__(self, word, is_bpe):
self.word = word
self.is_bpe = is_bpe
self.log_prob = 0
self.next_word_prob = 0
self.count = 0
self.missing_next_words = 0
def add(self, log_prob, next_word_prob):
""" increments counters for the sum of log probs of current word and next
word (given context ending at current word). Since the next word might be at the end of the example,
or it might be not counted because it is not an ending subword unit,
also keeps track of how many of those we have seen """
if next_word_prob is not None:
self.next_word_prob += next_word_prob
else:
self.missing_next_words += 1
self.log_prob += log_prob
self.count += 1
def __str__(self):
return '{}\t{}\t{}\t{}\t{}\t{}'.format(self.word, self.count, self.log_prob, self.is_bpe,
self.next_word_prob, self.count - self.missing_next_words)
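# Hedged usage sketch (added comment): the scoring loop below accumulates one
# WordStat per surface word, roughly as
#
#     ws = word_stats.setdefault('house', WordStat('house', is_bpe=False))
#     ws.add(log_prob=-2.3, next_word_prob=-1.7)
#     print(ws)  # tab-separated: word, count, log prob, is_bpe, next-word prob, ...
#
# The word and the probability values are made-up placeholders.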
def main(parsed_args, **unused_kwargs):
assert parsed_args.path is not None, '--path required for evaluation!'
if torch.cuda.is_available() and not parsed_args.cpu:
torch.cuda.set_device(parsed_args.device_id)
utils.import_user_module(parsed_args)
logger.info(parsed_args)
use_cuda = torch.cuda.is_available() and not parsed_args.cpu
task = tasks.setup_task(parsed_args)
# Load ensemble
logger.info('loading model(s) from {}'.format(parsed_args.path))
models, args = checkpoint_utils.load_model_ensemble(
parsed_args.path.split(os.pathsep),
arg_overrides=eval(parsed_args.model_overrides),
task=task,
suffix=getattr(parsed_args, "checkpoint_suffix", ""),
)
for arg in vars(parsed_args).keys():
if arg not in {
'self_target', 'future_target', 'past_target', 'tokens_per_sample',
'output_size_dictionary', 'add_bos_token',
}:
setattr(args, arg, getattr(parsed_args, arg))
# reduce tokens per sample by the required context window size
args.tokens_per_sample -= args.context_window
task = tasks.setup_task(args)
# Load dataset splits
task.load_dataset(args.gen_subset)
dataset = task.dataset(args.gen_subset)
if args.context_window > 0:
dataset = LMContextWindowDataset(
dataset=dataset,
tokens_per_sample=args.tokens_per_sample,
context_window=args.context_window,
pad_idx=task.source_dictionary.pad(),
)
logger.info('{} {} {} examples'.format(args.data, args.gen_subset, len(dataset)))
# Optimize ensemble for generation and set the source and dest dicts on the model (required by scorer)
for model in models:
model.prepare_for_inference_(args)
if args.fp16:
model.half()
if use_cuda:
model.cuda()
assert len(models) > 0
logger.info('num. model params: {}'.format(sum(p.numel() for p in models[0].parameters())))
itr = task.get_batch_iterator(
dataset=dataset,
max_tokens=args.max_tokens or 36000,
max_sentences=args.max_sentences,
max_positions=utils.resolve_max_positions(*[
model.max_positions() for model in models
]),
ignore_invalid_inputs=True,
num_shards=args.num_shards,
shard_id=args.shard_id,
num_workers=args.num_workers,
).next_epoch_itr(shuffle=False)
progress = progress_bar.progress_bar(
itr,
log_format=args.log_format,
log_interval=args.log_interval,
default_log_format=('tqdm' if not args.no_progress_bar else 'none'),
)
gen_timer = StopwatchMeter()
scorer = SequenceScorer(task.target_dictionary, args.softmax_batch)
score_sum = 0.
count = 0
if args.remove_bpe is not None:
if args.remove_bpe == 'sentencepiece':
raise NotImplementedError
else:
bpe_cont = args.remove_bpe.rstrip()
bpe_toks = {
i
for i in range(len(task.source_dictionary))
if task.source_dictionary[i].endswith(bpe_cont)
}
bpe_len = len(bpe_cont)
else:
bpe_toks = None
bpe_len = 0
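    # Illustration (added comment): with subword-nmt style BPE and
    # --remove-bpe '@@ ', bpe_cont is '@@' and every dictionary symbol ending in
    # '@@' counts as a word-internal piece; in the scoring loop below its
    # positional score is folded into the following token so that perplexity is
    # computed per word rather than per subword unit.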
word_stats = dict()
wps_meter = TimeMeter()
for sample in progress:
if 'net_input' not in sample:
continue
sample = utils.move_to_cuda(sample) if use_cuda else sample
gen_timer.start()
hypos = scorer.generate(models, sample)
gen_timer.stop(sample['ntokens'])
for i, hypos_i in enumerate(hypos):
hypo = hypos_i[0]
sample_id = sample['id'][i]
tokens = hypo['tokens']
tgt_len = tokens.numel()
pos_scores = hypo['positional_scores'].float()
if getattr(args, 'add_bos_token', False):
assert hypo['tokens'][0].item() == task.target_dictionary.bos()
tokens = tokens[1:]
pos_scores = pos_scores[1:]
skipped_toks = 0
if bpe_toks is not None:
for i in range(tgt_len - 1):
if tokens[i].item() in bpe_toks:
skipped_toks += 1
pos_scores[i + 1] += pos_scores[i]
pos_scores[i] = 0
inf_scores = pos_scores.eq(float('inf')) | pos_scores.eq(float('-inf'))
if inf_scores.any():
logger.info(
                'skipping tokens with inf scores: %s',
task.target_dictionary.string(tokens[inf_scores.nonzero()])
)
pos_scores = pos_scores[(~inf_scores).nonzero()]
score_sum += pos_scores.sum().cpu()
count += pos_scores.numel() - skipped_toks
if args.output_word_probs or args.output_word_stats:
w = ''
word_prob = []
is_bpe = False
for i in range(len(tokens)):
w_ind = tokens[i].item()
w += task.source_dictionary[w_ind]
if bpe_toks is not None and w_ind in bpe_toks:
w = w[:-bpe_len]
is_bpe = True
else:
word_prob.append((w, pos_scores[i].item()))
next_prob = None
ind = i + 1
while ind < len(tokens):
if pos_scores[ind].item() != 0:
next_prob = pos_scores[ind]
break
ind += 1
word_stats.setdefault(w, WordStat(w, is_bpe)).add(pos_scores[i].item(), next_prob)
is_bpe = False
w = ''
if args.output_word_probs:
logger.info(
str(int(sample_id)) + " "
+ ('\t'.join('{} [{:2f}]'.format(x[0], x[1]) for x in word_prob))
)
wps_meter.update(sample['ntokens'])
progress.log({'wps': round(wps_meter.avg)})
avg_nll_loss = -score_sum / count / math.log(2) # convert to base 2
logger.info('Evaluated {} tokens in {:.1f}s ({:.2f} tokens/s)'.format(
gen_timer.n, gen_timer.sum, 1. / gen_timer.avg
))
logger.info('Loss (base 2): {:.4f}, Perplexity: {:.2f}'.format(
avg_nll_loss, 2**avg_nll_loss
))
if args.output_word_stats:
for ws in sorted(word_stats.values(), key=lambda x: x.count, reverse=True):
logger.info(ws)
def cli_main():
parser = options.get_eval_lm_parser()
args = options.parse_args_and_arch(parser)
distributed_utils.call_main(args, main)
if __name__ == '__main__':
cli_main()
| 8,744 | 33.160156 | 112 | py |
RegularizedBN | RegularizedBN-main/fairseq_cli/interactive.py | #!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Translate raw text with a trained model. Batches data on-the-fly.
"""
from collections import namedtuple
import fileinput
import logging
import math
import sys
import time
import os
import numpy as np
import torch
from fairseq import checkpoint_utils, distributed_utils, options, tasks, utils
from fairseq.data import encoders
from fairseq.token_generation_constraints import pack_constraints, unpack_constraints
from .generate import get_symbols_to_strip_from_output
logging.basicConfig(
format='%(asctime)s | %(levelname)s | %(name)s | %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=logging.INFO,
stream=sys.stdout,
)
logger = logging.getLogger('fairseq_cli.interactive')
Batch = namedtuple('Batch', 'ids src_tokens src_lengths constraints')
Translation = namedtuple('Translation', 'src_str hypos pos_scores alignments')
def buffered_read(input, buffer_size):
buffer = []
with fileinput.input(files=[input], openhook=fileinput.hook_encoded("utf-8")) as h:
for src_str in h:
buffer.append(src_str.strip())
if len(buffer) >= buffer_size:
yield buffer
buffer = []
if len(buffer) > 0:
yield buffer
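# Hedged usage sketch (added comment): buffered_read yields the input in chunks
# of at most `buffer_size` lines; reading from stdin ('-') with a buffer of 2
# behaves roughly like
#
#     for lines in buffered_read('-', buffer_size=2):
#         ...  # lines is a list of up to 2 stripped strings
#
# fileinput treats '-' as standard input, which matches the CLI default.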
def make_batches(lines, args, task, max_positions, encode_fn):
def encode_fn_target(x):
return encode_fn(x)
if args.constraints:
        # Strip (tab-delimited) constraints, if present, from input lines,
# store them in batch_constraints
batch_constraints = [list() for _ in lines]
for i, line in enumerate(lines):
if "\t" in line:
lines[i], *batch_constraints[i] = line.split("\t")
# Convert each List[str] to List[Tensor]
for i, constraint_list in enumerate(batch_constraints):
batch_constraints[i] = [task.target_dictionary.encode_line(
encode_fn_target(constraint),
append_eos=False,
add_if_not_exist=False,
) for constraint in constraint_list]
tokens = [
task.source_dictionary.encode_line(
encode_fn(src_str), add_if_not_exist=False
).long()
for src_str in lines
]
if args.constraints:
constraints_tensor = pack_constraints(batch_constraints)
else:
constraints_tensor = None
lengths = [t.numel() for t in tokens]
itr = task.get_batch_iterator(
dataset=task.build_dataset_for_inference(tokens, lengths, constraints=constraints_tensor),
max_tokens=args.max_tokens,
max_sentences=args.max_sentences,
max_positions=max_positions,
ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test
).next_epoch_itr(shuffle=False)
for batch in itr:
ids = batch['id']
src_tokens = batch['net_input']['src_tokens']
src_lengths = batch['net_input']['src_lengths']
constraints = batch.get("constraints", None)
yield Batch(
ids=ids,
src_tokens=src_tokens,
src_lengths=src_lengths,
constraints=constraints,
)
def main(args):
start_time = time.time()
total_translate_time = 0
utils.import_user_module(args)
if args.buffer_size < 1:
args.buffer_size = 1
if args.max_tokens is None and args.max_sentences is None:
args.max_sentences = 1
assert not args.sampling or args.nbest == args.beam, \
'--sampling requires --nbest to be equal to --beam'
assert not args.max_sentences or args.max_sentences <= args.buffer_size, \
'--max-sentences/--batch-size cannot be larger than --buffer-size'
logger.info(args)
# Fix seed for stochastic decoding
if args.seed is not None and not args.no_seed_provided:
np.random.seed(args.seed)
utils.set_torch_seed(args.seed)
use_cuda = torch.cuda.is_available() and not args.cpu
# Setup task, e.g., translation
task = tasks.setup_task(args)
# Load ensemble
logger.info('loading model(s) from {}'.format(args.path))
models, _model_args = checkpoint_utils.load_model_ensemble(
args.path.split(os.pathsep),
arg_overrides=eval(args.model_overrides),
task=task,
suffix=getattr(args, "checkpoint_suffix", ""),
)
# Set dictionaries
src_dict = task.source_dictionary
tgt_dict = task.target_dictionary
# Optimize ensemble for generation
for model in models:
model.prepare_for_inference_(args)
if args.fp16:
model.half()
if use_cuda:
model.cuda()
# Initialize generator
generator = task.build_generator(models, args)
# Handle tokenization and BPE
tokenizer = encoders.build_tokenizer(args)
bpe = encoders.build_bpe(args)
def encode_fn(x):
if tokenizer is not None:
x = tokenizer.encode(x)
if bpe is not None:
x = bpe.encode(x)
return x
def decode_fn(x):
if bpe is not None:
x = bpe.decode(x)
if tokenizer is not None:
x = tokenizer.decode(x)
return x
# Load alignment dictionary for unknown word replacement
# (None if no unknown word replacement, empty if no path to align dictionary)
align_dict = utils.load_align_dict(args.replace_unk)
max_positions = utils.resolve_max_positions(
task.max_positions(),
*[model.max_positions() for model in models]
)
if args.constraints:
logger.warning("NOTE: Constrained decoding currently assumes a shared subword vocabulary.")
if args.buffer_size > 1:
logger.info('Sentence buffer size: %s', args.buffer_size)
logger.info('NOTE: hypothesis and token scores are output in base 2')
logger.info('Type the input sentence and press return:')
start_id = 0
for inputs in buffered_read(args.input, args.buffer_size):
results = []
for batch in make_batches(inputs, args, task, max_positions, encode_fn):
bsz = batch.src_tokens.size(0)
src_tokens = batch.src_tokens
src_lengths = batch.src_lengths
constraints = batch.constraints
if use_cuda:
src_tokens = src_tokens.cuda()
src_lengths = src_lengths.cuda()
if constraints is not None:
constraints = constraints.cuda()
sample = {
'net_input': {
'src_tokens': src_tokens,
'src_lengths': src_lengths,
},
}
translate_start_time = time.time()
translations = task.inference_step(generator, models, sample, constraints=constraints)
translate_time = time.time() - translate_start_time
total_translate_time += translate_time
list_constraints = [[] for _ in range(bsz)]
if args.constraints:
list_constraints = [unpack_constraints(c) for c in constraints]
for i, (id, hypos) in enumerate(zip(batch.ids.tolist(), translations)):
src_tokens_i = utils.strip_pad(src_tokens[i], tgt_dict.pad())
constraints = list_constraints[i]
results.append((start_id + id, src_tokens_i, hypos,
{ "constraints": constraints,
"time": translate_time / len(translations) }
))
# sort output to match input order
for id_, src_tokens, hypos, info in sorted(results, key=lambda x: x[0]):
if src_dict is not None:
src_str = src_dict.string(src_tokens, args.remove_bpe)
print('S-{}\t{}'.format(id_, src_str))
print("W-{}\t{:.3f}\tseconds".format(id_, info["time"]))
for constraint in info["constraints"]:
print("C-{}\t{}".format(id_, tgt_dict.string(constraint, args.remove_bpe)))
# Process top predictions
for hypo in hypos[:min(len(hypos), args.nbest)]:
hypo_tokens, hypo_str, alignment = utils.post_process_prediction(
hypo_tokens=hypo['tokens'].int().cpu(),
src_str=src_str,
alignment=hypo['alignment'],
align_dict=align_dict,
tgt_dict=tgt_dict,
remove_bpe=args.remove_bpe,
extra_symbols_to_ignore=get_symbols_to_strip_from_output(generator),
)
detok_hypo_str = decode_fn(hypo_str)
score = hypo['score'] / math.log(2) # convert to base 2
# original hypothesis (after tokenization and BPE)
print('H-{}\t{}\t{}'.format(id_, score, hypo_str))
# detokenized hypothesis
print('D-{}\t{}\t{}'.format(id_, score, detok_hypo_str))
print('P-{}\t{}'.format(
id_,
' '.join(map(
lambda x: '{:.4f}'.format(x),
# convert from base e to base 2
hypo['positional_scores'].div_(math.log(2)).tolist(),
))
))
if args.print_alignment:
alignment_str = " ".join(["{}-{}".format(src, tgt) for src, tgt in alignment])
print('A-{}\t{}'.format(
id_,
alignment_str
))
# update running id_ counter
start_id += len(inputs)
logger.info("Total time: {:.3f} seconds; translation time: {:.3f}".format(time.time() - start_time, total_translate_time))
def cli_main():
parser = options.get_interactive_generation_parser()
args = options.parse_args_and_arch(parser)
distributed_utils.call_main(args, main)
if __name__ == '__main__':
cli_main()
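# Output-line reference (summarizing the print statements in main() above):
#   S-<id>  source sentence (after optional BPE removal)
#   W-<id>  wall-clock translation time for this sentence, in seconds
#   C-<id>  one line per decoding constraint (only with --constraints)
#   H-<id>  hypothesis score in base 2, followed by the tokenized hypothesis
#   D-<id>  hypothesis score in base 2, followed by the detokenized hypothesis
#   P-<id>  per-token positional scores, converted from base e to base 2
#   A-<id>  source-target alignment pairs (only with --print-alignment)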
| 10,107 | 34.843972 | 126 | py |
RegularizedBN | RegularizedBN-main/fairseq_cli/train.py | #!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Train a new model on one or across multiple GPUs.
"""
import argparse
import logging
import math
import random
import sys
import numpy as np
from scipy import io
import torch
from fairseq import (
checkpoint_utils,
distributed_utils,
options,
quantization_utils,
tasks,
utils,
)
from fairseq.data import iterators
from fairseq.logging import meters, metrics, progress_bar
from fairseq.model_parallel.megatron_trainer import MegatronTrainer
from fairseq.trainer import Trainer
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=logging.INFO,
stream=sys.stdout,
)
logger = logging.getLogger("fairseq_cli.train")
def main(args):
utils.import_user_module(args)
assert (
args.max_tokens is not None or args.max_sentences is not None
), "Must specify batch size either with --max-tokens or --max-sentences"
metrics.reset()
#np.random.seed(args.seed)
#utils.set_torch_seed(args.seed)
#torch.manual_seed(args.seed)
#torch.cuda.manual_seed(args.seed)
#print("new seed");
seed = args.seed
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed) # if you are using multi-GPU.
np.random.seed(seed) # Numpy module.
random.seed(seed) # Python random module.
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
#print(torch.randint(0, 20, (20,)));exit()
if distributed_utils.is_master(args):
checkpoint_utils.verify_checkpoint_directory(args.save_dir)
# Print args
logger.info(args)
#print('haha111')
#print(tasks)
# Setup task, e.g., translation, language modeling, etc.
task = tasks.setup_task(args) #fairseq.tasks.translation.TranslationTask
#print(task)
#print(args.valid_subset): 'valid'
# Load valid dataset (we load training data below, based on the latest checkpoint)
for valid_sub_split in args.valid_subset.split(","):
task.load_dataset(valid_sub_split, combine=False, epoch=1)
# Build model and criterion
model = task.build_model(args)
#exit(1)
criterion = task.build_criterion(args)
logger.info(model)
logger.info("task: {} ({})".format(args.task, task.__class__.__name__))
logger.info("model: {} ({})".format(args.arch, model.__class__.__name__))
logger.info(
"criterion: {} ({})".format(args.criterion, criterion.__class__.__name__)
)
logger.info(
"num. model params: {} (num. trained: {})".format(
sum(p.numel() for p in model.parameters()),
sum(p.numel() for p in model.parameters() if p.requires_grad),
)
)
# (optionally) Configure quantization
if args.quantization_config_path is not None:
quantizer = quantization_utils.Quantizer(
config_path=args.quantization_config_path,
max_epoch=args.max_epoch,
max_update=args.max_update,
)
else:
quantizer = None
# Build trainer
if args.model_parallel_size == 1:
trainer = Trainer(args, task, model, criterion, quantizer)
else:
trainer = MegatronTrainer(args, task, model, criterion)
logger.info(
"training on {} devices (GPUs/TPUs)".format(args.distributed_world_size)
)
logger.info(
"max tokens per GPU = {} and max sentences per GPU = {}".format(
args.max_tokens, args.max_sentences
)
)
# Load the latest checkpoint if one is available and restore the
# corresponding train iterator
extra_state, epoch_itr = checkpoint_utils.load_checkpoint(args, trainer)
#print(epoch_itr);fairseq.data.iterators.EpochBatchIterator object at 0x7f7fe00380f0
#
#epoch_itr is the train iterator
# Train until the learning rate gets too small
max_epoch = args.max_epoch or math.inf
lr = trainer.get_lr()
train_meter = meters.StopwatchMeter()
train_meter.start()
while lr > args.min_lr and epoch_itr.next_epoch_idx <= max_epoch:
# train for one epoch
valid_losses, should_stop = train(args, trainer, task, epoch_itr)
if should_stop:
break
# only use first validation loss to update the learning rate
lr = trainer.lr_step(epoch_itr.epoch, valid_losses[0])
epoch_itr = trainer.get_train_iterator(
epoch_itr.next_epoch_idx,
# sharded data: get train iterator for next epoch
load_dataset=task.has_sharded_data("train"),
)
train_meter.stop()
logger.info("done training in {:.1f} seconds".format(train_meter.sum))
def should_stop_early(args, valid_loss):
# skip check if no validation was done in the current epoch
if valid_loss is None:
return False
if args.patience <= 0:
return False
def is_better(a, b):
return a > b if args.maximize_best_checkpoint_metric else a < b
prev_best = getattr(should_stop_early, "best", None)
if prev_best is None or is_better(valid_loss, prev_best):
should_stop_early.best = valid_loss
should_stop_early.num_runs = 0
return False
else:
should_stop_early.num_runs += 1
if should_stop_early.num_runs >= args.patience:
logger.info(
"early stop since valid performance hasn't improved for last {} runs".format(
args.patience
)
)
return True
else:
return False
def extract_id(s):
cand = s.split('/')
for c in cand:
        if 'transformer' in c or 'cnn' in c:
return c
print("error path!")
exit()
return 'error'
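# Illustrative example (the path below is hypothetical): for a save directory such as
#   "checkpoints/transformer_iwslt_de_en/ckpt"
# extract_id returns "transformer_iwslt_de_en", i.e. the first path component whose
# name contains "transformer" or "cnn"; any other layout terminates the run.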
@metrics.aggregate("train")
def train(args, trainer, task, epoch_itr):
"""Train the model for one epoch and return validation losses."""
# Initialize data iterator
itr = epoch_itr.next_epoch_itr(
fix_batches_to_gpus=args.fix_batches_to_gpus,
shuffle=(epoch_itr.next_epoch_idx > args.curriculum),
)
update_freq = (
args.update_freq[epoch_itr.epoch - 1]
if epoch_itr.epoch <= len(args.update_freq)
else args.update_freq[-1]
)
itr = iterators.GroupedIterator(itr, update_freq)
if getattr(args, "tpu", False):
itr = utils.tpu_data_loader(itr)
#print(itr);exit()
progress = progress_bar.progress_bar(
itr,
log_format=args.log_format,
log_interval=args.log_interval,
epoch=epoch_itr.epoch,
tensorboard_logdir=(
args.tensorboard_logdir if distributed_utils.is_master(args) else None
),
default_log_format=("tqdm" if not args.no_progress_bar else "simple"),
)
trainer.begin_epoch(epoch_itr.epoch)
valid_subsets = args.valid_subset.split(",")
should_stop = False
num_updates = trainer.get_num_updates()
    train_nll_loss = []
    train_loss = []
    # guard against the epoch ending before the first logging interval,
    # in which case `stats` (set inside the loop below) would be undefined
    stats = {}
#print(progress);exit()
#t = 1
for i, samples in enumerate(progress):
#if i==0:
# print(samples)
#print('samples',samples[0]['net_input']['src_tokens']);exit()
#sample is a list len=1
#sample[0] is a dict keys='id', 'nsentences', 'ntokens', 'net_input', 'target'
#id: a 1D tensor (192)
#nsentences: 192
#ntokens: 2931
#net_input: a dict, keys='src_tokens', 'src_lengths', 'prev_output_tokens'
        #'src_tokens': a 2D tensor (192, 21), appears to be (batch, time);
        #'src_lengths': a 1D tensor (192), all values are 21
#'prev_output_tokens': a 2D tensor(192,16)
#target: a 2D tensor(192,16)
with metrics.aggregate("train_inner"), torch.autograd.profiler.record_function(
"train_step-%d" % i
):
log_output = trainer.train_step(samples) #training
#print(log_output);exit()
#1train_nll_loss.append(log_output['nll_loss'])
#1train_loss.append(log_output['loss'])
#print("{} {:.3f} {:.3f}".format(t, log_output['nll_loss'],log_output['loss']))
#t += 1
#if(t>=100):
# exit()
if log_output is not None: # not OOM, overflow, ...
# log mid-epoch stats
num_updates = trainer.get_num_updates()
if num_updates % args.log_interval == 0:
stats = get_training_stats(metrics.get_smoothed_values("train_inner"))
progress.log(stats, tag="train_inner", step=num_updates)
# reset mid-epoch stats after each log interval
# the end-of-epoch stats will still be preserved
metrics.reset_meters("train_inner")
end_of_epoch = not itr.has_next()
valid_losses, should_stop = validate_and_save(
args, trainer, task, epoch_itr, valid_subsets, end_of_epoch
)
if should_stop:
break
#print(args.best_checkpoint_metric)
d = {}
d['type'] = 0
#d['train_nll_loss_list'] = np.array(train_nll_loss)
#d['train_loss_list'] = np.array(train_loss)
    record_item = ['nll_loss', 'loss', 'bleu', 'ppl']
    for item in record_item:
if item in stats.keys():
if stats[item]:
d['train'+"_"+item] = stats[item]
file = 'statistics/{}/train_loss_{}.mat'.format(extract_id(args.save_dir),epoch_itr.epoch)
io.savemat(file,d)
# log end-of-epoch stats
logger.info("end of epoch {} (average epoch stats below)".format(epoch_itr.epoch))
stats = get_training_stats(metrics.get_smoothed_values("train"))
progress.print(stats, tag="train", step=num_updates)
    #print('valid', valid_losses)  # BLEU values
#print(stats);
#OrderedDict([('loss', 9.738), ('nll_loss', 9.141), ('ppl', 564.38),
#('wps', 15393.4), ('ups', 4.29), ('wpb', 3586.8), ('bsz', 145.5), ('num_updates', 1101),
#('lr', 0.000137625), ('gnorm', 1.788), ('train_wall', 176.0), ('wall', 261.0)])
# reset epoch-level meters
metrics.reset_meters("train")
return valid_losses, should_stop
def validate_and_save(args, trainer, task, epoch_itr, valid_subsets, end_of_epoch):
num_updates = trainer.get_num_updates()
do_save = (
args.save_interval_updates > 0
and num_updates > 0
and num_updates % args.save_interval_updates == 0
and num_updates >= args.validate_after_updates
) or (end_of_epoch and epoch_itr.epoch % args.save_interval == 0)
do_validate = (
(not end_of_epoch and do_save) # validate during mid-epoch saves
or (end_of_epoch and epoch_itr.epoch % args.validate_interval == 0)
or (args.validate_interval_updates > 0 and num_updates > 0 and num_updates % args.validate_interval_updates == 0)
) and not args.disable_validation
# Validate
valid_losses = [None]
if do_validate:
valid_losses = validate(args, trainer, task, epoch_itr, valid_subsets)
# Stopping conditions
max_update = args.max_update or math.inf
should_stop = (
should_stop_early(args, valid_losses[0])
or trainer.get_num_updates() >= max_update
or (
args.stop_time_hours > 0
and trainer.cumulative_training_time() / (60 * 60) > args.stop_time_hours
)
)
# Save checkpoint
if do_save or should_stop:
logger.info("begin save checkpoint")
checkpoint_utils.save_checkpoint(args, trainer, epoch_itr, valid_losses[0])
return valid_losses, should_stop
def get_training_stats(stats):
stats["wall"] = round(metrics.get_meter("default", "wall").elapsed_time, 0)
return stats
def validate(args, trainer, task, epoch_itr, subsets):
"""Evaluate the model on the validation set(s) and return the losses."""
if args.fixed_validation_seed is not None:
# set fixed seed for every validation
utils.set_torch_seed(args.fixed_validation_seed)
valid_losses = []
for subset in subsets:
logger.info('begin validation on "{}" subset'.format(subset))
# Initialize data iterator
itr = trainer.get_valid_iterator(subset).next_epoch_itr(shuffle=False)
if getattr(args, "tpu", False):
itr = utils.tpu_data_loader(itr)
progress = progress_bar.progress_bar(
itr,
log_format=args.log_format,
log_interval=args.log_interval,
epoch=epoch_itr.epoch,
prefix=f"valid on '{subset}' subset",
tensorboard_logdir=(
args.tensorboard_logdir if distributed_utils.is_master(args) else None
),
default_log_format=("tqdm" if not args.no_progress_bar else "simple"),
)
# create a new root metrics aggregator so validation metrics
# don't pollute other aggregators (e.g., train meters)
with metrics.aggregate(new_root=True) as agg:
for sample in progress:
trainer.valid_step(sample)
# log validation stats
stats = get_valid_stats(args, trainer, agg.get_smoothed_values())
progress.print(stats, tag=subset, step=trainer.get_num_updates())
d = {}
d['type'] = 1
        record_item = ['nll_loss', 'loss', 'bleu', 'ppl']
        for item in record_item:
if item in stats.keys():
if stats[item]:
d['valid'+"_"+item] = stats[item]
file = 'statistics/{}/valid_loss_{}.mat'.format(extract_id(args.save_dir),epoch_itr.epoch)
io.savemat(file,d)
if hasattr(args, "best_checkpoint_metric"):
valid_losses.append(stats[args.best_checkpoint_metric])
else:
valid_losses.append(-1)
return valid_losses
def get_valid_stats(args, trainer, stats):
stats["num_updates"] = trainer.get_num_updates()
if hasattr(checkpoint_utils.save_checkpoint, "best"):
key = "best_{0}".format(args.best_checkpoint_metric)
best_function = max if args.maximize_best_checkpoint_metric else min
stats[key] = best_function(
checkpoint_utils.save_checkpoint.best, stats[args.best_checkpoint_metric]
)
return stats
def cli_main(modify_parser=None):
parser = options.get_training_parser()
args = options.parse_args_and_arch(parser, modify_parser=modify_parser)
#print(args.save_dir)
prefix = extract_id(args.save_dir)
import os
path = os.path.join('statistics', prefix)
#if path.find("big")<0 and 1:
os.mkdir(path)
if args.profile:
with torch.cuda.profiler.profile():
with torch.autograd.profiler.emit_nvtx():
distributed_utils.call_main(args, main)
else:
distributed_utils.call_main(args, main)
if __name__ == "__main__":
cli_main()
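# Hedged usage sketch (added for illustration, not part of the original script): the
# per-epoch .mat files written by train()/validate() above can be inspected with scipy.
# The model_id argument stands for whatever extract_id() derived from --save-dir.
def _load_epoch_stats(model_id, epoch):
    stats = io.loadmat('statistics/{}/train_loss_{}.mat'.format(model_id, epoch))
    # keys follow the 'train_<metric>' naming used in train(); validate() writes
    # matching 'valid_loss_<epoch>.mat' files with 'valid_<metric>' keys
    return {k: v for k, v in stats.items() if not k.startswith('__')}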
| 14,942 | 33.913551 | 121 | py |
torpido | torpido-master/gym/envs/parameter_tuning/train_deep_cnn.py | from __future__ import print_function
import gym
import random
from gym import spaces
import numpy as np
from keras.datasets import cifar10, mnist, cifar100
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.optimizers import SGD
from keras.utils import np_utils
from keras.regularizers import WeightRegularizer
from keras import backend as K
from itertools import cycle
import math
class CNNClassifierTraining(gym.Env):
"""Environment where agent learns to select training parameters and
architecture of a deep convolutional neural network
Training parameters that the agent can adjust are learning
rate, learning rate decay, momentum, batch size, L1 / L2 regularization.
Agent can select up to 5 cnn layers and up to 2 fc layers.
Agent is provided with feedback on validation accuracy, as well as on
the size of a dataset.
"""
metadata = {"render.modes": ["human"]}
def __init__(self, natural=False):
"""
Initialize environment
"""
# I use array of len 1 to store constants (otherwise there were some errors)
self.action_space = spaces.Tuple((
spaces.Box(-5.0, 0.0, 1), # learning rate
spaces.Box(-7.0, -2.0, 1), # decay
spaces.Box(-5.0, 0.0, 1), # momentum
spaces.Box(2, 8, 1), # batch size
spaces.Box(-6.0, 1.0, 1), # l1 reg
spaces.Box(-6.0, 1.0, 1), # l2 reg
spaces.Box(0.0, 1.0, (5, 2)), # convolutional layer parameters
spaces.Box(0.0, 1.0, (2, 2)), # fully connected layer parameters
))
# observation features, in order: num of instances, num of labels,
# validation accuracy after training with given parameters
self.observation_space = spaces.Box(-1e5, 1e5, 2) # validation accuracy
# Start the first game
self._reset()
def _step(self, action):
"""
Perform some action in the environment
"""
assert self.action_space.contains(action)
lr, decay, momentum, batch_size, l1, l2, convs, fcs = action
# map ranges of inputs
lr = (10.0 ** lr[0]).astype('float32')
decay = (10.0 ** decay[0]).astype('float32')
momentum = (10.0 ** momentum[0]).astype('float32')
batch_size = int(2 ** batch_size[0])
l1 = (10.0 ** l1[0]).astype('float32')
l2 = (10.0 ** l2[0]).astype('float32')
"""
names = ["lr", "decay", "mom", "batch", "l1", "l2"]
values = [lr, decay, momentum, batch_size, l1, l2]
for n,v in zip(names, values):
print(n,v)
"""
diverged, acc = self.train_blueprint(lr, decay, momentum, batch_size, l1, l2, convs, fcs)
# save best validation. If diverged, acc is zero
if acc > self.best_val:
self.best_val = acc
self.previous_acc = acc
self.epoch_idx += 1
done = self.epoch_idx == 10
reward = self.best_val
        # as the number of labels increases, the learning problem becomes
        # more difficult for a fixed dataset size. To keep the agent from
        # ignoring more complex datasets (where accuracy is low) and
        # concentrating on simple cases that bring the bulk of the reward,
        # the reward is normalized by the number of labels in the dataset
reward *= self.nb_classes
# formula below encourages higher best validation
reward += reward ** 2
return self._get_obs(), reward, done, {}
def _render(self, mode="human", close=False):
if close:
return
print(">> Step ", self.epoch_idx, "best validation:", self.best_val)
def _get_obs(self):
"""
Observe the environment. Is usually used after the step is taken
"""
# observation as per observation space
return np.array([self.nb_inst,
self.previous_acc])
def data_mix(self):
# randomly choose dataset
dataset = random.choice(['mnist', 'cifar10', 'cifar100']) #
n_labels = 10
if dataset == "mnist":
data = mnist.load_data()
if dataset == "cifar10":
data = cifar10.load_data()
if dataset == "cifar100":
data = cifar100.load_data()
n_labels = 100
# Choose dataset size. This affects regularization needed
r = np.random.rand()
# not using full dataset to make regularization more important and
# speed up testing a little bit
data_size = int(2000 * (1 - r) + 40000 * r)
        # I do not use test data for validation, but the last 10000 instances in the dataset,
        # so that trained models can be compared to results in the literature
(CX, CY), (CXt, CYt) = data
if dataset == "mnist":
CX = np.expand_dims(CX, axis=1)
data = CX[:data_size], CY[:data_size], CX[-10000:], CY[-10000:]
return data, n_labels
def _reset(self):
self.generate_data()
# initial accuracy values
self.best_val = 0.0
self.previous_acc = 0.0
self.epoch_idx = 0
return self._get_obs()
def generate_data(self):
self.data, self.nb_classes = self.data_mix()
# zero index corresponds to training inputs
self.nb_inst = len(self.data[0])
def train_blueprint(self, lr, decay, momentum, batch_size, l1, l2, convs, fcs):
X, Y, Xv, Yv = self.data
nb_classes = self.nb_classes
reg = WeightRegularizer()
# a hack to make regularization variable
reg.l1 = K.variable(0.0)
reg.l2 = K.variable(0.0)
# input square image dimensions
img_rows, img_cols = X.shape[-1], X.shape[-1]
img_channels = X.shape[1]
# convert class vectors to binary class matrices
Y = np_utils.to_categorical(Y, nb_classes)
Yv = np_utils.to_categorical(Yv, nb_classes)
# here definition of the model happens
model = Sequential()
has_convs = False
# create all convolutional layers
for val, use in convs:
# Size of convolutional layer
cnvSz = int(val * 127) + 1
if use < 0.5:
continue
has_convs = True
model.add(Convolution2D(cnvSz, 3, 3, border_mode='same',
input_shape=(img_channels, img_rows, img_cols),
W_regularizer=reg,
b_regularizer=reg))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
# model.add(Dropout(0.25))
if has_convs:
model.add(Flatten())
else:
            model.add(Flatten(input_shape=(img_channels, img_rows, img_cols)))  # avoid exceptions when there are no conv layers
# create all fully connected layers
for val, use in fcs:
if use < 0.5:
continue
# choose fully connected layer size
densesz = int(1023 * val) + 1
model.add(Dense(densesz,
W_regularizer=reg,
b_regularizer=reg))
model.add(Activation('relu'))
# model.add(Dropout(0.5))
model.add(Dense(nb_classes,
W_regularizer=reg,
b_regularizer=reg))
model.add(Activation('softmax'))
# let's train the model using SGD + momentum (how original).
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy',
optimizer=sgd,
metrics=['accuracy'])
X = X.astype('float32')
Xv = Xv.astype('float32')
X /= 255
Xv /= 255
model = model
sgd = sgd
reg = reg
# set parameters of training step
sgd.lr.set_value(lr)
sgd.decay.set_value(decay)
sgd.momentum.set_value(momentum)
reg.l1.set_value(l1)
reg.l2.set_value(l2)
# train model for one epoch_idx
H = model.fit(X, Y,
batch_size=int(batch_size),
nb_epoch=10,
shuffle=True)
diverged = math.isnan(H.history['loss'][-1])
acc = 0.0
if not diverged:
_, acc = model.evaluate(Xv, Yv)
return diverged, acc
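# Hedged usage sketch (illustration only, not part of the original module). It drives
# the environment above through one full episode by calling the underscore methods the
# class itself defines; whether a wrapped gym-registered version exists is not assumed.
def _example_episode():
    env = CNNClassifierTraining()
    observation = env._reset()
    done, reward = False, 0.0
    while not done:
        action = env.action_space.sample()  # random training params + architecture bits
        observation, reward, done, _ = env._step(action)
        env._render()
    return observation, reward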
| 8,578 | 29.859712 | 110 | py |
torpido | torpido-master/gym/envs/parameter_tuning/convergence.py | from __future__ import print_function
import gym
import random
from gym import spaces
import numpy as np
from keras.datasets import cifar10, mnist, cifar100
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.optimizers import SGD
from keras.utils import np_utils
from keras.regularizers import WeightRegularizer
from keras import backend as K
from itertools import cycle
import math
class ConvergenceControl(gym.Env):
"""Environment where agent learns to tune parameters of training
DURING the training of the neural network to improve its convergence /
performance on the validation set.
Parameters can be tuned after every epoch. Parameters tuned are learning
rate, learning rate decay, momentum, batch size, L1 / L2 regularization.
Agent is provided with feedback on validation accuracy, as well as on
the size of dataset and number of classes, and some coarse description of
architecture being optimized.
    The closest publication that I am aware of that tries to solve a similar
    environment is
http://research.microsoft.com/pubs/259048/daniel2016stepsizecontrol.pdf
"""
metadata = {"render.modes": ["human"]}
def __init__(self, natural=False):
"""
Initialize environment
"""
# I use array of len 1 to store constants (otherwise there were some errors)
self.action_space = spaces.Tuple((
spaces.Box(-5.0,0.0, 1), # learning rate
spaces.Box(-7.0,-2.0, 1), # decay
spaces.Box(-5.0,0.0, 1), # momentum
spaces.Box(2, 8, 1), # batch size
spaces.Box(-6.0,1.0, 1), # l1 reg
spaces.Box(-6.0,1.0, 1), # l2 reg
))
# observation features, in order: num of instances, num of labels,
# number of filter in part A / B of neural net, num of neurons in
# output layer, validation accuracy after training with given
# parameters
self.observation_space = spaces.Box(-1e5,1e5, 6) # validation accuracy
# Start the first game
self._reset()
def _step(self, action):
"""
Perform some action in the environment
"""
assert self.action_space.contains(action)
lr, decay, momentum, batch_size, l1, l2 = action;
# map ranges of inputs
lr = (10.0 ** lr[0]).astype('float32')
decay = (10.0 ** decay[0]).astype('float32')
momentum = (10.0 ** momentum[0]).astype('float32')
batch_size = int( 2 ** batch_size[0] )
l1 = (10.0 ** l1[0]).astype('float32')
l2 = (10.0 ** l2[0]).astype('float32')
"""
names = ["lr", "decay", "mom", "batch", "l1", "l2"]
values = [lr, decay, momentum, batch_size, l1, l2]
for n,v in zip(names, values):
print(n,v)
"""
X,Y,Xv,Yv = self.data
# set parameters of training step
self.sgd.lr.set_value(lr)
self.sgd.decay.set_value(decay)
self.sgd.momentum.set_value(momentum)
self.reg.l1.set_value(l1)
self.reg.l2.set_value(l2)
# train model for one epoch_idx
H = self.model.fit(X, Y,
batch_size=int(batch_size),
nb_epoch=1,
shuffle=True)
_, acc = self.model.evaluate(Xv,Yv)
# save best validation
if acc > self.best_val:
self.best_val = acc
self.previous_acc = acc;
self.epoch_idx = self.epoch_idx + 1
diverged = math.isnan( H.history['loss'][-1] )
done = self.epoch_idx == 20 or diverged
if diverged:
""" maybe not set to a very large value; if you get something nice,
but then diverge, maybe it is not too bad
"""
reward = -100.0
else:
reward = self.best_val
        # as the number of labels increases, the learning problem becomes
        # more difficult for a fixed dataset size. To keep the agent from
        # ignoring more complex datasets (where accuracy is low) and
        # concentrating on simple cases that bring the bulk of the reward,
        # I normalize by the number of labels in the dataset
reward = reward * self.nb_classes
# formula below encourages higher best validation
reward = reward + reward ** 2
return self._get_obs(), reward, done, {}
def _render(self, mode="human", close=False):
if close:
return
print(">> Step ",self.epoch_idx,"best validation:", self.best_val)
def _get_obs(self):
"""
Observe the environment. Is usually used after the step is taken
"""
# observation as per observation space
return np.array([self.nb_classes,
self.nb_inst,
self.convAsz,
self.convBsz,
self.densesz,
self.previous_acc])
def data_mix(self):
# randomly choose dataset
dataset = random.choice(['mnist', 'cifar10', 'cifar100'])#
n_labels = 10
if dataset == "mnist":
data = mnist.load_data()
if dataset == "cifar10":
data = cifar10.load_data()
if dataset == "cifar100":
data = cifar100.load_data()
n_labels = 100
# Choose dataset size. This affects regularization needed
r = np.random.rand()
# not using full dataset to make regularization more important and
# speed up testing a little bit
data_size = int( 2000 * (1-r) + 40000 * r )
        # I do not use test data for validation, but the last 10000 instances in the dataset,
        # so that trained models can be compared to results in the literature
(CX, CY), (CXt, CYt) = data
if dataset == "mnist":
CX = np.expand_dims(CX, axis=1)
data = CX[:data_size], CY[:data_size], CX[-10000:], CY[-10000:];
return data, n_labels
def _reset(self):
reg = WeightRegularizer()
# a hack to make regularization variable
reg.l1 = K.variable(0.0)
reg.l2 = K.variable(0.0)
data, nb_classes = self.data_mix()
X, Y, Xv, Yv = data
# input square image dimensions
img_rows, img_cols = X.shape[-1], X.shape[-1]
img_channels = X.shape[1]
# save number of classes and instances
self.nb_classes = nb_classes
self.nb_inst = len(X)
# convert class vectors to binary class matrices
Y = np_utils.to_categorical(Y, nb_classes)
Yv = np_utils.to_categorical(Yv, nb_classes)
# here definition of the model happens
model = Sequential()
        # double True for increased probability of conv layers
if random.choice([True, True, False]):
# Choose convolution #1
self.convAsz = random.choice([32,64,128])
model.add(Convolution2D(self.convAsz, 3, 3, border_mode='same',
input_shape=(img_channels, img_rows, img_cols),
W_regularizer = reg,
b_regularizer = reg))
model.add(Activation('relu'))
model.add(Convolution2D(self.convAsz, 3, 3,
W_regularizer = reg,
b_regularizer = reg))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
# Choose convolution size B (if needed)
self.convBsz = random.choice([0,32,64])
if self.convBsz > 0:
model.add(Convolution2D(self.convBsz, 3, 3, border_mode='same',
W_regularizer = reg,
b_regularizer = reg))
model.add(Activation('relu'))
model.add(Convolution2D(self.convBsz, 3, 3,
W_regularizer = reg,
b_regularizer = reg))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
else:
model.add(Flatten(input_shape=(img_channels, img_rows, img_cols)))
self.convAsz = 0
self.convBsz = 0
# choose fully connected layer size
self.densesz = random.choice([256,512,762])
model.add(Dense(self.densesz,
W_regularizer = reg,
b_regularizer = reg))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(nb_classes,
W_regularizer = reg,
b_regularizer = reg))
model.add(Activation('softmax'))
# let's train the model using SGD + momentum (how original).
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy',
optimizer=sgd,
metrics=['accuracy'])
X = X.astype('float32')
Xv = Xv.astype('float32')
X /= 255
Xv /= 255
self.data = (X,Y,Xv,Yv)
self.model = model
self.sgd = sgd
# initial accuracy values
self.best_val = 0.0
self.previous_acc = 0.0
self.reg = reg
self.epoch_idx = 0
return self._get_obs()
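# Hedged usage sketch (illustration only, not part of the original module): one episode
# trains a randomly drawn network for up to 20 epochs while the agent re-tunes the
# learning rate, decay, momentum, batch size and L1/L2 strengths after every epoch.
def _example_episode():
    env = ConvergenceControl()
    observation = env._reset()
    done, reward = False, 0.0
    while not done:
        action = env.action_space.sample()  # per-epoch hyperparameter update
        observation, reward, done, _ = env._step(action)
        env._render()
    return observation, reward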
| 9,944 | 31.713816 | 84 | py |
torpido | torpido-master/utils/gcn/layers.py | from gcn.inits import *
import tensorflow as tf
flags = tf.app.flags
FLAGS = flags.FLAGS
# global unique layer ID dictionary for layer name assignment
_LAYER_UIDS = {}
def get_layer_uid(layer_name=''):
"""Helper function, assigns unique layer IDs."""
if layer_name not in _LAYER_UIDS:
_LAYER_UIDS[layer_name] = 1
return 1
else:
_LAYER_UIDS[layer_name] += 1
return _LAYER_UIDS[layer_name]
def sparse_dropout(x, keep_prob, noise_shape):
"""Dropout for sparse tensors."""
random_tensor = keep_prob
random_tensor += tf.random_uniform(noise_shape)
dropout_mask = tf.cast(tf.floor(random_tensor), dtype=tf.bool)
pre_out = tf.sparse_retain(x, dropout_mask)
return pre_out * (1./keep_prob)
def dot(x, y, sparse=False):
"""Wrapper for tf.matmul (sparse vs dense)."""
if sparse:
res = tf.sparse_tensor_dense_matmul(x, y)
else:
res = tf.matmul(x, y)
return res
class Layer(object):
"""Base layer class. Defines basic API for all layer objects.
Implementation inspired by keras (http://keras.io).
# Properties
name: String, defines the variable scope of the layer.
logging: Boolean, switches Tensorflow histogram logging on/off
# Methods
_call(inputs): Defines computation graph of layer
(i.e. takes input, returns output)
__call__(inputs): Wrapper for _call()
_log_vars(): Log all variables
"""
def __init__(self, **kwargs):
allowed_kwargs = {'name', 'logging'}
for kwarg in kwargs.keys():
assert kwarg in allowed_kwargs, 'Invalid keyword argument: ' + kwarg
name = kwargs.get('name')
if not name:
layer = self.__class__.__name__.lower()
name = layer + '_' + str(get_layer_uid(layer))
self.name = name
self.vars = {}
logging = kwargs.get('logging', False)
self.logging = logging
self.sparse_inputs = False
def _call(self, inputs):
return inputs
def __call__(self, inputs):
with tf.name_scope(self.name):
if self.logging and not self.sparse_inputs:
tf.summary.histogram(self.name + '/inputs', inputs)
outputs = self._call(inputs)
if self.logging:
tf.summary.histogram(self.name + '/outputs', outputs)
return outputs
def _log_vars(self):
for var in self.vars:
tf.summary.histogram(self.name + '/vars/' + var, self.vars[var])
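# Hedged sketch (added for illustration, not part of the original module): a minimal
# subclass showing the Layer API documented above -- override _call() and let __call__
# handle name scoping and optional histogram logging. The name "Scale" is ours.
class Scale(Layer):
    """Multiplies its input by a fixed scalar; has no trainable variables."""
    def __init__(self, factor=2.0, **kwargs):
        super(Scale, self).__init__(**kwargs)
        self.factor = factor
    def _call(self, inputs):
        return self.factor * inputs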
class Dense(Layer):
"""Dense layer."""
def __init__(self, input_dim, output_dim, placeholders, dropout=0., sparse_inputs=False,
act=tf.nn.relu, bias=False, featureless=False, **kwargs):
super(Dense, self).__init__(**kwargs)
if dropout:
self.dropout = placeholders['dropout']
else:
self.dropout = 0.
self.act = act
self.sparse_inputs = sparse_inputs
self.featureless = featureless
self.bias = bias
# helper variable for sparse dropout
self.num_features_nonzero = placeholders['num_features_nonzero']
with tf.variable_scope(self.name + '_vars'):
self.vars['weights'] = glorot([input_dim, output_dim],
name='weights')
if self.bias:
self.vars['bias'] = zeros([output_dim], name='bias')
if self.logging:
self._log_vars()
def _call(self, inputs):
x = inputs
# dropout
if self.sparse_inputs:
x = sparse_dropout(x, 1-self.dropout, self.num_features_nonzero)
else:
x = tf.nn.dropout(x, 1-self.dropout)
# transform
output = dot(x, self.vars['weights'], sparse=self.sparse_inputs)
# bias
if self.bias:
output += self.vars['bias']
return self.act(output)
class GraphConvolution(Layer):
"""Graph convolution layer."""
def __init__(self, input_dim, output_dim, placeholders, dropout=0.,
sparse_inputs=False, act=tf.nn.relu, bias=False,
featureless=False, **kwargs):
super(GraphConvolution, self).__init__(**kwargs)
if dropout:
self.dropout = placeholders['dropout']
else:
self.dropout = 0.
self.act = act
self.support = placeholders['support']
self.sparse_inputs = sparse_inputs
self.featureless = featureless
self.bias = bias
# helper variable for sparse dropout
self.num_features_nonzero = placeholders['num_features_nonzero']
with tf.variable_scope(self.name + '_vars'):
for i in range(len(self.support)):
self.vars['weights_' + str(i)] = glorot([input_dim, output_dim],
name='weights_' + str(i))
if self.bias:
self.vars['bias'] = zeros([output_dim], name='bias')
if self.logging:
self._log_vars()
def _call(self, inputs):
x = inputs
# dropout
if self.sparse_inputs:
x = sparse_dropout(x, 1-self.dropout, self.num_features_nonzero)
else:
x = tf.nn.dropout(x, 1-self.dropout)
# convolve
supports = list()
for i in range(len(self.support)):
if not self.featureless:
pre_sup = dot(x, self.vars['weights_' + str(i)],
sparse=self.sparse_inputs)
else:
pre_sup = self.vars['weights_' + str(i)]
support = dot(self.support[i], pre_sup, sparse=True)
supports.append(support)
output = tf.add_n(supports)
# bias
if self.bias:
output += self.vars['bias']
return self.act(output)
| 5,886 | 30.148148 | 92 | py |
BDI | BDI-main/utils.py | import os
import re
import requests
import numpy as np
import functools
from jax.experimental import optimizers
import jax
import jax.config
from jax.config import config as jax_config
jax_config.update('jax_enable_x64', True) # for numerical stability, can disable if not an issue
from jax import numpy as jnp
from jax import scipy as sp
import neural_tangents as nt
from neural_tangents import stax
from jax import random
import copy
d = None
def load_d(task):
global d
d = np.load("npy/" + task + ".npy", allow_pickle=True)
weights = None
def load_weights(task_name, y, gamma):
global weights
#if task_name in ['TFBind8-Exact-v0', 'GFP-Transformer-v0','UTR-ResNet-v0']:
# index = np.argsort(y, axis=0).squeeze()
# anchor = y[index][-10]
# tmp = y>=anchor
# weights = tmp/np.sum(tmp)
#elif task_name in ['Superconductor-RandomForest-v0', 'HopperController-Exact-v0',
# 'AntMorphology-Exact-v0', 'DKittyMorphology-Exact-v0']:
# tmp = np.exp(gamma*y)
# weights = tmp/np.sum(tmp)
tmp = np.exp(gamma*y)
weights = tmp/np.sum(tmp)
print("weights", np.max(weights), np.min(weights))
y_min = None
y_max = None
def load_y(task_name):
global y_min
global y_max
dic2y = np.load("npy/dic2y.npy", allow_pickle=True).item()
y_min, y_max = dic2y[task_name]
def process_data(task, task_name):
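    """Turns a design-bench task into flat, normalized arrays.
    Discrete tasks (TFBind8, GFP, UTR) are first mapped to logits; continuous tasks are
    used as-is. Returns (x, y, shape0), where shape0 is the pre-flattening design shape
    needed later to undo the reshape in evaluate_sample.
    """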
if task_name in ['TFBind8-Exact-v0', 'GFP-Transformer-v0','UTR-ResNet-v0']:
task_x = task.to_logits(task.x)
elif task_name in ['Superconductor-RandomForest-v0', 'HopperController-Exact-v0',
'AntMorphology-Exact-v0', 'DKittyMorphology-Exact-v0']:
task_x = copy.deepcopy(task.x)
task_x = task.normalize_x(task_x)
shape0 = task_x.shape
task_x = task_x.reshape(task_x.shape[0], -1)
task_y = task.normalize_y(task.y)
return task_x, task_y, shape0
def evaluate_sample(task, x_init, task_name, shape0):
if task_name in ['TFBind8-Exact-v0', 'GFP-Transformer-v0','UTR-ResNet-v0']:
X1 = x_init.reshape(-1, shape0[1], shape0[2])
elif task_name in ['Superconductor-RandomForest-v0', 'HopperController-Exact-v0',
'AntMorphology-Exact-v0', 'DKittyMorphology-Exact-v0']:
X1 = x_init
X1 = task.denormalize_x(X1)
if task_name in ['TFBind8-Exact-v0', 'GFP-Transformer-v0','UTR-ResNet-v0']:
X1 = task.to_integers(X1)
Y1 = task.predict(X1)
max_v = (np.max(Y1)-y_min)/(y_max-y_min)
med_v = (np.median(Y1)-y_min)/(y_max-y_min)
return max_v, med_v
#return np.max(Y1), np.median(Y1)
def make_loss_fn(kernel_fn, mode="distill"):
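    """Builds the (bi-level) distillation objective for the given NTK kernel.
    Every variant scores a candidate via kernel ridge regression: predictions for
    queries q given a fitted set s are K_qs @ (K_ss + reg * tr(K_ss)/n * I)^{-1} y_s.
    'distill' fits on the support point and evaluates a weighted MSE on the offline
    target set (weights from load_weights); 'grad' goes the other way, reusing the
    precomputed full-dataset solve `d` (see load_d); 'both' sums the two losses.
    """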
@jax.jit
def loss_fn_both(x_support, y_support, x_target, y_target, reg=0):
#use support set to compute target set loss
y_support = jax.lax.stop_gradient(y_support)
k_ss = kernel_fn(x_support, x_support)
k_ts = kernel_fn(x_target, x_support)
k_ss_reg = (k_ss + jnp.abs(reg) * jnp.trace(k_ss) * jnp.eye(k_ss.shape[0]) / k_ss.shape[0])
pred = jnp.dot(k_ts, sp.linalg.solve(k_ss_reg, y_support, sym_pos=True))
mse_loss1 = 0.5*jnp.sum(weights*(pred - y_target) ** 2)
#use target set to compute support set loss
#k_tt = kernel_fn(x_target, x_target)
k_st = kernel_fn(x_support, x_target)
#k_tt_reg = (k_tt + jnp.abs(reg) * jnp.trace(k_tt) * jnp.eye(k_tt.shape[0]) / k_tt.shape[0])
#pred = jnp.dot(k_st, sp.linalg.solve(k_tt_reg, y_target, sym_pos=True))
#d = np.load("d.npy", allow_pickle=True)
#pred = jnp.dot(k_st, sp.linalg.solve(k_tt, y_target, sym_pos=True))
pred = jnp.dot(k_st, d)
mse_loss2 = 0.5*jnp.mean((pred - y_support) ** 2)
#merge loss
mse_loss = mse_loss1 + mse_loss2
return mse_loss, mse_loss
@jax.jit
def loss_fn_distill(x_support, y_support, x_target, y_target, reg=1e-6):
y_support = jax.lax.stop_gradient(y_support)
k_ss = kernel_fn(x_support, x_support)
k_ts = kernel_fn(x_target, x_support)
k_ss_reg = (k_ss + jnp.abs(reg) * jnp.trace(k_ss) * jnp.eye(k_ss.shape[0]) / k_ss.shape[0])
pred = jnp.dot(k_ts, sp.linalg.solve(k_ss_reg, y_support, sym_pos=True))
mse_loss = 0.5*jnp.sum(weights*(pred - y_target) ** 2)
return mse_loss, mse_loss
@jax.jit
def loss_fn_grad(x_support, y_support, x_target, y_target, reg=1e-6):
y_support = jax.lax.stop_gradient(y_support)
#k_tt = kernel_fn(x_target, x_target)
#k_tt_reg = (k_tt + jnp.abs(reg) * jnp.trace(k_tt) * jnp.eye(k_tt.shape[0]) / k_tt.shape[0])
k_st = kernel_fn(x_support, x_target)
#d = sp.linalg.solve(k_tt_reg, y_target, sym_pos=True)
#d = np.load("d.npy", allow_pickle=True)
#pred = jnp.dot(k_st, sp.linalg.solve(k_tt, y_target, sym_pos=True))
pred = jnp.dot(k_st, d)
mse_loss = 0.5*jnp.mean((pred - y_support) ** 2)
return mse_loss, mse_loss
if mode == "both":
return loss_fn_both
elif mode == "distill":
return loss_fn_distill
elif mode == "grad":
return loss_fn_grad
def get_update_functions(init_params, kernel_fn, lr, mode="distill"):
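    """Wraps the selected loss in an Adam optimizer (jax.experimental.optimizers) and
    returns (opt_state, get_params, update_fn); update_fn performs one gradient step on
    the distilled point {'x': ..., 'y': ...} with the target batch held fixed.
    """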
opt_init, opt_update, get_params = optimizers.adam(lr)
opt_state = opt_init(init_params)
loss_fn = make_loss_fn(kernel_fn, mode)
grad_loss = jax.grad(lambda params, x_target, y_target: loss_fn(params['x'],
params['y'],
x_target,
y_target), has_aux=True)
@jax.jit
def update_fn(step, opt_state, params, x_target, y_target):
dparams, aux = grad_loss(params, x_target, y_target)
return opt_update(step, dparams, opt_state), aux
return opt_state, get_params, update_fn
| 5,913 | 38.691275 | 100 | py |
BDI | BDI-main/BDI.py | import functools
from jax.experimental import optimizers
import jax
import jax.config
from jax.config import config as jax_config
jax_config.update('jax_enable_x64', True) # for numerical stability, can disable if not an issue
from jax import numpy as jnp
from jax import scipy as sp
import numpy as np
import neural_tangents as nt
from neural_tangents import stax
from jax import random
from utils import *
import argparse
import design_bench
import copy
import time
parser = argparse.ArgumentParser(description="bi-level sequence learning")
parser.add_argument('--mode', choices=['distill', 'grad', 'both'], type=str, default='both')
parser.add_argument('--task', choices=['TFBind8-Exact-v0', 'Superconductor-RandomForest-v0',
'GFP-Transformer-v0', 'UTR-ResNet-v0', 'HopperController-Exact-v0',
'AntMorphology-Exact-v0', 'DKittyMorphology-Exact-v0'], type=str,
default='TFBind8-Exact-v0')
parser.add_argument('--topk', default=128, type=int)
parser.add_argument('--label', default=10.0, type=float)
parser.add_argument('--gamma', default=0.0, type=float)
parser.add_argument('--outer_lr', default=1e-1, type=float)
parser.add_argument('--Tmax', default=200, type=int)
parser.add_argument('--interval', default=200, type=int)
args = parser.parse_args()
#define kernel
init_fn, apply_fn, kernel_fn = stax.serial(stax.Dense(1), stax.Relu(), stax.Dense(1), stax.Relu(),
stax.Dense(1), stax.Relu(), stax.Dense(1), stax.Relu(), stax.Dense(1), stax.Relu(), stax.Dense(1))
KERNEL_FN = functools.partial(kernel_fn, get='ntk')
def distill(args):
#design task
task = design_bench.make(args.task)
#process data
task_x, task_y, shape0 = process_data(task, args.task)
load_weights(args.task, task_y, args.gamma)
#choose candidates
indexs = np.argsort(task_y.squeeze())
index = indexs[-args.topk:]
x_init = copy.deepcopy(task_x[index])
y_init = args.label*np.ones((x_init.shape[0], 1))
#overall before evaluation
max_score, median_score = evaluate_sample(task, x_init, args.task, shape0)
print("Before max {} median {}\n".format(max_score, median_score))
for x_i in range(x_init.shape[0]):
# define distill data
params_init = {'x': x_init[x_i].reshape(1, -1), 'y': y_init[x_i].reshape(1, -1)}
# instance evaluation before
score_before, _ = evaluate_sample(task, x_init[x_i], args.task, shape0)
# use the distill data to define optimizer
opt_state, get_params, update_fn = get_update_functions(params_init, KERNEL_FN, args.outer_lr, mode=args.mode)
params = get_params(opt_state)
        # define target batch (the full offline dataset)
x_target_batch = copy.deepcopy(task_x)
y_target_batch = copy.deepcopy(task_y)
for i in range(1, args.Tmax + 1):
# full batch gradient descent
opt_state, train_loss = update_fn(i, opt_state, params, x_target_batch, y_target_batch)
params = get_params(opt_state)
# store the updated distilled data
x_init[x_i] = params['x'].squeeze()
max_score, median_score = evaluate_sample(task, x_init, args.task, shape0)
print("After max {} median {}\n".format(max_score, median_score))
if __name__ == "__main__":
print(args)
load_d(args.task)
load_y(args.task)
distill(args)
| 3,446 | 39.081395 | 141 | py |
BDI | BDI-main/npy/compute_d.py | import os
import re
import requests
import numpy as np
import functools
from jax.experimental import optimizers
import jax
import jax.config
from jax.config import config as jax_config
jax_config.update('jax_enable_x64', True) # for numerical stability, can disable if not an issue
from jax import numpy as jnp
from jax import scipy as sp
import neural_tangents as nt
from neural_tangents import stax
from jax import random
import argparse
import design_bench
import copy
import time
from utils import *
parser = argparse.ArgumentParser(description="bi-level sequence learning")
parser.add_argument('--task', choices=['TFBind8-Exact-v0', 'Superconductor-RandomForest-v0',
'GFP-Transformer-v0', 'UTR-ResNet-v0', 'HopperController-Exact-v0',
'AntMorphology-Exact-v0', 'DKittyMorphology-Exact-v0'],
type=str, default='UTR-ResNet-v0')
args = parser.parse_args()
init_fn, apply_fn, kernel_fn = stax.serial(stax.Dense(1), stax.Relu(), stax.Dense(1), stax.Relu(),stax.Dense(1), stax.Relu(), stax.Dense(1), stax.Relu(), stax.Dense(1), stax.Relu(), stax.Dense(1))
KERNEL_FN = functools.partial(kernel_fn, get='ntk')
task = design_bench.make(args.task)
print(task.x.shape)
#process data
x_target, y_target, shape0 = process_data(task, args.task)
reg = 1e-6
print("x_target {} y_target {}".format(x_target.shape, y_target.shape))
k_tt = KERNEL_FN(x_target, x_target)
k_tt_reg = (k_tt + jnp.abs(reg) * jnp.trace(k_tt) * jnp.eye(k_tt.shape[0]) / k_tt.shape[0])
d = sp.linalg.solve(k_tt_reg, y_target, sym_pos=True)
np.save("npy/" + args.task + ".npy", d)
| 1,668 | 35.282609 | 196 | py |
BayesFlow | BayesFlow-master/docsrc/source/conf.py | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
import os
import sys
sys.path.insert(0, os.path.abspath("../.."))
# -- Project information -----------------------------------------------------
project = "BayesFlow"
copyright = "2023, BayesFlow authors (lead maintainer: Stefan T. Radev)"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"numpydoc",
"sphinx.ext.autosummary",
"sphinx.ext.todo",
"sphinx.ext.coverage",
"sphinx.ext.viewcode",
"myst_nb",
"sphinx.ext.extlinks",
"sphinx.ext.intersphinx",
"sphinx_design"
]
numpydoc_show_class_members = False
myst_enable_extensions = [
"amsmath",
"colon_fence",
"deflist",
"dollarmath",
"html_image",
]
myst_url_schemes = ["http", "https", "mailto"]
autodoc_default_options = {
"members": "var1, var2",
"special-members": "__call__",
"undoc-members": True,
"exclude-members": "__weakref__",
}
# Define shorthand for external links:
extlinks = {
"mainbranch": ("https://github.com/stefanradev93/BayesFlow/blob/master/%s", None),
}
coverage_show_missing_items = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_book_theme"
html_title = "BayesFlow: Amortized Bayesian Inference"
# Add any paths that contain custom _static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin _static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ["css/custom.css"]
html_show_sourcelink = False
html_theme_options = {
"repository_url": "https://github.com/stefanradev93/BayesFlow",
"repository_branch": "master",
"use_edit_page_button": True,
"use_issues_button": True,
"use_repository_button": True,
"use_download_button": True,
"logo": {"alt-text": "BayesFlow"},
}
html_logo = "_static/bayesflow_hex.png"
html_favicon = '_static/bayesflow_hex.ico'
html_baseurl = "https://www.bayesflow.org/"
html_js_files = [
"https://cdnjs.cloudflare.com/ajax/libs/require.js/2.3.4/require.min.js"
]
todo_include_todos = True
# do not execute jupyter notebooks when building docs
nb_execution_mode = "off"
# download notebooks as .ipynb and not as .ipynb.txt
html_sourcelink_suffix = ""
suppress_warnings = [
f"autosectionlabel._examples/{filename.split('.')[0]}"
for filename in os.listdir("../../examples")
if os.path.isfile(os.path.join("../../examples", filename))
] # Avoid duplicate label warnings for Jupyter notebooks.
remove_from_toctrees = ["_autosummary/*"]
| 3,727 | 30.863248 | 86 | py |
BayesFlow | BayesFlow-master/tests/test_benchmarks.py | # Copyright (c) 2022 The BayesFlow Developers
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import copy
import numpy as np
import pytest
import tensorflow as tf
from assets.benchmark_network_architectures import NETWORK_SETTINGS
from bayesflow import benchmarks
from bayesflow.amortizers import AmortizedLikelihood, AmortizedPosterior, AmortizedPosteriorLikelihood
from bayesflow.networks import InvertibleNetwork
from bayesflow.trainers import Trainer
def _get_trainer_configuration(benchmark_name, mode):
"""Helper function to configure test ``Trainer`` instance."""
# Clear tensorflow session
tf.keras.backend.clear_session()
# Setup benchmark instance
benchmark = benchmarks.Benchmark(benchmark_name, mode=mode)
# Setup posterior amortizer
if mode == "posterior":
amortizer = AmortizedPosterior(InvertibleNetwork(**NETWORK_SETTINGS[benchmark_name][mode]))
elif mode == "likelihood":
amortizer = AmortizedLikelihood(InvertibleNetwork(**NETWORK_SETTINGS[benchmark_name][mode]))
else:
amortizer = AmortizedPosteriorLikelihood(
amortized_posterior=AmortizedPosterior(InvertibleNetwork(**NETWORK_SETTINGS[benchmark_name]["posterior"])),
amortized_likelihood=AmortizedLikelihood(
InvertibleNetwork(**NETWORK_SETTINGS[benchmark_name]["likelihood"])
),
)
trainer = Trainer(
amortizer=amortizer,
generative_model=benchmark.generative_model,
learning_rate=0.0001,
configurator=benchmark.configurator,
memory=False,
)
return trainer
@pytest.mark.parametrize("benchmark_name", benchmarks.available_benchmarks)
@pytest.mark.parametrize("mode", ["posterior", "likelihood", "joint"])
def test_posterior(benchmark_name, mode):
"""This test will run posterior, likelihood, and joint estimation on all benchmarks. It will create a
minimal ``Trainer`` instance and test whether the weights change after a couple of backpropagation updates.
Implicitly, the function will test if the coupling ``GenerativeModel`` -> ``configurator`` ->
``Amortizer`` -> ``Trainer`` works.
"""
# Default settings for testing
epochs = 1
iterations = 5
batch_size = 16
# Init trainer (including checks) and train
trainer = _get_trainer_configuration(benchmark_name, mode=mode)
trainable_variables_pre = copy.deepcopy(trainer.amortizer.trainable_variables)
_ = trainer.train_online(epochs, iterations, batch_size)
trainable_variables_post = copy.deepcopy(trainer.amortizer.trainable_variables)
# Test whether weights change
for before, after in zip(trainable_variables_pre, trainable_variables_post):
assert np.any(before != after)
| 3,760 | 41.258427 | 119 | py |
BayesFlow | BayesFlow-master/bayesflow/inference_networks.py | # Copyright (c) 2022 The BayesFlow Developers
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import numpy as np
import tensorflow as tf
from bayesflow import default_settings
from bayesflow.coupling_networks import CouplingLayer
from bayesflow.helper_functions import build_meta_dict
from bayesflow.helper_networks import MCDropout
class InvertibleNetwork(tf.keras.Model):
"""Implements a chain of conditional invertible coupling layers for conditional density estimation."""
available_designs = ("affine", "spline", "interleaved")
def __init__(
self,
num_params,
num_coupling_layers=6,
coupling_design="affine",
coupling_settings=None,
permutation="fixed",
use_act_norm=True,
act_norm_init=None,
use_soft_flow=False,
soft_flow_bounds=(1e-3, 5e-2),
**kwargs,
):
"""Creates a chain of coupling layers with optional `ActNorm` layers in-between. Implements ideas from:
[1] Radev, S. T., Mertens, U. K., Voss, A., Ardizzone, L., & Köthe, U. (2020).
BayesFlow: Learning complex stochastic models with invertible neural networks.
IEEE Transactions on Neural Networks and Learning Systems.
[2] Kim, H., Lee, H., Kang, W. H., Lee, J. Y., & Kim, N. S. (2020).
Softflow: Probabilistic framework for normalizing flow on manifolds.
Advances in Neural Information Processing Systems, 33, 16388-16397.
[3] Ardizzone, L., Kruse, J., Lüth, C., Bracher, N., Rother, C., & Köthe, U. (2020).
Conditional invertible neural networks for diverse image-to-image translation.
In DAGM German Conference on Pattern Recognition (pp. 373-387). Springer, Cham.
[4] Durkan, C., Bekasov, A., Murray, I., & Papamakarios, G. (2019).
Neural spline flows. Advances in Neural Information Processing Systems, 32.
[5] Kingma, D. P., & Dhariwal, P. (2018).
Glow: Generative flow with invertible 1x1 convolutions.
Advances in Neural Information Processing Systems, 31.
Parameters
----------
num_params : int
The number of parameters to perform inference on. Equivalently, the dimensionality of the
latent space.
num_coupling_layers : int, optional, default: 6
The number of coupling layers to use as defined in [1] and [2]. In general, more coupling layers
will give you more expressive power, but will be slower and may need more simulations to train.
Typically, between 4 and 10 coupling layers should suffice for most applications.
coupling_design : str or callable, optional, default: 'affine'
The type of internal coupling network to use. Must be in ['affine', 'spline', 'interleaved'].
The first corresponds to the architecture in [3, 5], the second corresponds to a modified
version of [4]. The third option will alternate between affine and spline layers, for example,
if num_coupling_layers == 3, the chain will consist of ["affine", "spline", "affine"] layers.
In general, spline couplings run slower than affine couplings, but require fewer coupling
layers. Spline couplings may work best with complex (e.g., multimodal) low-dimensional
problems. The difference will become less and less pronounced as we move to higher dimensions.
Note: This is the first setting you may want to change, if inference does not work as expected!
coupling_settings : dict or None, optional, default: None
The coupling network settings to pass to the internal coupling layers. See ``default_settings``
for possible settings. Below are two examples.
Examples:
1. If using ``coupling_design='affine``, you may want to turn on Monte Carlo Dropout and
use an ELU activation function for the internal networks. You can do this by providing:
``
coupling_settings={
'mc_dropout' : True,
'dense_args' : dict(units=128, activation='elu')
}
``
2. If using ``coupling_design='spline'``, you may want to change the number of learnable bins
and increase the dropout probability (i.e., more regularization to guard against overfitting):
``
coupling_settings={
'dropout_prob': 0.2,
'bins' : 32,
}
``
permutation : str or None, optional, default: 'fixed'
Whether to use permutations between coupling layers. Highly recommended if ``num_coupling_layers > 1``
Important: Must be in ['fixed', 'learnable', None]
use_act_norm : bool, optional, default: True
Whether to use activation normalization after each coupling layer, as used in [5].
Recommended to keep default.
act_norm_init : np.ndarray of shape (num_simulations, num_params) or None, optional, default: None
Optional data-dependent initialization for the internal ``ActNorm`` layers, as done in [5]. Could be helpful
for deep invertible networks.
use_soft_flow : bool, optional, default: False
            Whether to perturb the target distribution (i.e., parameters) with a small amount of independent
            noise, as done in [2]. Could be helpful for degenerate distributions.
soft_flow_bounds : tuple(float, float), optional, default: (1e-3, 5e-2)
The bounds of the continuous uniform distribution from which the noise scale would be sampled
at each iteration. Only relevant when ``use_soft_flow=True``.
**kwargs : dict
Optional keyword arguments (e.g., name) passed to the tf.keras.Model __init__ method.
"""
super().__init__(**kwargs)
layer_settings = dict(
latent_dim=num_params,
permutation=permutation,
use_act_norm=use_act_norm,
act_norm_init=act_norm_init,
)
self.coupling_layers = self._create_coupling_layers(
layer_settings, coupling_settings, coupling_design, num_coupling_layers
)
self.soft_flow = use_soft_flow
self.soft_low = soft_flow_bounds[0]
self.soft_high = soft_flow_bounds[1]
self.permutation = permutation
self.use_act_norm = use_act_norm
self.latent_dim = num_params
def call(self, targets, condition, inverse=False, **kwargs):
"""Performs one pass through an invertible chain (either inverse or forward).
Parameters
----------
targets : tf.Tensor
The estimation quantities of interest, shape (batch_size, ...)
condition : tf.Tensor
The conditional data x, shape (batch_size, summary_dim)
inverse : bool, default: False
Flag indicating whether to run the chain forward or backwards
Returns
-------
(z, log_det_J) : tuple(tf.Tensor, tf.Tensor)
If inverse=False: The transformed input and the corresponding Jacobian of the transformation,
z shape: (batch_size, ...), log_det_J shape: (batch_size, ...)
target : tf.Tensor
If inverse=True: The transformed output, shape (batch_size, ...)
Notes
-----
If ``inverse=False``, the return is ``(z, log_det_J)``.\n
If ``inverse=True``, the return is ``target``.
"""
if inverse:
return self.inverse(targets, condition, **kwargs)
return self.forward(targets, condition, **kwargs)
def forward(self, targets, condition, **kwargs):
"""Performs a forward pass though the chain."""
# Add noise to target if using SoftFlow, use explicitly
# not in call(), since methods are public
if self.soft_flow and condition is not None:
# Extract shapes of tensors
target_shape = tf.shape(targets)
condition_shape = tf.shape(condition)
# Needs to be concatenatable with condition
if len(condition_shape) == 2:
shape_scale = (condition_shape[0], 1)
else:
shape_scale = (condition_shape[0], condition_shape[1], 1)
# Case training mode
if kwargs.get("training"):
noise_scale = tf.random.uniform(shape=shape_scale, minval=self.soft_low, maxval=self.soft_high)
# Case inference mode
else:
noise_scale = tf.zeros(shape=shape_scale) + self.soft_low
# Perturb data with noise (will broadcast to all dimensions)
if len(shape_scale) == 2 and len(target_shape) == 3:
targets += tf.expand_dims(noise_scale, axis=1) * tf.random.normal(shape=target_shape)
else:
targets += noise_scale * tf.random.normal(shape=target_shape)
# Augment condition with noise scale variate
condition = tf.concat((condition, noise_scale), axis=-1)
z = targets
log_det_Js = []
for layer in self.coupling_layers:
z, log_det_J = layer(z, condition, **kwargs)
log_det_Js.append(log_det_J)
# Sum Jacobian determinants for all layers (coupling blocks) to obtain total Jacobian.
log_det_J = tf.add_n(log_det_Js)
return z, log_det_J
def inverse(self, z, condition, **kwargs):
"""Performs a reverse pass through the chain. Assumes that it is only used
in inference mode, so ``**kwargs`` contains ``training=False``."""
# Add noise to target if using SoftFlow, use explicitly
# not in call(), since methods are public
if self.soft_flow and condition is not None:
# Needs to be concatenatable with condition
shape_scale = (
(condition.shape[0], 1) if len(condition.shape) == 2 else (condition.shape[0], condition.shape[1], 1)
)
noise_scale = tf.zeros(shape=shape_scale) + 2.0 * self.soft_low
# Augment condition with noise scale variate
condition = tf.concat((condition, noise_scale), axis=-1)
target = z
for layer in reversed(self.coupling_layers):
target = layer(target, condition, inverse=True, **kwargs)
return target
@staticmethod
def _create_coupling_layers(settings, coupling_settings, coupling_design, num_coupling_layers):
"""Helper method to create a list of coupling layers. Takes care
of the different options for coupling design.
"""
if coupling_design not in InvertibleNetwork.available_designs:
raise NotImplementedError(f"Coupling design should be one of {InvertibleNetwork.available_designs}.")
# Case affine or spline
if coupling_design != "interleaved":
design = coupling_design
_coupling_settings = coupling_settings
coupling_layers = [
CouplingLayer(coupling_design=design, coupling_settings=_coupling_settings, **settings)
for _ in range(num_coupling_layers)
]
# Case interleaved, starts with affine
else:
coupling_layers = []
designs = (["affine", "spline"] * int(np.ceil(num_coupling_layers / 2)))[:num_coupling_layers]
for design in designs:
# Fail gently, if neither None, nor a dictionary with keys ("spline", "affine")
_coupling_settings = None if coupling_settings is None else coupling_settings[design]
layer = CouplingLayer(coupling_design=design, coupling_settings=_coupling_settings, **settings)
coupling_layers.append(layer)
return coupling_layers
@classmethod
def create_config(cls, **kwargs):
""" "Used to create the settings dictionary for the internal networks of the invertible
network. Will fill in missing"""
settings = build_meta_dict(user_dict=kwargs, default_setting=default_settings.DEFAULT_SETTING_INVERTIBLE_NET)
return settings
class EvidentialNetwork(tf.keras.Model):
"""Implements a network whose outputs are the concentration parameters of a Dirichlet density.
Follows ideas from:
[1] Radev, S. T., D'Alessandro, M., Mertens, U. K., Voss, A., Köthe, U., & Bürkner, P. C. (2021).
Amortized Bayesian model comparison with evidential deep learning.
IEEE Transactions on Neural Networks and Learning Systems.
[2] Sensoy, M., Kaplan, L., & Kandemir, M. (2018).
Evidential deep learning to quantify classification uncertainty.
Advances in neural information processing systems, 31.
"""
def __init__(self, num_models, dense_args=None, num_dense=3, output_activation="softplus", **kwargs):
"""Creates an instance of an evidential network for amortized model comparison.
Parameters
----------
num_models : int
The number of candidate (competing) models for the comparison scenario.
dense_args : dict or None, optional, default: None
The arguments for a tf.keras.layers.Dense layer. If None, defaults will be used.
num_dense : int, optional, default: 3
The number of dense layers for the main network part.
output_activation : str or callable, optional, default: 'softplus'
The activation function to use for the network outputs.
Important: needs to have positive outputs.
**kwargs : dict, optional, default: {}
Optional keyword arguments (e.g., name) passed to the tf.keras.Model __init__ method.
"""
super().__init__(**kwargs)
if dense_args is None:
dense_args = default_settings.DEFAULT_SETTING_DENSE_EVIDENTIAL
# A network to increase representation power
self.dense = tf.keras.Sequential([tf.keras.layers.Dense(**dense_args) for _ in range(num_dense)])
# The layer to output model evidences
self.alpha_layer = tf.keras.layers.Dense(
num_models,
activation=output_activation,
**{k: v for k, v in dense_args.items() if k != "units" and k != "activation"},
)
self.num_models = num_models
def call(self, condition, **kwargs):
"""Computes evidences for model comparison given a batch of data and optional concatenated context,
typically passed through a summary network.
Parameters
----------
condition : tf.Tensor of shape (batch_size, ...)
The input variables used for determining ``p(model | condition)``
Returns
-------
evidence : tf.Tensor of shape (batch_size, num_models) -- the learned model evidences
"""
return self.evidence(condition, **kwargs)
@tf.function
def evidence(self, condition, **kwargs):
rep = self.dense(condition, **kwargs)
alpha = self.alpha_layer(rep, **kwargs)
evidence = alpha + 1.0
return evidence
def sample(self, condition, n_samples, **kwargs):
"""Samples posterior model probabilities from the higher-order Dirichlet density.
Parameters
----------
condition : tf.Tensor
The summary of the observed (or simulated) data, shape (n_data_sets, ...)
n_samples : int
Number of samples to obtain from the approximate posterior
Returns
-------
pm_samples : tf.Tensor or np.array
The posterior draws from the Dirichlet distribution, shape (num_samples, num_batch, num_models)
"""
alpha = self.evidence(condition, **kwargs)
n_datasets = alpha.shape[0]
pm_samples = np.stack(
[np.random.default_rng().dirichlet(alpha[n, :], size=n_samples) for n in range(n_datasets)], axis=1
)
return pm_samples
@classmethod
def create_config(cls, **kwargs):
""" "Used to create the settings dictionary for the internal networks of the invertible
network. Will fill in missing"""
settings = build_meta_dict(user_dict=kwargs, default_setting=default_settings.DEFAULT_SETTING_EVIDENTIAL_NET)
return settings
class PMPNetwork(tf.keras.Model):
"""Implements a network that approximates posterior model probabilities (PMPs) as employed in [1].
[1] Elsemüller, L., Schnuerch, M., Bürkner, P. C., & Radev, S. T. (2023).
A Deep Learning Method for Comparing Bayesian Hierarchical Models.
arXiv preprint arXiv:2301.11873.
"""
def __init__(
self,
num_models,
dense_args=None,
num_dense=3,
dropout=True,
mc_dropout=False,
dropout_prob=0.05,
output_activation=tf.nn.softmax,
**kwargs,
):
"""Creates an instance of a PMP network for amortized model comparison.
Parameters
----------
num_models : int
The number of candidate (competing) models for the comparison scenario.
dense_args : dict or None, optional, default: None
The arguments for a tf.keras.layers.Dense layer. If None, defaults will be used.
num_dense : int, optional, default: 3
The number of dense layers for the main network part.
dropout : bool, optional, default: True
Whether to use dropout in-between the hidden layers.
mc_dropout : bool, optional, default: False
Whether to use Monte Carlo dropout (i.e., Bayesian approximation) during inference
dropout_prob : float in (0, 1), optional, default: 0.05
The dropout probability. Only has an effect if ``dropout=True`` or ``mc_dropout=True``
output_activation : callable, optional, default: tf.nn.softmax
The activation function to apply to the network outputs.
Important: Needs to have positive outputs and be bounded between 0 and 1.
**kwargs : dict, optional, default: {}
Optional keyword arguments (e.g., name) passed to the ``tf.keras.Model`` __init__ method.
"""
super().__init__(**kwargs)
# Pick default settings, if None provided
if dense_args is None:
dense_args = default_settings.DEFAULT_SETTING_DENSE_PMP
# Sequential model with optional (MC) Dropout
self.net = tf.keras.Sequential()
for _ in range(num_dense):
self.net.add(tf.keras.layers.Dense(**dense_args))
if mc_dropout:
self.net.add(MCDropout(dropout_prob))
elif dropout:
self.net.add(tf.keras.layers.Dropout(dropout_prob))
else:
pass
self.output_layer = tf.keras.layers.Dense(num_models)
self.output_activation = output_activation
self.num_models = num_models
def call(self, condition, return_probs=True, **kwargs):
"""Forward pass through the network. Computes approximated PMPs given a batch of data
and optional concatenated context, typically passed through a summary network.
Parameters
----------
condition : tf.Tensor of shape (batch_size, ...)
The input variables used for determining ``p(model | condition)``
return_probs : bool, optional, default: True
Whether to return probabilities or logits (pre-activation, unnormalized)
Returns
-------
out : tf.Tensor of shape (batch_size, ..., num_models)
The approximated PMPs (post-activation) or logits (pre-activation)
"""
rep = self.net(condition, **kwargs)
logits = self.output_layer(rep, **kwargs)
if return_probs:
return self.output_activation(logits)
return logits
def posterior_probs(self, condition, **kwargs):
"""Shortcut function to obtain posterior probabilities given a
condition tensor (e.g., summary statistics of data sets).
Parameters
----------
condition : tf.Tensor of shape (batch_size, ...)
The input variables used for determining ``p(model | condition)``
Returns
-------
out : tf.Tensor of shape (batch_size, ..., num_models)
The approximated PMPs
"""
return self(condition, return_probs=True, **kwargs)
def logits(self, condition, **kwargs):
"""Shortcut function to obtain logits given a condition tensor
(e.g., summary statistics of data sets).
Parameters
----------
condition : tf.Tensor of shape (batch_size, ...)
The input variables used for determining ``p(model | condition)``
Returns
-------
out : tf.Tensor of shape (batch_size, ..., num_models)
The logits (pre-activation, unnormalized network outputs)
"""
return self(condition, return_probs=False, **kwargs)
@classmethod
def create_config(cls, **kwargs):
"""Used to create the settings dictionary for the internal networks of the
network. Will fill in missing."""
settings = build_meta_dict(user_dict=kwargs, default_setting=default_settings.DEFAULT_SETTING_PMP_NET)
return settings
| 22,507 | 43.133333 | 120 | py |
BayesFlow | BayesFlow-master/bayesflow/helper_functions.py | # Copyright (c) 2022 The BayesFlow Developers
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import copy
import tensorflow as tf
from tensorflow.keras.optimizers.schedules import LearningRateSchedule
from bayesflow import default_settings
from bayesflow.exceptions import ConfigurationError, ShapeError
def check_tensor_sanity(tensor, logger):
"""Tests for the presence of NaNs and Infs in a tensor."""
if tf.executing_eagerly():
if tf.reduce_any(tf.math.is_nan(tensor)):
num_na = tf.reduce_sum(tf.cast(tf.math.is_nan(tensor), tf.int8)).numpy()
logger.warn(f"Warning! Returned estimates contain {num_na} nan values!")
if tf.reduce_any(tf.math.is_inf(tensor)):
num_inf = tf.reduce_sum(tf.cast(tf.math.is_inf(tensor), tf.int8)).numpy()
logger.warn(f"Warning! Returned estimates contain {num_inf} inf values!")
else:
if tf.reduce_any(tf.math.is_nan(tensor)):
num_na = tf.reduce_sum(tf.cast(tf.math.is_nan(tensor), tf.int8))
tf.print("Warning! Returned estimates contain", num_na, "nan values!")
if tf.reduce_any(tf.math.is_inf(tensor)):
num_inf = tf.reduce_sum(tf.cast(tf.math.is_inf(tensor), tf.int8))
tf.print(f"Warning! Returned estimates contain", num_inf, "inf values!")
def merge_left_into_right(left_dict, right_dict):
"""Function to merge nested dict `left_dict` into nested dict `right_dict`."""
for k, v in left_dict.items():
if isinstance(v, dict):
if right_dict.get(k) is not None:
right_dict[k] = merge_left_into_right(v, right_dict.get(k))
else:
right_dict[k] = v
else:
right_dict[k] = v
return right_dict
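# --- Illustrative usage sketch (not part of the library API) -----------------------------------
# A hedged, minimal example of the nested merge performed above: entries of the left dict
# override or extend entries of the right dict, recursing into nested dictionaries instead of
# replacing them wholesale. The field names are purely illustrative.
def _example_merge_left_into_right():
    user = {"dense_args": {"units": 256}, "dropout": True}
    defaults = {"dense_args": {"units": 128, "activation": "relu"}, "dropout": False}
    merged = merge_left_into_right(user, defaults)
    # merged == {"dense_args": {"units": 256, "activation": "relu"}, "dropout": True}
    return merged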
def build_meta_dict(user_dict: dict, default_setting: default_settings.MetaDictSetting) -> dict:
"""Integrates a user-defined dictionary into a default dictionary.
Takes a user-defined dictionary and a default dictionary.
#. Scan the `user_dict` for violations by unspecified mandatory fields.
#. Merge `user_dict` entries into the `default_dict`. Considers nested dict structure.
Parameters
----------
user_dict : dict
The user's dictionary
default_setting : MetaDictSetting
The specified default setting with attributes:
- `meta_dict`: dictionary with default values.
- `mandatory_fields`: list(str) keys that need to be specified by the `user_dict`
Returns
-------
merged_dict: dict
Merged dictionary.
"""
default_dict = copy.deepcopy(default_setting.meta_dict)
mandatory_fields = copy.deepcopy(default_setting.mandatory_fields)
# Check if all mandatory fields are provided by the user
if not all([field in user_dict.keys() for field in mandatory_fields]):
raise ConfigurationError(f"Not all mandatory fields provided! Need at least the following: {mandatory_fields}")
# Merge the user dict into the default dict
merged_dict = merge_left_into_right(user_dict, default_dict)
return merged_dict
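# --- Illustrative usage sketch (not part of the library API) -----------------------------------
# A hedged example of the merge-and-validate behavior documented above. It assumes that
# ``MetaDictSetting`` can be constructed with ``meta_dict`` and ``mandatory_fields`` keyword
# arguments (the attributes described in the docstring); the concrete field names are purely
# illustrative.
def _example_build_meta_dict():
    default_setting = default_settings.MetaDictSetting(
        meta_dict={"units": 128, "activation": "relu"},
        mandatory_fields=["units"],
    )
    # OK: the mandatory field "units" is provided by the user
    merged = build_meta_dict(user_dict={"units": 256}, default_setting=default_setting)
    # Would raise ConfigurationError, since "units" is missing from the user dict:
    # build_meta_dict(user_dict={"activation": "elu"}, default_setting=default_setting)
    return merged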
def extract_current_lr(optimizer):
"""Extracts current learning rate from `optimizer`.
Parameters
----------
optimizer : instance of subclass of `tf.keras.optimizers.Optimizer`
Optimizer to extract the learning rate from
Returns
-------
current_lr : np.float or NoneType
Current learning rate, or `None` if it can't be determined
"""
if isinstance(optimizer.lr, LearningRateSchedule):
# LearningRateSchedule instances need number of iterations
current_lr = optimizer.lr(optimizer.iterations).numpy()
elif hasattr(optimizer.lr, "numpy"):
# Convert learning rate to numpy
current_lr = optimizer.lr.numpy()
else:
# Unable to extract numerical value from optimizer.lr
current_lr = None
return current_lr
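# --- Illustrative usage sketch (not part of the library API) -----------------------------------
# A minimal, hedged example: the helper above handles both plain learning rates and Keras
# learning-rate schedules. The optimizer choices and values are arbitrary illustrations.
def _example_extract_current_lr():
    # Constant learning rate -> returns (approximately) 0.0005
    adam = tf.keras.optimizers.Adam(learning_rate=5e-4)
    lr_constant = extract_current_lr(adam)
    # Schedule -> returns the schedule evaluated at the optimizer's current iteration
    schedule = tf.keras.optimizers.schedules.CosineDecay(5e-4, decay_steps=1000)
    adam_scheduled = tf.keras.optimizers.Adam(learning_rate=schedule)
    lr_scheduled = extract_current_lr(adam_scheduled)
    return lr_constant, lr_scheduled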
def format_loss_string(
ep, it, loss, avg_dict, slope=None, lr=None, ep_str="Epoch", it_str="Iter", scalar_loss_str="Loss"
):
"""Prepare loss string for displaying on progress bar."""
# Prepare info part
disp_str = f"{ep_str}: {ep}, {it_str}: {it}"
if type(loss) is dict:
for k, v in loss.items():
disp_str += f",{k}: {v.numpy():.3f}"
else:
disp_str += f",{scalar_loss_str}: {loss.numpy():.3f}"
# Add running
if avg_dict is not None:
for k, v in avg_dict.items():
disp_str += f",{k}: {v:.3f}"
if slope is not None:
disp_str += f",L.Slope: {slope:.3f}"
if lr is not None:
disp_str += f",LR: {lr:.2E}"
return disp_str
def loss_to_string(ep, loss, ep_str="Epoch", scalar_loss_str="Loss"):
"""Converts output from an amortizer into a string.
For instance, if a ``dict`` is provided, it will be converted as, e.g.,:
dictionary = {k1: v1, k2: v2} -> 'k1: v1, k2: v2'
"""
disp_str = f"Validation, {ep_str}: {ep}"
if type(loss) is dict:
for k, v in loss.items():
disp_str += f", {k}: {v.numpy():.3f}"
else:
disp_str += f", {scalar_loss_str}: {loss.numpy():.3f}"
return disp_str
def backprop_step(input_dict, amortizer, optimizer, **kwargs):
"""Computes the loss of the provided amortizer given an input dictionary and applies gradients.
Parameters
----------
input_dict : dict
The configured output of the generative model
amortizer : tf.keras.Model
The custom amortizer. Needs to implement a compute_loss method.
optimizer : tf.keras.optimizers.Optimizer
The optimizer used to update the amortizer's parameters.
**kwargs : dict
Optional keyword arguments passed to the network's compute_loss method
Returns
-------
loss : dict
The outputs of the compute_loss() method of the amortizer comprising all
loss components, such as divergences or regularization.
"""
# Forward pass and loss computation
with tf.GradientTape() as tape:
# Compute custom loss
loss = amortizer.compute_loss(input_dict, training=True, **kwargs)
# If dict, add components
if type(loss) is dict:
_loss = tf.add_n(list(loss.values()))
else:
_loss = loss
# Collect regularization loss, if any
if amortizer.losses != []:
reg = tf.add_n(amortizer.losses)
_loss += reg
if type(loss) is dict:
loss["W.Decay"] = reg
else:
loss = {"Loss": loss, "W.Decay": reg}
# One step backprop and return loss
gradients = tape.gradient(_loss, amortizer.trainable_variables)
optimizer.apply_gradients(zip(gradients, amortizer.trainable_variables))
return loss
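# --- Illustrative usage sketch (not part of the library API) -----------------------------------
# A hedged, self-contained sketch of a single training update with the helper above.
# ``_ToyAmortizer`` is a hypothetical stand-in for a BayesFlow amortizer: the only contract
# ``backprop_step`` relies on is a ``compute_loss(input_dict, **kwargs)`` method and trainable
# variables.
class _ToyAmortizer(tf.keras.Model):
    def __init__(self):
        super().__init__()
        self.dense = tf.keras.layers.Dense(1)
    def compute_loss(self, input_dict, **kwargs):
        pred = self.dense(input_dict["x"])
        return tf.reduce_mean(tf.square(pred - input_dict["y"]))
def _example_backprop_step():
    amortizer = _ToyAmortizer()
    optimizer = tf.keras.optimizers.Adam(5e-4)
    input_dict = {"x": tf.random.normal((32, 4)), "y": tf.random.normal((32, 1))}
    # One forward/backward pass; returns the scalar loss (or a dict of loss components)
    loss = backprop_step(input_dict, amortizer, optimizer)
    return loss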
def check_posterior_prior_shapes(post_samples, prior_samples):
"""Checks requirements for the shapes of posterior and prior draws as
necessitated by most diagnostic functions.
Parameters
----------
post_samples : np.ndarray of shape (n_data_sets, n_post_draws, n_params)
The posterior draws obtained from n_data_sets
prior_samples : np.ndarray of shape (n_data_sets, n_params)
The prior draws obtained for generating n_data_sets
Raises
------
ShapeError
If there is a deviation form the expected shapes of `post_samples` and `prior_samples`.
"""
if len(post_samples.shape) != 3:
raise ShapeError(
f"post_samples should be a 3-dimensional array, with the "
+ f"first dimension being the number of (simulated) data sets, "
+ f"the second dimension being the number of posterior draws per data set, "
+ f"and the third dimension being the number of parameters (marginal distributions), "
+ f"but your input has dimensions {len(post_samples.shape)}"
)
elif len(prior_samples.shape) != 2:
raise ShapeError(
f"prior_samples should be a 2-dimensional array, with the "
+ f"first dimension being the number of (simulated) data sets / prior draws "
+ f"and the second dimension being the number of parameters (marginal distributions), "
+ f"but your input has dimensions {len(prior_samples.shape)}"
)
elif post_samples.shape[0] != prior_samples.shape[0]:
raise ShapeError(
f"The number of elements over the first dimension of post_samples and prior_samples"
+ f"should match, but post_samples has {post_samples.shape[0]} and prior_samples has "
+ f"{prior_samples.shape[0]} elements, respectively."
)
elif post_samples.shape[-1] != prior_samples.shape[-1]:
raise ShapeError(
f"The number of elements over the last dimension of post_samples and prior_samples"
+ f"should match, but post_samples has {post_samples.shape[1]} and prior_samples has "
+ f"{prior_samples.shape[-1]} elements, respectively."
)
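# --- Illustrative usage sketch (not part of the library API) -----------------------------------
# A minimal, hedged example of the shape contract enforced above: 100 data sets, 500 posterior
# draws per data set, and 3 parameters. NumPy is imported locally since this module does not
# import it at the top level.
def _example_check_posterior_prior_shapes():
    import numpy as np
    rng = np.random.default_rng(42)
    post_samples = rng.normal(size=(100, 500, 3))
    prior_samples = rng.normal(size=(100, 3))
    check_posterior_prior_shapes(post_samples, prior_samples)  # passes silently
    # check_posterior_prior_shapes(post_samples[0], prior_samples)  # would raise ShapeError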
| 10,019 | 39.08 | 119 | py |
BayesFlow | BayesFlow-master/bayesflow/trainers.py | # Copyright (c) 2022 The BayesFlow Developers
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import logging
import os
from pickle import load as pickle_load
import tensorflow as tf
import numpy as np
from tqdm.autonotebook import tqdm
from bayesflow.amortizers import (
AmortizedLikelihood,
AmortizedModelComparison,
AmortizedPosterior,
AmortizedPosteriorLikelihood,
)
from bayesflow.computational_utilities import maximum_mean_discrepancy
from bayesflow.configuration import *
from bayesflow.default_settings import DEFAULT_KEYS, OPTIMIZER_DEFAULTS
from bayesflow.diagnostics import plot_latent_space_2d, plot_sbc_histograms
from bayesflow.exceptions import ArgumentError, SimulationError
from bayesflow.helper_classes import (
EarlyStopper,
LossHistory,
MemoryReplayBuffer,
MultiSimulationDataset,
SimulationDataset,
SimulationMemory,
)
from bayesflow.helper_functions import backprop_step, extract_current_lr, format_loss_string, loss_to_string
from bayesflow.simulation import GenerativeModel, MultiGenerativeModel
logging.basicConfig()
class Trainer:
"""This class connects a generative model (or, already simulated data from a model) with
a configurator and a neural inference architecture for amortized inference (amortizer). A Trainer
instance is responsible for optimizing the amortizer via various forms of simulation-based training.
At the very minimum, the trainer must be initialized with an `amortizer` instance, which is capable
of processing the (configured) outputs of a generative model. A `configurator` will then process
the outputs of the generative model and convert them into suitable inputs for the amortizer. Users
can choose from a palette of default configurators or create their own configurators, essentially
building a modularized pipeline `GenerativeModel` -> `Configurator` -> `Amortizer`. Most complex models
will require custom configurators.
Notes
-----
Currently, the trainer supports the following simulation-based training regimes, based on efficiency
considerations:
* Online training
>>> trainer.train_online(epochs, iterations_per_epoch, batch_size, **kwargs)
This training regime is optimal for fast generative models which can efficiently simulate data on-the-fly.
In order for this training regime to be efficient, on-the-fly batch simulations should not take longer
than 2-3 seconds.
* Experience replay training
>>> trainer.train_experience_replay(epochs, iterations_per_epoch, batch_size, **kwargs)
This training regime is also good for fast generative models capable of efficiently simulating data on-the-fly.
Compared to pure online training, this training will keep an experience replay buffer from which simulations
are randomly sampled, so the networks will likely see some simulations multiple times.
* Round-based training
>>> trainer.train_rounds(rounds, sim_per_round, epochs, batch_size, **kwargs)
This training regime is optimal for slow, but still reasonably performant generative models.
In order for this training regime to be efficient, on-the-fly batch simulations should not take
longer than 2-3 minutes.
.. note:: overfitting presents a danger when using small numbers of simulated data sets, so it is recommended
to use some amount of regularization for the neural amortizer(s).
* Offline training
>>> trainer.train_offline(simulations_dict, epochs, batch_size, **kwargs)
This training regime is optimal for very slow, external simulators, which take several minutes for a
single simulation. It assumes that all training data has been already simulated and stored on disk.
.. warning:: Overfitting presents a danger when using a small simulated data set, so it is recommended to use
some amount of regularization for the neural amortizer(s).
.. note::
For extremely slow simulators (i.e., more than an hour of a single simulation), the BayesFlow framework
might not be the ideal choice and should probably be considered in combination with a black-box surrogate
optimization method, such as Bayesian optimization.
"""
def __init__(
self,
amortizer,
generative_model=None,
configurator=None,
checkpoint_path=None,
max_to_keep=3,
default_lr=0.0005,
skip_checks=False,
memory=False,
**kwargs,
):
"""Creates a trainer which will use a generative model (or data simulated from it) to optimize
a neural architecture (amortizer) for amortized posterior inference, likelihood inference, or both.
Parameters
----------
amortizer : `bayesflow.amortizers.Amortizer`
The neural architecture to be optimized.
generative_model : `bayesflow.forward_inference.GenerativeModel`
A generative model returning a dictionary with randomly sampled parameters, data, and optional context
configurator : callable or None, optional, default: None
A callable object transforming and combining the outputs of the generative model into inputs for a BayesFlow
amortizer.
checkpoint_path : string or None, optional, default: None
Optional file path for storing the trained amortizer, loss history and optional memory.
max_to_keep : int, optional, default: 3
Number of checkpoints and loss history snapshots to keep.
default_lr : float, optional, default: 0.0005
The default learning rate to use for default optimizers.
skip_checks : bool, optional, default: False
If True, do not perform consistency checks, i.e., simulator runs and passes through the networks
memory : bool or bayesflow.SimulationMemory, optional, default: False
If ``True``, store a pre-defined amount of simulations for later use (validation, etc.).
If `SimulationMemory` instance provided, stores a reference to the instance.
Otherwise the corresponding attribute will be set to None.
Other Parameters:
-----------------
memory_kwargs : dict
Keyword arguments to be passed to the `SimulationMemory` instance, if ``memory=True``
num_models : int
The number of models in an amortized model comparison scenario, in case of a custom model comparison
amortizer which does not have a num_models attribute.
"""
# Set-up logging
logger = logging.getLogger()
logger.setLevel(logging.INFO)
self.amortizer = amortizer
self.generative_model = generative_model
if self.generative_model is None:
logger.info(
"Trainer initialization: No generative model provided. Only offline learning mode is available!"
)
# Determine n models in case model comparison mode
if type(generative_model) is MultiGenerativeModel:
_num_models = generative_model.num_models
elif type(amortizer) is AmortizedModelComparison:
_num_models = amortizer.num_models
else:
_num_models = kwargs.get("num_models")
# Set-up configurator
self.configurator = self._manage_configurator(configurator, num_models=_num_models)
# Set-up memory classes
self.loss_history = LossHistory()
if memory is True:
self.simulation_memory = SimulationMemory(**kwargs.pop("memory_kwargs", {}))
elif type(memory) is SimulationMemory:
self.simulation_memory = memory
else:
self.simulation_memory = None
# Set-up replay buffer and optimizer attributes
self.replay_buffer = None
self.optimizer = None
self.default_lr = default_lr
# Checkpoint and helper classes settings
self.max_to_keep = max_to_keep
if checkpoint_path is not None:
self.checkpoint = tf.train.Checkpoint(model=self.amortizer)
self.manager = tf.train.CheckpointManager(self.checkpoint, checkpoint_path, max_to_keep=max_to_keep)
self.checkpoint.restore(self.manager.latest_checkpoint)
self.loss_history.load_from_file(checkpoint_path)
if self.simulation_memory is not None:
self.simulation_memory.load_from_file(checkpoint_path)
if self.manager.latest_checkpoint:
logger.info("Networks loaded from {}".format(self.manager.latest_checkpoint))
else:
logger.info("Initialized networks from scratch.")
else:
self.checkpoint = None
self.manager = None
self.checkpoint_path = checkpoint_path
# Perform a sanity check with provided components
if not skip_checks:
self._check_consistency()
def diagnose_latent2d(self, inputs=None, **kwargs):
"""Performs visual pre-inference diagnostics of latent space on either provided validation data
(new simulations) or internal simulation memory.
If ``inputs is not None``, then diagnostics will be performed on the inputs, regardless
whether the `simulation_memory` of the trainer is empty or not. If ``inputs is None``, then
the trainer will try to access its memory or raise a `ConfigurationError`.
Parameters
----------
inputs : None, list, or dict, optional, default: None
The optional inputs to use
Other Parameters
----------------
conf_args :
optional keyword arguments passed to the configurator
net_args :
optional keyword arguments passed to the amortizer
plot_args :
optional keyword arguments passed to `plot_latent_space_2d`
Returns
-------
fig : plt.Figure
The figure object which can be readily saved to disk using `fig.savefig()`.
"""
if type(self.amortizer) is AmortizedPosterior:
# If no inputs, try memory and throw if no memory
if inputs is None:
if self.simulation_memory is None:
raise ConfigurationError(
"You should either enable simulation memory or supply the inputs argument."
)
else:
inputs = self.simulation_memory.get_memory()
else:
inputs = self.configurator(inputs, **kwargs.pop("conf_args", {}))
# Do inference
if type(inputs) is list:
z, _ = self.amortizer.call_loop(inputs, **kwargs.pop("net_args", {}))
else:
z, _ = self.amortizer(inputs, **kwargs.pop("net_args", {}))
return plot_latent_space_2d(z, **kwargs.pop("plot_args", {}))
else:
raise NotImplementedError("Latent space diagnostics are only available for type AmortizedPosterior!")
def diagnose_sbc_histograms(self, inputs=None, n_samples=None, **kwargs):
"""Performs visual pre-inference diagnostics via simulation-based calibration (SBC)
(new simulations) or internal simulation memory.
If ``inputs is not None``, then diagnostics will be performed on the inputs, regardless
whether the `simulation_memory` of the trainer is empty or not. If ``inputs is None``, then
the trainer will try to access its memory or raise a `ConfigurationError`.
Parameters
----------
inputs : None, list or dict, optional, default: None
The optional inputs to use
n_samples : int or None, optional, default: None
The number of posterior samples to draw for each simulated data set.
If None, the number will be heuristically determined so that n_sim / n_draws is approximately equal to 20
Other Parameters
----------------
conf_args :
optional keyword arguments passed to the configurator
net_args :
optional keyword arguments passed to the amortizer
plot_args :
optional keyword arguments passed to `plot_sbc()`
Returns
-------
fig : plt.Figure
The figure object which can be readily saved to disk using `fig.savefig()`.
"""
if type(self.amortizer) is AmortizedPosterior:
# If no inputs, try memory and throw if no memory
if inputs is None:
if self.simulation_memory is None:
raise ConfigurationError(
"You should either enable simulation memory or supply the inputs argument."
)
else:
inputs = self.simulation_memory.get_memory()
else:
inputs = self.configurator(inputs, **kwargs.pop("conf_args", {}))
# Heuristically determine the number of posterior samples
if n_samples is None:
if type(inputs) is list:
n_sim = np.sum([inp["parameters"].shape[0] for inp in inputs])
n_samples = int(np.ceil(n_sim / 20))
else:
n_samples = int(np.ceil(inputs["parameters"].shape[0] / 20))
# Do inference
if type(inputs) is list:
post_samples = self.amortizer.sample_loop(inputs, n_samples=n_samples, **kwargs.pop("net_args", {}))
prior_samples = np.concatenate([inp["parameters"] for inp in inputs], axis=0)
else:
post_samples = self.amortizer.sample(inputs, n_samples=n_samples, **kwargs.pop("net_args", {}))
prior_samples = inputs["parameters"]
# Check for prior names and override keyword if available
plot_kwargs = kwargs.pop("plot_args", {})
if type(self.generative_model) is GenerativeModel and plot_kwargs.get("param_names") is None:
plot_kwargs["param_names"] = self.generative_model.param_names
return plot_sbc_histograms(post_samples, prior_samples, **plot_kwargs)
else:
raise NotImplementedError("SBC diagnostics are only available for type AmortizedPosterior!")
def load_pretrained_network(self):
"""Attempts to load a pre-trained network if checkpoint path is provided and a checkpoint manager exists."""
if self.manager is None or self.checkpoint is None:
return False
status = self.checkpoint.restore(self.manager.latest_checkpoint)
return status
def train_online(
self,
epochs,
iterations_per_epoch,
batch_size,
save_checkpoint=True,
optimizer=None,
reuse_optimizer=False,
early_stopping=False,
use_autograph=True,
validation_sims=None,
**kwargs,
):
"""Trains an amortizer via online learning. Additional keyword arguments
are passed to the generative mode, configurator, and amortizer.
Parameters
----------
epochs : int
Number of epochs (and number of times a checkpoint is stored)
iterations_per_epoch : int
Number of batch simulations to perform per epoch
batch_size : int
Number of simulations to perform at each backprop step
save_checkpoint : bool, default: True
A flag to decide whether to save checkpoints after each epoch,
if a checkpoint_path provided during initialization, otherwise ignored.
optimizer : tf.keras.optimizer.Optimizer or None
Optimizer for the neural network. ``None`` will result in ``tf.keras.optimizers.Adam``
using a learning rate of 5e-4 and a cosine decay from 5e-4 to 0. A custom optimizer
will override default learning rate and schedule settings.
reuse_optimizer : bool, optional, default: False
A flag indicating whether the optimizer instance should be treated as persistent or not.
If ``False``, the optimizer and its states are not stored after training has finished.
Otherwise, the optimizer will be stored as ``self.optimizer`` and re-used in further training runs.
early_stopping : bool, optional, default: False
Whether to use optional stopping or not during training. Could speed up training.
Only works if ``validation_sims is not None``, i.e., validation data has been provided.
use_autograph : bool, optional, default: True
Whether to use autograph for the backprop step. Could lead to enormous speed-ups but
could also be harder to debug.
validation_sims : dict, int, or None, optional, default: None
Simulations used as a "validation set".
If ``dict``, will assume it's the output of a generative model and try
``amortizer.compute_loss(configurator(validation_sims))``
after each epoch.
If ``int``, will assume it's the number of sims to generate from the generative
model before starting training. Only considered if a generative model has been
provided during initialization.
If ``None`` (default), no validation set will be used.
Other Parameters
----------------
model_args :
optional kwargs passed to the generative model
val_model_args:
optional kwargs passed to the generative model for generating validation data. Only useful if
``type(validation_sims) is int``.
conf_args :
optional kwargs passed to the configurator before each backprop (update) step.
val_conf_args :
optional kwargs passed to the configurator when configuring the validation data.
net_args :
optional kwargs passed to the amortizer
early_stopping_args :
optional kwargs passed to the `EarlyStopper`
Returns
-------
losses : dict or pandas.DataFrame
A dictionary storing the losses across epochs and iterations
"""
assert self.generative_model is not None, "No generative model found. Only offline training is possible!"
# Compile update function, if specified
if use_autograph:
_backprop_step = tf.function(backprop_step, reduce_retracing=True)
else:
_backprop_step = backprop_step
# Create new optimizer and initialize loss history
self._setup_optimizer(optimizer, epochs, iterations_per_epoch)
self.loss_history.start_new_run()
validation_sims = self._config_validation(validation_sims, **kwargs.pop("val_model_args", {}))
# Create early stopper, if conditions met, otherwise None returned
early_stopper = self._config_early_stopping(early_stopping, validation_sims, **kwargs)
# Loop through training epochs
for ep in range(1, epochs + 1):
with tqdm(total=iterations_per_epoch, desc=f"Training epoch {ep}") as p_bar:
for it in range(1, iterations_per_epoch + 1):
# Perform one training step and obtain current loss value
loss = self._train_step(batch_size, update_step=_backprop_step, **kwargs)
# Store returned loss
self.loss_history.add_entry(ep, loss)
# Compute running loss
avg_dict = self.loss_history.get_running_losses(ep)
# Extract current learning rate
lr = extract_current_lr(self.optimizer)
# Format for display on progress bar
disp_str = format_loss_string(ep, it, loss, avg_dict, lr=lr)
# Update progress bar
p_bar.set_postfix_str(disp_str)
p_bar.update(1)
# Store and compute validation loss, if specified
self._save_trainer(save_checkpoint)
self._validation(ep, validation_sims, **kwargs)
# Check early stopping, if specified
if self._check_early_stopping(early_stopper):
break
# Remove optimizer reference, if not set as persistent
if not reuse_optimizer:
self.optimizer = None
return self.loss_history.get_plottable()
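    # Illustrative call sketch (hedged): online training with a held-out validation set and
    # early stopping, assuming a trainer constructed with a generative model as in the class
    # docstring. All numbers are arbitrary; ``validation_sims=200`` simulates 200 validation
    # data sets up front, as documented above.
    #
    # >>> losses = trainer.train_online(
    # ...     epochs=20,
    # ...     iterations_per_epoch=1000,
    # ...     batch_size=32,
    # ...     validation_sims=200,
    # ...     early_stopping=True,
    # ... )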
def train_offline(
self,
simulations_dict,
epochs,
batch_size,
save_checkpoint=True,
optimizer=None,
reuse_optimizer=False,
early_stopping=False,
validation_sims=None,
use_autograph=True,
**kwargs,
):
"""Trains an amortizer via offline learning. Assume parameters, data and optional
context have already been simulated (i.e., forward inference has been performed).
Parameters
----------
simulations_dict : dict
A dictionary containing the simulated data / context, if using the default keys,
the method expects at least the mandatory keys ``sim_data`` and ``prior_draws`` to be present
epochs : int
Number of epochs (and number of times a checkpoint is stored)
batch_size : int
Number of simulations to perform at each backpropagation step
save_checkpoint : bool, default: True
Determines whether to save checkpoints after each epoch,
if a checkpoint_path provided during initialization, otherwise ignored.
optimizer : tf.keras.optimizer.Optimizer or None
Optimizer for the neural network. ``None`` will result in ``tf.keras.optimizers.Adam``
using a learning rate of 5e-4 and a cosine decay from 5e-4 to 0. A custom optimizer
will override default learning rate and schedule settings.
reuse_optimizer : bool, optional, default: False
A flag indicating whether the optimizer instance should be treated as persistent or not.
If ``False``, the optimizer and its states are not stored after training has finished.
Otherwise, the optimizer will be stored as ``self.optimizer`` and re-used in further training runs.
early_stopping : bool, optional, default: False
Whether to use optional stopping or not during training. Could speed up training.
Only works if ``validation_sims is not None``, i.e., validation data has been provided.
use_autograph : bool, optional, default: True
Whether to use autograph for the backprop step. Could lead to enormous speed-ups but
could also be harder to debug.
validation_sims : dict, int, or None, optional, default: None
Simulations used as a "validation set".
If ``dict``, will assume it's the output of a generative model and try
``amortizer.compute_loss(configurator(validation_sims))`` after each epoch.
If ``int``, will assume it's the number of sims to generate from the generative
model before starting training. Only considered if a generative model has been
provided during initialization.
If ``None`` (default), no validation set will be used.
Other Parameters
----------------
val_model_args :
optional kwargs passed to the generative model for generating validation data.
Only useful if ``type(validation_sims) is int``.
conf_args :
optional kwargs passed to the configurator before each backprop (update) step.
val_conf_args :
optional kwargs passed to the configurator when configuring the validation data.
net_args :
optional kwargs passed to the amortizer
early_stopping_args :
optional kwargs passed to the `EarlyStopper`
Returns
-------
losses : ``dict`` or ``pandas.DataFrame``
A dictionary or a data frame storing the losses across epochs and iterations
"""
# Compile update function, if specified
if use_autograph:
_backprop_step = tf.function(backprop_step, reduce_retracing=True)
else:
_backprop_step = backprop_step
# Inits
if isinstance(self.amortizer, AmortizedModelComparison):
data_set = MultiSimulationDataset(simulations_dict, batch_size)
else:
data_set = SimulationDataset(simulations_dict, batch_size)
self._setup_optimizer(optimizer, epochs, data_set.num_batches)
self.loss_history.start_new_run()
validation_sims = self._config_validation(validation_sims, **kwargs.pop("val_model_args", {}))
# Create early stopper, if conditions met, otherwise None returned
early_stopper = self._config_early_stopping(early_stopping, validation_sims, **kwargs)
# Loop through epochs
for ep in range(1, epochs + 1):
with tqdm(total=data_set.num_batches, desc="Training epoch {}".format(ep)) as p_bar:
# Loop through dataset
for bi, forward_dict in enumerate(data_set, start=1):
# Perform one training step and obtain current loss value
input_dict = self.configurator(forward_dict, **kwargs.pop("conf_args", {}))
loss = self._train_step(batch_size, _backprop_step, input_dict, **kwargs)
# Store returned loss
self.loss_history.add_entry(ep, loss)
# Compute running loss
avg_dict = self.loss_history.get_running_losses(ep)
# Extract current learning rate
lr = extract_current_lr(self.optimizer)
# Format for display on progress bar
disp_str = format_loss_string(ep, bi, loss, avg_dict, lr=lr, it_str="Batch")
# Update progress
p_bar.set_postfix_str(disp_str)
p_bar.update(1)
# Store and compute validation loss, if specified
self._save_trainer(save_checkpoint)
self._validation(ep, validation_sims, **kwargs)
# Check early stopping, if specified
if self._check_early_stopping(early_stopper):
break
# Remove optimizer reference, if not set as persistent
if not reuse_optimizer:
self.optimizer = None
return self.loss_history.get_plottable()
def train_from_presimulation(
self,
presimulation_path,
optimizer,
save_checkpoint=True,
max_epochs=None,
reuse_optimizer=False,
custom_loader=None,
early_stopping=False,
validation_sims=None,
use_autograph=True,
**kwargs,
):
"""Trains an amortizer via a modified form of offline training.
Like regular offline training, it assumes that parameters, data and optional context have already
been simulated (i.e., forward inference has been performed).
Also like regular offline training, it is faster than online training in scenarios where simulations are slow.
Unlike regular offline training, it uses each batch from the presimulated dataset only once during training,
if not otherwise specified by a higher maximal number of epochs. Then, presimulated data is reused in a cyclic
manner to achieve the desired number of epochs.
A larger presimulated dataset is therefore required than for offline training, and the increase in speed
gained by loading simulations instead of generating them on the fly comes at a cost:
a large presimulated dataset takes up a large amount of hard drive space.
Parameters
----------
presimulation_path : str
File path to the folder containing the files from the precomputed simulation.
Ideally generated using a GenerativeModel's presimulate_and_save method, otherwise must match
the structure produced by that method.
Each file contains the data for one epoch (i.e. a number of batches), and must be compatible
with the custom_loader provided.
The custom_loader must read each file into a collection (either a dictionary or a list) of simulation_dict
objects.
This is easily achieved with the pickle library: if the files were generated from collections of
simulation_dict objects using pickle.dump, the _default_loader (the default for custom_loader) will
load them using pickle.load.
Training parameters like number of iterations and batch size are inferred from the files during training.
optimizer : tf.keras.optimizer.Optimizer
Optimizer for the neural network training. Since for this training, it is impossible to guess the number of
iterations beforehand, an optimizer must be provided.
save_checkpoint : bool, optional, default : True
Determines whether to save checkpoints after each epoch,
if a checkpoint_path provided during initialization, otherwise ignored.
max_epochs : int or None, optional, default: None
An optional parameter to limit or extend the number of epochs. If the number of epochs is larger than the
number of files in the dataset, presimulations will be reused.
reuse_optimizer : bool, optional, default: False
A flag indicating whether the optimizer instance should be treated as persistent or not.
If ``False``, the optimizer and its states are not stored after training has finished.
Otherwise, the optimizer will be stored as ``self.optimizer`` and re-used in further training runs.
custom_loader : callable, optional, default: self._default_loader
Must take a string file_path as an input and output a collection (dictionary or list) of
simulation_dict objects. A simulation_dict has the keys ``prior_non_batchable_context``,
``prior_batchable_context``, ``prior_draws``, ``sim_non_batchable_context``, ``sim_batchable_context``, and
``sim_data``.
Here, ``prior_draws`` and ``sim_data`` must have actual data as values, the rest are optional.
early_stopping : bool, optional, default: False
Whether to use optional stopping or not during training. Could speed up training.
validation_sims : dict, int, or None, optional, default: None
Simulations used as a validation set.
If ``dict``, will assume it's the output of a generative model and try
``amortizer.compute_loss(configurator(validation_sims))``
after each epoch.
If ``int``, will assume it's the number of sims to generate from the generative
model before starting training. Only considered if a generative model has been
provided during initialization.
If ``None`` (default), no validation set will be used.
use_autograph : bool, optional, default: True
Whether to use autograph for the backprop step. Could lead to enormous speed-ups but
could also be harder to debug.
Other Parameters
----------------
conf_args :
optional keyword arguments passed to the configurator
net_args :
optional keyword arguments passed to the amortizer
Returns
-------
losses : ``dict`` or ``pandas.DataFrame``
A dictionary or a data frame storing the losses across epochs and iterations
"""
self.optimizer = optimizer
# Compile update function, if specified
if use_autograph:
_backprop_step = tf.function(backprop_step, reduce_retracing=True)
else:
_backprop_step = backprop_step
# Inits
self.loss_history.start_new_run()
validation_sims = self._config_validation(validation_sims, **kwargs.pop("val_model_args", {}))
# Create early stopper, if conditions met, otherwise None returned
early_stopper = self._config_early_stopping(early_stopping, validation_sims, **kwargs)
# Loop over the presimulated dataset.
file_list = os.listdir(presimulation_path)
# Use default loading function if none is provided
if custom_loader is None:
custom_loader = self._default_loader
# Remove non-pickle files from the list
file_list = [f for f in file_list if f.endswith(".pkl")]
if max_epochs is not None:
# Limit number of epochs to max_epochs
if len(file_list) > max_epochs:
file_list = file_list[:max_epochs]
# If the number of files is smaller than the number of epochs, repeat the files until max_epochs is reached
elif len(file_list) < max_epochs:
file_list = file_list * int(np.ceil(max_epochs / len(file_list)))
file_list = file_list[:max_epochs]
for ep, current_filename in enumerate(file_list, start=1):
# Read single file into memory as a dictionary or list
file_path = os.path.join(presimulation_path, current_filename)
epoch_data = custom_loader(file_path)
# For each epoch, the number of iterations is inferred from the presimulated dictionary or
# list used for that epoch
if isinstance(epoch_data, dict):
index_list = list(epoch_data.keys())
elif isinstance(epoch_data, list):
index_list = np.arange(len(epoch_data))
else:
raise ValueError(
f"Loading a simulation file resulted in a {type(epoch_data)}. Must be a dictionary or a list!"
)
with tqdm(total=len(index_list), desc=f"Training epoch {ep}") as p_bar:
for it, index in enumerate(index_list, start=1):
# Perform one training step and obtain current loss value
input_dict = self.configurator(epoch_data[index])
# Like the number of iterations, the batch size is inferred from presimulated dictionary or list
batch_size = epoch_data[index][DEFAULT_KEYS["sim_data"]].shape[0]
loss = self._train_step(batch_size, _backprop_step, input_dict, **kwargs)
# Store returned loss
self.loss_history.add_entry(ep, loss)
# Compute running loss
avg_dict = self.loss_history.get_running_losses(ep)
# Extract current learning rate
lr = extract_current_lr(self.optimizer)
# Format for display on progress bar
disp_str = format_loss_string(ep, it, loss, avg_dict, lr=lr)
# Update progress bar
p_bar.set_postfix_str(disp_str)
p_bar.update(1)
# Store after each epoch, if specified
self._save_trainer(save_checkpoint)
self._validation(ep, validation_sims, **kwargs)
# Check early stopping, if specified
if self._check_early_stopping(early_stopper):
break
# Remove reference to optimizer, if not set to persistent
if not reuse_optimizer:
self.optimizer = None
return self.loss_history.get_plottable()
def train_experience_replay(
self,
epochs,
iterations_per_epoch,
batch_size,
save_checkpoint=True,
optimizer=None,
reuse_optimizer=False,
buffer_capacity=1000,
early_stopping=False,
use_autograph=True,
validation_sims=None,
**kwargs,
):
"""Trains the network(s) via experience replay using a memory replay buffer, as utilized
in reinforcement learning. Additional keyword arguments are passed to the generative mode,
configurator, and amortizer. Read below for signature.
Parameters
----------
epochs : int
Number of epochs (and number of times a checkpoint is stored)
iterations_per_epoch : int
Number of batch simulations to perform per epoch
batch_size : int
Number of simulations to perform at each backpropagation step.
save_checkpoint : bool, optional, default: True
A flag to decide whether to save checkpoints after each epoch,
if a ``checkpoint_path`` provided during initialization, otherwise ignored.
optimizer : tf.keras.optimizer.Optimizer or None
Optimizer for the neural network. ``None`` will result in ``tf.keras.optimizers.Adam``
using a learning rate of 5e-4 and a cosine decay from 5e-4 to 0. A custom optimizer
will override default learning rate and schedule settings.
reuse_optimizer : bool, optional, default: False
A flag indicating whether the optimizer instance should be treated as persistent or not.
If ``False``, the optimizer and its states are not stored after training has finished.
Otherwise, the optimizer will be stored as ``self.optimizer`` and re-used in further training runs.
buffer_capacity : int, optional, default: 1000
Max number of batches to store in buffer. For instance, if ``batch_size=32``
and ``capacity_in_batches=1000``, then the buffer will hold a maximum of
32 * 1000 = 32000 simulations. Be careful with memory!
Important! Argument will be ignored if buffer has previously been initialized!
early_stopping : bool, optional, default: False
Whether to use optional stopping or not during training. Could speed up training.
Only works if ``validation_sims is not None``, i.e., validation data has been provided.
use_autograph : bool, optional, default: True
Whether to use autograph for the backprop step. Could lead to enormous speed-ups but
could also be harder to debug.
validation_sims : dict, int, or None, optional, default: None
Simulations used as a "validation set".
If ``dict``, will assume it's the output of a generative model and try
``amortizer.compute_loss(configurator(validation_sims))``
after each epoch.
If ``int``, will assume it's the number of sims to generate from the generative
model before starting training. Only considered if a generative model has been
provided during initialization.
If ``None`` (default), no validation set will be used.
Other Parameters
----------------
model_args :
optional kwargs passed to the generative model
val_model_args :
optional kwargs passed to the generative model for generating validation data. Only useful if
``type(validation_sims) is int``.
conf_args :
optional kwargs passed to the configurator before each backprop (update) step.
val_conf_args :
optional kwargs passed to the configurator when configuring the validation data.
net_args :
optional kwargs passed to the amortizer
early_stopping_args:
optional kwargs passed to the `EarlyStopper`
Returns
-------
losses : ``dict`` or ``pandas.DataFrame``
A dictionary or a data frame storing the losses across epochs and iterations.
"""
assert self.generative_model is not None, "No generative model found. Only offline training is possible!"
# Compile update function, if specified
if use_autograph:
_backprop_step = tf.function(backprop_step, reduce_retracing=True)
else:
_backprop_step = backprop_step
# Inits
self._setup_optimizer(optimizer, epochs, iterations_per_epoch)
self.loss_history.start_new_run()
if self.replay_buffer is None:
self.replay_buffer = MemoryReplayBuffer(buffer_capacity)
validation_sims = self._config_validation(validation_sims)
# Create early stopper, if conditions met, otherwise None returned
early_stopper = self._config_early_stopping(early_stopping, validation_sims, **kwargs)
# Loop through epochs
for ep in range(1, epochs + 1):
with tqdm(total=iterations_per_epoch, desc=f"Training epoch {ep}") as p_bar:
for it in range(1, iterations_per_epoch + 1):
# Simulate a batch of data and store into buffer
input_dict = self._forward_inference(
batch_size, **kwargs.pop("conf_args", {}), **kwargs.pop("model_args", {})
)
self.replay_buffer.store(input_dict)
# Sample from buffer
input_dict = self.replay_buffer.sample()
# One step backprop
loss = _backprop_step(input_dict, self.amortizer, self.optimizer, **kwargs.pop("net_args", {}))
# Store returned loss
self.loss_history.add_entry(ep, loss)
# Compute running loss
avg_dict = self.loss_history.get_running_losses(ep)
# Extract current learning rate
lr = extract_current_lr(self.optimizer)
# Format for display on progress bar
disp_str = format_loss_string(ep, it, loss, avg_dict, lr=lr)
# Update progress bar
p_bar.set_postfix_str(disp_str)
p_bar.update(1)
# Store and compute validation loss, if specified
self._save_trainer(save_checkpoint)
self._validation(ep, validation_sims, **kwargs)
# Check early stopping, if specified
if self._check_early_stopping(early_stopper):
break
# Remove optimizer reference, if not set as persistent
if not reuse_optimizer:
self.optimizer = None
return self.loss_history.get_plottable()
def train_rounds(
self,
rounds,
sim_per_round,
epochs,
batch_size,
save_checkpoint=True,
optimizer=None,
reuse_optimizer=False,
early_stopping=False,
use_autograph=True,
validation_sims=None,
**kwargs,
):
"""Trains an amortizer via round-based learning. In each round, ``sim_per_round`` data sets
are simulated from the generative model and added to the data sets simulated in previous
rounds. Then, the networks are trained for ``epochs`` epochs on the augmented set of data sets.
.. note::
Training time will increase from round to round, since the number of simulations
increases correspondingly. The final round will then train the networks on ``rounds * sim_per_round``
data sets, so make sure this number does not eat up all available memory.
Parameters
----------
rounds : int
Number of rounds to perform (outer loop)
sim_per_round : int
Number of simulations per round
epochs : int
Number of epochs (and number of times a checkpoint is stored, inner loop) within a round.
batch_size : int
Number of simulations to use at each backpropagation step
save_checkpoint : bool, optional, default: True
A flag to decide whether to save checkpoints after each epoch,
if a checkpoint_path was provided during initialization, otherwise ignored.
optimizer : tf.keras.optimizer.Optimizer or None
Optimizer for the neural network training. ``None`` will result in ``tf.keras.optimizers.Adam``
using a learning rate of 5e-4 and a cosine decay from 5e-4 to 0. A custom optimizer
will override default learning rate and schedule settings.
reuse_optimizer : bool, optional, default: False
A flag indicating whether the optimizer instance should be treated as persistent or not.
If ``False``, the optimizer and its states are not stored after training has finished.
Otherwise, the optimizer will be stored as ``self.optimizer`` and re-used in further training runs.
early_stopping : bool, optional, default: False
Whether to use early stopping during training. Could speed up training.
Only works if ``validation_sims is not None``, i.e., validation data has been provided.
Will be performed within rounds, not between rounds!
use_autograph : bool, optional, default: True
Whether to use autograph for the backprop step. Could lead to enormous speed-ups but
could also be harder to debug.
validation_sims : dict, int, or None, optional, default: None
Simulations used as a "validation set".
If ``dict``, will assume it's the output of a generative model and try
``amortizer.compute_loss(configurator(validation_sims))``
after each epoch.
If ``int``, will assume it's the number of sims to generate from the generative
model before starting training. Only considered if a generative model has been
provided during initialization.
If ``None`` (default), no validation set will be used.
Other Parameters
----------------
model_args :
optional kwargs passed to the generative model
val_model_args :
optional kwargs passed to the generative model for generating validation data. Only useful if
``type(validation_sims) is int``.
conf_args :
optional kwargs passed to the configurator before each backprop (update) step.
val_conf_args :
optional kwargs passed to the configurator when configuring the validation data.
net_args :
optional kwargs passed to the amortizer
early_stopping_args :
optional kwargs passed to the `EarlyStopper`
Returns
-------
losses : ``dict`` or ``pandas.DataFrame``
A dictionary or a data frame storing the losses across epochs and iterations
"""
assert self.generative_model is not None, "No generative model found. Only offline training is possible!"
# Prepare logger
logger = logging.getLogger()
# Create new optimizer and initialize loss history, needs to calculate iters per epoch
batches_per_sim = np.ceil(sim_per_round / batch_size)
sum_total = (rounds + rounds**2) / 2
iterations_per_epoch = int(batches_per_sim * sum_total)
self._setup_optimizer(optimizer, epochs, iterations_per_epoch)
validation_sims = self._config_validation(validation_sims)
# Loop for each round
first_round = True
for r in range(1, rounds + 1):
# Data generation step
if first_round:
# Simulate initial data
logger.info(f"Simulating initial {sim_per_round} data sets for training...")
simulations_dict = self._forward_inference(sim_per_round, configure=False, **kwargs)
first_round = False
else:
# Simulate further data
logger.info(f"Simulating new {sim_per_round} data sets and appending to previous...")
logger.info(f"New total number of simulated data sets for training: {sim_per_round * r}")
simulations_dict_r = self._forward_inference(sim_per_round, configure=False, **kwargs)
# Attempt to concatenate data sets
for k in simulations_dict.keys():
if simulations_dict[k] is not None:
simulations_dict[k] = np.concatenate((simulations_dict[k], simulations_dict_r[k]), axis=0)
# Train offline with generated stuff
_ = self.train_offline(
simulations_dict,
epochs,
batch_size,
save_checkpoint,
reuse_optimizer=True,
early_stopping=early_stopping,
use_autograph=use_autograph,
validation_sims=validation_sims,
**kwargs,
)
# Remove optimizer reference, if not set as persistent
if not reuse_optimizer:
self.optimizer = None
return self.loss_history.get_plottable()
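# Usage sketch (illustrative only; ``trainer`` is assumed to be an already constructed
# instance of this class with a generative model attached):
#
#   history = trainer.train_rounds(rounds=4, sim_per_round=500, epochs=10, batch_size=32,
#                                  validation_sims=200, early_stopping=True)
#   # ``history`` holds the losses per epoch/iteration, as a dict or pandas.DataFrame.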
def mmd_hypothesis_test(
self, observed_data, reference_data=None, num_reference_simulations=1000, num_null_samples=100, bootstrap=False
):
"""Performs a sampling-based hypothesis test for detecting Out-Of-Simulation (model misspecification).
Parameters
----------
observed_data : np.ndarray
Observed data, shape (num_observed, ...)
reference_data : np.ndarray
Reference data representing samples from the well-specified model, shape (num_reference, ...)
num_reference_simulations : int, default: 1000
Number of reference simulations (M) simulated from the trainer's generative model
if no `reference_data` are provided.
num_null_samples : int, default: 100
Number of draws from the MMD sampling distribution under the null hypothesis "the trainer's generative
model is well-specified"
bootstrap : bool, default: False
If true, the reference data (see above) are bootstrapped for each sample from the MMD sampling distribution.
If false, a new data set is simulated for computing each draw from the MMD sampling distribution.
Returns
-------
mmd_null_samples : np.ndarray
samples from the H0 sampling distribution ("well-specified model")
mmd_observed : float
summary MMD estimate for the observed data sets
"""
if reference_data is None:
if self.generative_model is None:
raise ArgumentError("If you do not provide reference data, your trainer must have a generative model!")
reference_data = self.configurator(self.generative_model(num_reference_simulations))
if type(reference_data) == dict and "summary_conditions" in reference_data.keys():
reference_summary = self.amortizer.summary_net(reference_data["summary_conditions"])
else:
reference_summary = self.amortizer.summary_net(reference_data)
if type(observed_data) == dict and "summary_conditions" in observed_data.keys():
observed_summary = self.amortizer.summary_net(observed_data["summary_conditions"])
else:
observed_summary = self.amortizer.summary_net(observed_data)
num_observed = observed_summary.shape[0]
num_reference = reference_summary.shape[0]
mmd_null_samples = np.empty(num_null_samples, dtype=np.float32)
for i in tqdm(range(num_null_samples)):
if bootstrap:
bootstrap_idx = np.random.randint(0, num_reference, size=num_observed)
simulated_summary = tf.gather(reference_summary, bootstrap_idx, axis=0)
else:
simulated_data = self.configurator(self.generative_model(num_observed))
simulated_summary = self.amortizer.summary_net(simulated_data["summary_conditions"])
mmd_null_samples[i] = np.sqrt(maximum_mean_discrepancy(reference_summary, simulated_summary).numpy())
mmd_observed = np.sqrt(maximum_mean_discrepancy(reference_summary, observed_summary).numpy())
return mmd_null_samples, mmd_observed
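# Interpretation sketch (illustrative only; ``trainer`` and ``observed_data`` are assumed
# to exist): the returned null samples and observed MMD are typically combined into a
# one-sided p-value for the hypothesis "the generative model is well-specified".
#
#   mmd_null, mmd_obs = trainer.mmd_hypothesis_test(observed_data, num_null_samples=500)
#   p_value = np.mean(mmd_null >= mmd_obs)  # fraction of null draws at least as extreme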
def _config_validation(self, validation_sims, **kwargs):
"""Helper method to prepare validation set based on user input."""
logger = logging.getLogger()
if validation_sims is None:
return None
if type(validation_sims) is dict:
return validation_sims
if type(validation_sims) is int:
if self.generative_model is not None:
vals = self.generative_model(validation_sims, **kwargs)
logger.info(f"Generated {validation_sims} simulations for validation.")
return vals
else:
logger.warning(
"Validation simulations can only be generated if the Trainer is initialized "
+ "with a generative model."
)
return None
logger.warning('Type of argument "validation_sims" not understood. No validation simulations were created.')
def _config_early_stopping(self, early_stopping, validation_sims, **kwargs):
"""Helper method to configure early stopping or warn user for."""
if early_stopping:
if validation_sims is not None:
early_stopper = EarlyStopper(**kwargs.pop("early_stopping_args", {}))
else:
logger = logging.getLogger()
logger.warning("No early stopping will be used, since validation_sims were not provided.")
early_stopper = None
return early_stopper
return None
def _setup_optimizer(self, optimizer, epochs, iterations_per_epoch):
"""Helper method to prepare optimizer based on user input."""
if optimizer is None:
# No optimizer so far and None provided
if self.optimizer is None:
# Calculate decay steps for default cosine decay
schedule = tf.keras.optimizers.schedules.CosineDecay(
self.default_lr, iterations_per_epoch * epochs, name="lr_decay"
)
self.optimizer = tf.keras.optimizers.Adam(schedule, **OPTIMIZER_DEFAULTS)
# No optimizer provided, but optimizer exists, that is,
# has been declared as persistent, so do nothing
else:
pass
else:
self.optimizer = optimizer
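# Sketch of overriding the default optimizer (illustrative only; ``trainer`` is assumed
# to be an existing instance of this class):
#
#   custom_opt = tf.keras.optimizers.Adam(learning_rate=1e-4)
#   trainer.train_rounds(rounds=4, sim_per_round=500, epochs=10, batch_size=32,
#                        optimizer=custom_opt)
#
# Any pre-built optimizer passed this way replaces the default Adam with cosine decay.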
def _save_trainer(self, save_checkpoint):
"""Helper method to take care of IO operations."""
if self.manager is not None and save_checkpoint:
self.manager.save()
self.loss_history.save_to_file(file_path=self.checkpoint_path, max_to_keep=self.max_to_keep)
if self.simulation_memory is not None:
self.simulation_memory.save_to_file(file_path=self.checkpoint_path)
def _validation(self, ep, validation_sims, **kwargs):
"""Helper method to take care of computing the validation loss(es)."""
if validation_sims is not None:
conf = self.configurator(validation_sims, **kwargs.pop("val_conf_args", {}))
val_loss = self.amortizer.compute_loss(conf, **kwargs.pop("net_args", {}))
self.loss_history.add_val_entry(ep, val_loss)
val_loss_str = loss_to_string(ep, val_loss)
logger = logging.getLogger()
logger.info(val_loss_str)
def _check_early_stopping(self, early_stopper):
"""Helper method to check improvement in validation loss."""
if early_stopper is not None:
if early_stopper.update_and_recommend(self.loss_history.last_total_val_loss()):
logger = logging.getLogger()
logger.info("Early stopping triggered.")
return True
return False
def _train_step(self, batch_size, update_step, input_dict=None, **kwargs):
"""Performs forward inference -> configuration -> network -> loss pipeline.
Parameters
----------
batch_size : int
Number of simulations to perform at each backprop step
update_step : callable
The function which will perform one backprop step on a batch. Should have the following signature:
``update_step(input_dict, amortizer, optimizer, **kwargs)``
input_dict : dict
The optional pre-configured forward dict from a generative model; simulated internally if None is provided
Other Parameters
----------------
model_args :
optional keyword arguments passed to the generative model
conf_args :
optional keyword arguments passed to the configurator
net_args :
optional keyword arguments passed to the amortizer
"""
if input_dict is None:
input_dict = self._forward_inference(
batch_size, **kwargs.pop("conf_args", {}), **kwargs.pop("model_args", {})
)
if self.simulation_memory is not None:
self.simulation_memory.store(input_dict)
loss = update_step(input_dict, self.amortizer, self.optimizer, **kwargs.pop("net_args", {}))
return loss
def _forward_inference(self, n_sim, configure=True, **kwargs):
"""Performs one step of single-model forward inference.
Parameters
----------
n_sim : int
Number of simulations to perform at the given step (i.e., batch size)
configure : bool, optional, default: True
Determines whether to pass the forward inputs through a configurator.
**kwargs : dict
Optional keyword arguments passed to the generative model
Returns
-------
out_dict : dict
The outputs of the generative model.
Raises
------
SimulationError
If the trainer has no generative model but ``trainer._forward_inference``
is called (i.e., needs to simulate data from the generative model)
"""
if self.generative_model is None:
raise SimulationError("No generative model specified. Only offline learning is available!")
out_dict = self.generative_model(n_sim, **kwargs.pop("model_args", {}))
if configure:
out_dict = self.configurator(out_dict, **kwargs.pop("conf_args", {}))
return out_dict
def _manage_configurator(self, config_fun, **kwargs):
"""Determines which configurator to use if None specified during construction."""
# Do nothing if callable provided
if callable(config_fun):
return config_fun
# If None or something else (default), infer default config based on amortizer type
else:
# Amortized posterior
if isinstance(self.amortizer, AmortizedPosterior):
default_config = DefaultPosteriorConfigurator()
# Amortized likelihood
elif isinstance(self.amortizer, AmortizedLikelihood):
default_config = DefaultLikelihoodConfigurator()
# Joint amortizer
elif isinstance(self.amortizer, AmortizedPosteriorLikelihood):
default_config = DefaultJointConfigurator()
# Model comparison amortizer
elif isinstance(self.amortizer, AmortizedModelComparison):
if kwargs.get("num_models") is None:
raise ConfigurationError(
'Either your generative model or amortizer should have "num_models" attribute, or '
+ "you need initialize Trainer with num_models explicitly!"
)
default_config = DefaultModelComparisonConfigurator(kwargs.get("num_models"))
# Unknown raises an error
else:
raise NotImplementedError(
f"Could not initialize configurator based on " + f"amortizer type {type(self.amortizer)}!"
)
return default_config
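# Sketch of a user-supplied configurator (illustrative only): any callable mapping the
# generative model's output dict to the amortizer's expected inputs is returned unchanged
# by the method above. The dictionary keys below are assumptions about a particular
# model/amortizer pairing and are not fixed by this class.
#
#   def my_configurator(forward_dict):
#       return {
#           "parameters": forward_dict["prior_draws"].astype(np.float32),
#           "summary_conditions": forward_dict["sim_data"].astype(np.float32),
#       }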
def _check_consistency(self):
"""Attempts to run one step generative_model -> configurator -> amortizer -> loss with
batch_size=2. Should be skipped if generative model has non-standard behavior.
Raises
------
ConfigurationError
If any operation along the above chain fails.
"""
logger = logging.getLogger()
logger.setLevel(logging.INFO)
if self.generative_model is not None:
_n_sim = 2
try:
logger.info("Performing a consistency check with provided components...")
_ = self.amortizer.compute_loss(self.configurator(self.generative_model(_n_sim)))
logger.info("Done.")
except Exception as err:
raise ConfigurationError(
"Could not carry out computations of generative_model ->"
+ f"configurator -> amortizer -> loss! Error trace:\n {err}"
)
def _default_loader(self, file_path):
"""Uses pickle to load as a default."""
with open(file_path, "rb+") as f:
loaded_file = pickle_load(f)
return loaded_file
| 62,525 | 46.189434 | 120 | py |
BayesFlow | BayesFlow-master/bayesflow/helper_networks.py | # Copyright (c) 2022 The BayesFlow Developers
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from functools import partial
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Conv1D, Dense, Dropout
from tensorflow.keras.models import Sequential
from bayesflow.exceptions import ConfigurationError
from bayesflow.wrappers import SpectralNormalization
class DenseCouplingNet(tf.keras.Model):
"""Implements a conditional version of a standard fully connected (FC) network.
Would also work as an unconditional estimator."""
def __init__(self, settings, dim_out, **kwargs):
"""Creates a conditional coupling net (FC neural network).
Parameters
----------
settings : dict
A dictionary holding arguments for a dense layer:
See https://www.tensorflow.org/api_docs/python/tf/keras/layers/Dense
As well as custom arguments for settings such as residual networks,
dropout, and spectral normalization.
dim_out : int
Number of outputs of the coupling net. Determined internally by the
consumer classes.
**kwargs : dict, optional, default: {}
Optional keyword arguments passed to the `tf.keras.Model` constructor.
"""
super().__init__(**kwargs)
# Create network body (input and hidden layers)
self.fc = Sequential()
for _ in range(settings["num_dense"]):
# Create dense layer with dict kwargs
layer = Dense(**settings["dense_args"])
# Wrap in spectral normalization, if specified
if settings.get("spec_norm") is True:
layer = SpectralNormalization(layer)
self.fc.add(layer)
# Figure out which dropout to use, MC has precedence over standard
# Fails gently, if no dropout_prob is specified
# Case both specified, MC wins
if settings.get("dropout") and settings.get("mc_dropout"):
self.fc.add(MCDropout(dropout_prob=settings["dropout_prob"]))
# Case only dropout, use standard
elif settings.get("dropout") and not settings.get("mc_dropout"):
self.fc.add(Dropout(rate=settings["dropout_prob"]))
# Case only MC, use MC
elif not settings.get("dropout") and settings.get("mc_dropout"):
self.fc.add(MCDropout(dropout_prob=settings["dropout_prob"]))
# No dropout
else:
pass
# Set residual flag
if settings.get("residual"):
self.fc.add(Dense(dim_out, **{k: v for k, v in settings["dense_args"].items() if k != "units"}))
self.residual_output = Dense(dim_out, kernel_initializer="zeros")
else:
self.fc.add(Dense(dim_out, kernel_initializer="zeros"))
self.residual_output = None
self.fc.build(input_shape=())
def call(self, target, condition, **kwargs):
"""Concatenates target and condition and performs a forward pass through the coupling net.
Parameters
----------
target : tf.Tensor
The split estimation quantities, for instance, parameters :math:`\\theta \\sim p(\\theta)` of interest, shape (batch_size, ...)
condition : tf.Tensor or None
the conditioning vector of interest, for instance ``x = summary(x)``, shape (batch_size, summary_dim)
"""
# Handle case no condition
if condition is None:
if self.residual_output is not None:
return self.residual_output(self.fc(target, **kwargs) + target, **kwargs)
else:
return self.fc(target, **kwargs)
# Handle 3D case for a set-flow and repeat condition over
# the second `time` or `n_observations` axis of `target`
if len(tf.shape(target)) == 3 and len(tf.shape(condition)) == 2:
shape = tf.shape(target)
condition = tf.expand_dims(condition, 1)
condition = tf.tile(condition, [1, shape[1], 1])
inp = tf.concat((target, condition), axis=-1)
out = self.fc(inp, **kwargs)
if self.residual_output is not None:
out = self.residual_output(out + target, **kwargs)
return out
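# Construction sketch (illustrative settings and shapes; the keys shown are the ones read
# by the constructor above):
#
#   settings = {
#       "dense_args": dict(units=64, activation="relu"),
#       "num_dense": 2,
#       "spec_norm": False,
#       "dropout": True,
#       "mc_dropout": False,
#       "dropout_prob": 0.05,
#       "residual": False,
#   }
#   net = DenseCouplingNet(settings, dim_out=4)
#   out = net(tf.random.normal((8, 4)), tf.random.normal((8, 16)))  # -> shape (8, 4)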
class Permutation(tf.keras.Model):
"""Implements a layer to permute the inputs entering a (conditional) coupling layer. Uses
fixed permutations, as these perform equally well compared to learned permutations."""
def __init__(self, input_dim):
"""Creates an invertible permutation layer for a conditional invertible layer.
Parameters
----------
input_dim : int
The dimensionality of the input to the (conditional) coupling layer.
"""
super().__init__()
permutation_vec = np.random.permutation(input_dim)
inv_permutation_vec = np.argsort(permutation_vec)
self.permutation = tf.Variable(
initial_value=permutation_vec, trainable=False, dtype=tf.int32, name="permutation"
)
self.inv_permutation = tf.Variable(
initial_value=inv_permutation_vec, trainable=False, dtype=tf.int32, name="inv_permutation"
)
def call(self, target, inverse=False):
"""Permutes a batch of target vectors over the last axis.
Parameters
----------
target : tf.Tensor of shape (batch_size, ...)
The target vector to be permuted over its last axis.
inverse : bool, optional, default: False
Controls if the current pass is forward (``inverse=False``) or inverse (``inverse=True``).
Returns
-------
out : tf.Tensor of the same shape as `target`.
The (un-)permuted target vector.
"""
if not inverse:
return self._forward(target)
else:
return self._inverse(target)
def _forward(self, target):
"""Performs a fixed permutation over the last axis."""
return tf.gather(target, self.permutation, axis=-1)
def _inverse(self, target):
"""Un-does the fixed permutation over the last axis."""
return tf.gather(target, self.inv_permutation, axis=-1)
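# Round-trip sketch (illustrative only): a fixed permutation followed by its inverse
# recovers the original tensor exactly.
#
#   perm = Permutation(input_dim=6)
#   x = tf.random.normal((2, 6))
#   x_back = perm(perm(x), inverse=True)  # x_back equals x element-wise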
class Orthogonal(tf.keras.Model):
"""Imeplements a learnable orthogonal transformation according to [1]. Can be
used as an alternative to a fixed ``Permutation`` layer.
[1] Kingma, D. P., & Dhariwal, P. (2018). Glow: Generative flow with invertible 1x1
convolutions. Advances in neural information processing systems, 31.
"""
def __init__(self, input_dim):
"""Creates an invertible orthogonal transformation (generalized permutation)
Parameters
----------
input_dim : int
The dimensionality of the input to the (conditional) coupling layer.
"""
super().__init__()
init = tf.keras.initializers.Orthogonal()
self.W = tf.Variable(
initial_value=init(shape=(input_dim, input_dim)), trainable=True, dtype=tf.float32, name="learnable_permute"
)
def call(self, target, inverse=False):
"""Transforms a batch of target vectors over the last axis through an approximately
orthogonal transform.
Parameters
----------
target : tf.Tensor of shape (batch_size, ...)
The target vector to be rotated over its last axis.
inverse : bool, optional, default: False
Controls if the current pass is forward (``inverse=False``) or inverse (``inverse=True``).
Returns
-------
out : tf.Tensor of the same shape as `target`.
The (un-)rotated target vector.
"""
if not inverse:
return self._forward(target)
else:
return self._inverse(target)
def _forward(self, target):
"""Performs a learnable generalized permutation over the last axis."""
shape = tf.shape(target)
rank = len(shape)
log_det = tf.math.log(tf.math.abs(tf.linalg.det(self.W)))
if rank == 2:
z = tf.linalg.matmul(target, self.W)
else:
z = tf.tensordot(target, self.W, [[rank - 1], [0]])
log_det = tf.cast(shape[1], tf.float32) * log_det
return z, log_det
def _inverse(self, z):
"""Un-does the learnable permutation over the last axis."""
W_inv = tf.linalg.inv(self.W)
rank = len(tf.shape(z))
if rank == 2:
return tf.linalg.matmul(z, W_inv)
return tf.tensordot(z, W_inv, [[rank - 1], [0]])
class MCDropout(tf.keras.Model):
"""Implements Monte Carlo Dropout as a Bayesian approximation according to [1].
Perhaps not the best approximation, but arguably the cheapest one out there!
[1] Gal, Y., & Ghahramani, Z. (2016, June). Dropout as a bayesian approximation:
Representing model uncertainty in deep learning.
In international conference on machine learning (pp. 1050-1059). PMLR.
"""
def __init__(self, dropout_prob=0.1, **kwargs):
"""Creates a custom instance of an MC Dropout layer. Will be used both
during training and inference.
Parameters
----------
dropout_prob : float, optional, default: 0.1
The dropout rate to be passed to ``tf.keras.layers.Dropout()``.
"""
super().__init__(**kwargs)
self.drop = Dropout(dropout_prob)
def call(self, inputs):
"""Randomly sets elements of ``inputs`` to zero.
Parameters
----------
inputs : tf.Tensor
Input of shape (batch_size, ...)
Returns
-------
out : tf.Tensor
Output of shape (batch_size, ...), same as ``inputs``.
"""
out = self.drop(inputs, training=True)
return out
class ActNorm(tf.keras.Model):
"""Implements an Activation Normalization (ActNorm) Layer.
Activation Normalization is learned invertible normalization, using
a Scale (s) and Bias (b) vector::
y = s * x + b (forward)
x = (y - b)/s (inverse)
Notes
-----
The scale and bias can be initialized in a data-dependent way, such that the
output has a mean of zero and a standard deviation of one [1]_ [2]_.
Alternatively, it is initialized with vectors of ones (scale) and
zeros (bias).
References
----------
.. [1] Kingma, Diederik P., and Prafulla Dhariwal.
"Glow: Generative flow with invertible 1x1 convolutions."
arXiv preprint arXiv:1807.03039 (2018).
.. [2] Salimans, Tim, and Durk P. Kingma.
"Weight normalization: A simple reparameterization to accelerate
training of deep neural networks."
Advances in neural information processing systems 29 (2016): 901-909.
"""
def __init__(self, latent_dim, act_norm_init, **kwargs):
"""Creates an instance of an ActNorm Layer as proposed by [1].
Parameters
----------
latent_dim : int
The dimensionality of the latent space (equal to the dimensionality of the target variable)
act_norm_init : np.ndarray of shape (num_simulations, num_params) or None, optional, default: None
Optional data-dependent initialization for the internal ``ActNorm`` layers, as done in [1]. Could be helpful
for deep invertible networks.
"""
super().__init__(**kwargs)
# Initialize scale with ones and bias with zeros if no batch for initialization was provided.
if act_norm_init is None:
self.scale = tf.Variable(tf.ones((latent_dim,)), trainable=True, name="act_norm_scale")
self.bias = tf.Variable(tf.zeros((latent_dim,)), trainable=True, name="act_norm_bias")
else:
self._initalize_parameters_data_dependent(act_norm_init)
def call(self, target, inverse=False):
"""Performs one pass through the actnorm layer (either inverse or forward) and normalizes
the last axis of `target`.
Parameters
----------
target : tf.Tensor of shape (batch_size, ...)
the target variables of interest, i.e., parameters for posterior estimation
inverse : bool, optional, default: False
Flag indicating whether to run the block forward or backwards
Returns
-------
(z, log_det_J) : tuple(tf.Tensor, tf.Tensor)
If inverse=False: The transformed input and the corresponding Jacobian of the transformation,
z shape: (batch_size, inp_dim), log_det_J shape: (,)
target : tf.Tensor
If inverse=True: The inversely transformed targets, shape == target.shape
Notes
-----
If ``inverse=False``, the return is ``(z, log_det_J)``.\n
If ``inverse=True``, the return is ``target``.
"""
if not inverse:
return self._forward(target)
else:
return self._inverse(target)
def _forward(self, target):
"""Performs a forward pass through the layer."""
z = self.scale * target + self.bias
ldj = tf.math.reduce_sum(tf.math.log(tf.math.abs(self.scale)), axis=-1)
return z, ldj
def _inverse(self, target):
"""Performs an inverse pass through the layer."""
return (target - self.bias) / self.scale
def _initalize_parameters_data_dependent(self, init_data):
"""Performs a data dependent initalization of the scale and bias.
Initalizes the scale and bias vector as proposed by [1], such that the
layer output has a mean of zero and a standard deviation of one.
[1] - Salimans, Tim, and Durk P. Kingma.
Weight normalization: A simple reparameterization to accelerate
training of deep neural networks.
Advances in neural information processing systems 29
(2016): 901-909.
Parameters
----------
init_data : tf.Tensor of shape (batch size, number of parameters)
Initial values used to estimate the scale and bias parameters by computing
the mean and standard deviation along the first dimension of `init_data`.
"""
# 2D Tensor case, assume first batch dimension
if len(init_data.shape) == 2:
mean = tf.math.reduce_mean(init_data, axis=0)
std = tf.math.reduce_std(init_data, axis=0)
# 3D Tensor case, assume first batch dimension, second number of observations dimension
elif len(init_data.shape) == 3:
mean = tf.math.reduce_mean(init_data, axis=(0, 1))
std = tf.math.reduce_std(init_data, axis=(0, 1))
# Raise other cases
else:
raise ConfigurationError(
f"""Currently, ActNorm supports only 2D and 3D Tensors,
but act_norm_init contains data with shape {init_data.shape}."""
)
scale = 1.0 / std
bias = (-1.0 * mean) / std
self.scale = tf.Variable(scale, trainable=True, name="act_norm_scale")
self.bias = tf.Variable(bias, trainable=True, name="act_norm_bias")
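# Initialization sketch (illustrative only): with a data batch supplied, the first forward
# pass is standardized per dimension.
#
#   batch = tf.random.normal((256, 3), mean=5.0, stddev=2.0)
#   act_norm = ActNorm(latent_dim=3, act_norm_init=batch.numpy())
#   z, log_det_J = act_norm(batch)
#   # z now has approximately zero mean and unit standard deviation along the batch axis.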
class InvariantModule(tf.keras.Model):
"""Implements an invariant module performing a permutation-invariant transform.
For details and rationale, see:
[1] Bloem-Reddy, B., & Teh, Y. W. (2020). Probabilistic Symmetries and Invariant Neural Networks.
J. Mach. Learn. Res., 21, 90-1. https://www.jmlr.org/papers/volume21/19-322/19-322.pdf
"""
def __init__(self, settings, **kwargs):
"""Creates an invariant module according to [1] which represents a learnable permutation-invariant
function with an option for learnable pooling.
Parameters
----------
settings : dict
A dictionary holding the configuration settings for the module.
**kwargs : dict, optional, default: {}
Optional keyword arguments passed to the `tf.keras.Model` constructor.
"""
super().__init__(**kwargs)
# Create internal functions
self.s1 = Sequential([Dense(**settings["dense_s1_args"]) for _ in range(settings["num_dense_s1"])])
self.s2 = Sequential([Dense(**settings["dense_s2_args"]) for _ in range(settings["num_dense_s2"])])
# Pick pooling function
if settings["pooling_fun"] == "mean":
pooling_fun = partial(tf.reduce_mean, axis=-2)
elif settings["pooling_fun"] == "max":
pooling_fun = partial(tf.reduce_max, axis=-2)
else:
if callable(settings["pooling_fun"]):
pooling_fun = settings["pooling_fun"]
else:
raise ConfigurationError("pooling_fun argument not understood!")
self.pooler = pooling_fun
def call(self, x, **kwargs):
"""Performs the forward pass of a learnable invariant transform.
Parameters
----------
x : tf.Tensor
Input of shape (batch_size,..., x_dim)
Returns
-------
out : tf.Tensor
Output of shape (batch_size,..., out_dim)
"""
x_reduced = self.pooler(self.s1(x, **kwargs))
out = self.s2(x_reduced, **kwargs)
return out
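# Invariance sketch (illustrative settings): permuting the set axis leaves the output
# unchanged, since pooling is performed over that axis.
#
#   settings = {
#       "dense_s1_args": dict(units=32, activation="relu"),
#       "dense_s2_args": dict(units=32, activation="relu"),
#       "num_dense_s1": 2,
#       "num_dense_s2": 2,
#       "pooling_fun": "mean",
#   }
#   inv = InvariantModule(settings)
#   x = tf.random.normal((4, 10, 3))  # (batch_size, set_size, x_dim)
#   out1 = inv(x)
#   out2 = inv(tf.gather(x, tf.random.shuffle(tf.range(10)), axis=1))
#   # out1 and out2 agree up to floating point error.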
class EquivariantModule(tf.keras.Model):
"""Implements an equivariant module performing an equivariant transform.
For details and justification, see:
[1] Bloem-Reddy, B., & Teh, Y. W. (2020). Probabilistic Symmetries and Invariant Neural Networks.
J. Mach. Learn. Res., 21, 90-1. https://www.jmlr.org/papers/volume21/19-322/19-322.pdf
"""
def __init__(self, settings, **kwargs):
"""Creates an equivariant module according to [1] which combines equivariant transforms
with nested invariant transforms, thereby enabling interactions between set members.
Parameters
----------
settings : dict
A dictionary holding the configuration settings for the module.
**kwargs : dict, optional, default: {}
Optional keyword arguments passed to the ``tf.keras.Model`` constructor.
"""
super().__init__(**kwargs)
self.invariant_module = InvariantModule(settings)
self.s3 = Sequential([Dense(**settings["dense_s3_args"]) for _ in range(settings["num_dense_s3"])])
def call(self, x, **kwargs):
"""Performs the forward pass of a learnable equivariant transform.
Parameters
----------
x : tf.Tensor
Input of shape (batch_size, ..., x_dim)
Returns
-------
out : tf.Tensor
Output of shape (batch_size, ..., equiv_dim)
"""
# Store shape of x, will be (batch_size, ..., some_dim)
shape = tf.shape(x)
# Example: Output dim is (batch_size, inv_dim) - > (batch_size, N, inv_dim)
out_inv = self.invariant_module(x, **kwargs)
out_inv = tf.expand_dims(out_inv, -2)
tiler = [1] * len(shape)
tiler[-2] = shape[-2]
out_inv_rep = tf.tile(out_inv, tiler)
# Concatenate each x with the repeated invariant embedding
out_c = tf.concat([x, out_inv_rep], axis=-1)
# Pass through equivariant func
out = self.s3(out_c, **kwargs)
return out
class MultiConv1D(tf.keras.Model):
"""Implements an inception-inspired 1D convolutional layer using different kernel sizes."""
def __init__(self, settings, **kwargs):
"""Creates an inception-like Conv1D layer
Parameters
----------
settings : dict
A dictionary which holds the arguments for the internal ``Conv1D`` layers.
"""
super().__init__(**kwargs)
# Create a list of Conv1D layers with different kernel sizes
# ranging from 'min_kernel_size' (inclusive) to 'max_kernel_size' (exclusive)
self.convs = [
Conv1D(kernel_size=f, **settings["layer_args"])
for f in range(settings["min_kernel_size"], settings["max_kernel_size"])
]
# Create final Conv1D layer for dimensionality reduction
dim_red_args = {k: v for k, v in settings["layer_args"].items() if k not in ["kernel_size", "strides"]}
dim_red_args["kernel_size"] = 1
dim_red_args["strides"] = 1
self.dim_red = Conv1D(**dim_red_args)
def call(self, x, **kwargs):
"""Performs a forward pass through the layer.
Parameters
----------
x : tf.Tensor
Input of shape (batch_size, n_time_steps, n_time_series)
Returns
-------
out : tf.Tensor
Output of shape (batch_size, n_time_steps, n_filters)
"""
out = self._multi_conv(x, **kwargs)
out = self.dim_red(out, **kwargs)
return out
def _multi_conv(self, x, **kwargs):
"""Applies the convolutions with different sizes and concatenates outputs."""
return tf.concat([conv(x, **kwargs) for conv in self.convs], axis=-1)
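# Configuration sketch (illustrative values): "same" padding keeps the time axis aligned
# so the outputs of the differently sized kernels can be concatenated.
#
#   settings = {
#       "layer_args": dict(filters=32, activation="relu", padding="same"),
#       "min_kernel_size": 1,
#       "max_kernel_size": 4,   # kernels of size 1, 2, 3
#   }
#   conv = MultiConv1D(settings)
#   out = conv(tf.random.normal((8, 100, 2)))  # -> shape (8, 100, 32)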
| 22,112 | 36.416244 | 135 | py |
BayesFlow | BayesFlow-master/bayesflow/coupling_networks.py | # Copyright (c) 2022 The BayesFlow Developers
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import tensorflow as tf
from numpy import e as EULER_CONST
from numpy import pi as PI_CONST
from bayesflow import default_settings
from bayesflow.exceptions import ConfigurationError
from bayesflow.helper_functions import build_meta_dict
from bayesflow.helper_networks import ActNorm, DenseCouplingNet, Orthogonal, Permutation
class AffineCoupling(tf.keras.Model):
"""Implements a conditional affine coupling block according to [1, 2], with additional
options, such as residual blocks or Monte Carlo Dropout.
[1] Kingma, D. P., & Dhariwal, P. (2018).
Glow: Generative flow with invertible 1x1 convolutions.
Advances in neural information processing systems, 31.
[2] Ardizzone, L., Lüth, C., Kruse, J., Rother, C., & Köthe, U. (2019).
Guided image generation with conditional invertible neural networks.
arXiv preprint arXiv:1907.02392.
"""
def __init__(self, dim_out, settings_dict, **kwargs):
"""Creates one half of an affine coupling layer to be used as part of a ``CouplingLayer`` in
an ``InvertibleNetwork`` instance.
Parameters
----------
dim_out : int
The output dimensionality of the affine coupling layer.
settings_dict : dict
The settings for the inner networks. Defaults will use:
``settings_dict={
"dense_args" : dict(units=128, activation="relu"),
"num_dense" : 2,
"spec_norm" : False,
"mc_dropout" : False,
"dropout" : True,
"residual" : False,
"dropout_prob" : 0.01,
"soft_clamping" : 1.9
}
``
"""
super().__init__(**kwargs)
self.dim_out = dim_out
self.soft_clamp = settings_dict["soft_clamping"]
# Check if separate settings for s and t are provided and adjust accordingly
if settings_dict.get("s_args") is not None and settings_dict.get("t_args") is not None:
s_settings, t_settings = settings_dict.get("s_args"), settings_dict.get("t_args")
elif settings_dict.get("s_args") is not None and settings_dict.get("t_args") is None:
raise ConfigurationError("s_args were provided, but you also need to provide t_args!")
elif settings_dict.get("s_args") is None and settings_dict.get("t_args") is not None:
raise ConfigurationError("t_args were provided, but you also need to provide s_args!")
else:
s_settings, t_settings = settings_dict, settings_dict
# Internal network (learnable scale and translation)
self.scale = DenseCouplingNet(s_settings, dim_out)
self.translate = DenseCouplingNet(t_settings, dim_out)
def call(self, split1, split2, condition, inverse=False, **kwargs):
"""Performs one pass through an affine coupling layer (either inverse or forward).
Parameters
----------
split1 : tf.Tensor of shape (batch_size, ..., input_dim//2)
The first partition of the input vector(s)
split2 : tf.Tensor of shape (batch_size, ..., ceil[input_dim//2])
The second partition of the input vector(s)
condition : tf.Tensor or None
The conditioning data of interest, for instance, x = summary_fun(x), shape (batch_size, ...).
If ``condition is None``, then the layer reduces to an unconditional coupling.
inverse : bool, optional, default: False
Flag indicating whether to run the block forward or backward.
Returns
-------
(z, log_det_J) : tuple(tf.Tensor, tf.Tensor)
If inverse=False: The transformed input and the corresponding Jacobian of the transformation,
z shape: (batch_size, ..., input_dim//2), log_det_J shape: (batch_size, ...)
target : tf.Tensor
If inverse=True: The back-transformed z, shape (batch_size, ..., inp_dim//2)
"""
if not inverse:
return self._forward(split1, split2, condition, **kwargs)
return self._inverse(split1, split2, condition, **kwargs)
def _forward(self, u1, u2, condition, **kwargs):
"""Performs a forward pass through the coupling layer. Used internally by the instance.
Parameters
----------
u1 : tf.Tensor of shape (batch_size, ..., dim_1)
The first partition of the input
u2 : tf.Tensor of shape (batch_size, ..., dim_2)
The second partition of the input
condition : tf.Tensor of shape (batch_size, ..., dim_condition) or None
The optional conditioning vector. Batch size must match the batch size
of the partitions.
Returns
-------
(v, log_det_J) : tuple(tf.Tensor, tf.Tensor)
The transformed input and the corresponding Jacobian of the transformation.
"""
s = self.scale(u2, condition, **kwargs)
if self.soft_clamp is not None:
s = (2.0 * self.soft_clamp / PI_CONST) * tf.math.atan(s / self.soft_clamp)
t = self.translate(u2, condition, **kwargs)
v = u1 * tf.math.exp(s) + t
log_det_J = tf.reduce_sum(s, axis=-1)
return v, log_det_J
def _inverse(self, v1, v2, condition, **kwargs):
"""Performs an inverse pass through the affine coupling block. Used internally by the instance.
Parameters
----------
v1 : tf.Tensor of shape (batch_size, ..., dim_1)
The first partition of the latent vector
v2 : tf.Tensor of shape (batch_size, ..., dim_2)
The second partition of the latent vector
condition : tf.Tensor of shape (batch_size, ..., dim_condition)
The optional conditioning vector. Batch size must match the batch size
of the partitions.
Returns
-------
u : tf.Tensor of shape (batch_size, ..., dim_1)
The back-transformed input.
"""
s = self.scale(v1, condition, **kwargs)
if self.soft_clamp is not None:
s = (2.0 * self.soft_clamp / PI_CONST) * tf.math.atan(s / self.soft_clamp)
t = self.translate(v1, condition, **kwargs)
u = (v2 - t) * tf.math.exp(-s)
return u
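# Invertibility sketch (illustrative; the settings shown are the documented defaults):
# the first split is transformed given the second, and the inverse pass recovers it.
#
#   settings = {
#       "dense_args": dict(units=128, activation="relu"), "num_dense": 2,
#       "spec_norm": False, "mc_dropout": False, "dropout": True,
#       "residual": False, "dropout_prob": 0.01, "soft_clamping": 1.9,
#   }
#   coupling = AffineCoupling(dim_out=2, settings_dict=settings)
#   u1, u2 = tf.random.normal((8, 2)), tf.random.normal((8, 2))
#   cond = tf.random.normal((8, 5))
#   v, log_det_J = coupling(u1, u2, cond)          # forward: transforms u1 given u2
#   u1_back = coupling(u2, v, cond, inverse=True)  # recovers u1 up to numerical error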
class SplineCoupling(tf.keras.Model):
"""Implements a conditional spline coupling block according to [1, 2], with additional
options, such as residual blocks or Monte Carlo Dropout.
[1] Durkan, C., Bekasov, A., Murray, I., & Papamakarios, G. (2019).
Neural spline flows. Advances in Neural Information Processing Systems, 32.
[2] Ardizzone, L., Lüth, C., Kruse, J., Rother, C., & Köthe, U. (2019).
Guided image generation with conditional invertible neural networks.
arXiv preprint arXiv:1907.02392.
Implements only rational quadratic splines (RQS), since these appear to work
best in practice and lead to stable training.
"""
def __init__(self, dim_out, settings_dict, **kwargs):
"""Creates one half of a spline coupling layer to be used as part of a ``CouplingLayer`` in
an ``InvertibleNetwork`` instance.
Parameters
----------
dim_out : int
The output dimensionality of the coupling layer.
settings_dict : dict
The settings for the inner networks. Defaults will use:
``settings_dict={
"dense_args" : dict(units=128, activation="relu"),
"num_dense" : 2,
"spec_norm" : False,
"mc_dropout" : False,
"dropout" : True,
"residual" : False,
"dropout_prob" : 0.05,
"bins" : 16,
"default_domain" : (-5., 5., -5., 5.)
}
``
"""
super().__init__(**kwargs)
self.dim_out = dim_out
self.bins = settings_dict["bins"]
self.default_domain = settings_dict["default_domain"]
self.spline_params_counts = {
"left_edge": 1,
"bottom_edge": 1,
"widths": self.bins,
"heights": self.bins,
"derivatives": self.bins - 1,
}
self.num_total_spline_params = sum(self.spline_params_counts.values()) * self.dim_out
# Internal network (learnable spline parameters)
self.net = DenseCouplingNet(settings_dict, self.num_total_spline_params)
def call(self, split1, split2, condition, inverse=False, **kwargs):
"""Performs one pass through a spline coupling layer (either inverse or forward).
Parameters
----------
split1 : tf.Tensor of shape (batch_size, ..., input_dim//2)
The first partition of the input vector(s)
split2 : tf.Tensor of shape (batch_size, ..., ceil[input_dim//2])
The second partition of the input vector(s)
condition : tf.Tensor or None
The conditioning data of interest, for instance, x = summary_fun(x), shape (batch_size, ...).
If ``condition is None``, then the layer reduces to an unconditional coupling.
inverse : bool, optional, default: False
Flag indicating whether to run the block forward or backward.
Returns
-------
(z, log_det_J) : tuple(tf.Tensor, tf.Tensor)
If inverse=False: The transformed input and the corresponding Jacobian of the transformation,
z shape: (batch_size, ..., input_dim//2), log_det_J shape: (batch_size, ...)
target : tf.Tensor
If inverse=True: The back-transformed z, shape (batch_size, ..., inp_dim//2)
"""
if not inverse:
return self._forward(split1, split2, condition, **kwargs)
return self._inverse(split1, split2, condition, **kwargs)
def _forward(self, u1, u2, condition, **kwargs):
"""Performs a forward pass through the spline coupling layer. Used internally by the instance.
Parameters
----------
u1 : tf.Tensor of shape (batch_size, ..., dim_1)
The first partition of the input
u2 : tf.Tensor of shape (batch_size, ..., dim_2)
The second partition of the input
condition : tf.Tensor of shape (batch_size, ..., dim_condition) or None
The optional conditioning vector. Batch size must match the batch size
of the partitions.
Returns
-------
(v, log_det_J) : tuple(tf.Tensor, tf.Tensor)
The transformed input and the corresponding Jacobian of the transformation.
"""
spline_params = self.net(u2, condition, **kwargs)
spline_params = self._semantic_spline_parameters(spline_params)
spline_params = self._constrain_parameters(spline_params)
v, log_det_J = self._calculate_spline(u1, spline_params, inverse=False)
return v, log_det_J
def _inverse(self, v1, v2, condition, **kwargs):
"""Performs an inverse pass through the coupling block. Used internally by the instance.
Parameters
----------
v1 : tf.Tensor of shape (batch_size, ..., dim_1)
The first partition of the latent vector
v2 : tf.Tensor of shape (batch_size, ..., dim_2)
The second partition of the latent vector
condition : tf.Tensor of shape (batch_size, ..., dim_condition)
The optional conditioning vector. Batch size must match the batch size
of the partitions.
Returns
-------
u : tf.Tensor of shape (batch_size, ..., dim_1)
The back-transformed input.
"""
spline_params = self.net(v1, condition, **kwargs)
spline_params = self._semantic_spline_parameters(spline_params)
spline_params = self._constrain_parameters(spline_params)
u = self._calculate_spline(v2, spline_params, inverse=True)
return u
def _calculate_spline(self, target, spline_params, inverse=False):
"""Computes both directions of a rational quadratic spline (RQS) as in:
https://github.com/vislearn/FrEIA/blob/master/FrEIA/modules/splines/rational_quadratic.py
At this point, ``spline_params`` represents a tuple with the parameters of the RQS learned
by the internal neural network (given optional conditional information).
Parameters
----------
target : tf.Tensor of shape (batch_size, ..., dim_2)
The target partition of the input vector to transform.
spline_params : tuple(tf.Tensor,...)
A tuple with tensors corresponding to the learnable spline parameters:
(left_edge, bottom_edge, widths, heights, derivatives)
inverse : bool, optional, default: False
Flag indicating whether to run the block forward or backward.
Returns
-------
(result, log_det_J) : tuple(tf.Tensor, tf.Tensor)
If inverse=False: The transformed input and the corresponding Jacobian of the transformation,
result shape: (batch_size, ..., dim_2), log_det_J shape: (batch_size, ...)
result : tf.Tensor
If inverse=True: The back-transformed latent, shape (batch_size, ..., dim_2)
"""
# Extract all learnable parameters
left_edge, bottom_edge, widths, heights, derivatives = spline_params
# Placeholders for results
result = tf.zeros_like(target)
log_jac = tf.zeros_like(target)
total_width = tf.reduce_sum(widths, axis=-1, keepdims=True)
total_height = tf.reduce_sum(heights, axis=-1, keepdims=True)
knots_x = tf.concat([left_edge, left_edge + tf.math.cumsum(widths, axis=-1)], axis=-1)
knots_y = tf.concat([bottom_edge, bottom_edge + tf.math.cumsum(heights, axis=-1)], axis=-1)
# Determine which targets are in domain and which are not
if not inverse:
target_in_domain = tf.logical_and(knots_x[..., 0] < target, target <= knots_x[..., -1])
higher_indices = tf.searchsorted(knots_x, target[..., None])
else:
target_in_domain = tf.logical_and(knots_y[..., 0] < target, target <= knots_y[..., -1])
higher_indices = tf.searchsorted(knots_y, target[..., None])
target_in = target[target_in_domain]
target_in_idx = tf.where(target_in_domain)
target_out = target[~target_in_domain]
target_out_idx = tf.where(~target_in_domain)
# In-domain computation
if tf.size(target_in_idx) > 0:
# Index crunching
higher_indices = tf.gather_nd(higher_indices, target_in_idx)
higher_indices = tf.cast(higher_indices, tf.int32)
lower_indices = higher_indices - 1
lower_idx_tuples = tf.concat([tf.cast(target_in_idx, tf.int32), lower_indices], axis=-1)
higher_idx_tuples = tf.concat([tf.cast(target_in_idx, tf.int32), higher_indices], axis=-1)
# Spline computation
dk = tf.gather_nd(derivatives, lower_idx_tuples)
dkp = tf.gather_nd(derivatives, higher_idx_tuples)
xk = tf.gather_nd(knots_x, lower_idx_tuples)
xkp = tf.gather_nd(knots_x, higher_idx_tuples)
yk = tf.gather_nd(knots_y, lower_idx_tuples)
ykp = tf.gather_nd(knots_y, higher_idx_tuples)
x = target_in
dx = xkp - xk
dy = ykp - yk
sk = dy / dx
xi = (x - xk) / dx
# Forward pass
if not inverse:
numerator = dy * (sk * xi**2 + dk * xi * (1 - xi))
denominator = sk + (dkp + dk - 2 * sk) * xi * (1 - xi)
result_in = yk + numerator / denominator
# Log Jacobian for in-domain
numerator = sk**2 * (dkp * xi**2 + 2 * sk * xi * (1 - xi) + dk * (1 - xi) ** 2)
denominator = (sk + (dkp + dk - 2 * sk) * xi * (1 - xi)) ** 2
log_jac_in = tf.math.log(numerator + 1e-10) - tf.math.log(denominator + 1e-10)
log_jac = tf.tensor_scatter_nd_update(log_jac, target_in_idx, log_jac_in)
# Inverse pass
else:
y = x
a = dy * (sk - dk) + (y - yk) * (dkp + dk - 2 * sk)
b = dy * dk - (y - yk) * (dkp + dk - 2 * sk)
c = -sk * (y - yk)
discriminant = tf.maximum(b**2 - 4 * a * c, 0.0)
xi = 2 * c / (-b - tf.math.sqrt(discriminant))
result_in = xi * dx + xk
result = tf.tensor_scatter_nd_update(result, target_in_idx, result_in)
# Out-of-domain
if tf.size(target_out_idx) > 1:
scale = total_height / total_width
shift = bottom_edge - scale * left_edge
scale_out = tf.gather_nd(scale, target_out_idx)
shift_out = tf.gather_nd(shift, target_out_idx)
if not inverse:
result_out = scale_out * target_out[..., None] + shift_out
# Log Jacobian for out-of-domain points
log_jac_out = tf.math.log(scale_out + 1e-10)
log_jac_out = tf.squeeze(log_jac_out, axis=-1)
log_jac = tf.tensor_scatter_nd_update(log_jac, target_out_idx, log_jac_out)
else:
result_out = (target_out[..., None] - shift_out) / scale_out
result_out = tf.squeeze(result_out, axis=-1)
result = tf.tensor_scatter_nd_update(result, target_out_idx, result_out)
if not inverse:
return result, tf.reduce_sum(log_jac, axis=-1)
return result
def _semantic_spline_parameters(self, parameters):
"""Builds a tuple of tensors from the output of the coupling net.
Parameters
----------
parameters : tf.Tensor of shape (batch_size, ..., num_spline_parameters)
All learnable spline parameters packed in a single tensor, which will be
partitioned according to the role of each spline parameter.
Returns
-------
parameters : tuple(tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor)
The partitioned spline parameters according to their role in the spline computation.
"""
shape = tf.shape(parameters)
if len(shape) == 2:
new_shape = (shape[0], self.dim_out, -1)
elif len(shape) == 3:
new_shape = (shape[0], shape[1], self.dim_out, -1)
else:
raise NotImplementedError("Spline flows can currently only operate on 2D and 3D inputs!")
parameters = tf.reshape(parameters, new_shape)
parameters = tf.split(parameters, list(self.spline_params_counts.values()), axis=-1)
return parameters
def _constrain_parameters(self, parameters):
"""Takes care of zero spline parameters due to zeros kernel initializer and
applies parameter constraints for stability.
Parameters
----------
parameters : tuple(tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor)
The unconstrained spline parameters.
Returns
-------
parameters : tuple(tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor)
The constrained spline parameters.
"""
left_edge, bottom_edge, widths, heights, derivatives = parameters
# Set lower corners of domain relative to default domain
left_edge = left_edge + self.default_domain[0]
bottom_edge = bottom_edge + self.default_domain[2]
# Compute default widths and heights
default_width = (self.default_domain[1] - self.default_domain[0]) / self.bins
default_height = (self.default_domain[3] - self.default_domain[2]) / self.bins
# Compute shifts for softplus function
xshift = tf.math.log(tf.math.exp(default_width) - 1)
yshift = tf.math.log(tf.math.exp(default_height) - 1)
# Constrain widths and heights to be positive
widths = tf.math.softplus(widths + xshift)
heights = tf.math.softplus(heights + yshift)
# Compute spline derivatives
shift = tf.math.log(EULER_CONST - 1.0)
derivatives = tf.nn.softplus(derivatives + shift)
# Add in edge derivatives
total_height = tf.reduce_sum(heights, axis=-1, keepdims=True)
total_width = tf.reduce_sum(widths, axis=-1, keepdims=True)
scale = total_height / total_width
derivatives = tf.concat([scale, derivatives, scale], axis=-1)
return left_edge, bottom_edge, widths, heights, derivatives
class CouplingLayer(tf.keras.Model):
"""General wrapper for a coupling layer (either affine or spline) with different settings."""
def __init__(
self,
latent_dim,
coupling_settings=None,
coupling_design="affine",
permutation="fixed",
use_act_norm=True,
act_norm_init=None,
**kwargs,
):
"""Creates an invertible coupling layers instance with the provided hyperparameters.
Parameters
----------
latent_dim : int
The dimensionality of the latent space (equal to the dimensionality of the target variable)
coupling_settings : dict or None, optional, default: None
The coupling network settings to pass to the internal coupling layers. See ``default_settings``
for the required entries.
coupling_design : str or callable, optional, default: 'affine'
The type of internal coupling network to use. Must be in ['affine', 'spline'].
In general, spline couplings run slower than affine couplings, but require fewer coupling
layers. Spline couplings may work best with complex (e.g., multimodal) low-dimensional
problems. The difference will become less and less pronounced as we move to higher dimensions.
permutation : str or None, optional, default: 'fixed'
Whether to use permutations between coupling layers. Highly recommended if ``num_coupling_layers > 1``
Important: Must be in ['fixed', 'learnable', None]
use_act_norm : bool, optional, default: True
Whether to use activation normalization after each coupling layer. Recommended to keep default.
act_norm_init : np.ndarray of shape (num_simulations, num_params) or None, optional, default: None
Optional data-dependent initialization for the internal ``ActNorm`` layers.
**kwargs : dict
Optional keyword arguments (e.g., name) passed to the tf.keras.Model __init__ method.
"""
super().__init__(**kwargs)
# Set dimensionality attributes
self.latent_dim = latent_dim
self.dim_out1 = self.latent_dim // 2
self.dim_out2 = self.latent_dim // 2 if self.latent_dim % 2 == 0 else self.latent_dim // 2 + 1
# Determine coupling net settings
if coupling_settings is None:
user_dict = dict()
elif isinstance(coupling_settings, dict):
user_dict = coupling_settings
else:
raise ConfigurationError("coupling_net_settings argument must be None or a dict!")
# Determine type of coupling (affine or spline) and build settings
if coupling_design == "affine":
coupling_type = AffineCoupling
coupling_settings = build_meta_dict(
user_dict=user_dict, default_setting=default_settings.DEFAULT_SETTING_AFFINE_COUPLING
)
elif coupling_design == "spline":
coupling_type = SplineCoupling
coupling_settings = build_meta_dict(
user_dict=user_dict, default_setting=default_settings.DEFAULT_SETTING_SPLINE_COUPLING
)
else:
raise NotImplementedError('coupling_design must be in ["affine", "spline"]')
# Two-in-one coupling block (i.e., no inactive part after a forward pass)
self.net1 = coupling_type(self.dim_out1, coupling_settings)
self.net2 = coupling_type(self.dim_out2, coupling_settings)
# Optional (learnable or fixed) permutation
if permutation not in ["fixed", "learnable", None]:
raise ConfigurationError('Argument permutation should be in ["fixed", "learnable", None]')
if permutation == "fixed":
self.permutation = Permutation(self.latent_dim)
self.permutation.trainable = False
elif permutation == "learnable":
self.permutation = Orthogonal(self.latent_dim)
else:
self.permutation = None
# Optional learnable activation normalization
if use_act_norm:
self.act_norm = ActNorm(latent_dim, act_norm_init)
else:
self.act_norm = None
def call(self, target_or_z, condition, inverse=False, **kwargs):
"""Performs one pass through a the affine coupling layer (either inverse or forward).
Parameters
----------
target_or_z : tf.Tensor
The estimation quantites of interest or latent representations z ~ p(z), shape (batch_size, ...)
condition : tf.Tensor or None
The conditioning data of interest, for instance, x = summary_fun(x), shape (batch_size, ...).
If `condition is None`, then the layer reduces to an unconditional coupling layer.
inverse : bool, optional, default: False
Flag indicating whether to run the block forward or backward.
Returns
-------
(z, log_det_J) : tuple(tf.Tensor, tf.Tensor)
If inverse=False: The transformed input and the corresponding Jacobian of the transformation,
z shape: (batch_size, inp_dim), log_det_J shape: (batch_size, )
target : tf.Tensor
If inverse=True: The back-transformed z, shape (batch_size, inp_dim)
Notes
-----
If ``inverse=False``, the return is ``(z, log_det_J)``.\n
If ``inverse=True``, the return is ``target``
"""
if not inverse:
return self.forward(target_or_z, condition, **kwargs)
return self.inverse(target_or_z, condition, **kwargs)
def forward(self, target, condition, **kwargs):
"""Performs a forward pass through a coupling layer with an optinal `Permutation` and `ActNorm` layers.
Parameters
----------
target : tf.Tensor
The estimation quantities of interest, for instance, parameter vector of shape (batch_size, theta_dim)
condition : tf.Tensor or None
The conditioning vector of interest, for instance, x = summary(x), shape (batch_size, summary_dim)
If `None`, transformation amounts to unconditional estimation.
Returns
-------
(z, log_det_J) : tuple(tf.Tensor, tf.Tensor)
The transformed input and the corresponding Jacobian of the transformation.
"""
# Initialize log_det_Js accumulator
log_det_Js = tf.zeros(1)
# Normalize activation, if specified
if self.act_norm is not None:
target, log_det_J_act = self.act_norm(target)
log_det_Js += log_det_J_act
# Permute, if indicated
if self.permutation is not None:
target = self.permutation(target)
if self.permutation.trainable:
target, log_det_J_p = target
log_det_Js += log_det_J_p
# Pass through coupling layer
latent, log_det_J_c = self._forward(target, condition, **kwargs)
log_det_Js += log_det_J_c
return latent, log_det_Js
def inverse(self, latent, condition, **kwargs):
"""Performs an inverse pass through a coupling layer with an optinal `Permutation` and `ActNorm` layers.
Parameters
----------
latent : tf.Tensor
latent variables z ~ p(z), shape (batch_size, theta_dim)
condition : tf.Tensor or None
The conditioning vector of interest, for instance, x = summary(x), shape (batch_size, summary_dim).
If `None`, transformation amounts to unconditional estimation.
Returns
-------
target : tf.Tensor
The back-transformed latent variable z.
"""
target = self._inverse(latent, condition, **kwargs)
if self.permutation is not None:
target = self.permutation(target, inverse=True)
if self.act_norm is not None:
target = self.act_norm(target, inverse=True)
return target
def _forward(self, target, condition, **kwargs):
"""Performs a forward pass through the coupling layer. Used internally by the instance.
Parameters
----------
target : tf.Tensor
The estimation quantities of interest, for instance, parameter vector of shape (batch_size, theta_dim)
condition : tf.Tensor or None
The conditioning vector of interest, for instance, x = summary(x), shape (batch_size, summary_dim)
If `None`, transformation amounts to unconditional estimation.
Returns
-------
(v, log_det_J) : tuple(tf.Tensor, tf.Tensor)
The transformed input and the corresponding Jacobian of the transformation.
"""
# Split input along last axis and perform forward coupling
u1, u2 = tf.split(target, [self.dim_out1, self.dim_out2], axis=-1)
v1, log_det_J1 = self.net1(u1, u2, condition, inverse=False, **kwargs)
v2, log_det_J2 = self.net2(u2, v1, condition, inverse=False, **kwargs)
v = tf.concat((v1, v2), axis=-1)
        # Compute the log determinant of the Jacobian from both splits
log_det_J = log_det_J1 + log_det_J2
return v, log_det_J
def _inverse(self, latent, condition, **kwargs):
"""Performs an inverse pass through the coupling block. Used internally by the instance.
Parameters
----------
latent : tf.Tensor
latent variables z ~ p(z), shape (batch_size, theta_dim)
condition : tf.Tensor or None
The conditioning vector of interest, for instance, x = summary(x), shape (batch_size, summary_dim).
If `None`, transformation amounts to unconditional estimation.
Returns
-------
u : tf.Tensor
The back-transformed input.
"""
# Split input along last axis and perform inverse coupling
v1, v2 = tf.split(latent, [self.dim_out1, self.dim_out2], axis=-1)
u2 = self.net2(v1, v2, condition, inverse=True, **kwargs)
u1 = self.net1(u2, v1, condition, inverse=True, **kwargs)
u = tf.concat((u1, u2), axis=-1)
return u
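# --- Illustrative sketch (not part of the library) ---------------------------
# A minimal, self-contained affine coupling in plain TensorFlow that mirrors the
# split -> transform -> concatenate pattern of ``_forward``/``_inverse`` above,
# with fixed toy conditioner functions standing in for learnable networks. All
# names below (e.g., ``_toy_coupling_example``) are hypothetical and purely
# illustrative.
def _toy_coupling_example():
    import tensorflow as tf
    def scale_shift(conditioner):
        # Stand-in for a learnable conditioner network: derive a log-scale and
        # a shift from the conditioning split
        s = 0.1 * tf.reduce_mean(conditioner, axis=-1, keepdims=True)
        t = 0.1 * tf.reduce_sum(conditioner, axis=-1, keepdims=True)
        return s, t
    def forward(x, dims=(2, 2)):
        u1, u2 = tf.split(x, list(dims), axis=-1)
        s1, t1 = scale_shift(u2)
        v1 = u1 * tf.exp(s1) + t1  # transform first split given second
        s2, t2 = scale_shift(v1)
        v2 = u2 * tf.exp(s2) + t2  # transform second split given transformed first
        log_det_J = tf.squeeze(dims[0] * s1 + dims[1] * s2, axis=-1)
        return tf.concat([v1, v2], axis=-1), log_det_J
    def inverse(z, dims=(2, 2)):
        v1, v2 = tf.split(z, list(dims), axis=-1)
        s2, t2 = scale_shift(v1)
        u2 = (v2 - t2) * tf.exp(-s2)  # undo the second transform first
        s1, t1 = scale_shift(u2)
        u1 = (v1 - t1) * tf.exp(-s1)
        return tf.concat([u1, u2], axis=-1)
    x = tf.random.normal((8, 4))
    z, log_det_J = forward(x)
    x_rec = inverse(z)
    tf.debugging.assert_near(x, x_rec, atol=1e-5)  # round trip recovers the input
    return z, log_det_J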
| 32,182 | 43.390345 | 114 | py |
BayesFlow | BayesFlow-master/bayesflow/default_settings.py | # Copyright (c) 2022 The BayesFlow Developers
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from abc import ABC, abstractmethod
import tensorflow as tf
class Setting(ABC):
"""Abstract base class for settings. It's here to potentially extend the setting functionality in future."""
@abstractmethod
def __init__(self):
""""""
pass
class MetaDictSetting(Setting):
"""Implements an interface for a default meta_dict with optional mandatory fields."""
def __init__(self, meta_dict: dict, mandatory_fields: list = []):
"""
Parameters
----------
meta_dict : dict
Default dictionary.
mandatory_fields : list, default: []
List of keys in `meta_dict` that need to be provided by the user.
"""
self.meta_dict = meta_dict
self.mandatory_fields = mandatory_fields
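# --- Illustrative sketch (not part of the library) ---------------------------
# How a ``MetaDictSetting`` is typically consumed: user-supplied keys override
# the defaults in ``meta_dict`` and every name in ``mandatory_fields`` must be
# provided. ``_merge_with_defaults`` is a hypothetical helper, not the library's
# own ``build_meta_dict``.
def _merge_with_defaults(user_dict: dict, setting: MetaDictSetting) -> dict:
    missing = [field for field in setting.mandatory_fields if field not in user_dict]
    if missing:
        raise ValueError(f"Missing mandatory fields: {missing}")
    merged = dict(setting.meta_dict)  # start from the defaults
    merged.update(user_dict)  # user-provided values take precedence
    return merged
# Example (hypothetical): _merge_with_defaults({"num_params": 4}, DEFAULT_SETTING_INVERTIBLE_NET)
# yields the invertible-network defaults extended with the mandatory "num_params" entry.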
DEFAULT_SETTING_INVARIANT_NET = MetaDictSetting(
meta_dict={
"num_dense_s1": 2,
"num_dense_s2": 2,
"num_dense_s3": 2,
"num_equiv": 2,
"pooling_fun": "mean",
"dense_s1_args": None,
"dense_s2_args": None,
"dense_s3_args": None,
"summary_dim": 10,
},
mandatory_fields=[],
)
DEFAULT_SETTING_MULTI_CONV = {
"layer_args": {"activation": "relu", "filters": 32, "strides": 1, "padding": "causal"},
"min_kernel_size": 1,
"max_kernel_size": 3,
}
DEFAULT_SETTING_DENSE_INVARIANT = {"units": 64, "activation": "relu", "kernel_initializer": "glorot_uniform"}
DEFAULT_SETTING_DENSE_RECT = {"units": 256, "activation": "swish", "kernel_initializer": "glorot_uniform"}
DEFAULT_SETTING_DENSE_ATTENTION = {"units": 64, "activation": "relu", "kernel_initializer": "glorot_uniform"}
DEFAULT_SETTING_DENSE_EVIDENTIAL = {
"units": 64,
"kernel_initializer": "glorot_uniform",
"activation": "elu",
}
DEFAULT_SETTING_DENSE_PMP = {
"units": 64,
"kernel_initializer": "glorot_uniform",
"activation": "elu",
}
DEFAULT_SETTING_AFFINE_COUPLING = MetaDictSetting(
meta_dict={
"dense_args": dict(units=128, activation="relu", kernel_regularizer=tf.keras.regularizers.l2(5e-4)),
"num_dense": 2,
"spec_norm": False,
"mc_dropout": False,
"dropout": True,
"residual": False,
"dropout_prob": 0.01,
"soft_clamping": 1.9,
},
mandatory_fields=[],
)
DEFAULT_SETTING_SPLINE_COUPLING = MetaDictSetting(
meta_dict={
"dense_args": dict(units=128, activation="relu", kernel_regularizer=tf.keras.regularizers.l2(1e-3)),
"num_dense": 2,
"spec_norm": False,
"mc_dropout": False,
"dropout": True,
"residual": False,
"dropout_prob": 0.05,
"bins": 16,
"default_domain": (-5.0, 5.0, -5.0, 5.0),
},
mandatory_fields=[],
)
DEFAULT_SETTING_ATTENTION = {"key_dim": 32, "num_heads": 4, "dropout": 0.01}
DEFAULT_SETTING_INVERTIBLE_NET = MetaDictSetting(
meta_dict={
"num_coupling_layers": 5,
"coupling_net_settings": None,
"coupling_design": "affine",
"permutation": "fixed",
"use_act_norm": True,
"act_norm_init": None,
"use_soft_flow": False,
"soft_flow_bounds": (1e-3, 5e-2),
},
mandatory_fields=["num_params"],
)
DEFAULT_SETTING_EVIDENTIAL_NET = MetaDictSetting(
meta_dict={
"dense_args": dict(units=128, activation="relu"),
"num_dense": 3,
"output_activation": "softplus",
},
mandatory_fields=["num_models"],
)
DEFAULT_SETTING_PMP_NET = MetaDictSetting(
meta_dict={
"dense_args": dict(units=64, activation="relu"),
"num_dense": 3,
"output_activation": "softmax",
},
mandatory_fields=["num_models"],
)
OPTIMIZER_DEFAULTS = {"global_clipnorm": 1.0}
DEFAULT_KEYS = {
"prior_draws": "prior_draws",
"obs_data": "obs_data",
"sim_data": "sim_data",
"batchable_context": "batchable_context",
"non_batchable_context": "non_batchable_context",
"prior_batchable_context": "prior_batchable_context",
"prior_non_batchable_context": "prior_non_batchable_context",
"prior_context": "prior_context",
"hyper_prior_draws": "hyper_prior_draws",
"shared_prior_draws": "shared_prior_draws",
"local_prior_draws": "local_prior_draws",
"sim_batchable_context": "sim_batchable_context",
"sim_non_batchable_context": "sim_non_batchable_context",
"summary_conditions": "summary_conditions",
"direct_conditions": "direct_conditions",
"parameters": "parameters",
"hyper_parameters": "hyper_parameters",
"shared_parameters": "shared_parameters",
"local_parameters": "local_parameters",
"observables": "observables",
"targets": "targets",
"conditions": "conditions",
"posterior_inputs": "posterior_inputs",
"likelihood_inputs": "likelihood_inputs",
"model_outputs": "model_outputs",
"model_indices": "model_indices",
}
MMD_BANDWIDTH_LIST = [1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1, 5, 10, 15, 20, 25, 30, 35, 100, 1e3, 1e4, 1e5, 1e6]
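# --- Illustrative sketch (not part of the library) ---------------------------
# A minimal simulation-output dictionary keyed through DEFAULT_KEYS, in the form
# consumed by the default configurators in ``bayesflow/configuration.py``. The
# array shapes below are arbitrary and purely illustrative.
def _example_forward_dict():
    import numpy as np
    batch_size, num_params, num_obs, data_dim = 16, 2, 50, 1
    return {
        DEFAULT_KEYS["prior_draws"]: np.random.normal(size=(batch_size, num_params)),
        DEFAULT_KEYS["sim_data"]: np.random.normal(size=(batch_size, num_obs, data_dim)),
    }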
| 6,111 | 29.257426 | 112 | py |
BayesFlow | BayesFlow-master/bayesflow/configuration.py | # Copyright (c) 2022 The BayesFlow Developers
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import numpy as np
from tensorflow.keras.utils import to_categorical
from bayesflow.default_settings import DEFAULT_KEYS
from bayesflow.exceptions import ConfigurationError
class DefaultJointConfigurator:
"""Fallback class for a generic configrator for joint posterior and likelihood approximation."""
def __init__(self, default_float_type=np.float32):
self.posterior_config = DefaultPosteriorConfigurator(default_float_type=default_float_type)
self.likelihood_config = DefaultLikelihoodConfigurator(default_float_type=default_float_type)
self.default_float_type = default_float_type
def __call__(self, forward_dict):
"""Configures the outputs of a generative model for joint learning."""
input_dict = {}
input_dict[DEFAULT_KEYS["posterior_inputs"]] = self.posterior_config(forward_dict)
input_dict[DEFAULT_KEYS["likelihood_inputs"]] = self.likelihood_config(forward_dict)
return input_dict
class DefaultLikelihoodConfigurator:
"""Fallback class for a generic configrator for amortized likelihood approximation."""
def __init__(self, default_float_type=np.float32):
self.default_float_type = default_float_type
def __call__(self, forward_dict):
"""Configures the output of a generative model for likelihood estimation."""
# Attempt to combine inputs
input_dict = self._combine(forward_dict)
# Convert everything to default type or fail gently
input_dict = {k: v.astype(self.default_float_type) if v is not None else v for k, v in input_dict.items()}
return input_dict
def _combine(self, forward_dict):
"""Default combination for entries in forward_dict."""
out_dict = {DEFAULT_KEYS["observables"]: None, DEFAULT_KEYS["conditions"]: None}
# Determine whether simulated or observed data available, throw if None present
if forward_dict.get(DEFAULT_KEYS["sim_data"]) is None and forward_dict.get(DEFAULT_KEYS["obs_data"]) is None:
raise ConfigurationError(
f"Either {DEFAULT_KEYS['sim_data']} or {DEFAULT_KEYS['obs_data']}"
+ " should be present as keys in the forward_dict."
)
# If only simulated or observed data present, all good
elif forward_dict.get(DEFAULT_KEYS["sim_data"]) is not None:
data = forward_dict.get(DEFAULT_KEYS["sim_data"])
elif forward_dict.get(DEFAULT_KEYS["obs_data"]) is not None:
data = forward_dict.get(DEFAULT_KEYS["obs_data"])
# Else if neither 'sim_data' nor 'obs_data' present, throw again
else:
raise ConfigurationError(
f"Either {DEFAULT_KEYS['sim_data']} or {DEFAULT_KEYS['obs_data']}"
+ " should be present as keys in the forward_dict."
)
# Extract targets and conditions
out_dict[DEFAULT_KEYS["observables"]] = data
out_dict[DEFAULT_KEYS["conditions"]] = forward_dict[DEFAULT_KEYS["prior_draws"]]
return out_dict
class DefaultCombiner:
"""Fallback class for a generic combiner of conditions."""
def __call__(self, forward_dict):
"""Converts all condition-related variables or fails."""
out_dict = {
DEFAULT_KEYS["summary_conditions"]: None,
DEFAULT_KEYS["direct_conditions"]: None,
}
# Determine whether simulated or observed data available, throw if None present
if forward_dict.get(DEFAULT_KEYS["sim_data"]) is None and forward_dict.get(DEFAULT_KEYS["obs_data"]) is None:
raise ConfigurationError(
f"Either {DEFAULT_KEYS['sim_data']} or {DEFAULT_KEYS['obs_data']}"
+ " should be present as keys in the forward_dict, but not both!"
)
# If only simulated or observed data present, all good
elif forward_dict.get(DEFAULT_KEYS["sim_data"]) is not None:
data = forward_dict.get(DEFAULT_KEYS["sim_data"])
elif forward_dict.get(DEFAULT_KEYS["obs_data"]) is not None:
data = forward_dict.get(DEFAULT_KEYS["obs_data"])
# Else if neither 'sim_data' nor 'obs_data' present, throw again
else:
raise ConfigurationError(
f"Either {DEFAULT_KEYS['sim_data']} or {DEFAULT_KEYS['obs_data']}"
+ " should be present as keys in the forward_dict."
)
# Handle simulated or observed data or throw if the data could not be converted to an array
try:
if type(data) is not np.ndarray:
summary_conditions = np.array(data)
else:
summary_conditions = data
except Exception as _:
raise ConfigurationError("Could not convert input data to array...")
# Handle prior batchable context or throw if error encountered
if forward_dict.get(DEFAULT_KEYS["prior_batchable_context"]) is not None:
try:
if type(forward_dict[DEFAULT_KEYS["prior_batchable_context"]]) is not np.ndarray:
pbc_as_array = np.array(forward_dict[DEFAULT_KEYS["prior_batchable_context"]])
else:
pbc_as_array = forward_dict[DEFAULT_KEYS["prior_batchable_context"]]
except Exception as _:
raise ConfigurationError("Could not convert prior batchable context to array.")
try:
summary_conditions = np.concatenate([summary_conditions, pbc_as_array], axis=-1)
except Exception as _:
raise ConfigurationError(
f"Could not concatenate data and prior batchable context. Shape mismatch: "
+ "data - {summary_conditions.shape}, prior_batchable_context - {pbc_as_array.shape}."
)
# Handle simulation batchable context, or throw if error encountered
if forward_dict.get(DEFAULT_KEYS["sim_batchable_context"]) is not None:
try:
if type(forward_dict[DEFAULT_KEYS["sim_batchable_context"]]) is not np.ndarray:
sbc_as_array = np.array(forward_dict[DEFAULT_KEYS["sim_batchable_context"]])
else:
sbc_as_array = forward_dict[DEFAULT_KEYS["sim_batchable_context"]]
except Exception as _:
raise ConfigurationError("Could not convert simulation batchable context to array!")
try:
summary_conditions = np.concatenate([summary_conditions, sbc_as_array], axis=-1)
except Exception as _:
raise ConfigurationError(
f"Could not concatenate data (+optional prior context) and"
+ f" simulation batchable context. Shape mismatch:"
+ f" data - {summary_conditions.shape}, prior_batchable_context - {sbc_as_array.shape}"
)
# Add summary conditions to output dict
out_dict[DEFAULT_KEYS["summary_conditions"]] = summary_conditions
# Handle non-batchable contexts
if (
forward_dict.get(DEFAULT_KEYS["prior_non_batchable_context"]) is None
and forward_dict.get(DEFAULT_KEYS["sim_non_batchable_context"]) is None
):
return out_dict
# Handle prior non-batchable context
direct_conditions = None
if forward_dict.get(DEFAULT_KEYS["prior_non_batchable_context"]) is not None:
try:
if type(forward_dict[DEFAULT_KEYS["prior_non_batchable_context"]]) is not np.ndarray:
pnbc_conditions = np.array(forward_dict[DEFAULT_KEYS["prior_non_batchable_context"]])
else:
pnbc_conditions = forward_dict[DEFAULT_KEYS["prior_non_batchable_context"]]
except Exception as _:
raise ConfigurationError("Could not convert prior non_batchable_context to an array!")
direct_conditions = pnbc_conditions
# Handle simulation non-batchable context
if forward_dict.get(DEFAULT_KEYS["sim_non_batchable_context"]) is not None:
try:
if type(forward_dict[DEFAULT_KEYS["sim_non_batchable_context"]]) is not np.ndarray:
snbc_conditions = np.array(forward_dict[DEFAULT_KEYS["sim_non_batchable_context"]])
else:
snbc_conditions = forward_dict[DEFAULT_KEYS["sim_non_batchable_context"]]
except Exception as _:
raise ConfigurationError("Could not convert sim_non_batchable_context to array!")
try:
if direct_conditions is not None:
direct_conditions = np.concatenate([direct_conditions, snbc_conditions], axis=-1)
else:
direct_conditions = snbc_conditions
except Exception as _:
raise ConfigurationError(
f"Could not concatenate prior non-batchable context and \
simulation non-batchable context. Shape mismatch: \
- {direct_conditions.shape} vs. {snbc_conditions.shape}"
)
out_dict[DEFAULT_KEYS["direct_conditions"]] = direct_conditions
return out_dict
class DefaultPosteriorConfigurator:
"""Fallback class for a generic configrator for amortized posterior approximation."""
def __init__(self, default_float_type=np.float32):
self.default_float_type = default_float_type
self.combiner = DefaultCombiner()
def __call__(self, forward_dict):
"""Processes the forward dict to configure the input to an amortizer."""
# Combine inputs (conditionals)
input_dict = self.combiner(forward_dict)
input_dict[DEFAULT_KEYS["parameters"]] = forward_dict[DEFAULT_KEYS["prior_draws"]]
# Convert everything to default type or fail gently
input_dict = {k: v.astype(self.default_float_type) if v is not None else v for k, v in input_dict.items()}
return input_dict
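# --- Illustrative sketch (not part of the library) ---------------------------
# Running the default posterior configurator on a toy forward dictionary. Only
# the keys matter; the arrays and shapes below are arbitrary.
def _example_posterior_configuration():
    forward_dict = {
        DEFAULT_KEYS["prior_draws"]: np.random.normal(size=(8, 2)),
        DEFAULT_KEYS["sim_data"]: np.random.normal(size=(8, 50, 1)),
    }
    input_dict = DefaultPosteriorConfigurator()(forward_dict)
    # input_dict["summary_conditions"] holds the float32 data of shape (8, 50, 1),
    # input_dict["parameters"] holds the float32 prior draws of shape (8, 2), and
    # input_dict["direct_conditions"] remains None.
    return input_dict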
class DefaultModelComparisonConfigurator:
"""Fallback class for a default configurator for amortized model comparison."""
def __init__(self, num_models, combiner=None, default_float_type=np.float32):
self.num_models = num_models
if combiner is None:
self.combiner = DefaultCombiner()
else:
self.combiner = combiner
self.default_float_type = default_float_type
def __call__(self, forward_dict):
"""Convert all variables to arrays and combines them for inference into a dictionary with
the following keys, if DEFAULT_KEYS dictionary unchanged:
`model_indices` - a list of model indices, e.g., if two models, then [0, 1]
`model_outputs` - a list of dictionaries, e.g., if two models, then [dict0, dict1]
"""
# Prepare placeholders
input_dict = {
DEFAULT_KEYS["summary_conditions"]: None,
DEFAULT_KEYS["direct_conditions"]: None,
DEFAULT_KEYS["model_indices"]: None,
}
summary_conditions = []
direct_conditions = []
model_indices = []
# Loop through outputs of individual models
for m_idx, dict_m in zip(
forward_dict[DEFAULT_KEYS["model_indices"]], forward_dict[DEFAULT_KEYS["model_outputs"]]
):
# Configure individual model outputs
conf_out = self.combiner(dict_m)
# Extract summary conditions
if conf_out.get(DEFAULT_KEYS["summary_conditions"]) is not None:
summary_conditions.append(conf_out[DEFAULT_KEYS["summary_conditions"]])
num_draws_m = conf_out[DEFAULT_KEYS["summary_conditions"]].shape[0]
# Extract direct conditions
if conf_out.get(DEFAULT_KEYS["direct_conditions"]) is not None:
direct_conditions.append(conf_out[DEFAULT_KEYS["direct_conditions"]])
num_draws_m = conf_out[DEFAULT_KEYS["direct_conditions"]].shape[0]
model_indices.append(to_categorical([m_idx] * num_draws_m, self.num_models))
# At this point, all elements of the input_dicts should be arrays with identical keys
input_dict[DEFAULT_KEYS["summary_conditions"]] = (
np.concatenate(summary_conditions) if summary_conditions else None
)
input_dict[DEFAULT_KEYS["direct_conditions"]] = np.concatenate(direct_conditions) if direct_conditions else None
input_dict[DEFAULT_KEYS["model_indices"]] = np.concatenate(model_indices)
# Convert to default types
input_dict = {k: v.astype(self.default_float_type) if v is not None else v for k, v in input_dict.items()}
return input_dict
| 13,886 | 46.234694 | 120 | py |
BayesFlow | BayesFlow-master/bayesflow/wrappers.py | # Copyright (c) 2022 The BayesFlow Developers
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import tensorflow as tf
class SpectralNormalization(tf.keras.layers.Wrapper):
"""Performs spectral normalization on neural network weights. Adapted from:
https://www.tensorflow.org/addons/api_docs/python/tfa/layers/SpectralNormalization
This wrapper controls the Lipschitz constant of a layer by
constraining its spectral norm, which can stabilize the training of generative networks.
    See [Spectral Normalization for Generative Adversarial Networks](https://arxiv.org/abs/1802.05957).
"""
def __init__(self, layer, power_iterations=1, **kwargs):
super(SpectralNormalization, self).__init__(layer, **kwargs)
if power_iterations <= 0:
raise ValueError(
"`power_iterations` should be greater than zero, got " "`power_iterations={}`".format(power_iterations)
)
self.power_iterations = power_iterations
self._initialized = False
def build(self, input_shape):
"""Build `Layer`"""
# Register input shape
super().build(input_shape)
# Store reference to weights
if hasattr(self.layer, "kernel"):
self.w = self.layer.kernel
elif hasattr(self.layer, "embeddings"):
self.w = self.layer.embeddings
else:
raise AttributeError(
"{} object has no attribute 'kernel' nor " "'embeddings'".format(type(self.layer).__name__)
)
self.w_shape = self.w.shape.as_list()
self.u = self.add_weight(
shape=(1, self.w_shape[-1]),
initializer=tf.initializers.TruncatedNormal(stddev=0.02),
trainable=False,
name="sn_u",
dtype=self.w.dtype,
)
def call(self, inputs, training=False):
"""Call `Layer`
Parameters
----------
inputs : tf.Tensor of shape (None,...,condition_dim + target_dim)
The inputs to the corresponding layer.
"""
if training:
self.normalize_weights()
output = self.layer(inputs)
return output
def normalize_weights(self):
"""Generate spectral normalized weights.
This method will update the value of `self.w` with the
spectral normalized value, so that the layer is ready for `call()`.
"""
w = tf.reshape(self.w, [-1, self.w_shape[-1]])
u = self.u
with tf.name_scope("spectral_normalize"):
for _ in range(self.power_iterations):
v = tf.math.l2_normalize(tf.matmul(u, w, transpose_b=True))
u = tf.math.l2_normalize(tf.matmul(v, w))
u = tf.stop_gradient(u)
v = tf.stop_gradient(v)
sigma = tf.matmul(tf.matmul(v, w), u, transpose_b=True)
self.u.assign(tf.cast(u, self.u.dtype))
self.w.assign(tf.cast(tf.reshape(self.w / sigma, self.w_shape), self.w.dtype))
def get_config(self):
config = {"power_iterations": self.power_iterations}
base_config = super().get_config()
return {**base_config, **config}
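# --- Illustrative usage sketch (not part of the library) ---------------------
# Wrapping a Dense layer so that its kernel is spectrally normalized during
# training. The layer sizes and batch shape below are arbitrary.
def _example_spectral_normalization():
    sn_dense = SpectralNormalization(tf.keras.layers.Dense(units=32, activation="relu"))
    x = tf.random.normal((4, 16))
    # With training=True, the wrapper runs the power iteration and rescales the
    # kernel before delegating to the wrapped layer
    out = sn_dense(x, training=True)
    return out  # shape (4, 32)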
| 4,193 | 37.477064 | 119 | py |
BayesFlow | BayesFlow-master/bayesflow/amortizers.py | # Copyright (c) 2022 The BayesFlow Developers
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import logging
from abc import ABC, abstractmethod
from functools import partial
from warnings import warn
logging.basicConfig()
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from bayesflow.default_settings import DEFAULT_KEYS
from bayesflow.exceptions import ConfigurationError, SummaryStatsError
from bayesflow.helper_functions import check_tensor_sanity
from bayesflow.losses import log_loss, mmd_summary_space
from bayesflow.networks import EvidentialNetwork
class AmortizedTarget(ABC):
"""An abstract interface for an amortized learned distribution. Children should
implement the following public methods:
1. ``compute_loss(self, input_dict, **kwargs)``
2. ``sample(input_dict, **kwargs)``
3. ``log_prob(input_dict, **kwargs)``
"""
@abstractmethod
def __init__(self, *args, **kwargs):
pass
@abstractmethod
def compute_loss(self, input_dict, **kwargs):
pass
@abstractmethod
def sample(self, input_dict, **kwargs):
pass
@abstractmethod
def log_prob(self, input_dict, **kwargs):
pass
def _check_output_sanity(self, tensor):
logger = logging.getLogger()
check_tensor_sanity(tensor, logger)
class AmortizedPosterior(tf.keras.Model, AmortizedTarget):
"""A wrapper to connect an inference network for parameter estimation with an optional summary network
as in the original BayesFlow set-up described in the paper:
[1] Radev, S. T., Mertens, U. K., Voss, A., Ardizzone, L., & Köthe, U. (2020).
BayesFlow: Learning complex stochastic models with invertible neural networks.
IEEE Transactions on Neural Networks and Learning Systems.
But also allowing for augmented functionality, such as model misspecification detection in summary space:
[2] Schmitt, M., Bürkner, P. C., Köthe, U., & Radev, S. T. (2022).
Detecting Model Misspecification in Amortized Bayesian Inference with Neural Networks
arXiv preprint arXiv:2112.08866.
And learning of fat-tailed posteriors with a Student-t latent pushforward density:
[3] Jaini, P., Kobyzev, I., Yu, Y., & Brubaker, M. (2020, November).
Tails of lipschitz triangular flows.
In International Conference on Machine Learning (pp. 4673-4681). PMLR.
[4] Alexanderson, S., & Henter, G. E. (2020).
Robust model training and generalisation with Studentising flows.
arXiv preprint arXiv:2006.06599.
    Serves as an interface for learning ``p(parameters | data, context).``
"""
def __init__(
self,
inference_net,
summary_net=None,
latent_dist=None,
latent_is_dynamic=False,
summary_loss_fun=None,
**kwargs,
):
"""Initializes a composite neural network to represent an amortized approximate posterior
for a Bayesian generative model.
Parameters
----------
inference_net : tf.keras.Model
An (invertible) inference network which processes the outputs of a generative model
summary_net : tf.keras.Model or None, optional, default: None
An optional summary network to compress non-vector data structures.
latent_dist : callable or None, optional, default: None
The latent distribution towards which to optimize the networks. Defaults to
a multivariate unit Gaussian.
latent_is_dynamic : bool, optional, default: False
            If set to `True`, assumes that ``latent_dist`` is a function of the condition and takes
a different shape depending on the condition. Useful for more expressive transforms
of complex distributions, such as fat-tailed or highly-multimodal distributions.
Important: In the case of dynamic latents, the user is responsible that the
latent is appropriately parameterized! If not using ``tensorflow_probability``,
the ``latent_dist`` object needs to implement the following methods:
- ``latent_dist(x).log_prob(z)`` and
- ``latent_dist(x).sample(n_samples)``
summary_loss_fun : callable, str, or None, optional, default: None
The loss function which accepts the outputs of the summary network. If ``None``, no loss is provided
and the summary space will not be shaped according to a known distribution (see [2]).
If ``summary_loss_fun='MMD'``, the default loss from [2] will be used.
**kwargs : dict, optional, default: {}
Additional keyword arguments passed to the ``__init__`` method of a ``tf.keras.Model`` instance.
Important
----------
- If no ``summary_net`` is provided, then the output dictionary of your generative model should not contain
any ``summary_conditions``, i.e., ``summary_conditions`` should be set to ``None``, otherwise these will be ignored.
"""
tf.keras.Model.__init__(self, **kwargs)
self.inference_net = inference_net
self.summary_net = summary_net
self.latent_dim = self.inference_net.latent_dim
self.latent_is_dynamic = latent_is_dynamic
self.summary_loss = self._determine_summary_loss(summary_loss_fun)
self.latent_dist = self._determine_latent_dist(latent_dist)
def call(self, input_dict, return_summary=False, **kwargs):
"""Performs a forward pass through the summary and inference network given an input dictionary.
Parameters
----------
input_dict : dict
Input dictionary containing the following mandatory keys, if ``DEFAULT_KEYS`` unchanged:
            ``parameters`` - the latent model parameters over which a conditional density is learned
            ``summary_conditions`` - the conditioning variables (including data) that are first passed through a summary network
            ``direct_conditions`` - the conditioning variables that are directly passed to the inference network
return_summary : bool, optional, default: False
A flag which determines whether the learnable data summaries (representations) are returned or not.
**kwargs : dict, optional, default: {}
Additional keyword arguments passed to the networks
For instance, ``kwargs={'training': True}`` is passed automatically during training.
Returns
-------
net_out or (net_out, summary_out) : tuple of tf.Tensor
the outputs of ``inference_net(theta, summary_net(x, c_s), c_d)``, usually a latent variable and
            log(det(Jacobian)), that is a tuple ``(z, log_det_J)`` or ``((z, log_det_J), summary_out)`` if
            ``return_summary`` is set to True and a summary network is defined.
"""
# Concatenate conditions, if given
summary_out, full_cond = self._compute_summary_condition(
input_dict.get(DEFAULT_KEYS["summary_conditions"]),
input_dict.get(DEFAULT_KEYS["direct_conditions"]),
**kwargs,
)
# Compute output of inference net
net_out = self.inference_net(input_dict[DEFAULT_KEYS["parameters"]], full_cond, **kwargs)
# Return summary outputs or not, depending on parameter
if return_summary:
return net_out, summary_out
return net_out
def compute_loss(self, input_dict, **kwargs):
"""Computes the loss of the posterior amortizer given an input dictionary, which will
typically be the output of a Bayesian ``GenerativeModel`` instance.
Parameters
----------
input_dict : dict
Input dictionary containing the following mandatory keys, if ``DEFAULT_KEYS`` unchanged:
            ``parameters`` - the latent model parameters over which a conditional density is learned
            ``summary_conditions`` - the conditioning variables that are first passed through a summary network
            ``direct_conditions`` - the conditioning variables that are directly passed to the inference network
**kwargs : dict, optional, default: {}
Additional keyword arguments passed to the networks
For instance, ``kwargs={'training': True}`` is passed automatically during training.
Returns
-------
total_loss : tf.Tensor of shape (1,) - the total computed loss given input variables
"""
# Get amortizer outputs
net_out, sum_out = self(input_dict, return_summary=True, **kwargs)
z, log_det_J = net_out
# Case summary loss should be computed
if self.summary_loss is not None:
sum_loss = self.summary_loss(sum_out)
# Case no summary loss, simply add 0 for convenience
else:
sum_loss = 0.0
# Case dynamic latent space - function of summary conditions
if self.latent_is_dynamic:
logpdf = self.latent_dist(sum_out).log_prob(z)
        # Case static latent space
else:
logpdf = self.latent_dist.log_prob(z)
# Compute and return total loss
total_loss = tf.reduce_mean(-logpdf - log_det_J) + sum_loss
return total_loss
def call_loop(self, input_list, return_summary=False, **kwargs):
"""Performs a forward pass through the summary and inference network given a list of dicts
with the appropriate entries (i.e., as used for the standard call method).
This method is useful when GPU memory is limited or data sets have a different (non-Tensor) structure.
Parameters
----------
input_list : list of dicts, where each dict contains the following mandatory keys, if ``DEFAULT_KEYS`` unchanged:
            ``parameters`` - the latent model parameters over which a conditional density is learned
            ``summary_conditions`` - the conditioning variables (including data) that are first passed through a summary network
            ``direct_conditions`` - the conditioning variables that are directly passed to the inference network
return_summary : bool, optional, default: False
A flag which determines whether the learnable data summaries (representations) are returned or not.
**kwargs : dict, optional, default: {}
Additional keyword arguments passed to the networks
Returns
-------
net_out or (net_out, summary_out) : tuple of tf.Tensor
the outputs of ``inference_net(theta, summary_net(x, c_s), c_d)``, usually a latent variable and
            log(det(Jacobian)), that is a tuple ``(z, log_det_J)`` or ``((z, log_det_J), summary_out)`` if
            ``return_summary`` is set to True and a summary network is defined.
"""
outputs = []
for forward_dict in input_list:
outputs.append(self(forward_dict, return_summary, **kwargs))
net_out = [tf.concat([o[i] for o in outputs], axis=0) for i in range(len(outputs[0]))]
return tuple(net_out)
def sample(self, input_dict, n_samples, to_numpy=True, **kwargs):
"""Generates random draws from the approximate posterior given a dictionary with conditonal variables.
Parameters
----------
input_dict : dict
Input dictionary containing at least one of the following mandatory keys, if ``DEFAULT_KEYS`` unchanged:
``summary_conditions`` : the conditioning variables (including data) that are first passed through a summary network
            ``direct_conditions`` : the conditioning variables that are directly passed to the inference network
n_samples : int
The number of posterior draws (samples) to obtain from the approximate posterior
to_numpy : bool, optional, default: True
Flag indicating whether to return the samples as a ``np.ndarray`` or a ``tf.Tensor``.
**kwargs : dict, optional, default: {}
Additional keyword arguments passed to the networks
Returns
-------
post_samples : tf.Tensor or np.ndarray of shape (n_data_sets, n_samples, n_params)
The sampled parameters from the approximate posterior of each data set
"""
# Compute learnable summaries, if appropriate
_, conditions = self._compute_summary_condition(
input_dict.get(DEFAULT_KEYS["summary_conditions"]),
input_dict.get(DEFAULT_KEYS["direct_conditions"]),
training=False,
**kwargs,
)
# Obtain number of data sets
n_data_sets = conditions.shape[0]
# Obtain random draws from the approximate posterior given conditioning variables
# Case dynamic, assume tensorflow_probability instance, so need to reshape output from
# (n_samples, n_data_sets, latent_dim) to (n_data_sets, n_samples, latent_dim)
if self.latent_is_dynamic:
z_samples = self.latent_dist(conditions).sample(n_samples)
z_samples = tf.transpose(z_samples, (1, 0, 2))
        # Case static latent - marginal samples from the specified dist
else:
z_samples = self.latent_dist.sample((n_data_sets, n_samples))
        # Push latent draws through the inverse pass of the inference network
post_samples = self.inference_net.inverse(z_samples, conditions, training=False, **kwargs)
        # Only return a 2D array if the first dimension is 1
if post_samples.shape[0] == 1:
post_samples = post_samples[0]
self._check_output_sanity(post_samples)
# Return numpy version of tensor or tensor itself
if to_numpy:
return post_samples.numpy()
return post_samples
def sample_loop(self, input_list, n_samples, to_numpy=True, **kwargs):
"""Generates random draws from the approximate posterior given a list of dicts with conditonal variables.
Useful when GPU memory is limited or data sets have a different (non-Tensor) structure.
Parameters
----------
input_list : list of dictionaries, each dictionary having the following mandatory keys, if ``DEFAULT_KEYS`` unchanged:
``summary_conditions`` : the conditioning variables (including data) that are first passed through a summary network
            ``direct_conditions`` : the conditioning variables that are directly passed to the inference network
n_samples : int
The number of posterior draws (samples) to obtain from the approximate posterior
to_numpy : bool, optional, default: True
Flag indicating whether to return the samples as a ``np.ndarray`` or a ``tf.Tensor``
**kwargs : dict, optional, default: {}
Additional keyword arguments passed to the networks
Returns
-------
post_samples : tf.Tensor or np.ndarray of shape (n_datasets, n_samples, n_params)
The sampled parameters from the approximate posterior of each data set
"""
post_samples = []
for input_dict in input_list:
post_samples.append(self.sample(input_dict, n_samples, to_numpy, **kwargs))
if to_numpy:
return np.concatenate(post_samples, axis=0)
return tf.concat(post_samples, axis=0)
def log_posterior(self, input_dict, to_numpy=True, **kwargs):
"""Calculates the approximate log-posterior of targets given conditional variables via
the change-of-variable formula for a conditional normalizing flow.
Parameters
----------
input_dict : dict
Input dictionary containing the following mandatory keys, if ``DEFAULT_KEYS`` unchanged:
``parameters`` : the latent model parameters over which a conditional density (i.e., a posterior) is learned
``summary_conditions`` : the conditioning variables (including data) that are first passed through a summary network
``direct_conditions`` : the conditioning variables that are directly passed to the inference network
to_numpy : bool, optional, default: True
Flag indicating whether to return the lpdf values as a ``np.ndarray`` or a ``tf.Tensor``
**kwargs : dict, optional, default: {}
Additional keyword arguments passed to the networks
Returns
-------
log_post : tf.Tensor or np.ndarray of shape (batch_size, n_obs)
            the approximate log-posterior density of each parameter vector
"""
# Compute learnable summaries, if appropriate
_, conditions = self._compute_summary_condition(
input_dict.get(DEFAULT_KEYS["summary_conditions"]),
input_dict.get(DEFAULT_KEYS["direct_conditions"]),
training=False,
**kwargs,
)
# Forward pass through the network
z, log_det_J = self.inference_net.forward(
input_dict[DEFAULT_KEYS["parameters"]], conditions, training=False, **kwargs
)
# Compute approximate log posterior
# Case dynamic latent - function of conditions
if self.latent_is_dynamic:
log_post = self.latent_dist(conditions).log_prob(z) + log_det_J
        # Case static latent - marginal samples from z
else:
log_post = self.latent_dist.log_prob(z) + log_det_J
self._check_output_sanity(log_post)
if to_numpy:
return log_post.numpy()
return log_post
def log_prob(self, input_dict, to_numpy=True, **kwargs):
"""Identical to `log_posterior(input_dict, to_numpy, **kwargs)`."""
return self.log_posterior(input_dict, to_numpy=to_numpy, **kwargs)
def _compute_summary_condition(self, summary_conditions, direct_conditions, **kwargs):
"""Determines how to concatenate the provided conditions."""
# Compute learnable summaries, if given
if self.summary_net is not None:
sum_condition = self.summary_net(summary_conditions, **kwargs)
else:
sum_condition = None
# Concatenate learnable summaries with fixed summaries
if sum_condition is not None and direct_conditions is not None:
full_cond = tf.concat([sum_condition, direct_conditions], axis=-1)
elif sum_condition is not None:
full_cond = sum_condition
elif direct_conditions is not None:
full_cond = direct_conditions
else:
raise SummaryStatsError("Could not concatenarte or determine conditioning inputs...")
return sum_condition, full_cond
def _determine_latent_dist(self, latent_dist):
"""Determines which latent distribution to use and defaults to unit normal if None provided."""
if latent_dist is None:
return tfp.distributions.MultivariateNormalDiag(loc=[0.0] * self.latent_dim)
else:
return latent_dist
def _determine_summary_loss(self, loss_fun):
"""Determines which summary loss to use if default `None` argument provided, otherwise return identity."""
# If callable, return provided loss
if loss_fun is None or callable(loss_fun):
return loss_fun
# If string, check for MMD or mmd
elif type(loss_fun) is str:
if loss_fun.lower() == "mmd":
return mmd_summary_space
else:
raise NotImplementedError("For now, only 'mmd' is supported as a string argument for summary_loss_fun!")
# Throw if loss type unexpected
else:
raise NotImplementedError(
"Could not infer summary_loss_fun, argument should be of type (None, callable, or str)!"
)
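# --- Illustrative sketch (not part of the library) ---------------------------
# Constructing an amortized posterior with a condition-dependent (dynamic)
# latent distribution, following the contract in the ``__init__`` docstring:
# the callable's return value must expose ``.log_prob(z)`` and ``.sample(n)``.
# The inference network ``InvertibleNetwork(num_params=...)`` and the crude
# scale mapping below are assumptions for illustration only; in practice the
# condition fed to the callable is the summary-network output, so a summary
# network would typically be supplied as well.
def _example_dynamic_latent_posterior(num_params=2):
    from bayesflow.networks import InvertibleNetwork
    def dynamic_latent(condition):
        scale = tf.math.softplus(tf.reduce_mean(condition, axis=-1, keepdims=True))
        return tfp.distributions.MultivariateNormalDiag(
            loc=tf.zeros([tf.shape(condition)[0], num_params]),
            scale_diag=tf.repeat(scale, num_params, axis=-1),
        )
    return AmortizedPosterior(
        inference_net=InvertibleNetwork(num_params=num_params),
        latent_dist=dynamic_latent,
        latent_is_dynamic=True,
    )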
class AmortizedLikelihood(tf.keras.Model, AmortizedTarget):
"""An interface for a surrogate model of a simulator, or an implicit likelihood
``p(data | parameters, context)``.
"""
def __init__(self, surrogate_net, latent_dist=None, **kwargs):
"""Initializes a composite neural architecture representing an amortized emulator
for the simulator (i.e., the implicit likelihood model).
Parameters
----------
surrogate_net : tf.keras.Model
An (invertible) inference network which processes the outputs of the generative model.
latent_dist : callable or None, optional, default: None
The latent distribution towards which to optimize the surrogate network outputs. Defaults to
a multivariate unit Gaussian.
**kwargs : dict, optional, default: {}
Additional keyword arguments passed to the ``__init__`` method of a ``tf.keras.Model`` instance.
"""
tf.keras.Model.__init__(self, **kwargs)
self.surrogate_net = surrogate_net
self.latent_dim = self.surrogate_net.latent_dim
self.latent_dist = self._determine_latent_dist(latent_dist)
def call(self, input_dict, **kwargs):
"""Performs a forward pass through the summary and inference network.
Parameters
----------
input_dict : dict
Input dictionary containing the following mandatory keys, if ``DEFAULT_KEYS`` unchanged:
            ``observables`` - the observables over which a conditional density is learned (i.e., the data)
            ``conditions`` - the conditioning variables that are directly passed to the inference network
**kwargs : dict, optional, default: {}
Additional keyword arguments passed to the network
For instance, ``kwargs={'training': True}`` is passed automatically during training.
Returns
-------
net_out
            the outputs of ``surrogate_net(x, c)``, usually a latent variable and
log(det(Jacobian)), that is a tuple ``(z, log_det_J)``.
"""
net_out = self.surrogate_net(
input_dict[DEFAULT_KEYS["observables"]], input_dict[DEFAULT_KEYS["conditions"]], **kwargs
)
return net_out
def call_loop(self, input_list, **kwargs):
"""Performs a forward pass through the surrogate network given a list of dicts
with the appropriate entries (i.e., as used for the standard call method).
This method is useful when GPU memory is limited or data sets have a different (non-Tensor) structure.
Parameters
----------
input_list : list of dicts, where each dict contains the following mandatory keys, if ``DEFAULT_KEYS`` unchanged:
            ``observables`` - the observables over which a conditional density is learned (i.e., the data)
            ``conditions`` - the conditioning variables that are directly passed to the inference network
**kwargs : dict, optional, default: {}
Additional keyword arguments passed to the network
Returns
-------
        net_out : tuple of tf.Tensor
            the outputs of ``surrogate_net(x, c)``, usually a latent variable and
log(det(Jacobian)), that is a tuple ``(z, log_det_J)``.
"""
outputs = []
for forward_dict in input_list:
outputs.append(self(forward_dict, **kwargs))
net_out = [tf.concat([o[i] for o in outputs], axis=0) for i in range(len(outputs[0]))]
return tuple(net_out)
def sample(self, input_dict, n_samples, to_numpy=True, **kwargs):
"""Generates `n_samples` random draws from the surrogate likelihood given input conditions.
Parameters
----------
input_dict : dict
Input dictionary containing the following mandatory keys, if ``DEFAULT_KEYS`` unchanged:
``conditions`` - the conditioning variables that are directly passed to the surrogate network
n_samples : int
The number of posterior samples to obtain from the approximate posterior
to_numpy : bool, optional, default: True
Flag indicating whether to return the samples as a ``np.ndarray`` or a ``tf.Tensor``
**kwargs : dict, optional, default: {}
Additional keyword arguments passed to the network
Returns
-------
lik_samples : tf.Tensor or np.ndarray of shape (n_datasets, n_samples, None)
A simulated batch of observables from the surrogate likelihood.
"""
# Extract condition
conditions = input_dict[DEFAULT_KEYS["conditions"]]
# Obtain number of data sets
n_data_sets = conditions.shape[0]
# Obtain random draws from the surrogate likelihood given conditioning variables
z_samples = self.latent_dist.sample((n_data_sets, n_samples))
        # Push latent draws through the inverse pass of the surrogate network
lik_samples = self.surrogate_net.inverse(z_samples, conditions, training=False, **kwargs)
        # Only return a 2D array if the first dimension is 1
if lik_samples.shape[0] == 1:
lik_samples = lik_samples[0]
self._check_output_sanity(lik_samples)
if to_numpy:
return lik_samples.numpy()
return lik_samples
def sample_loop(self, input_list, n_samples, to_numpy=True, **kwargs):
"""Generates random draws from the surrogate network given a list of dicts with conditonal variables.
Useful when GPU memory is limited or data sets have a different (non-Tensor) structure.
Parameters
----------
input_list : list of dictionaries, each dictionary having the following mandatory keys, if ``DEFAULT_KEYS`` unchanged:
            ``conditions`` - the conditioning variables that are directly passed to the surrogate network
n_samples : int
The number of posterior draws (samples) to obtain from the approximate posterior
to_numpy : bool, optional, default: True
Flag indicating whether to return the samples as a `np.array` or a `tf.Tensor`
**kwargs : dict, optional, default: {}
Additional keyword arguments passed to the network
Returns
-------
post_samples : tf.Tensor or np.ndarray of shape (n_data_sets, n_samples, data_dim)
the sampled parameters per data set
"""
post_samples = []
for input_dict in input_list:
post_samples.append(self.sample(input_dict, n_samples, to_numpy, **kwargs))
if to_numpy:
return np.concatenate(post_samples, axis=0)
return tf.concat(post_samples, axis=0)
def log_likelihood(self, input_dict, to_numpy=True, **kwargs):
"""Calculates the approximate log-likelihood of targets given conditional variables via
the change-of-variable formula for a conditional normalizing flow.
Parameters
----------
input_dict : dict
Input dictionary containing the following mandatory keys, if ``DEFAULT_KEYS`` unchanged:
            ``observables`` - the variables over which a conditional density is learned (i.e., the observables)
            ``conditions`` - the conditioning variables that are directly passed to the inference network
to_numpy : bool, optional, default: True
Boolean flag indicating whether to return the log-lik values as a ``np.ndarray`` or a ``tf.Tensor``
**kwargs : dict, optional, default: {}
Additional keyword arguments passed to the network
Returns
-------
log_lik : tf.Tensor or np.ndarray of shape (batch_size, n_obs)
the approximate log-likelihood of each data point in each data set
"""
# Forward pass through the network
z, log_det_J = self.surrogate_net.forward(
input_dict[DEFAULT_KEYS["observables"]], input_dict[DEFAULT_KEYS["conditions"]], training=False, **kwargs
)
# Compute approximate log likelihood
log_lik = self.latent_dist.log_prob(z) + log_det_J
self._check_output_sanity(log_lik)
# Convert tensor to numpy array, if specified
if to_numpy:
return log_lik.numpy()
return log_lik
def log_prob(self, input_dict, to_numpy=True, **kwargs):
"""Identical to `log_likelihood(input_dict, to_numpy, **kwargs)`."""
return self.log_likelihood(input_dict, to_numpy=to_numpy, **kwargs)
def compute_loss(self, input_dict, **kwargs):
"""Computes the loss of the amortized given input data provided in input_dict.
Parameters
----------
input_dict : dict
Input dictionary containing the following mandatory keys:
            ``observables`` - the observables over which a conditional density is learned
            ``conditions`` - the conditioning variables that are directly passed to the surrogate network
**kwargs : dict, optional, default: {}
Additional keyword arguments passed to the network
For instance, ``kwargs={'training': True}`` is passed automatically during simulation-based training.
Returns
-------
loss : tf.Tensor of shape (1,) - the total computed loss given input variables
"""
z, log_det_J = self(input_dict, **kwargs)
loss = tf.reduce_mean(-self.latent_dist.log_prob(z) - log_det_J)
return loss
def _determine_latent_dist(self, latent_dist):
"""Determines which latent distribution to use and defaults to unit normal if ``None`` provided."""
if latent_dist is None:
return tfp.distributions.MultivariateNormalDiag(loc=[0.0] * self.latent_dim)
else:
return latent_dist
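# --- Illustrative sketch (not part of the library) ---------------------------
# Evaluating a surrogate likelihood on a toy batch. Using
# ``InvertibleNetwork(num_params=3)`` as the surrogate network is an assumption
# for illustration; only the dictionary keys follow the documented interface.
def _example_amortized_likelihood():
    from bayesflow.networks import InvertibleNetwork
    amortizer = AmortizedLikelihood(surrogate_net=InvertibleNetwork(num_params=3))
    input_dict = {
        DEFAULT_KEYS["observables"]: tf.random.normal((8, 3)),
        DEFAULT_KEYS["conditions"]: tf.random.normal((8, 2)),
    }
    log_lik = amortizer.log_likelihood(input_dict)  # one log-density value per data set
    sim_data = amortizer.sample(input_dict, n_samples=100)  # shape (8, 100, 3)
    return log_lik, sim_data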
class AmortizedPosteriorLikelihood(tf.keras.Model, AmortizedTarget):
"""An interface for jointly learning a surrogate model of the simulator and an approximate
posterior given a generative model, as proposed by:
[1] Radev, S. T., Schmitt, M., Pratz, V., Picchini, U., Köthe, U., & Bürkner, P. C. (2023).
JANA: Jointly Amortized Neural Approximation of Complex Bayesian Models.
arXiv preprint arXiv:2302.09125.
"""
def __init__(self, amortized_posterior, amortized_likelihood, **kwargs):
"""Initializes a joint learner comprising an amortized posterior and an amortized emulator.
Parameters
----------
amortized_posterior : an instance of AmortizedPosterior or a custom tf.keras.Model
The generative neural posterior approximator
amortized_likelihood : an instance of AmortizedLikelihood or a custom tf.keras.Model
The generative neural likelihood approximator
**kwargs : dict, optional, default: {}
Additional keyword arguments passed to the ``__init__`` method of a ``tf.keras.Model`` instance
"""
tf.keras.Model.__init__(self, **kwargs)
self.amortized_posterior = amortized_posterior
self.amortized_likelihood = amortized_likelihood
def call(self, input_dict, **kwargs):
"""Performs a forward pass through both amortizers.
Parameters
----------
input_dict : dict
Input dictionary containing the following mandatory keys:
`posterior_inputs` - The input dictionary for the amortized posterior
`likelihood_inputs` - The input dictionary for the amortized likelihood
Returns
-------
(post_out, lik_out) : tuple
The outputs of the posterior and likelihood networks given input variables.
"""
post_out = self.amortized_posterior(input_dict["posterior_inputs"], **kwargs)
lik_out = self.amortized_likelihood(input_dict["likelihood_inputs"], **kwargs)
return post_out, lik_out
def compute_loss(self, input_dict, **kwargs):
"""Computes the loss of the join amortizer by summing the corresponding amortized posterior
and likelihood losses.
Parameters
----------
input_dict : dict
Nested input dictionary containing the following mandatory keys, if DEFAULT_KEYS unchanged::
`posterior_inputs` - The input dictionary for the amortized posterior
`likelihood_inputs` - The input dictionary for the amortized likelihood
Returns
-------
total_losses : dict
A dictionary with keys `Post.Loss` and `Lik.Loss` containing the individual losses for the
two amortizers.
"""
loss_post = self.amortized_posterior.compute_loss(input_dict[DEFAULT_KEYS["posterior_inputs"]], **kwargs)
loss_lik = self.amortized_likelihood.compute_loss(input_dict[DEFAULT_KEYS["likelihood_inputs"]], **kwargs)
return {"Post.Loss": loss_post, "Lik.Loss": loss_lik}
def log_likelihood(self, input_dict, to_numpy=True, **kwargs):
"""Calculates the approximate log-likelihood of data given conditional variables via
the change-of-variable formula for conditional normalizing flows.
Parameters
----------
input_dict : dict
Input dictionary containing the following mandatory keys, if DEFAULT_KEYS unchanged:
            `observables` - the variables over which a conditional density is learned (i.e., the observables)
`conditions` - the conditioning variables that are directly passed to the inference network
OR a nested dictionary with key `likelihood_inputs` containing the above input dictionary
to_numpy : bool, optional, default: True
Flag indicating whether to return the samples as a `np.array` or a `tf.Tensor`
Returns
-------
log_lik : tf.Tensor of shape (batch_size, n_obs)
the approximate log-likelihood of each data point in each data set
"""
if input_dict.get(DEFAULT_KEYS["likelihood_inputs"]) is not None:
return self.amortized_likelihood.log_likelihood(
input_dict[DEFAULT_KEYS["likelihood_inputs"]], to_numpy=to_numpy, **kwargs
)
return self.amortized_likelihood.log_likelihood(input_dict, to_numpy=to_numpy, **kwargs)
def log_posterior(self, input_dict, to_numpy=True, **kwargs):
"""Calculates the approximate log-posterior of targets given conditional variables via
the change-of-variable formula for conditional normalizing flows.
Parameters
----------
input_dict : dict
Input dictionary containing the following mandatory keys, if DEFAULT_KEYS unchanged:
            `parameters` - the latent generative model parameters over which a conditional density is learned
            `summary_conditions` - the conditioning variables that are first passed through a summary network
            `direct_conditions` - the conditioning variables that are directly passed to the inference network
OR a nested dictionary with key `posterior_inputs` containing the above input dictionary
Returns
-------
log_post : tf.Tensor of shape (batch_size, n_obs)
            the approximate log-posterior density of the parameters for each data set
"""
if input_dict.get(DEFAULT_KEYS["posterior_inputs"]) is not None:
return self.amortized_posterior.log_posterior(
input_dict[DEFAULT_KEYS["posterior_inputs"]], to_numpy=to_numpy, **kwargs
)
return self.amortized_posterior.log_posterior(input_dict, to_numpy=to_numpy, **kwargs)
def log_prob(self, input_dict, to_numpy=True, **kwargs):
"""Identical to calling separate `log_likelihood()` and `log_posterior()`.
Returns
-------
out_dict : dict with keys `log_posterior` and `log_likelihood` corresponding
to the computed log_pdfs of the approximate posterior and likelihood.
"""
log_post = self.log_posterior(input_dict, to_numpy=to_numpy, **kwargs)
log_lik = self.log_likelihood(input_dict, to_numpy=to_numpy, **kwargs)
out_dict = {"log_posterior": log_post, "log_likelihood": log_lik}
return out_dict
def sample_data(self, input_dict, n_samples, to_numpy=True, **kwargs):
"""Generates `n_samples` random draws from the surrogate likelihood given input conditions.
Parameters
----------
input_dict : dict
Input dictionary containing the following mandatory keys, if DEFAULT_KEYS unchanged:
            `conditions` - the conditioning variables that are directly passed to the inference network
OR a nested dictionary with key `likelihood_inputs` containing the above input dictionary
n_samples : int
The number of posterior samples to obtain from the approximate posterior
to_numpy : bool, optional, default: True
Flag indicating whether to return the samples as a `np.array` or a `tf.Tensor`
Returns
-------
lik_samples : tf.Tensor or np.ndarray of shape (n_datasets, n_samples, None)
Simulated observables from the surrogate likelihood.
"""
if input_dict.get(DEFAULT_KEYS["likelihood_inputs"]) is not None:
return self.amortized_likelihood.sample(
input_dict[DEFAULT_KEYS["likelihood_inputs"]], n_samples, to_numpy=to_numpy, **kwargs
)
return self.amortized_likelihood.sample(input_dict, n_samples, to_numpy=to_numpy, **kwargs)
def sample_parameters(self, input_dict, n_samples, to_numpy=True, **kwargs):
"""Generates random draws from the approximate posterior given conditonal variables.
Parameters
----------
input_dict : dict
            Input dictionary containing the following mandatory keys, if DEFAULT_KEYS unchanged:
            `summary_conditions` : the conditioning variables (including data) that are first passed through a summary network
            `direct_conditions` : the conditioning variables that are directly passed to the inference network
OR a nested dictionary with key `posterior_inputs` containing the above input dictionary
n_samples : int
The number of posterior samples to obtain from the approximate posterior
to_numpy : bool, optional, default: True
Boolean flag indicating whether to return the samples as a `np.array` or a `tf.Tensor`
Returns
-------
post_samples : tf.Tensor or np.ndarray of shape (n_datasets, n_samples, n_params)
the sampled parameters per data set
"""
if input_dict.get(DEFAULT_KEYS["posterior_inputs"]) is not None:
return self.amortized_posterior.sample(
input_dict[DEFAULT_KEYS["posterior_inputs"]], n_samples, to_numpy=to_numpy, **kwargs
)
return self.amortized_posterior.sample(input_dict, n_samples, to_numpy=to_numpy, **kwargs)
def sample(self, input_dict, n_post_samples, n_lik_samples, to_numpy=True, **kwargs):
"""Identical to calling `sample_parameters()` and `sample_data()` separately.
Returns
-------
out_dict : dict with keys `posterior_samples` and `likelihood_samples` corresponding
to the `n_samples` from the approximate posterior and likelihood, respectively
"""
post_samples = self.sample_parameters(input_dict, n_post_samples, to_numpy=to_numpy, **kwargs)
lik_samples = self.sample_data(input_dict, n_lik_samples, to_numpy=to_numpy, **kwargs)
out_dict = {"posterior_samples": post_samples, "likelihood_samples": lik_samples}
return out_dict
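# --- Usage sketch (not part of the library API) -----------------------------
# A minimal, illustrative helper showing how the joint amortizer documented
# above can be queried after training. The function name and the assumption of
# an already trained amortizer with a properly packed `input_dict` are
# hypothetical; key names follow DEFAULT_KEYS and shapes are indicative only.
def _example_joint_amortizer_usage(joint_amortizer, input_dict, n_samples=500):
    """Draws from both approximators and evaluates the corresponding log densities."""
    # Joint sampling: posterior draws and surrogate likelihood draws
    samples = joint_amortizer.sample(input_dict, n_post_samples=n_samples, n_lik_samples=n_samples)
    post_draws = samples["posterior_samples"]  # (n_data_sets, n_samples, n_params)
    lik_draws = samples["likelihood_samples"]  # (n_data_sets, n_samples, data_dim)
    # Log densities of the approximate posterior and surrogate likelihood
    log_pdfs = joint_amortizer.log_prob(input_dict)
    return post_draws, lik_draws, log_pdfs["log_posterior"], log_pdfs["log_likelihood"]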
class AmortizedModelComparison(tf.keras.Model):
"""An interface to connect an evidential network for Bayesian model comparison with an optional summary network,
as described in the original paper on evidential neural networks for model comparison according to [1, 2]:
[1] Radev, S. T., D'Alessandro, M., Mertens, U. K., Voss, A., Köthe, U., & Bürkner, P. C. (2021).
Amortized bayesian model comparison with evidential deep learning.
IEEE Transactions on Neural Networks and Learning Systems.
[2] Elsemüller, L., Schnuerch, M., Bürkner, P. C., & Radev, S. T. (2023).
A Deep Learning Method for Comparing Bayesian Hierarchical Models.
arXiv preprint arXiv:2301.11873.
Note: the original paper [1] does not distinguish between the summary and the evidential networks, but
treats them as a whole, with the appropriate architecture dictated by the model application. For the
sake of consistency and modularity, the BayesFlow library separates the two constructs.
"""
def __init__(self, inference_net, summary_net=None, loss_fun=None):
"""Initializes a composite neural architecture for amortized bayesian model comparison.
Parameters
----------
inference_net : tf.keras.Model
A neural network which outputs model evidences.
summary_net : tf.keras.Model or None, optional, default: None
An optional summary network
loss_fun : callable or None, optional, default: None
The loss function which accepts the outputs of the amortizer. If None, the loss will be the log-loss.
Important
----------
- If no ``summary_net`` is provided, then the output dictionary of your generative model should not contain
any ``summary_conditions``, i.e., ``summary_conditions`` should be set to None, otherwise these will be ignored.
- If no custom ``loss_fun`` is provided, the loss function will be the log loss for the means of a Dirichlet
distribution or softmax outputs.
"""
super().__init__()
self.inference_net = inference_net
self.summary_net = summary_net
self.loss = self._determine_loss(loss_fun)
self.num_models = self.inference_net.num_models
def call(self, input_dict, return_summary=False, **kwargs):
"""Performs a forward pass through both networks.
Parameters
----------
input_dict : dict
Input dictionary containing the following mandatory keys, if DEFAULT_KEYS unchanged
`summary_conditions` - the conditioning variables that are first passed through a summary network
`direct_conditions` - the conditioning variables that are directly passed to the evidential network
`model_indices` - the ground-truth, one-hot encoded model indices sampled from the model prior
return_summary : bool, optional, default: False
Indicates whether the summary network outputs are returned along with the estimated evidences.
Returns
-------
net_out : tf.Tensor of shape (batch_size, num_models) or tuple of (net_out (batch_size, num_models),
summary_out (batch_size, summary_dim)), the latter being the summary network outputs, if
``return_summary is True``.
"""
summary_out, full_cond = self._compute_summary_condition(
input_dict.get(DEFAULT_KEYS["summary_conditions"]),
input_dict.get(DEFAULT_KEYS["direct_conditions"]),
**kwargs,
)
net_out = self.inference_net(full_cond, **kwargs)
if not return_summary:
return net_out
return net_out, summary_out
def posterior_probs(self, input_dict, to_numpy=True, **kwargs):
"""Compute posterior model probabilities (PMPs) given a dictionary with observed or
simulated data.
Parameters
----------
input_dict : dict
Input dictionary containing at least one of the following mandatory keys, if DEFAULT_KEYS unchanged
`summary_conditions` - the conditioning variables that are first passed through a summary network
`direct_conditions` - the conditioning variables that are directly passed to the evidential network
to_numpy : bool, optional, default: True
Flag indicating whether to return the PMPs as a ``np.ndarray`` or a ``tf.Tensor``
**kwargs : dict, optional, default: {}
Additional keyword arguments passed to the networks
Returns
-------
out : tf.Tensor of shape (batch_size, ..., num_models)
The approximated PMPs
"""
_, full_cond = self._compute_summary_condition(
input_dict.get(DEFAULT_KEYS["summary_conditions"]),
input_dict.get(DEFAULT_KEYS["direct_conditions"]),
**kwargs,
)
pmps = self.inference_net(full_cond, **kwargs)
if to_numpy:
return pmps.numpy()
return pmps
def compute_loss(self, input_dict, **kwargs):
"""Computes the loss of the amortized model comparison instance.
Parameters
----------
input_dict : dict
Input dictionary containing the following mandatory keys, if DEFAULT_KEYS unchanged:
`summary_conditions` - the conditioning variables that are first passed through a summary network
`direct_conditions` - the conditioning variables that are directly passed to the evidential network
`model_indices` - the ground-truth, one-hot encoded model indices sampled from the model prior
Returns
-------
loss : tf.Tensor of shape (1,) - the total computed loss given input variables
"""
preds = self(input_dict, **kwargs)
loss = self.loss(input_dict[DEFAULT_KEYS["model_indices"]], preds)
return loss
def _compute_summary_condition(self, summary_conditions, direct_conditions, **kwargs):
"""Helper method to determines how to concatenate the provided conditions."""
# Compute learnable summaries, if given
if self.summary_net is not None:
sum_condition = self.summary_net(summary_conditions, **kwargs)
else:
sum_condition = None
# Concatenate learnable summaries with fixed summaries, if provided
if sum_condition is not None and direct_conditions is not None:
full_cond = tf.concat([sum_condition, direct_conditions], axis=-1)
elif sum_condition is not None:
full_cond = sum_condition
elif direct_conditions is not None:
full_cond = direct_conditions
else:
raise SummaryStatsError("Could not concatenate or determine conditioning inputs...")
return sum_condition, full_cond
def _determine_loss(self, loss_fun):
"""Helper method to determine loss function to use."""
if loss_fun is None:
return partial(log_loss, evidential=isinstance(self.inference_net, EvidentialNetwork))
elif callable(loss_fun):
return loss_fun
else:
raise ConfigurationError(
"Loss function is neither default (`None`) not callable. Please provide a valid loss function!"
)
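# --- Usage sketch (not part of the library API) -----------------------------
# A minimal, illustrative helper for an already trained ``AmortizedModelComparison``
# instance. The function name is hypothetical; ``obs_dict`` is assumed to contain
# ``summary_conditions`` and/or ``direct_conditions`` as documented in
# ``posterior_probs`` above.
def _example_model_comparison_usage(model_comparison_amortizer, obs_dict):
    """Obtains posterior model probabilities (PMPs) and the most probable model index."""
    pmps = model_comparison_amortizer.posterior_probs(obs_dict)  # (batch_size, num_models)
    preferred_model = pmps.argmax(axis=-1)  # index of the most probable model per data set
    return pmps, preferred_model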
class TwoLevelAmortizedPosterior(tf.keras.Model, AmortizedTarget):
"""An interface for estimating arbitrary two level hierarchical Bayesian models."""
def __init__(self, local_amortizer, global_amortizer, summary_net=None, **kwargs):
"""Creates an wrapper for estimating two-level hierarchical Bayesian models.
Parameters
----------
local_amortizer : bayesflow.amortizers.AmortizedPosterior
A posterior amortizer without a summary network which will estimate
the full conditional of the (varying numbers of) local parameter vectors.
global_amortizer : bayesflow.amortizers.AmortizedPosterior
A posterior amortizer without a summary network which will estimate the joint
posterior of hyperparameters and optional shared parameters given a representation
of an entire hierarchical data set. If both hyper- and shared parameters are present,
the first dimensions correspond to the hyperparameters and the remaining ones correspond
to the shared parameters.
summary_net : tf.keras.Model or None, optional, default: None
An optional summary network to compress non-vector data structures.
**kwargs : dict, optional, default: {}
Additional keyword arguments passed to the ``__init__`` method of a ``tf.keras.Model`` instance.
"""
super().__init__(**kwargs)
self.local_amortizer = local_amortizer
self.global_amortizer = global_amortizer
self.summary_net = summary_net
def call(self, input_dict, **kwargs):
"""Forward pass through the hierarchical amortized posterior."""
local_summaries, global_summaries = self._compute_condition(input_dict, **kwargs)
local_inputs, global_inputs = self._prepare_inputs(input_dict, local_summaries, global_summaries)
local_out = self.local_amortizer(local_inputs, **kwargs)
global_out = self.global_amortizer(global_inputs, **kwargs)
return local_out, global_out
def compute_loss(self, input_dict, **kwargs):
"""Compute loss of all amortizers."""
local_summaries, global_summaries = self._compute_condition(input_dict, **kwargs)
local_inputs, global_inputs = self._prepare_inputs(input_dict, local_summaries, global_summaries)
local_loss = self.local_amortizer.compute_loss(local_inputs, **kwargs)
global_loss = self.global_amortizer.compute_loss(global_inputs, **kwargs)
return {"Local.Loss": local_loss, "Global.Loss": global_loss}
def sample(self, input_dict, n_samples, to_numpy=True, **kwargs):
"""Obtains samples from the joint hierarchical posterior given observations.
Important: Currently works only for single hierarchical data sets!
Parameters
----------
input_dict : dict
Input dictionary containing the following mandatory keys, if DEFAULT_KEYS unchanged:
`summary_conditions` - the hierarchical data set (to be embedded by the summary net)
As well as optional keys:
`direct_local_conditions` - (Context) variables used to condition the local posterior
`direct_global_conditions` - (Context) variables used to condition the global posterior
n_samples : int
The number of posterior draws (samples) to obtain from the approximate posterior
to_numpy : bool, optional, default: True
Flag indicating whether to return the samples as a `np.array` or a `tf.Tensor`
**kwargs : dict, optional, default: {}
Additional keyword arguments passed to the summary network as the amortizers
Returns
-------
samples_dict : dict
A dictionary with keys `global_samples` and `local_samples`
Local samples will hold an array-like of shape (num_replicas, num_samples, num_local)
and global samples will hold an array-like of shape (num_samples, num_hyper + num_shared),
if optional shared parameters are present, otherwise (num_samples, num_hyper).
"""
# Returned shapes will be :
# local_summaries.shape = (1, num_groups, summary_dim_local)
# global_summaries.shape = (1, summary_dim_global)
local_summaries, global_summaries = self._get_local_global(input_dict, **kwargs)
num_groups = local_summaries.shape[1]
if local_summaries.shape[0] != 1 or global_summaries.shape[0] != 1:
raise NotImplementedError("Method currently supports only single hierarchical data sets!")
# Obtain samples from p(global | all_data)
inp_global = {DEFAULT_KEYS["direct_conditions"]: global_summaries}
# New, shape will be (n_samples, num_globals)
global_samples = self.global_amortizer.sample(inp_global, n_samples, **kwargs, to_numpy=False)
# Repeat local conditions for n_samples
# New shape -> (num_groups, n_samples, summary_dim_local)
local_summaries = tf.stack([tf.squeeze(local_summaries, axis=0)] * n_samples, axis=1)
# Repeat global samples for num_groups
# New shape -> (num_groups, n_samples, num_globals)
global_samples_rep = tf.stack([global_samples] * num_groups, axis=0)
# Concatenate local summaries with global samples
# New shape -> (num_groups, num_samples, summary_dim_local + num_globals)
local_summaries = tf.concat([local_summaries, global_samples_rep], axis=-1)
# Obtain samples from p(local_i | data_i, global_i)
inp_local = {DEFAULT_KEYS["direct_conditions"]: local_summaries}
local_samples = self.local_amortizer.sample(inp_local, n_samples, to_numpy=False, **kwargs)
if to_numpy:
global_samples = global_samples.numpy()
local_samples = local_samples.numpy()
return {"global_samples": global_samples, "local_samples": local_samples}
def log_prob(self, input_dict):
"""Compute normalized log density."""
raise NotImplementedError
def _prepare_inputs(self, input_dict, local_summaries, global_summaries):
"""Prepare input dictionaries for both amortizers."""
# Prepare inputs for local amortizer
local_inputs = {"direct_conditions": local_summaries, "parameters": input_dict["local_parameters"]}
# Prepare inputs for global amortizer
_parameters = input_dict["hyper_parameters"]
if input_dict.get("shared_parameters") is not None:
_parameters = tf.concat([_parameters, input_dict.get("shared_parameters")], axis=-1)
global_inputs = {"direct_conditions": global_summaries, "parameters": _parameters}
return local_inputs, global_inputs
def _compute_condition(self, input_dict, **kwargs):
"""Determines conditionining variables for both amortizers."""
# Obtain needed summaries
local_summaries, global_summaries = self._get_local_global(input_dict, **kwargs)
# At this point, add globals as conditions
num_locals = tf.shape(local_summaries)[1]
# Add hyper parameters as conditions:
# p(local_n | data_n, hyper)
if input_dict.get("hyper_parameters") is not None:
_params = input_dict.get("hyper_parameters")
_params = tf.expand_dims(_params, 1)
_conds = tf.tile(_params, [1, num_locals, 1])
local_summaries = tf.concat([local_summaries, _conds], axis=-1)
# Add shared parameters as conditions:
# p(local_n | data_n, hyper, shared)
if input_dict.get("shared_parameters") is not None:
_params = input_dict.get("shared_parameters")
_params = tf.expand_dims(_params, 1)
_conds = tf.tile(_params, [1, num_locals, 1])
local_summaries = tf.concat([local_summaries, _conds], axis=-1)
return local_summaries, global_summaries
def _get_local_global(self, input_dict, **kwargs):
"""Helper function to obtain local and global condition tensors."""
# Obtain summary conditions
if self.summary_net is not None:
local_summaries, global_summaries = self.summary_net(
input_dict["summary_conditions"], return_all=True, **kwargs
)
if input_dict.get("direct_local_conditions") is not None:
local_summaries = tf.concat([local_summaries, input_dict.get("direct_local_conditions")], axis=-1)
if input_dict.get("direct_global_conditions") is not None:
global_summaries = tf.concat([global_summaries, input_dict.get("direct_global_conditions")], axis=-1)
# If no summary net provided, assume direct conditions exist or fail
else:
local_summaries = input_dict.get("direct_local_conditions")
global_summaries = input_dict.get("direct_global_conditions")
return local_summaries, global_summaries
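# --- Usage sketch (not part of the library API) -----------------------------
# A minimal, illustrative helper for an already trained ``TwoLevelAmortizedPosterior``
# and a single hierarchical data set packed as documented in ``sample`` above.
# The function name is hypothetical; shapes are indicative only.
def _example_two_level_usage(two_level_amortizer, input_dict, n_samples=1000):
    """Jointly samples global (hyper and shared) and local parameters."""
    samples = two_level_amortizer.sample(input_dict, n_samples)
    global_draws = samples["global_samples"]  # (n_samples, num_hyper [+ num_shared])
    local_draws = samples["local_samples"]    # (num_groups, n_samples, num_local)
    return global_draws, local_draws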
class SingleModelAmortizer(AmortizedPosterior):
"""Deprecated class for amortizer posterior estimation."""
def __init_subclass__(cls, **kwargs):
warn(f"{cls.__name__} will be deprecated. Use `AmortizedPosterior` instead.", DeprecationWarning, stacklevel=2)
super().__init_subclass__(**kwargs)
def __init__(self, *args, **kwargs):
warn(
f"{self.__class__.__name__} will be deprecated. Use `AmortizedPosterior` instead.",
DeprecationWarning,
stacklevel=2,
)
super().__init__(*args, **kwargs)
| 57,826 | 46.052075 | 128 | py |
BayesFlow | BayesFlow-master/bayesflow/attention.py | # Copyright (c) 2022 The BayesFlow Developers
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import tensorflow as tf
from tensorflow.keras.layers import Dense, LayerNormalization, MultiHeadAttention
from tensorflow.keras.models import Sequential
class MultiHeadAttentionBlock(tf.keras.Model):
"""Implements the MAB block from [1] which represents learnable cross-attention.
[1] Lee, J., Lee, Y., Kim, J., Kosiorek, A., Choi, S., & Teh, Y. W. (2019).
Set transformer: A framework for attention-based permutation-invariant neural networks.
In International conference on machine learning (pp. 3744-3753). PMLR.
"""
def __init__(self, input_dim, attention_settings, num_dense_fc, dense_settings, use_layer_norm, **kwargs):
"""Creates a multihead attention block which will typically be used as part of a
set transformer architecture according to [1].
Parameters
----------
input_dim : int
The dimensionality of the input data (last axis).
attention_settings : dict
A dictionary which will be unpacked as the arguments for the ``MultiHeadAttention`` layer
See https://www.tensorflow.org/api_docs/python/tf/keras/layers/MultiHeadAttention.
num_dense_fc : int
The number of hidden layers for the internal feedforward network
dense_settings : dict
A dictionary which will be unpacked as the arguments for the ``Dense`` layer
use_layer_norm : boolean
Whether to apply layer normalization before and after attention + feedforward
**kwargs : dict, optional, default: {}
Optional keyword arguments passed to the __init__() method of tf.keras.Model
"""
super().__init__(**kwargs)
self.att = MultiHeadAttention(**attention_settings)
self.ln_pre = LayerNormalization() if use_layer_norm else None
self.fc = Sequential([Dense(**dense_settings) for _ in range(num_dense_fc)])
self.fc.add(Dense(input_dim))
self.ln_post = LayerNormalization() if use_layer_norm else None
def call(self, x, y, **kwargs):
"""Performs the forward pass through the attention layer.
Parameters
----------
x : tf.Tensor
Input of shape (batch_size, set_size_x, input_dim)
y : tf.Tensor
Input of shape (batch_size, set_size_y, input_dim)
Returns
-------
out : tf.Tensor
Output of shape (batch_size, set_size_x, input_dim)
"""
h = x + self.att(x, y, y, **kwargs)
if self.ln_pre is not None:
h = self.ln_pre(h, **kwargs)
out = h + self.fc(h, **kwargs)
if self.ln_post is not None:
out = self.ln_post(out, **kwargs)
return out
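# --- Usage sketch (not part of the library API) -----------------------------
# A small, self-contained example of cross-attention between two sets of
# different sizes. The settings below are plausible illustrations, not the
# library's canonical defaults, and the function name is hypothetical.
def _example_mab_usage():
    block = MultiHeadAttentionBlock(
        input_dim=8,
        attention_settings=dict(num_heads=4, key_dim=16),
        num_dense_fc=2,
        dense_settings=dict(units=32, activation="relu"),
        use_layer_norm=True,
    )
    x = tf.random.normal((2, 10, 8))  # (batch_size, set_size_x, input_dim)
    y = tf.random.normal((2, 20, 8))  # (batch_size, set_size_y, input_dim)
    return block(x, y)  # (batch_size, set_size_x, input_dim)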
class SelfAttentionBlock(tf.keras.Model):
"""Implements the SAB block from [1] which represents learnable self-attention.
[1] Lee, J., Lee, Y., Kim, J., Kosiorek, A., Choi, S., & Teh, Y. W. (2019).
Set transformer: A framework for attention-based permutation-invariant neural networks.
In International conference on machine learning (pp. 3744-3753). PMLR.
"""
def __init__(self, input_dim, attention_settings, num_dense_fc, dense_settings, use_layer_norm, **kwargs):
"""Creates a self-attention attention block which will typically be used as part of a
set transformer architecture according to [1].
Parameters
----------
input_dim : int
The dimensionality of the input data (last axis).
attention_settings : dict
A dictionary which will be unpacked as the arguments for the ``MultiHeadAttention`` layer
See https://www.tensorflow.org/api_docs/python/tf/keras/layers/MultiHeadAttention.
num_dense_fc : int
The number of hidden layers for the internal feedforward network
dense_settings : dict
A dictionary which will be unpacked as the arguments for the ``Dense`` layer
use_layer_norm : boolean
Whether to apply layer normalization before and after attention + feedforward
**kwargs : dict, optional, default: {}
Optional keyword arguments passed to the __init__() method of tf.keras.Model
"""
super().__init__(**kwargs)
self.mab = MultiHeadAttentionBlock(input_dim, attention_settings, num_dense_fc, dense_settings, use_layer_norm)
def call(self, x, **kwargs):
"""Performs the forward pass through the self-attention layer.
Parameters
----------
x : tf.Tensor
Input of shape (batch_size, set_size, input_dim)
Returns
-------
out : tf.Tensor
Output of shape (batch_size, set_size, input_dim)
"""
return self.mab(x, x, **kwargs)
class InducedSelfAttentionBlock(tf.keras.Model):
"""Implements the ISAB block from [1] which represents learnable self-attention specifically
designed to deal with large sets via a learnable set of "inducing points".
[1] Lee, J., Lee, Y., Kim, J., Kosiorek, A., Choi, S., & Teh, Y. W. (2019).
Set transformer: A framework for attention-based permutation-invariant neural networks.
In International conference on machine learning (pp. 3744-3753). PMLR.
"""
def __init__(
self, input_dim, attention_settings, num_dense_fc, dense_settings, use_layer_norm, num_inducing_points, **kwargs
):
"""Creates a self-attention attention block with inducing points (ISAB) which will typically
be used as part of a set transformer architecture according to [1].
Parameters
----------
input_dim : int
The dimensionality of the input data (last axis).
attention_settings : dict
A dictionary which will be unpacked as the arguments for the ``MultiHeadAttention`` layer
See https://www.tensorflow.org/api_docs/python/tf/keras/layers/MultiHeadAttention.
num_dense_fc : int
The number of hidden layers for the internal feedforward network
dense_settings : dict
A dictionary which will be unpacked as the arguments for the ``Dense`` layer
use_layer_norm : boolean
Whether to apply layer normalization before and after attention + feedforward
num_inducing_points : int
The number of inducing points. Should be lower than the smallest set size
**kwargs : dict, optional, default: {}
Optional keyword arguments passed to the __init__() method of tf.keras.Model
"""
super().__init__(**kwargs)
init = tf.keras.initializers.GlorotUniform()
self.I = tf.Variable(init(shape=(num_inducing_points, input_dim)), name="I", trainable=True)
self.mab0 = MultiHeadAttentionBlock(input_dim, attention_settings, num_dense_fc, dense_settings, use_layer_norm)
self.mab1 = MultiHeadAttentionBlock(input_dim, attention_settings, num_dense_fc, dense_settings, use_layer_norm)
def call(self, x, **kwargs):
"""Performs the forward pass through the self-attention layer.
Parameters
----------
x : tf.Tensor
Input of shape (batch_size, set_size, input_dim)
Returns
-------
out : tf.Tensor
Output of shape (batch_size, set_size, input_dim)
"""
batch_size = tf.shape(x)[0]
I_expanded = self.I[None, ...]
I_tiled = tf.tile(I_expanded, [batch_size, 1, 1])
h = self.mab0(I_tiled, x, **kwargs)
return self.mab1(x, h, **kwargs)
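# --- Usage sketch (not part of the library API) -----------------------------
# Illustrates self-attention over a large set via a small number of inducing
# points. The settings are plausible illustrations only; the function name is
# hypothetical.
def _example_isab_usage():
    block = InducedSelfAttentionBlock(
        input_dim=8,
        attention_settings=dict(num_heads=4, key_dim=16),
        num_dense_fc=2,
        dense_settings=dict(units=32, activation="relu"),
        use_layer_norm=True,
        num_inducing_points=16,  # much smaller than the set size below
    )
    x = tf.random.normal((2, 128, 8))  # (batch_size, set_size, input_dim)
    return block(x)  # (batch_size, set_size, input_dim)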
class PoolingWithAttention(tf.keras.Model):
"""Implements the pooling with multihead attention (PMA) block from [1] which represents
a permutation-invariant encoder for set-based inputs.
[1] Lee, J., Lee, Y., Kim, J., Kosiorek, A., Choi, S., & Teh, Y. W. (2019).
Set transformer: A framework for attention-based permutation-invariant neural networks.
In International conference on machine learning (pp. 3744-3753). PMLR.
"""
def __init__(
self, summary_dim, attention_settings, num_dense_fc, dense_settings, use_layer_norm, num_seeds=1, **kwargs
):
"""Creates a multihead attention block (MAB) which will perform cross-attention between an input set
and a set of seed vectors (typically one for a single summary) with summary_dim output dimensions.
Could also be used as part of a ``DeepSet`` for representing learnable instead of fixed pooling.
Parameters
----------
summary_dim : int
The dimensionality of the learned permutation-invariant representation.
attention_settings : dict
A dictionary which will be unpacked as the arguments for the ``MultiHeadAttention`` layer
See https://www.tensorflow.org/api_docs/python/tf/keras/layers/MultiHeadAttention.
num_dense_fc : int
The number of hidden layers for the internal feedforward network
dense_settings : dict
A dictionary which will be unpacked as the arguments for the ``Dense`` layer
use_layer_norm : boolean
Whether to apply layer normalization before and after attention + feedforward
num_seeds : int, optional, default: 1
The number of "seed vectors" to use. Each seed vector represents a permutation-invariant
summary of the entire set. If you use ``num_seeds > 1``, the resulting seeds will be flattened
into a 2-dimensional output, which will have a dimensionality of ``num_seeds * summary_dim``
**kwargs : dict, optional, default: {}
Optional keyword arguments passed to the __init__() method of tf.keras.Model
"""
super().__init__(**kwargs)
self.mab = MultiHeadAttentionBlock(
summary_dim, attention_settings, num_dense_fc, dense_settings, use_layer_norm, **kwargs
)
init = tf.keras.initializers.GlorotUniform()
self.seed_vec = tf.Variable(init(shape=(num_seeds, summary_dim)), name="seed_vec", trainable=True)
self.fc = Sequential([Dense(**dense_settings) for _ in range(num_dense_fc)])
self.fc.add(Dense(summary_dim))
def call(self, x, **kwargs):
"""Performs the forward pass through the PMA block.
Parameters
----------
x : tf.Tensor
Input of shape (batch_size, set_size, input_dim)
Returns
-------
out : tf.Tensor
Output of shape (batch_size, num_seeds * summary_dim)
"""
out = self.fc(x)
batch_size = tf.shape(x)[0]
seed_expanded = self.seed_vec[None, ...]
seed_tiled = tf.tile(seed_expanded, [batch_size, 1, 1])
out = self.mab(seed_tiled, out, **kwargs)
return tf.reshape(out, (tf.shape(out)[0], -1))
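# --- Usage sketch (not part of the library API) -----------------------------
# Illustrates pooling a set into a fixed-size, permutation-invariant summary.
# Settings are plausible illustrations only; the function name is hypothetical.
def _example_pma_usage():
    pooler = PoolingWithAttention(
        summary_dim=10,
        attention_settings=dict(num_heads=4, key_dim=16),
        num_dense_fc=2,
        dense_settings=dict(units=32, activation="relu"),
        use_layer_norm=True,
        num_seeds=1,
    )
    x = tf.random.normal((2, 50, 8))  # (batch_size, set_size, input_dim)
    return pooler(x)  # (batch_size, num_seeds * summary_dim) = (2, 10)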
| 12,081 | 43.914498 | 120 | py |
BayesFlow | BayesFlow-master/bayesflow/summary_networks.py | # Copyright (c) 2022 The BayesFlow Developers
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from warnings import warn
import tensorflow as tf
from tensorflow.keras.layers import GRU, LSTM, Dense
from tensorflow.keras.models import Sequential
from bayesflow import default_settings as defaults
from bayesflow.attention import (
InducedSelfAttentionBlock,
MultiHeadAttentionBlock,
PoolingWithAttention,
SelfAttentionBlock,
)
from bayesflow.helper_networks import EquivariantModule, InvariantModule, MultiConv1D
class TimeSeriesTransformer(tf.keras.Model):
"""Implements a many-to-one transformer architecture for time series encoding.
Some ideas can be found in [1]:
[1] Wen, Q., Zhou, T., Zhang, C., Chen, W., Ma, Z., Yan, J., & Sun, L. (2022).
Transformers in time series: A survey. arXiv preprint arXiv:2202.07125.
https://arxiv.org/abs/2202.07125
"""
def __init__(
self,
input_dim,
attention_settings=None,
dense_settings=None,
use_layer_norm=True,
num_dense_fc=2,
summary_dim=10,
num_attention_blocks=2,
template_type="lstm",
template_dim=64,
**kwargs,
):
"""Creates a transformer architecture for encoding time series data into fixed size vectors given by
``summary_dim``. It features a recurrent network given by ``template_type`` which is responsible for
providing a single summary of the time series which then attends to each point in the time series
processed via a series of ``num_attention_blocks`` self-attention layers.
Important: Assumes that positional encodings have been appended to the input time series.
Recommended: When using transformers as summary networks, you may want to use a smaller learning rate
during training, e.g., setting ``default_lr=1e-5`` in a ``Trainer`` instance.
Parameters
----------
input_dim : int
The dimensionality of the input data (last axis).
attention_settings : dict or None, optional, default None
A dictionary which will be unpacked as the arguments for the ``MultiHeadAttention`` layer.
If ``None``, default settings will be used (see ``bayesflow.default_settings``)
For instance, to use an attention block with 4 heads and key dimension 32, you can do:
``attention_settings=dict(num_heads=4, key_dim=32)``
You may also want to include dropout regularization in small-to-medium data regimes:
``attention_settings=dict(num_heads=4, key_dim=32, dropout=0.1)``
For more details and arguments, see:
https://www.tensorflow.org/api_docs/python/tf/keras/layers/MultiHeadAttention
dense_settings : dict or None, optional, default: None
A dictionary which will be unpacked as the arguments for the ``Dense`` layer.
For instance, to use hidden layers with 32 units and a relu activation, you can do:
``dict(units=32, activation='relu')``
For more details and arguments, see:
https://www.tensorflow.org/api_docs/python/tf/keras/layers/Dense
use_layer_norm : boolean, optional, default: True
Whether to apply layer normalization before and after attention + feedforward
num_dense_fc : int, optional, default: 2
The number of hidden layers for the internal feedforward network
summary_dim : int
The dimensionality of the learned permutation-invariant representation.
num_attention_blocks : int, optional, default: 2
The number of self-attention blocks to use before pooling.
template_type : str or callable, optional, default: 'lstm'
The many-to-one (learnable) transformation of the time series.
if ``lstm``, an LSTM network will be used.
if ``gru``, a GRU unit will be used.
if callable, a reference to ``template_type`` will be stored as an attribute.
template_dim : int, optional, default: 64
Only used if ``template_type`` in ['lstm', 'gru']. The number of hidden
units (equiv. output dimensions) of the recurrent network.
**kwargs : dict, optional, default: {}
Optional keyword arguments passed to the __init__() method of tf.keras.Model
"""
super().__init__(**kwargs)
# Process internal attention settings
if attention_settings is None:
attention_settings = defaults.DEFAULT_SETTING_ATTENTION
if dense_settings is None:
dense_settings = defaults.DEFAULT_SETTING_DENSE_ATTENTION
# Construct a series of self-attention blocks, these will process
# the time series in a many-to-many fashion
self.attention_blocks = Sequential()
for _ in range(num_attention_blocks):
block = SelfAttentionBlock(input_dim, attention_settings, num_dense_fc, dense_settings, use_layer_norm)
self.attention_blocks.add(block)
# Construct final attention layer, which will perform cross-attention
# between the outputs of the self-attention layers and the dynamic template
self.output_attention = MultiHeadAttentionBlock(
template_dim, attention_settings, num_dense_fc, dense_settings, use_layer_norm
)
# A recurrent network will learn the dynamic many-to-one template
if template_type.upper() == "LSTM":
self.template_net = LSTM(template_dim)
elif template_type.upper() == "GRU":
self.template_net = GRU(template_dim)
else:
assert callable(template_type), "Argument `template_type` should be callable or in ['lstm', 'gru']"
self.template_net = template_type
# Final output reduces representation into a vector of length summary_dim
self.output_layer = Dense(summary_dim)
def call(self, x, **kwargs):
"""Performs the forward pass through the transformer.
Parameters
----------
x : tf.Tensor
Time series input of shape (batch_size, num_time_points, input_dim)
Returns
-------
out : tf.Tensor
Output of shape (batch_size, summary_dim)
"""
rep = self.attention_blocks(x, **kwargs)
template = self.template_net(x, **kwargs)
rep = self.output_attention(tf.expand_dims(template, axis=1), rep, **kwargs)
rep = tf.squeeze(rep, axis=1)
out = self.output_layer(rep)
return out
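# --- Usage sketch (not part of the library API) -----------------------------
# Encodes a batch of (positionally encoded) time series into fixed-size
# summary vectors using the default attention settings. The function name and
# the concrete dimensions are hypothetical.
def _example_time_series_transformer_usage():
    summary_net = TimeSeriesTransformer(input_dim=3, summary_dim=10)
    # 2 data sets, 100 time points, 3 dims (e.g., 2 observed series + 1 positional encoding)
    x = tf.random.normal((2, 100, 3))
    return summary_net(x)  # (2, 10)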
class SetTransformer(tf.keras.Model):
"""Implements the set transformer architecture from [1] which ultimately represents
a learnable permutation-invariant function.
[1] Lee, J., Lee, Y., Kim, J., Kosiorek, A., Choi, S., & Teh, Y. W. (2019).
Set transformer: A framework for attention-based permutation-invariant neural networks.
In International conference on machine learning (pp. 3744-3753). PMLR.
"""
def __init__(
self,
input_dim,
attention_settings=None,
dense_settings=None,
use_layer_norm=True,
num_dense_fc=2,
summary_dim=10,
num_attention_blocks=2,
num_inducing_points=32,
num_seeds=1,
**kwargs,
):
"""Creates a set transformer architecture according to [1] which will extract permutation-invariant
features from an input set using a set of seed vectors (typically one for a single summary) with ``summary_dim``
output dimensions.
Recommended: When using transformers as summary networks, you may want to use a smaller learning rate
during training, e.g., setting ``default_lr=1e-5`` in a ``Trainer`` instance.
Parameters
----------
input_dim : int
The dimensionality of the input data (last axis).
attention_settings : dict or None, optional, default: None
A dictionary which will be unpacked as the arguments for the ``MultiHeadAttention`` layer
For instance, to use an attention block with 4 heads and key dimension 32, you can do:
``attention_settings=dict(num_heads=4, key_dim=32)``
You may also want to include dropout regularization in small-to-medium data regimes:
``attention_settings=dict(num_heads=4, key_dim=32, dropout=0.1)``
For more details and arguments, see:
https://www.tensorflow.org/api_docs/python/tf/keras/layers/MultiHeadAttention
dense_settings : dict or None, optional, default: None
A dictionary which will be unpacked as the arguments for the ``Dense`` layer.
For instance, to use hidden layers with 32 units and a relu activation, you can do:
``dict(units=32, activation='relu')``
For more details and arguments, see:
https://www.tensorflow.org/api_docs/python/tf/keras/layers/Dense
use_layer_norm : boolean, optional, default: True
Whether to apply layer normalization before and after attention + feedforward
num_dense_fc : int, optional, default: 2
The number of hidden layers for the internal feedforward network
summary_dim : int
The dimensionality of the learned permutation-invariant representation.
num_attention_blocks : int, optional, default: 2
The number of self-attention blocks to use before pooling.
num_inducing_points : int or None, optional, default: 32
The number of inducing points. Should be lower than the smallest set size.
If ``None`` is selected, a vanilla self-attention block (SAB) will be used, otherwise
ISAB blocks will be used. For ``num_attention_blocks > 1``, we currently recommend
always using some number of inducing points.
num_seeds : int, optional, default: 1
The number of "seed vectors" to use. Each seed vector represents a permutation-invariant
summary of the entire set. If you use ``num_seeds > 1``, the resulting seeds will be flattened
into a 2-dimensional output, which will have a dimensionality of ``num_seeds * summary_dim``.
**kwargs : dict, optional, default: {}
Optional keyword arguments passed to the __init__() method of tf.keras.Model
"""
super().__init__(**kwargs)
# Process internal attention settings
if attention_settings is None:
attention_settings = defaults.DEFAULT_SETTING_ATTENTION
if dense_settings is None:
dense_settings = defaults.DEFAULT_SETTING_DENSE_ATTENTION
# Construct a series of self-attention blocks
self.attention_blocks = Sequential()
for _ in range(num_attention_blocks):
if num_inducing_points is not None:
block = InducedSelfAttentionBlock(
input_dim, attention_settings, num_dense_fc, dense_settings, use_layer_norm, num_inducing_points
)
else:
block = SelfAttentionBlock(input_dim, attention_settings, num_dense_fc, dense_settings, use_layer_norm)
self.attention_blocks.add(block)
# Pooler will be applied to the representations learned through self-attention
self.pooler = PoolingWithAttention(
summary_dim, attention_settings, num_dense_fc, dense_settings, use_layer_norm, num_seeds
)
def call(self, x, **kwargs):
"""Performs the forward pass through the set-transformer.
Parameters
----------
x : tf.Tensor
The input set of shape (batch_size, set_size, input_dim)
Returns
-------
out : tf.Tensor
Output of shape (batch_size, summary_dim * num_seeds)
"""
out = self.attention_blocks(x, **kwargs)
out = self.pooler(out, **kwargs)
return out
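# --- Usage sketch (not part of the library API) -----------------------------
# Compresses an exchangeable set of observations into a permutation-invariant
# summary vector. The function name and concrete dimensions are hypothetical.
def _example_set_transformer_usage():
    summary_net = SetTransformer(input_dim=4, summary_dim=10, num_inducing_points=8)
    x = tf.random.normal((2, 64, 4))  # (batch_size, set_size, input_dim)
    return summary_net(x)  # (2, 10), since num_seeds=1 by default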
class DeepSet(tf.keras.Model):
"""Implements a deep permutation-invariant network according to [1] and [2].
[1] Zaheer, M., Kottur, S., Ravanbakhsh, S., Poczos, B., Salakhutdinov, R. R., & Smola, A. J. (2017).
Deep sets. Advances in neural information processing systems, 30.
[2] Bloem-Reddy, B., & Teh, Y. W. (2020).
Probabilistic Symmetries and Invariant Neural Networks.
J. Mach. Learn. Res., 21, 90-1.
"""
def __init__(
self,
summary_dim=10,
num_dense_s1=2,
num_dense_s2=2,
num_dense_s3=2,
num_equiv=2,
dense_s1_args=None,
dense_s2_args=None,
dense_s3_args=None,
pooling_fun="mean",
**kwargs,
):
"""Creates a stack of 'num_equiv' equivariant layers followed by a final invariant layer.
Parameters
----------
summary_dim : int, optional, default: 10
The number of learned summary statistics.
num_dense_s1 : int, optional, default: 2
The number of dense layers in the inner function of a deep set.
num_dense_s2 : int, optional, default: 2
The number of dense layers in the outer function of a deep set.
num_dense_s3 : int, optional, default: 2
The number of dense layers in an equivariant layer.
num_equiv : int, optional, default: 2
The number of equivariant layers in the network.
dense_s1_args : dict or None, optional, default: None
The arguments for the dense layers of s1 (inner, pre-pooling function). If `None`,
defaults will be used (see `default_settings`). Otherwise, all arguments for a
tf.keras.layers.Dense layer are supported.
dense_s2_args : dict or None, optional, default: None
The arguments for the dense layers of s2 (outer, post-pooling function). If `None`,
defaults will be used (see `default_settings`). Otherwise, all arguments for a
tf.keras.layers.Dense layer are supported.
dense_s3_args : dict or None, optional, default: None
The arguments for the dense layers of s3 (equivariant function). If `None`,
defaults will be used (see `default_settings`). Otherwise, all arguments for a
tf.keras.layers.Dense layer are supported.
pooling_fun : str or callable, optional, default: 'mean'
If a string argument is provided, it should be one of ['mean', 'max']. In addition, an actual
neural network can be passed for learnable pooling.
**kwargs : dict, optional, default: {}
Optional keyword arguments passed to the __init__() method of tf.keras.Model.
"""
super().__init__(**kwargs)
# Prepare settings dictionary
settings = dict(
num_dense_s1=num_dense_s1,
num_dense_s2=num_dense_s2,
num_dense_s3=num_dense_s3,
dense_s1_args=defaults.DEFAULT_SETTING_DENSE_INVARIANT if dense_s1_args is None else dense_s1_args,
dense_s2_args=defaults.DEFAULT_SETTING_DENSE_INVARIANT if dense_s2_args is None else dense_s2_args,
dense_s3_args=defaults.DEFAULT_SETTING_DENSE_INVARIANT if dense_s3_args is None else dense_s3_args,
pooling_fun=pooling_fun,
)
# Create equivariant layers and final invariant layer
self.equiv_layers = Sequential([EquivariantModule(settings) for _ in range(num_equiv)])
self.inv = InvariantModule(settings)
# Output layer to output "summary_dim" learned summary statistics
self.out_layer = Dense(summary_dim, activation="linear")
self.summary_dim = summary_dim
def call(self, x):
"""Performs the forward pass of a learnable deep invariant transformation consisting of
a sequence of equivariant transforms followed by an invariant transform.
Parameters
----------
x : tf.Tensor
Input of shape (batch_size, n_obs, data_dim)
Returns
-------
out : tf.Tensor
Output of shape (batch_size, out_dim)
"""
# Pass through series of augmented equivariant transforms
out_equiv = self.equiv_layers(x)
# Pass through final invariant layer
out = self.out_layer(self.inv(out_equiv))
return out
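# --- Usage sketch (not part of the library API) -----------------------------
# Compresses i.i.d. observations into learned summary statistics with the
# default deep set settings. The function name and dimensions are hypothetical.
def _example_deep_set_usage():
    summary_net = DeepSet(summary_dim=10)
    x = tf.random.normal((2, 50, 4))  # (batch_size, n_obs, data_dim)
    return summary_net(x)  # (2, 10)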
class InvariantNetwork(DeepSet):
"""Deprecated class for ``InvariantNetwork``."""
def __init_subclass__(cls, **kwargs):
warn(
f"{cls.__name__} will be deprecated at some point. Use ``DeepSet`` instead.",
DeprecationWarning,
stacklevel=2,
)
super().__init_subclass__(**kwargs)
def __init__(self, *args, **kwargs):
warn(
f"{self.__class__.__name__} will be deprecated. at some point. Use ``DeepSet`` instead.",
DeprecationWarning,
stacklevel=2,
)
super().__init__(*args, **kwargs)
class SequentialNetwork(tf.keras.Model):
"""Implements a sequence of `MultiConv1D` layers followed by an LSTM network.
For details and rationale, see [1]:
[1] Radev, S. T., Graw, F., Chen, S., Mutters, N. T., Eichel, V. M., Bärnighausen, T., & Köthe, U. (2021).
OutbreakFlow: Model-based Bayesian inference of disease outbreak dynamics with invertible neural networks
and its application to the COVID-19 pandemics in Germany.
PLoS computational biology, 17(10), e1009472.
https://journals.plos.org/ploscompbiol/article?id=10.1371/journal.pcbi.1009472
"""
def __init__(self, summary_dim=10, num_conv_layers=2, lstm_units=128, conv_settings=None, **kwargs):
"""Creates a stack of inception-like layers followed by an LSTM network, with the idea
of learning vector representations from multivariate time series data.
Parameters
----------
summary_dim : int, optional, default: 10
The number of learned summary statistics.
num_conv_layers : int, optional, default: 2
The number of convolutional layers to use.
lstm_units : int, optional, default: 128
The number of hidden LSTM units.
conv_settings : dict or None, optional, default: None
The arguments passed to the `MultiConv1D` internal networks. If `None`,
defaults will be used from `default_settings`. If a dictionary is provided,
it should contain the following keys:
- layer_args (dict) : arguments for `tf.keras.layers.Conv1D` without kernel_size
- min_kernel_size (int) : the minimum kernel size (>= 1)
- max_kernel_size (int) : the maximum kernel size
**kwargs : dict
Optional keyword arguments passed to the __init__() method of tf.keras.Model
"""
super().__init__(**kwargs)
# Take care of None conv_settings
if conv_settings is None:
conv_settings = defaults.DEFAULT_SETTING_MULTI_CONV
self.net = Sequential([MultiConv1D(conv_settings) for _ in range(num_conv_layers)])
self.lstm = LSTM(lstm_units)
self.out_layer = Dense(summary_dim, activation="linear")
self.summary_dim = summary_dim
def call(self, x, **kwargs):
"""Performs a forward pass through the network by first passing `x` through the sequence of
multi-convolutional layers and then applying the LSTM network.
Parameters
----------
x : tf.Tensor
Input of shape (batch_size, n_time_steps, n_time_series)
Returns
-------
out : tf.Tensor
Output of shape (batch_size, summary_dim)
"""
out = self.net(x, **kwargs)
out = self.lstm(out, **kwargs)
out = self.out_layer(out, **kwargs)
return out
class SplitNetwork(tf.keras.Model):
"""Implements a vertical stack of networks and concatenates their individual outputs. Allows for splitting
of data to provide an individual network for each split of the data.
"""
def __init__(self, num_splits, split_data_configurator, network_type=InvariantNetwork, network_kwargs={}, **kwargs):
"""Creates a composite network of `num_splits` sub-networks of type `network_type`, each with configuration
specified by `meta`.
Parameters
----------
num_splits : int
The number of splits for the data, which will equal the number of sub-networks.
split_data_configurator : callable
Function that takes the arguments `i` and `x` where `i` is the index of the
network and `x` are the inputs to the `SplitNetwork`. Should return the input
for the corresponding network.
For example, to achieve a network which is permutation-invariant both
vertically (i.e., across rows) and horizontally (i.e., across columns), one could do:
`def split(i, x):
selector = tf.where(x[:,:,0]==i, 1.0, 0.0)
selected = x[:,:,1] * selector
split_x = tf.stack((selector, selected), axis=-1)
return split_x
`
where `x[:,:,0]` contains an integer indicating which split the data
in `x[:,:,1]` belongs to. All values in `x[:,:,1]` that are not selected
are set to zero. The selector is passed along with the modified data,
indicating which rows belong to the split `i`.
network_type : callable, optional, default: `InvariantNetwork`
Type of neural network to use.
network_kwargs : dict, optional, default: {}
A dictionary containing the configuration (keyword arguments) for the sub-networks.
**kwargs
Optional keyword arguments to be passed to the `tf.keras.Model` superclass.
"""
super().__init__(**kwargs)
self.num_splits = num_splits
self.split_data_configurator = split_data_configurator
self.networks = [network_type(**network_kwargs) for _ in range(num_splits)]
def call(self, x):
"""Performs a forward pass through the subnetworks and concatenates their output.
Parameters
----------
x : tf.Tensor
Input of shape (batch_size, n_obs, data_dim)
Returns
-------
out : tf.Tensor
Output of shape (batch_size, out_dim)
"""
out = [self.networks[i](self.split_data_configurator(i, x)) for i in range(self.num_splits)]
out = tf.concat(out, axis=-1)
return out
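# --- Usage sketch (not part of the library API) -----------------------------
# Demonstrates the split configurator described in the docstring above: column 0
# carries the split index, column 1 the actual measurements, and each split is
# summarized by its own ``DeepSet``. The function name and dimensions are hypothetical.
def _example_split_network_usage():
    def split(i, x):
        selector = tf.where(x[:, :, 0] == i, 1.0, 0.0)
        selected = x[:, :, 1] * selector
        return tf.stack((selector, selected), axis=-1)

    net = SplitNetwork(
        num_splits=2, split_data_configurator=split, network_type=DeepSet, network_kwargs=dict(summary_dim=4)
    )
    idx = tf.cast(tf.random.uniform((3, 20, 1), maxval=2, dtype=tf.int32), tf.float32)
    vals = tf.random.normal((3, 20, 1))
    x = tf.concat([idx, vals], axis=-1)  # (batch_size, n_obs, 2)
    return net(x)  # (3, 2 * 4)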
class HierarchicalNetwork(tf.keras.Model):
"""Implements a hierarchical summary network according to [1].
[1] Elsemüller, L., Schnuerch, M., Bürkner, P. C., & Radev, S. T. (2023).
A Deep Learning Method for Comparing Bayesian Hierarchical Models.
arXiv preprint arXiv:2301.11873.
"""
def __init__(self, networks_list, **kwargs):
"""Creates a hierarchical network consisting of stacked summary networks (one for each hierarchical level)
that are aligned with the probabilistic structure of the processed data.
Note: The networks will start processing from the lowest hierarchical level (e.g., observational level)
up to the highest hierarchical level. It is recommended to provide higher-level networks with more
expressive power to allow for an adequate compression of lower-level data.
Example: For two-level hierarchical models with the assumption of temporal dependencies on the lowest
hierarchical level (e.g., observational level) and exchangeable units at the higher level
(e.g., group level), a list of [SequentialNetwork(), DeepSet()] could be passed.
Parameters
----------
networks_list : list of tf.keras.Model
The list of summary networks (one per hierarchical level), starting from the lowest hierarchical level
"""
super().__init__(**kwargs)
self.networks = networks_list
def call(self, x, return_all=False, **kwargs):
"""Performs the forward pass through the hierarchical network,
transforming the nested input into learned summary statistics.
Parameters
----------
x : tf.Tensor of shape (batch_size, ..., data_dim)
Example, hierarchical data sets with two levels:
(batch_size, D, L, x_dim) -> reduces to (batch_size, out_dim).
return_all : boolean, optional, default: False
Whether to return all intermediate outputs (True) or just
the final one (False).
Returns
-------
out : tf.Tensor
Output of shape ``(batch_size, out_dim)`` if ``return_all=False``, otherwise a list
with ``len(outputs) == len(networks)`` entries corresponding to the outputs of all networks.
"""
if return_all:
outputs = []
for net in self.networks:
x = net(x, **kwargs)
outputs.append(x)
return outputs
else:
for net in self.networks:
x = net(x, **kwargs)
return x
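# --- Usage sketch (not part of the library API) -----------------------------
# Demonstrates the nesting behavior with two toy "summary networks" that simply
# average over the second-to-last axis; in practice these would be proper
# summary networks (e.g., a SequentialNetwork followed by a DeepSet), as noted
# in the class docstring. The function name is hypothetical.
def _example_hierarchical_network_usage():
    lowest = tf.keras.layers.Lambda(lambda t: tf.reduce_mean(t, axis=-2))
    highest = tf.keras.layers.Lambda(lambda t: tf.reduce_mean(t, axis=-2))
    summary_net = HierarchicalNetwork([lowest, highest])
    x = tf.random.normal((2, 5, 40, 3))  # (batch_size, num_groups, num_obs, data_dim)
    return summary_net(x)  # (2, 3): reduced first over observations, then over groups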
| 26,357 | 42.93 | 120 | py |
BayesFlow | BayesFlow-master/bayesflow/helper_classes.py | # Copyright (c) 2022 The BayesFlow Developers
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import re
from copy import deepcopy
import numpy as np
import pandas as pd
import tensorflow as tf
try:
import cPickle as pickle
except:
import pickle
import logging
logging.basicConfig()
from sklearn.linear_model import HuberRegressor
from bayesflow.default_settings import DEFAULT_KEYS
class SimulationDataset:
"""Helper class to create a tensorflow.data.Dataset which parses simulation dictionaries
and returns simulation dictionaries as expected by BayesFlow amortizers.
"""
def __init__(self, forward_dict, batch_size, buffer_size=1024):
"""Creates a wrapper holding a ``tf.data.Dataset`` instance for
offline training in an amortized estimation context.
Parameters
----------
forward_dict : dict
The outputs from a ``GenerativeModel`` or a custom function,
stored in a dictionary with at least the following keys:
``sim_data`` - an array representing the batched output of the model
``prior_draws`` - an array with prior generated from the model's prior
batch_size : int
The total number of simulations in a given batch.
buffer_size : int, optional, default: 1024
The buffer size for shuffling elements in a ``tf.data.Dataset``
"""
slices, keys_used, keys_none, n_sim = self._determine_slices(forward_dict)
self.data = tf.data.Dataset.from_tensor_slices(tuple(slices)).shuffle(buffer_size).batch(batch_size)
self.keys_used = keys_used
self.keys_none = keys_none
self.n_sim = n_sim
self.num_batches = len(self.data)
def _determine_slices(self, forward_dict):
"""Determine slices for a tensorflow Dataset."""
keys_used = []
keys_none = []
slices = []
for k, v in forward_dict.items():
if forward_dict[k] is not None:
slices.append(v)
keys_used.append(k)
else:
keys_none.append(k)
n_sim = forward_dict[DEFAULT_KEYS["sim_data"]].shape[0]
return slices, keys_used, keys_none, n_sim
def __call__(self, batch_in):
"""Convert output of tensorflow.data.Dataset to dict."""
forward_dict = {}
for key_used, batch_stuff in zip(self.keys_used, batch_in):
forward_dict[key_used] = batch_stuff.numpy()
for key_none in self.keys_none:
forward_dict[key_none] = None
return forward_dict
def __len__(self):
return len(self.data)
def __iter__(self):
return map(self, self.data)
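# --- Usage sketch (not part of the library API) -----------------------------
# A small, self-contained example with fake simulations; in practice the
# ``forward_dict`` would come from a ``GenerativeModel``. The function name and
# the array shapes are hypothetical.
def _example_simulation_dataset_usage():
    forward_dict = {
        DEFAULT_KEYS["prior_draws"]: np.random.normal(size=(128, 4)).astype(np.float32),
        DEFAULT_KEYS["sim_data"]: np.random.normal(size=(128, 50, 2)).astype(np.float32),
    }
    dataset = SimulationDataset(forward_dict, batch_size=32)
    for batch in dataset:
        # Each batch is again a dictionary of numpy arrays with at most 32 simulations
        print(batch[DEFAULT_KEYS["sim_data"]].shape)  # (32, 50, 2)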
class MultiSimulationDataset:
"""Helper class for model comparison training with multiple generative models.
Will create multiple ``SimulationDataset`` instances, each parsing their own
simulation dictionaries and returning these as expected by BayesFlow amortizers.
"""
def __init__(self, forward_dict, batch_size, buffer_size=1024):
"""Creates a wrapper holding multiple ``tf.data.Dataset`` instances for
offline training in an amortized model comparison context.
Parameters
----------
forward_dict : dict
The outputs from a ``MultiGenerativeModel`` or a custom function,
stored in a dictionary with at least the following keys:
``model_outputs`` - a list with length equal to the number of models,
each element representing a batched output of a single model
``model_indices`` - a list with integer model indices, which will
later be one-hot-encoded for the model comparison learning problem.
batch_size : int
The total number of simulations from all models in a given batch.
The batch size per model will be calculated as ``batch_size // num_models``
buffer_size : int, optional, default: 1024
The buffer size for shuffling elements in a ``tf.data.Dataset``
"""
self.model_indices = forward_dict[DEFAULT_KEYS["model_indices"]]
self.num_models = len(self.model_indices)
self.per_model_batch_size = batch_size // self.num_models
self.datasets = [
SimulationDataset(out, self.per_model_batch_size, buffer_size)
for out in forward_dict[DEFAULT_KEYS["model_outputs"]]
]
self.current_it = 0
self.num_batches = min([d.num_batches for d in self.datasets])
self.iters = [iter(d) for d in self.datasets]
self.batch_size = batch_size
def __next__(self):
if self.current_it < self.num_batches:
outputs = [next(d) for d in self.iters]
output_dict = {DEFAULT_KEYS["model_outputs"]: outputs, DEFAULT_KEYS["model_indices"]: self.model_indices}
self.current_it += 1
return output_dict
self.current_it = 0
self.iters = [iter(d) for d in self.datasets]
raise StopIteration
def __iter__(self):
return self
class EarlyStopper:
"""This class will track the total validation loss and trigger an early stopping
recommendation based on its hyperparameters."""
def __init__(self, patience=5, tolerance=0.05):
"""
Parameters
----------
patience : int, optional, default: 5
How many successive times the tolerance value is reached before triggering
an early stopping recommendation.
tolerance : float, optional, default: 0.05
The minimum reduction of validation loss to be considered significant.
"""
self.history = []
self.patience = patience
self.tolerance = tolerance
self._patience_counter = 0
def update_and_recommend(self, current_val_loss):
"""Adds loss to history and check difference between sequential losses."""
self.history.append(current_val_loss)
rec = self._check_patience()
return rec
def _check_patience(self):
"""Check whether the patience has been surpassed or not.
Assumes current_val_loss has previously been added to the internal
history, so it has at least one element.
"""
# Still not enough history, no recommendation
if len(self.history) <= 1:
return False
# Significant decrease (improvement) according to tolerance, reset patience
if (self.history[-2] - self.history[-1]) >= self.tolerance:
self._patience_counter = 0
return False
# Not a significant decrease, check counter
else:
# Still no stop recommendation, but increase counter
if self._patience_counter < self.patience:
self._patience_counter += 1
return False
# Reset counter and recommend stop
else:
self._patience_counter = 0
return True
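# --- Usage sketch (not part of the library API) -----------------------------
# Illustrates how the stopper reacts to a plateauing validation loss; the loss
# values and the function name are hypothetical.
def _example_early_stopper_usage():
    stopper = EarlyStopper(patience=3, tolerance=0.05)
    val_losses = [1.0, 0.8, 0.79, 0.788, 0.787, 0.786]
    for epoch, loss in enumerate(val_losses):
        if stopper.update_and_recommend(loss):
            print(f"Early stopping recommended after epoch {epoch}")
            break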
class RegressionLRAdjuster:
"""This class will compute the slope of the loss trajectory and inform learning rate decay."""
file_name = "lr_adjuster"
def __init__(
self,
optimizer,
period=1000,
wait_between_fits=10,
patience=10,
tolerance=-0.05,
reduction_factor=0.25,
cooldown_factor=2,
num_resets=3,
**kwargs,
):
"""Creates an instance with given hyperparameters which will track the slope of the
loss trajectory according to specified hyperparameters and then issue an optional
stopping suggestion.
Parameters
----------
optimizer : tf.keras.optimizers.Optimizer instance
An optimizer implementing a lr() method
period : int, optional, default: 1000
How many loss values to consider from the past
wait_between_fits : int, optional, default: 10
How many backpropagation updates to wait between two successive fits
patience : int, optional, default: 10
How many successive times the tolerance value is reached before lr update.
tolerance : float, optional, default: -0.05
The minimum slope to be considered substantial for training.
reduction_factor : float in [0, 1], optional, default: 0.25
The factor by which the learning rate is reduced upon hitting the `tolerance`
threshold for `patience` number of times
cooldown_factor : float, optional, default: 2
The factor by which the `period` is multiplied to arrive at a cooldown period.
num_resets : int, optional, default: 3
How many times to reduce the learning rate before issuing an optional stopping recommendation
**kwargs : dict, optional, default {}
Additional keyword arguments passed to the `HuberRegression` class.
"""
self.optimizer = optimizer
self.period = period
self.wait_between_periods = wait_between_fits
self.regressor = HuberRegressor(**kwargs)
self.t_vector = np.linspace(0, 1, self.period)[:, np.newaxis]
self.patience = patience
self.tolerance = tolerance
self.num_resets = num_resets
self.reduction_factor = reduction_factor
self.stopping_issued = False
self.cooldown_factor = cooldown_factor
self._history = {"iteration": [], "learning_rate": []}
self._reset_counter = 0
self._patience_counter = 0
self._cooldown_counter = 0
self._wait_counter = 0
self._slope = None
self._is_waiting = False
self._in_cooldown = False
def get_slope(self, losses):
"""Fits a Huber regression on the provided loss trajectory or returns `None` if
not enough data points present.
"""
# Return None if not enough loss values present
if losses.shape[0] < self.period:
return None
# Increment counter
if self._in_cooldown:
self._cooldown_counter += 1
# Check if still in a waiting phase and return old slope
# if still waiting, otherwise refit Huber regression
wait = self._check_waiting()
if wait:
return self._slope
else:
self.regressor.fit(self.t_vector, losses[-self.period :])
self._slope = self.regressor.coef_[0]
self._check_patience()
return self._slope
def reset(self):
"""Resets all stateful variables in preparation for a new start."""
self._reset_counter = 0
self._patience_counter = 0
self._cooldown_counter = 0
self._wait_counter = 0
self._in_cooldown = False
self._is_waiting = False
self.stopping_issued = False
def save_to_file(self, file_path):
"""Saves the state parameters of a RegressionLRAdjuster object to a pickled dictionary in file_path."""
# Create path to memory
memory_path = os.path.join(file_path, f"{RegressionLRAdjuster.file_name}.pkl")
# Prepare attributes
states_dict = {}
states_dict["_history"] = self._history
states_dict["_reset_counter"] = self._reset_counter
states_dict["_patience_counter"] = self._patience_counter
states_dict["_cooldown_counter"] = self._cooldown_counter
states_dict["_wait_counter"] = self._wait_counter
states_dict["_slope"] = self._slope
states_dict["_is_waiting"] = self._is_waiting
states_dict["_in_cooldown"] = self._in_cooldown
# Dump as pickle object
with open(memory_path, "wb") as f:
pickle.dump(states_dict, f)
def load_from_file(self, file_path):
"""Loads the saved LRAdjuster object from file_path."""
# Logger init
logger = logging.getLogger()
logger.setLevel(logging.INFO)
# Create path to memory
memory_path = os.path.join(file_path, f"{RegressionLRAdjuster.file_name}.pkl")
# Case memory file exists
if os.path.exists(memory_path):
# Load pickle and fill in attributes
with open(memory_path, "rb") as f:
states_dict = pickle.load(f)
self._history = states_dict["_history"]
self._reset_counter = states_dict["_reset_counter"]
self._patience_counter = states_dict["_patience_counter"]
self._cooldown_counter = states_dict["_cooldown_counter"]
self._wait_counter = states_dict["_wait_counter"]
self._slope = states_dict["_slope"]
self._is_waiting = states_dict["_is_waiting"]
self._in_cooldown = states_dict["_in_cooldown"]
logger.info(f"Loaded RegressionLRAdjuster from {memory_path}")
# Case memory file does not exist
else:
logger.info("Initialized a new RegressionLRAdjuster.")
def _check_patience(self):
"""Determines whether to reduce learning rate or be patient."""
# Do nothing, if still in cooldown period
if self._in_cooldown and self._cooldown_counter < int(self.cooldown_factor * self.period):
return
# Otherwise set cooldown flag to False and reset counter
else:
self._in_cooldown = False
self._cooldown_counter = 0
        # Check if the negative slope is too small (i.e., loss not decreasing substantially)
if self._slope > self.tolerance:
self._patience_counter += 1
else:
self._patience_counter = max(0, self._patience_counter - 1)
# Check if patience surpassed and issue a reduction in learning rate
if self._patience_counter >= self.patience:
self._reduce_learning_rate()
self._patience_counter = 0
def _reduce_learning_rate(self):
"""Reduces the learning rate by a given factor."""
# Logger init
logger = logging.getLogger()
logger.setLevel(logging.INFO)
if self._reset_counter >= self.num_resets:
self.stopping_issued = True
else:
# Take care of updating learning rate
old_lr = self.optimizer.lr.numpy()
new_lr = round(self.reduction_factor * old_lr, 8)
self.optimizer.lr.assign(new_lr)
self._reset_counter += 1
# Store iteration and learning rate
self._history["iteration"].append(self.optimizer.iterations.numpy())
self._history["learning_rate"].append(old_lr)
# Verbose info to user
logger.info(f"Reducing learning rate from {old_lr:.8f} to: {new_lr:.8f} and entering cooldown...")
# Set cooldown flag to avoid reset for some time given by self.period
self._in_cooldown = True
def _check_waiting(self):
"""Determines whether to compute a new slope or wait."""
# Case currently waiting
if self._is_waiting:
# Case currently waiting but period is over
if self._wait_counter >= self.wait_between_periods - 1:
self._wait_counter = 0
self._is_waiting = False
# Case currently waiting and period not over
else:
self._wait_counter += 1
return True
# Case not waiting
else:
self._is_waiting = True
self._wait_counter += 1
return False
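# A minimal sketch of how RegressionLRAdjuster is typically driven from a training loop.
# `adjuster` is assumed to be an instance of the class above and `loss_history` a 1-D
# np.ndarray of past loss values; both are placeholders for illustration only.
def _example_lr_adjustment(adjuster, loss_history):
    """Queries the loss-trajectory slope and checks for an optional stopping suggestion."""
    slope = adjuster.get_slope(loss_history)  # None until `period` loss values are available
    if adjuster.stopping_issued:
        return "stop"  # learning rate was already reduced `num_resets` times
    return slope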
class LossHistory:
"""Helper class to keep track of losses during training."""
file_name = "history"
def __init__(self):
self.latest = 0
self.history = {}
self.val_history = {}
self.loss_names = []
self.val_loss_names = []
self._current_run = 0
self._total_loss = []
self._total_val_loss = []
@property
def total_loss(self):
return np.array(self._total_loss)
@property
def total_val_loss(self):
return np.array(self._total_val_loss)
def last_total_loss(self):
return self._total_loss[-1]
def last_total_val_loss(self):
return self._total_val_loss[-1]
def start_new_run(self):
self._current_run += 1
self.history[f"Run {self._current_run}"] = {}
self.val_history[f"Run {self._current_run}"] = {}
def add_val_entry(self, epoch, val_loss):
"""Add validation entry to loss structure. Assume ``loss_names`` already exists
as an attribute, so no attempt will be made to create names.
"""
# Add epoch key, if specified
if self.val_history[f"Run {self._current_run}"].get(f"Epoch {epoch}") is None:
self.val_history[f"Run {self._current_run}"][f"Epoch {epoch}"] = []
# Handle dict loss output
if type(val_loss) is dict:
# Store keys, if none existing
if self.val_loss_names == []:
self.val_loss_names = ["Val." + k for k in val_loss.keys()]
# Create and store entry
entry = [v.numpy() if type(v) is not np.ndarray else v for v in val_loss.values()]
self.val_history[f"Run {self._current_run}"][f"Epoch {epoch}"].append(entry)
# Add entry to total loss
self._total_val_loss.append(sum(entry))
# Handle tuple or list loss output
elif type(val_loss) is tuple or type(val_loss) is list:
entry = [v.numpy() if type(v) is not np.ndarray else v for v in val_loss]
self.val_history[f"Run {self._current_run}"][f"Epoch {epoch}"].append(entry)
# Store keys, if none existing
if self.val_loss_names == []:
self.val_loss_names = [f"Val.Loss.{l}" for l in range(1, len(entry) + 1)]
# Add entry to total loss
self._total_val_loss.append(sum(entry))
# Assume scalar loss output
else:
self.val_history[f"Run {self._current_run}"][f"Epoch {epoch}"].append(val_loss.numpy())
# Store keys, if none existing
if self.val_loss_names == []:
self.val_loss_names.append("Default.Val.Loss")
# Add entry to total loss
self._total_val_loss.append(val_loss.numpy())
def add_entry(self, epoch, current_loss):
"""Adds loss entry for current epoch into internal memory data structure."""
# Add epoch key, if specified
if self.history[f"Run {self._current_run}"].get(f"Epoch {epoch}") is None:
self.history[f"Run {self._current_run}"][f"Epoch {epoch}"] = []
# Handle dict loss output
if type(current_loss) is dict:
# Store keys, if none existing
if self.loss_names == []:
self.loss_names = [k for k in current_loss.keys()]
# Create and store entry
entry = [v.numpy() if type(v) is not np.ndarray else v for v in current_loss.values()]
self.history[f"Run {self._current_run}"][f"Epoch {epoch}"].append(entry)
# Add entry to total loss
self._total_loss.append(sum(entry))
# Handle tuple or list loss output
elif type(current_loss) is tuple or type(current_loss) is list:
entry = [v.numpy() if type(v) is not np.ndarray else v for v in current_loss]
self.history[f"Run {self._current_run}"][f"Epoch {epoch}"].append(entry)
# Store keys, if none existing
if self.loss_names == []:
self.loss_names = [f"Loss.{l}" for l in range(1, len(entry) + 1)]
# Add entry to total loss
self._total_loss.append(sum(entry))
# Assume scalar loss output
else:
self.history[f"Run {self._current_run}"][f"Epoch {epoch}"].append(current_loss.numpy())
# Store keys, if none existing
if self.loss_names == []:
self.loss_names.append("Default.Loss")
# Add entry to total loss
self._total_loss.append(current_loss.numpy())
def get_running_losses(self, epoch):
"""Compute and return running means of the losses for current epoch."""
means = np.atleast_1d(np.mean(self.history[f"Run {self._current_run}"][f"Epoch {epoch}"], axis=0))
if means.shape[0] == 1:
return {"Avg.Loss": means[0]}
else:
return {"Avg." + k: v for k, v in zip(self.loss_names, means)}
def get_plottable(self):
"""Returns the losses as a nicely formatted pandas DataFrame, in case
only train losses were collected, otherwise a dict of data frames.
"""
# Assume equal lengths per epoch and run
try:
losses_df = self._to_data_frame(self.history, self.loss_names)
if any([v for v in self.val_history.values()]):
                # Remove decay
names = [name for name in self.loss_names if "Decay" not in name]
val_losses_df = self._to_data_frame(self.val_history, names)
return {"train_losses": losses_df, "val_losses": val_losses_df}
return losses_df
# Handle unequal lengths or problems when user kills training with an interrupt
except ValueError as ve:
if any([v for v in self.val_history.values()]):
return {"train_losses": self.history, "val_losses": self.val_history}
return self.history
except TypeError as te:
if any([v for v in self.val_history.values()]):
return {"train_losses": self.history, "val_losses": self.val_history}
return self.history
def flush(self):
"""Returns current history and removes all existing loss history, but keeps loss names."""
history = self.history
val_history = self.val_history
self.history = {}
self.val_history = {}
self._total_loss = []
self._total_val_loss = []
self._current_run = 0
return history, val_history
def save_to_file(self, file_path, max_to_keep):
"""Saves a `LossHistory` object to a pickled dictionary in file_path.
        If more than max_to_keep saved loss history files exist in file_path, the oldest is deleted once the new one is saved.
"""
# Increment history index
self.latest += 1
# Path to history
history_path = os.path.join(file_path, f"{LossHistory.file_name}_{self.latest}.pkl")
# Prepare full history dict
pickle_dict = {
"history": self.history,
"val_history": self.val_history,
"loss_names": self.loss_names,
"val_loss_names": self.val_loss_names,
"_current_run": self._current_run,
"_total_loss": self._total_loss,
"_total_val_loss": self._total_val_loss,
}
# Pickle current
with open(history_path, "wb") as f:
pickle.dump(pickle_dict, f)
# Get list of history checkpoints
history_checkpoints_list = [l for l in os.listdir(file_path) if "history" in l]
# Determine the oldest saved loss history and remove it
if len(history_checkpoints_list) > max_to_keep:
oldest_history_path = os.path.join(file_path, f"history_{self.latest-max_to_keep}.pkl")
os.remove(oldest_history_path)
def load_from_file(self, file_path):
"""Loads the most recent saved `LossHistory` object from `file_path`."""
# Logger init
logger = logging.getLogger()
logger.setLevel(logging.INFO)
# Get list of histories
if os.path.exists(file_path):
history_checkpoints_list = [l for l in os.listdir(file_path) if LossHistory.file_name in l]
else:
history_checkpoints_list = []
# Case history list is not empty
if len(history_checkpoints_list) > 0:
# Determine which file contains the latest LossHistory and load it
file_numbers = [int(re.findall(r"\d+", h)[0]) for h in history_checkpoints_list]
latest_file = history_checkpoints_list[np.argmax(file_numbers)]
latest_number = np.max(file_numbers)
latest_path = os.path.join(file_path, latest_file)
# Load dictionary
with open(latest_path, "rb") as f:
loaded_history_dict = pickle.load(f)
# Fill public entries
self.latest = latest_number
self.history = loaded_history_dict.get("history", {})
self.val_history = loaded_history_dict.get("val_history", {})
self.loss_names = loaded_history_dict.get("loss_names", [])
self.val_loss_names = loaded_history_dict.get("val_loss_names", [])
# Fill private entries
self._current_run = loaded_history_dict.get("_current_run", 0)
self._total_loss = loaded_history_dict.get("_total_loss", [])
self._total_val_loss = loaded_history_dict.get("_total_val_loss", [])
# Verbose
logger.info(f"Loaded loss history from {latest_path}.")
# Case history list is empty
else:
logger.info("Initialized empty loss history.")
def _to_data_frame(self, history, names):
"""Helper function to convert a history dict into a DataFrame."""
losses_list = [pd.melt(pd.DataFrame.from_dict(history[r], orient="index").T) for r in history]
losses_list = pd.concat(losses_list, axis=0).value.to_list()
losses_list = [l for l in losses_list if l is not None]
losses_df = pd.DataFrame(losses_list, columns=names)
return losses_df
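# A minimal usage sketch of the LossHistory bookkeeping above. The loss values are
# made-up numpy scalars; during training they would come from the optimization step.
def _example_loss_history():
    history = LossHistory()
    history.start_new_run()
    for epoch in range(1, 3):
        for _ in range(5):
            history.add_entry(epoch, {"Loss": np.array(0.5), "Reg": np.array(0.1)})
        running = history.get_running_losses(epoch)  # e.g. {'Avg.Loss': 0.5, 'Avg.Reg': 0.1}
    return running, history.get_plottable()  # DataFrame with columns ['Loss', 'Reg']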
class SimulationMemory:
"""Helper class to keep track of a pre-determined number of simulations during training."""
file_name = "memory"
def __init__(self, stores_raw=True, capacity_in_batches=50):
self.stores_raw = stores_raw
self._capacity = capacity_in_batches
self._buffer = [None] * self._capacity
self._idx = 0
self.size_in_batches = 0
def store(self, forward_dict):
"""Stores simulation outputs in `forward_dict`, if internal buffer is not full.
Parameters
----------
forward_dict : dict
The configured outputs of the forward model.
"""
        # Only store if the buffer still has capacity
if not self.is_full():
self._buffer[self._idx] = forward_dict
self._idx += 1
self.size_in_batches += 1
def get_memory(self):
return deepcopy(self._buffer)
def is_full(self):
"""Returns True if the buffer is full, otherwise False."""
if self._idx >= self._capacity:
return True
return False
def save_to_file(self, file_path):
"""Saves a `SimulationMemory` object to a pickled dictionary in file_path."""
# Create path to memory
memory_path = os.path.join(file_path, f"{SimulationMemory.file_name}.pkl")
# Prepare attributes
full_memory_dict = {}
full_memory_dict["stores_raw"] = self.stores_raw
full_memory_dict["_capacity"] = self._capacity
full_memory_dict["_buffer"] = self._buffer
full_memory_dict["_idx"] = self._idx
full_memory_dict["_size_in_batches"] = self.size_in_batches
# Dump as pickle object
with open(memory_path, "wb") as f:
pickle.dump(full_memory_dict, f)
def load_from_file(self, file_path):
"""Loads the saved `SimulationMemory` object from file_path."""
# Logger init
logger = logging.getLogger()
logger.setLevel(logging.INFO)
# Create path to memory
memory_path = os.path.join(file_path, f"{SimulationMemory.file_name}.pkl")
# Case memory file exists
        if os.path.exists(memory_path):
# Load pickle and fill in attributes
with open(memory_path, "rb") as f:
full_memory_dict = pickle.load(f)
self.stores_raw = full_memory_dict["stores_raw"]
self._capacity = full_memory_dict["_capacity"]
self._buffer = full_memory_dict["_buffer"]
self._idx = full_memory_dict["_idx"]
self.size_in_batches = full_memory_dict["_size_in_batches"]
logger.info(f"Loaded simulation memory from {memory_path}")
# Case memory file does not exist
else:
logger.info("Initialized empty simulation memory.")
class MemoryReplayBuffer:
"""Implements a memory replay buffer for simulation-based inference."""
def __init__(self, capacity_in_batches=500):
"""Creates a circular buffer following the logic of experience replay.
Parameters
----------
        capacity_in_batches : int, optional, default: 500
The capacity of the buffer in batches of simulations. Could potentially grow
very large, so make sure you pick a reasonable number!
"""
self._capacity = capacity_in_batches
self._buffer = [None] * self._capacity
self._idx = 0
self._size_in_batches = 0
self._is_full = False
def store(self, forward_dict):
"""Stores simulation outputs, if internal buffer is not full.
Parameters
----------
forward_dict : dict
            The configured outputs of the forward model.
"""
# If full, overwrite at index
if self._is_full:
self._overwrite(forward_dict)
# Otherwise still capacity to append
else:
# Add to internal list
self._buffer[self._idx] = forward_dict
# Increment index and # of batches currently stored
self._idx += 1
self._size_in_batches += 1
            # Check whether the buffer is full and set the flag if that's the case
if self._idx == self._capacity:
self._is_full = True
def sample(self):
"""Samples `batch_size` number of parameter vectors and simulations from buffer.
Returns
-------
forward_dict : dict
The (raw or configured) outputs of the forward model.
"""
rand_idx = np.random.default_rng().integers(low=0, high=self._size_in_batches)
return self._buffer[rand_idx]
def _overwrite(self, forward_dict):
"""Overwrites a simulated batch at current position. Only called when the internal buffer is full."""
# Reset index, if at the end of buffer
if self._idx == self._capacity:
self._idx = 0
# Overwrite params and data at index
self._buffer[self._idx] = forward_dict
# Increment index
self._idx += 1
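# A minimal sketch of the circular-buffer behaviour implemented above; the forward_dict
# contents are placeholders standing in for configured simulator outputs.
def _example_replay_buffer():
    buffer = MemoryReplayBuffer(capacity_in_batches=2)
    buffer.store({"batch": 0})
    buffer.store({"batch": 1})   # buffer is now full
    buffer.store({"batch": 2})   # overwrites the oldest entry at index 0
    return buffer.sample()       # returns one randomly chosen stored batch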
| 31,839 | 37.223289 | 120 | py |
BayesFlow | BayesFlow-master/bayesflow/experimental/rectifiers.py | # Copyright (c) 2022 The BayesFlow Developers
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from functools import partial
import tensorflow as tf
import tensorflow_probability as tfp
import bayesflow.default_settings as defaults
from bayesflow.computational_utilities import compute_jacobian_trace
from bayesflow.exceptions import SummaryStatsError
from bayesflow.helper_networks import MCDropout
from bayesflow.losses import mmd_summary_space
class DriftNetwork(tf.keras.Model):
"""Implements a learnable velocity field for a neural ODE. Will typically be used
    in conjunction with a ``RectifiedDistribution`` instance, as proposed by [1] in the context
of unconditional image generation.
[1] Liu, X., Gong, C., & Liu, Q. (2022).
Flow straight and fast: Learning to generate and transfer data with rectified flow.
arXiv preprint arXiv:2209.03003.
"""
def __init__(
self, target_dim, num_dense=3, dense_args=None, dropout=True, mc_dropout=False, dropout_prob=0.05, **kwargs
):
"""Creates a learnable velocity field instance to be used in the context of rectifying
flows or neural ODEs.
[1] Liu, X., Gong, C., & Liu, Q. (2022).
Flow straight and fast: Learning to generate and transfer data with rectified flow.
arXiv preprint arXiv:2209.03003.
Parameters
----------
target_dim : int
The problem dimensionality (e.g., in parameter estimation, the number of parameters)
num_dense : int, optional, default: 3
The number of hidden layers for the inner fully-connected network
dense_args : dict or None, optional, default: None
The arguments to be passed to ``tf.keras.layers.Dense`` constructor. If None, default settings
will be fetched from ``bayesflow.default_settings``.
dropout : bool, optional, default: True
Whether to use dropout in-between the hidden layers.
mc_dropout : bool, optional, default: False
Whether to use dropout Monte Carlo dropout (i.e., Bayesian approximation) during inference
dropout_prob : float in (0, 1), optional, default: 0.05
            The dropout probability. Only has effect if ``dropout=True`` or ``mc_dropout=True``
**kwargs : dict, optional, default: {}
Optional keyword arguments passed to the ``tf.keras.Model.__init__`` method.
"""
super().__init__(**kwargs)
self.latent_dim = target_dim
if dense_args is None:
dense_args = defaults.DEFAULT_SETTING_DENSE_RECT
self.net = tf.keras.Sequential()
for _ in range(num_dense):
self.net.add(tf.keras.layers.Dense(**dense_args))
if mc_dropout:
self.net.add(MCDropout(dropout_prob))
elif dropout:
self.net.add(tf.keras.layers.Dropout(dropout_prob))
else:
pass
self.net.add(tf.keras.layers.Dense(self.latent_dim))
self.net.build(input_shape=())
def call(self, target_vars, latent_vars, time, condition, **kwargs):
"""Performs a linear interpolation between target and latent variables
over time (i.e., a single ODE step during training).
Parameters
----------
target_vars : tf.Tensor of shape (batch_size, ..., num_targets)
The variables of interest (e.g., parameters) over which we perform inference.
latent_vars : tf.Tensor of shape (batch_size, ..., num_targets)
The sampled random variates from the base distribution.
time : tf.Tensor of shape (batch_size, ..., 1)
A vector of time indices in (0, 1)
condition : tf.Tensor of shape (batch_size, ..., condition_dim)
The optional conditioning variables (e.g., as returned by a summary network)
**kwargs : dict, optional, default: {}
Optional keyword arguments passed to the ``tf.keras.Model`` call() method
"""
diff = target_vars - latent_vars
wdiff = time * target_vars + (1 - time) * latent_vars
drift = self.drift(wdiff, time, condition, **kwargs)
return diff, drift
def drift(self, target_t, time, condition, **kwargs):
"""Returns the drift at target_t time given optional condition(s).
Parameters
----------
target_t : tf.Tensor of shape (batch_size, ..., num_targets)
The variables of interest (e.g., parameters) over which we perform inference.
time : tf.Tensor of shape (batch_size, ..., 1)
A vector of time indices in (0, 1)
condition : tf.Tensor of shape (batch_size, ..., condition_dim)
The optional conditioning variables (e.g., as returned by a summary network)
**kwargs : dict, optional, default: {}
Optional keyword arguments passed to the drift network.
"""
if condition is not None:
inp = tf.concat([target_t, condition, time], axis=-1)
else:
inp = tf.concat([target_t, time], axis=-1)
return self.net(inp, **kwargs)
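# A minimal sketch of a single rectified-flow training step using the DriftNetwork above:
# the drift at a randomly interpolated point is regressed towards the straight-line
# displacement (targets - latents), mirroring the loss used by RectifiedDistribution below.
# All tensors are random placeholders; the condition dimension is an arbitrary assumption.
def _example_drift_training_step(drift_net, batch_size=8, condition_dim=4):
    targets = tf.random.normal((batch_size, drift_net.latent_dim))
    latents = tf.random.normal((batch_size, drift_net.latent_dim))
    time = tf.random.uniform((batch_size, 1))
    condition = tf.random.normal((batch_size, condition_dim))
    with tf.GradientTape() as tape:
        diff, drift = drift_net(targets, latents, time, condition, training=True)
        loss = tf.reduce_mean(tf.keras.losses.log_cosh(diff, drift))
    grads = tape.gradient(loss, drift_net.trainable_variables)
    return loss, grads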
class RectifiedDistribution(tf.keras.Model):
"""Implements a rectifying flows according to [1]. To be used as an alternative
to a normalizing flow in a BayesFlow pipeline.
[1] Liu, X., Gong, C., & Liu, Q. (2022).
Flow straight and fast: Learning to generate and transfer data with rectified flow.
arXiv preprint arXiv:2209.03003.
"""
def __init__(self, drift_net, summary_net=None, latent_dist=None, loss_fun=None, summary_loss_fun=None, **kwargs):
"""Initializes a composite neural network to represent an amortized approximate posterior through
for a rectifying flow.
Parameters
----------
drift_net : tf.keras.Model
A neural network for the velocity field (drift) of the learnable ODE
summary_net : tf.keras.Model or None, optional, default: None
An optional summary network to compress non-vector data structures.
latent_dist : callable or None, optional, default: None
The latent distribution towards which to optimize the networks. Defaults to
a multivariate unit Gaussian.
loss_fun : callable or None, optional, default: None
The loss function for "rectifying" the velocity field. If ``None``, defaults
to tf.keras.losses.logcosh. Sensible alternatives are MSE (as in [])
summary_loss_fun : callable, str, or None, optional, default: None
The loss function which accepts the outputs of the summary network. If ``None``, no loss is provided
and the summary space will not be shaped according to a known distribution (see [2]).
If ``summary_loss_fun='MMD'``, the default loss from [2] will be used.
**kwargs : dict, optional, default: {}
Additional keyword arguments passed to the ``__init__`` method of a ``tf.keras.Model`` instance.
Important
----------
- If no ``summary_net`` is provided, then the output dictionary of your generative model should not contain
any ``summary_conditions``, i.e., ``summary_conditions`` should be set to ``None``, otherwise these will be ignored.
"""
super().__init__(**kwargs)
self.drift_net = drift_net
self.summary_net = summary_net
self.latent_dim = drift_net.latent_dim
self.latent_dist = self._determine_latent_dist(latent_dist)
self.loss_fun = self._determine_loss(loss_fun)
self.summary_loss = self._determine_summary_loss(summary_loss_fun)
def call(self, input_dict, return_summary=False, num_eval_points=32, **kwargs):
"""Performs a forward pass through the summary and drift network given an input dictionary.
Parameters
----------
input_dict : dict
Input dictionary containing the following mandatory keys, if ``DEFAULT_KEYS`` unchanged:
``targets`` - the latent model parameters over which a condition density is learned
``summary_conditions`` - the conditioning variables (including data) that are first passed through a summary network
            ``direct_conditions`` - the conditioning variables that are directly passed to the inference network
return_summary : bool, optional, default: False
A flag which determines whether the learnable data summaries (representations) are returned or not.
**kwargs : dict, optional, default: {}
Additional keyword arguments passed to the networks
For instance, ``kwargs={'training': True}`` is passed automatically during training.
Returns
-------
net_out or (net_out, summary_out)
"""
# Concatenate conditions, if given
summary_out, full_cond = self._compute_summary_condition(
input_dict.get(defaults.DEFAULT_KEYS["summary_conditions"]),
input_dict.get(defaults.DEFAULT_KEYS["direct_conditions"]),
**kwargs,
)
# Extract target variables
target_vars = input_dict[defaults.DEFAULT_KEYS["targets"]]
# Extract batch size (autograph friendly)
batch_size = tf.shape(target_vars)[0]
# Sample latent variables
latent_vars = self.latent_dist.sample(batch_size)
        # Replicate along a new axis to evaluate several time points per sample (less noisy estimator)
target_vars = tf.stack([target_vars] * num_eval_points, axis=1)
latent_vars = tf.stack([latent_vars] * num_eval_points, axis=1)
full_cond = tf.stack([full_cond] * num_eval_points, axis=1)
# Sample time
time = tf.random.uniform((batch_size, num_eval_points, 1))
# Compute drift
net_out = self.drift_net(target_vars, latent_vars, time, full_cond, **kwargs)
# Return summary outputs or not, depending on parameter
if return_summary:
return net_out, summary_out
return net_out
def compute_loss(self, input_dict, **kwargs):
"""Computes the loss of the posterior amortizer given an input dictionary, which will
typically be the output of a Bayesian ``GenerativeModel`` instance.
Parameters
----------
input_dict : dict
Input dictionary containing the following mandatory keys, if ``DEFAULT_KEYS`` unchanged:
``targets`` - the latent variables over which a condition density is learned
``summary_conditions`` - the conditioning variables that are first passed through a summary network
            ``direct_conditions`` - the conditioning variables that are directly passed to the inference network
**kwargs : dict, optional, default: {}
Additional keyword arguments passed to the networks
For instance, ``kwargs={'training': True}`` is passed automatically during training.
Returns
-------
total_loss : tf.Tensor of shape (1,) - the total computed loss given input variables
"""
net_out, sum_out = self(input_dict, return_summary=True, **kwargs)
diff, drift = net_out
loss = self.loss_fun(diff, drift)
# Case summary loss should be computed
if self.summary_loss is not None:
sum_loss = self.summary_loss(sum_out)
# Case no summary loss, simply add 0 for convenience
else:
sum_loss = 0.0
# Compute and return total loss
total_loss = tf.reduce_mean(loss) + sum_loss
return total_loss
def sample(self, input_dict, n_samples, to_numpy=True, step_size=1e-3, **kwargs):
"""Generates random draws from the approximate posterior given a dictionary with conditonal variables.
Parameters
----------
input_dict : dict
Input dictionary containing the following mandatory keys, if ``DEFAULT_KEYS`` unchanged:
``summary_conditions`` : the conditioning variables (including data) that are first passed through a summary network
            ``direct_conditions`` : the conditioning variables that are directly passed to the inference network
n_samples : int
The number of posterior draws (samples) to obtain from the approximate posterior
to_numpy : bool, optional, default: True
Flag indicating whether to return the samples as a ``np.ndarray`` or a ``tf.Tensor``
        step_size : float, optional, default: 1e-3
The step size for the stochastic Euler solver.
**kwargs : dict, optional, default: {}
Additional keyword arguments passed to the networks
Returns
-------
post_samples : tf.Tensor or np.ndarray of shape (n_data_sets, n_samples, n_params)
The sampled parameters from the approximate posterior of each data set
"""
# Compute condition (direct, summary, or both)
_, conditions = self._compute_summary_condition(
input_dict.get(defaults.DEFAULT_KEYS["summary_conditions"]),
input_dict.get(defaults.DEFAULT_KEYS["direct_conditions"]),
training=False,
**kwargs,
)
n_data_sets = tf.shape(conditions)[0]
# Sample initial latent variables -> shape (n_data_sets, n_samples, latent_dim)
latent_vars = self.latent_dist.sample((n_data_sets, n_samples))
        # Replicate conditions and solve ODEs simultaneously
conditions = tf.stack([conditions] * n_samples, axis=1)
post_samples = self._solve_euler(latent_vars, conditions, step_size, **kwargs)
        # Remove the leading dimension in the single-data-set case
if n_data_sets == 1:
post_samples = tf.squeeze(post_samples, axis=0)
# Return numpy version of tensor or tensor itself
if to_numpy:
return post_samples.numpy()
return post_samples
def log_density(self, input_dict, to_numpy=True, step_size=1e-3, **kwargs):
"""Computes the log density..."""
# Compute condition (direct, summary, or both)
_, conditions = self._compute_summary_condition(
input_dict.get(defaults.DEFAULT_KEYS["summary_conditions"]),
input_dict.get(defaults.DEFAULT_KEYS["direct_conditions"]),
training=False,
**kwargs,
)
# Extract targets
target_vars = input_dict[defaults.DEFAULT_KEYS["targets"]]
# Reverse ODE and log pdf computation with the trace method
latents, trace = self._solve_euler_inv(target_vars, conditions, step_size, **kwargs)
lpdf = self.latent_dist.log_prob(latents) + trace
# Return numpy version of tensor or tensor itself
if to_numpy:
return lpdf.numpy()
return lpdf
def _solve_euler(self, latent_vars, condition, dt=1e-3, **kwargs):
"""Simple stochastic parallel Euler solver."""
num_steps = int(1 / dt)
time_vec = tf.zeros((tf.shape(latent_vars)[0], tf.shape(latent_vars)[1], 1))
target = tf.identity(latent_vars)
for _ in range(num_steps + 1):
target += self.drift_net.drift(target, time_vec, condition, **kwargs) * dt
time_vec += dt
return target
def _solve_euler_inv(self, targets, condition, dt=1e-3, **kwargs):
"""Solves the reverse ODE (negative direction of drift) and returns the trace."""
def velocity(latents, drift, time_vec, condition, **kwargs):
v = drift(latents, time_vec, condition, **kwargs)
return v
batch_size = tf.shape(targets)[0]
num_samples = tf.shape(targets)[1]
num_steps = int(1 / dt)
time_vec = tf.ones((batch_size, num_samples, 1))
trace = tf.zeros((batch_size, num_samples))
latents = tf.identity(targets)
for _ in range(num_steps + 1):
f = partial(velocity, drift=self.drift_net.drift, time_vec=time_vec, condition=condition)
drift_t, trace_t = compute_jacobian_trace(f, latents, **kwargs)
latents -= drift_t * dt
trace -= trace_t * dt
time_vec -= dt
return latents, trace
def _compute_summary_condition(self, summary_conditions, direct_conditions, **kwargs):
"""Determines how to concatenate the provided conditions."""
# Compute learnable summaries, if given
if self.summary_net is not None:
sum_condition = self.summary_net(summary_conditions, **kwargs)
else:
sum_condition = None
# Concatenate learnable summaries with fixed summaries
if sum_condition is not None and direct_conditions is not None:
full_cond = tf.concat([sum_condition, direct_conditions], axis=-1)
elif sum_condition is not None:
full_cond = sum_condition
elif direct_conditions is not None:
full_cond = direct_conditions
else:
            raise SummaryStatsError("Could not concatenate or determine conditioning inputs...")
return sum_condition, full_cond
def _determine_latent_dist(self, latent_dist):
"""Determines which latent distribution to use and defaults to unit normal if ``None`` provided."""
if latent_dist is None:
return tfp.distributions.MultivariateNormalDiag(loc=[0.0] * self.latent_dim)
else:
return latent_dist
def _determine_summary_loss(self, loss_fun):
"""Determines which summary loss to use if default `None` argument provided, otherwise return identity."""
# If callable, return provided loss
if loss_fun is None or callable(loss_fun):
return loss_fun
# If string, check for MMD or mmd
elif type(loss_fun) is str:
if loss_fun.lower() == "mmd":
return mmd_summary_space
else:
raise NotImplementedError("For now, only 'mmd' is supported as a string argument for summary_loss_fun!")
# Throw if loss type unexpected
else:
raise NotImplementedError(
"Could not infer summary_loss_fun, argument should be of type (None, callable, or str)!"
)
def _determine_loss(self, loss_fun):
"""Determines which summary loss to use if default ``None`` argument provided, otherwise return identity."""
if loss_fun is None:
return tf.keras.losses.log_cosh
return loss_fun
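# A minimal end-to-end sketch: wire a DriftNetwork into a RectifiedDistribution, compute the
# loss on one batch and draw posterior samples. The dictionary keys follow DEFAULT_KEYS as
# used above; all tensors are random placeholders and the dimensions are arbitrary assumptions.
def _example_rectified_pipeline(num_params=3, cond_dim=6, batch_size=16):
    drift_net = DriftNetwork(target_dim=num_params)
    amortizer = RectifiedDistribution(drift_net)
    input_dict = {
        defaults.DEFAULT_KEYS["targets"]: tf.random.normal((batch_size, num_params)),
        defaults.DEFAULT_KEYS["direct_conditions"]: tf.random.normal((batch_size, cond_dim)),
    }
    loss = amortizer.compute_loss(input_dict, training=True)
    samples = amortizer.sample(input_dict, n_samples=50, step_size=1e-2)
    return loss, samples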
| 19,610 | 45.035211 | 128 | py |
FinRL_Market_Simulator | FinRL_Market_Simulator-master/policy_twap.py | """
TWAP strategy
"""
import torch
import torch.nn as nn
import torch.optim as opt
from torch import Tensor
from torch.autograd import Variable
import torch.nn.functional as F
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
from constants import CODE_LIST, JUNE_DATE_LIST, VALIDATION_DATE_LIST, VALIDATION_CODE_LIST
from env import make_env
from pathos.multiprocessing import ProcessingPool as Pool
from sklearn.preprocessing import StandardScaler
from scipy.special import softmax, expit
from collections import deque
from tqdm import trange
import pandas as pd
import numpy as np
import itertools
import pdb
import os
class DefaultConfig(object):
path_raw_data = '/mnt/execution_data_v2/raw'
# path_pkl_data = '/data/execution_data/pkl'
path_pkl_data = '/mnt/execution_data_v2/pkl'
result_path = 'results/exp34'
code_list = CODE_LIST
date_list = JUNE_DATE_LIST
code_list_validation = VALIDATION_CODE_LIST
date_list_validation = VALIDATION_DATE_LIST
# Selected features
simulation_features = [
'bidPrice1', 'bidPrice2', 'bidPrice3', 'bidPrice4', 'bidPrice5',
'bidVolume1', 'bidVolume2', 'bidVolume3', 'bidVolume4', 'bidVolume5',
'askPrice1', 'askPrice2', 'askPrice3', 'askPrice4', 'askPrice5',
'askVolume1', 'askVolume2', 'askVolume3', 'askVolume4', 'askVolume5',
'high_low_price_diff', 'close_price', 'volume', 'vwap', 'time_diff',
'ask_bid_spread', 'ab_volume_misbalance', 'transaction_net_volume', 'volatility',
'trend', 'immediate_market_order_cost_bid',
]
# Stack the features of the previous x bars
simulation_loockback_horizon = 5
# Whether return flattened or stacked features of the past x bars
simulation_do_feature_flatten = True
# ############################### Trade Setting 1 Parameters ###############################
# # Planning horizon is 30mins
# simulation_planning_horizon = 30
# # Total volume to trade w.r.t. the basis volume
# simulation_volume_ratio = 0.005
# # Type of action space
# simulation_action_type = 'discrete_p'
# # Order volume = total volume / simulation_num_shares
# simulation_num_shares = 10
# # Use discrete actions
# simulation_discrete_actions = np.linspace(-30, 30, 61)
# ############################### END ######################################################
# ############################### Trade Setting 2 Parameters ###############################
# Planning horizon is 30mins
simulation_planning_horizon = 30
# Total volume to trade w.r.t. the basis volume
simulation_volume_ratio = 0.005
# Type of action space
simulation_action_type = 'discrete_q'
# Use discrete actions
simulation_discrete_actions = np.arange(31)
# ############################### END ######################################################
simulation_direction = 'sell'
# Quadratic penalty to minimize the impact of permanent market impact
# Penalty = coeff * basis_price / basis_volume
# Encourage a uniform liquidation strategy
simulation_linear_reg_coeff = 0.1
    # If the quantity is not fully filled at the last time step, we place an MO to liquidate and additionally apply a penalty
simulation_not_filled_penalty_bp = 2.0
# Scale the price delta if we use continuous actions
simulation_continuous_action_scale = 10
# Scale the reward to approx. unit range
simulation_reward_scale = 1000
class TWAP_Agent(object):
def __init__(self):
super(TWAP_Agent, self).__init__()
def act(self, market_state, private_state):
elapsed_time = private_state[0]
executed_quantity = 1 - private_state[1]
if elapsed_time >= executed_quantity:
return 0
else:
return 60
class TWAP_Agent2(object):
def __init__(self):
super(TWAP_Agent2, self).__init__()
def act(self, market_state, private_state):
return 1
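# A minimal sketch of the uniform (TWAP) scheduling rule used by TWAP_Agent above: place an
# order only when the executed fraction lags behind the elapsed-time fraction. The private
# state layout (elapsed time fraction, remaining quantity fraction) follows act(); the
# numbers below are placeholders.
def _example_twap_schedule(elapsed_time=0.4, remaining_quantity=0.7):
    executed_quantity = 1 - remaining_quantity      # 0.3 of the volume executed so far
    behind_schedule = elapsed_time >= executed_quantity
    # True  -> TWAP_Agent issues an order this step (action 60)
    # False -> TWAP_Agent does nothing (action 0)
    return behind_schedule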
class Evaluation(object):
def __init__(self, config):
super(Evaluation, self).__init__()
self.config = config
self.env = make_env(config)
def evaluate(self, agent):
def run(dumb):
bps = []
rews = []
for code in self.config.code_list_validation:
for date in self.config.date_list_validation:
record = self.evaluate_single(agent, code=code, date=date)
bps.append(record['BP'].values[-1])
rews.append(record['reward'].sum())
return np.mean(bps), np.mean(rews)
pool = Pool(80)
record = pool.map(run, list(range(1000)))
bp_list = [item[0] for item in record]
rew_list = [item[1] for item in record]
return dict(
BP_avg=np.mean(bp_list),
reward_avg=np.mean(rew_list),
BP_std=np.std(bp_list),
reward_std=np.std(rew_list)
)
def evaluate_detail_batch(self, agent, iteration=1,
code='000504.XSHE',
date_list=['2021-06-01', '2021-06-03', '2021-06-04', '2021-07-02', '2021-07-05', '2021-07-06']):
path = os.path.join(self.config.result_path, 'evaluation', 'it{:08d}'.format(iteration))
os.makedirs(path, exist_ok=True)
record = []
for date in date_list:
for i in range(5):
res = self.evaluate_single(agent, code=code, date=date)
record.append(res)
Figure().plot_policy(df=res, filename=os.path.join(path, 'fig_{}_{}_{}.png'.format(code, date, i)))
pd.concat(record).to_csv(os.path.join(path, 'detail_{}.csv'.format(code)))
def evaluate_single(self, agent, code='600519.XSHG', date='2021-06-01'):
record = []
sm, sp = self.env.reset(code, date)
done = False
step = 0
action = None
info = dict(status=None)
while not done:
action = agent.act(sm, sp)
nsm, nsp, reward, done, info = self.env.step(action)
record.append(dict(
code=code,
date=date,
step=step,
quantity=self.env.quantity,
action=action,
ask_price=self.env.data.obtain_level('askPrice', 1),
bid_price=self.env.data.obtain_level('bidPrice', 1),
order_price=np.round((1 + self.config.simulation_discrete_actions[action] / 10000) \
* self.env.data.obtain_level('askPrice', 1) * 100) / 100 if action is not None else None,
reward=reward,
cash=self.env.cash,
BP=self.env.get_metric('BP'),
IS=self.env.get_metric('IS'),
status=info['status'],
index=self.env.data.current_index
))
step += 1
sm, sp = nsm, nsp
return pd.DataFrame(record)
class Figure(object):
def __init__(self):
pass
@staticmethod
def plot_policy(df, filename):
fig, ax1 = plt.subplots(figsize=(15, 6))
ax2 = ax1.twinx()
ax1.plot(df['index'], df['ask_price'], label='ask_price')
ax1.plot(df['index'], df['bid_price'], label='bid_price')
ax1.plot(df['index'], df['order_price'], label='order_price')
ax1.legend(loc='lower left')
ax2.plot(df['index'], df['quantity'], 'k*', label='inventory')
ax1.set_title('{} {} BP={:.4f}'.format(df['code'].values[-1], df['date'].values[-1], df['BP'].values[-1]))
ax2.legend(loc='upper right')
plt.savefig(filename, bbox_inches='tight')
plt.close('all')
@staticmethod
def plot_training_process_basic(df, filename):
while df.shape[0] > 1500:
df = df[::2]
fig, ax1 = plt.subplots(figsize=(15, 6))
ax2 = ax1.twinx()
ax1.plot(df.index.values, df['reward'], 'C0', label='reward')
ax1.legend(loc='lower left')
ax2.plot(df.index.values, df['BP'], 'C1', label='BP')
ax2.legend(loc='upper right')
top_size = df.shape[0] // 10
mean_bp_first = np.mean(df['BP'].values[:top_size])
mean_bp_last = np.mean(df['BP'].values[-top_size:])
mean_rew_first = np.mean(df['reward'].values[:top_size])
mean_rew_last = np.mean(df['reward'].values[-top_size:])
ax2.set_title('BP {:.4f}->{:.4f} reward {:.4f}->{:.4f}'.format(mean_bp_first, mean_bp_last, mean_rew_first, mean_rew_last))
if 'loss' in df.columns:
ax3 = ax1.twinx()
p3, = ax3.plot(df.index.values, df['loss'], 'C2')
ax3.yaxis.label.set_color('C2')
plt.savefig(filename, bbox_inches='tight')
plt.close('all')
if __name__ == '__main__':
for i, lin_reg in enumerate([1.0, 0.1, 0.01]):
config = DefaultConfig()
config.simulation_linear_reg_coeff = lin_reg
evaluation = Evaluation(config)
agent = TWAP_Agent2()
result = evaluation.evaluate(agent)
print('Lin_reg={:.1E} BP={:.4f}({:.4f}) reward={:.4f}({:.4f})'\
.format(lin_reg, result['BP_avg'], result['BP_std'], result['reward_avg'], result['reward_std']))
evaluation.evaluate_detail_batch(agent, iteration=i+20)
| 9,303 | 35.486275 | 131 | py |
FinRL_Market_Simulator | FinRL_Market_Simulator-master/policy_tuned_ppo.py | """
Tuned PPO algorithm for optimized trade execution
"""
from env_v2 import make_env
from storage import RolloutStorage
from constants import CODE_LIST, JUNE_DATE_LIST, VALIDATION_DATE_LIST, VALIDATION_CODE_LIST
from sklearn.preprocessing import StandardScaler
from pathos.multiprocessing import ProcessingPool as Pool
from scipy.special import softmax, expit
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
from torch import Tensor
import torch.optim as opt
from tensorboardX import SummaryWriter
from collections import deque
from collections import namedtuple
from os import makedirs as mkdir
from os.path import join as joindir
from tqdm import trange
import numpy as np
import pandas as pd
import itertools
import argparse
import math
import time
import os
time_stamp = str(time.gmtime()[1]) + "-" + \
str(time.gmtime()[2]) + "-" + str(time.gmtime()[3]) + "-" + \
str(time.gmtime()[4]) + "-" + str(time.gmtime()[5])
Transition = namedtuple('Transition', ('sm', 'sp', 'value', 'action', 'logproba', 'mask', 'next_sm', 'next_sp', 'reward'))
EPS = 1e-10
# RESULT_DIR = 'results/ppo_exp1' # + time_stamp
# mkdir(RESULT_DIR, exist_ok=True)
# Hyperparameters
parser = argparse.ArgumentParser(description='Tuned PPO for optimized trade execution')
parser.add_argument('--arch', type=str, default='v1', choices=['v1', 'v2', 'v2-5', 'v3'])
parser.add_argument('--lr', type=float, default=3e-4)
parser.add_argument('--seed', type=int, default=8888)
args_ = parser.parse_args()
class DefaultConfig(object):
path_raw_data = '/data/execution_data/raw'
# path_pkl_data = '/data/execution_data/pkl'
path_pkl_data = '/mnt/execution_data_v2/pkl'
# path_pkl_data = os.path.expanduser('~/execution_data/pkl')
result_path = 'results/ppo_exp3'
code_list = CODE_LIST
date_list = JUNE_DATE_LIST
code_list_validation = VALIDATION_CODE_LIST
date_list_validation = VALIDATION_DATE_LIST
agent_scale = 1000
agent_batch_size = 2048
agent_learn_start = 1000
agent_gamma = 0.998
# agent_epsilon = 0.7
agent_total_steps = 20 * agent_scale
# Smooth L1 loss (SL1) or mean squared error (MSE)
# agent_loss_type = 'SL1'
# agent_lr_decay_freq = 2000
agent_eval_freq = 100
agent_plot_freq = 50
agent_device = 'cuda'
# Selected features
simulation_features = [
'bidPrice1', 'bidPrice2', 'bidPrice3', 'bidPrice4', 'bidPrice5',
'bidVolume1', 'bidVolume2', 'bidVolume3', 'bidVolume4', 'bidVolume5',
'askPrice1', 'askPrice2', 'askPrice3', 'askPrice4', 'askPrice5',
'askVolume1', 'askVolume2', 'askVolume3', 'askVolume4', 'askVolume5',
'high_low_price_diff', 'close_price', 'volume', 'vwap', 'time_diff',
'ask_bid_spread', 'ab_volume_misbalance', 'transaction_net_volume', 'volatility',
'trend', 'immediate_market_order_cost_bid',
]
# ############################### Trade Setting Parameters ###############################
# Planning horizon is 30mins
simulation_planning_horizon = 30
# Order volume = total volume / simulation_num_shares
simulation_num_shares = 10
# Total volume to trade w.r.t. the basis volume
simulation_volume_ratio = 0.005
# ############################### END ###############################
# ############################### Test Parameters ###############################
# Encourage a uniform liquidation strategy
simulation_linear_reg_coeff = [0.1, 0.01]
agent_network_structrue = None
# ############################### END ###############################
# Stack the features of the previous x bars
simulation_loockback_horizon = 5
# Whether return flattened or stacked features of the past x bars
simulation_do_feature_flatten = True
simulation_direction = 'sell'
    # If the quantity is not fully filled at the last time step, we place an MO to liquidate and additionally apply a penalty
simulation_not_filled_penalty_bp = 2.0
# Use discrete actions
simulation_discreate_actions = \
np.concatenate([[-50, -40, -30, -25, -20, -15], np.linspace(-10, 10, 21), [15, 20, 25, 30, 40, 50]])
# Scale the price delta if we use continuous actions
simulation_continuous_action_scale = 10
# Use 'discrete' or 'continuous' action space?
simulation_action_type = 'discrete'
# PPO parameters =====
# tricks
agent_learning_rate = [1e-4, 1e-5]
eps = 1e-5
clip_param = 0.2
num_epoch = 4
num_mini_batch = 32
value_loss_coef = 0.5
entropy_coef = 0.01
max_grad_norm = 0.5
use_clipped_value_loss = True
num_steps = 2048
gae_lambda = 0.95
use_linear_lr_decay = True
schedule_adam = 'linear'
schedule_clip = 'linear'
layer_norm = True
state_norm = True
advantage_norm = True
lossvalue_norm = True
clip = 0.2
lamda = 0.97
# ====================
seed = 3333
class Figure(object):
def __init__(self):
pass
@staticmethod
def plot_policy(df, filename):
fig, ax1 = plt.subplots(figsize=(15, 6))
ax2 = ax1.twinx()
ax1.plot(df['index'], df['ask_price'], label='ask_price')
ax1.plot(df['index'], df['bid_price'], label='bid_price')
ax1.plot(df['index'], df['order_price'], label='order_price')
ax1.legend(loc='lower left')
ax2.plot(df['index'], df['quantity'], 'k*', label='inventory')
ax1.set_title('{} {} BP={:.4f}'.format(df['code'].values[-1], df['date'].values[-1], df['BP'].values[-1]))
ax2.legend(loc='upper right')
plt.savefig(filename, bbox_inches='tight')
plt.close('all')
@staticmethod
def plot_training_process_basic(df, filename):
while df.shape[0] > 1500:
df = df[::2]
fig, ax1 = plt.subplots(figsize=(15, 6))
ax2 = ax1.twinx()
ax1.plot(df.index.values, df['reward'], 'C0', label='reward')
ax1.legend(loc='lower left')
ax2.plot(df.index.values, df['BP'], 'C1', label='BP')
ax2.legend(loc='upper right')
top_size = df.shape[0] // 10
mean_bp_first = np.mean(df['BP'].values[:top_size])
mean_bp_last = np.mean(df['BP'].values[-top_size:])
mean_rew_first = np.mean(df['reward'].values[:top_size])
mean_rew_last = np.mean(df['reward'].values[-top_size:])
ax2.set_title('BP {:.4f}->{:.4f} reward {:.4f}->{:.4f}'.format(mean_bp_first, mean_bp_last, mean_rew_first, mean_rew_last))
if 'loss' in df.columns:
ax3 = ax1.twinx()
p3, = ax3.plot(df.index.values, df['loss'], 'C2')
ax3.yaxis.label.set_color('C2')
plt.savefig(filename, bbox_inches='tight')
plt.close('all')
return dict(mean_bp_first=mean_bp_first, mean_bp_last=mean_bp_last, mean_rew_first=mean_rew_first, mean_rew_last=mean_rew_last)
class RunningStat(object):
def __init__(self, shape):
self._n = 0
self._M = np.zeros(shape)
self._S = np.zeros(shape)
def push(self, x):
x = np.asarray(x)
assert x.shape == self._M.shape
self._n += 1
if self._n == 1:
self._M[...] = x
else:
oldM = self._M.copy()
self._M[...] = oldM + (x - oldM) / self._n
self._S[...] = self._S + (x - oldM) * (x - self._M)
@property
def n(self):
return self._n
@property
def mean(self):
return self._M
@property
def var(self):
return self._S / (self._n - 1) if self._n > 1 else np.square(self._M)
@property
def std(self):
return np.sqrt(self.var)
@property
def shape(self):
return self._M.shape
class ZFilter:
"""
y = (x-mean)/std
using running estimates of mean,std
"""
def __init__(self, shape, demean=True, destd=True, clip=10.0):
self.demean = demean
self.destd = destd
self.clip = clip
self.rs = RunningStat(shape)
def __call__(self, x, update=True):
if update: self.rs.push(x)
if self.demean:
x = x - self.rs.mean
if self.destd:
x = x / (self.rs.std + 1e-8)
if self.clip:
x = np.clip(x, -self.clip, self.clip)
return x
def output_shape(self, input_space):
return input_space.shape
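# A minimal usage sketch of the running-normalization filter above: raw states update the
# running statistics and come back standardized and clipped. The state dimension and the
# synthetic state values are placeholders.
def _example_zfilter(dim=4, num_steps=100):
    zf = ZFilter((dim,), clip=5.0)
    normalized = None
    for _ in range(num_steps):
        raw_state = np.random.randn(dim) * 10.0 + 3.0
        normalized = zf(raw_state)  # pushes to RunningStat, then standardizes and clips
    return normalized, zf.rs.mean, zf.rs.std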
class Memory(object):
def __init__(self):
self.memory = []
def push(self, *args):
self.memory.append(Transition(*args))
def sample(self):
return Transition(*zip(*self.memory))
def __len__(self):
return len(self.memory)
class FixedCategorical(torch.distributions.Categorical):
def sample(self):
return super().sample().unsqueeze(-1)
def log_probs(self, actions):
return (
super()
.log_prob(actions.squeeze(-1))
.view(actions.size(0), -1)
.sum(-1)
.unsqueeze(-1)
)
def mode(self):
return self.probs.argmax(dim=-1, keepdim=True)
class Categorical(nn.Module):
def __init__(self, num_inputs, num_outputs):
super(Categorical, self).__init__()
def init(module, weight_init, bias_init, gain=1):
weight_init(module.weight.data, gain=gain)
bias_init(module.bias.data)
return module
init_ = lambda m: init(
m,
nn.init.orthogonal_,
lambda x: nn.init.constant_(x, 0),
gain=0.01)
self.linear = init_(nn.Linear(num_inputs, num_outputs))
def forward(self, x):
x = self.linear(x)
return FixedCategorical(logits=x)
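# A minimal sketch of the categorical policy head above: hidden features are mapped to a
# FixedCategorical distribution from which discrete actions and log-probabilities are drawn.
# Hidden size, action count and batch size are placeholders.
def _example_categorical_head(hidden=64, num_actions=31, batch_size=5):
    head = Categorical(hidden, num_actions)
    features = torch.randn(batch_size, hidden)
    dist = head(features)
    actions = dist.sample()              # shape (batch_size, 1)
    log_probs = dist.log_probs(actions)  # shape (batch_size, 1)
    return actions, log_probs, dist.entropy()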
class ActorCritic_v2_Discrete(nn.Module):
def __init__(self, num_inputs1, num_inputs2, num_outputs, hidden=64, layer_norm=True):
super(ActorCritic_v2_Discrete, self).__init__()
self.num_inputs1 = num_inputs1
self.num_inputs2 = num_inputs2
self.num_outputs = num_outputs
def init(module, weight_init, bias_init, gain=1):
weight_init(module.weight.data, gain=gain)
bias_init(module.bias.data)
return module
init_ = lambda m: init(m, nn.init.orthogonal_, lambda x: nn.init.
constant_(x, 0), np.sqrt(2))
self.actor_fc1 = nn.Sequential(init_(nn.Linear(num_inputs1, hidden*2)), nn.Tanh(),
init_(nn.Linear(hidden*2, hidden)), nn.Tanh())
self.actor_fc2 = nn.Sequential(init_(nn.Linear(num_inputs2, hidden)), nn.Tanh())
self.actor_fc3 = nn.Sequential(init_(nn.Linear(hidden*2, hidden)), nn.Tanh())
self.dist = Categorical(hidden, num_outputs)
self.critic_fc1 = nn.Sequential(init_(nn.Linear(num_inputs1, hidden*2)), nn.Tanh(),
init_(nn.Linear(hidden*2, hidden)), nn.Tanh())
self.critic_fc2 = nn.Sequential(init_(nn.Linear(num_inputs2, hidden)), nn.Tanh())
self.critic_fc3 = nn.Sequential(init_(nn.Linear(hidden*2, hidden)), nn.Tanh())
self.critic_linear = init_(nn.Linear(hidden, 1))
self.train()
def forward(self, market_states, private_states):
"""
        Runs the policy network (actor) as well as the value network (critic).
        :param market_states: 2-D tensor of market states
        :param private_states: 2-D tensor of private states
        :return: the critic value and the actor's hidden features
"""
hidden_actor = self._forward_actor(market_states, private_states)
hidden_critic = self._forward_critic(market_states, private_states)
critic_value = self.critic_linear(hidden_critic)
return critic_value, hidden_actor
def _forward_actor(self, market_states, private_states):
market = self.actor_fc1(market_states)
private = self.actor_fc2(private_states)
states = torch.cat((market, private), 1) # (1, hidden) + (1, hidden) => (1, hidden * 2)
hidden_actor = self.actor_fc3(states)
return hidden_actor
def _forward_critic(self, market_states, private_states):
market = self.critic_fc1(market_states)
private = self.critic_fc2(private_states)
states = torch.cat((market, private), 1)
hidden_critic = self.critic_fc3(states)
return hidden_critic
def act(self, market_states, private_states):
value, actor_features = self.forward(market_states, private_states)
dist = self.dist(actor_features)
action = dist.sample()
action_log_probs = dist.log_probs(action)
return value, action, action_log_probs
def get_value(self, market_states, private_states):
value, _ = self.forward(market_states, private_states)
return value
def evaluate_actions(self, market_states, private_states, action):
value, actor_features = self.forward(market_states, private_states)
dist = self.dist(actor_features)
action_log_probs = dist.log_probs(action)
dist_entropy = dist.entropy().mean()
return value, action_log_probs, dist_entropy
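# A minimal sketch of one acting/evaluation round-trip through the actor-critic above. The
# market/private state dimensions and the number of actions are placeholders; in training
# they come from the environment and DefaultConfig.
def _example_actor_critic_forward(dim_market=155, dim_private=2, num_actions=31):
    net = ActorCritic_v2_Discrete(dim_market, dim_private, num_actions)
    market = torch.randn(1, dim_market)
    private = torch.randn(1, dim_private)
    value, action, action_log_prob = net.act(market, private)
    value_eval, log_prob_eval, entropy = net.evaluate_actions(market, private, action)
    return value, action, action_log_prob, entropy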
class Agent(object):
def __init__(self, config, writer):
super(Agent, self).__init__()
self._set_seed()
# ==== initialization ====
self.clip_param = config.clip_param
self.ppo_epoch = config.num_epoch
self.num_mini_batch = config.num_mini_batch
self.value_loss_coef = config.value_loss_coef
self.entropy_coef = config.entropy_coef
self.max_grad_norm = config.max_grad_norm
self.use_clipped_value_loss = config.use_clipped_value_loss
self.num_steps = config.num_steps
self.use_linear_lr_decay = config.use_linear_lr_decay
self.config = config
self.env = make_env(config)
self.dim_input1 = self.env.observation_dim # dimension of market states
self.dim_input2 = 2 # dimension of private states
self.dim_output = self.env.action_dim # for continuous, =1
network = config.agent_network_structrue
self.network = network(self.dim_input1, self.dim_input2, self.dim_output).to(device=self.config.agent_device)
self.optimizer = opt.Adam(self.network.parameters(), lr=config.agent_learning_rate, eps=config.eps)
# =========================
# ==== Print Parameters ====
print("Network:", config.agent_network_structrue)
print("Learning Rate:", config.agent_learning_rate)
print("EPS:", config.eps)
print("Clip param:", self.clip_param)
print("PPO epoch:", self.ppo_epoch)
print("Num mini batch:", self.num_mini_batch)
print("Value loss coef:", self.value_loss_coef)
print("Entropy coef:", self.entropy_coef)
print("Max grad norm:", self.max_grad_norm)
print("Use clipped value loss:", self.use_clipped_value_loss)
print("Num steps:", self.num_steps)
print("use_linear_lr_decay:", self.use_linear_lr_decay)
# ===========================
self.rollouts = RolloutStorage(self.num_steps, self.dim_input1, self.dim_input2, self.dim_output)
self.running_state_m = ZFilter((self.dim_input1,), clip=5.0)
self.running_state_p = ZFilter((self.dim_input2,), clip=5.0)
self.writer = writer
self.evaluation = Evaluation(self.config)
@staticmethod
def _filter(state):
return np.clip(state, -3, 3)
def _set_seed(self, seed=None):
if seed is None:
seed = int.from_bytes(os.urandom(4), byteorder='little')
else:
seed = seed + 1234
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
def learn(self):
train_record = []
eval_record = []
# record average 1-round cumulative reward in every episode
# reward_record = []
global_steps = 0
ms_scaler = StandardScaler()
self.env.reset() # warm up the environment
# ==== market state normalization ====
obs_market_list = []
for _ in range(self.num_steps):
# random sample action to collect some samples
a = self.env.action_sample_func()
obs_market, obs_private, reward, done, info = self.env.step(a)
if done:
obs_market, obs_private = self.env.reset()
obs_market_list.append(obs_market)
ms_scaler.fit(np.array(obs_market_list))
# =====================================
obs_market, obs_private = self.env.reset()
obs_market = self._filter(ms_scaler.transform(np.array(obs_market).reshape(1, -1)))[0]
self.rollouts.obs_market[0].copy_(torch.from_numpy(obs_market))
self.rollouts.obs_private[0].copy_(torch.from_numpy(obs_private))
self.rollouts.to(self.config.agent_device)
for i_episode in trange(self.config.agent_total_steps):
reward_list = []
if self.use_linear_lr_decay:
# decrease learning rate linearly
lr = self.config.agent_learning_rate - (self.config.agent_learning_rate * (i_episode / float(self.config.agent_total_steps)))
for param_group in self.optimizer.param_groups:
param_group['lr'] = lr
reward_sum = 0
t = 0
for step in range(self.num_steps):
# (1) Sample actions
with torch.no_grad():
value, action, action_log_prob = self.network.act(
self.rollouts.obs_market[step].unsqueeze(0), self.rollouts.obs_private[step].unsqueeze(0))
                # Observe reward and next obs
obs_market, obs_private, reward, done, info = self.env.step(action)
obs_market = self._filter(ms_scaler.transform(np.array(obs_market).reshape(1, -1)))[0]
# If done then clean the history of observations.
masks = torch.FloatTensor((0.0,)) if done else torch.FloatTensor((1.0,))
reward = torch.FloatTensor((reward,))
reward_sum += reward
if done:
train_record.append(dict(
i=i_episode,
reward=reward_sum,
BP=self.env.get_metric('BP'),
IS=self.env.get_metric('IS'),
code=info['code'],
date=info['date'],
start_index=info['start_index']
))
reward_list.append(reward_sum)
global_steps += (t + 1)
reward_sum = 0
t = 0
obs_market, obs_private = self.env.reset()
obs_market = self._filter(ms_scaler.transform(np.array(obs_market).reshape(1, -1)))[0]
t = t + 1
self.rollouts.insert(torch.from_numpy(obs_market), torch.from_numpy(obs_private),
action[0], action_log_prob[0], value[0], reward, masks)
# reward_record.append({
# 'episode': i_episode,
# 'steps': global_steps,
# 'meanepreward': torch.mean(reward_list)})
with torch.no_grad():
next_value = self.network.get_value(
self.rollouts.obs_market[-1].unsqueeze(0), self.rollouts.obs_private[-1].unsqueeze(0)).detach()
self.rollouts.compute_returns(next_value[0], self.config.agent_gamma, self.config.gae_lambda)
advantages = self.rollouts.returns[:-1] - self.rollouts.value_preds[:-1]
advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-5)
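            # Descriptive note: advantages are the GAE-style returns (from compute_returns above) minus the
            # critic's value predictions, standardized over the rollout to keep the policy-gradient step well scaled.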
value_loss_epoch = 0
action_loss_epoch = 0
dist_entropy_epoch = 0
for e in range(self.ppo_epoch):
data_generator = self.rollouts.feed_forward_generator(advantages, self.num_mini_batch)
for sample in data_generator:
obs_market_batch, obs_private_batch, actions_batch, \
value_preds_batch, return_batch, masks_batch, old_action_log_probs_batch, \
adv_targ = sample
# Reshape to do in a single forward pass for all steps
values, action_log_probs, dist_entropy = self.network.evaluate_actions(
obs_market_batch, obs_private_batch, actions_batch)
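                    # PPO clipped surrogate objective: with probability ratio r = exp(log_pi_new - log_pi_old),
                    # the policy loss is -E[min(r * A, clip(r, 1 - clip_param, 1 + clip_param) * A)],
                    # which keeps each update close to the policy that collected the rollout.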
ratio = torch.exp(action_log_probs - old_action_log_probs_batch)
surr1 = ratio * adv_targ
surr2 = torch.clamp(ratio, 1.0 - self.clip_param,
1.0 + self.clip_param) * adv_targ
action_loss = -torch.min(surr1, surr2).mean()
if self.use_clipped_value_loss:
value_pred_clipped = value_preds_batch + \
(values - value_preds_batch).clamp(-self.clip_param, self.clip_param)
value_losses = (values - return_batch).pow(2)
value_losses_clipped = (value_pred_clipped - return_batch).pow(2)
value_loss = 0.5 * torch.max(value_losses,
value_losses_clipped).mean()
else:
value_loss = 0.5 * (return_batch - values).pow(2).mean()
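                    # Total loss below: value_loss * value_loss_coef + action_loss - dist_entropy * entropy_coef,
                    # optimized jointly with gradient-norm clipping.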
self.optimizer.zero_grad()
(value_loss * self.value_loss_coef + action_loss -
dist_entropy * self.entropy_coef).backward()
nn.utils.clip_grad_norm_(self.network.parameters(),
self.max_grad_norm)
self.optimizer.step()
value_loss_epoch += value_loss.item()
action_loss_epoch += action_loss.item()
dist_entropy_epoch += dist_entropy.item()
num_updates = self.ppo_epoch * self.num_mini_batch
value_loss_epoch /= num_updates
action_loss_epoch /= num_updates
dist_entropy_epoch /= num_updates
# value_loss_epoch, action_loss_epoch, dist_entropy_epoch
self.rollouts.after_update()
# Step 5: Evaluate and log performance
if i_episode % self.config.agent_plot_freq == 0 and len(train_record) > 0:
print(train_record[-1])
self.evaluation.evaluate_detail_batch(self.network, ms_scaler, iteration=i_episode)
self.writer.add_scalar("train/reward", torch.mean(train_record[-1]['reward']), i_episode)
self.writer.add_scalar("train/BP", train_record[-1]['BP'], i_episode)
self.writer.add_scalar("train/IS", train_record[-1]['IS'], i_episode)
self.writer.add_scalar("train/value_loss_epoch", value_loss_epoch, i_episode)
self.writer.add_scalar("train/action_loss_epoch", action_loss_epoch, i_episode)
self.writer.add_scalar("train/dist_entropy_epoch", dist_entropy_epoch, i_episode)
if i_episode % self.config.agent_eval_freq == 0:
eval_record.append(self.evaluation.evaluate(self.network, ms_scaler))
print("BP:", eval_record[-1]['BP'], 'Reward:', eval_record[-1]['reward'])
np.save(self.config.result_path + "/eval_record_"+str(i_episode)+".npy", eval_record[-1]['ac_list'])
self.writer.add_scalar("eval/reward", np.mean(eval_record[-1]['reward']), i_episode)
self.writer.add_scalar("eval/BP", np.mean(eval_record[-1]['BP']), i_episode)
self.writer.add_scalar("eval/ac_min", np.mean(eval_record[-1]['ac_min']), i_episode)
self.writer.add_scalar("eval/ac_max", np.mean(eval_record[-1]['ac_max']), i_episode)
self.writer.add_scalar("eval/ac_mean", np.mean(eval_record[-1]['ac_mean']), i_episode)
return train_record, eval_record
class Evaluation(object):
def __init__(self, config):
super(Evaluation, self).__init__()
self.config = config
self.env = make_env(config)
def evaluate(self, network, scalar):
bp_list = []
rew_list = []
ac_list = []
ac_mean_list = []
ac_logstd_list = []
for code in self.config.code_list_validation:
for date in self.config.date_list_validation:
record, action_list, action_mean_list, action_logstd_list = self.evaluate_single(network, scalar, code=code, date=date)
bp_list.append(record['BP'].values[-1])
rew_list.append(record['reward'].sum())
ac_list.append(action_list)
ac_mean_list.append(action_mean_list)
ac_logstd_list.append(action_logstd_list)
return dict(
BP=np.mean(bp_list),
reward=np.mean(rew_list),
ac_min = np.min(ac_list),
ac_max = np.max(ac_list),
ac_mean = np.mean(ac_list),
ac_list = ac_list
)
def evaluate_detail_batch(self, network, scalar, iteration=1,
code='000504.XSHE',
date_list=['2021-06-01', '2021-06-03', '2021-06-04', '2021-07-02', '2021-07-05', '2021-07-06']):
path = os.path.join(self.config.result_path, 'evaluation', 'it{:08d}'.format(iteration))
os.makedirs(path, exist_ok=True)
record = []
for date in date_list:
for i in range(5):
res, _, _, _ = self.evaluate_single(network, scalar, code=code, date=date)
record.append(res)
Figure().plot_policy(df=res, filename=os.path.join(path, 'fig_{}_{}_{}.png'.format(code, date, i)))
pd.concat(record).to_csv(os.path.join(path, 'detail_{}.csv'.format(code)))
def evaluate_single(self, network, scalar, code='600519.XSHG', date='2021-06-01'):
record = []
sm, sp = self.env.reset(code, date)
done = False
step = 0
action = None
info = dict(status=None)
action_list = []
action_mean_list = []
action_logstd_list = []
while not done:
sm = Agent._filter(scalar.transform(sm.reshape(1, -1)))[0]
value, action, action_log_prob = network.act(Tensor(sm).unsqueeze(0).to(device=self.config.agent_device),
Tensor(sp).unsqueeze(0).to(device=self.config.agent_device))
action = action.item()
action_list.append(action)
action_logstd_list.append(action_log_prob.item())
nsm, nsp, reward, done, info = self.env.step(action)
record.append(dict(
code=code,
date=date,
step=step,
quantity=self.env.quantity,
action=action,
ask_price=self.env.data.obtain_level('askPrice', 1),
bid_price=self.env.data.obtain_level('bidPrice', 1),
order_price=np.round((1 + self.config.simulation_discreate_actions[action] / 10000) \
* self.env.data.obtain_level('askPrice', 1) * 100) / 100 if action is not None else None,
reward=reward,
cash=self.env.cash,
BP=self.env.get_metric('BP'),
IS=self.env.get_metric('IS'),
status=info['status'],
index=self.env.data.current_index
))
step += 1
sm, sp = nsm, nsp
return pd.DataFrame(record), action_list, action_mean_list, action_logstd_list
def run(argus):
model, lr, lin_reg, num_epoch, parallel_id = argus
config = DefaultConfig()
config.agent_learning_rate = lr
config.simulation_linear_reg_coeff = lin_reg
config.num_epoch = num_epoch
# config.simulation_continuous_action_scale = action_scale
# config.agent_network_structrue = model
if model == 'v2-5':
print("discrete ppo")
config.agent_network_structrue = ActorCritic_v2_Discrete
# elif model == 'v3':
# config.agent_network_structrue = ActorCritic_v3
else:
raise NotImplementedError
info = dict(learning_rate=lr, linear_reg=lin_reg, num_epoch=num_epoch, architecture=config.agent_network_structrue.__name__, parallel_id=parallel_id)
print("Config:", info)
id_str = '{}_lr-{:.1E}_linreg-{:.1E}_numepoch-{}_id-{}'.format(model, lr, lin_reg, num_epoch, parallel_id)
config.result_path = os.path.join(config.result_path, id_str)
print("result path:", config.result_path)
os.makedirs(config.result_path, exist_ok=True)
extend_path = lambda x: os.path.join(config.result_path, x)
writer = SummaryWriter(config.result_path + '/logs-' + str(parallel_id))
agent = Agent(config, writer)
train_record, eval_record = agent.learn()
train_record, eval_record = pd.DataFrame(train_record), pd.DataFrame(eval_record)
train_record.to_csv(extend_path('dqn_train_record.csv'))
eval_record.to_csv(extend_path('dqn_eval_record.csv'))
train_info = Figure().plot_training_process_basic(train_record, extend_path('dqn_train_record.png'))
eval_info = Figure().plot_training_process_basic(eval_record, extend_path('dqn_eval_record.png'))
info.update({('trn_' + k): v for k, v in train_info.items()})
info.update({('val_' + k): v for k, v in eval_info.items()})
return info
if __name__ == '__main__':
record = []
# test_list = list(itertools.product(['v1', 'v2', 'v3'], [3e-4, 1e-4], [0.1, 0.01], [3, 5, 10], np.arange(5)))
test_list = list(itertools.product(['v2-5',], [5e-5], [0.01,], [4,], np.arange(3)))
pool = Pool(3)
record = pool.map(run, test_list)
record = pd.DataFrame(record)
record.to_csv(os.path.join(DefaultConfig().result_path, 'result_original.csv'))
stats = record.groupby(['learning_rate', 'linear_reg', 'architecture']).agg([np.mean, np.std])
stats.to_csv(os.path.join(DefaultConfig().result_path, 'result_stats.csv'))
| 28,315 | 34.572864 | 153 | py |
FinRL_Market_Simulator | FinRL_Market_Simulator-master/policy_tuned_dqn.py | """
Tuned DQN algorithm for optimized trade execution
"""
import torch
import torch.nn as nn
import torch.optim as opt
from torch import Tensor
from torch.autograd import Variable
import torch.nn.functional as F
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
from constants import CODE_LIST, JUNE_DATE_LIST, VALIDATION_DATE_LIST, VALIDATION_CODE_LIST
from env import make_env
from pathos.multiprocessing import ProcessingPool as Pool
from sklearn.preprocessing import StandardScaler
from scipy.special import softmax, expit
from collections import deque
from tqdm import trange
import pandas as pd
import numpy as np
import itertools
import pdb
import os
class DefaultConfig(object):
path_raw_data = '/data/execution_data_v2/raw'
# path_pkl_data = '/data/execution_data/pkl'
path_pkl_data = '/mnt/execution_data_v2/pkl'
# path_pkl_data = os.path.expanduser('~/execution_data/pkl')
result_path = 'results/exp36'
code_list = CODE_LIST
date_list = JUNE_DATE_LIST
code_list_validation = VALIDATION_CODE_LIST
date_list_validation = VALIDATION_DATE_LIST
agent_scale = 100000
agent_batch_size = 128
agent_learn_start = 1000
agent_gamma = 0.998
agent_epsilon = 0.7
agent_total_steps = 20 * agent_scale
agent_buffer_size = agent_scale
agent_network_update_freq = 4
# Smooth L1 loss (SL1) or mean squared error (MSE)
agent_loss_type = 'SL1'
agent_lr_decay_freq = 2000
agent_target_update_freq = 2000
agent_eval_freq = 2000
    # Multiplicative epsilon decay; the cumulative decay factor reaches 0.01 after agent_scale * 0.5 decay steps
agent_epsilon_decay = np.exp(np.log(0.01) / (agent_scale * 0.5))
agent_plot_freq = 20000
agent_device = 'cuda'
# Selected features
simulation_features = [
'bidPrice1', 'bidPrice2', 'bidPrice3', 'bidPrice4', 'bidPrice5',
'bidVolume1', 'bidVolume2', 'bidVolume3', 'bidVolume4', 'bidVolume5',
'askPrice1', 'askPrice2', 'askPrice3', 'askPrice4', 'askPrice5',
'askVolume1', 'askVolume2', 'askVolume3', 'askVolume4', 'askVolume5',
'high_low_price_diff', 'close_price', 'volume', 'vwap', 'time_diff',
'ask_bid_spread', 'ab_volume_misbalance', 'transaction_net_volume', 'volatility',
'trend', 'immediate_market_order_cost_bid',
]
# ############################### Trade Setting Parameters ###############################
# Planning horizon is 30mins
simulation_planning_horizon = 30
# Total volume to trade w.r.t. the basis volume
simulation_volume_ratio = 0.005
# Order volume = total volume / simulation_num_shares
simulation_num_shares = 10
    # Maximum quantity is total_quantity / simulation_num_shares; further divide this into 3 levels
simulation_discrete_quantities = 3
# Choose the wrapper
simulation_action_type = 'discrete_pq'
# Discrete action space
simulation_discrete_actions = \
list(itertools.product(
np.concatenate([[-50, -40, -30, -25, -20, -15], np.linspace(-10, 10, 21), [15, 20, 25, 30, 40, 50]]),
np.arange(simulation_discrete_quantities) + 1
))
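    # This yields 33 price offsets (interpreted downstream as basis-point shifts from the level-1 ask price)
    # x 3 quantity levels = 99 discrete actions in total.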
# ############################### END ###############################
# ############################### Test Parameters ###############################
# Encourage a uniform liquidation strategy
simulation_linear_reg_coeff = [0.1]
agent_learning_rate = [2e-5, 1e-5, 5e-6]
agent_network_structrue = 'MLPNetwork_complex,MLPNetwork_Xcomplex'
# ############################### END ###############################
# Stack the features of the previous x bars
simulation_loockback_horizon = 5
    # Whether to return flattened or stacked features of the past x bars
simulation_do_feature_flatten = True
simulation_direction = 'sell'
    # If the quantity is not fully filled at the last time step, we liquidate it with a market order (MO) and additionally apply a penalty
simulation_not_filled_penalty_bp = 2.0
# Scale the price delta if we use continuous actions
# simulation_continuous_action_scale = 10
# The Q network
class MLPNetwork(nn.Module):
def __init__(self, dim_input1, dim_input2, dim_output, hidden=128):
super(MLPNetwork, self).__init__()
self.dim_input1 = dim_input1
self.dim_input2 = dim_input2
self.dim_output = dim_output
self.fc1 = nn.Linear(dim_input1, 2 * hidden)
self.fc2 = nn.Linear(2 * hidden, hidden)
self.fc3 = nn.Linear(dim_input2, hidden)
self.fc4 = nn.Linear(2 * hidden, dim_output)
def forward(self, market_states, private_states):
x = F.relu(self.fc1(market_states))
x = F.relu(self.fc2(x))
y = F.relu(self.fc3(private_states))
z = torch.cat((x, y), 1)
z = self.fc4(z)
return z
def act(self, market_state, private_state, device='cuda'):
market_state = Tensor(market_state).unsqueeze(0).to(device=device)
private_state = Tensor(private_state).unsqueeze(0).to(device=device)
return int(self.forward(market_state, private_state).argmax(1)[0])
def act_egreedy(self, market_state, private_state, e=0.7, device='cuda'):
        return self.act(market_state, private_state, device=device) if np.random.rand() > e \
else np.random.randint(self.dim_output)
# The Q network - more parameters
class MLPNetwork_complex(nn.Module):
def __init__(self, dim_input1, dim_input2, dim_output, hidden=256):
super(MLPNetwork_complex, self).__init__()
self.dim_input1 = dim_input1
self.dim_input2 = dim_input2
self.dim_output = dim_output
self.fc1 = nn.Linear(dim_input1, 2 * hidden)
self.fc2 = nn.Linear(2 * hidden, hidden)
self.fc3 = nn.Linear(dim_input2, hidden)
self.fc4 = nn.Linear(2 * hidden, hidden)
self.fc5 = nn.Linear(hidden, dim_output)
def forward(self, market_states, private_states):
x = F.relu(self.fc1(market_states))
x = F.relu(self.fc2(x))
y = F.relu(self.fc3(private_states))
z = torch.cat((x, y), 1)
z = F.relu(self.fc4(z))
z = self.fc5(z)
return z
def act(self, market_state, private_state, device='cuda'):
market_state = Tensor(market_state).unsqueeze(0).to(device=device)
private_state = Tensor(private_state).unsqueeze(0).to(device=device)
return int(self.forward(market_state, private_state).argmax(1)[0])
def act_egreedy(self, market_state, private_state, e=0.7, device='cuda'):
        return self.act(market_state, private_state, device=device) if np.random.rand() > e \
else np.random.randint(self.dim_output)
# The Q network - more more parameters
class MLPNetwork_Xcomplex(nn.Module):
def __init__(self, dim_input1, dim_input2, dim_output, hidden=512):
super(MLPNetwork_Xcomplex, self).__init__()
self.dim_input1 = dim_input1
self.dim_input2 = dim_input2
self.dim_output = dim_output
self.fc1 = nn.Linear(dim_input1, 2 * hidden)
self.fc2 = nn.Linear(2 * hidden, hidden)
self.fc3 = nn.Linear(dim_input2, hidden)
self.fc4 = nn.Linear(2 * hidden, hidden)
self.fc5 = nn.Linear(hidden, hidden)
self.fc6 = nn.Linear(hidden, dim_output)
def forward(self, market_states, private_states):
x = F.relu(self.fc1(market_states))
x = F.relu(self.fc2(x))
y = F.relu(self.fc3(private_states))
z = torch.cat((x, y), 1)
z = F.relu(self.fc4(z))
z = F.relu(self.fc5(z))
z = self.fc6(z)
return z
def act(self, market_state, private_state, device='cuda'):
market_state = Tensor(market_state).unsqueeze(0).to(device=device)
private_state = Tensor(private_state).unsqueeze(0).to(device=device)
return int(self.forward(market_state, private_state).argmax(1)[0])
def act_egreedy(self, market_state, private_state, e=0.7, device='cuda'):
        return self.act(market_state, private_state, device=device) if np.random.rand() > e \
else np.random.randint(self.dim_output)
# The Q network - more parameters + positional encoding
class MLPNetwork_complex_posenc(nn.Module):
def __init__(self, dim_input1, dim_input2, dim_output, hidden=256):
super(MLPNetwork_complex_posenc, self).__init__()
self.dim_input1 = dim_input1
self.dim_input2 = dim_input2
self.dim_output = dim_output
self.hidden = hidden
self.fc1 = nn.Linear(dim_input1, 2 * hidden)
self.fc2 = nn.Linear(2 * hidden, hidden)
self.fc4 = nn.Linear(2 * hidden, hidden)
self.fc5 = nn.Linear(hidden, dim_output)
def forward(self, market_states, private_states):
y = torch.einsum('bi, j->bij', private_states, torch.arange(self.hidden // self.dim_input2, device=private_states.device))
y = y.view(-1, self.hidden)
y = torch.sin(y * 12345).detach()
x = F.relu(self.fc1(market_states))
x = F.relu(self.fc2(x))
z = torch.cat((x, y), 1)
z = F.relu(self.fc4(z))
z = self.fc5(z)
return z
def act(self, market_state, private_state, device='cuda'):
market_state = Tensor(market_state).unsqueeze(0).to(device=device)
private_state = Tensor(private_state).unsqueeze(0).to(device=device)
return int(self.forward(market_state, private_state).argmax(1)[0])
def act_egreedy(self, market_state, private_state, e=0.7, device='cuda'):
        return self.act(market_state, private_state, device=device) if np.random.rand() > e \
else np.random.randint(self.dim_output)
class ReplayBuffer(object):
"""docstring for ReplayBuffer"""
def __init__(self, maxlen):
super(ReplayBuffer, self).__init__()
self.maxlen = maxlen
self.data = deque(maxlen=maxlen)
def push(self, *args):
self.data.append(args)
def sample(self, batch_size):
inds = np.random.choice(len(self.data), batch_size, replace=False)
return zip(*[self.data[i] for i in inds])
def sample_all(self):
return zip(*list(self.data))
def update_all(self, new_data, ind):
for i in range(len(self.data)):
tup = list(self.data[i])
tup[ind] = new_data[i, :]
self.data[i] = tuple(tup)
class Agent(object):
def __init__(self, config):
super(Agent, self).__init__()
self._set_seed()
self.config = config
self.env = make_env(config)
self.dim_input1 = self.env.observation_dim # dimension of market states
self.dim_input2 = 2 # dimension of private states
self.dim_output = self.env.action_dim
network = config.agent_network_structrue
self.network = network(self.dim_input1, self.dim_input2, self.dim_output).to(device=self.config.agent_device)
self.network_target = network(self.dim_input1, self.dim_input2, self.dim_output).to(device=self.config.agent_device)
self.network_target.load_state_dict(self.network.state_dict())
self.optimizer = opt.Adam(self.network.parameters(), lr=config.agent_learning_rate)
self.scheduler = opt.lr_scheduler.StepLR(self.optimizer, step_size=config.agent_lr_decay_freq, gamma=0.998)
self.buffer = ReplayBuffer(self.config.agent_buffer_size)
self.evaluation = Evaluation(self.config)
if config.agent_loss_type == 'MSE':
self.loss_func = nn.MSELoss()
elif config.agent_loss_type == 'SL1':
self.loss_func = F.smooth_l1_loss
def _set_seed(self, seed=None):
if seed is None:
seed = int.from_bytes(os.urandom(4), byteorder='little')
else:
seed = seed + 1234
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
@staticmethod
def _filter(state):
return np.clip(state, -3, 3)
def _to_tensor(self, tensor, dtype=torch.float):
return torch.tensor(tensor, dtype=dtype, device=self.config.agent_device)
def learn(self):
train_record = []
eval_record = []
reward = 0
eplen = 0
loss = 0
avg_Q = 0
epsilon = self.config.agent_epsilon
ms_scaler = StandardScaler()
sm, sp = self.env.reset()
for i in trange(self.config.agent_total_steps):
# Step 1: Execute one step and store it to the replay buffer
if i <= self.config.agent_learn_start:
a = self.env.action_sample_func()
else:
tsm = ms_scaler.transform(sm.reshape(1, -1)).flatten()
a = self.network.act_egreedy(tsm, sp, e=epsilon, device=self.config.agent_device)
nsm, nsp, r, done, info = self.env.step(a)
self.buffer.push(sm, sp, a, r, nsm, nsp, done)
reward += r
eplen += 1
if done:
train_record.append(dict(
i=i,
reward=reward,
eplen=eplen,
epsilon=epsilon,
lr=self.optimizer.param_groups[0]['lr'],
loss=float(loss),
avg_Q=float(avg_Q),
BP=self.env.get_metric('BP'),
IS=self.env.get_metric('IS'),
code=info['code'],
date=info['date'],
start_index=info['start_index']
))
reward = 0
eplen = 0
epsilon = max(0.01, epsilon * self.config.agent_epsilon_decay)
sm, sp = self.env.reset()
else:
sm, sp = nsm, nsp
# Step 2: Estimate variance for market states
if i == self.config.agent_learn_start:
market_states, _, _, _, nmarket_states, _, _ = self.buffer.sample_all()
ms_scaler.fit(np.array(market_states))
                # Since the buffer keeps being used later, scale the market states already stored in it
self.buffer.update_all(ms_scaler.transform(market_states), 0)
self.buffer.update_all(ms_scaler.transform(nmarket_states), 4)
# Step 3: Update the network every several steps
if i >= self.config.agent_learn_start and i % self.config.agent_network_update_freq == 0:
# sample a batch from the replay buffer
bsm, bsp, ba, br, bnsm, bnsp, bd = self.buffer.sample(self.config.agent_batch_size)
market_states = self._to_tensor(self._filter(ms_scaler.transform(np.array(bsm))))
private_states = self._to_tensor(np.array(bsp))
actions = self._to_tensor(np.array(ba), dtype=torch.long)
rewards = self._to_tensor(np.array(br))
nmarket_states = self._to_tensor(self._filter(ms_scaler.transform(np.array(bnsm))))
nprivate_states = self._to_tensor(np.array(bnsp))
masks = self._to_tensor(1 - np.array(bd) * 1)
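                # Double-DQN-style target: the online network selects the next action (argmax of its Q-values)
                # while the target network evaluates it: y = r + mask * gamma * Q_target(s', argmax_a Q_online(s', a)).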
nactions = self.network(nmarket_states, nprivate_states).argmax(1)
Qtarget = (rewards + masks * self.config.agent_gamma * \
self.network_target(nmarket_states, nprivate_states)[range(self.config.agent_batch_size), \
nactions]).detach()
Qvalue = self.network(market_states, private_states)[range(self.config.agent_batch_size), actions]
avg_Q = Qvalue.mean().detach()
loss = self.loss_func(Qvalue, Qtarget)
self.network.zero_grad()
loss.backward()
for param in self.network.parameters():
param.grad.data.clamp_(-1, 1)
# print('Finish the {}-th iteration, the loss = {}'.format(i, float(loss)))
self.optimizer.step()
self.scheduler.step()
# Step 4: Update target network
if i % self.config.agent_target_update_freq == 0:
self.network_target.load_state_dict(self.network.state_dict())
# Step 5: Evaluate and log performance
if i % self.config.agent_plot_freq == 0 and len(train_record) > 0:
eval_agent = (lambda sm, sp: self.network.act_egreedy(ms_scaler.transform(sm.reshape(1, -1)).flatten(), sp, e=0.0)) \
if i > self.config.agent_learn_start else \
(lambda sm, sp: self.network.act_egreedy(sm, sp, e=0.0))
self.evaluation.evaluate_detail_batch(eval_agent, iteration=i)
print(train_record[-1])
if i % self.config.agent_eval_freq == 0:
eval_agent = (lambda sm, sp: self.network.act_egreedy(ms_scaler.transform(sm.reshape(1, -1)).flatten(), sp, e=0.0)) \
if i > self.config.agent_learn_start else \
(lambda sm, sp: self.network.act_egreedy(sm, sp, e=0.0))
eval_record.append(self.evaluation.evaluate(eval_agent))
print(eval_record[-1])
return train_record, eval_record
class Evaluation(object):
def __init__(self, config):
super(Evaluation, self).__init__()
self.config = config
self.env = make_env(config)
def evaluate(self, agent):
bp_list = []
rew_list = []
for code in self.config.code_list_validation:
for date in self.config.date_list_validation:
record = self.evaluate_single(agent, code=code, date=date)
bp_list.append(record['BP'].values[-1])
rew_list.append(record['reward'].sum())
return dict(
BP=np.mean(bp_list),
reward=np.mean(rew_list)
)
def evaluate_detail_batch(self, agent, iteration=1,
code='000504.XSHE',
date_list=['2021-06-01', '2021-06-03', '2021-06-04', '2021-07-02', '2021-07-05', '2021-07-06']):
path = os.path.join(self.config.result_path, 'evaluation', 'it{:08d}'.format(iteration))
os.makedirs(path, exist_ok=True)
record = []
for date in date_list:
for i in range(5):
res = self.evaluate_single(agent, code=code, date=date)
record.append(res)
Figure().plot_policy(df=res, filename=os.path.join(path, 'fig_{}_{}_{}.png'.format(code, date, i)))
pd.concat(record).to_csv(os.path.join(path, 'detail_{}.csv'.format(code)))
def evaluate_single(self, agent, code='600519.XSHG', date='2021-06-01'):
record = []
sm, sp = self.env.reset(code, date)
done = False
step = 0
action = None
info = dict(status=None)
while not done:
action = agent(sm, sp)
nsm, nsp, reward, done, info = self.env.step(action)
if self.config.simulation_action_type == 'discrete_pq':
order_price = self.config.simulation_discrete_actions[action][0]
order_price = np.round((1 + order_price / 10000) \
* self.env.data.obtain_level('askPrice', 1) * 100) / 100
elif self.config.simulation_action_type == 'discrete_p':
order_price = self.config.simulation_discrete_actions[action]
order_price = np.round((1 + order_price / 10000) \
* self.env.data.obtain_level('askPrice', 1) * 100) / 100
elif self.config.simulation_action_type == 'discrete_q':
order_price = self.env.data.obtain_level('bidPrice', 1)
record.append(dict(
code=code,
date=date,
step=step,
quantity=self.env.quantity,
action=action,
ask_price=self.env.data.obtain_level('askPrice', 1),
bid_price=self.env.data.obtain_level('bidPrice', 1),
order_price=order_price,
reward=reward,
cash=self.env.cash,
BP=self.env.get_metric('BP'),
IS=self.env.get_metric('IS'),
status=info['status'],
index=self.env.data.current_index
))
step += 1
sm, sp = nsm, nsp
return pd.DataFrame(record)
class Figure(object):
def __init__(self):
pass
@staticmethod
def plot_policy(df, filename):
fig, ax1 = plt.subplots(figsize=(15, 6))
ax2 = ax1.twinx()
ax1.plot(df['index'], df['ask_price'], label='ask_price')
ax1.plot(df['index'], df['bid_price'], label='bid_price')
ax1.plot(df['index'], df['order_price'], label='order_price')
ax1.legend(loc='lower left')
ax2.plot(df['index'], df['quantity'], 'k*', label='inventory')
ax1.set_title('{} {} BP={:.4f}'.format(df['code'].values[-1], df['date'].values[-1], df['BP'].values[-1]))
ax2.legend(loc='upper right')
plt.savefig(filename, bbox_inches='tight')
plt.close('all')
@staticmethod
def plot_training_process_basic(df, filename):
while df.shape[0] > 1500:
df = df[::2]
fig, ax1 = plt.subplots(figsize=(15, 6))
ax2 = ax1.twinx()
ax1.plot(df.index.values, df['reward'], 'C0', label='reward')
ax1.legend(loc='lower left')
ax2.plot(df.index.values, df['BP'], 'C1', label='BP')
ax2.legend(loc='upper right')
top_size = df.shape[0] // 10
mean_bp_first = np.mean(df['BP'].values[:top_size])
mean_bp_last = np.mean(df['BP'].values[-top_size:])
mean_rew_first = np.mean(df['reward'].values[:top_size])
mean_rew_last = np.mean(df['reward'].values[-top_size:])
ax2.set_title('BP {:.4f}->{:.4f} reward {:.4f}->{:.4f}'.format(mean_bp_first, mean_bp_last, mean_rew_first, mean_rew_last))
if 'loss' in df.columns:
ax3 = ax1.twinx()
p3, = ax3.plot(df.index.values, df['loss'], 'C2')
ax3.yaxis.label.set_color('C2')
plt.savefig(filename, bbox_inches='tight')
plt.close('all')
return dict(mean_bp_first=mean_bp_first, mean_bp_last=mean_bp_last, mean_rew_first=mean_rew_first, mean_rew_last=mean_rew_last)
def run(argus):
model, lr, lin_reg, parallel_id = argus
config = DefaultConfig()
config.agent_learning_rate = lr
config.simulation_linear_reg_coeff = lin_reg
config.agent_network_structrue = model
info = dict(learning_rate=lr, linear_reg=lin_reg, architecture=model.__name__, parallel_id=parallel_id)
id_str = '{}_lr{:.1E}_linreg_{:.1E}_{}'.format(model.__name__, lr, lin_reg, parallel_id)
config.result_path = os.path.join(config.result_path, id_str)
os.makedirs(config.result_path, exist_ok=True)
extend_path = lambda x: os.path.join(config.result_path, x)
agent = Agent(config)
train_record, eval_record = agent.learn()
train_record, eval_record = pd.DataFrame(train_record), pd.DataFrame(eval_record)
train_record.to_csv(extend_path('dqn_train_record.csv'))
eval_record.to_csv(extend_path('dqn_eval_record.csv'))
train_info = Figure().plot_training_process_basic(train_record, extend_path('dqn_train_record.png'))
eval_info = Figure().plot_training_process_basic(eval_record, extend_path('dqn_eval_record.png'))
info.update({('trn_' + k): v for k, v in train_info.items()})
info.update({('val_' + k): v for k, v in eval_info.items()})
return info
if __name__ == '__main__':
record = []
test_list = list(itertools.product(
[MLPNetwork_complex, MLPNetwork_Xcomplex],
[2e-5, 1e-5, 5e-6],
[0.1, 0.01],
np.arange(5)
))
parallel = False
if parallel:
pool = Pool(4)
record = pool.map(run, test_list)
else:
record = []
        for tmp in test_list:
            tmp_res = run(tmp)
            record.append(tmp_res)
record = pd.DataFrame(record)
record.to_csv(os.path.join(DefaultConfig().result_path, 'result_original.csv'))
stats = record.groupby(['learning_rate', 'linear_reg', 'architecture']).agg([np.mean, np.std])
stats.to_csv(os.path.join(DefaultConfig().result_path, 'result_stats.csv'))
| 24,410 | 39.82107 | 135 | py |
FinRL_Market_Simulator | FinRL_Market_Simulator-master/OrderExecution/order_execution_env.py | import os
import torch
from random import shuffle
from functorch import vmap
from shares_data_process import get_share_dicts_by_day
"""
Readme written on 2022-11-08 17:28:39

## OrderExecutionEnv: order-execution simulation environments

### What is the order-execution task?

Example: I hold 1000 shares of Moutai and want to sell them on the stock market within one month,
in exchange for as much cash as possible.
Quoting a higher sell price brings in more cash, but the shares may then fail to be sold before the deadline.
Traders therefore design an "order execution strategy": based on market conditions, one very large order is
split into small executable orders, so that it is sold at the best possible price within the time limit.

Order-execution simulation environments:
We designed these environments so that reinforcement learning algorithms can solve the order-execution task.
- OrderExecutionEnv is a single env computed on CPU; its code is easier to understand
- OrderExecutionVecEnv is a vectorized env computed on GPU; it is far more efficient

state, the observable state, with feature dimension `self.state_dim = 4 + self.data_dicts[0]['tech_factors'].shape[1]`
- internal state (state that changes as a result of the agent's actions)
    - cash: cash on hand (not added to the state, because we never buy anything)
    - remain_quantity: the remaining order quantity still to be executed, an integer
    - quantity: the order quantity to be executed at the current step, an integer
- external state (state not affected by the agent's actions; as the simulation becomes more realistic,
  these may also become internal state)
    - remain_step_rate: the number of remaining executable steps divided by the total number of steps,
      so it slowly decreases from 1.0 to 0.0
    - last_price: the closing price of the previous step; the policy learns an offset to this price
      to obtain the execution price for the current step
    - tech_factor: technical features written casually by the author; they help a little and can later be
      replaced by professional technical factors

action, the policy's action, with feature dimension 2
- delta_price: after adjustment it gives executed_price, the price of the order submitted to the exchange
    - the order price of the current step is the previous step's price plus delta_price
    - the minimum price movement between adjacent levels is 0.01, so the -1.0~+1.0 delta_price is
      multiplied by price_scale * 0.01
    - delta_price equal to 0 means the order price equals the last traded price of the previous step
    - delta_price equal to -1 means quoting the lowest price the simulation allows; +1 means the highest
- quantity_ratio: after adjustment it gives executed_quantity, the quantity of the order submitted to the exchange
    - the action space is -1.0~+1.0, linearly mapped to 0.0~2.0 to obtain quantity_ratio
    - at reset, the base order quantity self.quantity is computed from the remaining time and the remaining quantity

Reasons for this design:
- the fixed action (0, 0), i.e. delta_price=0 and quantity_ratio=1.0, gives a baseline
- letting the policy output delta_price instead of a raw order price makes it output similar actions in similar states
- letting the policy output quantity_ratio instead of a raw order quantity caps the order size,
  so it never exceeds what the environment can simulate

Note: further comments are written inside OrderExecutionVecEnv; they focus on the engineering of the
GPU-parallel simulation. Detailed comments live in OrderExecutionVecEnv rather than OrderExecutionEnv.
"""
class OrderExecutionVecEnv:
"""
    In this version the base execution quantity is changed from dynamic to static.
"""
def __init__(self, num_envs: int = 4, gpu_id: int = 0, if_random=False,
share_name: str = '000768_XSHE', beg_date: str = '2022-09-01', end_date: str = '2022-09-03', ):
        self.if_random = if_random  # a randomized reset lets the policy learn from more diverse states and improves generalization
        self.num_levels = 5  # select n_levels of the 10 order-book levels for the simulation
        self.price_scale = 25  # the first action feature is the change of the sell price w.r.t. the previous step, covering about 30 price ticks
        self.volume_scale = 1e-2  # the order quantity to be executed in the task is set to volume_scale times the traded volume
        self.executed_scale = 2e-2  # the proportion of volume traded at last_price that can fill our orders
assert self.volume_scale < self.executed_scale
'''stack state'''
        self.n_stack = 8  # number of states from different time steps t stacked into one observation
        self.n_state = []  # buffer of recent states used for stacking
'''device'''
self.device = torch.device(f"cuda:{gpu_id}" if (torch.cuda.is_available() and (gpu_id >= 0)) else "cpu")
        # assign a single GPU device to the vectorized env for parallel computation
'''load data'''
        self.max_len = None  # set to None so that calling step() before env.reset() raises a predictable error
        self.share_name = share_name  # name of the currently selected share
        self.cumulative_returns = torch.zeros(0)  # the empty tensor torch.zeros(0) stresses that these are tensors, not None
self.price = torch.zeros(0)
self.volume = torch.zeros(0)
        self.ask_prices = torch.zeros(0)  # ask prices at each level, indices=[1, max_level]
        self.bid_prices = torch.zeros(0)  # bid prices at each level, indices=[1, max_level]
        self.ask_volumes = torch.zeros(0)  # ask volumes at each level, indices=[1, max_level]
        self.bid_volumes = torch.zeros(0)  # bid volumes at each level, indices=[1, max_level]
        self.tech_factors = torch.zeros(0)
        self.total_quantity = torch.zeros(0)  # target quantity of the execution task (to be completed within one day)
self.data_dicts = self.load_share_data_dicts(
data_dir='./shares_data_by_day', share_name=share_name,
beg_date=beg_date, end_date=end_date)
'''reset'''
        self.t = 0  # time step t
        self.cash = torch.zeros(0)  # cash; not added to the state because we only sell, never buy
        self.quantity = torch.zeros(0)  # base order quantity per step
        self.total_asset = torch.zeros(0)  # total asset = cash + holdings converted to cash (holdings count as 0 cash in this task)
        self.remain_quantity = torch.zeros(0)  # remaining quantity; the agent must sell everything so that this ends at 0
'''env info'''
self.env_name = 'OrderExecutionVecEnv-v2'
self.num_envs = num_envs
        self.max_step = max([data_dict['max_len'] for data_dict in self.data_dicts])  # use the longest sequence in the data as max_step
self.state_dim = (4 + self.data_dicts[0]['tech_factors'].shape[1]) * self.n_stack
self.action_dim = 2
self.if_discrete = False
'''function for vmap'''
self.inplace_cash_quantity = vmap(
func=self._inplace_cash_quantity, in_dims=(0, 0, 0, None, None), out_dims=0
)
self._get_state = vmap(
func=lambda remain_quantity, quantity, remain_step_rate, last_price, tech_factor:
torch.hstack((remain_quantity, quantity, remain_step_rate, last_price, tech_factor)),
in_dims=(0, 0, None, None, None), out_dims=0
)
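        # Both helpers are vmapped over the env batch dimension: in_dims=0 marks per-env tensors,
        # None marks tensors shared across envs, so one call updates/builds all parallel envs at once.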
'''def get_data_dict'''
self.rand_id = 0
shuffle(self.data_dicts)
def get_data_dict(self):
self.rand_id += 1
if self.rand_id >= len(self.data_dicts):
self.rand_id = 0
shuffle(self.data_dicts)
return self.data_dicts[self.rand_id] # data_dict
def reset(self):
self.t = 0
'''load data from data_dict to device'''
data_dict = self.get_data_dict()
self.max_len = data_dict['max_len']
self.volume = data_dict['volume'].to(self.device)
self.price = data_dict['last_price'].to(self.device)
self.ask_prices = data_dict['ask_prices'].to(self.device)
self.bid_prices = data_dict['bid_prices'].to(self.device)
self.ask_volumes = data_dict['ask_volumes'].to(self.device)
self.bid_volumes = data_dict['bid_volumes'].to(self.device)
self.tech_factors = data_dict['tech_factors'].to(self.device)
total_quantity = data_dict['total_quantity'].to(self.device)
total_quantity = total_quantity.repeat(self.num_envs)
'''build internal state: cash'''
self.cash = torch.zeros(self.num_envs, dtype=torch.float32, device=self.device)
        self.total_asset = self.cash.clone()  # total asset = cash + holdings converted to cash (holdings count as 0 cash in this task)
'''build internal state: quantity'''
self.quantity = total_quantity * self.executed_scale / self.max_len
total_quantity_scale = torch.arange(self.num_envs).to(self.device) / self.num_envs
total_quantity_scale = total_quantity_scale * 0.9 + 0.1 # range in [0.1, 0.9]
self.total_quantity = total_quantity * self.volume_scale * total_quantity_scale
if self.if_random:
self.quantity *= torch.rand_like(self.quantity) * 0.2 + 0.9 # range in [0.9, 1.1]
self.total_quantity *= torch.rand_like(self.total_quantity) * 0.2 + 0.9 # range in [0.9, 1.1]
self.total_quantity = torch.round(self.total_quantity)
self.remain_quantity = torch.zeros_like(self.cash) + self.total_quantity
'''stack state'''
state = self.get_state()
self.n_state = [state, ] * 24
return self.get_n_state()
def step(self, action):
self.t += 1
done = self.t == self.max_len
        '''action'''  # linearly map the policy's -1.0~+1.0 actions to the order price and order quantity used by the simulation
curr_price = self.get_curr_price(action[:, 0])
curr_quantity = self.get_curr_quantity(action[:, 1])
prev_quantity = curr_quantity.clone()
'''executed in current step immediately'''
for level in range(self.num_levels):
self.inplace_cash_quantity(self.cash, curr_quantity, curr_price,
self.bid_prices[level, self.t], self.bid_volumes[level, self.t])
'''executed in next step'''
if not done:
self.inplace_cash_quantity(self.cash, curr_quantity, curr_price,
self.price[self.t + 1], self.volume[self.t + 1] * self.executed_scale)
'''update remain_quantity'''
diff_quantity = curr_quantity - prev_quantity
self.remain_quantity += diff_quantity
'''get (state, reward, done)'''
total_asset = self.cash
reward = (total_asset - self.total_asset) * 2 ** -14
self.total_asset = self.cash.clone()
# state = self.reset() if done else self.get_state() # after self.t += 1
if done:
self.cumulative_returns = total_asset / (self.total_quantity * self.price.mean()) * 100 # 100%
n_state = self.reset()
else:
state = self.get_state()
self.n_state.append(state)
del self.n_state[0]
n_state = self.get_n_state()
done = torch.tensor(done, dtype=torch.bool, device=self.device).expand(self.num_envs)
return n_state, reward, done, {}
    def get_state(self):  # build the state observed by the agent
return self._get_state(self.remain_quantity / self.total_quantity,
self.quantity / self.total_quantity,
self.get_tensor(1 - self.t / self.max_len), # remain_step_rate
self.price[self.t] * 2 ** -3,
self.tech_factors[self.t])
def get_n_state(self):
return torch.hstack([self.n_state[i] for i in (-1, -2, -3, -5, -7, -11, -15, -24)])
def get_tensor(self, ary):
return torch.tensor(ary, dtype=torch.float32, device=self.device)
def get_curr_price(self, action_price):
delta_price = action_price * (self.price_scale * 0.01)
return self.price[self.t - 1] + delta_price # after self.t += 1
def get_curr_quantity(self, action_quantity):
quantity_ratio = action_quantity + 1
curr_quantity = torch.round(quantity_ratio * self.quantity)
curr_quantity = torch.min(torch.stack((self.remain_quantity, curr_quantity)), dim=0)[0]
return curr_quantity
@staticmethod
def _inplace_cash_quantity(cash, quantity, price, ask_price, ask_volume):
executed_volume = torch.min(quantity, ask_volume) * (price >= ask_price)
        # multiplying by (price >= ask_price) acts as an `if`: when it is False, executed_volume becomes 0,
        # which is equivalent to skipping this code; vmap (as of 2022-11-09) can only accelerate static code, not branches
cash += executed_volume * price
quantity -= executed_volume
return torch.empty(0)
@staticmethod
def get_tech_factors(volume, price, value,
ask_prices, ask_volumes,
bid_prices, bid_volumes):
"""
        Features derived from the ask-bid data, written casually by the author and computed on GPU; they have a mild effect.
        They mainly serve to check that the module adding technical factors to the simulation runs correctly.
        They should later be replaced by more professional technical factors.
"""
ask_values = ask_prices * ask_volumes
bid_values = bid_prices * bid_volumes
mean_price = value / volume
delta_price = price - mean_price
ask_cum_values = torch.cumsum(ask_values, dim=0)
bid_cum_values = torch.cumsum(bid_values, dim=0)
ask_cum_volumes = torch.cumsum(ask_volumes, dim=0)
bid_cum_volumes = torch.cumsum(bid_volumes, dim=0)
ask_cum_prices = ask_cum_values / ask_cum_volumes
del ask_cum_values, ask_cum_volumes
bid_cum_prices = bid_cum_values / bid_cum_volumes
del bid_cum_values, bid_cum_volumes
v_adj_spreads = ask_cum_prices - bid_cum_prices
del ask_cum_prices, bid_cum_prices
'''normalization'''
tech_factors = torch.cat((
get_ts_trends(value * 2 ** -14, win_size=6, gap_size=6),
get_ts_trends(value * 2 ** -14, win_size=12, gap_size=8),
get_ts_trends(mean_price * 2 ** 3, win_size=6, gap_size=6),
get_ts_trends(mean_price * 2 ** 3, win_size=12, gap_size=8),
get_ts_trends(delta_price * 2 ** 9, win_size=6, gap_size=6),
get_ts_trends(delta_price * 2 ** 9, win_size=12, gap_size=8),
get_ts_trends(v_adj_spreads[0] * 2 ** 6, win_size=6, gap_size=6),
get_ts_trends(v_adj_spreads[1] * 2 ** 6, win_size=8, gap_size=6),
get_ts_trends(v_adj_spreads[2] * 2 ** 6, win_size=8, gap_size=8),
get_ts_trends(v_adj_spreads[3] * 2 ** 6, win_size=12, gap_size=8),
get_ts_trends(v_adj_spreads[4] * 2 ** 6, win_size=12, gap_size=12),
), dim=1)
torch.nan_to_num_(tech_factors, nan=0.0, posinf=0.0, neginf=0.0)
return tech_factors
def load_share_data_dicts(self, data_dir="./data",
share_name: str = '000768_XSHE',
beg_date='2022-09-01',
end_date='2022-09-30'):
assert share_name in {'000768_XSHE', '000685_XSHE'}
share_dir = f"{data_dir}/{share_name}"
share_dicts = get_share_dicts_by_day(share_dir=share_dir, share_name=share_name,
beg_date=beg_date, end_date=end_date,
n_levels=self.num_levels, n_days=5, device=self.device)
for share_dict in share_dicts:
for key, value in share_dict.items():
if isinstance(value, torch.Tensor):
share_dict[key] = value.to(torch.device('cpu'))
        data_dicts = []  # keep each share's data in a dict; at reset, one share is randomly chosen and its data is loaded onto the GPU for training
print('| OrderExecutionEnv data pre processing:', share_name)
for i, share_dict in enumerate(share_dicts):
share_name = share_dict['share_name']
trade_date = share_dict['trade_date']
print(end=f'{trade_date} ')
print() if i % 8 == 7 else None
            # after processing these order-flow data, we obtain ask and bid snapshots over a period of time
            ask_volumes = share_dict['ask_volumes']  # ask volumes at each level
            bid_volumes = share_dict['bid_volumes']  # bid volumes at each level
            ask_prices = share_dict['ask_prices']  # ask prices at each level
            bid_prices = share_dict['bid_prices']  # bid prices at each level
            volume = share_dict['volume']  # delta volume: quantity of orders traded
            price = share_dict['price']  # last price: latest traded price
            value = share_dict['value']  # delta value: total traded value (turnover)
tech_factors = self.get_tech_factors(volume, price, value,
ask_prices, ask_volumes,
bid_prices, bid_volumes)
            # keep in CPU memory first; the data are moved to the GPU only at reset
data_dict = {
'share_name': share_name,
'max_len': price.shape[0] - 1,
'total_quantity': volume.sum(),
'volume': volume,
'last_price': price,
'ask_prices': ask_prices,
'bid_prices': bid_prices,
'ask_volumes': ask_volumes,
'bid_volumes': bid_volumes,
'tech_factors': tech_factors,
}
data_dicts.append(data_dict)
return data_dicts
class OrderExecutionMinuteVecEnv(OrderExecutionVecEnv):
def __init__(self, num_envs: int = 4, gpu_id: int = 0, if_random=False,
share_name: str = '000768_XSHE', beg_date: str = '2022-09-01', end_date: str = '2022-09-03', ):
        self.exec_level = 16  # split the aggregated prices into exec_level levels
        self.num_cluster = 20  # aggregate num_cluster snapshots into one; one snapshot is about 3 seconds, so 3s * 20 = 60s
        self.price_scale = 25  # the first action feature is the change of the sell price w.r.t. the previous step, covering about 30 price ticks
super(OrderExecutionMinuteVecEnv, self).__init__(num_envs=num_envs, gpu_id=gpu_id, if_random=if_random,
share_name=share_name, beg_date=beg_date, end_date=end_date)
'''stack state'''
        self.n_stack = 8  # number of states from different time steps t stacked into one observation
        self.n_state = []  # buffer of recent states used for stacking
'''load data'''
self.prices = torch.zeros(0)
self.volumes = torch.zeros(0)
def reset(self):
self.t = 0
'''load data from data_dict to device'''
data_dict = self.get_data_dict()
self.max_len = data_dict['max_len']
self.prices = data_dict['prices'].to(self.device)
self.volumes = data_dict['volumes'].to(self.device)
self.price = data_dict['price'].to(self.device)
self.volume = data_dict['volume'].to(self.device)
self.tech_factors = data_dict['tech_factors'].to(self.device)
total_quantity = data_dict['total_quantity'].to(self.device)
total_quantity = total_quantity.repeat(self.num_envs)
'''build internal state: cash'''
self.cash = torch.zeros(self.num_envs, dtype=torch.float32, device=self.device)
        self.total_asset = self.cash.clone()  # total asset = cash + holdings converted to cash (holdings count as 0 cash in this task)
'''build internal state: quantity'''
self.quantity = total_quantity * self.executed_scale / self.max_len
total_quantity_scale = torch.arange(self.num_envs).to(self.device) / self.num_envs
total_quantity_scale = total_quantity_scale * 0.9 + 0.1 # range in [0.1, 0.9]
self.total_quantity = total_quantity * self.volume_scale * total_quantity_scale
if self.if_random:
self.quantity *= torch.rand_like(self.quantity) * 0.2 + 0.9 # range in [0.9, 1.1]
self.total_quantity *= torch.rand_like(self.total_quantity) * 0.2 + 0.9 # range in [0.9, 1.1]
self.total_quantity = torch.round(self.total_quantity)
self.remain_quantity = torch.zeros_like(self.cash) + self.total_quantity
'''stack state'''
state = self.get_state()
self.n_state = [state, ] * 24
return self.get_n_state()
def step(self, action):
self.t += 1
done = self.t == self.max_len
        '''action'''  # linearly map the policy's -1.0~+1.0 actions to the order price and order quantity used by the simulation
curr_price = self.get_curr_price(action[:, 0])
curr_quantity = self.get_curr_quantity(action[:, 1])
prev_quantity = curr_quantity.clone()
'''executed'''
for level in range(self.exec_level):
self.inplace_cash_quantity(self.cash, curr_quantity, curr_price,
self.prices[self.t, level], self.volumes[self.t, level])
'''update remain_quantity'''
diff_quantity = curr_quantity - prev_quantity
self.remain_quantity += diff_quantity
'''get (state, reward, done)'''
total_asset = self.cash
reward = (total_asset - self.total_asset) * 2 ** -14
self.total_asset = self.cash.clone()
# state = self.reset() if done else self.get_state() # after self.t += 1
if done:
self.cumulative_returns = total_asset / (self.total_quantity * self.price.mean()) * 100 # 100%
n_state = self.reset()
else:
state = self.get_state()
self.n_state.append(state)
del self.n_state[0]
n_state = self.get_n_state()
done = torch.tensor(done, dtype=torch.bool, device=self.device).expand(self.num_envs)
return n_state, reward, done, {}
    def get_state(self):  # build the state observed by the agent
return self._get_state(self.remain_quantity / self.total_quantity,
self.quantity / self.total_quantity,
self.get_tensor(1 - self.t / self.max_len), # remain_step_rate
self.price[self.t] * 2 ** -3,
self.tech_factors[self.t])
def get_n_state(self):
return torch.hstack([self.n_state[i] for i in (-1, -2, -4, -8)])
def load_share_data_dicts(self, data_dir="./data",
share_name: str = '000768_XSHE',
beg_date='2022-09-01',
end_date='2022-09-30'):
assert share_name in {'000768_XSHE', '000685_XSHE'}
share_dir = f"{data_dir}/{share_name}"
share_dicts = get_share_dicts_by_day(share_dir=share_dir, share_name=share_name,
beg_date=beg_date, end_date=end_date,
n_levels=self.num_levels, n_days=5, device=self.device)
for share_dict in share_dicts:
for key, value in share_dict.items():
if isinstance(value, torch.Tensor):
share_dict[key] = value.to(torch.device('cpu'))
        data_dicts = []  # keep each share's data in a dict; at reset, one share is randomly chosen and its data is loaded onto the GPU for training
print('| OrderExecutionEnv data pre processing:', share_name)
for i, share_dict in enumerate(share_dicts):
share_name = share_dict['share_name']
trade_date = share_dict['trade_date']
print(end=f'{trade_date} ')
print() if i % 8 == 7 else None
            # process these order-flow data
            price = share_dict['price']  # last price: latest traded price
            value = share_dict['value']  # delta value: total traded value (turnover)
            volume = share_dict['volume']  # delta volume: quantity of orders traded
            ask_prices = share_dict['ask_prices']  # ask prices at each level
            bid_prices = share_dict['bid_prices']  # bid prices at each level
            ask_volumes = share_dict['ask_volumes']  # ask volumes at each level
            bid_volumes = share_dict['bid_volumes']  # bid volumes at each level
            '''aggregate ticks into minute-level price/volume ladders'''
            prices, volumes = self.tick_to_minute_data(volume=volume, value=value)
            '''aggregate the tick series into minute bars'''
            n_step = price.shape[0] // self.num_cluster
            # aggregation
price = price[:n_step * self.num_cluster].reshape((n_step, self.num_cluster)).mean(dim=1)
value = value[:n_step * self.num_cluster].reshape((n_step, self.num_cluster)).sum(dim=1)
volume = volume[:n_step * self.num_cluster].reshape((n_step, self.num_cluster)).sum(dim=1)
ask_prices = ask_prices[:, 0:n_step * self.num_cluster:self.num_cluster]
bid_prices = bid_prices[:, 0:n_step * self.num_cluster:self.num_cluster]
ask_volumes = ask_volumes[:, 0:n_step * self.num_cluster:self.num_cluster]
bid_volumes = bid_volumes[:, 0:n_step * self.num_cluster:self.num_cluster]
tech_factors = self.get_tech_factors(volume, price, value,
ask_prices, ask_volumes,
bid_prices, bid_volumes)
            # keep in CPU memory first; the data are moved to the GPU only at reset
data_dict = {
'share_name': share_name,
'max_len': price.shape[0] - 1,
'total_quantity': volume.sum(),
'price': price,
'volume': volume,
'prices': prices,
'volumes': volumes,
'tech_factors': tech_factors,
}
data_dicts.append(data_dict)
'''add the price and volume of previous day'''
for i, curr_dict in enumerate(data_dicts):
'''prev_dict'''
j = max(0, i - 1)
prev_dict = data_dicts[j]
prev_price = prev_dict['price']
prev_price_rate = prev_price / prev_price.mean()
prev_volume = prev_dict['volume']
prev_volume_rate = prev_volume / prev_volume.mean()
'''curr_dict'''
tech_factors = curr_dict['tech_factors']
tech_price_rate = self.get_diff_stack_tensor(prev_price_rate, tech_factors)
tech_volume_rate = self.get_diff_stack_tensor(prev_volume_rate, tech_factors)
'''append to tech_factors'''
curr_dict['tech_factors'] = torch.cat((tech_factors, tech_price_rate, tech_volume_rate), dim=1)
return data_dicts
@staticmethod
def get_diff_stack_tensor(prev_tensor, curr_tensor):
prev_len = prev_tensor.shape[0]
curr_len = curr_tensor.shape[0]
max_len = min(prev_len, curr_len)
tech_prices = torch.ones((curr_len, 8), dtype=torch.float32, device=curr_tensor.device)
tech_prices[:max_len, 0] = prev_tensor[:max_len]
tech_prices[:max_len - 2, 1] = prev_tensor[2:max_len]
tech_prices[:max_len - 4, 2] = prev_tensor[4:max_len]
tech_prices[:max_len - 6, 3] = prev_tensor[6:max_len]
tech_prices[:max_len - 9, 4] = prev_tensor[9:max_len]
tech_prices[:max_len - 15, 5] = prev_tensor[15:max_len]
tech_prices[2:max_len, 6] = prev_tensor[:max_len - 2]
tech_prices[5:max_len, 7] = prev_tensor[:max_len - 5]
return tech_prices
def get_tech_factors(self, volume, price, value,
ask_prices, ask_volumes,
bid_prices, bid_volumes):
"""
        Features derived from the ask-bid data, written casually by the author and computed on GPU; they have a mild effect.
        They mainly serve to check that the module adding technical factors to the simulation runs correctly.
        They should later be replaced by more professional technical factors.
"""
ask_values = ask_prices * ask_volumes
bid_values = bid_prices * bid_volumes
mean_price = value / volume
delta_price = price - mean_price
ask_cum_values = torch.cumsum(ask_values, dim=0)
bid_cum_values = torch.cumsum(bid_values, dim=0)
ask_cum_volumes = torch.cumsum(ask_volumes, dim=0)
bid_cum_volumes = torch.cumsum(bid_volumes, dim=0)
ask_cum_prices = ask_cum_values / ask_cum_volumes
del ask_cum_values, ask_cum_volumes
bid_cum_prices = bid_cum_values / bid_cum_volumes
del bid_cum_values, bid_cum_volumes
v_adj_spreads = ask_cum_prices - bid_cum_prices
del ask_cum_prices, bid_cum_prices
'''normalization'''
tech_factors = torch.cat((
get_ts_trends(value * 2 ** -14, win_size=12, gap_size=8),
get_ts_trends(mean_price * 2 ** 3, win_size=6, gap_size=6),
get_ts_trends(mean_price * 2 ** 3, win_size=12, gap_size=8),
get_ts_trends(delta_price * 2 ** 9, win_size=6, gap_size=6),
get_ts_trends(delta_price * 2 ** 9, win_size=12, gap_size=8),
get_ts_trends(v_adj_spreads[0] * 2 ** 6, win_size=6, gap_size=6),
get_ts_trends(v_adj_spreads[1] * 2 ** 6, win_size=8, gap_size=6),
get_ts_trends(v_adj_spreads[2] * 2 ** 6, win_size=8, gap_size=8),
get_ts_trends(v_adj_spreads[3] * 2 ** 6, win_size=12, gap_size=8),
get_ts_trends(v_adj_spreads[4] * 2 ** 6, win_size=12, gap_size=12),
), dim=1)
torch.nan_to_num_(tech_factors, nan=0.0, posinf=0.0, neginf=0.0)
return tech_factors
def tick_to_minute_data(self, volume, value):
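        # Aggregate num_cluster ticks into one minute bar, then spread that bar's volume over exec_level
        # synthetic price levels laid out around the volume-weighted mean tick price, with per-level
        # volumes following a normalized Gaussian profile.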
n_step = volume.shape[0] // self.num_cluster
device = volume.device
value = value[:n_step * self.num_cluster].reshape((n_step, self.num_cluster))
volume = volume[:n_step * self.num_cluster].reshape((n_step, self.num_cluster))
price = torch.nan_to_num_(value / volume, nan=0.0)
volume_norm = volume / volume.mean(dim=1, keepdim=True)
price_avg = (volume_norm * price).mean(dim=1, keepdim=True)
price_std = (volume_norm * (price - price_avg) ** 2).mean(dim=1, keepdim=True)
num_k = torch.arange(self.exec_level + 1, dtype=torch.float32, device=device) # range[0, self.exec_level]
num_k = num_k * (3 / self.exec_level) - 1 # range [-1, 2]
std_k = num_k * (-50) # range [50, -100]
std_k = std_k.unsqueeze(0)
prices = price_avg + price_std * std_k # price from high to low
vol_k = torch.exp(-num_k ** 2 / 2) # / (torch.pi*2)**0.5 = Probability Density Function with sigma=1.0
vol_k = vol_k / vol_k.sum() # sigma~=0.3, and the area of func PDF range[-0.3, 0.6] ~= 1.0
vol_k = vol_k.unsqueeze(0)
volumes = volume.sum(dim=1, keepdim=True) * vol_k
return prices, volumes
class OrderExecutionVecEnvForEval(OrderExecutionVecEnv):
def __init__(self, num_envs: int = 4, gpu_id: int = 0, if_random=False,
beg_date: str = '2022-09-01', end_date: str = '2022-09-03', share_name='000685_XSHE'):
OrderExecutionVecEnv.__init__(self, num_envs=num_envs, gpu_id=gpu_id, if_random=if_random,
beg_date=beg_date, end_date=end_date, share_name=share_name)
self.curr_price = None
self.curr_quantity = None
self.cumulative_returns_days = []
def reset(self):
self.rand_id = 0
self.cumulative_returns_days = []
return super().reset()
def step(self, action): # modified_mark
n_state, reward, done, info_dict = super().step(action)
if done[0]: # modified_mark
self.cumulative_returns_days.append(self.cumulative_returns)
self.cumulative_returns = torch.stack(self.cumulative_returns_days).mean(dim=0)
data_dict = self.data_dicts[self.rand_id]
self.bid_prices = data_dict['bid_prices'].to(self.device) # ForPlot
self.bid_volumes = data_dict['bid_volumes'].to(self.device) # ForPlot
return n_state, reward, done, info_dict
def get_curr_price(self, action_price):
self.curr_price = super().get_curr_price(action_price)
return self.curr_price
def get_curr_quantity(self, action_quantity):
self.curr_quantity = super().get_curr_quantity(action_quantity)
return self.curr_quantity
'''get_tech_factors'''
def get_re_cum_sum(ten):
cum_sum = torch.cumsum(ten, dim=0)
return ten - cum_sum + cum_sum[-1:None]
def get_all_cum_sum(level_tensors):
level_cum = level_tensors.clone()
for i in range(1, level_tensors.shape[1]):
level_cum[i] += level_cum[i - 1]
return level_cum
def get_ts_avg_std(ten, win_size=6):  # note: this rolling loop could be made more efficient
avg = torch.zeros_like(ten)
std = torch.zeros_like(ten)
for i in range(win_size, avg.shape[0]):
tmp = ten[i - win_size:i]
avg[i] = tmp.mean(dim=0)
std[i] = tmp.std(dim=0)
return avg, std
def get_ts_diff(ten, gap_size=6):
out = torch.zeros_like(ten)
out[gap_size:] = ten[gap_size:] - ten[:-gap_size]
return out
def get_ts_trends(ten, win_size=6, gap_size=6):
avg, std = get_ts_avg_std(ten, win_size)
avg_diff = get_ts_diff(avg, gap_size)
std_diff = get_ts_diff(std, gap_size)
return torch.stack((avg, avg_diff, std, std_diff), dim=1)
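# Note: get_ts_trends(x, win_size, gap_size) stacks, per time step, the rolling mean over win_size,
# the gap_size-lagged change of that mean, the rolling std, and the lagged change of the std
# (4 columns per input series). The tech factors above are built from this helper.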
"""run"""
def check_with_twap():
num_envs = 2
share_name = ['000768_XSHE', '000685_XSHE'][0]
beg_date = '2022-09-01'
end_date = '2022-09-01'
# env = OrderExecutionVecEnv(num_envs=num_envs, gpu_id=0, if_random=False,
# share_name=share_name, beg_date=beg_date, end_date=end_date)
env = OrderExecutionMinuteVecEnv(num_envs=num_envs, gpu_id=0, if_random=False,
share_name=share_name, beg_date=beg_date, end_date=end_date)
env.reset()
action = torch.zeros((num_envs, env.action_dim), dtype=torch.float32, device=env.device)
# 0: the delta price is 0 in default
# 1: the quantity scale is +1 in default
cumulative_rewards = torch.zeros(num_envs, dtype=torch.float32, device=env.device)
for i in range(env.max_step):
state, reward, done, _ = env.step(action)
cumulative_rewards += reward
if i % 64 == 0:
env_cumulative_rewards = env.total_asset / env.total_quantity
print(f"{i:8} {str(env_cumulative_rewards):64} {env.remain_quantity} {reward}")
print(env.total_asset / env.total_quantity)
print(env.total_asset)
print(env.remain_quantity)
print(f'cumulative_returns {env.cumulative_returns.mean():9.3f} {env.cumulative_returns.std(dim=0):9.3f}')
print(f'cumulative_rewards {cumulative_rewards.mean():9.3f} {cumulative_rewards.std(dim=0):9.3f}')
def run1201(): # plot
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE" # OMP: Error #15: Initializing libiomp5md.dll
import matplotlib.pyplot as plt
import numpy as np
num_envs = 4
env = OrderExecutionVecEnv(num_envs=num_envs, beg_date='2022-09-14', end_date='2022-09-14')
env.if_random = False
env.reset()
action = torch.zeros((4, 2), dtype=torch.float32, device=env.device)
action[0, 1] = -1.0
action[1, 1] = 0.0
action[2, 1] = 0.5
action[3, 1] = 1.0
# 0: the delta price is 0 in default
# 1: the quantity scale is +1 in default
ary_remain_quantity = []
ary_cum_returns = []
ary_cash = []
ary_last_price = []
cumulative_rewards = torch.zeros(num_envs, dtype=torch.float32, device=env.device)
for i in range(env.max_step):
state, reward, done, _ = env.step(action)
cumulative_rewards += reward
if done[0]:
break
ary_remain_quantity.append(env.remain_quantity.tolist())
ary_cum_returns.append((env.total_asset / env.total_quantity).tolist())
ary_cash.append(env.cash.tolist())
ary_last_price.append(env.price[env.t].tolist())
ary_remain_quantity = np.array(ary_remain_quantity)
ary_cum_returns = np.array(ary_cum_returns)
ary_cash = np.array(ary_cash)
ary_last_price = np.array(ary_last_price)
for env_i in range(1, num_envs):
# plt.plot(ary_remain_quantity[:, env_i])
# plt.plot(ary_cum_returns[:, env_i])
# plt.plot(ary_cash[:, env_i])
pass
plt.plot(ary_last_price)
plt.grid()
plt.show()
print(f'cumulative_returns {env.cumulative_returns.mean():9.3f} {env.cumulative_returns.std(dim=0):9.3f}')
print(f'cumulative_rewards {cumulative_rewards.mean():9.3f} {cumulative_rewards.std(dim=0):9.3f}')
if __name__ == '__main__':
check_with_twap()
| 33,366 | 41.559949 | 117 | py |
FinRL_Market_Simulator | FinRL_Market_Simulator-master/OrderExecution/plot.py | import os
import torch
from OrderExecutionEnv import OrderExecutionVecEnvForEval
"""run"""
def check__ask_price_volume():
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE" # OMP: Error #15: Initializing libiomp5md.dll
import matplotlib.pyplot as plt
import numpy as np
num_envs = 2
env = OrderExecutionVecEnvForEval(num_envs=num_envs, beg_date='2022-09-09', end_date='2022-09-09')
env.if_random = False
env.reset()
max_len1 = env.max_len + 1 # after env.reset()
xs = np.arange(max_len1)
print('xs.shape', xs.shape)
'''ask bid price (from level 1 to 5)'''
from matplotlib.cm import get_cmap
color_map = get_cmap('bwr') # Blue White Red, input 0.0 ~ 1.0 or 0 ~ 1000
ask_prices = np.array(env.ask_prices)
    ask_prices[ask_prices < 7.0] = 7.4  # todo near the end of each day some rows have abnormally low volume, so they are all set to the last normal value
print('ask_prices.shape', ask_prices.shape)
n_level, max_len1 = ask_prices.shape
    for i in range(n_level):  # todo this block plots the ask prices
        face_color = color_map(float(1 - i / n_level) * 0.2 + 0.2)  # todo blue gradient
if i + 1 == n_level:
plt.fill_between(xs, ask_prices[i], np.zeros_like(ask_prices[i]) + np.nanmax(ask_prices[i]),
facecolor=face_color)
else:
plt.fill_between(xs, ask_prices[i], ask_prices[i + 1],
facecolor=face_color)
bid_prices = np.array(env.bid_prices)
bid_prices[bid_prices < 1] = np.nan
print('bid_prices.shape', bid_prices.shape)
n_level, max_len1 = bid_prices.shape
    for i in range(n_level):  # todo this block draws the bidPrices
        # face_color = color_map(float(i / n_level) * 0.3 + 0.5 + 0.1)  # todo use a red gradient
        face_color = color_map(float(1 - i / n_level) * 0.2 + 0.2)  # todo use a blue gradient
if i + 1 == n_level:
plt.fill_between(xs, bid_prices[i], np.zeros_like(bid_prices[i]) + np.nanmin(bid_prices[i]),
facecolor=face_color)
else:
plt.fill_between(xs, bid_prices[i], bid_prices[i + 1],
facecolor=face_color)
last_price = np.array(env.last_price)
    plt.plot(xs, last_price, color='blue', label='last price')  # todo draw the last price in blue
'''policy: VWAP (using the data in future)'''
actions = torch.zeros((max_len1, num_envs, 2), dtype=torch.float32, device=env.device)
print('actions.shape', actions.shape)
volume_weights = (env.volume / env.volume.mean() - 1) / env.volume.std(dim=0) + 1
    k = 5  # smoothing: the effective convolution window is 2*k-1 = 9
volume_smooths = volume_weights.clone()
for i in range(1, k):
volume_smooths[i:] += volume_weights[:-i]
volume_smooths[:-i] += volume_weights[i:]
volume_smooths /= 2 * k - 1 # convolve
volume_smooths[:k] = volume_smooths[k]
volume_smooths[-k:] = volume_smooths[-k]
prev_price = env.last_price.clone()
prev_price[1:] = env.last_price[:-1]
curr_price = env.last_price * ((volume_smooths - 1.0) * 16 + 1.0)
curr_price = torch.round(curr_price * 100) / 100
curr_price = torch.min(torch.stack((curr_price, env.ask_prices[4])), dim=0)[0]
curr_price[curr_price < 7.3] = 7.4
print(curr_price)
for env_i in range(num_envs):
actions[:, env_i, 0] = curr_price - prev_price
actions[:, env_i, 1] = volume_smooths - 0.75
actions[:, :, 1] = actions[:, :, 1].clip(-1, +1)
    plt.plot(xs, curr_price, color='orange', label='VWAP price', linestyle='-')  # todo draw the VWAP policy's execution price in orange
plt.title(f'ask bid price (from level 1 to 5)')
plt.legend()
plt.grid()
plt.show()
# '''policy in env'''
# ary_remain_quantity = torch.zeros((num_envs, env.max_len + 1), dtype=torch.float32, device=env.device)
# ary_self_quantity = torch.zeros((num_envs, env.max_len + 1), dtype=torch.float32, device=env.device)
#
# cumulative_rewards = torch.zeros(num_envs, dtype=torch.float32, device=env.device)
# for i in range(1, env.max_len + 1):
# action = actions[i]
# state, reward, done, _ = env.step(action)
# cumulative_rewards += reward
# if done[0]:
# break
#
# ary_remain_quantity[:, i] = env.remain_quantity
# ary_self_quantity[:, i] = env.quantity
#
# ary_delta_quantity = ary_remain_quantity.clone()
# ary_delta_quantity[:, 1:] -= ary_delta_quantity[:, :-1]
# ary_delta_quantity = ary_delta_quantity[0]
#
# k = 5
# smooths = ary_delta_quantity.clone()
# for i in range(1, k):
# smooths[i:] += ary_delta_quantity[:-i]
# smooths[:-i] += ary_delta_quantity[i:]
# smooths /= 2 * k - 1 # convolve
# smooths[:k] = smooths[k]
# smooths[-k:] = smooths[-k]
#
# smooths = ary_delta_quantity.cpu().data.numpy()
#
# plt.plot(xs, smooths, label='VWAP quantity', linestyle='-')
#
# plt.title(f'ask bid price (from level 1 to 5)')
# plt.legend()
# plt.grid()
# plt.show()
def check__ask_price_volume_with_star():
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE" # OMP: Error #15: Initializing libiomp5md.dll
import matplotlib.pyplot as plt
import numpy as np
num_envs = 2
smooth_kernel = 7
share_name = ['000768_XSHE', '000685_XSHE'][1]
env = OrderExecutionVecEnvForEval(num_envs=num_envs,
beg_date='2022-09-09',
end_date='2022-09-09',
share_name=share_name)
env.if_random = False
env.reset()
max_len1 = env.max_len + 1 # after env.reset()
xs = np.arange(max_len1)
print('xs.shape', xs.shape)
'''ask bid price (from level 1 to 5)'''
from matplotlib.cm import get_cmap
color_map = get_cmap('bwr') # Blue White Red, input 0.0 ~ 1.0 or 0 ~ 1000
ask_prices = np.array(env.ask_prices)
print('ask_prices.shape', ask_prices.shape)
n_level, max_len1 = ask_prices.shape
    for i in range(n_level):  # todo this block draws the askPrices
        face_color = color_map(float(1 - i / n_level) * 0.2 + 0.2)  # todo use a blue gradient
if i + 1 == n_level:
plot_ask_price = np.zeros_like(ask_prices[i]) + np.nanmax(ask_prices[i])
plt.fill_between(xs, ask_prices[i], plot_ask_price, facecolor=face_color)
else:
plt.fill_between(xs, ask_prices[i], ask_prices[i + 1], facecolor=face_color)
bid_prices = np.array(env.bid_prices)
print('bid_prices.shape', bid_prices.shape)
n_level, max_len1 = bid_prices.shape
    for i in range(n_level):  # todo this block draws the bidPrices
        # face_color = color_map(float(i / n_level) * 0.3 + 0.5 + 0.1)  # red # todo use a red gradient
        face_color = color_map(float(1 - i / n_level) * 0.2 + 0.2)  # blue # todo use a blue gradient
if i + 1 == n_level:
plot_bid_price = np.zeros_like(bid_prices[i]) + np.nanmin(bid_prices[i])
plt.fill_between(xs, bid_prices[i], plot_bid_price, facecolor=face_color)
else:
plt.fill_between(xs, bid_prices[i], bid_prices[i + 1], facecolor=face_color)
last_price = np.array(env.last_price)
    plt.plot(xs, last_price, color='blue', label='last price')  # todo draw the last price in blue
'''policy action'''
actions = torch.zeros((max_len1, num_envs, 2), dtype=torch.float32, device=env.device)
print('actions.shape', actions.shape)
    # 0: the delta price is 0 by default
    # 1: the quantity scale is +1 by default
'''policy: TWAP (one times of basic_quantity)'''
# actions[:, :, 0] = 0.0
# actions[:, :, 1] = 0.0 # (0.0+1) times of basic_quantity
'''policy: VWAP (using the data in future)'''
volume_weights = (env.volume / env.volume.mean() - 1) / env.volume.std(dim=0) + 1
volume_smooths = torch_convolve(volume_weights, k=smooth_kernel, dim=0)
prev_price = env.last_price.clone()
prev_price[1:] = env.last_price[:-1]
curr_price = env.last_price * ((volume_smooths - 1.0) * 2 * env.last_price.mean() + 1.0)
curr_price = torch.round(curr_price * 100) / 100
curr_price = torch.min(torch.stack((curr_price, env.ask_prices[4])), dim=0)[0]
for env_i in range(num_envs):
actions[:, env_i, 0] = curr_price - prev_price
action_quantity = (volume_smooths - volume_smooths.mean()) * 12e3 + 1.8
actions[:, env_i, 1] = action_quantity - 1
actions[:, :, 1] = actions[:, :, 1].clip(-1, +1 + 3)
'''policy in env'''
env_i = 0
ten_remain_quantity = torch.zeros((num_envs, env.max_len + 1), dtype=torch.float32, device=env.device)
ten_remain_quantity[:, 0] = env.remain_quantity
ten_sell_quantity = torch.zeros((num_envs, env.max_len + 1), dtype=torch.float32, device=env.device)
ten_sell_quantity[:, 0] = env.get_curr_quantity(actions[0][:, 1])
ten_curr_price = torch.zeros((num_envs, env.max_len + 1), dtype=torch.float32, device=env.device)
ten_curr_price[:, 0] = env.get_curr_price(actions[0][:, 0])
ten_rewards = torch.zeros((num_envs, env.max_len + 1), dtype=torch.float32, device=env.device)
ten_rewards[:, 0] = 0
for i in range(1, env.max_len + 1):
action = actions[i]
state, reward, done, _ = env.step(action)
ten_rewards[:, i] = reward
if done[0]:
break
ten_remain_quantity[:, i] = env.remain_quantity
ten_sell_quantity[:, i] = env.curr_quantity
ten_curr_price[:, i] = env.curr_price
# ary_remain_quantity = ten_remain_quantity[env_i].cpu().data.numpy()
# plt.plot(xs, ary_remain_quantity, label='VWAP remain_quantity', linestyle='-')
ten_exec_quantity = torch.zeros_like(ten_remain_quantity)
ten_exec_quantity[:, 1:] = ten_remain_quantity[:, :-1] - ten_remain_quantity[:, 1:]
filled_bool = (ten_exec_quantity == ten_sell_quantity)[env_i]
not_filled_bool = (ten_exec_quantity < ten_sell_quantity)[env_i]
"""
    plt.scatter(marker=(5, 1))  # marker=(5, 1) means the 1st variant of the 5-pointed-star marker
https://matplotlib.org/3.1.1/gallery/lines_bars_and_markers/scatter_star_poly.html
"""
    # plt.plot(xs, curr_price, color='orange', label='VWAP price', linestyle='-')  # todo draw the VWAP policy's execution price in orange
filled_xs = xs[filled_bool]
filled_price = curr_price[filled_bool]
plt.scatter(filled_xs, filled_price, color='orange', label='VWAP price (filled)', marker=(5, 1))
not_filled_xs = xs[not_filled_bool]
not_filled_price = curr_price[not_filled_bool]
plt.scatter(not_filled_xs, not_filled_price, color='brown', label='VWAP price (not filled)', marker=(5, 1))
plt.title(f'ask bid price (from level 1 to 5)')
plt.legend()
plt.grid()
plt.show()
'''draw executed_quantity <= sell_quantity'''
# smo_exec_quantity = torch_convolve(ten_exec_quantity.T, k=smooth_kernel, dim=0).T # todo smooth
# ary_exec_quantity = smo_exec_quantity[env_i].cpu().data.numpy()
# plt.plot(xs, ary_exec_quantity, label='VWAP executed_quantity', linestyle='-')
#
# smo_sell_quantity = torch_convolve(ten_sell_quantity.T, k=smooth_kernel, dim=0).T # todo smooth
# ary_sell_quantity = smo_sell_quantity.cpu().data.numpy()[env_i]
# plt.plot(xs, ary_sell_quantity, label='VWAP sell_quantity', linestyle='-')
#
# plt.title(f'ask bid price (from level 1 to 5)')
# plt.legend()
# plt.grid()
# plt.show()
def torch_convolve(inp, k=9, dim=0):
assert dim == 0
out = inp.clone()
for i in range(1, k):
out[i:] += inp[:-i]
out[:-i] += inp[i:]
out /= 2 * k - 1 # convolve
out[:k] = out[k]
out[-k:] = out[-k]
return out
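# Hedged usage sketch (not part of the original script): a quick check of torch_convolve on a
# toy 1-D signal, relying only on the torch import above. The signal and the kernel size k are
# illustrative; the function smooths along dim 0 with an effective window of 2*k-1 samples and
# clamps the first/last k entries to the nearest interior value.
def _demo_torch_convolve():
    x = torch.arange(20, dtype=torch.float32)  # simple ramp signal
    smoothed = torch_convolve(x, k=3, dim=0)   # effective window of 2*3-1 = 5 samples
    print(x)
    print(smoothed)  # interior values follow the ramp; the borders are clamped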
if __name__ == '__main__':
# check__ask_price_volume()
check__ask_price_volume_with_star()
| 11,653 | 39.748252 | 111 | py |
MRMGA4VAD | MRMGA4VAD-main/calc_img_inputs.py | import torch
import numpy as np
import cv2
from collections import OrderedDict
import os
import glob
# import scipy.io as sio
from torch.utils.data import Dataset, DataLoader
from vad_datasets import ped_dataset, avenue_dataset, shanghaiTech_dataset
from FlowNet2_src import FlowNet2, flow_to_image
from torch.autograd import Variable
# from FlowNet2_src.flowlib import flow_to_image
def calc_optical_flow(dataset):
of_root_dir = './optical_flow'
len_original_root_dir = len(dataset.dir.split('/')) - 1
print(len_original_root_dir)
flownet2 = FlowNet2()
path = 'FlowNet2_src/pretrained/FlowNet2_checkpoint.pth.tar'
pretrained_dict = torch.load(path)['state_dict']
model_dict = flownet2.state_dict()
pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
model_dict.update(pretrained_dict)
flownet2.load_state_dict(model_dict)
flownet2.cuda()
dataset_loader = DataLoader(dataset=dataset, batch_size=1, shuffle=False, num_workers=1)
for idx, (batch, _) in enumerate(dataset_loader):
print(1)
print('Calculating optical flow for {}-th frame'.format(idx+1))
cur_img_addr = dataset.all_frame_addr[idx]
cur_img_name = cur_img_addr.split('/')[-1]
cur_img_name = cur_img_name.split('.')[0]
# parent path to store optical flow
of_path = of_root_dir
tmp_path_segment = cur_img_addr.split('/')[len_original_root_dir: -1]
for cur_seg in tmp_path_segment:
of_path = os.path.join(of_path, cur_seg)
if os.path.exists(of_path) is False:
os.makedirs(of_path, exist_ok=True)
# calculate new img inputs: optical flow by flownet2
cur_imgs = np.transpose(batch[0].numpy(), [0, 2, 3, 1])
frameRange = dataset.context_range(idx)
old_size = (cur_imgs.shape[2], cur_imgs.shape[1])
if frameRange[1] == frameRange[0] or frameRange[1] == frameRange[2]:
if cur_imgs.shape[3] == 1:
im1 = cv2.resize(cur_imgs[0], (512, 384))[:, :, np.newaxis]
im2 = cv2.resize(cur_imgs[1], (512, 384))[:, :, np.newaxis]
im1 = np.concatenate([im1] * 3, axis=2)
im2 = np.concatenate([im2] * 3, axis=2)
else:
im1 = cv2.resize(cur_imgs[0], (512, 384))
im2 = cv2.resize(cur_imgs[1], (512, 384))
ims = np.array([[im1, im2]]).transpose((0, 4, 1, 2, 3)).astype(np.float32)
ims = torch.from_numpy(ims)
ims_v = Variable(ims.cuda(), requires_grad=False)
pred_flow = flownet2(ims_v).cpu().data
pred_flow = pred_flow[0].numpy().transpose((1, 2, 0))
new_inputs = cv2.resize(pred_flow, old_size)
else:
if cur_imgs.shape[3] == 1:
im1 = cv2.resize(cur_imgs[1], (512, 384))[:, :, np.newaxis]
im2 = cv2.resize(cur_imgs[2], (512, 384))[:, :, np.newaxis]
im1 = np.concatenate([im1] * 3, axis=2)
im2 = np.concatenate([im2] * 3, axis=2)
else:
im1 = cv2.resize(cur_imgs[1], (512, 384))
im2 = cv2.resize(cur_imgs[2], (512, 384))
ims = np.array([[im1, im2]]).transpose((0, 4, 1, 2, 3)).astype(np.float32)
ims = torch.from_numpy(ims)
ims_v = Variable(ims.cuda(), requires_grad=False)
pred_flow = flownet2(ims_v).cpu().data
pred_flow = pred_flow[0].numpy().transpose((1, 2, 0))
# visualization
# cv2.imshow('of', flow_to_image(pred_flow))
# cv2.waitKey(0)
new_inputs = cv2.resize(pred_flow, old_size)
# save new raw inputs
np.save(os.path.join(of_path, cur_img_name+'.npy'), new_inputs)
if __name__ == '__main__':
    # mode = 'train' or 'test': calculate optical flow for the training split or the testing split respectively.
dataset = ped_dataset(dir='./raw_datasets/UCSDped2', context_frame_num=1, mode='train', border_mode='hard')
calc_optical_flow(dataset)
dataset = ped_dataset(dir='./raw_datasets/UCSDped2', context_frame_num=1, mode='test', border_mode='hard')
calc_optical_flow(dataset)
# The optical flow calculation of avenue and ShanghaiTech sets is basically the same as above
dataset = avenue_dataset(dir='./raw_datasets/avenue', context_frame_num=1, mode='train', border_mode='hard')
calc_optical_flow(dataset)
dataset = avenue_dataset(dir='./raw_datasets/avenue', context_frame_num=1, mode='test', border_mode='hard')
calc_optical_flow(dataset)
dataset = shanghaiTech_dataset(dir='./raw_datasets/ShanghaiTech', context_frame_num=1, mode='train', border_mode='hard')
calc_optical_flow(dataset)
dataset = shanghaiTech_dataset(dir='./raw_datasets/ShanghaiTech', context_frame_num=1, mode='test', border_mode='hard')
calc_optical_flow(dataset)
| 4,952 | 44.027273 | 138 | py |
MRMGA4VAD | MRMGA4VAD-main/test.py | from xml.sax.xmlreader import InputSource
import torch
import numpy as np
import os
from torch.utils.data import DataLoader
from vad_datasets import unified_dataset_interface
from vad_datasets import bbox_collate, img_tensor2numpy, img_batch_tensor2numpy, frame_size, cube_to_train_dataset
from state_model import ConvTransformer_recon_correct
import torch.nn as nn
from utils import save_roc_pr_curve_data
import time
import argparse
import os
import sys
# from helper.visualization_helper import visualize_pair, visualize_batch, visualize_recon, visualize_pair_map
pyfile_name = "train"
pyfile_name_score = os.path.basename(sys.argv[0]).split(".")[0]
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('boolean value expected')
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--dataset', default='UCSDped2', type=str)
parser.add_argument('-n_l', '--num_layers', default=3, type=int)
parser.add_argument('-n_h', '--num_heads', default=4, type=int)
parser.add_argument('-pe', '--positional_encoding', default='learned', type=str)
parser.add_argument('-e', '--epochs', default=20, type=int)
parser.add_argument('-b', '--batch_size', default=128, type=int)
parser.add_argument('-l', '--temporal_length', default=3, type=int)
parser.add_argument('-lam_r', '--lambda_raw', default=1, type=float)
parser.add_argument('-lam_o', '--lambda_of', default=1, type=float)
parser.add_argument('-w_r', '--w_raw', default=1, type=float)
parser.add_argument('-w_o', '--w_of', default=1, type=float)
parser.add_argument('-test_b', '--test_bbox_saved', type=str2bool, default=True)
parser.add_argument('-test_f', '--test_foreground_saved', type=str2bool, default=True)
parser.add_argument('-f', '--use_flow', default=True, type=str2bool)
parser.add_argument('-s', '--scores_saved', default=False, type=str2bool)
parser.add_argument('-ep', '--epsilon', default=0.01, type=float)
args = parser.parse_args()
def calc_block_idx(x_min, x_max, y_min, y_max, h_step, w_step, mode):
all_blocks = list()
center = np.array([(y_min + y_max) / 2, (x_min + x_max) / 2])
all_blocks.append(center + center)
if mode > 1:
all_blocks.append(np.array([y_min, center[1]]) + center)
all_blocks.append(np.array([y_max, center[1]]) + center)
all_blocks.append(np.array([center[0], x_min]) + center)
all_blocks.append(np.array([center[0], x_max]) + center)
if mode >= 9:
all_blocks.append(np.array([y_min, x_min]) + center)
all_blocks.append(np.array([y_max, x_max]) + center)
all_blocks.append(np.array([y_max, x_min]) + center)
all_blocks.append(np.array([y_min, x_max]) + center)
all_blocks = np.array(all_blocks) / 2
h_block_idxes = all_blocks[:, 0] / h_step
w_block_idxes = all_blocks[:, 1] / w_step
    h_block_idxes, w_block_idxes = list(h_block_idxes.astype(int)), list(w_block_idxes.astype(int))
# delete repeated elements
all_blocks = set([x for x in zip(h_block_idxes, w_block_idxes)])
all_blocks = [x for x in all_blocks]
return all_blocks
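# Hedged usage sketch (illustrative only, not from the original repo): calc_block_idx maps a
# foreground bbox onto the (h_block, w_block) grid used further below. The bbox coordinates,
# frame size and grid here are made-up numbers chosen just to show the call signature.
def _demo_calc_block_idx():
    h_step, w_step = 240 / 2, 360 / 2  # a 2x2 grid over an assumed 240x360 frame
    blocks = calc_block_idx(x_min=30, x_max=90, y_min=40, y_max=120,
                            h_step=h_step, w_step=w_step, mode=1)
    print(blocks)  # list of (h_block_idx, w_block_idx) tuples with duplicates removed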
# /*------------------------------------overall parameter setting------------------------------------------*/
dataset_name = args.dataset
raw_dataset_dir = 'raw_datasets'
foreground_extraction_mode = 'obj_det_with_motion'
data_root_dir = 'data'
modality = 'raw2flow'
mode ='test'
method = 'SelfComplete'
num_layers = args.num_layers
num_heads = args.num_heads
pe = args.positional_encoding
context_frame_num = args.temporal_length
context_of_num = args.temporal_length
patch_size = 32
h_block = 1
w_block = 1
test_block_mode = 1
bbox_saved = args.test_bbox_saved
foreground_saved = args.test_foreground_saved
motionThr = 0
epochs = args.epochs
# visual_save_dir = args.save_dir
# /*------------------------------------------foreground extraction----------------------------------------------*/
config_file = './obj_det_config/cascade_rcnn_r101_fpn_1x.py'
checkpoint_file = './obj_det_checkpoints/cascade_rcnn_r101_fpn_1x_20181129-d64ebac7.pth'
# set dataset for foreground extraction
dataset = unified_dataset_interface(dataset_name=dataset_name, dir=os.path.join(raw_dataset_dir, dataset_name),
context_frame_num=1, mode=mode, border_mode='hard')
if not bbox_saved:
from fore_det.inference import init_detector
from fore_det.obj_det_with_motion import imshow_bboxes, getObBboxes, getFgBboxes, delCoverBboxes
from fore_det.simple_patch import get_patch_loc
# build the model from a config file and a checkpoint file
model = init_detector(config_file, checkpoint_file, device='cuda:0')
collate_func = bbox_collate('test')
dataset_loader = DataLoader(dataset=dataset, batch_size=1, shuffle=False, num_workers=1,
collate_fn=collate_func.collate)
all_bboxes = list()
for idx in range(dataset.__len__()):
batch, _ = dataset.__getitem__(idx)
print('Extracting bboxes of {}-th frame'.format(idx + 1))
cur_img = img_tensor2numpy(batch[1])
if foreground_extraction_mode == 'obj_det_with_motion':
# A coarse detection of bboxes by pretrained object detector
ob_bboxes = getObBboxes(cur_img, model, dataset_name)
ob_bboxes = delCoverBboxes(ob_bboxes, dataset_name)
# further foreground detection by motion
fg_bboxes = getFgBboxes(cur_img, img_batch_tensor2numpy(batch), ob_bboxes, dataset_name, verbose=False)
if fg_bboxes.shape[0] > 0:
cur_bboxes = np.concatenate((ob_bboxes, fg_bboxes), axis=0)
else:
cur_bboxes = ob_bboxes
elif foreground_extraction_mode == 'obj_det':
# A coarse detection of bboxes by pretrained object detector
ob_bboxes = getObBboxes(cur_img, model, dataset_name)
cur_bboxes = delCoverBboxes(ob_bboxes, dataset_name)
elif foreground_extraction_mode == 'simple_patch':
patch_num_list = [(3, 4), (6, 8)]
cur_bboxes = list()
for h_num, w_num in patch_num_list:
cur_bboxes.append(get_patch_loc(frame_size[dataset_name][0], frame_size[dataset_name][1], h_num, w_num))
cur_bboxes = np.concatenate(cur_bboxes, axis=0)
else:
raise NotImplementedError
all_bboxes.append(cur_bboxes)
np.save(os.path.join(dataset.dir, 'bboxes_test_{}.npy'.format(foreground_extraction_mode)), all_bboxes)
print('bboxes for testing data saved!')
else:
all_bboxes = np.load(os.path.join(dataset.dir, 'bboxes_test_{}.npy'.format(foreground_extraction_mode)),
allow_pickle=True)
print('bboxes for testing data loaded!')
# /------------------------- extract foreground using extracted bboxes---------------------------------------/
# set dataset for foreground bbox extraction
if method == 'SelfComplete':
border_mode = 'elastic'
else:
border_mode = 'hard'
if not foreground_saved:
if modality == 'raw_datasets':
file_format = frame_size[dataset_name][2]
elif modality == 'raw2flow':
file_format1 = frame_size[dataset_name][2]
file_format2 = '.npy'
else:
file_format = '.npy'
# set dataset for foreground bbox extraction
if modality == 'raw2flow':
dataset = unified_dataset_interface(dataset_name=dataset_name, dir=os.path.join('raw_datasets', dataset_name),
context_frame_num=context_frame_num, mode=mode,
border_mode=border_mode, all_bboxes=all_bboxes, patch_size=patch_size,
file_format=file_format1)
dataset2 = unified_dataset_interface(dataset_name=dataset_name, dir=os.path.join('optical_flow', dataset_name),
context_frame_num=context_of_num, mode=mode,
border_mode=border_mode, all_bboxes=all_bboxes, patch_size=patch_size,
file_format=file_format2)
else:
dataset = unified_dataset_interface(dataset_name=dataset_name, dir=os.path.join(modality, dataset_name),
context_frame_num=context_frame_num, mode=mode,
border_mode=border_mode, all_bboxes=all_bboxes, patch_size=patch_size,
file_format=file_format)
if dataset_name == 'ShanghaiTech':
np.save(os.path.join(data_root_dir, modality, dataset_name + '_' + 'scene_idx.npy'), dataset.scene_idx)
scene_idx = dataset.scene_idx
foreground_set = [[[[] for ww in range(w_block)] for hh in range(h_block)] for ii in range(dataset.__len__())]
if modality == 'raw2flow':
foreground_set2 = [[[[] for ww in range(w_block)] for hh in range(h_block)] for ii in range(dataset.__len__())]
foreground_bbox_set = [[[[] for ww in range(w_block)] for hh in range(h_block)] for ii in range(dataset.__len__())]
h_step, w_step = frame_size[dataset_name][0] / h_block, frame_size[dataset_name][1] / w_block
dataset_loader = DataLoader(dataset=dataset, batch_size=1, shuffle=False, num_workers=1,
collate_fn=bbox_collate(mode=mode).collate)
for idx in range(dataset.__len__()):
batch, _ = dataset.__getitem__(idx)
if modality == 'raw2flow':
batch2, _ = dataset2.__getitem__(idx)
print('Extracting foreground in {}-th batch, {} in total'.format(idx + 1, dataset.__len__() // 1))
cur_bboxes = all_bboxes[idx]
if len(cur_bboxes) > 0:
batch = img_batch_tensor2numpy(batch)
if modality == 'raw2flow':
batch2 = img_batch_tensor2numpy(batch2)
if modality == 'optical_flow':
if len(batch.shape) == 4:
mag = np.sum(np.sum(np.sum(batch ** 2, axis=3), axis=2), axis=1)
else:
mag = np.mean(np.sum(np.sum(np.sum(batch ** 2, axis=4), axis=3), axis=2), axis=1)
elif modality == 'raw2flow':
if len(batch2.shape) == 4:
mag = np.sum(np.sum(np.sum(batch2 ** 2, axis=3), axis=2), axis=1)
else:
mag = np.mean(np.sum(np.sum(np.sum(batch2 ** 2, axis=4), axis=3), axis=2), axis=1)
else:
mag = np.ones(batch.shape[0]) * 10000
for idx_bbox in range(cur_bboxes.shape[0]):
if mag[idx_bbox] > motionThr:
all_blocks = calc_block_idx(cur_bboxes[idx_bbox, 0], cur_bboxes[idx_bbox, 2],
cur_bboxes[idx_bbox, 1], cur_bboxes[idx_bbox, 3], h_step, w_step,
mode=test_block_mode)
for (h_block_idx, w_block_idx) in all_blocks:
foreground_set[idx][h_block_idx][w_block_idx].append(batch[idx_bbox])
if modality == 'raw2flow':
foreground_set2[idx][h_block_idx][w_block_idx].append(batch2[idx_bbox])
foreground_bbox_set[idx][h_block_idx][w_block_idx].append(cur_bboxes[idx_bbox])
foreground_set = [[[np.array(foreground_set[ii][hh][ww]) for ww in range(w_block)] for hh in range(h_block)] for ii
in range(dataset.__len__())]
if modality == 'raw2flow':
foreground_set2 = [[[np.array(foreground_set2[ii][hh][ww]) for ww in range(w_block)] for hh in range(h_block)]
for ii in range(dataset.__len__())]
foreground_bbox_set = [
[[np.array(foreground_bbox_set[ii][hh][ww]) for ww in range(w_block)] for hh in range(h_block)] for ii in
range(dataset.__len__())]
if modality == 'raw2flow':
np.save(os.path.join(data_root_dir, modality,
dataset_name + '_' + 'foreground_test_{}_{}_border_{}-raw.npy'.format(foreground_extraction_mode,
context_frame_num, border_mode)),
foreground_set)
np.save(os.path.join(data_root_dir, modality,
dataset_name + '_' + 'foreground_test_{}_{}_border_{}-flow.npy'.format(foreground_extraction_mode, context_frame_num, border_mode)),
foreground_set2)
else:
np.save(os.path.join(data_root_dir, modality,
dataset_name + '_' + 'foreground_test_{}_{}_border_{}.npy'.format(foreground_extraction_mode, context_frame_num, border_mode)),
foreground_set)
np.save(os.path.join(data_root_dir, modality,
dataset_name + '_' + 'foreground_bbox_test_{}.npy'.format(foreground_extraction_mode)),
foreground_bbox_set)
print('foreground for testing data saved!')
else:
if dataset_name == 'ShanghaiTech':
scene_idx = np.load(os.path.join(data_root_dir, modality, dataset_name + '_' + 'scene_idx.npy'))
if modality == 'raw2flow':
foreground_set = np.load(os.path.join(data_root_dir, modality,
dataset_name + '_' + 'foreground_test_{}_{}_border_{}-raw.npy'.format(
foreground_extraction_mode, context_frame_num, border_mode)), allow_pickle=True)
foreground_set2 = np.load(os.path.join(data_root_dir, modality,
dataset_name + '_' + 'foreground_test_{}_{}_border_{}-flow.npy'.format(
foreground_extraction_mode, context_frame_num, border_mode)), allow_pickle=True)
else:
foreground_set = np.load(os.path.join(data_root_dir, modality,
dataset_name + '_' + 'foreground_test_{}_{}_border_{}.npy'.format(
foreground_extraction_mode, context_frame_num, border_mode)), allow_pickle=True)
foreground_bbox_set = np.load(os.path.join(data_root_dir, modality,
dataset_name + '_' + 'foreground_bbox_test_{}.npy'.format(
foreground_extraction_mode)), allow_pickle=True)
print('foreground for testing data loaded!')
# /*------------------------------------------Abnormal event detection----------------------------------------------*/
results_dir = 'results'
scores_saved = args.scores_saved
big_number = 100000
time_start=time.time()
loss_func_perturb = nn.MSELoss()
if scores_saved is False:
if method == 'SelfComplete':
h, w, _, sn = frame_size[dataset_name]
if border_mode == 'predict':
tot_frame_num = context_frame_num + 1
tot_of_num = context_of_num + 1
else:
tot_frame_num = 2 * context_frame_num + 1
tot_of_num = 2 * context_of_num + 1
rawRange = 10
if rawRange >= tot_frame_num:
rawRange = None
useFlow = args.use_flow
padding = False
assert modality == 'raw2flow'
        loss_func = nn.MSELoss(reduction='none')
in_channels = 3
pixel_result_dir = os.path.join(results_dir, dataset_name, 'score_mask_{}_head_{}_layer_{}_length_{}_pe_{}_epoch_{}_lambda_{}_{}_w_{}_{}_perturb_{}'.format(
border_mode, num_heads, num_layers, context_frame_num, pe, epochs, args.lambda_raw, args.lambda_of, args.w_raw, args.w_of, args.epsilon) + '_' + 'pyname_{}.npy'.format(pyfile_name_score))
os.makedirs(pixel_result_dir, exist_ok=True)
model_weights = torch.load(os.path.join(data_root_dir, modality, dataset_name + '_' + 'model_{}_head_{}_layer_{}_length_{}_pe_{}_epoch_{}_lambda_{}_{}'.format(
border_mode, num_heads, num_layers, context_frame_num, pe, epochs, args.lambda_raw, args.lambda_of) + '_' + 'pyname_{}.npy'.format(pyfile_name)))
if dataset_name == 'ShanghaiTech':
model_set = [[[[] for ww in range(len(model_weights[ss][hh]))] for hh in range(len(model_weights[ss]))]
for ss in range(len(model_weights))]
for ss in range(len(model_weights)):
for hh in range(len(model_weights[ss])):
for ww in range(len(model_weights[ss][hh])):
if len(model_weights[ss][hh][ww]) > 0:
cur_model = torch.nn.DataParallel(
ConvTransformer_recon_correct(
tot_raw_num=tot_frame_num, nums_hidden=[32, 64, 128], num_layers=num_layers,
num_dec_frames=1, num_heads=num_heads, with_residual=True,
with_pos=True, pos_kind=pe, mode=0, use_flow=args.use_flow)).cuda()
cur_model.load_state_dict(model_weights[ss][hh][ww][0])
model_set[ss][hh][ww].append(cur_model.eval())
# get training scores statistics
raw_training_scores_set = torch.load(os.path.join(data_root_dir, modality,
dataset_name + '_' + 'raw_training_scores_border_{}_head_{}_layer_{}_length_{}_pe_{}_epoch_{}_lambda_{}_{}'.format(
border_mode, num_heads, num_layers, context_frame_num, pe,
epochs, args.lambda_raw, args.lambda_of)+ '_' + 'pyname_{}.npy'.format(pyfile_name)))
of_training_scores_set = torch.load(os.path.join(data_root_dir, modality,
dataset_name + '_' + 'of_training_scores_border_{}_head_{}_layer_{}_length_{}_pe_{}_epoch_{}_lambda_{}_{}'.format(
border_mode, num_heads, num_layers, context_frame_num, pe,
epochs, args.lambda_raw, args.lambda_of) + '_' + 'pyname_{}.npy'.format(pyfile_name)))
            raw_stats_set = [[[(np.mean(raw_training_scores_set[ss][hh][ww]),
                                np.std(raw_training_scores_set[ss][hh][ww])) for ww in range(len(model_weights[ss][hh]))]
                              for hh in range(len(model_weights[ss]))] for ss in range(len(model_weights))]
            if useFlow:
                of_stats_set = [[[(np.mean(of_training_scores_set[ss][hh][ww]),
                                   np.std(of_training_scores_set[ss][hh][ww])) for ww in range(len(model_weights[ss][hh]))]
                                 for hh in range(len(model_weights[ss]))] for ss in range(len(model_weights))]
del raw_training_scores_set, of_training_scores_set
else:
model_set = [[[] for ww in range(len(model_weights[hh]))] for hh in range(len(model_weights))]
for hh in range(len(model_weights)):
for ww in range(len(model_weights[hh])):
if len(model_weights[hh][ww]) > 0:
cur_model = torch.nn.DataParallel(
ConvTransformer_recon_correct(
tot_raw_num=tot_frame_num, nums_hidden=[32, 64, 128], num_layers=num_layers,
num_dec_frames=1, num_heads=num_heads, with_residual=True,
with_pos=True, pos_kind=pe, mode=0, use_flow=args.use_flow)).cuda()
print(model_weights[hh][ww][0].keys())
cur_model.load_state_dict(model_weights[hh][ww][0])
model_set[hh][ww].append(cur_model.eval())
# get training scores statistics
raw_training_scores_set = torch.load(os.path.join(data_root_dir, modality,
dataset_name + '_' + 'raw_training_scores_border_{}_head_{}_layer_{}_length_{}_pe_{}_epoch_{}_lambda_{}_{}'.format(
border_mode, num_heads, num_layers, context_frame_num, pe, epochs, args.lambda_raw, args.lambda_of) + '_' + 'pyname_{}.npy'.format(pyfile_name)))
of_training_scores_set = torch.load(os.path.join(data_root_dir, modality,
dataset_name + '_' + 'of_training_scores_border_{}_head_{}_layer_{}_length_{}_pe_{}_epoch_{}_lambda_{}_{}'.format( border_mode, num_heads, num_layers, context_frame_num, pe, epochs, args.lambda_raw, args.lambda_of) + '_' + 'pyname_{}.npy'.format(pyfile_name)))
# mean and std of training scores
raw_stats_set = [
[(np.mean(raw_training_scores_set[hh][ww]), np.std(raw_training_scores_set[hh][ww])) for ww in
range(len(model_weights[hh]))] for hh in range(len(model_weights))]
if useFlow:
of_stats_set = [
[(np.mean(of_training_scores_set[hh][ww]), np.std(of_training_scores_set[hh][ww])) for ww in
range(len(model_weights[hh]))] for hh in range(len(model_weights))]
del raw_training_scores_set, of_training_scores_set
# Get scores
for frame_idx in range(len(foreground_set)):
print('Calculating scores for {}-th frame'.format(frame_idx))
cur_data_set = foreground_set[frame_idx]
cur_data_set2 = foreground_set2[frame_idx]
cur_bboxes = foreground_bbox_set[frame_idx]
cur_pixel_results = -1 * np.ones(shape=(h, w)) * big_number
for h_idx in range(len(cur_data_set)):
for w_idx in range(len(cur_data_set[h_idx])):
if len(cur_data_set[h_idx][w_idx]) > 0:
if dataset_name == 'ShanghaiTech':
if len(model_set[scene_idx[frame_idx] - 1][h_idx][w_idx]) > 0:
# print(scene_idx[frame_idx])
cur_model = model_set[scene_idx[frame_idx] - 1][h_idx][w_idx][0]
cur_dataset = cube_to_train_dataset(cur_data_set[h_idx][w_idx],
target=cur_data_set2[h_idx][w_idx])
cur_dataloader = DataLoader(dataset=cur_dataset,
batch_size=cur_data_set[h_idx][w_idx].shape[0],
shuffle=False)
for idx, (inputs, of_targets_all, _) in enumerate(cur_dataloader):
inputs = inputs.cuda().type(torch.cuda.FloatTensor)
inputs = torch.autograd.Variable(inputs, requires_grad= True)
of_targets_all = of_targets_all.cuda().type(torch.cuda.FloatTensor)
of_outputs, raw_outputs, of_targets, raw_targets = cur_model(inputs, of_targets_all)
loss_raw = loss_func_perturb(raw_targets, raw_outputs)
if useFlow:
loss_of = loss_func_perturb(of_targets.detach(), of_outputs)
if useFlow:
loss = loss_raw + loss_of
else:
loss = loss_raw
loss.backward()
gradient = inputs.grad.data
sign_gradient = torch.sign(gradient)
middle_start_indice = 3*context_frame_num
inputs.requires_grad = False
                                    inputs = torch.add(inputs.data, sign_gradient, alpha=-args.epsilon)
# end of perturb
inputs = torch.autograd.Variable(inputs)
of_outputs, raw_outputs, of_targets, raw_targets = cur_model(inputs, of_targets_all)
# # visualization
# for i in range(raw_targets.size(0)):
# visualize_recon(
# batch_1=img_batch_tensor2numpy(raw_targets.cpu().detach()[i]),
# batch_2=img_batch_tensor2numpy(raw_outputs.cpu().detach()[i]),
# frame_idx=frame_idx, obj_id = i, dataset_name = dataset_name, save_dir=visual_save_dir)
# visualize_recon(
# batch_1=img_batch_tensor2numpy(of_targets.cpu().detach()[i]),
# batch_2=img_batch_tensor2numpy(of_outputs.cpu().detach()[i]),
# frame_idx=frame_idx, obj_id = i, dataset_name = dataset_name, save_dir=visual_save_dir)
if useFlow:
of_scores = loss_func(of_targets, of_outputs).cpu().data.numpy()
of_scores = np.sum(np.sum(np.sum(np.sum(of_scores, axis=4), axis=3), axis=2), axis=1)
# print(of_scores)# mse
raw_scores = loss_func(raw_targets, raw_outputs).cpu().data.numpy()
raw_scores = np.sum(np.sum(np.sum(np.sum(raw_scores, axis=4), axis=3), axis=2), axis=1) # mse
# print(raw_scores)
raw_scores = (raw_scores - raw_stats_set[scene_idx[frame_idx] - 1][h_idx][w_idx][
0]) / raw_stats_set[scene_idx[frame_idx] - 1][h_idx][w_idx][1]
# print(raw_scores)
if useFlow:
of_scores = (of_scores - of_stats_set[scene_idx[frame_idx] - 1][h_idx][w_idx][
0]) / of_stats_set[scene_idx[frame_idx] - 1][h_idx][w_idx][1]
# print(of_scores)
if useFlow:
scores = args.w_raw * raw_scores + args.w_of* of_scores
# print(scores)
else:
scores = args.w_raw * raw_scores
else:
scores = np.ones(cur_data_set[h_idx][w_idx].shape[0], ) * big_number
else:
if len(model_set[h_idx][w_idx]) > 0:
cur_model = model_set[h_idx][w_idx][0]
cur_dataset = cube_to_train_dataset(cur_data_set[h_idx][w_idx],
target=cur_data_set2[h_idx][w_idx])
cur_dataloader = DataLoader(dataset=cur_dataset,
batch_size=cur_data_set[h_idx][w_idx].shape[0],
shuffle=False)
for idx, (inputs, of_targets_all, _) in enumerate(cur_dataloader):
inputs = inputs.cuda().type(torch.cuda.FloatTensor)
inputs = torch.autograd.Variable(inputs, requires_grad= True)
of_targets_all = of_targets_all.cuda().type(torch.cuda.FloatTensor)
of_outputs, raw_outputs, of_targets, raw_targets = cur_model(inputs, of_targets_all)
loss_raw = loss_func_perturb(raw_targets, raw_outputs)
if useFlow:
loss_of = loss_func_perturb(of_targets.detach(), of_outputs)
if useFlow:
loss = loss_raw + loss_of
else:
loss = loss_raw
loss.backward()
gradient = inputs.grad.data
sign_gradient = torch.sign(gradient)
middle_start_indice = 3*context_frame_num
inputs.requires_grad = False
                                    inputs = torch.add(inputs.data, sign_gradient, alpha=-args.epsilon)
# end of perturb
inputs = torch.autograd.Variable(inputs)
of_outputs, raw_outputs, of_targets, raw_targets = cur_model(inputs, of_targets_all)
# # visualization
# for i in range(raw_targets.size(0)):
# visualize_recon(
# batch_1=img_batch_tensor2numpy(raw_targets.cpu().detach()[i]),
# batch_2=img_batch_tensor2numpy(raw_outputs.cpu().detach()[i]),
# frame_idx=frame_idx, obj_id = i, dataset_name = dataset_name, save_dir=visual_save_dir)
# visualize_recon(
# batch_1=img_batch_tensor2numpy(of_targets.cpu().detach()[i]),
# batch_2=img_batch_tensor2numpy(of_outputs.cpu().detach()[i]),
# frame_idx=frame_idx, obj_id = i, dataset_name = dataset_name, save_dir=visual_save_dir)
# mse
if useFlow:
of_scores = loss_func(of_targets, of_outputs).cpu().data.numpy()
# of_scores = np.sum(of_scores, axis=(4, 3, 2)) # bl
#
# for l in range(of_scores.shape[1]):
# of_scores[:, l] = of_scores[:, l] * (abs(l - context_frame_num) + 1)
# of_scores = np.sum(of_scores, axis=1)
of_scores = np.sum(np.sum(np.sum(np.sum(of_scores, axis=4), axis=3), axis=2), axis=1)
raw_scores = loss_func(raw_targets, raw_outputs).cpu().data.numpy()
raw_scores = np.sum(np.sum(np.sum(np.sum(raw_scores, axis=4), axis=3), axis=2), axis=1)
# raw_scores = np.sum(raw_scores, axis=(4, 3, 2)) # bl
#
# for l in range(raw_scores.shape[1]):
# raw_scores[:, l] = raw_scores[:, l] * (abs(l - context_frame_num)+1)
# raw_scores = np.sum(raw_scores, axis=1)
# normalize scores using training scores
raw_scores = (raw_scores - raw_stats_set[h_idx][w_idx][0]) / \
raw_stats_set[h_idx][w_idx][1]
if useFlow:
of_scores = (of_scores - of_stats_set[h_idx][w_idx][0]) / \
of_stats_set[h_idx][w_idx][1]
if useFlow:
scores = args.w_raw * raw_scores + args.w_of * of_scores
else:
scores = args.w_raw * raw_scores
# print(scores.shape)
else:
scores = np.ones(cur_data_set[h_idx][w_idx].shape[0], ) * big_number
for m in range(scores.shape[0]):
cur_score_mask = -1 * np.ones(shape=(h, w)) * big_number
cur_score = scores[m]
bbox = cur_bboxes[h_idx][w_idx][m]
x_min, x_max = np.int(np.ceil(bbox[0])), np.int(np.ceil(bbox[2]))
y_min, y_max = np.int(np.ceil(bbox[1])), np.int(np.ceil(bbox[3]))
cur_score_mask[y_min:y_max, x_min:x_max] = cur_score
cur_pixel_results = np.max(
np.concatenate([cur_pixel_results[:, :, np.newaxis], cur_score_mask[:, :, np.newaxis]],
axis=2), axis=2)
torch.save(cur_pixel_results, os.path.join(pixel_result_dir, '{}'.format(frame_idx)))
else:
raise NotImplementedError
# /*------------------------------------------Evaluation----------------------------------------------*/
criterion = 'frame'
batch_size = 1
# set dataset for evaluation
dataset = unified_dataset_interface(dataset_name=dataset_name, dir=os.path.join(raw_dataset_dir, dataset_name),
context_frame_num=0, mode=mode, border_mode='hard')
dataset_loader = DataLoader(dataset=dataset, batch_size=batch_size, shuffle=False, num_workers=0,
collate_fn=bbox_collate(mode).collate)
print('Evaluating {} by {}-criterion:'.format(dataset_name, criterion))
if criterion == 'frame':
if dataset_name == 'ShanghaiTech':
all_frame_scores = [[] for si in set(dataset.scene_idx)]
all_targets = [[] for si in set(dataset.scene_idx)]
for idx, (_, target) in enumerate(dataset_loader):
print('Processing {}-th frame'.format(idx))
cur_pixel_results = torch.load(os.path.join(results_dir, dataset_name, 'score_mask_{}_head_{}_layer_{}_length_{}_pe_{}_epoch_{}_lambda_{}_{}_w_{}_{}_perturb_{}'.format(
border_mode, num_heads, num_layers, context_frame_num, pe, epochs, args.lambda_raw, args.lambda_of, args.w_raw, args.w_of, args.epsilon) + '_' + 'pyname_{}.npy'.format(pyfile_name_score), '{}'.format(idx) ))
all_frame_scores[scene_idx[idx] - 1].append(cur_pixel_results.max())
all_targets[scene_idx[idx] - 1].append(target[0].numpy().max())
all_frame_scores = [np.array(all_frame_scores[si]) for si in range(dataset.scene_num)]
all_targets = [np.array(all_targets[si]) for si in range(dataset.scene_num)]
all_targets = [all_targets[si] > 0 for si in range(dataset.scene_num)]
print(dataset.scene_num)
print(all_frame_scores)
print(all_targets)
results = [save_roc_pr_curve_data(all_frame_scores[si], all_targets[si], os.path.join(results_dir, dataset_name,
'{}_{}_{}_frame_results_scene_{}.npz'.format(
modality,
foreground_extraction_mode,
method, si + 1))) for
si in range(dataset.scene_num)]
results = np.array(results).mean()
print('Average frame-level AUC is {}'.format(results))
print(np.max(all_frame_scores))
print(np.min(all_frame_scores))
else:
all_frame_scores = list()
all_targets = list()
for idx, (_, target) in enumerate(dataset_loader):
print('Processing {}-th frame'.format(idx))
cur_pixel_results = torch.load(os.path.join(results_dir, dataset_name, 'score_mask_{}_head_{}_layer_{}_length_{}_pe_{}_epoch_{}_lambda_{}_{}_w_{}_{}_perturb_{}'.format(
border_mode, num_heads, num_layers, context_frame_num, pe, epochs, args.lambda_raw, args.lambda_of, args.w_raw, args.w_of, args.epsilon) + '_' + 'pyname_{}.npy'.format(pyfile_name_score), '{}'.format(idx)))
all_frame_scores.append(cur_pixel_results.max())
all_targets.append(target[0].numpy().max())
time_end = time.time()
print('time cost', time_end - time_start, 's')
all_frame_scores = np.array(all_frame_scores)
all_targets = np.array(all_targets)
all_targets = all_targets > 0
results_path = os.path.join(results_dir, dataset_name,
'{}_{}_{}_frame_results.npz'.format(modality, foreground_extraction_mode, method))
print('Results written to {}:'.format(results_path))
np.save('output_scores_{}_pyname_{}'.format(dataset_name, pyfile_name_score), all_frame_scores)
np.save('labels_{}'.format(dataset_name), all_targets)
print(all_frame_scores)
print(all_targets)
auc = save_roc_pr_curve_data(all_frame_scores, all_targets, results_path,verbose=True)
print(auc)
elif criterion == 'pixel':
if dataset_name != 'ShanghaiTech':
all_pixel_scores = list()
all_targets = list()
thr = 0.4
for idx, (_, target) in enumerate(dataset_loader):
print('Processing {}-th frame'.format(idx))
cur_pixel_results = torch.load(os.path.join(results_dir, dataset_name, 'score_mask', '{}'.format(idx)))
target_mask = target[0].numpy()
all_targets.append(target[0].numpy().max())
if all_targets[-1] > 0:
cur_effective_scores = cur_pixel_results[target_mask > 0]
sorted_score = np.sort(cur_effective_scores)
cut_off_idx = np.int(np.round((1 - thr) * cur_effective_scores.shape[0]))
cut_off_score = cur_effective_scores[cut_off_idx]
else:
cut_off_score = cur_pixel_results.max()
all_pixel_scores.append(cut_off_score)
all_frame_scores = np.array(all_pixel_scores)
all_targets = np.array(all_targets)
all_targets = all_targets > 0
results_path = os.path.join(results_dir, dataset_name,
'{}_{}_{}_pixel_results.npz'.format(modality, foreground_extraction_mode, method))
print('Results written to {}:'.format(results_path))
results = save_roc_pr_curve_data(all_frame_scores, all_targets, results_path)
else:
raise NotImplementedError
else:
raise NotImplementedError
| 39,523 | 56.868228 | 321 | py |
MRMGA4VAD | MRMGA4VAD-main/state_model.py | import torch
import torch.nn as nn
import numpy as np
from module import *
# LSTM
class ConvLSTMCell(nn.Module):
def __init__(self, input_dim, hidden_dim, kernel_size, bias):
"""
Initialize ConvLSTM cell.
Parameters
----------
input_dim: int
Number of channels of input tensor.
hidden_dim: int
Number of channels of hidden state.
kernel_size: (int, int)
Size of the convolutional kernel.
bias: bool
Whether or not to add the bias.
"""
super(ConvLSTMCell, self).__init__()
self.input_dim = input_dim
self.hidden_dim = hidden_dim
self.kernel_size = kernel_size
self.padding = kernel_size[0] // 2, kernel_size[1] // 2
self.bias = bias
self.conv = nn.Conv2d(in_channels=self.input_dim + self.hidden_dim,
out_channels=4 * self.hidden_dim,
kernel_size=self.kernel_size,
padding=self.padding,
bias=self.bias)
def forward(self, input_tensor, cur_state):
h_cur, c_cur = cur_state
combined = torch.cat([input_tensor, h_cur], dim=1) # concatenate along channel axis
combined_conv = self.conv(combined)
cc_i, cc_f, cc_o, cc_g = torch.split(combined_conv, self.hidden_dim, dim=1)
i = torch.sigmoid(cc_i)
f = torch.sigmoid(cc_f)
o = torch.sigmoid(cc_o)
g = torch.tanh(cc_g)
c_next = f * c_cur + i * g
h_next = o * torch.tanh(c_next)
return h_next, c_next
def init_hidden(self, batch_size, image_size):
height, width = image_size
return (torch.zeros(batch_size, self.hidden_dim, height, width, device=self.conv.weight.device),
torch.zeros(batch_size, self.hidden_dim, height, width, device=self.conv.weight.device))
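# Hedged usage sketch (not part of the original model code): a single ConvLSTMCell step on
# random CPU data; all shapes here are illustrative.
def _demo_convlstm_cell():
    cell = ConvLSTMCell(input_dim=3, hidden_dim=8, kernel_size=(3, 3), bias=True)
    x = torch.rand(2, 3, 16, 16)  # (batch, channels, height, width)
    h, c = cell.init_hidden(batch_size=2, image_size=(16, 16))
    h_next, c_next = cell(x, cur_state=[h, c])
    print(h_next.shape, c_next.shape)  # both torch.Size([2, 8, 16, 16])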
class LSTM(nn.Module):
"""
Parameters:
input_dim: Number of channels in input
hidden_dim: Number of hidden channels
kernel_size: Size of kernel in convolutions
num_layers: Number of LSTM layers stacked on each other
batch_first: Whether or not dimension 0 is the batch or not
bias: Bias or no bias in Convolution
return_all_layers: Return the list of computations for all layers
Note: Will do same padding.
Input:
A tensor of size B, T, C, H, W or T, B, C, H, W
Output:
A tuple of two lists of length num_layers (or length 1 if return_all_layers is False).
0 - layer_output_list is the list of lists of length T of each output
1 - last_state_list is the list of last states
each element of the list is a tuple (h, c) for hidden state and memory
Example:
>> x = torch.rand((32, 10, 64, 128, 128))
        >> convlstm = LSTM(input_dim=64, hidden_dim=16, kernel_size=(3, 3), num_layers=1,
        >>                 batch_first=True, bias=True, return_all_layers=False)
>> _, last_states = convlstm(x)
>> h = last_states[0][0] # 0 for layer index, 0 for h index
"""
def __init__(self, input_dim, hidden_dim, kernel_size, num_layers,
batch_first=False, bias=True, return_all_layers=False):
super(LSTM, self).__init__()
self._check_kernel_size_consistency(kernel_size)
# Make sure that both `kernel_size` and `hidden_dim` are lists having len == num_layers
kernel_size = self._extend_for_multilayer(kernel_size, num_layers)
hidden_dim = self._extend_for_multilayer(hidden_dim, num_layers)
if not len(kernel_size) == len(hidden_dim) == num_layers:
raise ValueError('Inconsistent list length.')
self.input_dim = input_dim
self.hidden_dim = hidden_dim
self.kernel_size = kernel_size
self.num_layers = num_layers
self.batch_first = batch_first
self.bias = bias
self.return_all_layers = return_all_layers
cell_list = []
for i in range(0, self.num_layers):
cur_input_dim = self.input_dim if i == 0 else self.hidden_dim[i - 1]
cell_list.append(ConvLSTMCell(input_dim=cur_input_dim,
hidden_dim=self.hidden_dim[i],
kernel_size=self.kernel_size[i],
bias=self.bias))
self.cell_list = nn.ModuleList(cell_list)
def forward(self, input_tensor, hidden_state=None):
"""
Parameters
----------
input_tensor: todo
5-D Tensor either of shape (t, b, c, h, w) or (b, t, c, h, w)
hidden_state: todo
None. todo implement stateful
Returns
-------
last_state_list, layer_output
"""
if not self.batch_first:
# (t, b, c, h, w) -> (b, t, c, h, w)
input_tensor = input_tensor.permute(1, 0, 2, 3, 4)
b, _, _, h, w = input_tensor.size()
# Implement stateful ConvLSTM
if hidden_state is not None:
raise NotImplementedError()
else:
# Since the init is done in forward. Can send image size here
hidden_state = self._init_hidden(batch_size=b,
image_size=(h, w))
layer_output_list = []
last_state_list = []
seq_len = input_tensor.size(1)
cur_layer_input = input_tensor
for layer_idx in range(self.num_layers):
h, c = hidden_state[layer_idx]
output_inner = []
for t in range(seq_len):
h, c = self.cell_list[layer_idx](input_tensor=cur_layer_input[:, t, :, :, :],
cur_state=[h, c])
output_inner.append(h)
layer_output = torch.stack(output_inner, dim=1)
cur_layer_input = layer_output
layer_output_list.append(layer_output)
last_state_list.append([h, c])
if not self.return_all_layers:
layer_output_list = layer_output_list[-1:]
last_state_list = last_state_list[-1:]
return layer_output_list, last_state_list
def _init_hidden(self, batch_size, image_size):
init_states = []
for i in range(self.num_layers):
init_states.append(self.cell_list[i].init_hidden(batch_size, image_size))
return init_states
@staticmethod
def _check_kernel_size_consistency(kernel_size):
if not (isinstance(kernel_size, tuple) or
(isinstance(kernel_size, list) and all([isinstance(elem, tuple) for elem in kernel_size]))):
raise ValueError('`kernel_size` must be tuple or list of tuples')
@staticmethod
def _extend_for_multilayer(param, num_layers):
if not isinstance(param, list):
param = [param] * num_layers
return param
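# Hedged usage sketch (illustrative only): running the stacked ConvLSTM wrapper defined above on
# a short random clip, mirroring the class docstring; the clip size and hidden widths are assumed.
def _demo_lstm_stack():
    clip = torch.rand(2, 5, 3, 16, 16)  # (batch, time, channels, height, width)
    convlstm = LSTM(input_dim=3, hidden_dim=[8, 8], kernel_size=(3, 3), num_layers=2,
                    batch_first=True, bias=True, return_all_layers=False)
    layer_outputs, last_states = convlstm(clip)
    print(layer_outputs[0].shape)   # torch.Size([2, 5, 8, 16, 16]), last layer only
    print(last_states[0][0].shape)  # torch.Size([2, 8, 16, 16]), final hidden state h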
# ------------------------------------------------------------------------------------------------
class double_conv(nn.Module):
'''(conv => BN => ReLU) * 2'''
def __init__(self, in_ch, out_ch):
super(double_conv, self).__init__()
self.conv= nn.Sequential(
nn.Conv2d(in_channels=in_ch, out_channels=out_ch, kernel_size=3, padding=1),
nn.BatchNorm2d(out_ch),
nn.ReLU(inplace=True),
nn.Conv2d(in_channels=out_ch, out_channels=out_ch, kernel_size=3, padding=1),
nn.BatchNorm2d(out_ch),
nn.ReLU(inplace=True),
)
def forward(self, x):
x = self.conv(x)
return x
class inconv(nn.Module):
'''
inconv only changes the number of channels
'''
def __init__(self, in_ch, out_ch):
super(inconv, self).__init__()
self.conv = double_conv(in_ch, out_ch)
def forward(self, x):
x = self.conv(x)
return x
class down(nn.Module):
def __init__(self, in_ch, out_ch):
super(down, self).__init__()
self.mpconv = nn.Sequential(
nn.MaxPool2d(kernel_size=2),
double_conv(in_ch, out_ch),
)
def forward(self, x):
x = self.mpconv(x)
return x
class up(nn.Module):
def __init__(self, in_ch, out_ch, bilinear=False):
super(up, self).__init__()
self.bilinear=bilinear
if self.bilinear:
self.up = nn.Sequential(nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True),
nn.Conv2d(in_ch, in_ch//2, 1),)
else:
self.up = nn.ConvTranspose2d(in_channels=in_ch, out_channels=in_ch, kernel_size=3, stride=2, padding=1, output_padding=1)
self.conv = double_conv(in_ch, out_ch)
def forward(self, x):
x = self.up(x)
# x = torch.cat([x2, x1], dim=1)
x = self.conv(x)
return x
class up_unet(nn.Module):
def __init__(self, in_ch, out_ch, bilinear=False):
super(up_unet, self).__init__()
self.bilinear=bilinear
if self.bilinear:
self.up = nn.Sequential(nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True),
nn.Conv2d(in_ch, in_ch//2, 1),)
else:
self.up = nn.ConvTranspose2d(in_channels=in_ch, out_channels=in_ch // 2, kernel_size=3, stride=2, padding=1, output_padding=1)
self.conv = double_conv(in_ch, out_ch)
def forward(self, x1, x2):
x1 = self.up(x1)
x = torch.cat([x2, x1], dim=1)
x = self.conv(x)
return x
class outconv(nn.Module):
def __init__(self, in_ch, out_ch):
super(outconv, self).__init__()
self.conv = nn.Conv2d(in_ch, out_ch, 1)
def forward(self, x):
x = self.conv(x)
return x
class Spatial_Encoder(nn.Module):
def __init__(self, nums_hidden, channel_num):
super(Spatial_Encoder, self).__init__()
self.inc = inconv(channel_num, nums_hidden[0])
self.down1 = down(nums_hidden[0], nums_hidden[1])
self.down2 = down(nums_hidden[1], nums_hidden[2])
# self.down3 = down(nums_hidden[2], nums_hidden[3])
def forward(self, x):
# print(x.shape)
x = self.inc(x)
# print(x.shape)
x = self.down1(x)
x = self.down2(x)
# x = self.down3(x)
return x
class Spatial_Decoder(nn.Module):
def __init__(self, nums_hidden, channel_num):
super(Spatial_Decoder, self).__init__()
# self.up1 = up(nums_hidden[3], nums_hidden[2])
self.up2 = up(nums_hidden[2], nums_hidden[1])
self.up3 = up(nums_hidden[1], nums_hidden[0])
self.out = outconv(nums_hidden[0], channel_num)
def forward(self, x):
# x = self.up1(x)
x = self.up2(x)
x = self.up3(x)
x = self.out(x)
return x
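# Hedged shape-check sketch (not in the original file): the encoder downsamples by a factor of 4
# per spatial dimension and the decoder upsamples back, so a 32x32 patch round-trips to 32x32.
# The channel widths follow the nums_hidden=[32, 64, 128] setting used by the training/testing
# scripts in this repo; the batch size is an assumption.
def _demo_spatial_autoencoder_shapes():
    nums_hidden = [32, 64, 128]
    enc = Spatial_Encoder(nums_hidden, channel_num=3)
    dec = Spatial_Decoder(nums_hidden, channel_num=3)
    x = torch.rand(4, 3, 32, 32)  # a batch of 32x32 RGB patches
    feat = enc(x)
    print(feat.shape)   # torch.Size([4, 128, 8, 8])
    recon = dec(feat)
    print(recon.shape)  # torch.Size([4, 3, 32, 32])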
class ConvTransformer_recon_correct(nn.Module):
def __init__(self, tot_raw_num, nums_hidden, num_layers=1, num_dec_frames=1, num_heads=4, with_residual=True,
with_pos=True, pos_kind='sine', mode=0, use_flow=True):
super(ConvTransformer_recon_correct, self).__init__()
self.raw_channel_num = 3 # RGB channel no.
self.of_channel_num = 2
# self.feature_embedding = FeatureEmbedding(model_depth)
self.feature_embedding = Spatial_Encoder(nums_hidden, self.raw_channel_num)
self.encoder = ConvTransformerEncoder(num_layers=num_layers, model_depth=nums_hidden[-1], num_heads=num_heads,
with_residual=with_residual, with_pos=with_pos, pos_kind=pos_kind)
self.prediction = Spatial_Decoder(nums_hidden, self.raw_channel_num)
if use_flow:
self.feature_embedding_of = Spatial_Encoder(nums_hidden, self.raw_channel_num)
self.encoder_of = ConvTransformerEncoder(num_layers=num_layers, model_depth=nums_hidden[-1],
num_heads=num_heads,
with_residual=with_residual, with_pos=with_pos, pos_kind=pos_kind)
self.prediction_of = Spatial_Decoder(nums_hidden, self.of_channel_num)
self.task = mode
self.num_dec_frames = num_dec_frames
self.tot_raw_num = tot_raw_num
self.tot_of_num = tot_raw_num
self.use_flow = use_flow
self.nums_hidden = nums_hidden
def forward(self, input, of_targets_full):
b,c_in,h,w = input.shape
assert c_in == self.raw_channel_num*self.tot_raw_num
# convert to 5 dimensions for inputs
input = input.permute(0, 2, 3, 1).contiguous() # b,h,w,c_in
        new_shape_input = input.size()[:-1] + (self.tot_raw_num, self.raw_channel_num) # b,h,w,l,c
input = input.view(*new_shape_input)
input = input.permute(0, 3, 4, 1, 2).contiguous().cuda() # b,l,c,h,w
of_targets_full = of_targets_full.permute(0, 2, 3, 1).contiguous()
new_shape_of_targets = of_targets_full.size()[:-1] + (self.tot_of_num, self.of_channel_num)
of_targets_full = of_targets_full.view(*new_shape_of_targets)
of_targets_full = of_targets_full.permute(0, 3, 4, 1, 2).contiguous().cuda()
# interpolation
input_frames = input
raw_targets = input # [...,1:]
input_frames = torch.reshape(input_frames, (-1, self.raw_channel_num, h, w))
img_tensor = self.feature_embedding(input_frames) # b*l,c_f,h,w
_, c_f, h_small, w_small = img_tensor.shape
img_tensor = torch.reshape(img_tensor, (b, -1, self.nums_hidden[-1], h_small, w_small)) # b,l,c_f,h,w
encoderout = self.encoder(img_tensor) # b,l,c_f,h,w
encoderout = torch.reshape(encoderout, (-1, self.nums_hidden[-1], h_small, w_small))
raw_outputs = self.prediction(encoderout)
raw_outputs = torch.reshape(raw_outputs, (-1, self.tot_raw_num, self.raw_channel_num, h, w))
if self.use_flow:
of_targets = of_targets_full
input_of = input
input_of = torch.reshape(input_of, (-1, self.raw_channel_num, h, w))
img_tensor_of = self.feature_embedding_of(input_of)
_, c_f, h_small, w_small = img_tensor_of.shape
img_tensor_of = torch.reshape(img_tensor_of, (b, -1, self.nums_hidden[-1], h_small, w_small)) # b,l,c_f,h,w
encoderout_of = self.encoder_of(img_tensor_of) # b,l,c_f,h,w
encoderout_of = torch.reshape(encoderout_of, (-1, self.nums_hidden[-1], h_small, w_small))
of_outputs = self.prediction_of(encoderout_of)
of_outputs = torch.reshape(of_outputs, (-1, self.tot_of_num, self.of_channel_num, h, w))
else:
of_outputs = []
of_targets = []
return of_outputs, raw_outputs, of_targets, raw_targets
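# Hedged usage sketch (illustrative only): the model expects the temporal cube flattened into the
# channel axis, i.e. raw inputs of shape (b, 3*tot_raw_num, h, w) and optical-flow targets of
# shape (b, 2*tot_raw_num, h, w). forward() moves tensors to CUDA internally, so this sketch
# assumes a GPU; the batch size, patch size and temporal length below are assumptions, while
# num_layers, num_heads and pos_kind follow the defaults of the training/testing scripts.
def _demo_conv_transformer_recon():
    tot_raw_num = 5  # e.g. 2*context_frame_num + 1 with a temporal context of 2
    model = ConvTransformer_recon_correct(tot_raw_num=tot_raw_num, nums_hidden=[32, 64, 128],
                                          num_layers=3, num_dec_frames=1, num_heads=4,
                                          with_residual=True, with_pos=True, pos_kind='learned',
                                          mode=0, use_flow=True).cuda()
    raw = torch.rand(2, 3 * tot_raw_num, 32, 32)
    flow = torch.rand(2, 2 * tot_raw_num, 32, 32)
    of_out, raw_out, of_tgt, raw_tgt = model(raw, flow)
    print(raw_out.shape)  # torch.Size([2, 5, 3, 32, 32])
    print(of_out.shape)   # torch.Size([2, 5, 2, 32, 32])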
class Unet(nn.Module):
def __init__(self, tot_raw_num, nums_hidden, use_flow=True):
super(Unet, self).__init__()
self.use_flow=use_flow
self.tot_raw_num = tot_raw_num
self.tot_of_num = tot_raw_num
self.raw_channel_num = 3
self.of_channel_num = 2
self.inc = inconv(3, nums_hidden[0])
self.down1 = down(nums_hidden[0], nums_hidden[1])
self.down2 = down(nums_hidden[1], nums_hidden[2])
self.up1 = up_unet(nums_hidden[2], nums_hidden[1])
self.up2 = up_unet(nums_hidden[1], nums_hidden[0])
self.out = outconv(nums_hidden[0], self.raw_channel_num)
#of
if self.use_flow:
self.inc_of = inconv(3, nums_hidden[0])
self.down1_of = down(nums_hidden[0], nums_hidden[1])
self.down2_of = down(nums_hidden[1], nums_hidden[2])
self.up1_of = up_unet(nums_hidden[2], nums_hidden[1])
self.up2_of = up_unet(nums_hidden[1], nums_hidden[0])
self.out_of = outconv(nums_hidden[0], self.of_channel_num)
def forward(self, input, of_targets_full):
b,c_in,h,w = input.shape
assert c_in == self.raw_channel_num*self.tot_raw_num
# convert to 5 dimensions for inputs
input = input.permute(0, 2, 3, 1).contiguous() # b,h,w,c_in
new_shape_input = input.size()[:-1] + (self.raw_channel_num, self.tot_raw_num) # b,h,w,c,l
input = input.view(*new_shape_input)
input = input.permute(0, 4, 3, 1, 2).contiguous().cuda() # b,l,c,h,w
of_targets_full = of_targets_full.permute(0, 2, 3, 1).contiguous()
new_shape_of_targets = of_targets_full.size()[:-1] + (self.of_channel_num, self.tot_of_num)
of_targets_full = of_targets_full.view(*new_shape_of_targets)
of_targets_full = of_targets_full.permute(0, 4, 3, 1, 2).contiguous().cuda()
# interpolation
input_frames = input
raw_targets = input # [...,1:]
input_frames = torch.reshape(input_frames, (-1, self.raw_channel_num, h, w))
out_1 = self.inc(input_frames)
out_2 = self.down1(out_1)
out_3 = self.down2(out_2)
raw_outputs = self.up1(out_3, out_2)
raw_outputs = self.up2(raw_outputs, out_1)
raw_outputs = self.out(raw_outputs)
raw_outputs = torch.reshape(raw_outputs, (-1, self.tot_raw_num, self.raw_channel_num, h, w))
if self.use_flow:
of_targets = of_targets_full
input_of = input
input_of = torch.reshape(input_of, (-1, self.raw_channel_num, h, w))
out_1_of = self.inc_of(input_of)
out_2_of = self.down1_of(out_1_of)
out_3_of = self.down2_of(out_2_of)
of_outputs = self.up1_of(out_3_of, out_2_of)
of_outputs = self.up2_of(of_outputs, out_1_of)
of_outputs = self.out_of(of_outputs)
of_outputs = torch.reshape(of_outputs, (-1, self.tot_raw_num, self.of_channel_num, h, w))
else:
of_outputs = []
of_targets = []
return of_outputs, raw_outputs, of_targets, raw_targets
class Conv_LSTM(nn.Module):
def __init__(self, tot_raw_num, nums_hidden, use_flow=True):
super(Conv_LSTM, self).__init__()
self.raw_channel_num = 3 # RGB channel no.
self.of_channel_num = 2
# self.feature_embedding = FeatureEmbedding(model_depth)
self.feature_embedding = Spatial_Encoder(nums_hidden, self.raw_channel_num)
self.prediction = Spatial_Decoder(nums_hidden, self.raw_channel_num)
self.convlstm = LSTM(input_dim = nums_hidden[-1], hidden_dim=[nums_hidden[-1],nums_hidden[-1],nums_hidden[-1],
nums_hidden[-1], nums_hidden[-1]],
kernel_size=(3,3), num_layers=5,
batch_first=True, bias=True, return_all_layers=False)
if use_flow:
self.feature_embedding_of = Spatial_Encoder(nums_hidden, self.raw_channel_num)
self.convlstm_of = LSTM(input_dim=nums_hidden[-1],
hidden_dim=[nums_hidden[-1], nums_hidden[-1], nums_hidden[-1]],
kernel_size=(3, 3), num_layers=3,
batch_first=True, bias=True, return_all_layers=False)
self.prediction_of = Spatial_Decoder(nums_hidden, self.of_channel_num)
self.tot_raw_num = tot_raw_num
self.tot_of_num = tot_raw_num
self.use_flow = use_flow
self.nums_hidden = nums_hidden
def forward(self, input, of_targets_full):
b,c_in,h,w = input.shape
assert c_in == self.raw_channel_num*self.tot_raw_num
# convert to 5 dimensions for inputs
input = input.permute(0, 2, 3, 1).contiguous() # b,h,w,c_in
new_shape_input = input.size()[:-1] + (self.raw_channel_num, self.tot_raw_num) # b,h,w,c,l
input = input.view(*new_shape_input)
input = input.permute(0, 4, 3, 1, 2).contiguous().cuda() # b,l,c,h,w
of_targets_full = of_targets_full.permute(0, 2, 3, 1).contiguous()
new_shape_of_targets = of_targets_full.size()[:-1] + (self.of_channel_num, self.tot_of_num)
of_targets_full = of_targets_full.view(*new_shape_of_targets)
of_targets_full = of_targets_full.permute(0, 4, 3, 1, 2).contiguous().cuda()
raw_targets = input
input_frames = input
input_frames = torch.reshape(input_frames, (-1, self.raw_channel_num, h, w))
img_tensor = self.feature_embedding(input_frames) # b*l,c_f,h,w
_, c_f, h_small, w_small = img_tensor.shape
img_tensor = torch.reshape(img_tensor, (-1, self.tot_raw_num, self.nums_hidden[-1], h_small, w_small))
img_tensor, _ = self.convlstm(img_tensor)
img_tensor = torch.reshape(img_tensor[0], (-1, self.nums_hidden[-1], h_small, w_small))
raw_outputs = self.prediction(img_tensor)
raw_outputs = torch.reshape(raw_outputs, (-1, self.tot_raw_num, self.raw_channel_num, h, w))
if self.use_flow:
of_targets = of_targets_full
input_of = torch.reshape(input, (-1, self.raw_channel_num, h, w))
img_tensor_of = self.feature_embedding_of(input_of)
_, c_f, h_small, w_small = img_tensor_of.shape
img_tensor_of = torch.reshape(img_tensor_of, (-1, self.tot_of_num, self.nums_hidden[-1], h_small, w_small))
img_tensor_of, _ = self.convlstm_of(img_tensor_of)
img_tensor_of = torch.reshape(img_tensor_of[0], (-1, self.nums_hidden[-1], h_small, w_small))
of_outputs = self.prediction_of(img_tensor_of)
of_outputs = torch.reshape(of_outputs, (-1, self.tot_of_num, self.of_channel_num, h, w))
else:
of_outputs = []
of_targets = []
return of_outputs, raw_outputs, of_targets, raw_targets
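# Shared forward contract of the models above: `input` packs tot_raw_num RGB frames along the
# channel axis as (b, 3*tot_raw_num, h, w); it is unpacked to (b, l, 3, h, w) before processing,
# and each model returns (of_outputs, raw_outputs, of_targets, raw_targets), where the optical-
# flow pair is left as empty lists when use_flow is False.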
| 21,874 | 36.521441 | 139 | py |
MRMGA4VAD | MRMGA4VAD-main/vad_datasets.py | import torch
import numpy as np
import cv2
from collections import OrderedDict
import os
import glob
import scipy.io as sio
import torch
from torch.utils.data import Dataset, DataLoader
import torchvision.transforms as transforms
transform = transforms.Compose([
transforms.ToTensor(),
])
# frame_size: the frame information of each dataset: (h, w, file_format, scene_num)
frame_size = {'UCSDped1' : (158, 238, '.tif', 1), 'UCSDped2': (240, 360, '.tif', 1), 'avenue': (360, 640, '.jpg', 1), 'ShanghaiTech': (480, 856, '.jpg', 1)}
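# e.g. frame_size['UCSDped2'] -> (240, 360, '.tif', 1): 240x360 .tif frames from a single scene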
def get_inputs(file_addr):
file_format = file_addr.split('.')[-1]
if file_format == 'mat':
return sio.loadmat(file_addr, verify_compressed_data_integrity=False)['uv']
elif file_format == 'npy':
return np.load(file_addr)
else:
return cv2.imread(file_addr)
def img_tensor2numpy(img):
# mutual transformation between ndarray-like imgs and Tensor-like images
# both intensity and rgb images are represented by 3-dim data
if isinstance(img, np.ndarray):
return torch.from_numpy(np.transpose(img, [2, 0, 1]))
else:
return np.transpose(img, [1, 2, 0]).numpy()
def img_batch_tensor2numpy(img_batch):
# both intensity and rgb image batch are represented by 4-dim data
if isinstance(img_batch, np.ndarray):
if len(img_batch.shape) == 4:
return torch.from_numpy(np.transpose(img_batch, [0, 3, 1, 2]))
else:
return torch.from_numpy(np.transpose(img_batch, [0, 1, 4, 2, 3]))
else:
if len(img_batch.numpy().shape) == 4:
return np.transpose(img_batch, [0, 2, 3, 1]).numpy()
else:
return np.transpose(img_batch, [0, 1, 3, 4, 2]).numpy()
class bbox_collate:
def __init__(self, mode):
self.mode = mode
def collate(self, batch):
if self.mode == 'train':
return bbox_collate_train(batch)
elif self.mode == 'test':
return bbox_collate_test(batch)
else:
raise NotImplementedError
def bbox_collate_train(batch):
batch_data = [x[0] for x in batch]
batch_target = [x[1] for x in batch]
return torch.cat(batch_data, dim=0), batch_target
def bbox_collate_test(batch):
batch_data = [x[0] for x in batch]
batch_target = [x[1] for x in batch]
return batch_data, batch_target
def get_foreground(img, bboxes, patch_size):
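    # Crop every bbox (x_min, y_min, x_max, y_max) out of a single frame (3-dim input) or a
    # temporal stack of frames (4-dim input), resize each crop to patch_size x patch_size and
    # return the patches (or spatio-temporal cubes) in channel-first layout.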
img_patches = list()
if len(img.shape) == 3:
for i in range(len(bboxes)):
x_min, x_max = np.int(np.ceil(bboxes[i][0])), np.int(np.ceil(bboxes[i][2]))
y_min, y_max = np.int(np.ceil(bboxes[i][1])), np.int(np.ceil(bboxes[i][3]))
cur_patch = img[:, y_min:y_max, x_min:x_max]
cur_patch = cv2.resize(np.transpose(cur_patch, [1, 2, 0]), (patch_size, patch_size))
img_patches.append(np.transpose(cur_patch, [2, 0, 1]))
img_patches = np.array(img_patches)
elif len(img.shape) == 4:
for i in range(len(bboxes)):
x_min, x_max = np.int(np.ceil(bboxes[i][0])), np.int(np.ceil(bboxes[i][2]))
y_min, y_max = np.int(np.ceil(bboxes[i][1])), np.int(np.ceil(bboxes[i][3]))
cur_patch_set = img[:, :, y_min:y_max, x_min:x_max]
tmp_set = list()
for j in range(img.shape[0]):
cur_patch = cur_patch_set[j]
cur_patch = cv2.resize(np.transpose(cur_patch, [1, 2, 0]), (patch_size, patch_size))
tmp_set.append(np.transpose(cur_patch, [2, 0, 1]))
cur_cube = np.array(tmp_set)
img_patches.append(cur_cube)
img_patches = np.array(img_patches)
return img_patches
def unified_dataset_interface(dataset_name, dir, mode='train', context_frame_num=0, border_mode='elastic', file_format=None, all_bboxes=None, patch_size=32):
if file_format is None:
if dataset_name in ['UCSDped1', 'UCSDped2']:
file_format = '.tif'
elif dataset_name in ['avenue', 'ShanghaiTech']:
file_format = '.jpg'
else:
raise NotImplementedError
if dataset_name in ['UCSDped1', 'UCSDped2']:
dataset = ped_dataset(dir=dir, context_frame_num=context_frame_num, mode=mode, border_mode=border_mode, all_bboxes=all_bboxes, patch_size=patch_size, file_format=file_format)
elif dataset_name == 'avenue':
dataset = avenue_dataset(dir=dir, context_frame_num=context_frame_num, mode=mode, border_mode=border_mode, all_bboxes=all_bboxes, patch_size=patch_size, file_format=file_format)
elif dataset_name == 'ShanghaiTech':
dataset = shanghaiTech_dataset(dir=dir, context_frame_num=context_frame_num, mode=mode, border_mode=border_mode, all_bboxes=all_bboxes, patch_size=patch_size, file_format=file_format)
else:
raise NotImplementedError
return dataset
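# A minimal usage sketch (the relative path below is an assumption that mirrors train.py):
#   dataset = unified_dataset_interface('avenue', dir='raw_datasets/avenue', mode='train',
#                                       context_frame_num=3, border_mode='elastic')
#   frames, _ = dataset[0]   # (2*context_frame_num+1, 3, h, w) array of consecutive frames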
class patch_to_train_dataset(Dataset):
def __init__(self, data, tranform=transform):
self.data = data
self.transform = tranform
def __len__(self):
return self.data.shape[0]
def __getitem__(self, indice):
if self.transform is not None:
return self.transform(self.data[indice])
else:
return self.data[indice]
class cube_to_train_dataset_ssl(Dataset):
def __init__(self, data, labels=None, transform=transform):
self.data = data # N,l,h,w,c
if labels is not None:
self.labels = labels
else:
self.labels = None
self.transform = transform
def __len__(self):
return self.data.shape[0]
def __getitem__(self, indice):
cur_data = self.data[indice]
if self.transform is not None:
cur_data2return = []
for idx in range(cur_data.shape[0]):
cur_data2return.append(self.transform(cur_data[idx])) # h,w,c -> c,h,w + ->[0,1]
cur_data2return = torch.stack(cur_data2return, 0) # l,c,h,w
else:
cur_data2return = cur_data
if self.labels is not None:
cur_label = self.labels[indice]
return cur_data2return, cur_label
else:
return cur_data2return
class cube_to_train_dataset(Dataset):
def __init__(self, data, target=None, tranform=transform):
if len(data.shape) == 4:
data = data[:, np.newaxis, :, :, :]
if target is not None:
if len(target.shape) == 4:
target = target[:, np.newaxis, :, :, :]
self.data = data
self.target = target
self.transform = tranform
def __len__(self):
return self.data.shape[0]
def __getitem__(self, indice):
if self.target is None:
cur_data = self.data[indice]
cur_train_data = cur_data[:-1]
cur_target = cur_data[-1]
cur_train_data = np.transpose(cur_train_data, [1, 2, 0, 3])
cur_train_data = np.reshape(cur_train_data, (cur_train_data.shape[0], cur_train_data.shape[1], -1))
if self.transform is not None:
return self.transform(cur_train_data), self.transform(cur_target)
else:
return cur_train_data, cur_target
else:
cur_data = self.data[indice]
cur_train_data = cur_data
cur_target = self.target[indice]
cur_target2 = cur_data.copy()
cur_train_data = np.transpose(cur_train_data, [1, 2, 0, 3])
cur_train_data = np.reshape(cur_train_data, (cur_train_data.shape[0], cur_train_data.shape[1], -1))
cur_target = np.transpose(cur_target, [1, 2, 0, 3])
cur_target = np.reshape(cur_target, (cur_target.shape[0], cur_target.shape[1], -1))
cur_target2 = np.transpose(cur_target2, [1, 2, 0, 3])
cur_target2 = np.reshape(cur_target2, (cur_target2.shape[0], cur_target2.shape[1], -1))
if self.transform is not None:
return self.transform(cur_train_data), self.transform(cur_target), self.transform(cur_target2)
else:
return cur_train_data, cur_target, cur_target2
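# Note on the cube layout above: each item is assumed to be stored as (length, h, w, c);
# __getitem__ folds the temporal axis into the channel axis (an (h, w, length*c) array before
# ToTensor), so the stacked context frames are fed to the network as extra input channels.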
class ped_dataset(Dataset):
'''
    Loading dataset for UCSD Ped1/Ped2
'''
def __init__(self, dir, mode='train', context_frame_num=0, border_mode='elastic', file_format='.tif', all_bboxes=None, patch_size=32):
'''
:param dir: The directory to load UCSD ped2 dataset
mode: train/test dataset
'''
self.dir = dir
self.mode = mode
self.videos = OrderedDict()
self.all_frame_addr = list()
self.frame_video_idx = list()
self.tot_frame_num = 0
self.context_frame_num = context_frame_num
self.border_mode = border_mode
self.file_format = file_format
self.all_bboxes = all_bboxes
self.patch_size = patch_size
self.return_gt = False
if mode == 'test':
self.all_gt_addr = list()
self.gts = OrderedDict()
if self.dir[-1] == '1':
self.h = 158
self.w = 238
else:
self.h = 240
self.w = 360
self.dataset_init()
def __len__(self):
return self.tot_frame_num
def dataset_init(self):
if self.mode == 'train':
data_dir = os.path.join(self.dir, 'Train')
elif self.mode == 'test':
data_dir = os.path.join(self.dir, 'Test')
else:
raise NotImplementedError
if self.mode == 'train':
video_dir_list = glob.glob(os.path.join(data_dir, '*'))
idx = 1
for video in sorted(video_dir_list):
video_name = video.split('/')[-1]
if 'Train' in video_name:
self.videos[video_name] = {}
self.videos[video_name]['path'] = video
self.videos[video_name]['frame'] = glob.glob(os.path.join(video, '*'+self.file_format))
self.videos[video_name]['frame'].sort()
self.videos[video_name]['length'] = len(self.videos[video_name]['frame'])
self.frame_video_idx += [idx] * self.videos[video_name]['length']
idx += 1
# merge different frames of different videos into one list
for _, cont in self.videos.items():
self.all_frame_addr += cont['frame']
self.tot_frame_num = len(self.all_frame_addr)
elif self.mode == 'test':
dir_list = glob.glob(os.path.join(data_dir, '*'))
video_dir_list = []
gt_dir_list = []
for dir in sorted(dir_list):
if '_gt' in dir:
gt_dir_list.append(dir)
self.return_gt = True
else:
name = dir.split('/')[-1]
if 'Test' in name:
video_dir_list.append(dir)
# load frames for test
idx = 1
for video in sorted(video_dir_list):
video_name = video.split('/')[-1]
self.videos[video_name] = {}
self.videos[video_name]['path'] = video
self.videos[video_name]['frame'] = glob.glob(os.path.join(video, '*'+self.file_format))
self.videos[video_name]['frame'].sort()
self.videos[video_name]['length'] = len(self.videos[video_name]['frame'])
self.frame_video_idx += [idx] * self.videos[video_name]['length']
idx += 1
# merge different frames of different videos into one list
for _, cont in self.videos.items():
self.all_frame_addr += cont['frame']
self.tot_frame_num = len(self.all_frame_addr)
# load ground truth of frames
if self.return_gt:
for gt in sorted(gt_dir_list):
gt_name = gt.split('/')[-1]
self.gts[gt_name] = {}
self.gts[gt_name]['gt_frame'] = glob.glob(os.path.join(gt, '*.bmp'))
self.gts[gt_name]['gt_frame'].sort()
# merge different frames of different videos into one list
for _, cont in self.gts.items():
self.all_gt_addr += cont['gt_frame']
else:
raise NotImplementedError
def context_range(self, indice):
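        # Collect the temporal window around `indice` according to the border mode:
        #  - 'elastic': shift the whole window inwards at video/dataset boundaries so that it
        #    always holds 2*context_frame_num+1 frames;
        #  - 'predict': use only the context_frame_num preceding frames plus the current one;
        #  - otherwise ('hard'): clip the window at the boundary and pad by repeating the
        #    first/last valid frame.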
if self.border_mode == 'elastic':
# check head and tail
if indice - self.context_frame_num < 0:
indice = self.context_frame_num
elif indice + self.context_frame_num > self.tot_frame_num - 1:
indice = self.tot_frame_num - 1 - self.context_frame_num
start_idx = indice - self.context_frame_num
end_idx = indice + self.context_frame_num
need_context_num = 2 * self.context_frame_num + 1
elif self.border_mode == 'predict':
if indice - self.context_frame_num < 0:
start_idx = 0
else:
start_idx = indice - self.context_frame_num
end_idx = indice
need_context_num = self.context_frame_num + 1
else:
# check head and tail
if indice - self.context_frame_num < 0:
start_idx = 0
else:
start_idx = indice - self.context_frame_num
if indice + self.context_frame_num > self.tot_frame_num - 1:
end_idx = self.tot_frame_num - 1
else:
end_idx = indice + self.context_frame_num
need_context_num = 2 * self.context_frame_num + 1
center_idx = self.frame_video_idx[indice]
video_idx = self.frame_video_idx[start_idx:end_idx + 1]
pad = need_context_num - len(video_idx)
if pad > 0:
if start_idx == 0:
video_idx = [video_idx[0]] * pad + video_idx
else:
video_idx = video_idx + [video_idx[-1]] * pad
tmp = np.array(video_idx) - center_idx
offset = tmp.sum()
if tmp[0] != 0 and tmp[-1] != 0: # extreme condition that is not likely to happen
print('The video is too short or the context frame number is too large!')
raise NotImplementedError
if pad == 0 and offset == 0: # all frames are from the same video
idx = [x for x in range(start_idx, end_idx+1)]
return idx
else:
if self.border_mode == 'elastic':
idx = [x for x in range(start_idx - offset, end_idx - offset + 1)]
return idx
elif self.border_mode == 'predict':
if pad > 0 and np.abs(offset) > 0:
print('The video is too short or the context frame number is too large!')
raise NotImplementedError
idx = [x for x in range(start_idx - offset, end_idx + 1)]
idx = [idx[0]] * np.maximum(np.abs(offset), pad) + idx
return idx
else:
if pad > 0 and np.abs(offset) > 0:
print('The video is too short or the context frame number is too large!')
raise NotImplementedError
if offset > 0:
idx = [x for x in range(start_idx, end_idx - offset + 1)]
idx = idx + [idx[-1]] * np.abs(offset)
return idx
elif offset < 0:
idx = [x for x in range(start_idx - offset, end_idx + 1)]
idx = [idx[0]] * np.abs(offset) + idx
return idx
if pad > 0:
if start_idx == 0:
idx = [x for x in range(start_idx - offset, end_idx + 1)]
idx = [idx[0]] * pad + idx
return idx
else:
idx = [x for x in range(start_idx, end_idx - offset + 1)]
idx = idx + [idx[-1]] * pad
return idx
def __getitem__(self, indice):
if self.mode == 'train':
if self.context_frame_num == 0:
img_batch = np.transpose(get_inputs(self.all_frame_addr[indice]), [2, 0, 1])
if self.all_bboxes is not None:
img_batch = get_foreground(img=img_batch, bboxes=self.all_bboxes[indice], patch_size=self.patch_size)
img_batch = torch.from_numpy(img_batch)
else:
frame_range = self.context_range(indice=indice)
img_batch = []
for idx in frame_range:
cur_img = np.transpose(get_inputs(self.all_frame_addr[idx]), [2, 0, 1])
img_batch.append(cur_img)
img_batch = np.array(img_batch)
if self.all_bboxes is not None:
img_batch = get_foreground(img=img_batch, bboxes=self.all_bboxes[indice], patch_size=self.patch_size)
img_batch = torch.from_numpy(img_batch)
return img_batch, torch.zeros(1) # to unify the interface
elif self.mode == 'test':
if self.context_frame_num == 0:
img_batch = np.transpose(get_inputs(self.all_frame_addr[indice]), [2, 0, 1])
if self.all_bboxes is not None:
img_batch = get_foreground(img=img_batch, bboxes=self.all_bboxes[indice], patch_size=self.patch_size)
img_batch = torch.from_numpy(img_batch)
if self.return_gt:
gt_batch = cv2.imread(self.all_gt_addr[indice], cv2.IMREAD_GRAYSCALE)
gt_batch = torch.from_numpy(gt_batch)
else:
frame_range = self.context_range(indice=indice)
img_batch = []
for idx in frame_range:
cur_img = np.transpose(get_inputs(self.all_frame_addr[idx]), [2, 0, 1])
img_batch.append(cur_img)
img_batch = np.array(img_batch)
if self.all_bboxes is not None:
img_batch = get_foreground(img=img_batch, bboxes=self.all_bboxes[indice], patch_size=self.patch_size)
img_batch = torch.from_numpy(img_batch)
if self.return_gt:
gt_batch = cv2.imread(self.all_gt_addr[indice], cv2.IMREAD_GRAYSCALE)
gt_batch = torch.from_numpy(gt_batch)
if self.return_gt:
return img_batch, gt_batch
else:
return img_batch, torch.zeros(1) # to unify the interface
else:
raise NotImplementedError
class avenue_dataset(Dataset):
'''
Loading dataset for Avenue
'''
def __init__(self, dir, mode='train', context_frame_num=0, border_mode='elastic', file_format='.jpg', all_bboxes=None, patch_size=32):
'''
        :param dir: The directory of the Avenue dataset
        :param mode: train/test dataset
'''
self.dir = dir
self.mode = mode
self.videos = OrderedDict()
self.all_frame_addr = list()
self.frame_video_idx = list()
self.tot_frame_num = 0
self.context_frame_num = context_frame_num
self.border_mode = border_mode
self.file_format = file_format
self.all_bboxes = all_bboxes
self.patch_size = patch_size
self.return_gt = False
if mode == 'test':
self.all_gt = list()
self.dataset_init()
pass
def __len__(self):
return self.tot_frame_num
def dataset_init(self):
if self.mode == 'train':
data_dir = os.path.join(self.dir, 'training', 'frames')
elif self.mode == 'test':
data_dir = os.path.join(self.dir, 'testing', 'frames')
gt_dir = os.path.join(self.dir, 'ground_truth_demo', 'testing_label_mask')
if os.path.exists(gt_dir):
self.return_gt = True
else:
raise NotImplementedError
if self.mode == 'train':
video_dir_list = glob.glob(os.path.join(data_dir, '*'))
idx = 1
for video in sorted(video_dir_list):
video_name = video.split('/')[-1]
self.videos[video_name] = {}
self.videos[video_name]['path'] = video
self.videos[video_name]['frame'] = glob.glob(os.path.join(video, '*'+self.file_format))
self.videos[video_name]['frame'].sort()
self.videos[video_name]['length'] = len(self.videos[video_name]['frame'])
self.frame_video_idx += [idx] * self.videos[video_name]['length']
idx += 1
# merge different frames of different videos into one list
for _, cont in self.videos.items():
self.all_frame_addr += cont['frame']
self.tot_frame_num = len(self.all_frame_addr)
elif self.mode == 'test':
video_dir_list = glob.glob(os.path.join(data_dir, '*'))
idx = 1
for video in sorted(video_dir_list):
video_name = video.split('/')[-1]
self.videos[video_name] = {}
self.videos[video_name]['path'] = video
self.videos[video_name]['frame'] = glob.glob(os.path.join(video, '*'+self.file_format))
self.videos[video_name]['frame'].sort()
self.videos[video_name]['length'] = len(self.videos[video_name]['frame'])
self.frame_video_idx += [idx] * self.videos[video_name]['length']
idx += 1
# merge different frames of different videos into one list
for _, cont in self.videos.items():
self.all_frame_addr += cont['frame']
self.tot_frame_num = len(self.all_frame_addr)
# set address of ground truth of frames
if self.return_gt:
self.all_gt = [sio.loadmat(os.path.join(gt_dir, str(x + 1)+'_label.mat'))['volLabel'] for x in range(len(self.videos))]
self.all_gt = np.concatenate(self.all_gt, axis=1)
else:
raise NotImplementedError
def context_range(self, indice):
if self.border_mode == 'elastic':
# check head and tail
if indice - self.context_frame_num < 0:
indice = self.context_frame_num
elif indice + self.context_frame_num > self.tot_frame_num - 1:
indice = self.tot_frame_num - 1 - self.context_frame_num
start_idx = indice - self.context_frame_num
end_idx = indice + self.context_frame_num
need_context_num = 2 * self.context_frame_num + 1
elif self.border_mode == 'predict':
if indice - self.context_frame_num < 0:
start_idx = 0
else:
start_idx = indice - self.context_frame_num
end_idx = indice
need_context_num = self.context_frame_num + 1
else:
# check head and tail
if indice - self.context_frame_num < 0:
start_idx = 0
else:
start_idx = indice - self.context_frame_num
if indice + self.context_frame_num > self.tot_frame_num - 1:
end_idx = self.tot_frame_num - 1
else:
end_idx = indice + self.context_frame_num
need_context_num = 2 * self.context_frame_num + 1
center_idx = self.frame_video_idx[indice]
video_idx = self.frame_video_idx[start_idx:end_idx + 1]
pad = need_context_num - len(video_idx)
if pad > 0:
if start_idx == 0:
video_idx = [video_idx[0]] * pad + video_idx
else:
video_idx = video_idx + [video_idx[-1]] * pad
tmp = np.array(video_idx) - center_idx
offset = tmp.sum()
if tmp[0] != 0 and tmp[-1] != 0: # extreme condition that is not likely to happen
print('The video is too short or the context frame number is too large!')
raise NotImplementedError
if pad == 0 and offset == 0: # all frames are from the same video
idx = [x for x in range(start_idx, end_idx+1)]
return idx
else:
if self.border_mode == 'elastic':
idx = [x for x in range(start_idx - offset, end_idx - offset + 1)]
return idx
elif self.border_mode == 'predict':
if pad > 0 and np.abs(offset) > 0:
print('The video is too short or the context frame number is too large!')
raise NotImplementedError
idx = [x for x in range(start_idx - offset, end_idx + 1)]
idx = [idx[0]] * np.maximum(np.abs(offset), pad) + idx
return idx
else:
if pad > 0 and np.abs(offset) > 0:
print('The video is too short or the context frame number is too large!')
raise NotImplementedError
if offset > 0:
idx = [x for x in range(start_idx, end_idx - offset + 1)]
idx = idx + [idx[-1]] * np.abs(offset)
return idx
elif offset < 0:
idx = [x for x in range(start_idx - offset, end_idx + 1)]
idx = [idx[0]] * np.abs(offset) + idx
return idx
if pad > 0:
if start_idx == 0:
idx = [x for x in range(start_idx - offset, end_idx + 1)]
idx = [idx[0]] * pad + idx
return idx
else:
idx = [x for x in range(start_idx, end_idx - offset + 1)]
idx = idx + [idx[-1]] * pad
return idx
def __getitem__(self, indice):
if self.mode == 'train':
if self.context_frame_num == 0:
img_batch = np.transpose(get_inputs(self.all_frame_addr[indice]), [2, 0, 1])
if self.all_bboxes is not None:
img_batch = get_foreground(img=img_batch, bboxes=self.all_bboxes[indice], patch_size=self.patch_size)
img_batch = torch.from_numpy(img_batch)
else:
frame_range = self.context_range(indice=indice)
img_batch = []
for idx in frame_range:
cur_img = np.transpose(get_inputs(self.all_frame_addr[idx]), [2, 0, 1])
img_batch.append(cur_img)
img_batch = np.array(img_batch)
if self.all_bboxes is not None:
img_batch = get_foreground(img=img_batch, bboxes=self.all_bboxes[indice], patch_size=self.patch_size)
img_batch = torch.from_numpy(img_batch)
return img_batch, torch.zeros(1) # to unify the interface
elif self.mode == 'test':
if self.context_frame_num == 0:
img_batch = np.transpose(get_inputs(self.all_frame_addr[indice]), [2, 0, 1])
if self.all_bboxes is not None:
img_batch = get_foreground(img=img_batch, bboxes=self.all_bboxes[indice], patch_size=self.patch_size)
img_batch = torch.from_numpy(img_batch)
if self.return_gt:
gt_batch = self.all_gt[0, indice]
gt_batch = torch.from_numpy(gt_batch)
else:
frame_range = self.context_range(indice=indice)
img_batch = []
for idx in frame_range:
cur_img = np.transpose(get_inputs(self.all_frame_addr[idx]), [2, 0, 1])
img_batch.append(cur_img)
img_batch = np.array(img_batch)
if self.all_bboxes is not None:
img_batch = get_foreground(img=img_batch, bboxes=self.all_bboxes[indice], patch_size=self.patch_size)
img_batch = torch.from_numpy(img_batch)
if self.return_gt:
gt_batch = self.all_gt[0, indice]
gt_batch = torch.from_numpy(gt_batch)
if self.return_gt:
return img_batch, gt_batch
else:
return img_batch, torch.zeros(1)
else:
raise NotImplementedError
class shanghaiTech_dataset(Dataset):
'''
Loading dataset for ShanghaiTech
'''
def __init__(self, dir, mode='train', context_frame_num=0, border_mode='elastic', file_format='.jpg', all_bboxes=None, patch_size=32):
'''
        :param dir: The directory of the ShanghaiTech dataset
        :param mode: train/test dataset
'''
self.dir = dir
self.mode = mode
self.videos = OrderedDict()
self.all_frame_addr = list()
self.frame_video_idx = list()
self.tot_frame_num = 0
self.context_frame_num = context_frame_num
self.border_mode = border_mode
self.file_format = file_format
self.all_bboxes = all_bboxes
self.patch_size = patch_size
self.return_gt = False
self.save_scene_idx = list()
self.scene_idx = list()
self.scene_num = 0
if mode == 'test':
self.all_gt = list()
self.dataset_init()
pass
def __len__(self):
return self.tot_frame_num
def dataset_init(self):
if self.mode == 'train':
data_dir = os.path.join(self.dir, 'training', 'videosFrame')
elif self.mode == 'test':
data_dir = os.path.join(self.dir, 'testing', 'frames')
gt_dir = os.path.join(self.dir, 'testing', 'test_frame_mask')
if os.path.exists(gt_dir):
self.return_gt = True
else:
raise NotImplementedError
if self.mode == 'train':
video_dir_list = glob.glob(os.path.join(data_dir, '*'))
idx = 1
for video in sorted(video_dir_list):
video_name = video.split('/')[-1]
self.videos[video_name] = {}
self.videos[video_name]['path'] = video
self.videos[video_name]['frame'] = glob.glob(os.path.join(video, '*'+self.file_format))
self.videos[video_name]['frame'].sort()
self.videos[video_name]['length'] = len(self.videos[video_name]['frame'])
self.frame_video_idx += [idx] * self.videos[video_name]['length']
idx += 1
self.save_scene_idx += [int(video_name[:2])] * len(self.videos[video_name]['frame']) # frame data are saved by save_scene_idx
self.scene_idx += [1] * len(self.videos[video_name]['frame']) # frames are processed by scene idx
self.scene_num = len(set(self.scene_idx))
# merge different frames of different videos into one list
for _, cont in self.videos.items():
self.all_frame_addr += cont['frame']
self.tot_frame_num = len(self.all_frame_addr)
elif self.mode == 'test':
idx = 1
# for j in [1, 2]:
video_dir_list = glob.glob(os.path.join(data_dir, '*'))
for video in sorted(video_dir_list):
video_name = video.split('/')[-1]
self.videos[video_name] = {}
self.videos[video_name]['path'] = video
self.videos[video_name]['frame'] = glob.glob(os.path.join(video, '*'+self.file_format))
self.videos[video_name]['frame'].sort()
self.videos[video_name]['length'] = len(self.videos[video_name]['frame'])
self.frame_video_idx += [idx] * self.videos[video_name]['length']
idx += 1
self.save_scene_idx += [int(video_name[:2])] * len(self.videos[video_name]['frame'])
self.scene_idx += [1] * len(self.videos[video_name]['frame'])
self.scene_num = len(set(self.scene_idx))
# merge different frames of different videos into one list
for _, cont in self.videos.items():
self.all_frame_addr += cont['frame']
self.tot_frame_num = len(self.all_frame_addr)
# load ground truth of frames
if self.return_gt:
gt_dir_list = glob.glob(os.path.join(gt_dir, '*'))
for gt in sorted(gt_dir_list):
self.all_gt.append(np.load(gt))
# merge different frames of different videos into one list, only support frame gt now due to memory issue
self.all_gt = np.concatenate(self.all_gt, axis=0)
else:
raise NotImplementedError
def context_range(self, indice):
if self.border_mode == 'elastic':
# check head and tail
if indice - self.context_frame_num < 0:
indice = self.context_frame_num
elif indice + self.context_frame_num > self.tot_frame_num - 1:
indice = self.tot_frame_num - 1 - self.context_frame_num
start_idx = indice - self.context_frame_num
end_idx = indice + self.context_frame_num
need_context_num = 2 * self.context_frame_num + 1
elif self.border_mode == 'predict':
if indice - self.context_frame_num < 0:
start_idx = 0
else:
start_idx = indice - self.context_frame_num
end_idx = indice
need_context_num = self.context_frame_num + 1
else:
# check head and tail
if indice - self.context_frame_num < 0:
start_idx = 0
else:
start_idx = indice - self.context_frame_num
if indice + self.context_frame_num > self.tot_frame_num - 1:
end_idx = self.tot_frame_num - 1
else:
end_idx = indice + self.context_frame_num
need_context_num = 2 * self.context_frame_num + 1
center_idx = self.frame_video_idx[indice]
video_idx = self.frame_video_idx[start_idx:end_idx + 1]
pad = need_context_num - len(video_idx)
if pad > 0:
if start_idx == 0:
video_idx = [video_idx[0]] * pad + video_idx
else:
video_idx = video_idx + [video_idx[-1]] * pad
tmp = np.array(video_idx) - center_idx
offset = tmp.sum()
if tmp[0] != 0 and tmp[-1] != 0: # extreme condition that is not likely to happen
print('The video is too short or the context frame number is too large!')
raise NotImplementedError
if pad == 0 and offset == 0: # all frames are from the same video
idx = [x for x in range(start_idx, end_idx+1)]
return idx
else:
if self.border_mode == 'elastic':
idx = [x for x in range(start_idx - offset, end_idx - offset + 1)]
return idx
elif self.border_mode == 'predict':
if pad > 0 and np.abs(offset) > 0:
print('The video is too short or the context frame number is too large!')
raise NotImplementedError
idx = [x for x in range(start_idx - offset, end_idx + 1)]
idx = [idx[0]] * np.maximum(np.abs(offset), pad) + idx
return idx
else:
if pad > 0 and np.abs(offset) > 0:
print('The video is too short or the context frame number is too large!')
raise NotImplementedError
if offset > 0:
idx = [x for x in range(start_idx, end_idx - offset + 1)]
idx = idx + [idx[-1]] * np.abs(offset)
return idx
elif offset < 0:
idx = [x for x in range(start_idx - offset, end_idx + 1)]
idx = [idx[0]] * np.abs(offset) + idx
return idx
if pad > 0:
if start_idx == 0:
idx = [x for x in range(start_idx - offset, end_idx + 1)]
idx = [idx[0]] * pad + idx
return idx
else:
idx = [x for x in range(start_idx, end_idx - offset + 1)]
idx = idx + [idx[-1]] * pad
return idx
def __getitem__(self, indice):
if self.mode == 'train':
if self.context_frame_num == 0:
img_batch = np.transpose(get_inputs(self.all_frame_addr[indice]), [2, 0, 1])
if self.all_bboxes is not None:
img_batch = get_foreground(img=img_batch, bboxes=self.all_bboxes[indice], patch_size=self.patch_size)
img_batch = torch.from_numpy(img_batch)
else:
frame_range = self.context_range(indice=indice)
img_batch = []
for idx in frame_range:
cur_img = np.transpose(get_inputs(self.all_frame_addr[idx]), [2, 0, 1])
img_batch.append(cur_img)
img_batch = np.array(img_batch)
if self.all_bboxes is not None:
img_batch = get_foreground(img=img_batch, bboxes=self.all_bboxes[indice], patch_size=self.patch_size)
img_batch = torch.from_numpy(img_batch)
return img_batch, torch.zeros(1) # to unify the interface
elif self.mode == 'test':
if self.context_frame_num == 0:
img_batch = np.transpose(get_inputs(self.all_frame_addr[indice]), [2, 0, 1])
if self.all_bboxes is not None:
img_batch = get_foreground(img=img_batch, bboxes=self.all_bboxes[indice], patch_size=self.patch_size)
img_batch = torch.from_numpy(img_batch)
if self.return_gt:
gt_batch = np.array([self.all_gt[indice]])
gt_batch = torch.from_numpy(gt_batch)
else:
frame_range = self.context_range(indice=indice)
img_batch = []
for idx in frame_range:
cur_img = np.transpose(get_inputs(self.all_frame_addr[idx]), [2, 0, 1])
img_batch.append(cur_img)
img_batch = np.array(img_batch)
if self.all_bboxes is not None:
img_batch = get_foreground(img=img_batch, bboxes=self.all_bboxes[indice], patch_size=self.patch_size)
img_batch = torch.from_numpy(img_batch)
if self.return_gt:
gt_batch = np.array([self.all_gt[indice]])
gt_batch = torch.from_numpy(gt_batch)
if self.return_gt:
return img_batch, gt_batch
else:
return img_batch, torch.zeros(1) # to unify the interface
else:
raise NotImplementedError
| 39,197 | 43.291525 | 191 | py |
MRMGA4VAD | MRMGA4VAD-main/module.py |
import torch
import torch.nn as nn
import copy
from module_utils import *
import torch.nn.functional as F
from matplotlib import pyplot as plt
####################################################################################
######################### definition for encoder #################################
####################################################################################
class ConvTransformerEncoder(nn.Module):
def __init__(self, num_layers=5, model_depth=128, num_heads=4,
with_residual=True, with_pos=True, pos_kind='sine'):
super(ConvTransformerEncoder, self).__init__()
self.encoderlayer = ConvTransformerEncoderLayer(model_depth, num_heads, with_residual=with_residual,
with_pos=with_pos)
self.num_layers = num_layers
self.depth_perhead = model_depth//num_heads
self.encoder = self.__get_clones(self.encoderlayer, self.num_layers)
self.positionnet = PositionalEmbeddingLearned(int(model_depth/num_heads))
self.pos_kind = pos_kind
def __get_clones(self, module, n):
return nn.ModuleList([copy.deepcopy(module) for i in range(n)])
def forward(self, input_tensor):
out = input_tensor
if self.pos_kind == 'sine':
b, l, c, h, w = input_tensor.shape
pos = positional_encoding(l, self.depth_perhead, h, w)
elif self.pos_kind == 'learned':
pos = self.positionnet(input_tensor.shape[1:])
for layer in self.encoder:
out = layer(out, pos)
return out
class ConvTransformerEncoderLayer(nn.Module): # work as a bridge to handle multi-head
def __init__(self, model_depth=128, num_heads=4, with_residual=True, with_pos=True):
super(ConvTransformerEncoderLayer, self).__init__()
self.depth = model_depth
self.depth_perhead = int(model_depth/num_heads)
self.with_residual = with_residual
self.attention_heads = self.__get_clones(ConvTransformerEncoderLayerOneHead(self.depth_perhead,
with_pos=with_pos), num_heads)
self.feedforward = FeedForwardNet(self.depth)
self.GN1 = nn.GroupNorm(num_groups=4, num_channels=model_depth)
def __get_clones(self, module, n):
return nn.ModuleList([copy.deepcopy(module) for i in range(n)])
def forward(self, input_tensor, pos_encoding):
heads_out = []
i = 0
for head in self.attention_heads:
heads_out.append(head(input_tensor[:, :, i*self.depth_perhead:(i+1)*self.depth_perhead, :, :], pos_encoding))
i += 1
if self.with_residual:
att_out = torch.cat(heads_out, dim=2) + input_tensor # b,l,c,h,w
b,l,c,h,w = att_out.shape
att_out = torch.reshape(att_out, (-1,c,h,w))
out = self.feedforward(att_out) + att_out
else:
att_out = torch.cat(heads_out, dim=2)
b, l, c, h, w = att_out.shape
att_out = torch.reshape(att_out, (-1, c, h, w))
out = self.feedforward(att_out)
out = self.GN1(out)
out = torch.reshape(out, (b, l, c, h, w))
return out
class ConvTransformerEncoderLayerOneHead(nn.Module):
def __init__(self, head_depth=32, with_pos=True):
super(ConvTransformerEncoderLayerOneHead, self).__init__()
self.depth_perhead = head_depth
self.q_featuremap = QNet(self.depth_perhead)
self.k_v_featuremap = KVNet(self.depth_perhead)
self.attentionmap = AttentionNet(self.depth_perhead * 2)
self.feedforward = FeedForwardNet(self.depth_perhead)
self.with_pos = with_pos
def forward(self, input_tensor, pos_encoding):
batch, length, channel, height, width = input_tensor.shape
input_tensor = torch.reshape(input_tensor, (batch*length, channel, height, width)) # b*l,c,h,w
q_feature = self.q_featuremap(input_tensor)
k_feature = v_feature = self.k_v_featuremap(input_tensor)
q_feature = torch.reshape(q_feature, (batch, length, channel, height, width)) # b,l,c,h,w
k_feature = torch.reshape(k_feature, (batch, length, channel, height, width)) # b,l,c,h,w
v_feature = torch.reshape(v_feature, (batch, length, channel, height, width)) # b,l,c,h,w
if self.with_pos:
q_feature = (q_feature + pos_encoding)
k_feature = (k_feature + pos_encoding)
else:
q_feature = q_feature
k_feature = k_feature
# convolutional self-attention part
q_feature = q_feature.unsqueeze(dim=2).repeat(1, 1, length, 1, 1, 1) # b,l,l,c,h,w
k_feature = k_feature.unsqueeze(dim=1).repeat(1, length, 1, 1, 1, 1) # b,l,l,c,h,w
v_feature = v_feature.unsqueeze(dim=1).repeat(1, length, 1, 1, 1, 1) # b,l,l,c,h,w
q_k_concat = torch.cat((q_feature, k_feature), dim=3) # b,l,l,2c,h,w
dim0, dim1, dim2, dim3, dim4, dim5 = q_k_concat.shape
q_k_concat = torch.reshape(q_k_concat, (dim0 * dim1 * dim2, dim3, dim4, dim5))
attention_map = self.attentionmap(q_k_concat)
attention_map = torch.reshape(attention_map, (dim0, dim1, dim2, 1, dim4, dim5))
attention_map = nn.Softmax(dim=2)(attention_map) # b,l,l,1,h,w
attentioned_v_Feature = attention_map * v_feature # b,l,l,c,h,w
attentioned_v_Feature = torch.sum(attentioned_v_Feature, dim=2) # b,l,c,h,w
return attentioned_v_Feature
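# A minimal usage sketch (shapes are illustrative assumptions; CUDA is required because the
# positional encodings are created on the GPU):
#   encoder = ConvTransformerEncoder(num_layers=3, model_depth=128, num_heads=4).cuda()
#   out = encoder(torch.randn(2, 5, 128, 32, 32).cuda())   # -> (2, 5, 128, 32, 32)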
| 5,573 | 43.951613 | 121 | py |
MRMGA4VAD | MRMGA4VAD-main/resnet_pytorch.py | '''Resnet for cifar dataset.
Ported from
https://github.com/facebook/fb.resnet.torch
and
https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
(c) YANG, Wei
'''
import torch.nn as nn
import math
__all__ = ['resnet']
def conv3x3(in_planes, out_planes, stride=1):
"3x3 convolution with padding"
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, depth, num_classes=1000, block_name='BasicBlock', in_channels=3):
super(ResNet, self).__init__()
# Model type specifies number of layers for CIFAR-10 model
if block_name.lower() == 'basicblock':
assert (depth - 2) % 6 == 0, 'When use basicblock, depth should be 6n+2, e.g. 20, 32, 44, 56, 110, 1202'
n = (depth - 2) // 6
block = BasicBlock
elif block_name.lower() == 'bottleneck':
assert (depth - 2) % 9 == 0, 'When use bottleneck, depth should be 9n+2, e.g. 20, 29, 47, 56, 110, 1199'
n = (depth - 2) // 9
block = Bottleneck
else:
            raise ValueError('block_name should be BasicBlock or Bottleneck')
self.inplanes = 16
self.conv1 = nn.Conv2d(in_channels, 16, kernel_size=3, padding=1,
bias=False)
self.bn1 = nn.BatchNorm2d(16)
self.relu = nn.ReLU(inplace=True)
self.layer1 = self._make_layer(block, 16, n)
self.layer2 = self._make_layer(block, 32, n, stride=2)
self.layer3 = self._make_layer(block, 64, n, stride=2)
self.avgpool = nn.AvgPool2d(8)
self.fc = nn.Linear(64 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x) # 32x32
x = self.layer1(x) # 32x32
x = self.layer2(x) # 16x16
x = self.layer3(x) # 8x8
x = self.avgpool(x)
feat = x.view(x.size(0), -1)
x = self.fc(feat)
return x, feat
def resnet(**kwargs):
"""
Constructs a ResNet model.
"""
return ResNet(**kwargs)
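# A minimal usage sketch (assumes CIFAR-sized 32x32 inputs, which the 8x8 average pooling expects):
#   net = resnet(depth=20, num_classes=10, in_channels=3)
#   logits, feat = net(torch.randn(4, 3, 32, 32))   # logits: (4, 10), feat: (4, 64)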
| 5,088 | 29.842424 | 116 | py |
MRMGA4VAD | MRMGA4VAD-main/module_utils.py | import torch
import torch.nn as nn
from torch.nn import init
import math
import copy
import numpy as np
from skimage import measure
class QNet(nn.Module):
def __init__(self, depth=32):
super(QNet, self).__init__()
self.conv0 = nn.Sequential(
nn.Conv2d(in_channels=depth, out_channels=depth, kernel_size=3, stride=1, padding=1, bias=True),
nn.LeakyReLU(0.2, inplace=True)
)
def forward(self, input_tensor):
q_feature = self.conv0(input_tensor)
return q_feature
class KVNet(nn.Module):
def __init__(self, depth=32):
super(KVNet, self).__init__()
self.conv0 = nn.Sequential(
nn.Conv2d(in_channels=depth, out_channels=depth, kernel_size=3, stride=1, padding=1, bias=True),
nn.LeakyReLU(0.2, inplace=True)
)
def forward(self, input_tensor):
k_v_feature = self.conv0(input_tensor)
return k_v_feature
class FeedForwardNet(nn.Module):
def __init__(self, depth=128):
super(FeedForwardNet, self).__init__()
self.conv0 = nn.Sequential(
nn.Conv2d(in_channels=depth, out_channels=depth, kernel_size=3, stride=1, padding=1, bias=True),
nn.LeakyReLU(0.2, inplace=True)
)
def forward(self, input_tensor):
out = self.conv0(input_tensor)
return out
class AttentionNet(nn.Module):
def __init__(self, depth=64):
super(AttentionNet, self).__init__()
self.conv0 = nn.Sequential(
nn.Conv2d(in_channels=depth, out_channels=1, kernel_size=3, stride=1, padding=1, bias=True),
nn.LeakyReLU(0.2, inplace=True)
)
def forward(self, input_tensor):
out = self.conv0(input_tensor)
return out
def _get_clones(module, n):
return nn.ModuleList([copy.deepcopy(module) for i in range(n)])
def weights_init_xavier(m):
classname = m.__class__.__name__
if classname.find('Conv2d') != -1:
init.xavier_normal(m.weight.data)
if classname.find('ConvTranspose2d') != -1:
init.xavier_normal(m.weight.data)
def cal_psnr(img1, img2):
img1_np = np.array(img1)
img2_np = np.array(img2)
return measure.compare_psnr(img1_np, img2_np)
def get_angles(pos, i, d_model):
angle_rates = 1 / np.power(10000, (2*(i // 2))/ np.float32(d_model))
return pos * angle_rates
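# Transformer-style sinusoidal positional encoding: sines of the even-indexed angle columns and
# cosines of the odd-indexed ones are concatenated along the channel axis, scaled by 0.5, and
# every (position, channel) value is repeated over the h x w grid so the table can be added
# directly to convolutional feature maps (the result lives on the GPU).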
def positional_encoding(position, d_model, h=128, w=226):
angle_rads = get_angles(np.arange(position)[:, np.newaxis],
np.arange(d_model)[np.newaxis, :],
d_model)
sines = np.sin(angle_rads[:, 0::2])
cones = np.cos(angle_rads[:, 1::2])
pos_encoding = np.concatenate([sines, cones], axis=-1).astype(np.float32)
pos_embedding = torch.from_numpy(0.5*pos_encoding)
pos = pos_embedding.unsqueeze(2).repeat(1, 1, h * w).reshape(position, d_model, h, w).cuda()
return pos
class PositionalEmbeddingLearned(nn.Module):
def __init__(self, embedding_depth=128):
super(PositionalEmbeddingLearned, self).__init__()
self.depth = embedding_depth
self.positional_embedding = nn.Embedding(10, self.depth).cuda()
def forward(self, shape):
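        # `shape` here is (length, channels, h, w) of a single clip: one learned embedding per
        # temporal index (at most 10 frames, the nn.Embedding size) is broadcast over the
        # h x w grid and added to the per-frame feature maps by the caller.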
b, c, h, w = shape
index = torch.arange(b).cuda()#to('cuda:0')
position = self.positional_embedding(index) # 5 * 64
position = position.unsqueeze(2).repeat(1, 1, h * w).reshape(b, self.depth, h, w)
return position
def get_model_name(cfg):
if cfg.w_res:
s_res = 'w_res-'
else:
s_res = 'wo_res-'
if cfg.w_pos:
s_pos = 'w_pos-'
s_pos_kind = cfg.pos_kind
else:
s_pos = 'wo_pos-'
s_pos_kind = 'none'
s_num_heads = f'{cfg.n_heads}heads-'
s_num_layers = f'{cfg.n_layers}layers-'
s_num_dec_frames = f'dec_{cfg.dec_frames}-'
s_model_type = '-inter' if cfg.model_type == 0 else '-extra'
model_kind = s_num_heads + s_num_layers + s_num_dec_frames + s_res + s_pos + s_pos_kind + s_model_type
return model_kind
if __name__ == '__main__':
x = positional_encoding(3, 64)
print('debug') | 4,119 | 30.212121 | 108 | py |
MRMGA4VAD | MRMGA4VAD-main/train.py | import numpy as np
import os
from torch.utils.data import DataLoader
from vad_datasets import unified_dataset_interface, cube_to_train_dataset
from vad_datasets import bbox_collate, img_tensor2numpy, img_batch_tensor2numpy, frame_size
from helper.misc import AverageMeter
import torch
from state_model import ConvTransformer_recon_correct
import torch.optim as optim
import torch.nn as nn
import argparse
import sys
pyfile_name = os.path.basename(sys.argv[0]).split(".")[0]
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('boolean value expected')
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--dataset', default='avenue', type=str)
parser.add_argument('-n_l', '--num_layers', default=3, type=int)
parser.add_argument('-n_h', '--num_heads', default=4, type=int)
parser.add_argument('-pe', '--positional_encoding', default='learned', type=str)
parser.add_argument('-e', '--epochs', default=20, type=int)
parser.add_argument('-b', '--batch_size', default=128, type=int)
parser.add_argument('-l', '--temporal_length', default=3, type=int)
parser.add_argument('-lam_r', '--lambda_raw', default=1, type=float)
parser.add_argument('-lam_o', '--lambda_of', default=1, type=float)
parser.add_argument('-train_b', '--train_bbox_saved', default=True, type=str2bool)
parser.add_argument('-train_f', '--train_foreground_saved', default=True, type=str2bool)
parser.add_argument('-f', '--use_flow', default=True, type=str2bool)
parser.add_argument('-bd', '--border_mode', default='elastic', type=str)
args = parser.parse_args()
def calc_block_idx(x_min, x_max, y_min, y_max, h_step, w_step, mode):
all_blocks = list()
center = np.array([(y_min + y_max) / 2, (x_min + x_max) / 2])
all_blocks.append(center + center)
if mode > 1:
all_blocks.append(np.array([y_min, center[1]]) + center)
all_blocks.append(np.array([y_max, center[1]]) + center)
all_blocks.append(np.array([center[0], x_min]) + center)
all_blocks.append(np.array([center[0], x_max]) + center)
if mode >= 9:
all_blocks.append(np.array([y_min, x_min]) + center)
all_blocks.append(np.array([y_max, x_max]) + center)
all_blocks.append(np.array([y_max, x_min]) + center)
all_blocks.append(np.array([y_min, x_max]) + center)
all_blocks = np.array(all_blocks) / 2
h_block_idxes = all_blocks[:, 0] / h_step
w_block_idxes = all_blocks[:, 1] / w_step
h_block_idxes, w_block_idxes = list(h_block_idxes.astype(np.int)), list(w_block_idxes.astype(np.int))
# delete repeated elements
all_blocks = set([x for x in zip(h_block_idxes, w_block_idxes)])
all_blocks = [x for x in all_blocks]
return all_blocks
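# calc_block_idx maps a foreground bbox to the spatial block(s) it belongs to when the frame is
# split into h_block x w_block regions: mode 1 uses only the bbox centre, mode > 1 also tests the
# points midway between the centre and the four edge midpoints, and mode >= 9 further adds the
# points midway between the centre and the four corners; duplicate block indices are removed.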
# /*------------------------------------overall parameter setting------------------------------------------*/
dataset_name = args.dataset
raw_dataset_dir = 'raw_datasets'
foreground_extraction_mode = 'obj_det_with_motion'
data_root_dir = 'data'
modality = 'raw2flow'
mode = 'train'
method = 'SelfComplete'
num_layers = args.num_layers
num_heads = args.num_heads
pe = args.positional_encoding
context_frame_num = args.temporal_length
context_of_num = args.temporal_length
patch_size = 32
h_block = 1
w_block = 1
train_block_mode = 1
bbox_saved = args.train_bbox_saved
foreground_saved = args.train_foreground_saved
motionThr = 0
# /*------------------------------------------foreground extraction----------------------------------------------*/
config_file = './obj_det_config/cascade_rcnn_r101_fpn_1x.py'
checkpoint_file = './obj_det_checkpoints/cascade_rcnn_r101_fpn_1x_20181129-d64ebac7.pth'
# set dataset for foreground extraction
dataset = unified_dataset_interface(dataset_name=dataset_name, dir=os.path.join(raw_dataset_dir, dataset_name),
context_frame_num=1, mode=mode, border_mode='hard')
if not bbox_saved:
# build the model from a config file and a checkpoint file
from fore_det.inference import init_detector
from fore_det.obj_det_with_motion import imshow_bboxes, getObBboxes, getFgBboxes, delCoverBboxes
from fore_det.simple_patch import get_patch_loc
model = init_detector(config_file, checkpoint_file, device='cuda:0')
collate_func = bbox_collate('train')
dataset_loader = DataLoader(dataset=dataset, batch_size=1, shuffle=False, num_workers=1,
collate_fn=collate_func.collate)
all_bboxes = list()
for idx in range(dataset.__len__()):
batch, _ = dataset.__getitem__(idx)
print('Extracting bboxes of {}-th frame'.format(idx + 1))
cur_img = img_tensor2numpy(batch[1])
if foreground_extraction_mode == 'obj_det_with_motion':
# A coarse detection of bboxes by pretrained object detector
ob_bboxes = getObBboxes(cur_img, model, dataset_name)
ob_bboxes = delCoverBboxes(ob_bboxes, dataset_name)
# further foreground detection by motion
fg_bboxes = getFgBboxes(cur_img, img_batch_tensor2numpy(batch), ob_bboxes, dataset_name, verbose=False)
if fg_bboxes.shape[0] > 0:
cur_bboxes = np.concatenate((ob_bboxes, fg_bboxes), axis=0)
else:
cur_bboxes = ob_bboxes
elif foreground_extraction_mode == 'obj_det':
# A coarse detection of bboxes by pretrained object detector
ob_bboxes = getObBboxes(cur_img, model, dataset_name)
cur_bboxes = delCoverBboxes(ob_bboxes, dataset_name)
elif foreground_extraction_mode == 'simple_patch':
patch_num_list = [(3, 4), (6, 8)]
cur_bboxes = list()
for h_num, w_num in patch_num_list:
cur_bboxes.append(get_patch_loc(frame_size[dataset_name][0], frame_size[dataset_name][1], h_num, w_num))
cur_bboxes = np.concatenate(cur_bboxes, axis=0)
else:
raise NotImplementedError
# imshow_bboxes(cur_img, cur_bboxes)
all_bboxes.append(cur_bboxes)
np.save(os.path.join(dataset.dir, 'bboxes_train_{}.npy'.format(foreground_extraction_mode)), all_bboxes)
print('bboxes for training data saved!')
else:
all_bboxes = np.load(os.path.join(dataset.dir, 'bboxes_train_{}.npy'.format(foreground_extraction_mode)),
allow_pickle=True)
print('bboxes for training data loaded!')
# /------------------------- extract foreground using extracted bboxes---------------------------------------/
border_mode = args.border_mode
if not foreground_saved:
if modality == 'raw_datasets':
file_format = frame_size[dataset_name][2]
elif modality == 'raw2flow':
file_format1 = frame_size[dataset_name][2]
file_format2 = '.npy'
else:
file_format = '.npy'
if modality == 'raw2flow':
dataset = unified_dataset_interface(dataset_name=dataset_name, dir=os.path.join('raw_datasets', dataset_name),
context_frame_num=context_frame_num, mode=mode, border_mode=border_mode,
all_bboxes=all_bboxes, patch_size=patch_size, file_format=file_format1)
dataset2 = unified_dataset_interface(dataset_name=dataset_name, dir=os.path.join('optical_flow', dataset_name),
context_frame_num=context_of_num, mode=mode, border_mode=border_mode,
all_bboxes=all_bboxes, patch_size=patch_size, file_format=file_format2)
else:
dataset = unified_dataset_interface(dataset_name=dataset_name, dir=os.path.join(modality, dataset_name),
context_frame_num=context_frame_num, mode=mode, border_mode=border_mode,
all_bboxes=all_bboxes, patch_size=patch_size, file_format=file_format)
if dataset_name == 'ShanghaiTech':
foreground_set = [[[[] for ww in range(w_block)] for hh in range(h_block)] for ss in range(dataset.scene_num)]
if modality == 'raw2flow':
foreground_set2 = [[[[] for ww in range(w_block)] for hh in range(h_block)] for ss in
range(dataset.scene_num)]
else:
foreground_set = [[[] for ww in range(w_block)] for hh in range(h_block)]
if modality == 'raw2flow':
foreground_set2 = [[[] for ww in range(w_block)] for hh in range(h_block)]
h_step, w_step = frame_size[dataset_name][0] / h_block, frame_size[dataset_name][1] / w_block
dataset_loader = DataLoader(dataset=dataset, batch_size=1, shuffle=False, num_workers=1,
collate_fn=bbox_collate(mode=mode).collate)
if dataset_name == 'ShanghaiTech' and modality == 'raw2flow':
randIdx = np.random.permutation(dataset.__len__())
cout = 0
segIdx = 0
saveSegNum = 40000
for iidx in range(dataset.__len__()):
if dataset_name == 'ShanghaiTech' and modality == 'raw2flow':
idx = randIdx[iidx]
cout += 1
else:
idx = iidx
batch, _ = dataset.__getitem__(idx)
if modality == 'raw2flow':
batch2, _ = dataset2.__getitem__(idx)
if dataset_name == 'ShanghaiTech':
print(
'Extracting foreground in {}-th batch, {} in total, scene: {}'.format(iidx + 1, dataset.__len__() // 1,
dataset.scene_idx[idx]))
else:
print('Extracting foreground in {}-th batch, {} in total'.format(iidx + 1, dataset.__len__() // 1))
cur_bboxes = all_bboxes[idx]
if len(cur_bboxes) > 0:
batch = img_batch_tensor2numpy(batch)
if modality == 'raw2flow':
batch2 = img_batch_tensor2numpy(batch2)
if modality == 'optical_flow':
if len(batch.shape) == 4:
mag = np.sum(np.sum(np.sum(batch ** 2, axis=3), axis=2), axis=1)
else:
mag = np.mean(np.sum(np.sum(np.sum(batch ** 2, axis=4), axis=3), axis=2), axis=1)
elif modality == 'raw2flow':
if len(batch2.shape) == 4:
mag = np.sum(np.sum(np.sum(batch2 ** 2, axis=3), axis=2), axis=1)
else:
mag = np.mean(np.sum(np.sum(np.sum(batch2 ** 2, axis=4), axis=3), axis=2), axis=1)
else:
mag = np.ones(batch.shape[0]) * 10000
for idx_bbox in range(cur_bboxes.shape[0]):
if mag[idx_bbox] > motionThr:
all_blocks = calc_block_idx(cur_bboxes[idx_bbox, 0], cur_bboxes[idx_bbox, 2],
cur_bboxes[idx_bbox, 1], cur_bboxes[idx_bbox, 3], h_step, w_step,
mode=train_block_mode)
for (h_block_idx, w_block_idx) in all_blocks:
if dataset_name == 'ShanghaiTech':
foreground_set[dataset.scene_idx[idx] - 1][h_block_idx][w_block_idx].append(batch[idx_bbox])
if modality == 'raw2flow':
foreground_set2[dataset.scene_idx[idx] - 1][h_block_idx][w_block_idx].append(
batch2[idx_bbox])
else:
foreground_set[h_block_idx][w_block_idx].append(batch[idx_bbox])
if modality == 'raw2flow':
foreground_set2[h_block_idx][w_block_idx].append(batch2[idx_bbox])
if dataset_name == 'ShanghaiTech' and modality == 'raw2flow':
if cout == saveSegNum:
foreground_set = [
[[np.array(foreground_set[ss][hh][ww]) for ww in range(w_block)] for hh in range(h_block)] for ss in
range(dataset.scene_num)]
foreground_set2 = [
[[np.array(foreground_set2[ss][hh][ww]) for ww in range(w_block)] for hh in range(h_block)] for ss
in range(dataset.scene_num)]
np.save(os.path.join(data_root_dir, modality,
dataset_name + '_' + 'foreground_train_{}_seg_{}_{}_border_{}-raw.npy'.format(
foreground_extraction_mode, segIdx, context_frame_num, border_mode)), foreground_set)
np.save(os.path.join(data_root_dir, modality,
dataset_name + '_' + 'foreground_train_{}_seg_{}_{}_border_{}-flow.npy'.format(
foreground_extraction_mode, segIdx, context_frame_num, border_mode)), foreground_set2)
del foreground_set, foreground_set2
cout = 0
segIdx += 1
foreground_set = [[[[] for ww in range(w_block)] for hh in range(h_block)] for ss in
range(dataset.scene_num)]
foreground_set2 = [[[[] for ww in range(w_block)] for hh in range(h_block)] for ss in
range(dataset.scene_num)]
if dataset_name == 'ShanghaiTech':
if modality != 'raw2flow':
foreground_set = [[[np.array(foreground_set[ss][hh][ww]) for ww in range(w_block)] for hh in range(h_block)]
for ss in range(dataset.scene_num)]
np.save(os.path.join(data_root_dir, modality,
dataset_name + '_' + 'foreground_train_{}.npy'.format(foreground_extraction_mode)),
foreground_set)
else:
if dataset.__len__() % saveSegNum != 0:
foreground_set = [
[[np.array(foreground_set[ss][hh][ww]) for ww in range(w_block)] for hh in range(h_block)]
for ss in range(dataset.scene_num)]
foreground_set2 = [
[[np.array(foreground_set2[ss][hh][ww]) for ww in range(w_block)] for hh in range(h_block)] for ss
in
range(dataset.scene_num)]
np.save(os.path.join(data_root_dir, modality,
dataset_name + '_' + 'foreground_train_{}_seg_{}_{}_border_{}-raw.npy'.format(
foreground_extraction_mode, segIdx, context_frame_num, border_mode)), foreground_set)
np.save(os.path.join(data_root_dir, modality,
dataset_name + '_' + 'foreground_train_{}_seg_{}_{}_border_{}-flow.npy'.format(
foreground_extraction_mode, segIdx, context_frame_num, border_mode)), foreground_set2)
else:
if modality == 'raw2flow':
foreground_set = [[np.array(foreground_set[hh][ww]) for ww in range(w_block)] for hh in range(h_block)]
np.save(os.path.join(data_root_dir, modality,
dataset_name + '_' + 'foreground_train_{}_{}_border_{}-raw.npy'.format(foreground_extraction_mode, context_frame_num, border_mode)),
foreground_set)
foreground_set2 = [[np.array(foreground_set2[hh][ww]) for ww in range(w_block)] for hh in range(h_block)]
np.save(os.path.join(data_root_dir, modality, dataset_name + '_' + 'foreground_train_{}_{}_border_{}-flow.npy'.format(
foreground_extraction_mode, context_frame_num, border_mode)), foreground_set2)
else:
foreground_set = [[np.array(foreground_set[hh][ww]) for ww in range(w_block)] for hh in range(h_block)]
np.save(os.path.join(data_root_dir, modality,
dataset_name + '_' + 'foreground_train_{}_{}_border_{}.npy'.format(foreground_extraction_mode, context_frame_num, border_mode)),
foreground_set)
print('foreground for training data saved!')
else:
if dataset_name != 'ShanghaiTech':
if modality == 'raw2flow':
foreground_set = np.load(os.path.join(data_root_dir, modality,
dataset_name + '_' + 'foreground_train_{}_{}_border_{}-raw.npy'.format(
foreground_extraction_mode, context_frame_num, border_mode)), allow_pickle=True)
foreground_set2 = np.load(os.path.join(data_root_dir, modality,
dataset_name + '_' + 'foreground_train_{}_{}_border_{}-flow.npy'.format(
foreground_extraction_mode, context_frame_num, border_mode)), allow_pickle=True)
else:
foreground_set = np.load(os.path.join(data_root_dir, modality,
dataset_name + '_' + 'foreground_train_{}_{}_border_{}.npy'.format(
foreground_extraction_mode, context_frame_num, border_mode)), allow_pickle=True)
print('foreground for training data loaded!')
else:
if modality != 'raw2flow':
foreground_set = np.load(os.path.join(data_root_dir, modality,
dataset_name + '_' + 'foreground_train_{}_{}_border_{}.npy'.format(
foreground_extraction_mode, context_frame_num, border_mode)), allow_pickle=True)
# /*------------------------------------------Normal event modeling----------------------------------------------*/
if method == 'SelfComplete':
loss_func = nn.MSELoss()
epochs = args.epochs
batch_size = args.batch_size
useFlow = args.use_flow
if border_mode == 'predict':
tot_frame_num = context_frame_num + 1
tot_of_num = context_of_num + 1
else:
tot_frame_num = 2 * context_frame_num + 1
tot_of_num = 2 * context_of_num + 1
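        # Note: with border_mode == 'predict' a cube holds context_frame_num + 1 frames
        # (context plus one target frame); otherwise it holds a symmetric window of
        # 2 * context_frame_num + 1 frames centred on the target.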
rawRange = 10
if rawRange >= tot_frame_num: # if rawRange is out of the range, use all frames
rawRange = None
padding = False
lambda_raw = args.lambda_raw
lambda_of = args.lambda_of
assert modality == 'raw2flow'
if dataset_name == 'ShanghaiTech':
model_set = [[[[] for ww in range(w_block)] for hh in range(h_block)] for ss in
range(frame_size[dataset_name][-1])]
raw_training_scores_set = [[[[] for ww in range(w_block)] for hh in range(h_block)] for ss in
range(frame_size[dataset_name][-1])]
of_training_scores_set = [[[[] for ww in range(w_block)] for hh in range(h_block)] for ss in
range(frame_size[dataset_name][-1])]
else:
model_set = [[[] for ww in range(len(foreground_set[hh]))] for hh in range(len(foreground_set))]
raw_training_scores_set = [[[] for ww in range(len(foreground_set[hh]))] for hh in range(len(foreground_set))]
of_training_scores_set = [[[] for ww in range(len(foreground_set[hh]))] for hh in range(len(foreground_set))]
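        # Bookkeeping note: models and training scores are stored per spatial block,
        # indexed [scene][h_block][w_block] for ShanghaiTech and [h_block][w_block] otherwise.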
# Prepare training data in current block
if dataset_name == 'ShanghaiTech':
saveSegNum = 40000
            totSegNum = int(np.ceil(dataset.__len__() / saveSegNum))  # built-in int: np.int is deprecated
for s_idx in range(len(model_set)):
for h_idx in range(len(model_set[s_idx])):
for w_idx in range(len(model_set[s_idx][h_idx])):
raw_losses = AverageMeter()
of_losses = AverageMeter()
cur_model = torch.nn.DataParallel(ConvTransformer_recon_correct(
tot_raw_num = tot_frame_num, nums_hidden = [32, 64, 128], num_layers=num_layers,
num_dec_frames=1, num_heads=num_heads, with_residual=True,
with_pos=True, pos_kind=pe, mode=0, use_flow=useFlow)).cuda()
optimizer = optim.Adam(cur_model.parameters(), eps=1e-7, weight_decay=0.000)
cur_model.train()
for epoch in range(epochs):
for segIdx in range(totSegNum):
foreground_set = np.load(os.path.join(data_root_dir, modality,
dataset_name + '_' + 'foreground_train_{}_seg_{}_{}_border_{}-raw.npy'.format(
foreground_extraction_mode, segIdx, context_frame_num, border_mode)))
foreground_set2 = np.load(os.path.join(data_root_dir, modality,
dataset_name + '_' + 'foreground_train_{}_seg_{}_{}_border_{}-flow.npy'.format(
foreground_extraction_mode, segIdx, context_frame_num, border_mode)))
cur_training_data = foreground_set[s_idx][h_idx][w_idx]
cur_training_data2 = foreground_set2[s_idx][h_idx][w_idx]
cur_dataset = cube_to_train_dataset(cur_training_data, target=cur_training_data2)
cur_dataloader = DataLoader(dataset=cur_dataset, batch_size=batch_size, shuffle=True)
for idx, (inputs, of_targets_all, _) in enumerate(cur_dataloader):
inputs = inputs.cuda().type(torch.cuda.FloatTensor)
of_targets_all = of_targets_all.cuda().type(torch.cuda.FloatTensor)
of_outputs, raw_outputs, of_targets, raw_targets = cur_model(inputs, of_targets_all)
loss_raw = loss_func(raw_targets.detach(), raw_outputs)
if useFlow:
loss_of = loss_func(of_targets.detach(), of_outputs)
if useFlow:
loss = lambda_raw * loss_raw + lambda_of * loss_of
else:
loss = loss_raw
raw_losses.update(loss_raw.item(), inputs.size(0))
if useFlow:
of_losses.update(loss_of.item(), inputs.size(0))
else:
of_losses.update(0., inputs.size(0))
optimizer.zero_grad()
loss.backward()
optimizer.step()
if idx % 5 == 0:
print(
'Block: ({}, {}), epoch {}, seg {}, batch {} of {}, raw loss: {}, of loss: {}'.format(
h_idx, w_idx, epoch, segIdx, idx, cur_dataset.__len__() // batch_size,
raw_losses.avg,
of_losses.avg))
# break
# break
# break
model_set[s_idx][h_idx][w_idx].append(cur_model.state_dict())
# /*-- A forward pass to store the training scores of optical flow and raw datasets respectively*/
for segIdx in range(totSegNum):
foreground_set = np.load(os.path.join(data_root_dir, modality,
dataset_name + '_' + 'foreground_train_{}_seg_{}_{}_border_{}-raw.npy'.format(
foreground_extraction_mode, segIdx, context_frame_num, border_mode)))
foreground_set2 = np.load(os.path.join(data_root_dir, modality,
dataset_name + '_' + 'foreground_train_{}_seg_{}_{}_border_{}-flow.npy'.format(
foreground_extraction_mode, segIdx, context_frame_num, border_mode)))
cur_training_data = foreground_set[s_idx][h_idx][w_idx]
cur_training_data2 = foreground_set2[s_idx][h_idx][w_idx]
cur_dataset = cube_to_train_dataset(cur_training_data, target=cur_training_data2)
forward_dataloader = DataLoader(dataset=cur_dataset, batch_size=batch_size//4, shuffle=False)
                            score_func = nn.MSELoss(reduction='none')
cur_model.eval()
for idx, (inputs, of_targets_all, _) in enumerate(forward_dataloader):
inputs = inputs.cuda().type(torch.cuda.FloatTensor)
of_targets_all = of_targets_all.cuda().type(torch.cuda.FloatTensor)
of_outputs, raw_outputs, of_targets, raw_targets = cur_model(inputs, of_targets_all)
raw_scores = score_func(raw_targets, raw_outputs).cpu().data.numpy()
                                raw_scores = np.sum(raw_scores, axis=(1, 2, 3, 4))  # per-sample mse
raw_training_scores_set[s_idx][h_idx][w_idx].append(raw_scores)
if useFlow:
of_scores = score_func(of_targets, of_outputs).cpu().data.numpy()
                                    of_scores = np.sum(of_scores, axis=(1, 2, 3, 4))  # per-sample mse
of_training_scores_set[s_idx][h_idx][w_idx].append(of_scores)
raw_training_scores_set[s_idx][h_idx][w_idx] = np.concatenate(
raw_training_scores_set[s_idx][h_idx][w_idx], axis=0)
if useFlow:
of_training_scores_set[s_idx][h_idx][w_idx] = np.concatenate(
of_training_scores_set[s_idx][h_idx][w_idx], axis=0)
del cur_model, raw_losses, of_losses
torch.save(raw_training_scores_set, os.path.join(data_root_dir, modality,
dataset_name + '_' + 'raw_training_scores_border_{}_head_{}_layer_{}_length_{}_pe_{}_epoch_{}_lambda_{}_{}'.format(
border_mode, num_heads, num_layers, context_frame_num, pe,
epochs, lambda_raw, lambda_of) + '_' + 'pyname_{}.npy'.format(pyfile_name)))
torch.save(of_training_scores_set, os.path.join(data_root_dir, modality,
dataset_name + '_' + 'of_training_scores_border_{}_head_{}_layer_{}_length_{}_pe_{}_epoch_{}_lambda_{}_{}'.format(
border_mode, num_heads, num_layers, context_frame_num, pe,
epochs, lambda_raw, lambda_of) + '_' + 'pyname_{}.npy'.format(pyfile_name)))
else:
raw_losses = AverageMeter()
of_losses = AverageMeter()
torch.autograd.set_detect_anomaly(True)
for h_idx in range(len(foreground_set)):
for w_idx in range(len(foreground_set[h_idx])):
cur_training_data = foreground_set[h_idx][w_idx]
if len(cur_training_data) > 1: # num > 1 for data parallel
cur_training_data2 = foreground_set2[h_idx][w_idx]
cur_dataset = cube_to_train_dataset(cur_training_data, target=cur_training_data2)
cur_dataloader = DataLoader(dataset=cur_dataset, batch_size=batch_size, shuffle=True)
cur_model = torch.nn.DataParallel(ConvTransformer_recon_correct(
tot_raw_num = tot_frame_num, nums_hidden = [32, 64, 128], num_layers=num_layers,
num_dec_frames=1, num_heads=num_heads, with_residual=True,
with_pos=True, pos_kind=pe, mode=0, use_flow=useFlow)).cuda()
                        # Identical optimizer settings are used for every dataset
                        optimizer = optim.Adam(cur_model.parameters(), eps=1e-7, weight_decay=0.0)
cur_model.train()
for epoch in range(epochs):
for idx, (inputs, of_targets_all, _) in enumerate(cur_dataloader):
inputs = inputs.cuda().type(torch.cuda.FloatTensor)
# print(torch.max(inputs))
of_targets_all = of_targets_all.cuda().type(torch.cuda.FloatTensor)
of_outputs, raw_outputs, of_targets, raw_targets = cur_model(inputs, of_targets_all)
loss_raw = loss_func(raw_targets.detach(), raw_outputs)
if useFlow:
loss_of = loss_func(of_targets.detach(), of_outputs)
if useFlow:
loss = lambda_raw * loss_raw + lambda_of * loss_of
else:
loss = loss_raw
raw_losses.update(loss_raw.item(), inputs.size(0))
if useFlow:
of_losses.update(loss_of.item(), inputs.size(0))
else:
of_losses.update(0., inputs.size(0))
optimizer.zero_grad()
loss.backward()
optimizer.step()
if idx % 5 == 0:
max_num = 20
print(
'Block: ({}, {}), epoch {}, batch {} of {}, raw loss: {}, of loss: {}'.format(h_idx,
w_idx,
epoch,
idx,
cur_dataset.__len__() // batch_size,
raw_losses.avg,
of_losses.avg))
model_set[h_idx][w_idx].append(cur_model.state_dict())
# /*-- A forward pass to store the training scores of optical flow and raw datasets respectively*/
forward_dataloader = DataLoader(dataset=cur_dataset, batch_size=batch_size//4, shuffle=False)
# raw_score_func = nn.MSELoss(reduce=False)
# of_score_func = nn.L1Loss(reduce=False)
                        score_func = nn.MSELoss(reduction='none')
cur_model.eval()
for idx, (inputs, of_targets_all, _) in enumerate(forward_dataloader):
inputs = inputs.cuda().type(torch.cuda.FloatTensor)
of_targets_all = of_targets_all.cuda().type(torch.cuda.FloatTensor)
of_outputs, raw_outputs, of_targets, raw_targets = cur_model(inputs, of_targets_all)
raw_scores = score_func(raw_targets, raw_outputs).cpu().data.numpy()
                            raw_scores = np.sum(raw_scores, axis=(1, 2, 3, 4))  # per-sample mse
raw_training_scores_set[h_idx][w_idx].append(raw_scores)
if useFlow:
of_scores = score_func(of_targets, of_outputs).cpu().data.numpy()
                                of_scores = np.sum(of_scores, axis=(1, 2, 3, 4))  # per-sample mse
of_training_scores_set[h_idx][w_idx].append(of_scores)
raw_training_scores_set[h_idx][w_idx] = np.concatenate(raw_training_scores_set[h_idx][w_idx], axis=0)
if useFlow:
of_training_scores_set[h_idx][w_idx] = np.concatenate(of_training_scores_set[h_idx][w_idx],
axis=0)
torch.save(raw_training_scores_set, os.path.join(data_root_dir, modality,
dataset_name + '_' + 'raw_training_scores_border_{}_head_{}_layer_{}_length_{}_pe_{}_epoch_{}_lambda_{}_{}'.format(
border_mode, num_heads, num_layers, context_frame_num, pe, epochs, lambda_raw, lambda_of) + '_' + 'pyname_{}.npy'.format(pyfile_name)))
torch.save(of_training_scores_set, os.path.join(data_root_dir, modality,
dataset_name + '_' + 'of_training_scores_border_{}_head_{}_layer_{}_length_{}_pe_{}_epoch_{}_lambda_{}_{}'.format(
border_mode, num_heads, num_layers, context_frame_num, pe, epochs, lambda_raw, lambda_of) + '_' + 'pyname_{}.npy'.format(pyfile_name)))
print('training scores saved')
torch.save(model_set, os.path.join(data_root_dir, modality,
dataset_name + '_' + 'model_{}_head_{}_layer_{}_length_{}_pe_{}_epoch_{}_lambda_{}_{}'.format(
border_mode, num_heads, num_layers, context_frame_num, pe, epochs, lambda_raw, lambda_of) + '_' + 'pyname_{}.npy'.format(pyfile_name)))
print('Training of {} for dataset: {} has completed!'.format(method, dataset_name))
else:
raise NotImplementedError
| 34,551 | 57.86201 | 197 | py |
MRMGA4VAD | MRMGA4VAD-main/fore_det/inference.py | import warnings
import matplotlib.pyplot as plt
import mmcv
import numpy as np
import pycocotools.mask as maskUtils
import torch
from mmcv.parallel import collate, scatter
from mmcv.runner import load_checkpoint
from mmdet.core import get_classes
from mmdet.datasets.pipelines import Compose
from mmdet.models import build_detector
from mmcv.image import imread, imwrite
import cv2
def imshow_bboxes(img,
bboxes,
bbox_color=(0, 255, 0),
thickness=1,
show=True,
win_name='',
wait_time=0,
out_file=None):
"""Draw bboxes on an image.
Args:
img (str or ndarray): The image to be displayed.
        bboxes (ndarray): Bounding boxes (optionally with scores), shaped (n, 4) or (n, 5).
bbox_color (RGB value): Color of bbox lines.
thickness (int): Thickness of lines.
show (bool): Whether to show the image.
win_name (str): The window name.
wait_time (int): Value of waitKey param.
out_file (str or None): The filename to write the image.
"""
assert bboxes.ndim == 2
assert bboxes.shape[1] == 4 or bboxes.shape[1] == 5
img = imread(img)
    for bbox in bboxes:
        bbox_int = bbox.astype(np.int32)
        left_top = (bbox_int[0], bbox_int[1])
        right_bottom = (bbox_int[2], bbox_int[3])
        cv2.rectangle(img, left_top, right_bottom, bbox_color, thickness)
    if show:
        cv2.imshow(win_name, img)
cv2.waitKey(wait_time)
if out_file is not None:
imwrite(img, out_file)
def init_detector(config, checkpoint=None, device='cuda:0'):
"""Initialize a detector from config file.
Args:
config (str or :obj:`mmcv.Config`): Config file path or the config
object.
checkpoint (str, optional): Checkpoint path. If left as None, the model
will not load any weights.
Returns:
nn.Module: The constructed detector.
"""
if isinstance(config, str):
config = mmcv.Config.fromfile(config)
elif not isinstance(config, mmcv.Config):
raise TypeError('config must be a filename or Config object, '
'but got {}'.format(type(config)))
config.model.pretrained = None
model = build_detector(config.model, test_cfg=config.test_cfg)
if checkpoint is not None:
checkpoint = load_checkpoint(model, checkpoint)
if 'CLASSES' in checkpoint['meta']:
model.CLASSES = checkpoint['meta']['CLASSES']
else:
warnings.warn('Class names are not saved in the checkpoint\'s '
'meta data, use COCO classes by default.')
model.CLASSES = get_classes('coco')
model.cfg = config # save the config in the model for convenience
model.to(device)
model.eval()
return model
class LoadImage(object):
def __call__(self, results):
if isinstance(results['img'], str):
results['filename'] = results['img']
else:
results['filename'] = None
img = mmcv.imread(results['img'])
results['img'] = img
results['img_shape'] = img.shape
results['ori_shape'] = img.shape
return results
def inference_detector(model, img):
"""Inference image(s) with the detector.
Args:
model (nn.Module): The loaded detector.
        img (str or ndarray): Either an image file path or a loaded image.
    Returns:
        The detection result for the given image.
"""
cfg = model.cfg
device = next(model.parameters()).device # model device
# build the data pipeline
test_pipeline = [LoadImage()] + cfg.data.test.pipeline[1:]
test_pipeline = Compose(test_pipeline)
# prepare data
data = dict(img=img)
data = test_pipeline(data)
data = scatter(collate([data], samples_per_gpu=1), [device])[0]
# forward the model
with torch.no_grad():
result = model(return_loss=False, rescale=True, **data)
return result
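# Minimal usage sketch for the two helpers above (illustrative only; the checkpoint path is
# hypothetical and must point to weights matching the chosen config):
#
#   model = init_detector('obj_det_config/cascade_rcnn_r101_fpn_1x.py',
#                         'checkpoints/cascade_rcnn_r101_fpn_1x.pth', device='cuda:0')
#   result = inference_detector(model, 'demo.jpg')  # per-class list of (n, 5) bbox arrays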
# TODO: merge this method with the one in BaseDetector
def show_result(img,
result,
class_names,
score_thr=0.3,
wait_time=0,
show=True,
out_file=None):
"""Visualize the detection results on the image.
Args:
img (str or np.ndarray): Image filename or loaded image.
result (tuple[list] or list): The detection result, can be either
(bbox, segm) or just bbox.
class_names (list[str] or tuple[str]): A list of class names.
score_thr (float): The threshold to visualize the bboxes and masks.
wait_time (int): Value of waitKey param.
show (bool, optional): Whether to show the image with opencv or not.
out_file (str, optional): If specified, the visualization result will
be written to the out file instead of shown in a window.
Returns:
        np.ndarray: The stacked detected bounding boxes (with scores).
"""
assert isinstance(class_names, (tuple, list))
img = mmcv.imread(img)
img = img.copy()
if isinstance(result, tuple):
bbox_result, segm_result = result
else:
bbox_result, segm_result = result, None
bboxes = np.vstack(bbox_result)
# draw segmentation masks
if segm_result is not None:
segms = mmcv.concat_list(segm_result)
inds = np.where(bboxes[:, -1] > score_thr)[0]
for i in inds:
color_mask = np.random.randint(0, 256, (1, 3), dtype=np.uint8)
            mask = maskUtils.decode(segms[i]).astype(bool)
img[mask] = img[mask] * 0.5 + color_mask * 0.5
# draw bounding boxes
labels = [
np.full(bbox.shape[0], i, dtype=np.int32)
for i, bbox in enumerate(bbox_result)
]
labels = np.concatenate(labels)
mmcv.imshow_det_bboxes(
img,
bboxes,
labels,
class_names=class_names,
score_thr=score_thr,
show=show,
wait_time=wait_time,
out_file=out_file)
# if not (show or out_file):
# return img
return bboxes
def show_result_pyplot(img,
result,
class_names,
score_thr=0.3,
fig_size=(15, 10)):
"""Visualize the detection results on the image.
Args:
img (str or np.ndarray): Image filename or loaded image.
result (tuple[list] or list): The detection result, can be either
(bbox, segm) or just bbox.
class_names (list[str] or tuple[str]): A list of class names.
score_thr (float): The threshold to visualize the bboxes and masks.
fig_size (tuple): Figure size of the pyplot figure.
"""
img = show_result(
img, result, class_names, score_thr=score_thr, show=False)
plt.figure(figsize=fig_size)
plt.imshow(mmcv.bgr2rgb(img))
| 7,224 | 33.241706 | 79 | py |
MRMGA4VAD | MRMGA4VAD-main/fore_det/obj_det_with_motion.py | import mmcv
from mmcv.image import imread, imwrite
import cv2
from fore_det.inference import inference_detector, init_detector, show_result
import numpy as np
from sklearn import preprocessing
import os
from torch.utils.data import Dataset, DataLoader
from vad_datasets import ped_dataset, avenue_dataset, shanghaiTech_dataset
from configparser import ConfigParser
import time
cp = ConfigParser()
cp.read("config.cfg")
def getObBboxes(img, model, dataset_name):
if dataset_name == 'UCSDped2':
score_thr = 0.5
min_area_thr = 10 * 10
elif dataset_name == 'avenue':
score_thr = 0.25
min_area_thr = 40 * 40
elif dataset_name == 'ShanghaiTech':
score_thr = 0.5
min_area_thr = 8 * 8
else:
raise NotImplementedError
result = inference_detector(model, img)
#bboxes = show_result(img, result, model.CLASSES, score_thr)
bbox_result = result
bboxes = np.vstack(bbox_result)
scores = bboxes[:, -1]
inds = scores > score_thr
bboxes = bboxes[inds, :]
x1 = bboxes[:, 0]
y1 = bboxes[:, 1]
x2 = bboxes[:, 2]
y2 = bboxes[:, 3]
bbox_areas = (y2 - y1 + 1) * (x2 - x1 + 1)
return bboxes[bbox_areas >= min_area_thr, :4]
def delCoverBboxes(bboxes, dataset_name):
if dataset_name == 'UCSDped2':
cover_thr = 0.6
elif dataset_name == 'avenue':
cover_thr = 0.6
elif dataset_name == 'ShanghaiTech':
cover_thr = 0.65
else:
raise NotImplementedError
assert bboxes.ndim == 2
assert bboxes.shape[1] == 4
x1 = bboxes[:,0]
y1 = bboxes[:,1]
x2 = bboxes[:,2]
y2 = bboxes[:,3]
bbox_areas = (y2-y1+1) * (x2-x1+1)
sort_idx = bbox_areas.argsort()#Index of bboxes sorted in ascending order by area size
keep_idx = []
for i in range(sort_idx.size):
#Calculate the point coordinates of the intersection
x11 = np.maximum(x1[sort_idx[i]], x1[sort_idx[i+1:]])
y11 = np.maximum(y1[sort_idx[i]], y1[sort_idx[i+1:]])
x22 = np.minimum(x2[sort_idx[i]], x2[sort_idx[i+1:]])
y22 = np.minimum(y2[sort_idx[i]], y2[sort_idx[i+1:]])
#Calculate the intersection area
w = np.maximum(0, x22-x11+1)
h = np.maximum(0, y22-y11+1)
overlaps = w * h
ratios = overlaps / bbox_areas[sort_idx[i]]
num = ratios[ratios > cover_thr]
if num.size == 0:
keep_idx.append(sort_idx[i])
return bboxes[keep_idx]
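# Worked example (toy boxes): the small box lies entirely inside the large one, so its cover
# ratio is 1.0 > cover_thr and it is dropped, while the largest box is always kept.
#
#   boxes = np.array([[0, 0, 100, 100], [10, 10, 40, 40]], dtype=np.float32)
#   delCoverBboxes(boxes, 'UCSDped2')  # -> keeps only [0, 0, 100, 100]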
def imshow_bboxes(img,
bboxes,
bbox_color=(255,255,255),
thickness=1,
show=True,
win_name='',
wait_time=0,
out_file=None):
"""Draw bboxes on an image.
Args:
img (str or ndarray): The image to be displayed.
        bboxes (ndarray): Bounding boxes (optionally with scores), shaped (n, 4) or (n, 5).
bbox_color (RGB value): Color of bbox lines.
thickness (int): Thickness of lines.
show (bool): Whether to show the image.
win_name (str): The window name.
wait_time (int): Value of waitKey param.
out_file (str or None): The filename to write the image.
"""
assert bboxes.ndim == 2
assert bboxes.shape[1] == 4 or bboxes.shape[1] == 5
for bbox in bboxes:
bbox_int = bbox.astype(np.int32)
left_top = (bbox_int[0], bbox_int[1])
right_bottom = (bbox_int[2], bbox_int[3])
img = cv2.rectangle(
img, left_top, right_bottom, bbox_color, thickness)
if show:
cv2.imshow(win_name, img)
cv2.waitKey(wait_time)
if out_file is not None:
imwrite(img, out_file)
def getFgBboxes(cur_img, img_batch, bboxes, dataset_name, verbose=False):
if dataset_name == 'UCSDped2':
area_thr = 10 * 10
binary_thr = 18
extend = 2
gauss_mask_size = 3
elif dataset_name == 'avenue':
area_thr = 40 * 40
binary_thr = 18
extend = 2
gauss_mask_size = 5
elif dataset_name == 'ShanghaiTech':
area_thr = 8 * 8
binary_thr = 15
extend = 2
gauss_mask_size = 5
else:
raise NotImplementedError
sum_grad = 0
for i in range(img_batch.shape[0]-1):
img1 = img_batch[i,:,:,:]
img2 = img_batch[i+1,:,:,:]
img1 = cv2.GaussianBlur(img1, (gauss_mask_size, gauss_mask_size), 0)
img2 = cv2.GaussianBlur(img2, (gauss_mask_size, gauss_mask_size), 0)
grad = cv2.absdiff(img1, img2)
sum_grad = grad + sum_grad
sum_grad = cv2.threshold(sum_grad, binary_thr, 255, cv2.THRESH_BINARY)[1]
if verbose is True:
cv2.imshow('grad', sum_grad)
cv2.waitKey(0)
for bbox in bboxes:
bbox_int = bbox.astype(np.int32)
extend_y1 = np.maximum(0, bbox_int[1]-extend)
extend_y2 = np.minimum(bbox_int[3]+extend, sum_grad.shape[0])
extend_x1 = np.maximum(0, bbox_int[0]-extend)
extend_x2 = np.minimum(bbox_int[2]+extend, sum_grad.shape[1])
sum_grad[extend_y1:extend_y2+1, extend_x1:extend_x2+1] = 0
if verbose is True:
cv2.imshow('del_ob_bboxes', sum_grad)
cv2.waitKey(0)
sum_grad = cv2.cvtColor(sum_grad, cv2.COLOR_BGR2GRAY)
contours, hierarchy = cv2.findContours(sum_grad, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
fg_bboxes = []
for c in contours:
x,y,w,h = cv2.boundingRect(c)
sum_grad = cv2.rectangle(sum_grad, (x,y), (x+w,y+h), 255, 1)
area = (w+1) * (h+1)
if area > area_thr and w / h < 10 and h / w < 10:
extend_x1 = np.maximum(0, x-extend)
extend_y1 = np.maximum(0, y-extend)
extend_x2 = np.minimum(x+w+extend, sum_grad.shape[1])
extend_y2 = np.minimum(y+h+extend, sum_grad.shape[0])
fg_bboxes.append([extend_x1, extend_y1, extend_x2, extend_y2])
cur_img=cv2.UMat(cur_img).get()
cur_img = cv2.rectangle(cur_img, (extend_x1,extend_y1), (extend_x2,extend_y2), (0,255,0), 1)
if verbose is True:
cv2.imshow('all_fg_bboxes', sum_grad)
cv2.waitKey(0)
cv2.imshow('filter', cur_img)
cv2.waitKey(0)
return np.array(fg_bboxes)
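# Typical foreground-extraction flow built from the helpers above (sketch; `model` is an
# object detector created with fore_det.inference.init_detector):
#
#   ob_bboxes = getObBboxes(cur_img, model, dataset_name)        # appearance (detector) boxes
#   ob_bboxes = delCoverBboxes(ob_bboxes, dataset_name)          # drop heavily covered boxes
#   fg_bboxes = getFgBboxes(cur_img, img_batch, ob_bboxes, dataset_name)  # motion-based boxes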
def getBatch(data_folder, X_dataset, context_frame_num, idx, mode, file_format):
dataset = X_dataset(dir=data_folder, context_frame_num=context_frame_num, mode=mode, border_mode='hard', file_format=file_format)
print(dataset.tot_frame_num)
batch, _ = dataset.__getitem__(idx)
start_idx, end_idx = dataset.context_range(idx)
return batch, start_idx, end_idx
| 6,865 | 29.927928 | 133 | py |
MRMGA4VAD | MRMGA4VAD-main/obj_det_config/cascade_rcnn_r101_fpn_1x.py | # model settings
model = dict(
type='CascadeRCNN',
num_stages=3,
pretrained='torchvision://resnet101',
backbone=dict(
type='ResNet',
depth=101,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
style='pytorch'),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5),
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_scales=[8],
anchor_ratios=[0.5, 1.0, 2.0],
anchor_strides=[4, 8, 16, 32, 64],
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0],
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=[
dict(
type='SharedFCBBoxHead',
num_fcs=2,
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=81,
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2],
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
dict(
type='SharedFCBBoxHead',
num_fcs=2,
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=81,
target_means=[0., 0., 0., 0.],
target_stds=[0.05, 0.05, 0.1, 0.1],
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
dict(
type='SharedFCBBoxHead',
num_fcs=2,
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=81,
target_means=[0., 0., 0., 0.],
target_stds=[0.033, 0.033, 0.067, 0.067],
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
])
# model training and testing settings
train_cfg = dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_across_levels=False,
nms_pre=2000,
nms_post=2000,
max_num=2000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=[
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False),
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.6,
neg_iou_thr=0.6,
min_pos_iou=0.6,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False),
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.7,
min_pos_iou=0.7,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False)
],
stage_loss_weights=[1, 0.5, 0.25])
test_cfg = dict(
rpn=dict(
nms_across_levels=False,
nms_pre=1000,
nms_post=1000,
max_num=1000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
score_thr=0.05, nms=dict(type='nms', iou_thr=0.5), max_per_img=100),
keep_all_stages=False)
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
imgs_per_gpu=2,
workers_per_gpu=2,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 3,
step=[8, 11])
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
# yapf:enable
# runtime settings
total_epochs = 12
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/cascade_rcnn_r101_fpn_1x'
load_from = None
resume_from = None
workflow = [('train', 1)]
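# Note: this is a config in the mmdetection v1.x style (top-level train_cfg/test_cfg). It can be
# loaded with mmcv.Config.fromfile and passed to fore_det.inference.init_detector, which builds
# the detector from the `model` and `test_cfg` sections above.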
| 7,380 | 30.408511 | 78 | py |
MRMGA4VAD | MRMGA4VAD-main/helper/misc.py | '''Some helper functions for PyTorch, including:
- get_mean_and_std: calculate the mean and std value of dataset.
    - init_params: net parameter initialization.
    - mkdir_p: make dir if not exist.
    - AverageMeter: compute and store the average and current value.
'''
import errno
import os
import sys
import time
import math
import torch
import torch.nn as nn
import torch.nn.init as init
from torch.autograd import Variable
__all__ = ['get_mean_and_std', 'init_params', 'mkdir_p', 'AverageMeter']
def get_mean_and_std(dataset):
'''Compute the mean and std value of dataset.'''
    dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=True, num_workers=2)
mean = torch.zeros(3)
std = torch.zeros(3)
print('==> Computing mean and std..')
for inputs, targets in dataloader:
for i in range(3):
mean[i] += inputs[:,i,:,:].mean()
std[i] += inputs[:,i,:,:].std()
mean.div_(len(dataset))
std.div_(len(dataset))
return mean, std
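# Example usage (illustrative; any map-style dataset yielding (image, target) pairs with
# 3-channel image tensors works, e.g. a torchvision dataset):
#
#   from torchvision import datasets, transforms
#   ds = datasets.CIFAR10('./data', train=True, download=True, transform=transforms.ToTensor())
#   mean, std = get_mean_and_std(ds)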
def init_params(net):
'''Init layer parameters.'''
for m in net.modules():
if isinstance(m, nn.Conv2d):
            init.kaiming_normal_(m.weight, mode='fan_out')
            if m.bias is not None:
                init.constant_(m.bias, 0)
        elif isinstance(m, nn.BatchNorm2d):
            init.constant_(m.weight, 1)
            init.constant_(m.bias, 0)
        elif isinstance(m, nn.Linear):
            init.normal_(m.weight, std=1e-3)
            if m.bias is not None:
                init.constant_(m.bias, 0)
def mkdir_p(path):
'''make dir if not exist'''
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
class AverageMeter(object):
"""Computes and stores the average and current value
Imported from https://github.com/pytorch/examples/blob/master/imagenet/main.py#L247-L262
"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count | 2,218 | 28.197368 | 110 | py |
CANTM | CANTM-main/updateTopics_covid.py | import sys
from GateMIcateLib import ModelUltiUpdateCAtopic as ModelUlti
from GateMIcateLib import BatchIterBert, DictionaryProcess
from GateMIcateLib.batchPostProcessors import bowBertBatchProcessor as batchPostProcessor
from GateMIcateLib import ScholarPostProcessor as ReaderPostProcessor
from GateMIcateLib.readers import WVmisInfoDataIter as dataIter
from configobj import ConfigObj
from torch.nn import init
import argparse
import json
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("testReaderargs", help="args for test reader")
parser.add_argument("--configFile", help="config files if needed")
parser.add_argument("--cachePath", help="save models")
parser.add_argument("--randomSeed", type=int, help="randomSeed for reproduction")
parser.add_argument("--num_epoches", type=int, default=5, help="num epoches")
parser.add_argument("--patient", type=int, default=40, help="early_stop_patient")
parser.add_argument("--earlyStopping", default='cls_loss', help="early stopping")
parser.add_argument("--corpusType", default='wvmisinfo', help="corpus type, for select reader")
parser.add_argument("--x_fields", help="x fileds", default='Claim,Explaination')
parser.add_argument("--y_field", help="y filed", default='Label')
args = parser.parse_args()
testReaderargs = args.testReaderargs.split(',')
x_fields = args.x_fields.split(',')
config = ConfigObj(args.configFile)
mUlti = ModelUlti(load_path=args.cachePath, gpu=True)
trainable_weights = ['xy_topics.topic.weight',
'z_y_hidden.hidden1.weight',
'z2y_classifier.layer_output.weight',
]
trainable_bias = [
'xy_topics.topic.bias',
'z_y_hidden.hidden1.bias',
'z2y_classifier.layer_output.bias'
]
trainable_no_init = [
'mu_z2.weight',
'mu_z2.bias',
'log_sigma_z2.weight',
'log_sigma_z2.bias',
'x_y_hidden.hidden1.weight',
'x_y_hidden.hidden1.bias'
]
for name, param in mUlti.net.named_parameters():
print(name)
if name in trainable_weights:
param.requires_grad = True
param.data.uniform_(-1.0, 1.0)
elif name in trainable_bias:
param.requires_grad = True
param.data.fill_(0)
elif name in trainable_no_init:
param.requires_grad = True
else:
param.requires_grad = False
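    # Optional sanity check (assumption: only the layers re-initialised above should now train):
    # print([n for n, p in mUlti.net.named_parameters() if p.requires_grad])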
postProcessor = ReaderPostProcessor(config=config, word2id=True, remove_single_list=False, add_spec_tokens=True, x_fields=x_fields, y_field=args.y_field, max_sent_len=300)
postProcessor.dictProcess = mUlti.bowdict
testDataIter = dataIter(*testReaderargs, label_field=args.y_field, postProcessor=postProcessor, config=config, shuffle=True)
testBatchIter = BatchIterBert(testDataIter, filling_last_batch=True, postProcessor=batchPostProcessor, batch_size=32)
mUlti.train(testBatchIter, num_epohs=args.num_epoches, cache_path=args.cachePath)
| 3,093 | 37.675 | 175 | py |
CANTM | CANTM-main/getPerpare.py | import os
import torch
from transformers import *
import nltk
from pathlib import Path
nltk.download('stopwords')
nltk.download('punkt')
model = BertModel.from_pretrained('bert-base-uncased')
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
script_path = os.path.abspath(__file__)
print(script_path)
parent = os.path.dirname(script_path)
parent = os.path.join(parent, 'bert-base-uncased')
print(parent)
model_save_path = os.path.join(parent,'model')
path = Path(model_save_path)
path.mkdir(parents=True, exist_ok=True)
model.save_pretrained(model_save_path)
tokenizer_save_path = os.path.join(parent,'tokenizer')
path = Path(tokenizer_save_path)
path.mkdir(parents=True, exist_ok=True)
tokenizer.save_pretrained(tokenizer_save_path)
| 753 | 25 | 62 | py |
CANTM | CANTM-main/updateTopics.py | import sys
from GateMIcateLib import ModelUltiUpdateCAtopic as ModelUlti
from GateMIcateLib import BatchIterBert, DictionaryProcess
from GateMIcateLib.batchPostProcessors import bowBertBatchProcessor as batchPostProcessor
from GateMIcateLib import ScholarPostProcessor as ReaderPostProcessor
from GateMIcateLib.readers import ACLimdbReader as dataIter
from configobj import ConfigObj
from torch.nn import init
import argparse
import json
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("testReaderargs", help="args for test reader")
parser.add_argument("--configFile", help="config files if needed")
parser.add_argument("--cachePath", help="save models")
parser.add_argument("--randomSeed", type=int, help="randomSeed for reproduction")
parser.add_argument("--num_epoches", type=int, default=5, help="num epoches")
parser.add_argument("--patient", type=int, default=40, help="early_stop_patient")
parser.add_argument("--earlyStopping", default='cls_loss', help="early stopping")
parser.add_argument("--corpusType", default='wvmisinfo', help="corpus type, for select reader")
parser.add_argument("--x_fields", help="x fileds", default='text')
parser.add_argument("--y_field", help="y filed", default='selected_label')
args = parser.parse_args()
testReaderargs = args.testReaderargs.split(',')
x_fields = args.x_fields.split(',')
config = ConfigObj(args.configFile)
mUlti = ModelUlti(load_path=args.cachePath, gpu=True)
trainable_weights = ['xy_topics.topic.weight',
'z_y_hidden.hidden1.weight',
'z2y_classifier.layer_output.weight',
]
trainable_bias = [
'xy_topics.topic.bias',
'z_y_hidden.hidden1.bias',
'z2y_classifier.layer_output.bias'
]
for name, param in mUlti.net.named_parameters():
if name in trainable_weights:
param.requires_grad = True
param.data.uniform_(-1.0, 1.0)
elif name in trainable_bias:
param.requires_grad = True
param.data.fill_(0)
else:
param.requires_grad = False
postProcessor = ReaderPostProcessor(config=config, word2id=True, remove_single_list=False, add_spec_tokens=True, x_fields=x_fields, y_field=args.y_field, max_sent_len=510)
postProcessor.dictProcess = mUlti.bowdict
testDataIter = dataIter(*testReaderargs, postProcessor=postProcessor, config=config, shuffle=True)
testBatchIter = BatchIterBert(testDataIter, filling_last_batch=True, postProcessor=batchPostProcessor, batch_size=32)
mUlti.train(testBatchIter, num_epohs=args.num_epoches, cache_path=args.cachePath)
| 2,718 | 38.405797 | 175 | py |
CANTM | CANTM-main/GateMIcateLib/modelUltiClassTopic.py | import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import numpy as np
import copy
import os
from pathlib import Path
import pickle
import datetime
from .modelUlti import modelUlti
class ModelUltiClass(modelUlti):
def __init__(self, net=None, gpu=False, load_path=None):
super().__init__(net=net, gpu=gpu)
if load_path:
self.loadModel(load_path)
if self.gpu:
self.net.cuda()
def train(self, trainBatchIter, num_epohs=100, valBatchIter=None, cache_path=None, earlyStopping='cls_loss', patience=5):
pytorch_total_params = sum(p.numel() for p in self.net.parameters())
print('total_params: ',pytorch_total_params)
pytorch_train_params = sum(p.numel() for p in self.net.parameters() if p.requires_grad)
print('train_params: ',pytorch_train_params)
self.bowdict = trainBatchIter.dataIter.postProcessor.dictProcess
self.labels = trainBatchIter.dataIter.postProcessor.labelsFields
if earlyStopping == 'None':
earlyStopping = None
self.cache_path = cache_path
output_dict = {}
output_dict['accuracy'] = 'no val iter'
output_dict['perplexity'] = 'no val iter'
output_dict['perplexity_x_only'] = 'no val iter'
self.evaluation_history = []
self.optimizer = optim.Adam(self.net.parameters())
print(num_epohs)
for epoch in range(num_epohs):
begin_time = datetime.datetime.now()
all_loss = []
all_elboz1 = []
all_elboz2 = []
all_bow = []
trainIter = self.pred(trainBatchIter, train=True)
for current_prediction in trainIter:
self.optimizer.zero_grad()
pred = current_prediction['pred']
y = current_prediction['y']
atted = current_prediction['atted']
loss = pred['loss']
cls_loss = pred['cls_loss'].sum()
elbo_z1 = pred['elbo_x'].to('cpu').detach().numpy()
elbo_z2 = pred['elbo_xy'].to('cpu').detach().numpy()
bow_x = current_prediction['x_bow'].to('cpu').detach().numpy()
all_elboz1.append(elbo_z1)
all_elboz2.append(elbo_z2)
all_bow.append(bow_x)
loss.backward()
self.optimizer.step()
loss_value = float(cls_loss.data.item())
all_loss.append(loss_value)
if epoch % 3 == 0:
topics = self.getTopics(trainBatchIter.dataIter.postProcessor.dictProcess, cache_path=self.cache_path)
#print('===========')
x_only_topic = self.get_x_only_Topics(trainBatchIter.dataIter.postProcessor.dictProcess, cache_path=self.cache_path)
#print('========================')
self.getClassTopics(trainBatchIter.dataIter.postProcessor.dictProcess, cache_path=self.cache_path)
cache_last_path = os.path.join(self.cache_path, 'last_net.model')
self.saveWeights(cache_last_path)
if valBatchIter:
output_dict = self.eval(valBatchIter, get_perp=True)
avg_loss = sum(all_loss)/len(all_loss)
output_dict['cls_loss'] = -avg_loss
perplexity_z1, log_perp_z1 = self._get_prep(all_elboz1, all_bow)
perplexity_z2, log_perp_z2 = self._get_prep(all_elboz2, all_bow)
output_dict['train_ppl_loss'] = -perplexity_z1
if earlyStopping:
stop_signal = self.earlyStop(output_dict, patience=patience, metric=earlyStopping, num_epoch=num_epohs)
if stop_signal:
print('stop signal received, stop training')
cache_load_path = os.path.join(self.cache_path, 'best_net.model')
print('finish training, load model from ', cache_load_path)
self.loadWeights(cache_load_path)
break
end_time = datetime.datetime.now()
timeused = end_time - begin_time
print('epoch ', epoch, 'loss', avg_loss, ' val acc: ', output_dict['accuracy'], 'test_pplz2: ', output_dict['perplexity'], 'test_perpz1: ', output_dict['perplexity_x_only'], 'train_pplz2: ', perplexity_z2, 'train_perpz1: ', perplexity_z1, 'time: ', timeused)
cache_last_path = os.path.join(self.cache_path, 'last_net.model')
self.saveWeights(cache_last_path)
self.saveModel(self.cache_path)
self.getTopics(trainBatchIter.dataIter.postProcessor.dictProcess, cache_path=self.cache_path)
#print('===========')
self.getClassTopics(trainBatchIter.dataIter.postProcessor.dictProcess, cache_path=self.cache_path)
#print('===========')
x_only_topic = self.get_x_only_Topics(trainBatchIter.dataIter.postProcessor.dictProcess, cache_path=self.cache_path)
def getClassTopics(self, dictProcess, ntop=10, cache_path=None):
termMatrix = self.net.get_class_topics()
topicWordList = []
for each_topic in termMatrix:
trans_list = list(enumerate(each_topic.cpu().numpy()))
#print(trans_list)
trans_list = sorted(trans_list, key=lambda k: k[1], reverse=True)
#print(trans_list)
topic_words = [dictProcess.get(item[0]) for item in trans_list[:ntop]]
#print(topic_words)
topicWordList.append(topic_words)
if cache_path:
save_path = os.path.join(cache_path, 'classtopics.txt')
self.saveTopic(topicWordList, save_path)
return topicWordList
def saveModel(self, cache_path):
model_path = os.path.join(cache_path, 'net.model')
dict_path = os.path.join(cache_path, 'dict.pkl')
label_path = os.path.join(cache_path, 'label.pkl')
torch.save(self.net, model_path)
with open(dict_path, 'wb') as fp:
pickle.dump(self.bowdict, fp)
with open(label_path, 'wb') as fp:
pickle.dump(self.labels, fp)
def loadModel(self, cache_path):
model_path = os.path.join(cache_path, 'net.model')
dict_path = os.path.join(cache_path, 'dict.pkl')
label_path = os.path.join(cache_path, 'label.pkl')
self.net = torch.load(model_path, map_location=torch.device("cpu"))
self.net.eval()
with open(dict_path, 'rb') as fp:
self.bowdict = pickle.load(fp)
with open(label_path, 'rb') as fp:
self.labels = pickle.load(fp)
def pred(self, batchGen, train=False, updateTopic=False):
if train or updateTopic:
self.net.train()
#self.optimizer.zero_grad()
else:
self.net.eval()
i=0
pre_embd = False
for x, x_bow, y in batchGen:
i+=1
print("processing batch", i, end='\r')
if self.gpu:
y = y.type(torch.cuda.LongTensor)
x_bow = x_bow.type(torch.cuda.FloatTensor)
x_bow.cuda()
y.cuda()
if batchGen.dataIter.postProcessor.embd_ready:
pre_embd = True
x = x.type(torch.cuda.FloatTensor).squeeze(1)
x.cuda()
else:
x = x.type(torch.cuda.LongTensor)
x.cuda()
if train:
one_hot_y = self.y2onehot(y)
if batchGen.dataIter.label_weights_list:
n_samples = self.get_num_samples(y, batchGen.dataIter.label_weights_list)
else:
n_samples = 10
#print(n_samples)
pred, atted = self.net(x, bow=x_bow, train=True, true_y=one_hot_y, n_samples=n_samples, pre_embd=pre_embd, true_y_ids=y)
elif updateTopic:
pred, atted = self.net(x, bow=x_bow, pre_embd=pre_embd, update_catopic=True)
else:
pred, atted = self.net(x, bow=x_bow, pre_embd=pre_embd)
#pred = pred['y_hat']
output_dict = {}
output_dict['pred'] = pred
output_dict['y'] = y
output_dict['atted'] = atted
output_dict['x_bow'] = x_bow
yield output_dict
def application_oneSent(self, x):
if self.gpu:
x = x.type(torch.cuda.LongTensor)
x.cuda()
pred, atted = self.net(x)
output_dict = {}
output_dict['pred'] = pred
output_dict['atted'] = atted
return output_dict
def get_num_samples(self, y, weight_list):
n_samples = 0
for y_item in y:
n_samples += weight_list[y_item.item()]
return n_samples
def y2onehot(self, y):
num_class = self.net.n_classes
one_hot_y_list = []
for i in range(len(y)):
current_one_hot = [0]*num_class
current_one_hot[y[i].item()] = 1
one_hot_y_list.append(copy.deepcopy(current_one_hot))
tensor_one_hot_y = torch.tensor(one_hot_y_list)
if self.gpu:
tensor_one_hot_y = tensor_one_hot_y.type(torch.cuda.FloatTensor)
tensor_one_hot_y = tensor_one_hot_y.cuda()
return tensor_one_hot_y
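    # Example: with n_classes == 3, y = tensor([2, 0]) becomes [[0, 0, 1], [1, 0, 0]],
    # i.e. one one-hot row per sample, used as the true-label input during training.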
def getTopics(self, dictProcess, ntop=10, cache_path=None):
termMatrix = self.net.get_topics()
#print(termMatrix.shape)
topicWordList = []
for each_topic in termMatrix:
trans_list = list(enumerate(each_topic.cpu().numpy()))
#print(trans_list)
trans_list = sorted(trans_list, key=lambda k: k[1], reverse=True)
#print(trans_list)
topic_words = [dictProcess.get(item[0]) for item in trans_list[:ntop]]
#print(topic_words)
topicWordList.append(topic_words)
if cache_path:
save_path = os.path.join(cache_path, 'topics.txt')
self.saveTopic(topicWordList, save_path)
return topicWordList
def get_x_only_Topics(self, dictProcess, ntop=10, cache_path=None):
termMatrix = self.net.get_x_only_topics()
#print(termMatrix.shape)
topicWordList = []
for each_topic in termMatrix:
trans_list = list(enumerate(each_topic.cpu().numpy()))
#print(trans_list)
trans_list = sorted(trans_list, key=lambda k: k[1], reverse=True)
#print(trans_list)
topic_words = [dictProcess.get(item[0]) for item in trans_list[:ntop]]
#print(topic_words)
topicWordList.append(topic_words)
if cache_path:
save_path = os.path.join(cache_path, 'x_only_topics.txt')
self.saveTopic(topicWordList, save_path)
return topicWordList
def saveTopic(self, topics, save_path):
with open(save_path, 'w') as fo:
for each_topic in topics:
topic_line = ' '.join(each_topic)
fo.write(topic_line+'\n')
| 11,148 | 37.711806 | 270 | py |
CANTM | CANTM-main/GateMIcateLib/modelUltiUpdateCATopic.py | import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import numpy as np
import copy
import os
from pathlib import Path
import pickle
from .modelUltiClassTopic import ModelUltiClass
class ModelUltiUpdateCAtopic(ModelUltiClass):
def __init__(self, net=None, gpu=False, load_path=None):
super().__init__(net=net, gpu=gpu, load_path=load_path)
def train(self, trainBatchIter, num_epohs=100, valBatchIter=None, cache_path=None, earlyStopping='cls_loss', patience=5):
self.bowdict = trainBatchIter.dataIter.postProcessor.dictProcess
self.labels = trainBatchIter.dataIter.postProcessor.labelsFields
if earlyStopping == 'None':
earlyStopping = None
self.cache_path = cache_path
output_dict = {}
output_dict['accuracy'] = 'no val iter'
output_dict['perplexity'] = 'no val iter'
output_dict['perplexity_x_only'] = 'no val iter'
self.evaluation_history = []
self.optimizer = optim.Adam(self.net.parameters())
print(num_epohs)
for epoch in range(num_epohs):
all_loss = []
all_elboz1 = []
all_elboz2 = []
all_bow = []
trainIter = self.pred(trainBatchIter, train=False, updateTopic=True)
for current_prediction in trainIter:
self.optimizer.zero_grad()
pred = current_prediction['pred']
atted = current_prediction['atted']
loss = pred['loss']
bow_x = current_prediction['x_bow'].to('cpu').detach().numpy()
all_bow.append(bow_x)
loss.backward()
self.optimizer.step()
topics = self.getTopics(trainBatchIter.dataIter.postProcessor.dictProcess, cache_path=self.cache_path)
topics = self.getTopics(trainBatchIter.dataIter.postProcessor.dictProcess, cache_path=self.cache_path)
cache_last_path = os.path.join(self.cache_path, 'last_net.model')
self.saveWeights(cache_last_path)
print('finish epoch ', epoch)
cache_last_path = os.path.join(self.cache_path, 'last_net.model')
self.saveWeights(cache_last_path)
self.saveModel(self.cache_path)
self.getTopics(trainBatchIter.dataIter.postProcessor.dictProcess, cache_path=self.cache_path)
| 2,399 | 31 | 125 | py |
CANTM | CANTM-main/GateMIcateLib/batchPostProcessors.py | import torch
def xonlyBatchProcessor(x, y):
ss = [s[1] for s in x]
return ss[0]
def bowBertBatchProcessor(raw_x, y):
x = [s[0] for s in raw_x]
idded_words = [s[1] for s in raw_x]
y_class = y
return torch.tensor(x), torch.tensor(idded_words), torch.tensor(y_class)
def xyOnlyBertBatchProcessor(raw_x, y):
x = [s[0] for s in raw_x]
y_class = y
return torch.tensor(x), torch.tensor(y_class)
def singleProcessor_noy(raw_x):
x = [raw_x[0]]
return torch.tensor(x)
| 508 | 21.130435 | 76 | py |
CANTM | CANTM-main/GateMIcateLib/modelUltiVAEtm_noatt.py | import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import numpy as np
import copy
import os
from pathlib import Path
from .modelUlti import modelUlti
class ModelUltiVAEtmNOatt(modelUlti):
def __init__(self, net=None, gpu=False):
super().__init__(net=net, gpu=gpu)
def train(self, trainBatchIter, num_epohs=100, valBatchIter=None, cache_path=None, earlyStopping='cls_loss', patience=5):
self.cache_path = cache_path
output_dict = {}
output_dict['accuracy'] = 'no val iter'
self.evaluation_history = []
classifier_paramters = list(self.net.wv_hidden.parameters()) + list(self.net.wv_classifier.parameters())
topic_model_paramters = list(self.net.mu.parameters())+ list(self.net.log_sigma.parameters()) + list(self.net.topics.parameters())
self.optimizer_classifier = optim.Adam(classifier_paramters)
self.optimizer_topic_modelling = optim.Adam(topic_model_paramters)
#self.optimizer = optim.Adam(self.net.parameters())
self.criterion = nn.CrossEntropyLoss()
if self.gpu:
self.criterion.cuda()
for epoch in range(num_epohs):
all_loss = []
trainIter = self.pred(trainBatchIter, train=True)
for current_prediction in trainIter:
#self.optimizer.zero_grad()
self.optimizer_classifier.zero_grad()
self.optimizer_topic_modelling.zero_grad()
pred = current_prediction['pred']
y = current_prediction['y']
atted = current_prediction['atted']
#y_desc_representation = current_prediction['y_desc_representation']
#class_loss = self.criterion(pred['pred'], y)
#topic_loss = pred['loss']
#print(class_loss)
#desc_loss = self.desc_criterion(input=atted, target=y_desc_representation)
loss = pred['loss']
cls_loss = pred['cls_loss'].sum()
loss.backward()
#self.optimizer.step()
self.optimizer_classifier.step()
self.optimizer_topic_modelling.step()
#loss_value = float(loss.data.item())
loss_value = float(cls_loss.data.item())
all_loss.append(loss_value)
if epoch % 20 == 0:
self.getTopics(trainBatchIter.dataIter.postProcessor.dictProcess)
cache_last_path = os.path.join(self.cache_path, 'last_net.model')
self.saveWeights(cache_last_path)
print("Finish Epoch ", epoch)
if valBatchIter:
output_dict = self.eval(valBatchIter)
avg_loss = sum(all_loss)/len(all_loss)
output_dict['cls_loss'] = -avg_loss
if earlyStopping:
stop_signal = self.earlyStop(output_dict, patience=patience, metric=earlyStopping, num_epoch=num_epohs)
if stop_signal:
print('stop signal received, stop training')
cache_load_path = os.path.join(self.cache_path, 'best_net.model')
print('finish training, load model from ', cache_load_path)
self.loadWeights(cache_load_path)
break
print('epoch ', epoch, 'loss', avg_loss, ' val acc: ', output_dict['accuracy'])
cache_last_path = os.path.join(self.cache_path, 'last_net.model')
self.saveWeights(cache_last_path)
#cache_load_path = os.path.join(self.cache_path, 'best_net.model')
#print('finish training, load model from ', cache_load_path)
#self.loadWeights(cache_load_path)
def pred(self, batchGen, train=False):
if train:
self.net.train()
#self.optimizer.zero_grad()
else:
self.net.eval()
i=0
pre_embd = False
for x, x_bow, y in batchGen:
i+=1
print("processing batch", i, end='\r')
if self.gpu:
y = y.type(torch.cuda.LongTensor)
x_bow = x_bow.type(torch.cuda.FloatTensor)
x_bow.cuda()
y.cuda()
if batchGen.dataIter.postProcessor.embd_ready:
pre_embd = True
x = x.type(torch.cuda.FloatTensor).squeeze(1)
x.cuda()
else:
x = x.type(torch.cuda.LongTensor)
x.cuda()
if train:
one_hot_y = self.y2onehot(y)
if batchGen.dataIter.label_weights_list:
n_samples = self.get_num_samples(y, batchGen.dataIter.label_weights_list)
else:
n_samples = 10
#print(n_samples)
pred, atted = self.net(x, bow=x_bow, train=True, true_y=one_hot_y, n_samples=n_samples, pre_embd=pre_embd, true_y_ids=y)
else:
pred, atted = self.net(x, bow=x_bow, pre_embd=pre_embd)
output_dict = {}
output_dict['pred'] = pred
output_dict['y'] = y
output_dict['atted'] = atted
yield output_dict
def application_oneSent(self, x):
if self.gpu:
x = x.type(torch.cuda.LongTensor)
x.cuda()
pred, atted = self.net(x)
output_dict = {}
output_dict['pred'] = pred
output_dict['atted'] = atted
return output_dict
def get_num_samples(self, y, weight_list):
n_samples = 0
for y_item in y:
n_samples += weight_list[y_item.item()]
return n_samples
def y2onehot(self, y):
num_class = self.net.n_classes
one_hot_y_list = []
for i in range(len(y)):
current_one_hot = [0]*num_class
current_one_hot[y[i].item()] = 1
one_hot_y_list.append(copy.deepcopy(current_one_hot))
tensor_one_hot_y = torch.tensor(one_hot_y_list)
if self.gpu:
tensor_one_hot_y = tensor_one_hot_y.type(torch.cuda.FloatTensor)
tensor_one_hot_y = tensor_one_hot_y.cuda()
return tensor_one_hot_y
def getTopics(self, dictProcess, ntop=10):
termMatrix = self.net.get_topics()
#print(termMatrix.shape)
topicWordList = []
for each_topic in termMatrix:
trans_list = list(enumerate(each_topic.cpu().numpy()))
#print(trans_list)
trans_list = sorted(trans_list, key=lambda k: k[1], reverse=True)
#print(trans_list)
topic_words = [dictProcess.get(item[0]) for item in trans_list[:ntop]]
#print(topic_words)
topicWordList.append(topic_words)
return topicWordList
| 6,838 | 38.304598 | 138 | py |
CANTM | CANTM-main/GateMIcateLib/modelUlti.py | import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import numpy as np
import os
from pathlib import Path
class modelUlti:
def __init__(self, net=None, gpu=False):
if net:
self.net = net
self.gpu = gpu
if self.gpu and net:
self.net.cuda()
def train(self, trainBatchIter, num_epohs=100, valBatchIter=None, cache_path=None, patience=15, earlyStopping='cls_loss'):
self.cache_path = cache_path
output_dict = {}
output_dict['accuracy'] = 'no val iter'
self.evaluation_history = []
self.optimizer = optim.Adam(self.net.parameters())
self.criterion = nn.CrossEntropyLoss()
if self.gpu:
self.criterion.cuda()
for epoch in range(num_epohs):
all_loss = []
trainIter = self.pred(trainBatchIter, train=True)
for current_prediction in trainIter:
pred = current_prediction['pred']['y_hat']
y = current_prediction['y']
self.optimizer.zero_grad()
loss = self.criterion(pred, y)
loss.backward()
self.optimizer.step()
loss_value = float(loss.data.item())
all_loss.append(loss_value)
print("Finish batch")
if valBatchIter:
output_dict = self.eval(valBatchIter)
avg_loss = sum(all_loss)/len(all_loss)
output_dict['cls_loss'] = -avg_loss
if earlyStopping:
stop_signal = self.earlyStop(output_dict, num_epoch=num_epohs, patience=patience, metric=earlyStopping)
if stop_signal:
print('stop signal received, stop training')
cache_load_path = os.path.join(self.cache_path, 'best_net.model')
print('finish training, load model from ', cache_load_path)
self.loadWeights(cache_load_path)
break
print('epoch ', epoch, 'loss', avg_loss, ' val acc: ', output_dict['accuracy'])
if epoch % 20 == 0:
cache_last_path = os.path.join(self.cache_path, 'last_net.model')
self.saveWeights(cache_last_path)
cache_last_path = os.path.join(self.cache_path, 'last_net.model')
self.saveWeights(cache_last_path)
def earlyStop(self, output_dict, metric='accuracy', patience=40, num_epoch=None):
result = output_dict[metric]
stop_signal = False
self.evaluation_history.append(result)
num_epochs = len(self.evaluation_history)
max_result = max(self.evaluation_history)
max_epoch = self.evaluation_history.index(max_result)
max_passed = num_epochs - max_epoch
if max_passed >= patience:
stop_signal = True
if num_epoch:
#print('num epoch passed: ', len(self.evaluation_history))
#print('max_epoches:', num_epoch)
if len(self.evaluation_history) == num_epoch:
stop_signal = True
if max_passed == 1:
print('caching best ')
cache_path = os.path.join(self.cache_path, 'best_net.model')
self.saveWeights(cache_path)
return stop_signal
def pred(self, batchGen, train=False):
pre_embd = False
if train:
self.net.train()
else:
self.net.eval()
i=0
for x, y in batchGen:
i+=1
print("processing batch", i, end='\r')
if self.gpu:
y = y.type(torch.cuda.LongTensor)
y.cuda()
if batchGen.dataIter.postProcessor.embd_ready:
pre_embd = True
x = x.type(torch.cuda.FloatTensor).squeeze(1)
x.cuda()
else:
x = x.type(torch.cuda.LongTensor)
x.cuda()
pred = self.net(x, pre_embd=pre_embd)
output_dict = {}
output_dict['pred'] = pred
output_dict['y'] = y
yield output_dict
def eval(self, batchGen, get_perp=False):
output_dict = {}
all_prediction = []
all_true_label = []
all_elbo_x = []
all_elbo_xy = []
all_bow_x = []
#print(len(batchGen))
#print(len(batchGen.dataIter))
for current_prediction in self.pred(batchGen):
pred = current_prediction['pred']['y_hat']
y = current_prediction['y']
current_batch_out = F.softmax(pred, dim=-1)
label_prediction = torch.max(current_batch_out, -1)[1]
current_batch_out_list = current_batch_out.to('cpu').detach().numpy()
label_prediction_list = label_prediction.to('cpu').detach().numpy()
y_list = y.to('cpu').detach().numpy()
all_prediction.append(label_prediction_list)
all_true_label.append(y_list)
if get_perp:
elbo_x = current_prediction['pred']['elbo_x'].to('cpu').detach().numpy()
elbo_xy = current_prediction['pred']['elbo_xy'].to('cpu').detach().numpy()
bow_x = current_prediction['x_bow'].to('cpu').detach().numpy()
#print(elbo_xy)
all_elbo_x.append(elbo_x)
all_elbo_xy.append(elbo_xy)
all_bow_x.append(bow_x)
if get_perp:
perplexity, log_perp = self._get_prep(all_elbo_xy, all_bow_x)
output_dict['perplexity'] = perplexity
output_dict['log_perplexity'] = log_perp
perplexity_x_only, log_perp_x_only = self._get_prep(all_elbo_x, all_bow_x)
output_dict['perplexity_x_only'] = perplexity_x_only
output_dict['log_perplexity_x_only'] = log_perp_x_only
all_prediction = np.concatenate(all_prediction)
all_true_label = np.concatenate(all_true_label)
#print(len(all_true_label))
num_correct = (all_prediction == all_true_label).sum()
accuracy = num_correct / len(all_prediction)
output_dict['accuracy'] = accuracy
output_dict['f-measure'] = {}
num_classes = len(batchGen.dataIter.postProcessor.labelsFields)
for class_id in list(range(num_classes)):
f_measure_score = self.fMeasure(all_prediction, all_true_label, class_id)
output_dict['f-measure']['class '+str(class_id)] = f_measure_score
return output_dict
def _get_prep(self, all_elbo_list, all_bow_list):
all_elbo = np.concatenate(all_elbo_list)
all_bow = np.concatenate(all_bow_list)
###############################################
##num_token = all_bow.sum(axis=1)
##print(num_token)
##print(num_token.shape)
#log_perp = np.mean(all_elbo / all_bow.sum(axis=1))
#print(log_perp)
#############################################
num_token = all_bow.sum()
log_perp = all_elbo.sum() / num_token
#############################################
#print(log_perp)
perplexity = np.exp(log_perp)
#print(perplexity)
return perplexity, log_perp
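    # Added note (not in the original): _get_prep computes corpus-level
    # perplexity, i.e. exp(sum of per-document ELBO terms / total number of
    # BoW tokens), rather than averaging per-document log-perplexities
    # (the commented-out variant above).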
def saveWeights(self, save_path):
torch.save(self.net.state_dict(), save_path)
def loadWeights(self, load_path, cpu=True):
if cpu:
self.net.load_state_dict(torch.load(load_path, map_location=torch.device('cpu')), strict=False)
else:
self.net.load_state_dict(torch.load(load_path), strict=False)
self.net.eval()
def fMeasure(self, all_prediction, true_label, class_id, ignoreid=None):
#print(class_id)
mask = [class_id] * len(all_prediction)
mask_arrary = np.array(mask)
pred_mask = np.argwhere(all_prediction==class_id)
#print(pred_mask)
true_mask = np.argwhere(true_label==class_id)
#print(true_mask)
#print(len(true_mask))
total_pred = 0
total_true = 0
pc = 0
for i in pred_mask:
if all_prediction[i[0]] == true_label[i[0]]:
pc+=1
if true_label[i[0]] != ignoreid:
total_pred += 1
rc = 0
for i in true_mask:
if all_prediction[i[0]] == true_label[i[0]]:
rc+=1
if true_label[i[0]] != ignoreid:
total_true += 1
if total_pred == 0:
precision = 0
else:
precision = float(pc)/total_pred
if total_true == 0:
recall = 0
else:
recall = float(rc)/total_true
if (precision+recall)==0:
f_measure = 0
else:
f_measure = 2*((precision*recall)/(precision+recall))
#print(total_true)
return precision, recall, f_measure, total_pred, total_true, pc, rc
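
# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): a tiny, self-contained
# check of the fMeasure helper above on toy predictions. The __main__ guard
# keeps it inert when the module is imported.
if __name__ == '__main__':
    toy_pred = np.array([0, 1, 1, 0, 1])
    toy_true = np.array([0, 1, 0, 0, 1])
    p, r, f, n_pred, n_true, pc, rc = modelUlti().fMeasure(toy_pred, toy_true, class_id=1)
    print('precision', p, 'recall', r, 'f-measure', f)  # -> 0.666..., 1.0, 0.8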
| 8,968 | 35.02008 | 126 | py |
CANTM | CANTM-main/GateMIcateLib/EvaluationManager.py | import sys
import nltk
import math
from GateMIcateLib import BatchIterBert, DictionaryProcess
#from GateMIcateLib import WVPostProcessor as ReaderPostProcessor
from configobj import ConfigObj
import torch
import argparse
import copy
from sklearn.model_selection import KFold
import random
import os
from pathlib import Path
from gensim.corpora.dictionary import Dictionary
from gensim.models import LdaModel
import numpy as np
def get_average_fmeasure_score(results_dict, field):
t=0
score = 0
for class_field in results_dict['f-measure']:
score += sum(results_dict['f-measure'][class_field][field])
t += len(results_dict['f-measure'][class_field][field])
return score/t
def get_micro_fmeasure(results_dict, num_field, de_field):
score = 0
for class_field in results_dict['f-measure']:
numerator = sum(results_dict['f-measure'][class_field][num_field])
denominator = sum(results_dict['f-measure'][class_field][de_field])
if denominator != 0:
score += numerator/denominator
t = len(results_dict['f-measure'])
return score/t
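
# Hedged clarification (added, not in the original): both helpers above expect
# the cross-fold results_dict built in EvaluationManager.cross_fold_evaluation
# below, where results_dict['f-measure'][class_name][field] is a list with one
# value per fold. Macro scores average the per-class ratios, while micro scores
# pool the raw counts ('matches', 'total_pred', 'total_true') before dividing.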
class EvaluationManager:
def __init__(self, trainReaderargs, envargs, testReaderargs=None, valReaderargs=None):
self._initParams(envargs)
self.trainReaderargs = trainReaderargs
self.testReaderargs = testReaderargs
self.valReaderargs = valReaderargs
self.getLibs()
self._get_train_DataIter()
def get_covid_train_json_for_scholar(self):
current_traindataIter=dataIter(*self.trainReaderargs, config=self.config, shuffle=False)
all_json = []
for item in current_traindataIter:
claim = item['Claim']
explaination = item['Explaination']
label = item['selected_label']
sample_id = item['unique_wv_id']
text = claim+' '+explaination
current_dict = {}
current_dict['text'] = text
current_dict['sentiment'] = label
current_dict['id'] = sample_id
all_json.append(current_dict)
return all_json
def outputCorpus4NPMI(self):
all_doc = []
token_count = []
current_traindataIter=dataIter(*self.trainReaderargs, config=self.config, shuffle=False)
for item in current_traindataIter:
alltext=[]
for field in self.x_fields:
current_text = nltk.word_tokenize(item[field])
token_count.append(len(current_text))
alltext.append(' '.join(current_text))
all_doc.append(' '.join(alltext))
if self.testReaderargs:
self.testDataIter = dataIter(*self.testReaderargs, config=self.config, shuffle=False)
            for item in self.testDataIter:
alltext=[]
for field in self.x_fields:
current_text = nltk.word_tokenize(item[field])
token_count.append(len(current_text))
alltext.append(' '.join(current_text))
all_doc.append(' '.join(alltext))
print(sum(token_count)/len(token_count))
return all_doc
def _get_train_DataIter(self):
self.postProcessor = ReaderPostProcessor(config=self.config, word2id=True, remove_single_list=False, add_spec_tokens=True, x_fields=self.x_fields, y_field=self.y_field, max_sent_len=self.max_sent_len)
print(*self.trainReaderargs)
self.trainDataIter = dataIter(*self.trainReaderargs, postProcessor=self.postProcessor, config=self.config, shuffle=True)
if self.testReaderargs:
self.testDataIter = dataIter(*self.testReaderargs, postProcessor=self.postProcessor, config=self.config, shuffle=False)
print(self.get_dict)
if self.get_dict:
print('building dict')
self.buildDict()
if self.preEmbd:
print('pre calculating embedding')
net = Model(self.config, vocab_dim=self.vocab_dim)
mUlti = modelUlti(net, gpu=self.gpu)
self.trainDataIter.preCalculateEmbed(mUlti.net.bert_embedding, 0)
if not self.testReaderargs:
self.all_ids = copy.deepcopy(self.trainDataIter.all_ids)
random.shuffle(self.all_ids)
## deep copy train reader to test reader
self.testDataIter = copy.deepcopy(self.trainDataIter)
self.valDataIter = None
def _initParams(self,envargs):
print(envargs)
self.get_perp = False
self.get_dict = False
self.vocab_dim = None
self.have_dict = False
self.config_file = envargs.get('configFile',None)
self.config = ConfigObj(self.config_file)
self.cache_path = envargs.get('cachePath',None)
self.n_fold = envargs.get('nFold',5)
self.randomSeed = envargs.get('randomSeed',None)
self.preEmbd = envargs.get('preEmbd',False)
self.dynamicSampling = envargs.get('dynamicSampling',False)
self.modelType = envargs.get('model', 'clsTopic')
self.corpusType = envargs.get('corpusType', 'wvmisinfo')
self.max_sent_len = envargs.get('max_sent_len', '300')
self.num_epoches = envargs.get('num_epoches', 150)
self.patient = envargs.get('patient', 40)
self.batch_size = envargs.get('batch_size', 32)
self.earlyStopping = envargs.get('earlyStopping', 'cls_loss')
self.x_fields = envargs.get('x_fields', 'Claim,Explaination')
self.x_fields = self.x_fields.split(',')
print(self.x_fields)
self.y_field = envargs.get('y_field', 'selected_label')
self.dict_no_below = envargs.get('dict_no_below', 0)
self.dict_no_above = envargs.get('dict_no_above', 1.0)
self.dict_keep_n = envargs.get('dict_keep_n', 5000)
self.splitValidation = envargs.get('splitValidation',None)
self.inspectTest = envargs.get('inspectTest', True)
self.trainLDA = envargs.get('trainLDA', False)
self.gpu = envargs.get('gpu', True)
self.envargs = envargs
def train_lda(self, cache_path):
print(cache_path)
trainBatchIter = BatchIterBert(self.trainDataIter, filling_last_batch=False, postProcessor=batchPostProcessor, batch_size=1)
bow_list = []
for item in trainBatchIter:
bow = item[1].squeeze().detach().numpy().tolist()
bow_list.append(self.bow_2_gensim(bow))
print(len(bow_list))
#print(self.dictProcess.common_dictionary.id2token)
lda = LdaModel(np.array(bow_list), num_topics=50, passes=200, chunksize=len(bow_list), id2word=self.dictProcess.common_dictionary)
#print(lda.show_topic(1, topn=10))
output_topic_line = ''
for topic_id in range(50):
current_topic_list = []
current_topic = lda.show_topic(topic_id, topn=10)
for topic_tuple in current_topic:
current_topic_list.append(topic_tuple[0])
output_topic_line += ' '.join(current_topic_list)+'\n'
#print(current_topic_list)
topic_file = os.path.join(cache_path, 'ldatopic.txt')
with open(topic_file, 'w') as fo:
fo.write(output_topic_line)
testBatchIter = BatchIterBert(self.testDataIter, filling_last_batch=False, postProcessor=batchPostProcessor, batch_size=1)
test_bow_list = []
word_count = 0
for item in testBatchIter:
bow = item[1].squeeze().detach().numpy().tolist()
word_count += sum(bow)
test_bow_list.append(self.bow_2_gensim(bow))
print(word_count)
ppl = lda.log_perplexity(test_bow_list, len(test_bow_list))
print(ppl)
bound = lda.bound(test_bow_list)
print(bound/word_count)
print(np.exp2(-bound/word_count))
def bow_2_gensim(self, bow):
gensim_format = []
for idx, count in enumerate(bow):
if count > 0:
gensim_format.append((idx,count))
return gensim_format
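    # Added illustration (not in the original): bow_2_gensim turns a dense
    # count vector into gensim's sparse (token_id, count) pairs, e.g.
    # [0, 2, 0, 1] -> [(1, 2), (3, 1)].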
def train(self, cache_path=None):
if self.inspectTest and (not self.splitValidation):
            print('inspecting test, please do not use val acc for early stopping')
self.valDataIter = self.testDataIter
elif self.inspectTest and self.splitValidation:
            print('inspectTest and splitValidation cannot be used at the same time')
            print('disabling inspectTest')
self.inspectTest = False
if self.splitValidation:
print('splitting test for validation')
self.valDataIter = copy.deepcopy(self.trainDataIter)
train_val_ids = copy.deepcopy(self.trainDataIter.all_ids)
random.shuffle(train_val_ids)
split_4_train = 1-self.splitValidation
top_n_4_train = math.floor(len(train_val_ids) * split_4_train)
id_4_train = train_val_ids[:top_n_4_train]
id_4_val = train_val_ids[top_n_4_train:]
self.trainDataIter.all_ids = id_4_train
self.valDataIter.all_ids = id_4_val
assert self.inspectTest != self.splitValidation, 'splitValidation will overwrite inspectTest, dont use at the same time'
if self.dynamicSampling:
print('get training data sample weights')
            self.trainDataIter.cal_sample_weights()
self.trainDataIter._reset_iter()
trainBatchIter = BatchIterBert(self.trainDataIter, filling_last_batch=True, postProcessor=batchPostProcessor, batch_size=self.batch_size)
if self.valDataIter:
self.valDataIter._reset_iter()
valBatchIter = BatchIterBert(self.valDataIter, filling_last_batch=False, postProcessor=batchPostProcessor, batch_size=self.batch_size)
else:
valBatchIter = None
print(self.vocab_dim)
net = Model(self.config, vocab_dim=self.vocab_dim)
self.mUlti = modelUlti(net, gpu=self.gpu)
#print(next(trainBatchIter))
self.mUlti.train(trainBatchIter, cache_path=cache_path, num_epohs=self.num_epoches, valBatchIter=valBatchIter, patience=self.patient, earlyStopping=self.earlyStopping)
def train_test_evaluation(self):
path = Path(self.cache_path)
path.mkdir(parents=True, exist_ok=True)
self.train(cache_path=self.cache_path)
testBatchIter = BatchIterBert(self.testDataIter, filling_last_batch=False, postProcessor=batchPostProcessor, batch_size=self.batch_size)
results = self.mUlti.eval(testBatchIter, get_perp=self.get_perp)
print(results)
def train_model_only(self):
path = Path(self.cache_path)
path.mkdir(parents=True, exist_ok=True)
self.train(cache_path=self.cache_path)
def cross_fold_evaluation(self):
kf = KFold(n_splits=self.n_fold)
fold_index = 1
results_dict = {}
results_dict['accuracy'] = []
results_dict['perplexity'] = []
results_dict['log_perplexity'] = []
results_dict['perplexity_x_only'] = []
results_dict['f-measure'] = {}
for each_fold in kf.split(self.all_ids):
train_ids, test_ids = self.reconstruct_ids(each_fold)
self.trainDataIter.all_ids = train_ids
self.testDataIter.all_ids = test_ids
self.testDataIter._reset_iter()
fold_cache_path = os.path.join(self.cache_path, 'fold'+str(fold_index))
path = Path(fold_cache_path)
path.mkdir(parents=True, exist_ok=True)
if self.trainLDA:
self.train_lda(cache_path=fold_cache_path)
else:
self.train(cache_path=fold_cache_path)
testBatchIter = BatchIterBert(self.testDataIter, filling_last_batch=False, postProcessor=batchPostProcessor, batch_size=self.batch_size)
results = self.mUlti.eval(testBatchIter, get_perp=self.get_perp)
print(results)
results_dict['accuracy'].append(results['accuracy'])
if 'perplexity' in results:
results_dict['perplexity'].append(results['perplexity'])
results_dict['log_perplexity'].append(results['log_perplexity'])
results_dict['perplexity_x_only'].append(results['perplexity_x_only'])
for f_measure_class in results['f-measure']:
if f_measure_class not in results_dict['f-measure']:
results_dict['f-measure'][f_measure_class] = {'precision':[], 'recall':[], 'f-measure':[], 'total_pred':[], 'total_true':[], 'matches':[]}
results_dict['f-measure'][f_measure_class]['precision'].append(results['f-measure'][f_measure_class][0])
results_dict['f-measure'][f_measure_class]['recall'].append(results['f-measure'][f_measure_class][1])
results_dict['f-measure'][f_measure_class]['f-measure'].append(results['f-measure'][f_measure_class][2])
results_dict['f-measure'][f_measure_class]['total_pred'].append(results['f-measure'][f_measure_class][3])
results_dict['f-measure'][f_measure_class]['total_true'].append(results['f-measure'][f_measure_class][4])
results_dict['f-measure'][f_measure_class]['matches'].append(results['f-measure'][f_measure_class][5])
fold_index += 1
print(results_dict)
overall_accuracy = sum(results_dict['accuracy'])/len(results_dict['accuracy'])
if len(results_dict['perplexity']) >0:
overall_perplexity = sum(results_dict['perplexity'])/len(results_dict['perplexity'])
print('perplexity: ', overall_perplexity)
overall_log_perplexity = sum(results_dict['log_perplexity'])/len(results_dict['log_perplexity'])
print('log perplexity: ', overall_log_perplexity)
overall_perplexity_x = sum(results_dict['perplexity_x_only'])/len(results_dict['perplexity_x_only'])
print('perplexity_x_only: ', overall_perplexity_x)
macro_precision = get_average_fmeasure_score(results_dict, 'precision')
macro_recall = get_average_fmeasure_score(results_dict, 'recall')
macro_fmeasure = get_average_fmeasure_score(results_dict, 'f-measure')
micro_precision = get_micro_fmeasure(results_dict, 'matches', 'total_pred')
micro_recall = get_micro_fmeasure(results_dict, 'matches', 'total_true')
micro_fmeasure = 2*((micro_precision*micro_recall)/(micro_precision+micro_recall))
print('accuracy: ', overall_accuracy)
print('micro_precision: ', micro_precision)
print('micro_recall: ', micro_recall)
print('micro_f-measure: ', micro_fmeasure)
print('macro_precision: ', macro_precision)
print('macro_recall: ', macro_recall)
print('macro_f-measure: ', macro_fmeasure)
def reconstruct_ids(self, each_fold):
output_ids = [[],[]] #[train_ids, test_ids]
for sp_id in range(len(each_fold)):
current_output_ids = output_ids[sp_id]
current_fold_ids = each_fold[sp_id]
for doc_id in current_fold_ids:
current_output_ids.append(self.all_ids[doc_id])
return output_ids
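    # Added note (not in the original): each_fold is the (train_positions,
    # test_positions) pair yielded by sklearn's KFold.split; reconstruct_ids
    # maps those positional indices back onto the shuffled document ids in
    # self.all_ids, e.g. positions [0, 2] -> [self.all_ids[0], self.all_ids[2]].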
def buildDict(self):
batchiter = BatchIterBert(self.trainDataIter, filling_last_batch=False, postProcessor=xonlyBatchProcessor, batch_size=1)
common_dictionary = Dictionary(batchiter)
print(len(common_dictionary))
if self.testReaderargs:
print('update vocab from test set')
batchiter = BatchIterBert(self.testDataIter, filling_last_batch=False, postProcessor=xonlyBatchProcessor, batch_size=1)
common_dictionary.add_documents(batchiter)
print(len(common_dictionary))
common_dictionary.filter_extremes(no_below=self.dict_no_below, no_above=self.dict_no_above, keep_n=self.dict_keep_n)
self.dictProcess = DictionaryProcess(common_dictionary)
self.postProcessor.dictProcess = self.dictProcess
self.vocab_dim = len(self.dictProcess)
self.have_dict = True
if 1:
count_list = []
self.trainDataIter._reset_iter()
batchiter = BatchIterBert(self.trainDataIter, filling_last_batch=False, postProcessor=xonlyBatchProcessor, batch_size=1)
for item in batchiter:
current_count = sum(item)
count_list.append(current_count)
#print(current_count)
print(sum(count_list)/len(count_list))
def getModel(self):
        self.net = Model(self.config, vocab_dim=self.vocab_dim)
def getLibs(self):
print('getting libs')
print(self.modelType)
global modelUlti
global Model
global xonlyBatchProcessor
global batchPostProcessor
global dataIter
global ReaderPostProcessor
if self.modelType == 'clsTopic':
from GateMIcateLib import ModelUltiClass as modelUlti
from GateMIcateLib.models import CLSAW_TopicModel as Model
from GateMIcateLib.batchPostProcessors import xonlyBatchProcessor
from GateMIcateLib.batchPostProcessors import bowBertBatchProcessor as batchPostProcessor
self.get_dict = True
self.get_perp = True
elif self.modelType == 'clsTopicSL':
from GateMIcateLib import ModelUltiClass as modelUlti
from GateMIcateLib.models import CLSAW_TopicModelSL as Model
from GateMIcateLib.batchPostProcessors import xonlyBatchProcessor
from GateMIcateLib.batchPostProcessors import bowBertBatchProcessor as batchPostProcessor
self.get_dict = True
self.get_perp = True
elif self.modelType == 'baselineBert':
from GateMIcateLib import ModelUlti as modelUlti
from GateMIcateLib.models import BERT_Simple as Model
from GateMIcateLib.batchPostProcessors import xyOnlyBertBatchProcessor as batchPostProcessor
elif self.modelType == 'nvdm':
from GateMIcateLib import ModelUltiClass as modelUlti
from GateMIcateLib.models import NVDM as Model
from GateMIcateLib.batchPostProcessors import xonlyBatchProcessor
from GateMIcateLib.batchPostProcessors import bowBertBatchProcessor as batchPostProcessor
self.get_dict = True
self.get_perp = True
elif self.modelType == 'orinvdm':
from GateMIcateLib import ModelUltiClass as modelUlti
from GateMIcateLib.models import ORINVDM as Model
from GateMIcateLib.batchPostProcessors import xonlyBatchProcessor
from GateMIcateLib.batchPostProcessors import bowBertBatchProcessor as batchPostProcessor
self.get_dict = True
self.get_perp = True
elif self.modelType == 'clsTopicBE':
from GateMIcateLib import ModelUltiClass as modelUlti
from GateMIcateLib.models import CLSAW_TopicModel_BERTEN as Model
from GateMIcateLib.batchPostProcessors import xonlyBatchProcessor
from GateMIcateLib.batchPostProcessors import bowBertBatchProcessor as batchPostProcessor
self.get_dict = True
self.get_perp = True
print(self.corpusType)
if self.corpusType == 'wvmisinfo':
from GateMIcateLib.readers import WVmisInfoDataIter as dataIter
from GateMIcateLib import WVPostProcessor as ReaderPostProcessor
self.dict_no_below = 3
self.dict_no_above = 0.7
elif self.corpusType == 'wvmisinfoScholar':
from GateMIcateLib.readers import WVmisInfoDataIter as dataIter
from GateMIcateLib import ScholarPostProcessor as ReaderPostProcessor
self.dict_keep_n = 2000
elif self.corpusType == 'aclIMDB':
from GateMIcateLib.readers import ACLimdbReader as dataIter
from GateMIcateLib import ScholarPostProcessor as ReaderPostProcessor
elif self.corpusType == 'tsvBinary':
from GateMIcateLib.readers import TsvBinaryFolderReader as dataIter
from GateMIcateLib import WVPostProcessor as ReaderPostProcessor
self.dict_no_below = 3
self.dict_no_above = 0.7
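
# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file; paths and keys below are
# hypothetical). A typical train/test run would look roughly like:
#
#   envargs = {'configFile': 'config.ini', 'cachePath': 'cache/run1',
#              'model': 'clsTopic', 'corpusType': 'wvmisinfo'}
#   manager = EvaluationManager(['train.json'], envargs,
#                               testReaderargs=['test.json'])
#   manager.train_test_evaluation()
#
# or manager.cross_fold_evaluation() when no separate test set is given.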
| 20,370 | 42.342553 | 208 | py |
CANTM | CANTM-main/GateMIcateLib/models/CLSAW_TopicModel_simple_loss.py | import torch
from torch import nn
from torch.nn import init
from torch.nn import functional as F
import math
from .miscLayer import BERT_Embedding, WVHidden, WVClassifier, Identity, Topics, kld, CLSAW_TopicModel_Base
class CLSAW_TopicModelSL(CLSAW_TopicModel_Base):
def __init__(self, config, vocab_dim=None):
super().__init__(config=config)
default_config = {}
self.bert_embedding = BERT_Embedding(config)
bert_dim = self.bert_embedding.bert_dim
if self.banlance_loss:
self.banlance_lambda = float(math.ceil(vocab_dim/self.n_classes))
else:
self.banlance_lambda = 1
#self.wv_hidden = WVHidden(bert_dim, self.hidden_dim)
self.hidden_dim = bert_dim
##############M1###########################################
self.mu_z1 = nn.Linear(self.hidden_dim, self.z_dim)
self.log_sigma_z1 = nn.Linear(self.hidden_dim, self.z_dim)
self.x_only_topics = Topics(self.z_dim, vocab_dim)
self.xy_classifier = WVClassifier(self.z_dim, self.n_classes)
self.class_criterion = nn.CrossEntropyLoss()
#############M2############################################
self.hidden_y_dim = self.hidden_dim + self.n_classes
self.z_y_dim = self.z_dim + self.n_classes
self.x_y_hidden = WVHidden(self.hidden_y_dim, self.hidden_dim)
self.z_y_hidden = WVHidden(self.z_y_dim, self.ntopics)
self.mu_z2 = nn.Linear(self.hidden_dim, self.z_dim)
self.log_sigma_z2 = nn.Linear(self.hidden_dim, self.z_dim)
self.xy_topics = Topics(self.ntopics, vocab_dim)
self.z2y_classifier = WVClassifier(self.ntopics, self.n_classes)
############################################################
self.h_to_z = Identity()
self.class_topics = Topics(self.n_classes, vocab_dim)
self.reset_parameters()
def forward(self,x, mask=None, n_samples=1, bow=None, train=False, true_y=None, pre_embd=False, true_y_ids=None):
#print(true_y.shape)
if pre_embd:
bert_rep = x
else:
bert_rep = self.bert_embedding(x, mask)
bert_rep = bert_rep[0]
atted = bert_rep[:,0]
#hidden = self.wv_hidden(atted)
hidden = atted
mu_z1 = self.mu_z1(hidden)
log_sigma_z1 = self.log_sigma_z1(hidden)
kldz1 = kld(mu_z1, log_sigma_z1)
rec_loss_z1 = 0
classifier_loss = 0
kldz2 = 0
rec_loss_z2 = 0
log_y_hat_rec_loss = 0
class_topic_rec_loss = 0
if not train:
### for discriminator, we only use mean
z1 = mu_z1
y_hat_logis = self.xy_classifier(z1)
log_probz_1 = self.x_only_topics(z1)
y_hat = torch.softmax(y_hat_logis, dim=-1)
log_prob_class_topic = self.class_topics(y_hat)
#y = y_hat_logis
for i in range(n_samples):
if train:
z1 = torch.zeros_like(mu_z1).normal_() * torch.exp(log_sigma_z1) + mu_z1
z1 = self.h_to_z(z1)
log_probz_1 = self.x_only_topics(z1)
y_hat_logis = self.xy_classifier(z1)
y_hat = torch.softmax(y_hat_logis, dim=-1)
log_prob_class_topic = self.class_topics(y_hat)
classifier_loss += self.class_criterion(y_hat_logis, true_y_ids)
y_hat_h = torch.cat((hidden, y_hat), dim=-1)
x_y_hidden = self.x_y_hidden(y_hat_h)
mu_z2 = self.mu_z2(x_y_hidden)
log_sigma_z2 = self.log_sigma_z2(x_y_hidden)
z2 = torch.zeros_like(mu_z2).normal_() * torch.exp(log_sigma_z2) + mu_z2
topic = z2
log_prob_z2 = self.xy_topics(topic)
#y_hat_rec = self.z2y_classifier(topic)
#log_y_hat_rec = torch.log_softmax(y_hat_rec, dim=-1)
rec_loss_z1 = rec_loss_z1-(log_probz_1 * bow).sum(dim=-1)
kldz2 += kld(mu_z2, log_sigma_z2)
rec_loss_z2 = rec_loss_z2 - (log_prob_z2 * bow).sum(dim=-1)
#log_y_hat_rec_loss = log_y_hat_rec_loss - (log_y_hat_rec*true_y).sum(dim=-1)
#log_y_hat_rec_loss = log_y_hat_rec_loss - (log_y_hat_rec*y_hat).sum(dim=-1)
class_topic_rec_loss = class_topic_rec_loss - (log_prob_class_topic*bow).sum(dim=-1)
rec_loss_z1 = rec_loss_z1/n_samples
#print(rec_loss_z1.shape)
classifier_loss = classifier_loss/n_samples
kldz2 = kldz2/n_samples
rec_loss_z2 = rec_loss_z2/n_samples
log_y_hat_rec_loss = log_y_hat_rec_loss/n_samples
class_topic_rec_loss = class_topic_rec_loss/n_samples
elbo_z1 = kldz1 + rec_loss_z1
#print(elbo_z1.shape)
#elbo_z1 = elbo_z1.sum()
elbo_z2 = kldz2 + rec_loss_z2# + log_y_hat_rec_loss
#print(elbo_z2)
#elbo_z2 = elbo_z2.sum()
#class_topic_rec_loss = class_topic_rec_loss.sum()
classifier_loss = classifier_loss
total_loss = elbo_z1.sum() + elbo_z2.sum() + class_topic_rec_loss.sum() + classifier_loss*self.banlance_lambda*self.classification_loss_lambda
y = {
'loss': total_loss,
'elbo_xy': elbo_z2,
'rec_loss': rec_loss_z2,
'kld': kldz2,
'cls_loss': classifier_loss,
'class_topic_loss': class_topic_rec_loss,
'y_hat': y_hat_logis,
'elbo_x': elbo_z1
}
####################################################################################################################################################
# else:
# z1 = mu_z1
# y_hat_logis = self.xy_classifier(z1)
# y_hat = torch.softmax(y_hat_logis, dim=-1)
# y = y_hat_logis
#
#
# y_hat_h = torch.cat((hidden, y_hat), dim=-1)
# x_y_hidden = self.x_y_hidden(y_hat_h)
# mu_z2 = self.mu_z2(x_y_hidden)
# log_sigma_z2 = self.log_sigma_z2(x_y_hidden)
# z2 = torch.zeros_like(mu_z2).normal_() * torch.exp(log_sigma_z2) + mu_z2
#
# kldz2 = kld(mu_z2, log_sigma_z2)
# log_prob_z2 = self.xy_topics(z2)
# y_hat_rec = self.z2y_classifier(z2)
# log_y_hat_rec = torch.log_softmax(y_hat_rec, dim=-1)
#
#
return y, None
def reset_parameters(self):
init.zeros_(self.log_sigma_z1.weight)
init.zeros_(self.log_sigma_z1.bias)
init.zeros_(self.log_sigma_z2.weight)
init.zeros_(self.log_sigma_z2.bias)
def get_topics(self):
return self.xy_topics.get_topics()
def get_class_topics(self):
return self.class_topics.get_topics()
def get_x_only_topics(self):
return self.x_only_topics.get_topics()
| 6,776 | 35.435484 | 150 | py |
CANTM | CANTM-main/GateMIcateLib/models/CLSAW_TopicModel.py | import torch
from torch import nn
from torch.nn import init
from torch.nn import functional as F
import math
from .miscLayer import BERT_Embedding, WVHidden, WVClassifier, Identity, Topics, kld, CLSAW_TopicModel_Base
class CLSAW_TopicModel(CLSAW_TopicModel_Base):
def __init__(self, config, vocab_dim=None):
super().__init__(config=config)
default_config = {}
self.bert_embedding = BERT_Embedding(config)
bert_dim = self.bert_embedding.bert_dim
if self.banlance_loss:
self.banlance_lambda = float(math.ceil(vocab_dim/self.n_classes))
else:
self.banlance_lambda = 1
#self.wv_hidden = WVHidden(bert_dim, self.hidden_dim)
self.hidden_dim = bert_dim
##############M1###########################################
self.mu_z1 = nn.Linear(self.hidden_dim, self.z_dim)
self.log_sigma_z1 = nn.Linear(self.hidden_dim, self.z_dim)
self.x_only_topics = Topics(self.z_dim, vocab_dim)
self.xy_classifier = WVClassifier(self.z_dim, self.n_classes)
self.class_criterion = nn.CrossEntropyLoss()
#############M2############################################
self.hidden_y_dim = self.hidden_dim + self.n_classes
self.z_y_dim = self.z_dim + self.n_classes
self.x_y_hidden = WVHidden(self.hidden_y_dim, self.hidden_dim)
self.z_y_hidden = WVHidden(self.z_y_dim, self.ntopics)
self.mu_z2 = nn.Linear(self.hidden_dim, self.z_dim)
self.log_sigma_z2 = nn.Linear(self.hidden_dim, self.z_dim)
self.xy_topics = Topics(self.ntopics, vocab_dim)
self.z2y_classifier = WVClassifier(self.ntopics, self.n_classes)
############################################################
self.h_to_z = Identity()
self.class_topics = Topics(self.n_classes, vocab_dim)
self.reset_parameters()
def forward(self,x, mask=None, n_samples=1, bow=None, train=False, true_y=None, pre_embd=False, true_y_ids=None, update_catopic=False):
#print(true_y.shape)
if pre_embd:
bert_rep = x
else:
bert_rep = self.bert_embedding(x, mask)
bert_rep = bert_rep[0]
atted = bert_rep[:,0]
#hidden = self.wv_hidden(atted)
hidden = atted
mu_z1 = self.mu_z1(hidden)
log_sigma_z1 = self.log_sigma_z1(hidden)
kldz1 = kld(mu_z1, log_sigma_z1)
rec_loss_z1 = 0
classifier_loss = 0
kldz2 = 0
rec_loss_z2 = 0
log_y_hat_rec_loss = 0
class_topic_rec_loss = 0
#if not train:
# ### for discriminator, we only use mean
# z1 = mu_z1
# y_hat_logis = self.xy_classifier(z1)
# log_probz_1 = self.x_only_topics(z1)
# y_hat = torch.softmax(y_hat_logis, dim=-1)
# log_prob_class_topic = self.class_topics(y_hat)
# #y = y_hat_logis
for i in range(n_samples):
z1 = torch.zeros_like(mu_z1).normal_() * torch.exp(log_sigma_z1) + mu_z1
z1 = self.h_to_z(z1)
log_probz_1 = self.x_only_topics(z1)
#if train or update_catopic:
# z1 = torch.zeros_like(mu_z1).normal_() * torch.exp(log_sigma_z1) + mu_z1
# z1 = self.h_to_z(z1)
# log_probz_1 = self.x_only_topics(z1)
# y_hat_logis = self.xy_classifier(z1)
# y_hat = torch.softmax(y_hat_logis, dim=-1)
# log_prob_class_topic = self.class_topics(y_hat)
if train or update_catopic:
y_hat_logis = self.xy_classifier(z1)
y_hat = torch.softmax(y_hat_logis, dim=-1)
#print(y_hat.shape)
else:
y_hat_logis = self.xy_classifier(mu_z1)
y_hat = torch.softmax(y_hat_logis, dim=-1)
if train:
classifier_loss += self.class_criterion(y_hat_logis, true_y_ids)
log_prob_class_topic = self.class_topics(y_hat)
y_hat_h = torch.cat((hidden, y_hat), dim=-1)
x_y_hidden = self.x_y_hidden(y_hat_h)
mu_z2 = self.mu_z2(x_y_hidden)
log_sigma_z2 = self.log_sigma_z2(x_y_hidden)
z2 = torch.zeros_like(mu_z2).normal_() * torch.exp(log_sigma_z2) + mu_z2
y_hat_z = torch.cat((z2, y_hat), dim=-1)
topic = self.z_y_hidden(y_hat_z)
log_prob_z2 = self.xy_topics(topic)
y_hat_rec = self.z2y_classifier(topic)
log_y_hat_rec = torch.log_softmax(y_hat_rec, dim=-1)
rec_loss_z1 = rec_loss_z1-(log_probz_1 * bow).sum(dim=-1)
kldz2 += kld(mu_z2, log_sigma_z2)
rec_loss_z2 = rec_loss_z2 - (log_prob_z2 * bow).sum(dim=-1)
#log_y_hat_rec_loss = log_y_hat_rec_loss - (log_y_hat_rec*true_y).sum(dim=-1)
log_y_hat_rec_loss = log_y_hat_rec_loss - (log_y_hat_rec*y_hat).sum(dim=-1)
class_topic_rec_loss = class_topic_rec_loss - (log_prob_class_topic*bow).sum(dim=-1)
rec_loss_z1 = rec_loss_z1/n_samples
#print(rec_loss_z1.shape)
classifier_loss = classifier_loss/n_samples
kldz2 = kldz2/n_samples
rec_loss_z2 = rec_loss_z2/n_samples
log_y_hat_rec_loss = log_y_hat_rec_loss/n_samples
class_topic_rec_loss = class_topic_rec_loss/n_samples
elbo_z1 = kldz1 + rec_loss_z1
#print(elbo_z1.shape)
#elbo_z1 = elbo_z1.sum()
elbo_z2 = kldz2 + rec_loss_z2 + log_y_hat_rec_loss
#print(elbo_z2)
#elbo_z2 = elbo_z2.sum()
#class_topic_rec_loss = class_topic_rec_loss.sum()
classifier_loss = classifier_loss
total_loss = elbo_z1.sum() + elbo_z2.sum() + class_topic_rec_loss.sum() + classifier_loss*self.banlance_lambda*self.classification_loss_lambda
if update_catopic:
total_loss = elbo_z2.sum()
y = {
'loss': total_loss,
'elbo_xy': elbo_z2,
'rec_loss': rec_loss_z2,
'kld': kldz2,
'cls_loss': classifier_loss,
'class_topic_loss': class_topic_rec_loss,
'y_hat': y_hat_logis,
'elbo_x': elbo_z1
}
####################################################################################################################################################
# else:
# z1 = mu_z1
# y_hat_logis = self.xy_classifier(z1)
# y_hat = torch.softmax(y_hat_logis, dim=-1)
# y = y_hat_logis
#
#
# y_hat_h = torch.cat((hidden, y_hat), dim=-1)
# x_y_hidden = self.x_y_hidden(y_hat_h)
# mu_z2 = self.mu_z2(x_y_hidden)
# log_sigma_z2 = self.log_sigma_z2(x_y_hidden)
# z2 = torch.zeros_like(mu_z2).normal_() * torch.exp(log_sigma_z2) + mu_z2
#
# kldz2 = kld(mu_z2, log_sigma_z2)
# log_prob_z2 = self.xy_topics(z2)
# y_hat_rec = self.z2y_classifier(z2)
# log_y_hat_rec = torch.log_softmax(y_hat_rec, dim=-1)
#
#
return y, None
def reset_parameters(self):
init.zeros_(self.log_sigma_z1.weight)
init.zeros_(self.log_sigma_z1.bias)
init.zeros_(self.log_sigma_z2.weight)
init.zeros_(self.log_sigma_z2.bias)
def get_topics(self):
return self.xy_topics.get_topics()
def get_class_topics(self):
return self.class_topics.get_topics()
def get_x_only_topics(self):
return self.x_only_topics.get_topics()
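
# ---------------------------------------------------------------------------
# Added summary comment (not in the original): the training objective built in
# forward() above is, per batch,
#   total_loss = ELBO_x  (kldz1 + BoW reconstruction from z1)
#              + ELBO_xy (kldz2 + BoW and label reconstruction from the
#                         class-aware topic layer)
#              + class-topic BoW reconstruction
#              + lambda-weighted cross-entropy classification loss,
# and only the mean path through the classifier is used at evaluation time.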
| 7,541 | 35.434783 | 150 | py |
CANTM | CANTM-main/GateMIcateLib/models/CLSAW_TopicModelBertEnrich.py | import torch
from torch import nn
from torch.nn import init
from torch.nn import functional as F
import math
from .miscLayer import BERT_Embedding, WVHidden, WVClassifier, Identity, Topics, kld, CLSAW_TopicModel_Base
class CLSAW_TopicModel_BERTEN(CLSAW_TopicModel_Base):
def __init__(self, config, vocab_dim=None):
super().__init__(config=config)
default_config = {}
self.bert_embedding = BERT_Embedding(config)
bert_dim = self.bert_embedding.bert_dim
if self.banlance_loss:
self.banlance_lambda = float(math.ceil(vocab_dim/self.n_classes))
else:
self.banlance_lambda = 1
self.hidden_dim = 500
self.bow_hidden = WVHidden(vocab_dim, 500)
self.mix_bert = WVHidden(500+ bert_dim, self.hidden_dim)
##############M1###########################################
self.mu_z1 = nn.Linear(self.hidden_dim, self.z_dim)
self.log_sigma_z1 = nn.Linear(self.hidden_dim, self.z_dim)
self.x_only_topics = Topics(self.z_dim, vocab_dim)
self.xy_classifier = WVClassifier(self.z_dim, self.n_classes)
self.class_criterion = nn.CrossEntropyLoss()
#############M2############################################
self.hidden_y_dim = self.hidden_dim + self.n_classes
self.z_y_dim = self.z_dim + self.n_classes
self.x_y_hidden = WVHidden(self.hidden_y_dim, self.hidden_dim)
self.z_y_hidden = WVHidden(self.z_y_dim, self.ntopics)
self.mu_z2 = nn.Linear(self.hidden_dim, self.z_dim)
self.log_sigma_z2 = nn.Linear(self.hidden_dim, self.z_dim)
self.xy_topics = Topics(self.ntopics, vocab_dim)
self.z2y_classifier = WVClassifier(self.ntopics, self.n_classes)
############################################################
self.h_to_z = Identity()
self.class_topics = Topics(self.n_classes, vocab_dim)
self.reset_parameters()
def forward(self,x, mask=None, n_samples=1, bow=None, train=False, true_y=None, pre_embd=False, true_y_ids=None):
#print(true_y.shape)
if pre_embd:
bert_rep = x
else:
bert_rep = self.bert_embedding(x, mask)
bert_rep = bert_rep[0]
bow_hidden = self.bow_hidden(bow)
atted = bert_rep[:,0]
bert_bow = torch.cat((atted, bow_hidden), dim=-1)
hidden = self.mix_bert(bert_bow)
#hidden = atted
mu_z1 = self.mu_z1(hidden)
log_sigma_z1 = self.log_sigma_z1(hidden)
kldz1 = kld(mu_z1, log_sigma_z1)
rec_loss_z1 = 0
classifier_loss = 0
kldz2 = 0
rec_loss_z2 = 0
log_y_hat_rec_loss = 0
class_topic_rec_loss = 0
if not train:
### for discriminator, we only use mean
z1 = mu_z1
y_hat_logis = self.xy_classifier(z1)
log_probz_1 = self.x_only_topics(z1)
y_hat = torch.softmax(y_hat_logis, dim=-1)
log_prob_class_topic = self.class_topics(y_hat)
#y = y_hat_logis
for i in range(n_samples):
if train:
z1 = torch.zeros_like(mu_z1).normal_() * torch.exp(log_sigma_z1) + mu_z1
z1 = self.h_to_z(z1)
log_probz_1 = self.x_only_topics(z1)
y_hat_logis = self.xy_classifier(z1)
y_hat = torch.softmax(y_hat_logis, dim=-1)
log_prob_class_topic = self.class_topics(y_hat)
classifier_loss += self.class_criterion(y_hat_logis, true_y_ids)
y_hat_h = torch.cat((hidden, y_hat), dim=-1)
x_y_hidden = self.x_y_hidden(y_hat_h)
mu_z2 = self.mu_z2(x_y_hidden)
log_sigma_z2 = self.log_sigma_z2(x_y_hidden)
z2 = torch.zeros_like(mu_z2).normal_() * torch.exp(log_sigma_z2) + mu_z2
y_hat_z = torch.cat((z2, y_hat), dim=-1)
topic = self.z_y_hidden(y_hat_z)
log_prob_z2 = self.xy_topics(topic)
y_hat_rec = self.z2y_classifier(topic)
log_y_hat_rec = torch.log_softmax(y_hat_rec, dim=-1)
rec_loss_z1 = rec_loss_z1-(log_probz_1 * bow).sum(dim=-1)
kldz2 += kld(mu_z2, log_sigma_z2)
rec_loss_z2 = rec_loss_z2 - (log_prob_z2 * bow).sum(dim=-1)
#log_y_hat_rec_loss = log_y_hat_rec_loss - (log_y_hat_rec*true_y).sum(dim=-1)
log_y_hat_rec_loss = log_y_hat_rec_loss - (log_y_hat_rec*y_hat).sum(dim=-1)
class_topic_rec_loss = class_topic_rec_loss - (log_prob_class_topic*bow).sum(dim=-1)
rec_loss_z1 = rec_loss_z1/n_samples
#print(rec_loss_z1.shape)
classifier_loss = classifier_loss/n_samples
kldz2 = kldz2/n_samples
rec_loss_z2 = rec_loss_z2/n_samples
log_y_hat_rec_loss = log_y_hat_rec_loss/n_samples
class_topic_rec_loss = class_topic_rec_loss/n_samples
elbo_z1 = kldz1 + rec_loss_z1
#print(elbo_z1.shape)
#elbo_z1 = elbo_z1.sum()
elbo_z2 = kldz2 + rec_loss_z2 + log_y_hat_rec_loss
#print(elbo_z2)
#elbo_z2 = elbo_z2.sum()
#class_topic_rec_loss = class_topic_rec_loss.sum()
classifier_loss = classifier_loss
total_loss = elbo_z1.sum() + elbo_z2.sum() + class_topic_rec_loss.sum() + classifier_loss*self.banlance_lambda*self.classification_loss_lambda
y = {
'loss': total_loss,
'elbo_xy': elbo_z2,
'rec_loss': rec_loss_z2,
'kld': kldz2,
'cls_loss': classifier_loss,
'class_topic_loss': class_topic_rec_loss,
'y_hat': y_hat_logis,
'elbo_x': elbo_z1
}
####################################################################################################################################################
# else:
# z1 = mu_z1
# y_hat_logis = self.xy_classifier(z1)
# y_hat = torch.softmax(y_hat_logis, dim=-1)
# y = y_hat_logis
#
#
# y_hat_h = torch.cat((hidden, y_hat), dim=-1)
# x_y_hidden = self.x_y_hidden(y_hat_h)
# mu_z2 = self.mu_z2(x_y_hidden)
# log_sigma_z2 = self.log_sigma_z2(x_y_hidden)
# z2 = torch.zeros_like(mu_z2).normal_() * torch.exp(log_sigma_z2) + mu_z2
#
# kldz2 = kld(mu_z2, log_sigma_z2)
# log_prob_z2 = self.xy_topics(z2)
# y_hat_rec = self.z2y_classifier(z2)
# log_y_hat_rec = torch.log_softmax(y_hat_rec, dim=-1)
#
#
return y, None
def reset_parameters(self):
init.zeros_(self.log_sigma_z1.weight)
init.zeros_(self.log_sigma_z1.bias)
init.zeros_(self.log_sigma_z2.weight)
init.zeros_(self.log_sigma_z2.bias)
def get_topics(self):
return self.xy_topics.get_topics()
def get_class_topics(self):
return self.class_topics.get_topics()
def get_x_only_topics(self):
return self.x_only_topics.get_topics()
| 7,008 | 34.760204 | 150 | py |
CANTM | CANTM-main/GateMIcateLib/models/miscLayer.py | from transformers import BertModel
import math
import os
import torch.nn.functional as F
import torch
import torch.nn as nn
class SingleHeadAttention(nn.Module):
def __init__(self, d_model, d_output, dropout = 0.1):
super().__init__()
self.q = nn.Parameter(torch.randn([d_output, 1]).float())
self.v_linear = nn.Linear(d_model, d_output)
self.dropout_v = nn.Dropout(dropout)
self.k_linear = nn.Linear(d_model, d_output)
self.dropout_k = nn.Dropout(dropout)
self.softmax_simi = nn.Softmax(dim=1)
self.dropout = nn.Dropout(dropout)
#self.out = nn.Linear(d_output, d_output)
def forward(self, x, mask=None):
k = self.k_linear(x)
k = F.relu(k)
k = self.dropout_k(k)
v = self.v_linear(x)
v = F.relu(v)
v = self.dropout_v(v)
dotProducSimi = k.matmul(self.q)
normedSimi = self.softmax_simi(dotProducSimi)
attVector = v.mul(normedSimi)
weightedSum = torch.sum(attVector, dim=1)
#output = self.out(weightedSum)
return weightedSum
class Norm(nn.Module):
def __init__(self, d_model, eps = 1e-6):
super().__init__()
self.size = d_model
# create two learnable parameters to calibrate normalisation
self.alpha = nn.Parameter(torch.ones(self.size))
self.bias = nn.Parameter(torch.zeros(self.size))
self.eps = eps
def forward(self, x):
norm = self.alpha * (x - x.mean(dim=-1, keepdim=True)) \
/ (x.std(dim=-1, keepdim=True) + self.eps) + self.bias
return norm
def attention(q, k, v, d_k, mask=None, dropout=None):
scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(d_k)
if mask is not None:
mask = mask.unsqueeze(1)
scores = scores.masked_fill(mask == 0, -1e9)
scores = F.softmax(scores, dim=-1)
if dropout is not None:
scores = dropout(scores)
output = torch.matmul(scores, v)
return output
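
# Added reference comment (not in the original): the helper above implements
# scaled dot-product attention,
#   Attention(Q, K, V) = softmax(Q K^T / sqrt(d_k)) V,
# with optional masking (masked positions set to -1e9 before the softmax)
# and optional dropout applied to the attention weights.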
class EncoderLayer(nn.Module):
def __init__(self, d_model, heads, dropout=0.1):
super().__init__()
self.norm_1 = Norm(d_model)
self.norm_2 = Norm(d_model)
self.attn = MultiHeadAttention(heads, d_model, dropout=dropout)
self.ff = FeedForward(d_model, dropout=dropout)
self.dropout_1 = nn.Dropout(dropout)
self.dropout_2 = nn.Dropout(dropout)
def forward(self, x, mask):
x2 = self.norm_1(x)
x = x + self.dropout_1(self.attn(x2,x2,x2,mask))
x2 = self.norm_2(x)
x = x + self.dropout_2(self.ff(x2))
return x
class MultiHeadAttention(nn.Module):
def __init__(self, heads, d_model, dropout = 0.1):
super().__init__()
self.d_model = d_model
self.d_k = d_model // heads
self.h = heads
self.q_linear = nn.Linear(d_model, d_model)
self.v_linear = nn.Linear(d_model, d_model)
self.k_linear = nn.Linear(d_model, d_model)
self.dropout = nn.Dropout(dropout)
self.out = nn.Linear(d_model, d_model)
def forward(self, q, k, v, mask=None):
bs = q.size(0)
# perform linear operation and split into N heads
# bs, sl, d_model --> bs, sl, heads, sub_d_model
# d_model = heads * sub_d_model
k = self.k_linear(k).view(bs, -1, self.h, self.d_k)
q = self.q_linear(q).view(bs, -1, self.h, self.d_k)
v = self.v_linear(v).view(bs, -1, self.h, self.d_k)
# transpose to get dimensions bs * N * sl * d_model
k = k.transpose(1,2)
q = q.transpose(1,2)
v = v.transpose(1,2)
# calculate attention using function we will define next
scores = attention(q, k, v, self.d_k, mask, self.dropout)
# concatenate heads and put through final linear layer
concat = scores.transpose(1,2).contiguous()\
.view(bs, -1, self.d_model)
output = self.out(concat)
return output
class FeedForward(nn.Module):
def __init__(self, d_model, d_ff=1024, dropout = 0.1):
super().__init__()
        # d_ff defaults to 1024 here (the original Transformer uses 2048)
self.linear_1 = nn.Linear(d_model, d_ff)
self.dropout = nn.Dropout(dropout)
self.linear_2 = nn.Linear(d_ff, d_model)
def forward(self, x):
x = self.dropout(F.relu(self.linear_1(x)))
x = self.linear_2(x)
return x
class BERT_Embedding(nn.Module):
def __init__(self, config):
super().__init__()
bert_model_path = os.path.join(config['BERT'].get('bert_path'), 'model')
self.bert_dim = int(config['BERT'].get('bert_dim'))
self.trainable_layers = config['BERT'].get('trainable_layers')
self.bert = BertModel.from_pretrained(bert_model_path)
if self.trainable_layers:
#print(self.trainable_layers)
#self.bert = BertModel.from_pretrained(bert_model_path)
for name, param in self.bert.named_parameters():
if name in self.trainable_layers:
param.requires_grad = True
#print(name, param)
else:
param.requires_grad = False
else:
for p in self.bert.parameters():
p.requires_grad = False
def forward(self, x, mask=None):
        if mask is None:
            mask = x != 0
            mask = mask.type(x.type())
bert_rep = self.bert(x, attention_mask=mask)
return bert_rep
class Dense(nn.Module):
def __init__(self, input_dim, out_dim, non_linear=None):
super().__init__()
self.dense = nn.Linear(input_dim, out_dim)
self.non_linear = non_linear
def forward(self, x):
output = self.dense(x)
if self.non_linear:
output = self.non_linear(output)
return output
class Topics(nn.Module):
def __init__(self, k, vocab_size, bias=True):
super(Topics, self).__init__()
self.k = k
self.vocab_size = vocab_size
self.topic = nn.Linear(k, vocab_size, bias=bias)
def forward(self, logit):
# return the log_prob of vocab distribution
return torch.log_softmax(self.topic(logit), dim=-1)
def get_topics(self):
#print('hey')
#print(self.topic.weight)
return torch.softmax(self.topic.weight.data.transpose(0, 1), dim=-1)
def get_topic_word_logit(self):
"""topic x V.
Return the logits instead of probability distribution
"""
return self.topic.weight.transpose(0, 1)
class Identity(nn.Module):
def __init__(self):
super(Identity, self).__init__()
def forward(self, *input):
if len(input) == 1:
return input[0]
return input
def kld(mu, log_sigma):
"""log q(z) || log p(z).
mu: batch_size x dim
log_sigma: batch_size x dim
"""
return -0.5 * (1 - mu ** 2 + 2 * log_sigma - torch.exp(2 * log_sigma)).sum(dim=-1)
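
# Added reference comment (not in the original): for a diagonal Gaussian
# posterior q(z) = N(mu, sigma^2) and a standard-normal prior p(z) = N(0, I),
# the closed form used above is
#   KL(q || p) = -0.5 * sum_d (1 + 2*log(sigma_d) - mu_d^2 - sigma_d^2),
# which matches the returned expression (exp(2*log_sigma) == sigma^2).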
class BERT_Mapping_mapping(nn.Module):
def __init__(self, bert_dim):
super().__init__()
self.att = SingleHeadAttention(bert_dim, bert_dim)
def forward(self,x):
atted = self.att(x)
return atted
class WVHidden(nn.Module):
def __init__(self, input_dim, hidden_dim):
super().__init__()
self.hidden1 = nn.Linear(input_dim, hidden_dim)
def forward(self, x):
hidden = F.leaky_relu(self.hidden1(x))
return hidden
class WVClassifier(nn.Module):
def __init__(self, n_hidden, n_classes):
super().__init__()
self.layer_output = torch.nn.Linear(n_hidden, n_classes)
def forward(self, x):
out = self.layer_output(x)
return out
class CLSAW_TopicModel_Base(nn.Module):
def __init__(self, config=None):
super().__init__()
self._init_params()
if config:
self._read_config(config)
def _init_params(self):
self.hidden_dim = 300
self.z_dim = 100
self.ntopics = 50
self.class_topic_loss_lambda = 1
self.classification_loss_lambda = 1
self.banlance_loss = False
def _read_config(self, config):
self.n_classes = len(config['TARGET'].get('labels'))
if 'MODEL' in config:
if 'hidden_dim' in config['MODEL']:
self.hidden_dim = int(config['MODEL'].get('hidden_dim'))
if 'z_dim' in config['MODEL']:
self.z_dim = int(config['MODEL'].get('z_dim'))
if 'ntopics' in config['MODEL']:
self.ntopics = int(config['MODEL'].get('ntopics'))
if 'class_topic_loss_lambda' in config['MODEL']:
self.class_topic_loss_lambda = float(config['MODEL'].get('class_topic_loss_lambda'))
if 'classification_loss_lambda' in config['MODEL']:
                self.classification_loss_lambda = float(config['MODEL'].get('classification_loss_lambda'))
if 'banlance_loss' in config['MODEL']:
self.banlance_loss = config['MODEL'].as_bool('banlance_loss')
self.n_class_topics = self.z_dim+self.n_classes
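
# ---------------------------------------------------------------------------
# Hedged sanity check (not part of the original file): with mu = 0 and
# log_sigma = 0 the posterior equals the N(0, I) prior, so kld should be zero.
if __name__ == '__main__':
    _mu = torch.zeros(2, 4)
    _log_sigma = torch.zeros(2, 4)
    print(kld(_mu, _log_sigma))  # zero KL divergence for both rows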
| 9,182 | 29.407285 | 103 | py |
CANTM | CANTM-main/GateMIcateLib/models/CLSAW_TopicModel_linear.py | import torch
from torch import nn
from torch.nn import init
from torch.nn import functional as F
import math
from .miscLayer import BERT_Embedding, WVHidden, WVClassifier, Identity, Topics, kld, CLSAW_TopicModel_Base
class CLSAW_TopicModel(CLSAW_TopicModel_Base):
def __init__(self, config, vocab_dim=None):
super().__init__(config=config)
default_config = {}
self.bert_embedding = BERT_Embedding(config)
bert_dim = self.bert_embedding.bert_dim
if self.banlance_loss:
self.banlance_lambda = float(math.ceil(vocab_dim/self.n_classes))
else:
self.banlance_lambda = 1
#self.wv_hidden = WVHidden(bert_dim, self.hidden_dim)
self.hidden_dim = bert_dim
##############M1###########################################
self.mu_z1 = nn.Linear(self.hidden_dim, self.z_dim)
self.log_sigma_z1 = nn.Linear(self.hidden_dim, self.z_dim)
self.x_only_topics = Topics(self.z_dim, vocab_dim)
self.xy_classifier = WVClassifier(self.z_dim, self.n_classes)
self.class_criterion = nn.CrossEntropyLoss()
#############M2############################################
self.hidden_y_dim = self.hidden_dim + self.n_classes
self.z_y_dim = self.z_dim + self.n_classes
self.x_y_hidden = WVHidden(self.hidden_y_dim, self.hidden_dim)
self.z_y_hidden = WVHidden(self.z_y_dim, self.ntopics)
self.mu_z2 = nn.Linear(self.hidden_dim, self.z_dim)
self.log_sigma_z2 = nn.Linear(self.hidden_dim, self.z_dim)
self.xy_topics = Topics(self.ntopics, vocab_dim)
self.z2y_classifier = WVClassifier(self.ntopics, self.n_classes)
############################################################
self.h_to_z = Identity()
self.class_topics = Topics(self.n_classes, vocab_dim)
self.reset_parameters()
def forward(self,x, mask=None, n_samples=1, bow=None, train=False, true_y=None, pre_embd=False, true_y_ids=None):
#print(true_y.shape)
if pre_embd:
bert_rep = x
else:
bert_rep = self.bert_embedding(x, mask)
bert_rep = bert_rep[0]
atted = bert_rep[:,0]
#hidden = self.wv_hidden(atted)
hidden = atted
mu_z1 = self.mu_z1(hidden)
log_sigma_z1 = self.log_sigma_z1(hidden)
kldz1 = kld(mu_z1, log_sigma_z1)
rec_loss_z1 = 0
classifier_loss = 0
kldz2 = 0
rec_loss_z2 = 0
log_y_hat_rec_loss = 0
class_topic_rec_loss = 0
if not train:
### for discriminator, we only use mean
z1 = mu_z1
y_hat_logis = self.xy_classifier(z1)
log_probz_1 = self.x_only_topics(z1)
y_hat = torch.softmax(y_hat_logis, dim=-1)
log_prob_class_topic = self.class_topics(y_hat)
#y = y_hat_logis
for i in range(n_samples):
if train:
z1 = torch.zeros_like(mu_z1).normal_() * torch.exp(log_sigma_z1) + mu_z1
z1 = self.h_to_z(z1)
log_probz_1 = self.x_only_topics(z1)
y_hat_logis = self.xy_classifier(z1)
y_hat = torch.softmax(y_hat_logis, dim=-1)
log_prob_class_topic = self.class_topics(y_hat)
classifier_loss += self.class_criterion(y_hat_logis, true_y_ids)
y_hat_h = torch.cat((hidden, y_hat), dim=-1)
x_y_hidden = self.x_y_hidden(y_hat_h)
mu_z2 = self.mu_z2(x_y_hidden)
log_sigma_z2 = self.log_sigma_z2(x_y_hidden)
z2 = torch.zeros_like(mu_z2).normal_() * torch.exp(log_sigma_z2) + mu_z2
y_hat_z = torch.cat((z2, y_hat), dim=-1)
topic = self.z_y_hidden(y_hat_z)
log_prob_z2 = self.xy_topics(topic)
y_hat_rec = self.z2y_classifier(topic)
log_y_hat_rec = torch.log_softmax(y_hat_rec, dim=-1)
rec_loss_z1 = rec_loss_z1-(log_probz_1 * bow).sum(dim=-1)
kldz2 += kld(mu_z2, log_sigma_z2)
rec_loss_z2 = rec_loss_z2 - (log_prob_z2 * bow).sum(dim=-1)
#log_y_hat_rec_loss = log_y_hat_rec_loss - (log_y_hat_rec*true_y).sum(dim=-1)
log_y_hat_rec_loss = log_y_hat_rec_loss - (log_y_hat_rec*y_hat).sum(dim=-1)
class_topic_rec_loss = class_topic_rec_loss - (log_prob_class_topic*bow).sum(dim=-1)
rec_loss_z1 = rec_loss_z1/n_samples
#print(rec_loss_z1.shape)
classifier_loss = classifier_loss/n_samples
kldz2 = kldz2/n_samples
rec_loss_z2 = rec_loss_z2/n_samples
log_y_hat_rec_loss = log_y_hat_rec_loss/n_samples
class_topic_rec_loss = class_topic_rec_loss/n_samples
elbo_z1 = kldz1 + rec_loss_z1
#print(elbo_z1.shape)
#elbo_z1 = elbo_z1.sum()
elbo_z2 = kldz2 + rec_loss_z2 + log_y_hat_rec_loss
#print(elbo_z2)
#elbo_z2 = elbo_z2.sum()
#class_topic_rec_loss = class_topic_rec_loss.sum()
classifier_loss = classifier_loss
total_loss = elbo_z1.sum() + elbo_z2.sum() + class_topic_rec_loss.sum() + classifier_loss*self.banlance_lambda*self.classification_loss_lambda
y = {
'loss': total_loss,
'elbo_xy': elbo_z2,
'rec_loss': rec_loss_z2,
'kld': kldz2,
'cls_loss': classifier_loss,
'class_topic_loss': class_topic_rec_loss,
'y_hat': y_hat_logis,
'elbo_x': elbo_z1
}
####################################################################################################################################################
# else:
# z1 = mu_z1
# y_hat_logis = self.xy_classifier(z1)
# y_hat = torch.softmax(y_hat_logis, dim=-1)
# y = y_hat_logis
#
#
# y_hat_h = torch.cat((hidden, y_hat), dim=-1)
# x_y_hidden = self.x_y_hidden(y_hat_h)
# mu_z2 = self.mu_z2(x_y_hidden)
# log_sigma_z2 = self.log_sigma_z2(x_y_hidden)
# z2 = torch.zeros_like(mu_z2).normal_() * torch.exp(log_sigma_z2) + mu_z2
#
# kldz2 = kld(mu_z2, log_sigma_z2)
# log_prob_z2 = self.xy_topics(z2)
# y_hat_rec = self.z2y_classifier(z2)
# log_y_hat_rec = torch.log_softmax(y_hat_rec, dim=-1)
#
#
return y, None
def reset_parameters(self):
init.zeros_(self.log_sigma_z1.weight)
init.zeros_(self.log_sigma_z1.bias)
init.zeros_(self.log_sigma_z2.weight)
init.zeros_(self.log_sigma_z2.bias)
def get_topics(self):
return self.xy_topics.get_topics()
def get_class_topics(self):
return self.class_topics.get_topics()
def get_x_only_topics(self):
return self.x_only_topics.get_topics()
| 6,843 | 35.795699 | 150 | py |