repo | file | code | file_length | avg_line_length | max_line_length | extension_type
---|---|---|---|---|---|---|
Augmentation-Adapted-Retriever | Augmentation-Adapted-Retriever-main/src/Retriever/retriever/__init__.py | from .dense_retriever import Retriever, SuccessiveRetriever
from .reranker import RRPredictDataset, Reranker
 | 108 | 53.5 | 59 | py |
Augmentation-Adapted-Retriever | Augmentation-Adapted-Retriever-main/src/Retriever/retriever/reranker.py | import logging
import os
from contextlib import nullcontext
from typing import Dict
import torch
from torch.cuda import amp
from torch.nn import functional as F
from torch.utils.data import DataLoader, Dataset, IterableDataset
from tqdm import tqdm
from transformers import PreTrainedTokenizer
from transformers.trainer_pt_utils import IterableDatasetShard
from ..arguments import InferenceArguments as EncodingArguments
from ..dataset import InferenceDataset, RRInferenceCollator
from ..modeling import RRModel
from ..utils import (load_from_trec, merge_retrieval_results_by_score,
save_as_trec)
logger = logging.getLogger(__name__)
def encode_pair(tokenizer, item1, item2, max_len_1=32, max_len_2=128):
return tokenizer.encode_plus(
item1 + item2,
truncation='longest_first',
padding='max_length',
max_length=max_len_1 + max_len_2 + 2,
)
def add_to_result_dict(result_dicts, qids, dids, scores):
for qid, did, score in zip(qids, dids, scores):
if qid not in result_dicts:
result_dicts[qid] = {}
result_dicts[qid][did] = float(score)
class RRPredictDataset(IterableDataset):
def __init__(
self,
tokenizer: PreTrainedTokenizer,
query_dataset: InferenceDataset,
corpus_dataset: InferenceDataset,
run: Dict[str, Dict[str, float]]
):
super(RRPredictDataset, self).__init__()
self.tokenizer = tokenizer
self.query_dataset = query_dataset
self.corpus_dataset = corpus_dataset
self.run = run
def __iter__(self):
def gen_q_d_pair():
for qid, did_and_scores in self.run.items():
for did, _ in did_and_scores.items():
yield {
"query_id": qid,
"doc_id": did,
**encode_pair(
self.tokenizer,
self.query_dataset[qid]["input_ids"],
self.corpus_dataset[did]["input_ids"],
self.query_dataset.max_len,
self.corpus_dataset.max_len
),
}
return gen_q_d_pair()
class Reranker:
def __init__(
self,
model: RRModel,
tokenizer: PreTrainedTokenizer,
corpus_dataset: Dataset,
args: EncodingArguments
):
logger.info("Initializing reranker")
self.model = model
self.tokenizer = tokenizer
self.corpus_dataset = corpus_dataset
self.args = args
self.model = model.to(self.args.device)
self.model.eval()
def rerank(self, query_dataset: InferenceDataset, run: Dict[str, Dict[str, float]]):
return_dict = {}
dataset = RRPredictDataset(self.tokenizer, query_dataset, self.corpus_dataset, run)
if self.args.world_size > 1:
dataset = IterableDatasetShard(
dataset,
batch_size=self.args.per_device_eval_batch_size,
drop_last=False,
num_processes=self.args.world_size,
process_index=self.args.process_index
)
dataloader = DataLoader(
dataset,
batch_size=self.args.eval_batch_size,
collate_fn=RRInferenceCollator(),
num_workers=self.args.dataloader_num_workers,
pin_memory=self.args.dataloader_pin_memory,
)
with torch.no_grad():
for qids, dids, batch in tqdm(dataloader, desc="Reranking", disable=self.args.local_process_index > 0):
with amp.autocast() if self.args.fp16 else nullcontext():
for k, v in batch.items():
batch[k] = v.to(self.args.device)
outputs = self.model.encode(batch)
if len(outputs.shape) == 2 and outputs.shape[1] == 2:
outputs = F.log_softmax(outputs, dim=1)[:, 1]
scores = outputs.cpu().numpy()
add_to_result_dict(return_dict, qids, dids, scores)
if self.args.world_size > 1:
save_as_trec(return_dict, self.args.trec_save_path + ".rank.{}".format(self.args.process_index))
torch.distributed.barrier()
if self.args.process_index == 0:
# aggregate results
all_results = []
for i in range(self.args.world_size):
all_results.append(load_from_trec(self.args.trec_save_path + ".rank.{}".format(i)))
return_dict = merge_retrieval_results_by_score(all_results)
# remove temp files
for i in range(self.args.world_size):
os.remove(self.args.trec_save_path + ".rank.{}".format(i))
torch.distributed.barrier()
return return_dict
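# A minimal usage sketch for the Reranker above, assuming a trained RRModel, a
# tokenizer, query/corpus InferenceDatasets and InferenceArguments have already
# been built elsewhere (their construction is not shown in this file, and the
# trec file name below is hypothetical):
#
#   reranker = Reranker(model=rr_model, tokenizer=tokenizer,
#                       corpus_dataset=corpus_dataset, args=encoding_args)
#   run = load_from_trec("first_stage.trec")        # {qid: {did: score}} from a first-stage retriever
#   reranked = reranker.rerank(query_dataset, run)  # rescored {qid: {did: score}}
#   save_as_trec(reranked, encoding_args.trec_save_path)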
| 4,921 | 35.731343 | 115 | py |
Augmentation-Adapted-Retriever | Augmentation-Adapted-Retriever-main/src/Retriever/retriever/dense_retriever.py | import gc
import glob
import logging
import os
import pickle
from contextlib import nullcontext
from typing import Dict, List
import faiss
import numpy as np
import torch
from torch.cuda import amp
from torch.utils.data import DataLoader, IterableDataset
from tqdm import tqdm
from ..arguments import InferenceArguments as EncodingArguments
from ..dataset import DRInferenceCollator
from ..modeling import DRModelForInference, DROutput
from ..utils import merge_retrieval_results_by_score
logger = logging.getLogger(__name__)
class Retriever:
def __init__(self, model: DRModelForInference, corpus_dataset: IterableDataset, args: EncodingArguments):
logger.info("Initializing retriever")
self.model = model
self.corpus_dataset = corpus_dataset
self.args = args
self.doc_lookup = []
self.query_lookup = []
self.model.to(self.args.device)
self.model.eval()
def _initialize_faiss_index(self, dim: int):
self.index = None
cpu_index = faiss.IndexFlatIP(dim)
self.index = cpu_index
def _move_index_to_gpu(self):
logger.info("Moving index to GPU(s)")
ngpu = faiss.get_num_gpus()
gpu_resources = []
for i in range(ngpu):
res = faiss.StandardGpuResources()
gpu_resources.append(res)
co = faiss.GpuMultipleClonerOptions()
co.shard = True
co.usePrecomputed = False
vres = faiss.GpuResourcesVector()
vdev = faiss.IntVector()
for i in range(0, ngpu):
vdev.push_back(i)
vres.push_back(gpu_resources[i])
self.index = faiss.index_cpu_to_gpu_multiple(
vres, vdev, self.index, co)
def doc_embedding_inference(self):
# Note: during evaluation, there's no point in wrapping the model
# inside a DistributedDataParallel as we'll be under `no_grad` anyways.
if self.corpus_dataset is None:
raise ValueError("No corpus dataset provided")
dataloader = DataLoader(
self.corpus_dataset,
# Note that we do not support DataParallel here
batch_size=self.args.per_device_eval_batch_size,
collate_fn=DRInferenceCollator(),
num_workers=self.args.dataloader_num_workers,
pin_memory=self.args.dataloader_pin_memory,
)
os.makedirs(self.args.output_dir, exist_ok=True)
encoded = []
lookup_indices = []
idx = 0
prev_idx = 0
for (batch_ids, batch) in tqdm(dataloader, disable=self.args.process_index > 0):
lookup_indices.extend(batch_ids)
idx += len(batch_ids)
with amp.autocast() if self.args.fp16 else nullcontext():
with torch.no_grad():
for k, v in batch.items():
batch[k] = v.to(self.args.device)
model_output: DROutput = self.model(passage=batch)
encoded.append(model_output.p_reps.cpu().detach().numpy())
if len(lookup_indices) >= self.args.max_inmem_docs // self.args.world_size:
encoded = np.concatenate(encoded)
with open(os.path.join(self.args.output_dir, "embeddings.corpus.rank.{}.{}-{}".format(self.args.process_index, prev_idx, idx)), 'wb') as f:
pickle.dump((encoded, lookup_indices), f, protocol=4)
encoded = []
lookup_indices = []
prev_idx = idx
gc.collect()
if len(lookup_indices) > 0:
encoded = np.concatenate(encoded)
with open(os.path.join(self.args.output_dir, "embeddings.corpus.rank.{}.{}-{}".format(self.args.process_index, prev_idx, idx)), 'wb') as f:
pickle.dump((encoded, lookup_indices), f, protocol=4)
del encoded
del lookup_indices
if self.args.world_size > 1:
torch.distributed.barrier()
def init_index_and_add(self, partition: str = None):
logger.info(
"Initializing Faiss index from pre-computed document embeddings")
partitions = [partition] if partition is not None else glob.glob(
os.path.join(self.args.output_dir, "embeddings.corpus.rank.*"))
for i, part in enumerate(partitions):
with open(part, 'rb') as f:
data = pickle.load(f)
encoded = data[0]
lookup_indices = data[1]
if i == 0:
dim = encoded.shape[1]
self._initialize_faiss_index(dim)
self.index.add(encoded)
self.doc_lookup.extend(lookup_indices)
@classmethod
def build_all(cls, model: DRModelForInference, corpus_dataset: IterableDataset, args: EncodingArguments):
retriever = cls(model, corpus_dataset, args)
retriever.doc_embedding_inference()
if args.process_index == 0:
retriever.init_index_and_add()
if args.world_size > 1:
torch.distributed.barrier()
return retriever
@classmethod
def build_embeddings(cls, model: DRModelForInference, corpus_dataset: IterableDataset, args: EncodingArguments):
retriever = cls(model, corpus_dataset, args)
retriever.doc_embedding_inference()
return retriever
@classmethod
def from_embeddings(cls, model: DRModelForInference, args: EncodingArguments):
retriever = cls(model, None, args)
if args.process_index == 0:
retriever.init_index_and_add()
if args.world_size > 1:
torch.distributed.barrier()
return retriever
def reset_index(self):
if self.index:
self.index.reset()
self.doc_lookup = []
self.query_lookup = []
def query_embedding_inference(self, query_dataset: IterableDataset):
dataloader = DataLoader(
query_dataset,
batch_size=self.args.per_device_eval_batch_size,
collate_fn=DRInferenceCollator(),
num_workers=self.args.dataloader_num_workers,
pin_memory=self.args.dataloader_pin_memory,
)
encoded = []
lookup_indices = []
for (batch_ids, batch) in tqdm(dataloader, disable=self.args.process_index > 0):
lookup_indices.extend(batch_ids)
with amp.autocast() if self.args.fp16 else nullcontext():
with torch.no_grad():
for k, v in batch.items():
batch[k] = v.to(self.args.device)
if not self.args.encode_query_as_passage:
model_output: DROutput = self.model(query=batch)
encoded.append(
model_output.q_reps.cpu().detach().numpy())
else:
model_output: DROutput = self.model(passage=batch)
encoded.append(
model_output.p_reps.cpu().detach().numpy())
if len(encoded) > 0: # If there is no data in the process, we don't do anything
encoded = np.concatenate(encoded)
with open(os.path.join(self.args.output_dir, "embeddings.query.rank.{}".format(self.args.process_index)), 'wb') as f:
pickle.dump((encoded, lookup_indices), f, protocol=4)
if self.args.world_size > 1:
torch.distributed.barrier()
def search(self, topk: int = 100):
logger.info("Searching")
if self.index is None:
raise ValueError("Index is not initialized")
encoded = []
for i in range(self.args.world_size):
with open(os.path.join(self.args.output_dir, "embeddings.query.rank.{}".format(i)), 'rb') as f:
data = pickle.load(f)
lookup_indices = data[1]
if len(lookup_indices) == 0: # No data
continue
encoded.append(data[0])
self.query_lookup.extend(lookup_indices)
encoded = np.concatenate(encoded)
return_dict = {}
D, I = self.index.search(encoded, topk)
original_indices = np.array(self.doc_lookup)[I]
q = 0
for scores_per_q, doc_indices_per_q in zip(D, original_indices):
qid = str(self.query_lookup[q])
return_dict[qid] = {}
for doc_index, score in zip(doc_indices_per_q, scores_per_q):
doc_index = str(doc_index)
if self.args.remove_identical and qid == doc_index:
continue
return_dict[qid][doc_index] = float(score)
q += 1
logger.info("End searching with {} queries".format(len(return_dict)))
return return_dict
def retrieve(self, query_dataset: IterableDataset, topk: int = 100):
self.query_embedding_inference(query_dataset)
self.model.cpu()
del self.model
torch.cuda.empty_cache()
results = {}
if self.args.process_index == 0:
if self.args.use_gpu:
self._move_index_to_gpu()
results = self.search(topk)
if self.args.world_size > 1:
torch.distributed.barrier()
return results
class SuccessiveRetriever(Retriever):
def __init__(self, model: DRModelForInference, corpus_dataset: IterableDataset, args: EncodingArguments):
super().__init__(model, corpus_dataset, args)
@classmethod
def from_embeddings(cls, model: DRModelForInference, args: EncodingArguments):
retriever = cls(model, None, args)
return retriever
def retrieve(self, query_dataset: IterableDataset, topk: int = 100):
self.query_embedding_inference(query_dataset)
del self.model
torch.cuda.empty_cache()
final_result = {}
if self.args.process_index == 0:
all_partitions = glob.glob(os.path.join(
self.args.output_dir, "embeddings.corpus.rank.*"))
for partition in all_partitions:
logger.info("Loading partition {}".format(partition))
self.init_index_and_add(partition)
if self.args.use_gpu:
self._move_index_to_gpu()
cur_result = self.search(topk)
self.reset_index()
final_result = merge_retrieval_results_by_score(
[final_result, cur_result], topk)
if self.args.world_size > 1:
torch.distributed.barrier()
return final_result
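# A minimal usage sketch for the classes above, assuming the DRModelForInference,
# the corpus/query datasets and the InferenceArguments are constructed elsewhere
# (their construction is not shown in this file):
#
#   retriever = Retriever.build_all(model, corpus_dataset, args)  # encode corpus, build Faiss index
#   run = retriever.retrieve(query_dataset, topk=100)             # {qid: {did: score}}
#
#   # If corpus embeddings already exist in args.output_dir, SuccessiveRetriever
#   # searches them one embedding partition at a time instead of one big index:
#   retriever = SuccessiveRetriever.from_embeddings(model, args)
#   run = retriever.retrieve(query_dataset, topk=100)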
| 10,514 | 38.382022 | 155 | py |
tifresi | tifresi-master/setup.py | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name='tifresi',
version='0.1.5',
description='Time Frequency Spectrogram Inversion',
url='https://github.com/andimarafioti/tifresi',
author='Andrés Marafioti, Nathanael Perraudin, Nicki Hollighaus',
author_email='[email protected]',
license='MIT',
packages=setuptools.find_packages(),
zip_safe=False,
long_description=long_description,
long_description_content_type="text/markdown",
extras_require={'testing': ['flake8', 'pytest', 'jupyterlab', 'twine', 'setuptools', 'wheel']},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Science/Research',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Developers', 'Natural Language :: English',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX :: Linux', 'Programming Language :: C',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Scientific/Engineering'
],
install_requires=[
'cython', 'ltfatpy', 'numpy', 'numba', 'librosa', 'matplotlib'
],
python_requires='>=3.6',
)
 | 1,478 | 38.972973 | 99 | py |
tifresi | tifresi-master/tifresi/stft.py | from ltfatpy import dgtreal, idgtreal
from ltfatpy.gabor.gabdual import gabdual
import numpy as np
from tifresi.hparams import HParams as p
from tifresi.phase.modGabPhaseGrad import modgabphasegrad
from tifresi.phase.pghi import pghi
class GaussTF(object):
"""Time frequency transform object based on a Gauss window.
The Gauss window is necessary to apply the PGHI (Phase gradient heap integration) algorithm.
"""
def __init__(self, hop_size=p.hop_size, stft_channels=p.stft_channels):
assert (np.mod(stft_channels, 2) == 0), 'The number of stft channels needs to be even'
self.hop_size = hop_size
self.stft_channels = stft_channels
def dgt(self, x, hop_size=None, stft_channels=None):
"""Compute the DGT of a real signal with a gauss window."""
if hop_size is None:
hop_size = self.hop_size
if stft_channels is None:
stft_channels = self.stft_channels
assert (len(x.shape) == 1)
assert (np.mod(len(x), hop_size) == 0)
assert (np.mod(stft_channels, 2) == 0), 'The number of stft channels needs to be even'
assert (np.mod(len(x), stft_channels) == 0)
g_analysis = self._analysis_window(x)
return dgtreal(x.astype(np.float64), g_analysis, hop_size, stft_channels)[0]
def idgt(self, X, hop_size=None, stft_channels=None):
"""Compute the inverse DGT of real signal x with a gauss window."""
if hop_size is None:
hop_size = self.hop_size
if stft_channels is None:
stft_channels = self.stft_channels
assert (len(X.shape) == 2)
assert (np.mod(stft_channels, 2) == 0), 'The number of stft channels needs to be even'
assert (X.shape[0] == stft_channels // 2 + 1)
g_synthesis = self._synthesis_window(X, hop_size, stft_channels)
return idgtreal(X.astype(np.complex128), g_synthesis, hop_size, stft_channels)[0]
def invert_spectrogram(self, spectrogram, stft_channels=None, hop_size=None):
"""Invert a spectrogram by reconstructing the phase with PGHI."""
if hop_size is None:
hop_size = self.hop_size
if stft_channels is None:
stft_channels = self.stft_channels
audio_length = hop_size * spectrogram.shape[1]
tfr = self.hop_size * self.stft_channels / audio_length
g_analysis = {'name': 'gauss', 'tfr': tfr}
tgrad, fgrad = modgabphasegrad('abs', spectrogram, g_analysis, hop_size,
stft_channels)
phase = pghi(spectrogram, tgrad, fgrad, hop_size, stft_channels, audio_length)
reComplexStft = spectrogram * np.exp(1.0j * phase)
return self.idgt(reComplexStft, hop_size, stft_channels)
def spectrogram(self, time_signal, normalize=p.normalize, **kwargs):
"""Compute the spectrogram of a real signal."""
stft = self.dgt(time_signal, **kwargs)
magSpectrogram = np.abs(stft)
if normalize:
magSpectrogram = magSpectrogram / np.max(magSpectrogram)
return magSpectrogram
def _analysis_window(self, x):
return {'name': 'gauss', 'tfr': self.hop_size * self.stft_channels / len(x)}
def _synthesis_window(self, X, hop_size, stft_channels):
L = hop_size * X.shape[1]
tfr = self.hop_size * self.stft_channels / L
g_analysis = {'name': 'gauss', 'tfr': tfr}
return {'name': ('dual', g_analysis['name']), 'tfr': tfr}
class GaussTruncTF(GaussTF):
"""Time frequency transform object based on a Truncated Gauss window.
"""
def __init__(self, hop_size=p.hop_size, stft_channels=p.stft_channels, min_height=1e-4):
super().__init__(hop_size, stft_channels)
self.min_height = min_height
def _analysis_window(self, x):
Lgtrue = np.sqrt(-4 * self.hop_size * self.stft_channels * np.log(self.min_height) / np.pi)
LgLong = np.ceil(Lgtrue / self.stft_channels) * self.stft_channels
x = (1 / Lgtrue) * np.concatenate([np.arange(.5 * LgLong), np.arange(-.5 * LgLong, 0)])
g = np.exp(4 * np.log(self.min_height) * (x ** 2))
g = g / np.linalg.norm(g)
return g
def _synthesis_window(self, X, hop_size, stft_channels):
g_analysis = self._analysis_window(None)
return gabdual(g_analysis, hop_size, stft_channels, 8*max(stft_channels, self.stft_channels))
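# A minimal, self-contained sketch of the analysis/inversion round trip above;
# the hop size, channel count and the random test signal are assumptions chosen
# so that the length checks in GaussTF.dgt hold (length divisible by both).
if __name__ == "__main__":
    hop_size, stft_channels = 256, 1024
    tfsystem = GaussTruncTF(hop_size=hop_size, stft_channels=stft_channels)
    x = np.random.randn(64 * stft_channels)
    spectrogram = tfsystem.spectrogram(x)             # shape: (stft_channels // 2 + 1, len(x) // hop_size)
    x_rec = tfsystem.invert_spectrogram(spectrogram)  # magnitude-only reconstruction via PGHI
    print(spectrogram.shape, x_rec.shape)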
| 4,423 | 40.735849 | 101 | py |
tifresi | tifresi-master/tifresi/utils.py | import numpy as np
import librosa
from tifresi.hparams import HParams as p
# This function might need another name
def preprocess_signal(y, M=p.M):
"""Trim and cut signal.
The function ensures that the signal length is a multiple of M.
"""
# Trimming
y, _ = librosa.effects.trim(y)
# Preemphasis
# y = np.append(y[0], y[1:] - 0.97 * y[:-1])
# Padding
left_over = np.mod(len(y), M)
extra = M - left_over
y = np.pad(y, (0, extra))
assert (np.mod(len(y), M) == 0)
return y
def load_signal(fpath, sr=None):
"""Load a signal from path."""
# Loading sound file
y, sr = librosa.load(fpath, sr=sr)
return y, sr
def downsample_tf_time(mel, rr):
"""Downsample a TF representation along the time axis."""
tmp = np.zeros([mel.shape[0], mel.shape[1] // rr], mel.dtype)
for i in range(rr):
tmp += mel[:, i::rr]
return tmp / rr
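# Small illustration of the helpers above (the random signal and sizes are
# assumptions): preprocess_signal pads to a multiple of M, and
# downsample_tf_time averages rr consecutive frames along the time axis.
if __name__ == "__main__":
    y = np.random.randn(3 * p.M + 123)
    y = preprocess_signal(y)
    assert len(y) % p.M == 0
    mel_like = np.random.rand(80, 64)
    print(len(y), downsample_tf_time(mel_like, 2).shape)  # time axis halved: (80, 32)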
| 920 | 21.463415 | 67 | py |
tifresi | tifresi-master/tifresi/hparams.py | import librosa
import numpy as np
class HParams(object):
# Signal parameters
sr = 22050 # Sampling frequency of the signal
M = 1024 # Ensure that the signal will be a multiple of M
# STFT parameters
stft_channels = 1024 # Number of frequency channels
hop_size = 256 # Hop size
stft_dynamic_range_dB = 50 # dynamic range in dB for the STFT
normalize = True # Normalize STFT
# MEL parameters
n_mels = 80 # Number of mel frequency band
fmin = 0 # Minimum frequency for the MEL
fmax = None # Maximum frequency for the MEL (None -> Nyquist frequency)
mel_dynamic_range_dB = 50 # dynamic range in dB for the MEL
mel_basis = librosa.filters.mel(sr=sr, n_fft=stft_channels, n_mels=n_mels, fmin=fmin, fmax=fmax)
mel_inverse_basis = np.linalg.pinv(mel_basis)
| 841 | 30.185185 | 100 | py |
tifresi | tifresi-master/tifresi/metrics.py | import numpy as np
from tifresi.transforms import inv_log_spectrogram
__author__ = 'Andres'
def projection_loss(target_spectrogram, original_spectrogram):
magnitude_error = np.linalg.norm(np.abs(target_spectrogram) - np.abs(original_spectrogram), 'fro') / \
np.linalg.norm(np.abs(target_spectrogram), 'fro')
return 20 * np.log10(1 / magnitude_error)
def consistency(log10_spectrogram):
log_spectrogram = np.log(inv_log_spectrogram(log10_spectrogram))
ttderiv = log_spectrogram[1:-1, :-2] - 2 * log_spectrogram[1:-1, 1:-1] + log_spectrogram[1:-1, 2:] + np.pi / 4
ffderiv = log_spectrogram[:-2, 1:-1] - 2 * log_spectrogram[1:-1, 1:-1] + log_spectrogram[2:, 1:-1] + np.pi / 4
absttderiv = substractMeanAndDivideByStd(np.abs(ttderiv))
absffderiv = substractMeanAndDivideByStd(np.abs(ffderiv))
consistencies = np.sum(absttderiv * absffderiv)
return consistencies
def substractMeanAndDivideByStd(aDistribution):
unmeaned = aDistribution - np.mean(aDistribution, keepdims=True)
shiftedtt = unmeaned / np.sqrt(np.sum(np.abs(unmeaned) ** 2, keepdims=True))
return shiftedtt
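# A small, hypothetical check of projection_loss: for a copy of the target with
# about 1% relative magnitude error, the returned value (an SNR expressed in dB)
# comes out near 20*log10(100) = 40 dB. The random spectrograms are assumptions.
if __name__ == "__main__":
    target = np.abs(np.random.randn(513, 128))
    noisy = target + 0.01 * np.abs(np.random.randn(513, 128))
    print(projection_loss(target, noisy))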
| 1,127 | 36.6 | 114 | py |
tifresi | tifresi-master/tifresi/__init__.py | try:
import matplotlib.pyplot as pyplot
except:
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as pyplot
from tifresi import stft
from tifresi import hparams
from tifresi import metrics
from tifresi import utils
| 251 | 20 | 38 | py |
tifresi | tifresi-master/tifresi/transforms.py | import librosa
import numpy as np
from tifresi.hparams import HParams as p
__author__ = 'Andres'
def log_spectrogram(spectrogram, dynamic_range_dB=p.stft_dynamic_range_dB):
"""Compute the log spectrogram representation from a spectrogram."""
spectrogram = np.abs(spectrogram) # for safety
minimum_relative_amplitude = np.max(spectrogram) / 10 ** (dynamic_range_dB / 10)
return 10 * np.log10(np.clip(spectrogram, a_min=minimum_relative_amplitude, a_max=None))
def inv_log_spectrogram(log_spec):
"""Inverse the log representation of the spectrogram or mel spectrogram."""
return 10 ** (log_spec / 10)
def log_mel_spectrogram(spectrogram, stft_channels=p.stft_channels, n_mels=p.n_mels, fmin=p.fmin, fmax=p.fmax, sr=p.sr,
dynamic_range_dB=p.mel_dynamic_range_dB):
"""Compute the log mel spectrogram from a spectrogram."""
melSpectrogram = mel_spectrogram(spectrogram, stft_channels=stft_channels, n_mels=n_mels, fmin=fmin, fmax=fmax,
sr=sr)
return log_spectrogram(melSpectrogram, dynamic_range_dB)
def mel_spectrogram(spectrogram, stft_channels=p.stft_channels, n_mels=p.n_mels, fmin=p.fmin, fmax=p.fmax, sr=p.sr):
"""Compute the mel spectrogram from a spectrogram."""
if stft_channels != p.stft_channels or n_mels != p.n_mels or fmin != p.fmin or fmax != p.fmax or sr != p.sr:
mel_basis = librosa.filters.mel(sr=sr, n_fft=stft_channels, n_mels=n_mels, fmin=fmin, fmax=fmax)
else:
mel_basis = p.mel_basis
return np.dot(mel_basis, spectrogram)
def pseudo_unmel_spectrogram(mel_spectrogram, stft_channels=p.stft_channels, n_mels=p.n_mels, fmin=p.fmin, fmax=p.fmax, sr=p.sr):
"""Compute the inverse mel spectrogram from a mel spectrogram."""
if stft_channels != p.stft_channels or n_mels != p.n_mels or fmin != p.fmin or fmax != p.fmax or sr != p.sr:
mel_basis = librosa.filters.mel(sr=sr, n_fft=stft_channels, n_mels=n_mels, fmin=fmin, fmax=fmax)
mel_inverse_basis = np.linalg.pinv(mel_basis)
else:
mel_inverse_basis = p.mel_inverse_basis
return np.matmul(mel_inverse_basis, mel_spectrogram)
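# A minimal round-trip sketch of the transforms above, using the default HParams
# (stft_channels=1024, n_mels=80); the random magnitude spectrogram is an assumption.
if __name__ == "__main__":
    mag = np.abs(np.random.randn(p.stft_channels // 2 + 1, 100))
    mel = mel_spectrogram(mag)               # (n_mels, 100)
    log_mel = log_mel_spectrogram(mag)       # log-compressed and clipped to the mel dynamic range
    mel_back = inv_log_spectrogram(log_mel)  # undoes the 10*log10 compression (up to the clipping)
    print(mel.shape, log_mel.shape, np.max(np.abs(mel - mel_back)))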
| 2,168 | 47.2 | 129 | py |
tifresi | tifresi-master/tifresi/phase/pghi.py | import numpy as np
import heapq
import numba
from numba import njit
__author__ = 'Andres'
@njit
def pghi(spectrogram, tgrad, fgrad, a, M, L, tol=1e-7):
""""Implementation of "A noniterativemethod for reconstruction of phase from STFT magnitude". by Prusa, Z., Balazs, P., and Sondergaard, P. Published in IEEE/ACM Transactions on Audio, Speech and LanguageProcessing, 25(5):1154–1164 on 2017.
a = hop size
M = fft window size
L = signal length
tol = tolerance under the max value of the spectrogram
"""
spectrogram = spectrogram.copy()
abstol = np.array([1e-10], dtype=spectrogram.dtype)[0] # if abstol is not the same type as spectrogram then casting occurs
phase = np.zeros_like(spectrogram)
max_val = np.amax(spectrogram) # Find maximum value to start integration
max_x, max_y = np.where(spectrogram == max_val)
max_pos = max_x[0], max_y[0]
if max_val <= abstol: # Avoid integrating the phase for the spectogram of a silent signal
print('Empty spectrogram')
return phase
M2 = spectrogram.shape[0]
N = spectrogram.shape[1]
b = L / M
sampToRadConst = 2.0 * np.pi / L # Rescale the derivs to rad with step 1 in both directions
tgradw = a * tgrad * sampToRadConst
fgradw = - b * (fgrad + np.arange(spectrogram.shape[1]) * a) * sampToRadConst # also convert relative to freqinv convention
magnitude_heap = [(-max_val, max_pos)] # Numba requires heap to be initialized with content
spectrogram[max_pos] = abstol
small_x, small_y = np.where(spectrogram < max_val*tol)
for x, y in zip(small_x, small_y):
spectrogram[x, y] = abstol # Do not integrate over silence
while max_val > abstol:
while len(magnitude_heap) > 0: # Integrate around maximum value until reaching silence
max_val, max_pos = heapq.heappop(magnitude_heap)
col = max_pos[0]
row = max_pos[1]
#Spread to 4 direct neighbors
N_pos = col+1, row
S_pos = col-1, row
E_pos = col, row+1
W_pos = col, row-1
if max_pos[0] < M2-1 and spectrogram[N_pos] > abstol:
phase[N_pos] = phase[max_pos] + (fgradw[max_pos] + fgradw[N_pos])/2
heapq.heappush(magnitude_heap, (-spectrogram[N_pos], N_pos))
spectrogram[N_pos] = abstol
if max_pos[0] > 0 and spectrogram[S_pos] > abstol:
phase[S_pos] = phase[max_pos] - (fgradw[max_pos] + fgradw[S_pos])/2
heapq.heappush(magnitude_heap, (-spectrogram[S_pos], S_pos))
spectrogram[S_pos] = abstol
if max_pos[1] < N-1 and spectrogram[E_pos] > abstol:
phase[E_pos] = phase[max_pos] + (tgradw[max_pos] + tgradw[E_pos])/2
heapq.heappush(magnitude_heap, (-spectrogram[E_pos], E_pos))
spectrogram[E_pos] = abstol
if max_pos[1] > 0 and spectrogram[W_pos] > abstol:
phase[W_pos] = phase[max_pos] - (tgradw[max_pos] + tgradw[W_pos])/2
heapq.heappush(magnitude_heap, (-spectrogram[W_pos], W_pos))
spectrogram[W_pos] = abstol
max_val = np.amax(spectrogram) # Find new maximum value to start integration
max_x, max_y = np.where(spectrogram==max_val)
max_pos = max_x[0], max_y[0]
heapq.heappush(magnitude_heap, (-max_val, max_pos))
spectrogram[max_pos] = abstol
return phase
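# Call pattern, for reference (this mirrors GaussTF.invert_spectrogram in
# tifresi/stft.py, where tgrad and fgrad come from modgabphasegrad('abs', ...)):
#
#   phase = pghi(spectrogram, tgrad, fgrad, hop_size, stft_channels, audio_length)
#   complex_stft = spectrogram * np.exp(1.0j * phase)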
| 3,471 | 40.831325 | 245 | py |
tifresi | tifresi-master/tifresi/phase/modGabPhaseGrad.py | # -*- coding: utf-8 -*-
# ######### COPYRIGHT #########
# Credits
# #######
#
# Copyright(c) 2015-2018
# ----------------------
#
# * `LabEx Archimède <http://labex-archimede.univ-amu.fr/>`_
# * `Laboratoire d'Informatique Fondamentale <http://www.lif.univ-mrs.fr/>`_
# (now `Laboratoire d'Informatique et Systèmes <http://www.lis-lab.fr/>`_)
# * `Institut de Mathématiques de Marseille <http://www.i2m.univ-amu.fr/>`_
# * `Université d'Aix-Marseille <http://www.univ-amu.fr/>`_
#
# This software is a port from LTFAT 2.1.0 :
# Copyright (C) 2005-2018 Peter L. Soendergaard <[email protected]>.
#
# Contributors
# ------------
#
# * Denis Arrivault <contact.dev_AT_lis-lab.fr>
# * Florent Jaillet <contact.dev_AT_lis-lab.fr>
#
# Description
# -----------
#
# ltfatpy is a partial Python port of the
# `Large Time/Frequency Analysis Toolbox <http://ltfat.sourceforge.net/>`_,
# a MATLAB®/Octave toolbox for working with time-frequency analysis and
# synthesis.
#
# Version
# -------
#
# * ltfatpy version = 1.0.16
# * LTFAT version = 2.1.0
#
# Licence
# -------
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# ######### COPYRIGHT #########
"""Module of phase gradient computation
Ported from ltfat_2.1.0/gabor/gabphasegrad.m
.. moduleauthor:: Florent Jaillet
"""
from __future__ import print_function, division
import numpy as np
from ltfatpy.comp.comp_sigreshape_pre import comp_sigreshape_pre
from ltfatpy.gabor.dgtlength import dgtlength
from ltfatpy.gabor.gabwin import gabwin
from ltfatpy.tools.postpad import postpad
from ltfatpy.fourier.fftindex import fftindex
from ltfatpy.comp.comp_sepdgt import comp_sepdgt
from ltfatpy.fourier.pderiv import pderiv
def modgabphasegrad(method, *args, **kwargs):
"""Modified Phase gradient of the discrete Gabor transform
    We modified this to work with dgtreal in the 'phase' and 'abs' cases:
    the 'phase' case required substantial changes,
    while for the 'abs' case we added M as a mandatory parameter.
- Usage:
| ``(tgrad, fgrad, c) = gabphasegrad('dgt', f, g, a, M, L=None)``
| ``(tgrad, fgrad) = gabphasegrad('phase', cphase, a)``
| ``(tgrad, fgrad) = gabphasegrad('abs', s, g, a, M, difforder=2)``
- Input parameters:
:param str method: Method used to compute the phase gradient, see the
possible values below
:param numpy.ndarray f: (defined if ``method='dgt'``) Input signal
:param numpy.ndarray cphase: (defined if ``method='phase'``) Phase of a
:func:`~ltfatpy.gabor.dgt.dgt` of the signal
:param numpy.ndarray s: (defined if ``method='abs'``) Spectrogram of the
signal
:param numpy.ndarray g: (defined if ``method='dgt'`` or ``method='phase'``)
Window function
:param int a: (defined if ``method='dgt'`` or ``method='phase'`` or
``method='abs'``) Length of time shift
:param int M: (defined if ``method='dgt'``) Number of channels
:param int L: (defined if ``method='dgt'``, optional) Length of transform
to do
:param int difforder: (defined if ``method='abs'``, optional) Order of the
centered finite difference scheme used to perform the needed numerical
differentiation
- Output parameters:
:returns: ``(tgrad, fgrad, c)`` if ``method='dgt'``, or ``(tgrad, fgrad)``
if ``method='phase'`` or ``method='abs'``
:rtype: tuple
:var numpy.ndarray tgrad: Instantaneous frequency
:var numpy.ndarray fgrad: Local group delay
:var numpy.ndarray c: Gabor coefficients
``gabphasegrad`` computes the time-frequency gradient of the phase of the
:func:`~ltfatpy.gabor.dgt.dgt` of a signal. The derivative in time
**tgrad** is the instantaneous frequency while the frequency derivative
**fgrad** is the local group delay.
**tgrad** and **fgrad** measure the deviation from the current time and
frequency, so a value of zero means that the instantaneous frequency is
equal to the center frequency of the considered channel.
**tgrad** is scaled such that distances are measured in samples. Similarly,
**fgrad** is scaled such that the Nyquist frequency (the highest possible
frequency) corresponds to a value of ``L/2``.
The computation of **tgrad** and **fgrad** is inaccurate when the absolute
    value of the Gabor coefficients is low. This is due to the fact that the
phase of complex numbers close to the machine precision is almost
random. Therefore, **tgrad** and **fgrad** may attain very large random
values when ``abs(c)`` is close to zero.
The computation can be done using three different methods:
=========== ===========================================================
``'dgt'`` Directly from the signal.
``'phase'`` From the phase of a :func:`~ltfatpy.gabor.dgt.dgt` of the
signal. This is the classic method used in the phase
vocoder.
``'abs'`` From the absolute value of the
:func:`~ltfatpy.gabor.dgt.dgt`. Currently this method works
only for Gaussian windows.
=========== ===========================================================
``(tgrad, fgrad, c) = gabphasegrad('dgt', f, g, a, M)`` computes the
time-frequency gradient using a :func:`~ltfatpy.gabor.dgt.dgt` of the
signal **f**. The :func:`~ltfatpy.gabor.dgt.dgt` is computed using the
window **g** on the lattice specified by the time shift **a** and the
number of channels **M**. The algorithm used to perform this calculation
computes several DGTs, and therefore this routine takes the exact same
input parameters as :func:`~ltfatpy.gabor.dgt.dgt`.
The window **g** may be specified as in :func:`~ltfatpy.gabor.dgt.dgt`. If
the window used is ``'gauss'``, the computation will be done by a faster
algorithm.
``(tgrad, fgrad, c) = gabphasegrad('dgt', f, g, a, M)`` additionally
returns the Gabor coefficients ``c``, as they are always computed as a
byproduct of the algorithm.
``(tgrad, fgrad) = gabphasegrad('phase', cphase, a)`` computes the phase
gradient from the phase **cphase** of a :func:`~ltfatpy.gabor.dgt.dgt` of
the signal. The original :func:`~ltfatpy.gabor.dgt.dgt` from which the
phase is obtained must have been computed using a time-shift of **a**.
``(tgrad, fgrad) = gabphasegrad('abs', s, g, a)`` computes the phase
gradient from the spectrogram **s**. The spectrogram must have been
computed using the window **g** and time-shift **a**.
``(tgrad, fgrad) = gabphasegrad('abs', s, g, a, difforder=ord)`` uses a
centered finite difference scheme of order ``ord`` to perform the needed
numerical differentiation. Default is to use a 4th order scheme.
Currently the 'abs' method only works if the window **g** is a Gaussian
window specified as a string or cell array.
.. seealso:: :func:`resgram`, :func:`gabreassign`,
:func:`~ltfatpy.gabor.dgt.dgt`
- References:
:cite:`aufl95,cmdaaufl97,fl65`
"""
# NOTE: This function doesn't support the parameter lt (lattice type)
# supported by the corresponding octave function and the lattice used is
# seperable (square lattice lt = (0, 1)).
# NOTE: As in the octave version of this function, if needed, the
# undocumented optional keyword minlvl is available when using method=dgt.
# So it can be passed using a call of the following form:
# (tgrad, fgrad, c) = gabphasegrad('dgt', f, g, a, M, minlvl=val)
if not isinstance(method, str):
raise TypeError('First argument must be a str containing the method '
'name, "dgt", "phase" or "abs".')
method = method.lower()
if method == 'dgt':
raise Exception("We dont know if this works")
# --------------------------- DGT method ------------------------
(f, g, a, M) = args
if 'L' in kwargs:
L = kwargs['L']
else:
L = None
if 'minlvl' in kwargs:
minlvl = kwargs['minlvl']
else:
minlvl = np.finfo(np.float64).tiny
# # ----- step 1 : Verify f and determine its length -------
# Change f to correct shape.
f, Ls, W, wasrow, remembershape = comp_sigreshape_pre(f, 0)
# # ------ step 2: Verify a, M and L
if not L:
# ----- step 2b : Verify a, M and get L from the signal length f---
L = dgtlength(Ls, a, M)
else:
# ----- step 2a : Verify a, M and get L
Luser = dgtlength(L, a, M)
if Luser != L:
raise ValueError('Incorrect transform length L = {0:d} '
'specified. Next valid length is L = {1:d}. '
'See the help of dgtlength for the '
'requirements.'.format(L, Luser))
# # ----- step 3 : Determine the window
g, info = gabwin(g, a, M, L)
if L < info['gl']:
raise ValueError('Window is too long.')
# # ----- step 4: final cleanup ---------------
f = postpad(f, L)
# # ------ algorithm starts --------------------
# Compute the time weighted version of the window.
hg = fftindex(L) * g
# The computation done this way is insensitive to whether the dgt is
# phaselocked or not.
c = comp_sepdgt(f, g, a, M, 0)
c_h = comp_sepdgt(f, hg, a, M, 0)
c_s = np.abs(c) ** 2
# Remove small values because we need to divide by c_s
c_s = np.maximum(c_s, minlvl * np.max(c_s))
# Compute the group delay
fgrad = np.real(c_h * c.conjugate() / c_s)
if info['gauss']:
# The method used below only works for the Gaussian window, because
# the time derivative and the time multiplicative of the Gaussian
# are identical.
tgrad = np.imag(c_h * c.conjugate() / c_s) / info['tfr']
else:
# The code below works for any window, and not just the Gaussian
dg = pderiv(g, difforder=float('inf')) / (2 * np.pi)
c_d = comp_sepdgt(f, dg, a, M, 0)
# NOTE: There is a bug here in the original octave file as it
# contains a reshape that uses an undefined variable N.
# You can get the error with LTFAT 2.1.0 in octave by running for
# example:
# gabphasegrad('dgt', rand(16,1), rand(16,1), 4, 16)
#
# So we just comment out the corresponding line here, as it appears
# to be unneeded:
# c_d.shape = (M, N, W)
# Compute the instantaneous frequency
tgrad = -np.imag(c_d * c.conjugate() / c_s)
return (tgrad, fgrad, c)
elif method == 'phase':
# --------------------------- phase method ------------------------
(cphase, a, M) = args
if not np.isrealobj(cphase):
raise TypeError("Input phase must be real valued. Use the 'angle'"
" function to compute the argument of complex "
"numbers.")
# --- linear method ---
if cphase.ndim == 3:
M2, N, W = cphase.shape # M2 is the number of channels from 0 to Nyquist
else:
M2, N = cphase.shape # M2 is the number of channels from 0 to Nyquist
L = N * a
b = L / M
# NOTE: The following code found in the original octave version of the function
# hasn't been translated here to Python as it is not used:
# if 0
#
# # This is the classic phase vocoder algorithm by Flanagan.
#
# tgrad = cphase-circshift(cphase,[0,-1]);
# tgrad = tgrad- 2*pi*round(tgrad/(2*pi));
# tgrad = -tgrad/(2*pi)*L;
#
# # Phase-lock the angles.
# TimeInd = (0:(N-1))*a;
# FreqInd = (0:(M-1))/M;
#
# phl = FreqInd'*TimeInd;
# cphase = cphase+2*pi.*phl;
#
# fgrad = cphase-circshift(cphase,[1,0]);
# fgrad = fgrad- 2*pi*round(fgrad/(2*pi));
# fgrad = -fgrad/(2*pi)*L;
#
# end;
# This is the classic phase vocoder algorithm by Flanagan modified to
# yield a second order centered difference approximation.
# Forward approximation
tgrad_1 = cphase - np.roll(cphase, -1, axis=1)
# numpy round function doesn't use the same convention than octave for
# half-integers but the standard Python round function uses the same
# convention than octave, so we use the Python standard round in the
# computation below
octave_round = np.vectorize(round)
tgrad_1 = tgrad_1 - 2 * np.pi * octave_round(tgrad_1 / (2 * np.pi))
# Backward approximation
tgrad_2 = np.roll(cphase, 1, axis=1) - cphase
tgrad_2 = tgrad_2 - 2 * np.pi * octave_round(tgrad_2 / (2 * np.pi))
# Average
tgrad = (tgrad_1 + tgrad_2) / 2
tgrad = -tgrad / (2 * np.pi * a) * L
# Phase-lock the angles.
TimeInd = np.arange(N) * a
FreqInd = np.arange(M2) / M
phl = np.dot(FreqInd.reshape((FreqInd.shape[0], 1)),
TimeInd.reshape((1, TimeInd.shape[0])))
# NOTE: in the following lines, the shape of phl is changed so that
# broadcasting works in the following addition with cphase when cphase
# has more than two dimensions
new_shape = np.ones((len(cphase.shape),), dtype=int)
new_shape[0] = phl.shape[0]
new_shape[1] = phl.shape[1]
phl = phl.reshape(tuple(new_shape))
cphase = cphase + 2 * np.pi * phl
cphase_to_aprox = np.concatenate([-cphase[1:2], cphase, -cphase[-2:-1]])
# Forward approximation
fgrad_1 = cphase_to_aprox - np.roll(cphase_to_aprox, -1, axis=0)
fgrad_1 = fgrad_1 - 2 * np.pi * octave_round(fgrad_1 / (2 * np.pi))
fgrad_1 = fgrad_1[1:-1]
# Backward approximation
fgrad_2 = np.roll(cphase_to_aprox, 1, axis=0) - cphase_to_aprox
fgrad_2 = fgrad_2 - 2 * np.pi * octave_round(fgrad_2 / (2 * np.pi))
fgrad_2 = fgrad_2[1:-1]
# Average
fgrad = (fgrad_1 + fgrad_2) / 2
fgrad = fgrad / (2 * np.pi * b) * L
return (tgrad, fgrad)
elif method == 'abs':
# --------------------------- abs method ------------------------
(s, g, a, M) = args
if 'difforder' in kwargs:
difforder = kwargs['difforder']
else:
difforder = 2
if not np.all(s >= 0.):
raise ValueError('First input argument must be positive or zero.')
if s.ndim == 3:
M2, N, W = s.shape
else:
M2, N = s.shape
L = N * a
g, info = gabwin(g, a, M, L)
if not info['gauss']:
raise ValueError('The window must be a Gaussian window (specified '
'as a string or as a dictionary).')
b = L / M
# We must avoid taking the log of zero.
# Therefore we add the smallest possible
# number
logs = np.log(s + np.finfo(s.dtype).tiny)
# XXX REMOVE Add a small constant to limit the dynamic range. This
# should lessen the problem of errors in the differentiation for points
# close to (but not exactly) zeros points.
maxmax = np.max(logs)
tt = -11.
logs[logs < (maxmax + tt)] = tt
fgrad = pderiv(logs, 1, difforder) / (2 * np.pi) * info['tfr']
tgrad = pderiv(logs, 0, difforder) / (2 * np.pi * info['tfr']) * (M/M2)
# Fix the first and last rows .. the
# borders are symmetric so the centered difference is 0
tgrad[0, :] = 0
tgrad[-1, :] = 0
return (tgrad, fgrad)
else:
raise ValueError("First argument must be the method name, 'dgt', "
"'phase' or 'abs'.")
| 15,380 | 33.956818 | 81 | py |
tifresi | tifresi-master/tifresi/phase/pghi_masked.py | import numpy as np
import heapq
import numba
from numba import njit
__author__ = 'Andres'
@njit
def pghi(spectrogram, tgrad, fgrad, a, M, L, mask, tol=1e-7, phase=None):
""""Implementation of "A noniterativemethod for reconstruction of phase from STFT magnitude". by Prusa, Z., Balazs, P., and Sondergaard, P. Published in IEEE/ACM Transactions on Audio, Speech and LanguageProcessing, 25(5):1154–1164 on 2017.
a = hop size
M = fft window size
L = signal length
tol = tolerance under the max value of the spectrogram
mask = binary mask to be used with partially known phase
phase = partially known phase
"""
spectrogram = spectrogram.copy()
if phase is None:
phase = np.zeros_like(spectrogram)
abstol = np.array([1e-10], dtype=spectrogram.dtype)[0] # if abstol is not the same type as spectrogram then casting occurs
masked_x, masked_y = np.where(mask == 1)
for x, y in zip(masked_x, masked_y):
spectrogram[x, y] = abstol # Do not integrate over the mask
max_val = np.amax(spectrogram) # Find maximum value to start integration
max_x, max_y = np.where(spectrogram == max_val)
max_pos = max_x[0], max_y[0]
if max_val <= abstol: # Avoid integrating the phase for the spectogram of a silent signal
print('Empty spectrogram')
return phase, mask
M2 = spectrogram.shape[0]
N = spectrogram.shape[1]
b = L / M
sampToRadConst = 2.0 * np.pi / L # Rescale the derivs to rad with step 1 in both directions
tgradw = a * tgrad * sampToRadConst
fgradw = - b * (
fgrad + np.arange(spectrogram.shape[1]) * a) * sampToRadConst # also convert relative to freqinv convention
magnitude_heap = [(-max_val, max_pos)] # Numba requires heap to be initialized with content
mask[max_pos] = 1
spectrogram[max_pos] = abstol
small_x, small_y = np.where(spectrogram < max_val * tol)
for x, y in zip(small_x, small_y):
spectrogram[x, y] = abstol # Do not integrate over silence
while max_val > abstol:
while len(magnitude_heap) > 0: # Integrate around maximum value until reaching silence
max_val, max_pos = heapq.heappop(magnitude_heap)
col = max_pos[0]
row = max_pos[1]
# Spread to 4 direct neighbors
N_pos = col + 1, row
S_pos = col - 1, row
E_pos = col, row + 1
W_pos = col, row - 1
if max_pos[0] < M2 - 1 and spectrogram[N_pos] > abstol and mask[N_pos] == 0:
phase[N_pos] = phase[max_pos] + (fgradw[max_pos] + fgradw[N_pos]) / 2
heapq.heappush(magnitude_heap, (-spectrogram[N_pos], N_pos))
mask[N_pos] = 1
spectrogram[N_pos] = abstol
if max_pos[0] > 0 and spectrogram[S_pos] > abstol and mask[S_pos] == 0:
phase[S_pos] = phase[max_pos] - (fgradw[max_pos] + fgradw[S_pos]) / 2
heapq.heappush(magnitude_heap, (-spectrogram[S_pos], S_pos))
mask[S_pos] = 1
spectrogram[S_pos] = abstol
if max_pos[1] < N - 1 and spectrogram[E_pos] > abstol and mask[E_pos] == 0:
phase[E_pos] = phase[max_pos] + (tgradw[max_pos] + tgradw[E_pos]) / 2
heapq.heappush(magnitude_heap, (-spectrogram[E_pos], E_pos))
mask[E_pos] = 1
spectrogram[E_pos] = abstol
if max_pos[1] > 0 and spectrogram[W_pos] > abstol and mask[W_pos] == 0:
phase[W_pos] = phase[max_pos] - (tgradw[max_pos] + tgradw[W_pos]) / 2
heapq.heappush(magnitude_heap, (-spectrogram[W_pos], W_pos))
mask[W_pos] = 1
spectrogram[W_pos] = abstol
max_val = np.amax(spectrogram) # Find new maximum value to start integration
max_x, max_y = np.where(spectrogram == max_val)
max_pos = max_x[0], max_y[0]
heapq.heappush(magnitude_heap, (-max_val, max_pos))
mask[max_pos] = 1
spectrogram[max_pos] = abstol
return phase, mask
| 4,078 | 40.622449 | 244 | py |
tifresi | tifresi-master/tifresi/phase/__init__.py | from tifresi.phase import modGabPhaseGrad
from tifresi.phase import pghi_masked
from tifresi.phase import pghi
 | 110 | 36 | 41 | py |
tifresi | tifresi-master/tifresi/tests/test_stft.py | import sys
# sys.path.append('../')
import numpy as np
from tifresi.stft import GaussTF, GaussTruncTF
def test_stft_different_length(a = 128, M = 1024, trunc=False):
L = 128 * 1024
if trunc:
tfsystem = GaussTruncTF(a, M)
else:
tfsystem = GaussTF(a, M)
x = np.random.rand(L) * 2 - 1
x = x / np.linalg.norm(x)
x[:8 * M] = 0
x[-8 * M:] = 0
x2 = np.pad(x.copy(), L)[L:]
X = tfsystem.dgt(x)
xdot = tfsystem.idgt(X)
X2 = tfsystem.dgt(x2)
x2dot = tfsystem.idgt(X2)
if trunc:
assert (np.linalg.norm(xdot - x) < 1e-10)
assert (np.linalg.norm(x2dot - x2) < 1e-10)
assert (np.sum(np.abs(X2[:, :X.shape[1]] - X)) < 1e-6)
else:
assert (np.linalg.norm(xdot - x) < 1e-12)
assert (np.linalg.norm(x2dot - x2) < 1e-12)
assert (np.sum(np.abs(X2[:, :X.shape[1]] - X)) < 1e-6)
def test_stft_different_hop_size(a = 128, M = 1024, trunc=False):
hop_size = a
if trunc:
tfsystem = GaussTruncTF(hop_size, M)
else:
tfsystem = GaussTF(hop_size, M)
L = 128 * 1024
x = np.random.rand(L) * 2 - 1
x = x / np.linalg.norm(x)
X128 = tfsystem.dgt(x, hop_size=128)
X256 = tfsystem.dgt(x, hop_size=256)
assert (np.sum(np.abs(X256 - X128[:, ::2])) < 1e-12)
x256dot = tfsystem.idgt(X256, hop_size=256)
x128dot = tfsystem.idgt(X128, hop_size=128)
if trunc:
assert (np.linalg.norm(x128dot - x) < 1e-10)
assert (np.linalg.norm(x256dot - x) < 1e-10)
else:
assert (np.linalg.norm(x128dot - x) < 1e-12)
assert (np.linalg.norm(x256dot - x) < 1e-12)
def test_stft_different_channels(a = 128, M = 1024, trunc=False):
hop_size = a
stft_channels = M
if trunc:
tfsystem = GaussTruncTF(hop_size, stft_channels)
else:
tfsystem = GaussTF(hop_size, stft_channels)
L = 128 * 1024
x = np.random.rand(L) * 2 - 1
x = x / np.linalg.norm(x)
X1024 = tfsystem.dgt(x, stft_channels=1024)
X512 = tfsystem.dgt(x, stft_channels=512)
assert (np.sum(np.abs(X512 - X1024[::2, :])) < 1e-12)
x1024dot = tfsystem.idgt(X1024, stft_channels=1024)
x512dot = tfsystem.idgt(X512, stft_channels=512)
if trunc:
assert (np.linalg.norm(x1024dot - x) < 1e-5)
assert (np.linalg.norm(x512dot - x) < 1e-5)
else:
assert (np.linalg.norm(x1024dot - x) < 1e-12)
assert (np.linalg.norm(x512dot - x) < 1e-12)
def main():
combinations = [
(128, 1024),
(128, 512),
(256, 1024)
]
for trunc in [True, False]:
for a,M in combinations:
print("Test combination {},{},{}".format(trunc, a,M))
test_stft_different_length(a,M, trunc)
test_stft_different_hop_size(a,M, trunc)
test_stft_different_channels(a,M, trunc)
if __name__ == "__main__":
main()
| 2,917 | 30.042553 | 65 | py |
tifresi | tifresi-master/tifresi/tests/__init__.py | 0 | 0 | 0 | py |
tifresi | tifresi-master/tifresi/tests/test_transforms.py | import librosa
import numpy as np
import sys
# sys.path.append('../')
from tifresi.transforms import log_spectrogram, inv_log_spectrogram, log_mel_spectrogram, mel_spectrogram
__author__ = 'Andres'
def test_log_spectrogram():
x = np.random.rand(1024 * 1024).reshape([1024, 1024])
log_x = log_spectrogram(x, dynamic_range_dB=80)
inv_log_x = inv_log_spectrogram(log_x)
assert (np.linalg.norm(inv_log_x - x) < 1e-7)
def test_log_spectrogram_small_range():
x = np.random.rand(1024 * 1024).reshape([1024, 1024])
log_x = log_spectrogram(x, dynamic_range_dB=30)
inv_log_x = inv_log_spectrogram(log_x)
assert (np.linalg.norm(inv_log_x - x) < 0.08)
def test_log_mel_spectrogram():
x = np.random.rand(1024 * 513).reshape([513, 1024])
x_mel = mel_spectrogram(x)
log_x = log_mel_spectrogram(x, dynamic_range_dB=80)
inv_log_x = inv_log_spectrogram(log_x)
assert (np.linalg.norm(inv_log_x - x_mel) < 1e-7)
def test_log_mel_spectrogram_small_range():
x = np.random.rand(1024 * 513).reshape([513, 1024])
x_mel = mel_spectrogram(x)
log_x = log_mel_spectrogram(x, dynamic_range_dB=30)
inv_log_x = inv_log_spectrogram(log_x)
assert (np.linalg.norm(inv_log_x - x_mel) < 0.08)
def test_mel_spectrogram():
x = np.random.rand(256 * 1025).reshape([1025, 256])
sr = 28000
stft_channels = 2048
n_mels = 40
fmin = 40
fmax = 12000
mel_basis = librosa.filters.mel(sr=sr, n_fft=stft_channels, n_mels=n_mels, fmin=fmin, fmax=fmax)
x_mel = mel_spectrogram(x, stft_channels=stft_channels, n_mels=n_mels, fmin=fmin, fmax=fmax, sr=sr)
x_test_mel = np.matmul(mel_basis, x)
assert (np.linalg.norm(x_test_mel - x_mel) < 1e-20)
if __name__ == "__main__":
test_log_spectrogram()
test_log_spectrogram_small_range()
test_log_mel_spectrogram()
test_log_mel_spectrogram_small_range()
test_mel_spectrogram()
| 1,922 | 25.342466 | 105 | py |
tifresi | tifresi-master/tifresi/pipelines/LJspeech.py | import numpy as np
import librosa
from tifresi.stft import GaussTF, GaussTruncTF
from tifresi.pipelines.LJparams import LJParams as p
from tifresi.transforms import mel_spectrogram, log_spectrogram
from tifresi.utils import downsample_tf_time, preprocess_signal, load_signal
def compute_mag_mel_from_path(path):
y, sr = load_signal(path, p.sr)
y = preprocess_signal(y, p.M)
return compute_mag_mel(y)
def compute_mag_mel(y):
'''Compute spectrogram and MEL spectrogram from signal.
Args:
y : signal
Returns:
      mel: A 2d array of shape (n_mels, T // reduction_rate) and dtype of float32.
      mag: A 2d array of shape (1 + stft_channels/2, T) and dtype of float32.
'''
if p.use_truncated:
tfsystem = GaussTruncTF(hop_size=p.hop_size, stft_channels=p.stft_channels)
else:
tfsystem = GaussTF(hop_size=p.hop_size, stft_channels=p.stft_channels)
# magnitude spectrogram
mag = tfsystem.spectrogram(y, normalize=p.normalize)
# mel spectrogram
mel = mel_spectrogram(mag, stft_channels=p.stft_channels, n_mels=p.n_mels, fmin=p.fmin, fmax=p.fmax, sr=p.sr)
# to decibel
mag = log_spectrogram(mag, dynamic_range_dB=p.stft_dynamic_range_dB)/p.stft_dynamic_range_dB+1
assert(np.max(mag)<=1)
assert(np.min(mag)>=0)
# Reduction rate
if p.reduction_rate > 1:
mel = downsample_tf_time(mel, p.reduction_rate)
mel = log_spectrogram(mel, dynamic_range_dB=p.mel_dynamic_range_dB)/p.mel_dynamic_range_dB+1
# Float32
mel = mel.astype(np.float32)
mag = mag.astype(np.float32)
return mel, mag
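# A minimal usage sketch of the pipeline above; the wav path is hypothetical and
# the shapes follow LJParams (n_mels=80, stft_channels=1024, reduction_rate=2):
#
#   mel, mag = compute_mag_mel_from_path("LJ001-0001.wav")
#   # mag: (stft_channels // 2 + 1, n_frames), log magnitude scaled into [0, 1]
#   # mel: (n_mels, n_frames // reduction_rate), log-compressed mel spectrogram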
| 1,611 | 32.583333 | 113 | py |
tifresi | tifresi-master/tifresi/pipelines/LJparams.py | from tifresi.hparams import HParams
from tifresi.utils import downsample_tf_time
import librosa
import numpy as np
class LJParams(HParams):
# Signal parameters
sr = 22050 # Sampling frequency of the signal
M = 2*1024 # Ensure that the signal will be a multiple of M
# STFT parameters
stft_channels = 1024 # Number of frequency channels
hop_size = 256 # Hop size
use_truncated = True # use a truncated Gaussian
stft_dynamic_range_dB = 50 # dynamic range in dB for the STFT
normalize = True # Normalize STFT
# MEL parameters
n_mels = 80 # Number of mel frequency band
fmin = 0 # Minimum frequency for the MEL
fmax = None # Maximum frequency for the MEL (None -> Nyquist frequency)
reduction_rate = 2 # Reduction rate for the frequency channel
mel_dynamic_range_dB = 50 # dynamic range in dB for the MEL
mel_basis = librosa.filters.mel(sr=sr, n_fft=stft_channels, n_mels=n_mels, fmin=fmin, fmax=fmax)
mel_inverse_basis = np.linalg.pinv(mel_basis)
| 1,044 | 32.709677 | 100 | py |
tifresi | tifresi-master/tifresi/pipelines/__init__.py | from tifresi.pipelines import LJspeech | 38 | 38 | 38 | py |
ccc_mse_ser | ccc_mse_ser-master/code/ser_iemocap_gemaps_ccc.py | # Dimensional speech emotion recognition
# To evaluate loss function (MSE vs CCC)
# Coded by Bagus Tris Atmaja ([email protected])
# changelog
# 2020-02-13: Modified from gemaps-paa hfs
# 2020-02-14: Use 'tanh' activation to lock the output range in [-1, 1]
# with RMSprop optimizer
import numpy as np
import pickle
import pandas as pd
import keras.backend as K
from keras.models import Model
from keras.layers import Input, Dense, CuDNNLSTM, Flatten, \
Embedding, Dropout, BatchNormalization, \
RNN, concatenate, Activation
from keras.callbacks import EarlyStopping
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from keras.preprocessing.text import Tokenizer
from keras.preprocessing import sequence
import random as rn
import tensorflow as tf
rn.seed(123)
np.random.seed(99)
tf.set_random_seed(1234)
# load feature and labels
feat = np.load('/home/s1820002/spro2020/data/feat_ws_3.npy')
vad = np.load('/home/s1820002/IEMOCAP-Emotion-Detection/y_egemaps.npy')
# use only mean and std
feat = feat[:,:-1]
# for LSTM input shape (batch, steps, features/channel)
#feat = feat.reshape(feat.shape[0], 1, feat.shape[1])
# clip outlier ratings (0.5 and 5.5) back into the [1, 5] range
vad = np.where(vad==5.5, 5.0, vad)
vad = np.where(vad==0.5, 1.0, vad)
# standardization
scaled_feature = True
# set Dropout
do = 0.3
if scaled_feature == True:
scaler = StandardScaler()
scaler = scaler.fit(feat) #.reshape(feat.shape[0]*feat.shape[1], feat.shape[2]))
scaled_feat = scaler.transform(feat) #.reshape(feat.shape[0]*feat.shape[1], feat.shape[2]))
#scaled_feat = scaled_feat.reshape(feat.shape[0], feat.shape[1], feat.shape[2])
feat = scaled_feat
else:
feat = feat
scaled_vad = True
# standardization
if scaled_vad:
scaler = MinMaxScaler(feature_range=(-1, 1))
scaler = scaler.fit(vad) #.reshape(vad.shape[0]*vad.shape[1], vad.shape[2]))
scaled_vad = scaler.transform(vad) #.reshape(vad.shape[0]*vad.shape[1], vad.shape[2]))
vad = scaled_vad
else:
vad = vad
# Concordance correlation coefficient (CCC)-based loss function - using non-inductive statistics
def ccc(gold, pred):
gold = K.squeeze(gold, axis=-1)
pred = K.squeeze(pred, axis=-1)
gold_mean = K.mean(gold, axis=-1, keepdims=True)
pred_mean = K.mean(pred, axis=-1, keepdims=True)
covariance = (gold-gold_mean)*(pred-pred_mean)
gold_var = K.mean(K.square(gold-gold_mean), axis=-1, keepdims=True)
pred_var = K.mean(K.square(pred-pred_mean), axis=-1, keepdims=True)
ccc = K.constant(2.) * covariance / (gold_var + pred_var + K.square(gold_mean - pred_mean) + K.common.epsilon())
return ccc
def ccc_loss(gold, pred):
# input (num_batches, seq_len, 1)
ccc_loss = K.constant(1.) - ccc(gold, pred)
return ccc_loss
# reshape input feature for LSTM
feat = feat.reshape(feat.shape[0], 1, feat.shape[1])
# API model, if use RNN, first two rnn layer must return_sequences=True
def api_model(alpha, beta, gamma):
# speech network
input_speech = Input(shape=(feat.shape[1], feat.shape[2]), name='speech_input')
net_speech = BatchNormalization()(input_speech)
net_speech = CuDNNLSTM(feat.shape[2], return_sequences=True)(net_speech)
net_speech = CuDNNLSTM(256, return_sequences=True)(net_speech)
net_speech = CuDNNLSTM(256, return_sequences=False)(net_speech)
#net_speech = Flatten()(net_speech)
net_speech = Dense(64)(net_speech)
#net_speech = Dropout(0.1)(net_speech)
target_names = ('v', 'a', 'd')
model_combined = [Dense(1, name=name, activation='tanh')(net_speech) for name in target_names]
model = Model(input_speech, model_combined)
#model.compile(loss=ccc_loss, optimizer='rmsprop', metrics=[ccc])
model.compile(loss=ccc_loss,
loss_weights={'v': alpha, 'a': beta, 'd': gamma},
optimizer='rmsprop', metrics=[ccc, 'mse'])
return model
#def main(alpha, beta, gamma):
model = api_model(0.1, 0.5, 0.4)
model.summary()
# the first 7869 utterances cover sessions 1-4; session 5 (index 7869 onward) is held out for LOSO evaluation
earlystop = EarlyStopping(monitor='val_loss', mode='min', patience=10,
restore_best_weights=True)
hist = model.fit(feat[:7869], vad[:7869].T.tolist(), batch_size=64, #best:8
validation_split=0.2, epochs=200, verbose=1, shuffle=True,
callbacks=[earlystop])
metrik = model.evaluate(feat[7869:], vad[7869:].T.tolist())
print('CCC= ', np.array(metrik)[[-6,-4,-2]])
print('MSE= ', np.array(metrik)[[-5,-3,-1]])
# Plot scatter
#va = vad[7869:, :-1]
#predik_vad = model.predict(feat[7869:], batch_size=64)
#predik_va = np.array(predik_vad).T.reshape(2170,3)[:,:-1]
#import matplotlib.pyplot as plt
#plt.scatter(va[:,0], va[:,1])
#plt.scatter(predik_va[:,0], predik_va[:,1])
#plt.savefig('scatter_gemaps_mse.pdf')
## check max min
#predik_va.max()
#predik_va.min()
| 4,905 | 34.042857 | 123 | py |
ccc_mse_ser | ccc_mse_ser-master/code/ser_iemocap_gemaps_mse.py | # Dimensional speech emotion recognition
# To evaluate loss function (MSE vs CCC)
# Coded by Bagus Tris Atmaja ([email protected])
# changelog
# 2020-02-13: Modified from gemaps-paa hfs
import numpy as np
import pickle
import pandas as pd
import keras.backend as K
from keras.models import Model
from keras.layers import Input, Dense, CuDNNLSTM, Flatten, \
Embedding, Dropout, BatchNormalization, \
RNN, concatenate, Activation
from keras.callbacks import EarlyStopping
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from keras.preprocessing.text import Tokenizer
from keras.preprocessing import sequence
import random as rn
import tensorflow as tf
rn.seed(123)
np.random.seed(99)
tf.set_random_seed(1234)
# load feature and labels
feat = np.load('/home/s1820002/spro2020/data/feat_ws_3.npy')
vad = np.load('/home/s1820002/IEMOCAP-Emotion-Detection/y_egemaps.npy')
# use only mean and std
feat = feat[:,:-1]
# for LSTM input shape (batch, steps, features/channel)
#feat = feat.reshape(feat.shape[0], 1, feat.shape[1])
# remove outlier, < 1, > 5
vad = np.where(vad==5.5, 5.0, vad)
vad = np.where(vad==0.5, 1.0, vad)
# standardization
scaled_feature = True
if scaled_feature == True:
scaler = StandardScaler()
scaler = scaler.fit(feat) #.reshape(feat.shape[0]*feat.shape[1], feat.shape[2]))
scaled_feat = scaler.transform(feat)#.reshape(feat.shape[0]*feat.shape[1], feat.shape[2]))
#scaled_feat = scaled_feat.reshape(feat.shape[0], feat.shape[1], feat.shape[2])
feat = scaled_feat
else:
feat = feat
scaled_vad = True
# standardization
if scaled_vad:
scaler = MinMaxScaler(feature_range=(-1, 1))
scaler = scaler.fit(vad) #.reshape(vad.shape[0]*vad.shape[1], vad.shape[2]))
scaled_vad = scaler.transform(vad) #.reshape(vad.shape[0]*vad.shape[1], vad.shape[2]))
vad = scaled_vad
else:
vad = vad
# Concordance correlation coefficient (CCC)-based loss function - using non-inductive statistics
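# For gold labels x and predictions y, the concordance correlation coefficient is
#   CCC = 2*cov(x, y) / (var(x) + var(y) + (mean(x) - mean(y))^2)
# The function below returns per-sample terms over the current batch; their batch
# average (taken by Keras) equals the CCC above, with epsilon guarding the division.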
def ccc(gold, pred):
gold = K.squeeze(gold, axis=-1)
pred = K.squeeze(pred, axis=-1)
gold_mean = K.mean(gold, axis=-1, keepdims=True)
pred_mean = K.mean(pred, axis=-1, keepdims=True)
covariance = (gold-gold_mean)*(pred-pred_mean)
gold_var = K.mean(K.square(gold-gold_mean), axis=-1, keepdims=True)
pred_var = K.mean(K.square(pred-pred_mean), axis=-1, keepdims=True)
ccc = K.constant(2.) * covariance / (gold_var + pred_var + K.square(gold_mean - pred_mean) + K.common.epsilon())
return ccc
def ccc_loss(gold, pred):
# input (num_batches, seq_len, 1)
ccc_loss = K.constant(1.) - ccc(gold, pred)
return ccc_loss
# reshape input feature for LSTM
feat = feat.reshape(feat.shape[0], 1, feat.shape[1])
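# shape is now (batch, timesteps=1, features): each utterance-level HFS vector is
# fed to the LSTM stack as a single-timestep sequence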
# API model; if using an RNN, the first two RNN layers must set return_sequences=True
def api_model(alpha, beta, gamma):
# speech network
input_speech = Input(shape=(feat.shape[1], feat.shape[2]), name='speech_input')
net_speech = BatchNormalization()(input_speech)
net_speech = CuDNNLSTM(feat.shape[2], return_sequences=True)(net_speech)
net_speech = CuDNNLSTM(256, return_sequences=True)(net_speech)
net_speech = CuDNNLSTM(256, return_sequences=False)(net_speech)
#net_speech = Flatten()(net_speech)
net_speech = Dense(64)(net_speech)
#net_speech = Dropout(0.1)(net_speech)
target_names = ('v', 'a', 'd')
model_combined = [Dense(1, name=name, activation='tanh')(net_speech) for name in target_names]
model = Model(input_speech, model_combined)
#model.compile(loss=ccc_loss, optimizer='rmsprop', metrics=[ccc])
model.compile(loss='mse',
loss_weights={'v': alpha, 'a': beta, 'd': gamma},
optimizer='rmsprop', metrics=[ccc, 'mse'])
return model
#def main(alpha, beta, gamma):
model = api_model(0.1, 0.5, 0.4)
model.summary()
# index 7869 marks the first utterance of session 5, which is held out (LOSO)
earlystop = EarlyStopping(monitor='val_loss', mode='min', patience=10,
restore_best_weights=True)
hist = model.fit(feat[:7869], vad[:7869].T.tolist(), batch_size=64, #best:8
validation_split=0.2, epochs=200, verbose=1, shuffle=True,
callbacks=[earlystop])
metrik = model.evaluate(feat[7869:], vad[7869:].T.tolist())
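# with three named outputs and metrics=[ccc, 'mse'], evaluate() should return
# [loss, v_loss, a_loss, d_loss, v_ccc, v_mse, a_ccc, a_mse, d_ccc, d_mse],
# so [-6, -4, -2] index the per-dimension CCCs and [-5, -3, -1] the MSEs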
print('CCC= ', np.array(metrik)[[-6,-4,-2]])
print('MSE= ', np.array(metrik)[[-5,-3,-1]])
# Plot scatter
#va = vad[7869:, :-1]
#predik_vad = model.predict(feat[7869:], batch_size=64)
#predik_va = np.array(predik_vad).T.reshape(2170,3)[:,:-1]
#import matplotlib.pyplot as plt
#plt.scatter(va[:,0], va[:,1])
#plt.scatter(predik_va[:,0], predik_va[:,1])
#plt.savefig('scatter_gemaps_mse.pdf')
## check max min
#predik_va.max()
#predik_va.min()
| 4,769 | 34.333333 | 123 | py |
ccc_mse_ser | ccc_mse_ser-master/code/ser_improv_gemaps_mse.py | # ser_improv_gemaps_mse.py
# speech emotion recognition for MSP-IMPROV dataset with GeMAPS
# HFS features using MSE-based loss function
# coded by Bagus Tris Atmaja ([email protected])
# changelog:
# 2020-02-13: Initial code, modified from deepMLP repo
import numpy as np
import pickle
import pandas as pd
import keras.backend as K
from keras.models import Model
from keras.layers import Input, Dense, CuDNNLSTM, Flatten, \
Embedding, Dropout, BatchNormalization, \
RNN, concatenate, Activation
from keras.callbacks import EarlyStopping
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from keras.preprocessing.text import Tokenizer
from keras.preprocessing import sequence
import random as rn
import tensorflow as tf
rn.seed(123)
np.random.seed(99)
tf.set_random_seed(1234)
# loading file and label
feat_train = np.load('/home/s1820002/ccc_mse/data/feat_hfs_gemaps_msp_train.npy')
feat_test = np.load('/home/s1820002/ccc_mse/data/feat_hfs_gemaps_msp_test.npy')
feat = np.vstack([feat_train, feat_test])
list_path = '/home/s1820002/msp-improv/helper/improv_data.csv'
list_file = pd.read_csv(list_path, index_col=None)
list_file = pd.DataFrame(list_file)
data = list_file.sort_values(by=['wavfile'])
vad_train = []
vad_test = []
for index, row in data.iterrows():
#print(row['wavfile'], row['v'], row['a'], row['d'])
if int(row['wavfile'][18]) in range(1,6):
#print("Process vad..", row['wavfile'])
vad_train.append([row['v'], row['a'], row['d']])
else:
#print("Process..", row['wavfile'])
vad_test.append([row['v'], row['a'], row['d']])
vad = np.vstack([vad_train, vad_test])
# standardization
scaled_feature = True
if scaled_feature:
scaler = StandardScaler()
scaler = scaler.fit(feat)
scaled_feat = scaler.transform(feat)
feat = scaled_feat
else:
feat = feat
scaled_vad = True
# standardization
if scaled_vad:
scaler = MinMaxScaler(feature_range=(-1, 1))
scaler = scaler.fit(vad) #.reshape(vad.shape[0]*vad.shape[1], vad.shape[2]))
scaled_vad = scaler.transform(vad) #.reshape(vad.shape[0]*vad.shape[1], vad.shape[2]))
vad = scaled_vad
else:
vad = vad
# reshape feat size to match LSTM config
feat = feat.reshape(feat.shape[0], 1, feat.shape[1])
# train/test split, LOSO
X_train = feat[:len(feat_train)]
X_test = feat[len(feat_train):]
y_train = vad[:len(vad_train)]
y_test = vad[len(vad_train):]
# Concordance correlation coefficient (CCC)-based loss function - using non-inductive statistics
def ccc(gold, pred):
gold = K.squeeze(gold, axis=-1)
pred = K.squeeze(pred, axis=-1)
gold_mean = K.mean(gold, axis=-1, keepdims=True)
pred_mean = K.mean(pred, axis=-1, keepdims=True)
covariance = (gold-gold_mean)*(pred-pred_mean)
gold_var = K.mean(K.square(gold-gold_mean), axis=-1, keepdims=True)
pred_var = K.mean(K.square(pred-pred_mean), axis=-1, keepdims=True)
ccc = K.constant(2.) * covariance / (gold_var + pred_var + K.square(gold_mean - pred_mean) + K.common.epsilon())
return ccc
def ccc_loss(gold, pred):
# input (num_batches, seq_len, 1)
ccc_loss = K.constant(1.) - ccc(gold, pred)
return ccc_loss
# API model; if using an RNN, the first two RNN layers must set return_sequences=True
def api_model():
inputs = Input(shape=(feat.shape[1], feat.shape[2]), name='feat_input')
net = BatchNormalization()(inputs)
#net = Bidirectional(LSTM(64, return_sequences=True, dropout=do, recurrent_dropout=do))(net)
net = CuDNNLSTM(feat.shape[2], return_sequences=True)(net)
net = CuDNNLSTM(256, return_sequences=True)(net)
net = CuDNNLSTM(256, return_sequences=False)(net)
net = Dense(64)(net)
target_names = ('v', 'a', 'd')
outputs = [Dense(1, name=name, activation='tanh')(net) for name in target_names]
model = Model(inputs=inputs, outputs=outputs) #=[out1, out2, out3])
model.compile(loss='mse', #{'v': ccc_loss, 'a': ccc_loss, 'd': ccc_loss},
loss_weights={'v': 0.3, 'a': 0.6, 'd': 0.1},
optimizer='rmsprop', metrics=[ccc, 'mse'])
return model
model2 = api_model()
model2.summary()
earlystop = EarlyStopping(monitor='val_loss', mode='min', patience=10, restore_best_weights=True)
hist = model2.fit(X_train, y_train.T.tolist(), batch_size=64,
validation_split=0.2, epochs=50, verbose=1, shuffle=True,
callbacks=[earlystop])
metrik = model2.evaluate(X_test, y_test.T.tolist())
print('CCC= ', np.array(metrik)[[-6,-4,-2]])
print('MSE= ', np.array(metrik)[[-5,-3,-1]])
| 4,639 | 32.868613 | 123 | py |
ccc_mse_ser | ccc_mse_ser-master/code/ser_iemocap_paa_ccc.py | # Dimensional speech emotion recognition from acoustic
# Changelog:
# 2019-09-01: initial version
# 2019-10-06: optimizer MTL parameters with linear search (in progress)
# 2020-12-25: modified from ser_iemocap_loso_hfs.py
# feature is either std+mean or std+mean+silence (uncomment line 44)
# 2020-02-13: Modified to evaluate loss function (MSE vs CCC) for EUSIPCO
import numpy as np
import pickle
import pandas as pd
import keras.backend as K
from keras.models import Model
from keras.layers import Input, Dense, CuDNNLSTM, Flatten, \
Embedding, Dropout, BatchNormalization, \
RNN, concatenate, Activation
from keras.callbacks import EarlyStopping
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from keras.preprocessing.text import Tokenizer
from keras.preprocessing import sequence
import random as rn
import tensorflow as tf
rn.seed(123)
np.random.seed(99)
tf.set_random_seed(1234)
# load feature and labels
feat = np.load('/home/s1820002/atsit/data/feat_34_hfs.npy')
vad = np.load('/home/s1820002/IEMOCAP-Emotion-Detection/y_egemaps.npy')
# for LSTM input shape (batch, steps, features/channel)
#feat = feat.reshape(feat.shape[0], 1, feat.shape[1])
# remove outlier, < 1, > 5
vad = np.where(vad==5.5, 5.0, vad)
vad = np.where(vad==0.5, 1.0, vad)
# standardization
scaled_feature = False
# set Dropout
do = 0.3
if scaled_feature == True:
scaler = StandardScaler()
scaler = scaler.fit(feat.reshape(feat.shape[0]*feat.shape[1], feat.shape[2]))
scaled_feat = scaler.transform(feat.reshape(feat.shape[0]*feat.shape[1], feat.shape[2]))
scaled_feat = scaled_feat.reshape(feat.shape[0], feat.shape[1], feat.shape[2])
feat = scaled_feat
else:
feat = feat
scaled_vad = True
# standardization
if scaled_vad:
scaler = MinMaxScaler(feature_range=(-1, 1))
scaler = scaler.fit(vad) #.reshape(vad.shape[0]*vad.shape[1], vad.shape[2]))
scaled_vad = scaler.transform(vad) #.reshape(vad.shape[0]*vad.shape[1], vad.shape[2]))
vad = scaled_vad
else:
vad = vad
# Concordance correlation coefficient (CCC)-based loss function - using non-inductive statistics
def ccc(gold, pred):
gold = K.squeeze(gold, axis=-1)
pred = K.squeeze(pred, axis=-1)
gold_mean = K.mean(gold, axis=-1, keepdims=True)
pred_mean = K.mean(pred, axis=-1, keepdims=True)
covariance = (gold-gold_mean)*(pred-pred_mean)
gold_var = K.mean(K.square(gold-gold_mean), axis=-1, keepdims=True)
pred_var = K.mean(K.square(pred-pred_mean), axis=-1, keepdims=True)
ccc = K.constant(2.) * covariance / (gold_var + pred_var + K.square(gold_mean - pred_mean) + K.common.epsilon())
return ccc
def ccc_loss(gold, pred):
# input (num_batches, seq_len, 1)
ccc_loss = K.constant(1.) - ccc(gold, pred)
return ccc_loss
# API model; if using an RNN, the first two RNN layers must set return_sequences=True
def api_model(alpha, beta, gamma):
# speech network
input_speech = Input(shape=(feat.shape[1], feat.shape[2]), name='speech_input')
net_speech = BatchNormalization()(input_speech)
net_speech = CuDNNLSTM(feat.shape[2], return_sequences=True)(net_speech)
net_speech = CuDNNLSTM(256, return_sequences=True)(net_speech)
net_speech = CuDNNLSTM(256, return_sequences=False)(net_speech)
#net_speech = Flatten()(net_speech)
net_speech = Dense(64)(net_speech)
#net_speech = Dropout(0.1)(net_speech)
target_names = ('v', 'a', 'd')
model_combined = [Dense(1, name=name, activation='tanh')(net_speech) for name in target_names]
model = Model(input_speech, model_combined)
#model.compile(loss=ccc_loss, optimizer='rmsprop', metrics=[ccc])
model.compile(loss=ccc_loss,
loss_weights={'v': alpha, 'a': beta, 'd': gamma},
optimizer='rmsprop', metrics=[ccc, 'mse'])
return model
#def main(alpha, beta, gamma):
model = api_model(0.1, 0.5, 0.4)
model.summary()
# index 7869 marks the first utterance of session 5, which is held out (LOSO)
earlystop = EarlyStopping(monitor='val_loss', mode='min', patience=10,
restore_best_weights=True)
hist = model.fit(feat[:7869], vad[:7869].T.tolist(), batch_size=64, #best:8
validation_split=0.2, epochs=200, verbose=1, shuffle=True,
callbacks=[earlystop])
metrik = model.evaluate(feat[7869:], vad[7869:].T.tolist())
print('CCC= ', np.array(metrik)[[-6,-4,-2]])
print('MSE= ', np.array(metrik)[[-5,-3,-1]])
| 4,495 | 35.552846 | 123 | py |
ccc_mse_ser | ccc_mse_ser-master/code/ser_improv_paa_ccc.py | # ser_improv_paa_ccc.py
# speech emotion recognition for MSP-IMPROV dataset with pyAudioAnalysis
# HFS features using CCC-based loss function
# coded by Bagus Tris Atmaja ([email protected])
# changelog:
# 2020-02-13: Initial code, modified from deepMLP repo
import numpy as np
import pickle
import pandas as pd
import keras.backend as K
from keras.models import Model
from keras.layers import Input, Dense, CuDNNLSTM, Flatten, \
Embedding, Dropout, BatchNormalization, \
RNN, concatenate, Activation
from keras.callbacks import EarlyStopping
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from keras.preprocessing.text import Tokenizer
from keras.preprocessing import sequence
import random as rn
import tensorflow as tf
rn.seed(123)
np.random.seed(99)
tf.set_random_seed(1234)
# loading file and label
feat_train = np.load('/home/s1820002/ccc_mse/data/feat_hfs_paa_msp_train.npy')
feat_test = np.load('/home/s1820002/ccc_mse/data/feat_hfs_paa_msp_test.npy')
feat = np.vstack([feat_train, feat_test])
list_path = '/home/s1820002/msp-improv/helper/improv_data.csv'
list_file = pd.read_csv(list_path, index_col=None)
list_file = pd.DataFrame(list_file)
data = list_file.sort_values(by=['wavfile'])
vad_train = []
vad_test = []
for index, row in data.iterrows():
#print(row['wavfile'], row['v'], row['a'], row['d'])
if int(row['wavfile'][18]) in range(1,6):
#print("Process vad..", row['wavfile'])
vad_train.append([row['v'], row['a'], row['d']])
else:
#print("Process..", row['wavfile'])
vad_test.append([row['v'], row['a'], row['d']])
vad = np.vstack([vad_train, vad_test])
# standardization
scaled_feature = False
if scaled_feature:
scaler = StandardScaler()
scaler = scaler.fit(feat)
scaled_feat = scaler.transform(feat)
feat = scaled_feat
else:
feat = feat
scaled_vad = True
# standardization
if scaled_vad:
scaler = MinMaxScaler(feature_range=(-1, 1))
scaler = scaler.fit(vad) #.reshape(vad.shape[0]*vad.shape[1], vad.shape[2]))
scaled_vad = scaler.transform(vad) #.reshape(vad.shape[0]*vad.shape[1], vad.shape[2]))
vad = scaled_vad
else:
vad = vad
# reshape feat size to match LSTM config
feat = feat.reshape(feat.shape[0], 1, feat.shape[1])
# train/test split, LOSO
X_train = feat[:len(feat_train)]
X_test = feat[len(feat_train):]
y_train = vad[:len(vad_train)]
y_test = vad[len(vad_train):]
# Concordance correlation coefficient (CCC)-based loss function - using non-inductive statistics
def ccc(gold, pred):
gold = K.squeeze(gold, axis=-1)
pred = K.squeeze(pred, axis=-1)
gold_mean = K.mean(gold, axis=-1, keepdims=True)
pred_mean = K.mean(pred, axis=-1, keepdims=True)
covariance = (gold-gold_mean)*(pred-pred_mean)
gold_var = K.mean(K.square(gold-gold_mean), axis=-1, keepdims=True)
pred_var = K.mean(K.square(pred-pred_mean), axis=-1, keepdims=True)
ccc = K.constant(2.) * covariance / (gold_var + pred_var + K.square(gold_mean - pred_mean) + K.common.epsilon())
return ccc
def ccc_loss(gold, pred):
# input (num_batches, seq_len, 1)
ccc_loss = K.constant(1.) - ccc(gold, pred)
return ccc_loss
# API model; if using an RNN, the first two RNN layers must set return_sequences=True
def api_model():
inputs = Input(shape=(feat.shape[1], feat.shape[2]), name='feat_input')
net = BatchNormalization()(inputs)
#net = Bidirectional(LSTM(64, return_sequences=True, dropout=do, recurrent_dropout=do))(net)
net = CuDNNLSTM(feat.shape[2], return_sequences=True)(net)
net = CuDNNLSTM(256, return_sequences=True)(net)
net = CuDNNLSTM(256, return_sequences=False)(net)
net = Dense(64)(net)
#net = Dropout(0.1)(net)
target_names = ('v', 'a', 'd')
outputs = [Dense(1, name=name, activation='tanh')(net) for name in target_names]
model = Model(inputs=inputs, outputs=outputs) #=[out1, out2, out3])
model.compile(loss=ccc_loss, #{'v': ccc_loss, 'a': ccc_loss, 'd': ccc_loss},
loss_weights={'v': 0.3, 'a': 0.6, 'd': 0.1},
optimizer='rmsprop', metrics=[ccc, 'mse'])
return model
model2 = api_model()
model2.summary()
earlystop = EarlyStopping(monitor='val_loss', mode='min', patience=10, restore_best_weights=True)
hist = model2.fit(X_train, y_train.T.tolist(), batch_size=64,
validation_split=0.2, epochs=50, verbose=1, shuffle=True,
callbacks=[earlystop])
metrik = model2.evaluate(X_test, y_test.T.tolist())
print('CCC= ', np.array(metrik)[[-6,-4,-2]])
print('MSE= ', np.array(metrik)[[-5,-3,-1]])
| 4,666 | 32.818841 | 123 | py |
ccc_mse_ser | ccc_mse_ser-master/code/ser_improv_paa_mse.py | # ser_improv_paa_mse.py
# speech emotion recognition for MSP-IMPROV dataset with pyAudioAnalysis
# HFS features using MSE-based loss function
# coded by Bagus Tris Atmaja ([email protected])
# changelog:
# 2020-02-13: Initial code, modified from deepMLP repo
import numpy as np
import pickle
import pandas as pd
import keras.backend as K
from keras.models import Model
from keras.layers import Input, Dense, CuDNNLSTM, Flatten, \
Embedding, Dropout, BatchNormalization, \
RNN, concatenate, Activation
from keras.callbacks import EarlyStopping
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from keras.preprocessing.text import Tokenizer
from keras.preprocessing import sequence
import random as rn
import tensorflow as tf
rn.seed(123)
np.random.seed(99)
tf.set_random_seed(1234)
# loading file and label
feat_train = np.load('/home/s1820002/ccc_mse/data/feat_hfs_paa_msp_train.npy')
feat_test = np.load('/home/s1820002/ccc_mse/data/feat_hfs_paa_msp_test.npy')
feat = np.vstack([feat_train, feat_test])
list_path = '/home/s1820002/msp-improv/helper/improv_data.csv'
list_file = pd.read_csv(list_path, index_col=None)
list_file = pd.DataFrame(list_file)
data = list_file.sort_values(by=['wavfile'])
vad_train = []
vad_test = []
for index, row in data.iterrows():
#print(row['wavfile'], row['v'], row['a'], row['d'])
if int(row['wavfile'][18]) in range(1,6):
#print("Process vad..", row['wavfile'])
vad_train.append([row['v'], row['a'], row['d']])
else:
#print("Process..", row['wavfile'])
vad_test.append([row['v'], row['a'], row['d']])
vad = np.vstack([vad_train, vad_test])
# standardization
scaled_feature = False
if scaled_feature:
scaler = StandardScaler()
scaler = scaler.fit(feat)
scaled_feat = scaler.transform(feat)
feat = scaled_feat
else:
feat = feat
scaled_vad = True
# standardization
if scaled_vad:
scaler = MinMaxScaler(feature_range=(-1, 1))
scaler = scaler.fit(vad) #.reshape(vad.shape[0]*vad.shape[1], vad.shape[2]))
scaled_vad = scaler.transform(vad) #.reshape(vad.shape[0]*vad.shape[1], vad.shape[2]))
vad = scaled_vad
else:
vad = vad
# reshape feat size to match LSTM config
feat = feat.reshape(feat.shape[0], 1, feat.shape[1])
# train/test split, LOSO
X_train = feat[:len(feat_train)]
X_test = feat[len(feat_train):]
y_train = vad[:len(vad_train)]
y_test = vad[len(vad_train):]
# Concordance correlation coefficient (CCC)-based loss function - using non-inductive statistics
def ccc(gold, pred):
gold = K.squeeze(gold, axis=-1)
pred = K.squeeze(pred, axis=-1)
gold_mean = K.mean(gold, axis=-1, keepdims=True)
pred_mean = K.mean(pred, axis=-1, keepdims=True)
covariance = (gold-gold_mean)*(pred-pred_mean)
gold_var = K.mean(K.square(gold-gold_mean), axis=-1, keepdims=True)
pred_var = K.mean(K.square(pred-pred_mean), axis=-1, keepdims=True)
ccc = K.constant(2.) * covariance / (gold_var + pred_var + K.square(gold_mean - pred_mean) + K.common.epsilon())
return ccc
def ccc_loss(gold, pred):
# input (num_batches, seq_len, 1)
ccc_loss = K.constant(1.) - ccc(gold, pred)
return ccc_loss
# API model; if using an RNN, the first two RNN layers must set return_sequences=True
def api_model():
inputs = Input(shape=(feat.shape[1], feat.shape[2]), name='feat_input')
net = BatchNormalization()(inputs)
#net = Bidirectional(LSTM(64, return_sequences=True, dropout=do, recurrent_dropout=do))(net)
net = CuDNNLSTM(feat.shape[2], return_sequences=True)(net)
net = CuDNNLSTM(256, return_sequences=True)(net)
net = CuDNNLSTM(256, return_sequences=False)(net)
net = Dense(64)(net)
#net = Dropout(0.1)(net)
target_names = ('v', 'a', 'd')
outputs = [Dense(1, name=name, activation='tanh')(net) for name in target_names]
model = Model(inputs=inputs, outputs=outputs) #=[out1, out2, out3])
model.compile(loss='mse', #{'v': ccc_loss, 'a': ccc_loss, 'd': ccc_loss},
loss_weights={'v': 0.3, 'a': 0.6, 'd': 0.1},
optimizer='rmsprop', metrics=[ccc, 'mse'])
return model
model2 = api_model()
model2.summary()
earlystop = EarlyStopping(monitor='val_loss', mode='min', patience=10, restore_best_weights=True)
hist = model2.fit(X_train, y_train.T.tolist(), batch_size=64,
validation_split=0.2, epochs=50, verbose=1, shuffle=True,
callbacks=[earlystop])
metrik = model2.evaluate(X_test, y_test.T.tolist())
print('CCC= ', np.array(metrik)[[-6,-4,-2]])
print('MSE= ', np.array(metrik)[[-5,-3,-1]])
| 4,663 | 32.797101 | 123 | py |
ccc_mse_ser | ccc_mse_ser-master/code/ser_improv_gemaps_ccc.py | # ser_improv_gemaps_ccc.py
# speech emotion recognition for MSP-IMPROV dataset with GeMAPS
# HFS features using CCC-based loss function
# coded by Bagus Tris Atmaja ([email protected])
# changelog:
# 2020-02-13: Initial code, modified from deepMLP repo
import numpy as np
import pickle
import pandas as pd
import keras.backend as K
from keras.models import Model
from keras.layers import Input, Dense, CuDNNLSTM, Flatten, \
Embedding, Dropout, BatchNormalization, \
RNN, concatenate, Activation
from keras.callbacks import EarlyStopping
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from keras.preprocessing.text import Tokenizer
from keras.preprocessing import sequence
import random as rn
import tensorflow as tf
rn.seed(123)
np.random.seed(99)
tf.set_random_seed(1234)
# loading file and label
feat_train = np.load('/home/s1820002/ccc_mse/data/feat_hfs_gemaps_msp_train.npy')
feat_test = np.load('/home/s1820002/ccc_mse/data/feat_hfs_gemaps_msp_test.npy')
feat = np.vstack([feat_train, feat_test])
list_path = '/home/s1820002/msp-improv/helper/improv_data.csv'
list_file = pd.read_csv(list_path, index_col=None)
list_file = pd.DataFrame(list_file)
data = list_file.sort_values(by=['wavfile'])
vad_train = []
vad_test = []
for index, row in data.iterrows():
#print(row['wavfile'], row['v'], row['a'], row['d'])
if int(row['wavfile'][18]) in range(1,6):
#print("Process vad..", row['wavfile'])
vad_train.append([row['v'], row['a'], row['d']])
else:
#print("Process..", row['wavfile'])
vad_test.append([row['v'], row['a'], row['d']])
vad = np.vstack([vad_train, vad_test])
# standardization
scaled_feature = True
if scaled_feature:
scaler = StandardScaler()
scaler = scaler.fit(feat)
scaled_feat = scaler.transform(feat)
feat = scaled_feat
else:
feat = feat
scaled_vad = True
# standardization
if scaled_vad:
scaler = MinMaxScaler(feature_range=(-1, 1))
scaler = scaler.fit(vad) #.reshape(vad.shape[0]*vad.shape[1], vad.shape[2]))
scaled_vad = scaler.transform(vad) #.reshape(vad.shape[0]*vad.shape[1], vad.shape[2]))
vad = scaled_vad
else:
vad = vad
# reshape feat size to match LSTM config
feat = feat.reshape(feat.shape[0], 1, feat.shape[1])
# train/test split, LOSO
X_train = feat[:len(feat_train)]
X_test = feat[len(feat_train):]
y_train = vad[:len(vad_train)]
y_test = vad[len(vad_train):]
# Concordance correlation coefficient (CCC)-based loss function - using non-inductive statistics
def ccc(gold, pred):
gold = K.squeeze(gold, axis=-1)
pred = K.squeeze(pred, axis=-1)
gold_mean = K.mean(gold, axis=-1, keepdims=True)
pred_mean = K.mean(pred, axis=-1, keepdims=True)
covariance = (gold-gold_mean)*(pred-pred_mean)
gold_var = K.mean(K.square(gold-gold_mean), axis=-1, keepdims=True)
pred_var = K.mean(K.square(pred-pred_mean), axis=-1, keepdims=True)
ccc = K.constant(2.) * covariance / (gold_var + pred_var + K.square(gold_mean - pred_mean) + K.common.epsilon())
return ccc
def ccc_loss(gold, pred):
# input (num_batches, seq_len, 1)
ccc_loss = K.constant(1.) - ccc(gold, pred)
return ccc_loss
# API model; if using an RNN, the first two RNN layers must set return_sequences=True
def api_model():
inputs = Input(shape=(feat.shape[1], feat.shape[2]), name='feat_input')
net = BatchNormalization()(inputs)
net = CuDNNLSTM(feat.shape[2], return_sequences=True)(net)
net = CuDNNLSTM(256, return_sequences=True)(net)
net = CuDNNLSTM(256, return_sequences=False)(net)
net = Dense(64)(net)
target_names = ('v', 'a', 'd')
outputs = [Dense(1, name=name, activation='tanh')(net) for name in target_names]
model = Model(inputs=inputs, outputs=outputs) #=[out1, out2, out3])
model.compile(loss=ccc_loss, #{'v': ccc_loss, 'a': ccc_loss, 'd': ccc_loss},
loss_weights={'v': 0.3, 'a': 0.6, 'd': 0.1},
optimizer='rmsprop', metrics=[ccc, 'mse'])
return model
model2 = api_model()
model2.summary()
earlystop = EarlyStopping(monitor='val_loss', mode='min', patience=10, restore_best_weights=True)
hist = model2.fit(X_train, y_train.T.tolist(), batch_size=64,
validation_split=0.2, epochs=50, verbose=1, shuffle=True,
callbacks=[earlystop])
metrik = model2.evaluate(X_test, y_test.T.tolist())
print('CCC= ', np.array(metrik)[[-6,-4,-2]])
print('MSE= ', np.array(metrik)[[-5,-3,-1]])
| 4,545 | 32.426471 | 123 | py |
ccc_mse_ser | ccc_mse_ser-master/code/ser_iemocap_paa_mse.py | # CSL Paper: Dimensional speech emotion recognition from acoustic and text
# Changelog:
# 2019-09-01: initial version
# 2019-10-06: optimizer MTL parameters with linear search (in progress)
# 2012-12-25: modified from ser_iemocap_loso_hfs.py
# feature is either std+mean or std+mean+silence (uncomment line 44)
import numpy as np
import pickle
import pandas as pd
import keras.backend as K
from keras.models import Model
from keras.layers import Input, Dense, CuDNNLSTM, Flatten, \
Embedding, Dropout, BatchNormalization, \
RNN, concatenate, Activation
from keras.callbacks import EarlyStopping
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from keras.preprocessing.text import Tokenizer
from keras.preprocessing import sequence
import random as rn
import tensorflow as tf
rn.seed(123)
np.random.seed(99)
tf.set_random_seed(1234)
# load feature and labels
feat = np.load('/home/s1820002/atsit/data/feat_34_hfs.npy')
vad = np.load('/home/s1820002/IEMOCAP-Emotion-Detection/y_egemaps.npy')
# for LSTM input shape (batch, steps, features/channel)
#feat = feat.reshape(feat.shape[0], 1, feat.shape[1])
# remove outlier, < 1, > 5
vad = np.where(vad==5.5, 5.0, vad)
vad = np.where(vad==0.5, 1.0, vad)
# standardization
scaled_feature = False
# set Dropout
do = 0.3
if scaled_feature == True:
scaler = StandardScaler()
scaler = scaler.fit(feat.reshape(feat.shape[0]*feat.shape[1], feat.shape[2]))
scaled_feat = scaler.transform(feat.reshape(feat.shape[0]*feat.shape[1], feat.shape[2]))
scaled_feat = scaled_feat.reshape(feat.shape[0], feat.shape[1], feat.shape[2])
feat = scaled_feat
else:
feat = feat
scaled_vad = True
# standardization
if scaled_vad:
scaler = MinMaxScaler(feature_range=(-1, 1))
scaler = scaler.fit(vad) #.reshape(vad.shape[0]*vad.shape[1], vad.shape[2]))
scaled_vad = scaler.transform(vad) #.reshape(vad.shape[0]*vad.shape[1], vad.shape[2]))
vad = scaled_vad
else:
vad = vad
# Concordance correlation coefficient (CCC)-based loss function - using non-inductive statistics
def ccc(gold, pred):
gold = K.squeeze(gold, axis=-1)
pred = K.squeeze(pred, axis=-1)
gold_mean = K.mean(gold, axis=-1, keepdims=True)
pred_mean = K.mean(pred, axis=-1, keepdims=True)
covariance = (gold-gold_mean)*(pred-pred_mean)
gold_var = K.mean(K.square(gold-gold_mean), axis=-1, keepdims=True)
pred_var = K.mean(K.square(pred-pred_mean), axis=-1, keepdims=True)
ccc = K.constant(2.) * covariance / (gold_var + pred_var + K.square(gold_mean - pred_mean) + K.common.epsilon())
return ccc
def ccc_loss(gold, pred):
# input (num_batches, seq_len, 1)
ccc_loss = K.constant(1.) - ccc(gold, pred)
return ccc_loss
# API model; if using an RNN, the first two RNN layers must set return_sequences=True
def api_model(alpha, beta, gamma):
# speech network
input_speech = Input(shape=(feat.shape[1], feat.shape[2]), name='speech_input')
net_speech = BatchNormalization()(input_speech)
net_speech = CuDNNLSTM(feat.shape[2], return_sequences=True)(net_speech)
net_speech = CuDNNLSTM(256, return_sequences=True)(net_speech)
net_speech = CuDNNLSTM(256, return_sequences=False)(net_speech)
#net_speech = Flatten()(net_speech)
net_speech = Dense(64)(net_speech)
#net_speech = Dropout(0.1)(net_speech)
target_names = ('v', 'a', 'd')
model_combined = [Dense(1, name=name, activation='tanh')(net_speech) for name in target_names]
model = Model(input_speech, model_combined)
#model.compile(loss=ccc_loss, optimizer='rmsprop', metrics=[ccc])
model.compile(loss='mse',
loss_weights={'v': alpha, 'a': beta, 'd': gamma},
optimizer='rmsprop', metrics=[ccc, 'mse'])
return model
#def main(alpha, beta, gamma):
model = api_model(0.1, 0.5, 0.4)
model.summary()
# index 7869 marks the first utterance of session 5, which is held out (LOSO)
earlystop = EarlyStopping(monitor='val_loss', mode='min', patience=10,
restore_best_weights=True)
hist = model.fit(feat[:7869], vad[:7869].T.tolist(), batch_size=64, #best:8
validation_split=0.2, epochs=200, verbose=1, shuffle=True,
callbacks=[earlystop])
metrik = model.evaluate(feat[7869:], vad[7869:].T.tolist())
print(metrik)
print('CCC= ', np.array(metrik)[[-6,-4,-2]])
print('MSE= ', np.array(metrik)[[-5,-3,-1]])
| 4,452 | 35.203252 | 123 | py |
LRS3-For-Speech-Separation | LRS3-For-Speech-Separation-master/audio_process/audio_cut.py | '''
This script extracts the audio track from each
video and resamples it to 16 kHz.
'''
import os
import subprocess
from tqdm import tqdm
# Setting audio Parameters
sr = 16000 # sample rate
start_time = 0.0 # cut start time
length_time = 2.0 # cut audio length
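# each selected clip is extracted as a 16 kHz mono wav with ffmpeg and then
# trimmed with sox to a 2 s segment starting at start_time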
outpath = '../raw_audio'
os.makedirs(outpath, exist_ok=True)
train_mouth = []
val_mouth = []
test_mouth = []
train = open('../train.txt', 'r').readlines()
test = open('../test.txt', 'r').readlines()
val = open('../val.txt', 'r').readlines()
for l in tqdm(train):
l = l.replace('\n','')
train_mouth.append(l)
for l in tqdm(val):
l = l.replace('\n','')
val_mouth.append(l)
for l in tqdm(test):
l = l.replace('\n','')
test_mouth.append(l)
with open('../video_process/video_path.txt', 'r') as f:
lines = f.readlines()
for line in tqdm(lines):
if line != "":
line = line.replace('\n','')
l = line.split('/')[-2]+'_'+line.split('/')[-1].split('.')[0]
if l in train_mouth:
path = outpath+"/train"
os.makedirs(path, exist_ok=True)
command = ""
command += 'ffmpeg -i {} -f wav -ar {} -ac 1 {}/tmp_{}.wav;'.format(line, sr, path, l)
command += 'sox {}/tmp_{}.wav {}/{}.wav trim {} {};'.format(path, l, path, l, start_time, length_time)
command += 'rm {}/tmp_{}.wav;'.format(path, l)
p = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p.wait()
if l in test_mouth:
path = outpath+"/test"
os.makedirs(path, exist_ok=True)
command = ""
command += 'ffmpeg -i {} -f wav -ar {} -ac 1 {}/tmp_{}.wav;'.format(line, sr, path, l)
command += 'sox {}/tmp_{}.wav {}/{}.wav trim {} {};'.format(path, l, path, l, start_time, length_time)
command += 'rm {}/tmp_{}.wav;'.format(path, l)
p = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p.wait()
if l in val_mouth:
path = outpath+"/val"
os.makedirs(path, exist_ok=True)
command = ""
command += 'ffmpeg -i {} -f wav -ar {} -ac 1 {}/tmp_{}.wav;'.format(line, sr, path, l)
command += 'sox {}/tmp_{}.wav {}/{}.wav trim {} {};'.format(path, l, path, l, start_time, length_time)
command += 'rm {}/tmp_{}.wav;'.format(path, l)
p = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p.wait()
else:
pass
| 2,692 | 38.028986 | 118 | py |
LRS3-For-Speech-Separation | LRS3-For-Speech-Separation-master/audio_process/check_file.py | file = open('mix_2_spk_tt.txt', 'r').readlines()
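# count the distinct utterance paths referenced in the test mixture list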
lines = []
for l in file:
lines.append(l.replace('\n', ''))
non_same = []
for line in lines:
line = line.split(' ')
if line[0] not in non_same:
non_same.append(line[0])
if line[2] not in non_same:
non_same.append(line[2])
print(len(non_same)) | 328 | 18.352941 | 48 | py |
LRS3-For-Speech-Separation | LRS3-For-Speech-Separation-master/audio_process/audio_check.py | from tqdm import tqdm
file = open('mix_2_spk_tt.txt', 'r').readlines()
index = []
for line in file:
line = line.split(' ')
s1 = line[0].split('/')[-1]
s2 = line[2].replace('\n','').split('/')[-1]
if s1 not in index:
index.append(s1)
if s2 not in index:
index.append(s2)
print(len(index)) | 328 | 18.352941 | 48 | py |
LRS3-For-Speech-Separation | LRS3-For-Speech-Separation-master/audio_process/audio_path.py | '''
This script generates the txt files that define the
two-speaker mixtures; each line has the format:
spk1_path SNR1 spk2_path SNR2 (relative levels in dB).
'''
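# Example line (hypothetical paths):
#   ../raw_audio/train/spkA_00001.wav 2.34567 ../raw_audio/train/spkB_00042.wav -2.34567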
import os
import random
import decimal
# step1: get all audio path
train_audio = []
val_audio = []
test_audio = []
path = '/data2/likai/AV-Model-lrs3/AV_data/raw_audio'
for root, dirs, files in os.walk(path):
for file in files:
if 'train' in root:
train_audio.append(os.path.join(root, file))
if 'test' in root:
test_audio.append(os.path.join(root, file))
if 'val' in root:
val_audio.append(os.path.join(root, file))
random.shuffle(train_audio)
random.shuffle(test_audio)
random.shuffle(val_audio)
# step2: write path into file
tr_file = open('mix_2_spk_tr.txt', 'w')
cv_file = open('mix_2_spk_cv.txt', 'w')
tt_file = open('mix_2_spk_tt.txt', 'w')
# train data path
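# constraints: a pair of utterances is never reused, the two utterances must come
# from different speakers, and each utterance appears in at most 6 training mixtures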
index = 1
repeat_path = []
audio_use = {}
train_audio_c = train_audio.copy()
while True:
spk1 = random.choice(train_audio_c)
spk2 = random.choice(train_audio_c)
spk1_split = spk1.split('/')
spk2_split = spk2.split('/')
if spk1+spk2 not in repeat_path and spk2+spk1 not in repeat_path and spk1_split[-1].split('_')[0] != spk2_split[-1].split('_')[0]:
snr_1 = float(decimal.Decimal(random.randrange(0, 500000))/decimal.Decimal(100000))
snr_2 = -snr_1
line = spk1 + ' ' + str(snr_1) + ' ' + spk2 + ' ' + str(snr_2) + '\n'
if spk1 not in audio_use.keys():
audio_use[spk1] = 1
if spk2 not in audio_use.keys():
audio_use[spk2] = 1
if spk1 in audio_use.keys():
if audio_use[spk1] == 6:
train_audio_c.remove(spk1)
continue
if audio_use[spk1] != 6:
if audio_use[spk2] != 6:
audio_use[spk1] += 1
if audio_use[spk2] == 6:
train_audio_c.remove(spk2)
continue
if spk2 in audio_use.keys():
if audio_use[spk2] != 6:
audio_use[spk2] += 1
tr_file.write(line)
repeat_path.append(spk1+spk2)
repeat_path.append(spk2+spk1)
print('\r {}'.format(index), end='')
index += 1
if index == 50001:
print('\n')
break
tr_file.close()
# validation data path
index = 1
repeat_path = []
audio_use = {}
val_audio_c = val_audio.copy()
while True:
spk1 = random.choice(val_audio_c)
spk2 = random.choice(val_audio_c)
spk1_split = spk1.split('/')
spk2_split = spk2.split('/')
if spk1+spk2 not in repeat_path and spk2+spk1 not in repeat_path and spk1_split[-1].split('_')[0] != spk2_split[-1].split('_')[0]:
snr_1 = float(decimal.Decimal(random.randrange(0, 500000))/decimal.Decimal(100000))
snr_2 = -snr_1
line = spk1 + ' ' + str(snr_1) + ' ' + spk2 + ' ' + str(snr_2) + '\n'
if spk1 not in audio_use.keys():
audio_use[spk1] = 1
if spk2 not in audio_use.keys():
audio_use[spk2] = 1
if spk1 in audio_use.keys():
if audio_use[spk1] == 8:
val_audio_c.remove(spk1)
continue
else:
if audio_use[spk2] != 8:
audio_use[spk1] += 1
else:
val_audio_c.remove(spk2)
continue
if spk2 in audio_use.keys():
if audio_use[spk2] != 8:
audio_use[spk2] += 1
cv_file.write(line)
repeat_path.append(spk1+spk2)
repeat_path.append(spk2+spk1)
print('\r {}'.format(index), end='')
index += 1
if index == 5001:
print('\n')
break
cv_file.close()
# test data path
index = 1
repeat_path = []
audio_use = {}
test_audio_c = test_audio.copy()
while True:
spk1 = random.choice(test_audio_c)
spk2 = random.choice(test_audio_c)
spk1_split = spk1.split('/')
spk2_split = spk2.split('/')
if spk1+spk2 not in repeat_path and spk2+spk1 not in repeat_path and spk1_split[-1].split('_')[0] != spk2_split[-1].split('_')[0]:
snr_1 = float(decimal.Decimal(random.randrange(0, 500000))/decimal.Decimal(100000))
snr_2 = -snr_1
line = spk1 + ' ' + str(snr_1) + ' ' + spk2 + ' ' + str(snr_2) + '\n'
if spk1 not in audio_use.keys():
audio_use[spk1] = 1
if spk2 not in audio_use.keys():
audio_use[spk2] = 1
if spk1 in audio_use.keys():
if audio_use[spk1] == 22:
test_audio_c.remove(spk1)
continue
else:
if audio_use[spk2] != 22:
audio_use[spk1] += 1
else:
test_audio_c.remove(spk2)
continue
if spk2 in audio_use.keys():
if audio_use[spk2] != 22:
audio_use[spk2] += 1
tt_file.write(line)
repeat_path.append(spk1+spk2)
repeat_path.append(spk2+spk1)
print('\r {}'.format(index), end='')
index += 1
if index == 3001:
print('\n')
break
tt_file.close()
| 5,128 | 30.466258 | 134 | py |
LRS3-For-Speech-Separation | LRS3-For-Speech-Separation-master/audio_process/.ipynb_checkpoints/audio_mix-checkpoint.py | import os
import librosa
import numpy as np
from tqdm import tqdm
data_type = ['tr', 'cv', 'tt']
dataroot = '../raw_audio'
output_dir16k = '../audio_mouth/2speakers/wav16k'
output_dir8k = '../audio_mouth/2speakers/wav8k'
# create data path
for i_type in data_type:
# 16k
os.makedirs(os.path.join(output_dir16k, i_type, 's1'), exist_ok=True)
os.makedirs(os.path.join(output_dir16k, i_type, 's2'), exist_ok=True)
os.makedirs(os.path.join(output_dir16k, i_type, 'mix'), exist_ok=True)
s1_16k_path = os.path.join(output_dir16k, i_type, 's1')
s2_16k_path = os.path.join(output_dir16k, i_type, 's2')
mix_16k_path = os.path.join(output_dir16k, i_type, 'mix')
# 8k
os.makedirs(os.path.join(output_dir8k, i_type, 's1'), exist_ok=True)
os.makedirs(os.path.join(output_dir8k, i_type, 's2'), exist_ok=True)
os.makedirs(os.path.join(output_dir8k, i_type, 'mix'), exist_ok=True)
s1_8k_path = os.path.join(output_dir8k, i_type, 's1')
s2_8k_path = os.path.join(output_dir8k, i_type, 's2')
mix_8k_path = os.path.join(output_dir8k, i_type, 'mix')
# open file
file = open('mix_2_spk_'+i_type+'.txt', 'r')
lines = file.readlines()
sr8k = 8000
sr16k = 16000
for line in tqdm(lines):
if line != '':
line = line.replace('\n', '').split(' ')
spk1 = line[0]
spk2 = line[2]
snr1 = float(line[1])
snr2 = float(line[3])
mix_name = spk1.split(
'/')[-1].split('.')[0]+'_'+str(snr1)+'_'+spk2.split('/')[-1].split('.')[0]+'_'+str(snr2)+'.wav'
# reading audio
s1_8k, _ = librosa.load(spk1, sr8k)
s2_8k, _ = librosa.load(spk2, sr8k)
s1_16k, _ = librosa.load(spk1, sr16k)
s2_16k, _ = librosa.load(spk2, sr16k)
# getting weight
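            # a level of x dB corresponds to an amplitude gain of 10^(x/20),
            # e.g. +6 dB is roughly a factor of 2 and 0 dB leaves the signal unchanged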
weight_1 = np.power(10, snr1/20)
weight_2 = np.power(10, snr2/20)
# weight * audio
s1_8k = weight_1 * s1_8k
s2_8k = weight_2 * s2_8k
s1_16k = weight_1 * s1_16k
s2_16k = weight_2 * s2_16k
# mix audio
mix_8k = s1_8k + s2_8k
mix_16k = s1_16k + s2_16k
# save audio
librosa.output.write_wav(os.path.join(s1_8k_path, mix_name), s1_8k, sr8k)
librosa.output.write_wav(os.path.join(s2_8k_path, mix_name), s2_8k, sr8k)
librosa.output.write_wav(os.path.join(mix_8k_path, mix_name), mix_8k, sr8k)
librosa.output.write_wav(os.path.join(s1_16k_path, mix_name), s1_16k, sr16k)
librosa.output.write_wav(os.path.join(s2_16k_path, mix_name), s2_16k, sr16k)
librosa.output.write_wav(os.path.join(mix_16k_path, mix_name), mix_16k, sr16k)
else:
pass
| 2,795 | 41.363636 | 111 | py |
LRS3-For-Speech-Separation | LRS3-For-Speech-Separation-master/video_process/video_process.py | import cv2
import os
import matplotlib.pyplot as plt
import dlib
import numpy as np
from tqdm import tqdm
import subprocess
import face_recognition
def get_frames(pathlist, fps=25):
# pathlist type list
for path in tqdm(pathlist):
index = path.split('/')[-2]+'_'+path.split('/')[-1].split('.')[0]
os.makedirs('../frames/{}'.format(index), exist_ok=True)
command = subprocess.Popen('ffmpeg -i {} -vf fps={} ../frames/{}/%02d.png;'.format(
path, fps, index), shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
command.wait()
def detect_mouth(inpath, outpath, fps=25, time=2):
'''
inpath: file path of frames
outpath: file path of mouth
fps: video fps
time: video time
'''
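    # Pipeline: CNN face detection -> lip landmarks (top_lip + bottom_lip) ->
    # bounding box grown by a fixed border -> grayscale 120x120 mouth crop per frame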
# Dlib requirements.
#predictor_path = '../mouth_detector/shape_predictor_68_face_landmarks.dat'
#detector = dlib.get_frontal_face_detector()
#predictor = dlib.shape_predictor(predictor_path)
# folders getting
folder = os.listdir(inpath)
#folder = ['1']
for f in tqdm(folder):
# Required parameters for mouth extraction.
width_crop_max = 0
height_crop_max = 0
index = 1
while True:
path = os.path.join(inpath, f, '{:02d}.png'.format(index))
# Load the jpg file into a numpy array
image = face_recognition.load_image_file(path)
face_locations = face_recognition.face_locations(
image, number_of_times_to_upsample=0, model="cnn")
# Find all facial features in all the faces in the image
face_landmarks_list = face_recognition.face_landmarks(
image, face_locations=face_locations)
if len(face_locations) == 0:
break
# mkdir path
os.makedirs(os.path.join(outpath, f), exist_ok=True)
frame = cv2.imread(path)
h, w, _ = frame.shape
            # 24 lip landmark points (12 top-lip + 12 bottom-lip)
marks = np.zeros((2, 24))
for face_landmarks in face_landmarks_list:
                # Collect the lip landmark coordinates for this face
co = 0
for facial_feature in face_landmarks.keys():
if facial_feature == 'top_lip':
lip = face_landmarks[facial_feature]
for i in lip:
marks[0, co] = i[0]
marks[1, co] = i[1]
co += 1
if facial_feature == 'bottom_lip':
lip = face_landmarks[facial_feature]
for i in lip:
marks[0, co] = i[0]
marks[1, co] = i[1]
co += 1
# Get the extreme points(top-left & bottom-right)
X_left, Y_left, X_right, Y_right = [int(np.amin(marks, axis=1)[0]), int(np.amin(marks, axis=1)[1]),
int(np.amax(marks, axis=1)[0]),
int(np.amax(marks, axis=1)[1])]
# Find the center of the mouth.
X_center = (X_left + X_right) / 2.0
Y_center = (Y_left + Y_right) / 2.0
            # Make a border for cropping.
border = 30
X_left_new = X_left - border
Y_left_new = Y_left - border
X_right_new = X_right + border
Y_right_new = Y_right + border
# Width and height for cropping(before and after considering the border).
width_new = X_right_new - X_left_new
height_new = Y_right_new - Y_left_new
width_current = X_right - X_left
height_current = Y_right - Y_left
# Determine the cropping rectangle dimensions(the main purpose is to have a fixed area).
if width_crop_max == 0 and height_crop_max == 0:
width_crop_max = width_new
height_crop_max = height_new
else:
width_crop_max += 1.5 * \
np.maximum(width_current - width_crop_max, 0)
height_crop_max += 1.5 * \
np.maximum(height_current - height_crop_max, 0)
# # # Uncomment if the lip area is desired to be rectangular # # # #
#########################################################
# Find the cropping points(top-left and bottom-right).
X_left_crop = int(X_center - width_crop_max / 2.0)
X_right_crop = int(X_center + width_crop_max / 2.0)
Y_left_crop = int(Y_center - height_crop_max / 2.0)
Y_right_crop = int(Y_center + height_crop_max / 2.0)
if X_left_crop >= 0 and Y_left_crop >= 0 and X_right_crop < w and Y_right_crop < h:
mouth = frame[Y_left_crop:Y_right_crop,
X_left_crop:X_right_crop, :]
# Save the mouth area.
mouth_gray = cv2.cvtColor(mouth, cv2.COLOR_BGR2GRAY)
mouth_gray = cv2.resize(mouth_gray, (120, 120))
cv2.imwrite(os.path.join(
outpath, f, "{:02d}.png".format(index)), mouth_gray)
else:
pass
index += 1
if index == 51:
break
def resize_img(path, size):
for root, dirs, files in tqdm(os.walk(path)):
for file in files:
if file.endswith('png'):
f = os.path.join(root, file)
img = cv2.imread(f)
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img_gray = cv2.resize(img_gray, size)
cv2.imwrite(f, img_gray)
def file_to_path(filename, root):
lines = open(filename, 'r').readlines()
filename_list = []
for file in lines:
file = file.replace('\n', '').split('_')
filename_list.append(os.path.join(root, file[0], file[1]+'.mp4'))
return filename_list
if __name__ == "__main__":
video_path = 'valid_mouth.txt'
pathlist = file_to_path(video_path, '../lrs3')
inpath = '../frames'
outpath = '../mouth'
change_root = '../frames'
print('-------------Getting video frames-------------')
get_frames(pathlist)
print('--------------Detection the mouth-------------')
detect_mouth(inpath, outpath)
print('--------------Resize the frames-------------')
resize_img(change_root, (120, 120))
| 6,490 | 39.56875 | 111 | py |
LRS3-For-Speech-Separation | LRS3-For-Speech-Separation-master/video_process/video_to_np.py | '''
reading image file to npz file
'''
import numpy as np
import os
import cv2
from tqdm import tqdm
root = '../mouth'
save_path = '../npz'
#save_path = './'
os.makedirs(save_path, exist_ok=True)
mouth = open('valid_mouth.txt', 'r').readlines()
if_use_mouth = True # if use mouth image(True: use mouth, False: use frames)
# get valid mouth file
valid_mouth = []
for m in mouth:
m = m.replace('\n', '').split('_')
m = m[0]+'_'+m[1]
valid_mouth.append(m)
train_mouth = []
val_mouth = []
test_mouth = []
train = open('../train.txt', 'r').readlines()
test = open('../test.txt', 'r').readlines()
val = open('../val.txt', 'r').readlines()
for l in tqdm(train):
train_mouth.append(l.replace('\n',''))
for l in tqdm(test):
test_mouth.append(l.replace('\n',''))
for l in tqdm(val):
val_mouth.append(l.replace('\n',''))
folder = os.listdir(root)
#folder = ['00039_true']
mean = 0
std = 0
index = 0
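# mean/std are accumulated over every saved frame so that dataset-level
# normalisation statistics can be printed at the end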
for f in tqdm(folder):
if f in train_mouth:
path = os.path.join(root, f)
frames = []
for i in range(1, 51):
img_path = os.path.join(path, "{:02d}.png".format(i))
img = cv2.imread(img_path, 0)
img = img / 255
frames.append(img)
m, s = cv2.meanStdDev(img) # m: mean, s:std
mean += float(m)
std += float(s)
index += 1
frames = np.array(frames)
os.makedirs(os.path.join(save_path, 'train'),exist_ok=True)
np.savez(os.path.join(save_path, 'train', '{}.npz'.format(f)), data=frames)
if f in test_mouth:
path = os.path.join(root, f)
frames = []
for i in range(1, 51):
img_path = os.path.join(path, "{:02d}.png".format(i))
img = cv2.imread(img_path, 0)
img = img / 255
frames.append(img)
m, s = cv2.meanStdDev(img) # m: mean, s:std
mean += float(m)
std += float(s)
index += 1
frames = np.array(frames)
os.makedirs(os.path.join(save_path, 'test'),exist_ok=True)
np.savez(os.path.join(save_path, 'test', '{}.npz'.format(f)), data=frames)
if f in val_mouth:
path = os.path.join(root, f)
frames = []
for i in range(1, 51):
img_path = os.path.join(path, "{:02d}.png".format(i))
img = cv2.imread(img_path, 0)
img = img / 255
frames.append(img)
m, s = cv2.meanStdDev(img) # m: mean, s:std
mean += float(m)
std += float(s)
index += 1
frames = np.array(frames)
os.makedirs(os.path.join(save_path, 'val'),exist_ok=True)
np.savez(os.path.join(save_path, 'val', '{}.npz'.format(f)), data=frames)
print('mean: {:06f}, std: {:06f}'.format(mean/index, std/index)) | 2,814 | 28.631579 | 83 | py |
LRS3-For-Speech-Separation | LRS3-For-Speech-Separation-master/video_process/.ipynb_checkpoints/video_to_np-checkpoint.py | '''
reading image file to npz file
'''
import numpy as np
import os
import cv2
from tqdm import tqdm
root = '../mouth'
#save_path = '../video_mouth'
save_path = '../video/'
os.makedirs(save_path, exist_ok=True)
mouth = open('valid_mouth.txt', 'r').readlines()
if_use_mouth = True # if use mouth image(True: use mouth, False: use frames)
# get valid mouth file
valid_mouth = []
for m in mouth:
m = m.replace('\n', '').split('_')
m = m[0]+'_'+m[1]
valid_mouth.append(m)
'''
train_mouth = []
val_mouth = []
test_mouth = []
train = open('../train.txt', 'r').readlines()
test = open('../test.txt', 'r').readlines()
val = open('../val.txt', 'r').readlines()
for l in tqdm(train):
l = l.replace('\n','').split('/')
l = l[0]+'_'+l[1]
if l in valid_mouth:
train_mouth.append(l)
for l in tqdm(val):
l = l.replace('\n','').split('/')
l = l[0]+'_'+l[1]
if l in valid_mouth:
val_mouth.append(l)
for l in tqdm(test):
l = l.split(' ')[0].split('/')
l = l[0]+'_'+l[1]
if l in valid_mouth:
test_mouth.append(l)
'''
folder = os.listdir(root)
#folder = ['5']
mean = 0
std = 0
index = 0
for f in tqdm(folder):
if f in valid_mouth:
path = os.path.join(root, f)
frames = []
for i in range(1, 51):
img_path = os.path.join(path, "{:02d}.png".format(i))
img = cv2.imread(img_path, 0)
img = img / 255
frames.append(img)
m, s = cv2.meanStdDev(img) # m: mean, s:std
mean += float(m)
std += float(s)
index += 1
frames = np.array(frames)
os.makedirs(os.path.join(save_path),exist_ok=True)
np.savez(os.path.join(save_path, '{}.npz'.format(f)), data=frames)
'''
if f in test_mouth:
path = os.path.join(root, f)
frames = []
for i in range(1, 51):
img_path = os.path.join(path, "{:02d}.png".format(i))
img = cv2.imread(img_path, 0)
img = img / 255
frames.append(img)
m, s = cv2.meanStdDev(img) # m: mean, s:std
mean += float(m)
std += float(s)
index += 1
frames = np.array(frames)
os.makedirs(os.path.join(save_path, 'test'),exist_ok=True)
np.savez(os.path.join(save_path, 'test', '{}.npz'.format(f)), data=frames)
if f in val_mouth:
path = os.path.join(root, f)
frames = []
for i in range(1, 51):
img_path = os.path.join(path, "{:02d}.png".format(i))
img = cv2.imread(img_path, 0)
img = img / 255
frames.append(img)
m, s = cv2.meanStdDev(img) # m: mean, s:std
mean += float(m)
std += float(s)
index += 1
frames = np.array(frames)
os.makedirs(os.path.join(save_path, 'val'),exist_ok=True)
np.savez(os.path.join(save_path, 'val', '{}.npz'.format(f)), data=frames)
'''
print('mean: {:06f}, std: {:06f}'.format(mean/index, std/index)) | 3,037 | 27.933333 | 82 | py |
LRS3-For-Speech-Separation | LRS3-For-Speech-Separation-master/video_process/.ipynb_checkpoints/video_process-checkpoint.py | import cv2
import os
import matplotlib.pyplot as plt
import dlib
import numpy as np
from tqdm import tqdm
import subprocess
import face_recognition
def get_frames(file, fps=25):
# file: path of video txt
pathlist = []
with open(file, 'r') as f:
lines = f.readlines()
for line in lines:
if line != '':
line = line.replace('\n', '')
pathlist.append(line)
# pathlist type list
for path in tqdm(pathlist):
index = path.split('/')[-2]+'_'+path.split('/')[-1].split('.')[0]
os.makedirs('../frames/{}'.format(index), exist_ok=True)
command = subprocess.Popen('ffmpeg -i {} -vf fps={} ../frames/{}/%02d.png;'.format(
path, fps, index), shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
command.wait()
def detect_mouth(inpath, outpath, fps=25, time=2):
'''
inpath: file path of frames
outpath: file path of mouth
fps: video fps
time: video time
'''
# Dlib requirements.
#predictor_path = '../mouth_detector/shape_predictor_68_face_landmarks.dat'
#detector = dlib.get_frontal_face_detector()
#predictor = dlib.shape_predictor(predictor_path)
# folders getting
folder = os.listdir(inpath)
#folder = ['1']
for f in tqdm(folder):
# Required parameters for mouth extraction.
width_crop_max = 0
height_crop_max = 0
index = 1
while True:
path = os.path.join(inpath, f, '{:02d}.png'.format(index))
# Load the jpg file into a numpy array
image = face_recognition.load_image_file(path)
face_locations = face_recognition.face_locations(
image, number_of_times_to_upsample=0, model="cnn")
# Find all facial features in all the faces in the image
face_landmarks_list = face_recognition.face_landmarks(
image, face_locations=face_locations)
if len(face_locations) == 0:
break
# mkdir path
os.makedirs(os.path.join(outpath, f), exist_ok=True)
frame = cv2.imread(path)
h, w, _ = frame.shape
            # 24 lip landmark points (12 top-lip + 12 bottom-lip)
marks = np.zeros((2, 24))
for face_landmarks in face_landmarks_list:
                # Collect the lip landmark coordinates for this face
co = 0
for facial_feature in face_landmarks.keys():
if facial_feature == 'top_lip':
lip = face_landmarks[facial_feature]
for i in lip:
marks[0, co] = i[0]
marks[1, co] = i[1]
co += 1
if facial_feature == 'bottom_lip':
lip = face_landmarks[facial_feature]
for i in lip:
marks[0, co] = i[0]
marks[1, co] = i[1]
co += 1
# Get the extreme points(top-left & bottom-right)
X_left, Y_left, X_right, Y_right = [int(np.amin(marks, axis=1)[0]), int(np.amin(marks, axis=1)[1]),
int(np.amax(marks, axis=1)[0]),
int(np.amax(marks, axis=1)[1])]
# Find the center of the mouth.
X_center = (X_left + X_right) / 2.0
Y_center = (Y_left + Y_right) / 2.0
            # Make a border for cropping.
border = 30
X_left_new = X_left - border
Y_left_new = Y_left - border
X_right_new = X_right + border
Y_right_new = Y_right + border
# Width and height for cropping(before and after considering the border).
width_new = X_right_new - X_left_new
height_new = Y_right_new - Y_left_new
width_current = X_right - X_left
height_current = Y_right - Y_left
# Determine the cropping rectangle dimensions(the main purpose is to have a fixed area).
if width_crop_max == 0 and height_crop_max == 0:
width_crop_max = width_new
height_crop_max = height_new
else:
width_crop_max += 1.5 * \
np.maximum(width_current - width_crop_max, 0)
height_crop_max += 1.5 * \
np.maximum(height_current - height_crop_max, 0)
# # # Uncomment if the lip area is desired to be rectangular # # # #
#########################################################
# Find the cropping points(top-left and bottom-right).
X_left_crop = int(X_center - width_crop_max / 2.0)
X_right_crop = int(X_center + width_crop_max / 2.0)
Y_left_crop = int(Y_center - height_crop_max / 2.0)
Y_right_crop = int(Y_center + height_crop_max / 2.0)
if X_left_crop >= 0 and Y_left_crop >= 0 and X_right_crop < w and Y_right_crop < h:
mouth = frame[Y_left_crop:Y_right_crop,
X_left_crop:X_right_crop, :]
# Save the mouth area.
mouth_gray = cv2.cvtColor(mouth, cv2.COLOR_BGR2GRAY)
mouth_gray = cv2.resize(mouth_gray, (120, 120))
cv2.imwrite(os.path.join(
outpath, f, "{:02d}.png".format(index)), mouth_gray)
else:
pass
index += 1
if index == 51:
break
def resize_img(path, size):
for root, dirs, files in tqdm(os.walk(path)):
for file in files:
if file.endswith('png'):
f = os.path.join(root, file)
img = cv2.imread(f)
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img_gray = cv2.resize(img_gray, size)
cv2.imwrite(f, img_gray)
if __name__ == "__main__":
video_path = 'video_path.txt'
inpath = '../frames'
outpath = '../mouth'
change_root = '../frames'
print('-------------Getting video frames-------------')
get_frames('video_path.txt')
print('--------------Detection the mouth-------------')
detect_mouth(inpath, outpath)
print('--------------Resize the frames-------------')
resize_img(change_root, (120, 120))
| 6,427 | 39.683544 | 111 | py |
LRS3-For-Speech-Separation | LRS3-For-Speech-Separation-master/video_process/.ipynb_checkpoints/check_mouth-checkpoint.py | '''
Check if all mouths contain 50 frames.
'''
import os
from tqdm import tqdm
file = open('valid_mouth.txt', 'w')
mouth = '../mouth'
folder = os.listdir(mouth)
for f in tqdm(folder):
flag = True
for i in range(1, 51):
fi = os.path.join(mouth, f, '{:02d}.png').format(i)
if not os.path.exists(fi):
flag = False
if flag:
file.write(str(f)+'\n')
else:
pass
file.close()
| 434 | 18.772727 | 59 | py |
LDU | LDU-main/monocular_depth_estimation/utils/eval_with_pngs.py | # Copyright (C) 2019 Jin Han Lee
#
# This file is a part of BTS.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
from __future__ import absolute_import, division, print_function
import os
import argparse
import fnmatch
import cv2
import numpy as np
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'
def convert_arg_line_to_args(arg_line):
for arg in arg_line.split():
if not arg.strip():
continue
yield arg
parser = argparse.ArgumentParser(description='BTS TensorFlow implementation.', fromfile_prefix_chars='@')
parser.convert_arg_line_to_args = convert_arg_line_to_args
parser.add_argument('--pred_path', type=str, help='path to the prediction results in png', required=True)
parser.add_argument('--gt_path', type=str, help='root path to the groundtruth data', required=False)
parser.add_argument('--dataset', type=str, help='dataset to test on, nyu or kitti', default='nyu')
parser.add_argument('--eigen_crop', help='if set, crops according to Eigen NIPS14', action='store_true')
parser.add_argument('--garg_crop', help='if set, crops according to Garg ECCV16', action='store_true')
parser.add_argument('--min_depth_eval', type=float, help='minimum depth for evaluation', default=1e-3)
parser.add_argument('--max_depth_eval', type=float, help='maximum depth for evaluation', default=80)
parser.add_argument('--do_kb_crop', help='if set, crop input images as kitti benchmark images', action='store_true')
args = parser.parse_args()
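# Standard monocular depth metrics: d1/d2/d3 are threshold accuracies (fraction of
# pixels with max(gt/pred, pred/gt) < 1.25**k), silog is the scale-invariant log
# RMSE (x100), and log10 is the mean absolute log10 error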
def compute_errors(gt, pred):
thresh = np.maximum((gt / pred), (pred / gt))
d1 = (thresh < 1.25).mean()
d2 = (thresh < 1.25 ** 2).mean()
d3 = (thresh < 1.25 ** 3).mean()
rmse = (gt - pred) ** 2
rmse = np.sqrt(rmse.mean())
rmse_log = (np.log(gt) - np.log(pred)) ** 2
rmse_log = np.sqrt(rmse_log.mean())
abs_rel = np.mean(np.abs(gt - pred) / gt)
sq_rel = np.mean(((gt - pred)**2) / gt)
err = np.log(pred) - np.log(gt)
silog = np.sqrt(np.mean(err ** 2) - np.mean(err) ** 2) * 100
err = np.abs(np.log10(pred) - np.log10(gt))
log10 = np.mean(err)
return silog, log10, abs_rel, sq_rel, rmse, rmse_log, d1, d2, d3
def test():
global gt_depths, missing_ids, pred_filenames
gt_depths = []
missing_ids = set()
pred_filenames = []
for root, dirnames, filenames in os.walk(args.pred_path):
for pred_filename in fnmatch.filter(filenames, '*.png'):
if 'cmap' in pred_filename or 'gt' in pred_filename:
continue
dirname = root.replace(args.pred_path, '')
pred_filenames.append(os.path.join(dirname, pred_filename))
num_test_samples = len(pred_filenames)
pred_depths = []
for i in range(num_test_samples):
pred_depth_path = os.path.join(args.pred_path, pred_filenames[i])
pred_depth = cv2.imread(pred_depth_path, -1)
if pred_depth is None:
print('Missing: %s ' % pred_depth_path)
missing_ids.add(i)
continue
if args.dataset == 'nyu':
pred_depth = pred_depth.astype(np.float32) / 1000.0
else:
pred_depth = pred_depth.astype(np.float32) / 256.0
pred_depths.append(pred_depth)
print('Raw png files reading done')
print('Evaluating {} files'.format(len(pred_depths)))
if args.dataset == 'kitti':
for t_id in range(num_test_samples):
file_dir = pred_filenames[t_id].split('.')[0]
filename = file_dir.split('_')[-1]
directory = file_dir.replace('_' + filename, '')
gt_depth_path = os.path.join(args.gt_path, directory, 'proj_depth/groundtruth/image_02', filename + '.png')
depth = cv2.imread(gt_depth_path, -1)
if depth is None:
print('Missing: %s ' % gt_depth_path)
missing_ids.add(t_id)
continue
depth = depth.astype(np.float32) / 256.0
gt_depths.append(depth)
elif args.dataset == 'nyu':
for t_id in range(num_test_samples):
file_dir = pred_filenames[t_id].split('.')[0]
filename = file_dir.split('_')[-1]
directory = file_dir.replace('_rgb_'+file_dir.split('_')[-1], '')
gt_depth_path = os.path.join(args.gt_path, directory, 'sync_depth_' + filename + '.png')
depth = cv2.imread(gt_depth_path, -1)
if depth is None:
print('Missing: %s ' % gt_depth_path)
missing_ids.add(t_id)
continue
depth = depth.astype(np.float32) / 1000.0
gt_depths.append(depth)
print('GT files reading done')
print('{} GT files missing'.format(len(missing_ids)))
print('Computing errors')
eval(pred_depths)
print('Done.')
def eval(pred_depths):
num_samples = len(pred_depths)
pred_depths_valid = []
i = 0
for t_id in range(num_samples):
if t_id in missing_ids:
continue
pred_depths_valid.append(pred_depths[t_id])
num_samples = num_samples - len(missing_ids)
silog = np.zeros(num_samples, np.float32)
log10 = np.zeros(num_samples, np.float32)
rms = np.zeros(num_samples, np.float32)
log_rms = np.zeros(num_samples, np.float32)
abs_rel = np.zeros(num_samples, np.float32)
sq_rel = np.zeros(num_samples, np.float32)
d1 = np.zeros(num_samples, np.float32)
d2 = np.zeros(num_samples, np.float32)
d3 = np.zeros(num_samples, np.float32)
for i in range(num_samples):
gt_depth = gt_depths[i]
pred_depth = pred_depths_valid[i]
pred_depth[pred_depth < args.min_depth_eval] = args.min_depth_eval
pred_depth[pred_depth > args.max_depth_eval] = args.max_depth_eval
pred_depth[np.isinf(pred_depth)] = args.max_depth_eval
gt_depth[np.isinf(gt_depth)] = 0
gt_depth[np.isnan(gt_depth)] = 0
valid_mask = np.logical_and(gt_depth > args.min_depth_eval, gt_depth < args.max_depth_eval)
if args.do_kb_crop:
height, width = gt_depth.shape
top_margin = int(height - 352)
left_margin = int((width - 1216) / 2)
pred_depth_uncropped = np.zeros((height, width), dtype=np.float32)
pred_depth_uncropped[top_margin:top_margin + 352, left_margin:left_margin + 1216] = pred_depth
pred_depth = pred_depth_uncropped
if args.garg_crop or args.eigen_crop:
gt_height, gt_width = gt_depth.shape
eval_mask = np.zeros(valid_mask.shape)
if args.garg_crop:
eval_mask[int(0.40810811 * gt_height):int(0.99189189 * gt_height), int(0.03594771 * gt_width):int(0.96405229 * gt_width)] = 1
elif args.eigen_crop:
if args.dataset == 'kitti':
eval_mask[int(0.3324324 * gt_height):int(0.91351351 * gt_height), int(0.0359477 * gt_width):int(0.96405229 * gt_width)] = 1
else:
eval_mask[45:471, 41:601] = 1
valid_mask = np.logical_and(valid_mask, eval_mask)
silog[i], log10[i], abs_rel[i], sq_rel[i], rms[i], log_rms[i], d1[i], d2[i], d3[i] = compute_errors(gt_depth[valid_mask], pred_depth[valid_mask])
print("{:>7}, {:>7}, {:>7}, {:>7}, {:>7}, {:>7}, {:>7}, {:>7}, {:>7}".format(
'd1', 'd2', 'd3', 'AbsRel', 'SqRel', 'RMSE', 'RMSElog', 'SILog', 'log10'))
print("{:7.3f}, {:7.3f}, {:7.3f}, {:7.3f}, {:7.3f}, {:7.3f}, {:7.3f}, {:7.3f}, {:7.3f}".format(
d1.mean(), d2.mean(), d3.mean(),
abs_rel.mean(), sq_rel.mean(), rms.mean(), log_rms.mean(), silog.mean(), log10.mean()))
return silog, log10, abs_rel, sq_rel, rms, log_rms, d1, d2, d3
def main():
test()
if __name__ == '__main__':
main()
| 8,447 | 35.89083 | 153 | py |
LDU | LDU-main/monocular_depth_estimation/utils/extract_official_train_test_set_from_mat.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#######################################################################################
# The MIT License
# Copyright (c) 2014 Hannes Schulz, University of Bonn <[email protected]>
# Copyright (c) 2013 Benedikt Waldvogel, University of Bonn <[email protected]>
# Copyright (c) 2008-2009 Sebastian Nowozin <[email protected]>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#######################################################################################
#
# Helper script to convert the NYU Depth v2 dataset Matlab file into a set of
# PNG and JPEG images.
#
# See https://github.com/deeplearningais/curfil/wiki/Training-and-Prediction-with-the-NYU-Depth-v2-Dataset
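# Example invocation (the .mat file names and output folder are placeholders):
#   python extract_official_train_test_set_from_mat.py \
#       nyu_depth_v2_labeled.mat splits.mat ./nyu_official_splits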
from __future__ import print_function
import h5py
import numpy as np
import os
import scipy.io
import sys
import cv2
def convert_image(i, scene, depth_raw, image):
idx = int(i) + 1
if idx in train_images:
train_test = "train"
else:
assert idx in test_images, "index %d neither found in training set nor in test set" % idx
train_test = "test"
folder = "%s/%s/%s" % (out_folder, train_test, scene)
if not os.path.exists(folder):
os.makedirs(folder)
img_depth = depth_raw * 1000.0
img_depth_uint16 = img_depth.astype(np.uint16)
cv2.imwrite("%s/sync_depth_%05d.png" % (folder, i), img_depth_uint16)
image = image[:, :, ::-1]
image_black_boundary = np.zeros((480, 640, 3), dtype=np.uint8)
image_black_boundary[7:474, 7:632, :] = image[7:474, 7:632, :]
cv2.imwrite("%s/rgb_%05d.jpg" % (folder, i), image_black_boundary)
if __name__ == "__main__":
if len(sys.argv) < 4:
print("usage: %s <h5_file> <train_test_split> <out_folder>" % sys.argv[0], file=sys.stderr)
sys.exit(0)
h5_file = h5py.File(sys.argv[1], "r")
    # h5py is not able to open that file, but scipy is
train_test = scipy.io.loadmat(sys.argv[2])
out_folder = sys.argv[3]
test_images = set([int(x) for x in train_test["testNdxs"]])
train_images = set([int(x) for x in train_test["trainNdxs"]])
print("%d training images" % len(train_images))
print("%d test images" % len(test_images))
depth_raw = h5_file['rawDepths']
print("reading", sys.argv[1])
images = h5_file['images']
scenes = [u''.join(chr(c) for c in h5_file[obj_ref]) for obj_ref in h5_file['sceneTypes'][0]]
print("processing images")
for i, image in enumerate(images):
print("image", i + 1, "/", len(images))
convert_image(i, scenes[i], depth_raw[i, :, :].T, image.T)
print("Finished") | 3,664 | 37.989362 | 106 | py |
LDU | LDU-main/monocular_depth_estimation/utils/download_from_gdrive.py | # Source: https://stackoverflow.com/a/39225039
import requests
def download_file_from_google_drive(id, destination):
def get_confirm_token(response):
for key, value in response.cookies.items():
if key.startswith('download_warning'):
return value
return None
def save_response_content(response, destination):
CHUNK_SIZE = 32768
with open(destination, "wb") as f:
for chunk in response.iter_content(CHUNK_SIZE):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
URL = "https://docs.google.com/uc?export=download"
session = requests.Session()
response = session.get(URL, params = { 'id' : id }, stream = True)
token = get_confirm_token(response)
if token:
params = { 'id' : id, 'confirm' : token }
response = session.get(URL, params = params, stream = True)
save_response_content(response, destination)
if __name__ == "__main__":
import sys
    if len(sys.argv) != 3:
print("Usage: python google_drive.py drive_file_id destination_file_path")
else:
# TAKE ID FROM SHAREABLE LINK
file_id = sys.argv[1]
# DESTINATION FILE ON YOUR DISK
destination = sys.argv[2]
download_file_from_google_drive(file_id, destination)
| 1,353 | 28.434783 | 82 | py |
LDU | LDU-main/monocular_depth_estimation/pytorch/distributed_sampler_no_evenly_divisible.py | import math
import torch
from torch.utils.data import Sampler
import torch.distributed as dist
class DistributedSamplerNoEvenlyDivisible(Sampler):
"""Sampler that restricts data loading to a subset of the dataset.
It is especially useful in conjunction with
:class:`torch.nn.parallel.DistributedDataParallel`. In such case, each
process can pass a DistributedSampler instance as a DataLoader sampler,
and load a subset of the original dataset that is exclusive to it.
.. note::
Dataset is assumed to be of constant size.
Arguments:
dataset: Dataset used for sampling.
num_replicas (optional): Number of processes participating in
distributed training.
rank (optional): Rank of the current process within num_replicas.
shuffle (optional): If true (default), sampler will shuffle the indices
"""
def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True):
if num_replicas is None:
if not dist.is_available():
raise RuntimeError("Requires distributed package to be available")
num_replicas = dist.get_world_size()
if rank is None:
if not dist.is_available():
raise RuntimeError("Requires distributed package to be available")
rank = dist.get_rank()
self.dataset = dataset
self.num_replicas = num_replicas
self.rank = rank
self.epoch = 0
num_samples = int(math.floor(len(self.dataset) * 1.0 / self.num_replicas))
rest = len(self.dataset) - num_samples * self.num_replicas
if self.rank < rest:
num_samples += 1
self.num_samples = num_samples
self.total_size = len(dataset)
# self.total_size = self.num_samples * self.num_replicas
self.shuffle = shuffle
def __iter__(self):
# deterministically shuffle based on epoch
g = torch.Generator()
g.manual_seed(self.epoch)
if self.shuffle:
indices = torch.randperm(len(self.dataset), generator=g).tolist()
else:
indices = list(range(len(self.dataset)))
# add extra samples to make it evenly divisible
# indices += indices[:(self.total_size - len(indices))]
# assert len(indices) == self.total_size
# subsample
indices = indices[self.rank:self.total_size:self.num_replicas]
self.num_samples = len(indices)
# assert len(indices) == self.num_samples
return iter(indices)
def __len__(self):
return self.num_samples
def set_epoch(self, epoch):
self.epoch = epoch
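# Minimal usage sketch (train_dataset / num_epochs below are placeholders): the
# sampler drops into a DataLoader like torch's DistributedSampler, but per-rank
# shard sizes may differ by one sample since nothing is padded or duplicated.
#   sampler = DistributedSamplerNoEvenlyDivisible(train_dataset, shuffle=True)
#   loader = torch.utils.data.DataLoader(train_dataset, batch_size=8,
#                                        sampler=sampler, num_workers=4)
#   for epoch in range(num_epochs):
#       sampler.set_epoch(epoch)  # reshuffle deterministically per epoch
#       for batch in loader:
#           ...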
| 2,659 | 35.438356 | 82 | py |
LDU | LDU-main/monocular_depth_estimation/pytorch/bts_live_3d.py | # Copyright (C) 2019 Jin Han Lee
#
# This file is a part of BTS.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
from __future__ import absolute_import, division, print_function
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'
import sys
import time
import argparse
import numpy as np
# Computer Vision
import cv2
from scipy import ndimage
from skimage.transform import resize
# Visualization
import matplotlib.pyplot as plt
plasma = plt.get_cmap('plasma')
greys = plt.get_cmap('Greys')
# UI and OpenGL
from PySide2 import QtCore, QtGui, QtWidgets, QtOpenGL
from OpenGL import GL, GLU
from OpenGL.arrays import vbo
from OpenGL.GL import shaders
import glm
# Argument Parser
parser = argparse.ArgumentParser(description='BTS Live 3D')
parser.add_argument('--model_name', type=str, help='model name', default='bts_nyu_v2')
parser.add_argument('--encoder', type=str, help='type of encoder, densenet121_bts or densenet161_bts', default='densenet161_bts')
parser.add_argument('--max_depth', type=float, help='maximum depth in estimation', default=10)
parser.add_argument('--checkpoint_path', type=str, help='path to a checkpoint to load', required=True)
parser.add_argument('--input_height', type=int, help='input height', default=480)
parser.add_argument('--input_width', type=int, help='input width', default=640)
parser.add_argument('--dataset', type=str, help='dataset this model trained on', default='nyu')
args = parser.parse_args()
model_dir = os.path.join("./models", args.model_name)
sys.path.append(model_dir)
for key, val in vars(__import__(args.model_name)).items():
if key.startswith('__') and key.endswith('__'):
continue
vars()[key] = val
# Image shapes
height_rgb, width_rgb = 480, 640
height_depth, width_depth = height_rgb, width_rgb
height_rgb = height_rgb
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
# Intrinsic parameters for your own webcam camera
camera_matrix = np.zeros(shape=(3, 3))
camera_matrix[0, 0] = 5.4765313594010649e+02
camera_matrix[0, 2] = 3.2516069906172453e+02
camera_matrix[1, 1] = 5.4801781476172562e+02
camera_matrix[1, 2] = 2.4794113960783835e+02
camera_matrix[2, 2] = 1
dist_coeffs = np.array([ 3.7230261423972011e-02, -1.6171708069773008e-01, -3.5260752900266357e-04, 1.7161234226767313e-04, 1.0192711400840315e-01 ])
# Parameters for a model trained on NYU Depth V2
new_camera_matrix = np.zeros(shape=(3, 3))
new_camera_matrix[0, 0] = 518.8579
new_camera_matrix[0, 2] = 320
new_camera_matrix[1, 1] = 518.8579
new_camera_matrix[1, 2] = 240
new_camera_matrix[2, 2] = 1
R = np.identity(3, dtype=np.float64)
map1, map2 = cv2.initUndistortRectifyMap(camera_matrix, dist_coeffs, R, new_camera_matrix, (640, 480), cv2.CV_32FC1)
def load_model():
args.mode = 'test'
model = BtsModel(params=args)
model = torch.nn.DataParallel(model)
checkpoint = torch.load(args.checkpoint_path)
model.load_state_dict(checkpoint['model'])
model.eval()
model.cuda()
return model
# Function timing
ticTime = time.time()
def tic():
    global ticTime
ticTime = time.time()
def toc():
print('{0} seconds.'.format(time.time() - ticTime))
# Conversion from Numpy to QImage and back
def np_to_qimage(a):
im = a.copy()
return QtGui.QImage(im.data, im.shape[1], im.shape[0], im.strides[0], QtGui.QImage.Format_RGB888).copy()
def qimage_to_np(img):
img = img.convertToFormat(QtGui.QImage.Format.Format_ARGB32)
return np.array(img.constBits()).reshape(img.height(), img.width(), 4)
# Compute edge magnitudes
def edges(d):
dx = ndimage.sobel(d, 0) # horizontal derivative
dy = ndimage.sobel(d, 1) # vertical derivative
return np.abs(dx) + np.abs(dy)
# Main window
class Window(QtWidgets.QWidget):
updateInput = QtCore.Signal()
def __init__(self, parent=None):
QtWidgets.QWidget.__init__(self, parent)
self.model = None
self.capture = None
self.glWidget = GLWidget()
mainLayout = QtWidgets.QVBoxLayout()
# Input / output views
viewsLayout = QtWidgets.QGridLayout()
self.inputViewer = QtWidgets.QLabel("[Click to start]")
self.inputViewer.setPixmap(QtGui.QPixmap(width_rgb, height_rgb))
self.outputViewer = QtWidgets.QLabel("[Click to start]")
self.outputViewer.setPixmap(QtGui.QPixmap(width_rgb, height_rgb))
imgsFrame = QtWidgets.QFrame()
inputsLayout = QtWidgets.QVBoxLayout()
imgsFrame.setLayout(inputsLayout)
inputsLayout.addWidget(self.inputViewer)
inputsLayout.addWidget(self.outputViewer)
viewsLayout.addWidget(imgsFrame, 0, 0)
viewsLayout.addWidget(self.glWidget, 0, 1)
viewsLayout.setColumnStretch(1, 10)
mainLayout.addLayout(viewsLayout)
# Load depth estimation model
toolsLayout = QtWidgets.QHBoxLayout()
self.button2 = QtWidgets.QPushButton("Webcam")
self.button2.clicked.connect(self.loadCamera)
toolsLayout.addWidget(self.button2)
self.button4 = QtWidgets.QPushButton("Pause")
self.button4.clicked.connect(self.loadImage)
toolsLayout.addWidget(self.button4)
self.button6 = QtWidgets.QPushButton("Refresh")
self.button6.clicked.connect(self.updateCloud)
toolsLayout.addWidget(self.button6)
mainLayout.addLayout(toolsLayout)
self.setLayout(mainLayout)
self.setWindowTitle(self.tr("BTS Live"))
# Signals
self.updateInput.connect(self.update_input)
# Default example
if self.glWidget.rgb.any() and self.glWidget.depth.any():
img = (self.glWidget.rgb * 255).astype('uint8')
self.inputViewer.setPixmap(QtGui.QPixmap.fromImage(np_to_qimage(img)))
coloredDepth = (plasma(self.glWidget.depth[:, :, 0])[:, :, :3] * 255).astype('uint8')
self.outputViewer.setPixmap(QtGui.QPixmap.fromImage(np_to_qimage(coloredDepth)))
def loadModel(self):
QtGui.QGuiApplication.setOverrideCursor(QtCore.Qt.WaitCursor)
tic()
self.model = load_model()
print('Model loaded.')
toc()
self.updateCloud()
QtGui.QGuiApplication.restoreOverrideCursor()
def loadCamera(self):
tic()
self.model = load_model()
print('Model loaded.')
toc()
self.capture = cv2.VideoCapture(0)
self.updateInput.emit()
def loadVideoFile(self):
self.capture = cv2.VideoCapture('video.mp4')
self.updateInput.emit()
def loadImage(self):
self.capture = None
img = (self.glWidget.rgb * 255).astype('uint8')
self.inputViewer.setPixmap(QtGui.QPixmap.fromImage(np_to_qimage(img)))
self.updateCloud()
def loadImageFile(self):
self.capture = None
filename = \
QtWidgets.QFileDialog.getOpenFileName(None, 'Select image', '', self.tr('Image files (*.jpg *.png)'))[0]
img = QtGui.QImage(filename).scaledToHeight(height_rgb)
xstart = 0
if img.width() > width_rgb: xstart = (img.width() - width_rgb) // 2
img = img.copy(xstart, 0, xstart + width_rgb, height_rgb)
self.inputViewer.setPixmap(QtGui.QPixmap.fromImage(img))
self.updateCloud()
def update_input(self):
# Don't update anymore if no capture device is set
if self.capture == None:
return
# Capture a frame
ret, frame = self.capture.read()
# Loop video playback if current stream is video file
if not ret:
self.capture.set(cv2.CAP_PROP_POS_FRAMES, 0)
ret, frame = self.capture.read()
# Prepare image and show in UI
frame_ud = cv2.remap(frame, map1, map2, interpolation=cv2.INTER_LINEAR)
frame = cv2.cvtColor(frame_ud, cv2.COLOR_BGR2RGB)
image = np_to_qimage(frame)
self.inputViewer.setPixmap(QtGui.QPixmap.fromImage(image))
# Update the point cloud
self.updateCloud()
def updateCloud(self):
rgb8 = qimage_to_np(self.inputViewer.pixmap().toImage())
self.glWidget.rgb = (rgb8[:, :, :3] / 255)[:, :, ::-1]
if self.model:
input_image = rgb8[:, :, :3].astype(np.float32)
# Normalize image
input_image[:, :, 0] = (input_image[:, :, 0] - 123.68) * 0.017
input_image[:, :, 1] = (input_image[:, :, 1] - 116.78) * 0.017
input_image[:, :, 2] = (input_image[:, :, 2] - 103.94) * 0.017
input_image_cropped = input_image[32:-1 - 31, 32:-1 - 31, :]
input_images = np.expand_dims(input_image_cropped, axis=0)
input_images = np.transpose(input_images, (0, 3, 1, 2))
with torch.no_grad():
image = Variable(torch.from_numpy(input_images)).cuda()
focal = Variable(torch.tensor([518.8579])).cuda()
# Predict
lpg8x8, lpg4x4, lpg2x2, reduc1x1, depth_cropped = self.model(image, focal)
depth = np.zeros((480, 640), dtype=np.float32)
depth[32:-1-31, 32:-1-31] = depth_cropped[0].cpu().squeeze() / args.max_depth
coloredDepth = (greys(np.log10(depth * args.max_depth))[:, :, :3] * 255).astype('uint8')
self.outputViewer.setPixmap(QtGui.QPixmap.fromImage(np_to_qimage(coloredDepth)))
self.glWidget.depth = depth
else:
self.glWidget.depth = 0.5 + np.zeros((height_rgb // 2, width_rgb // 2, 1))
self.glWidget.updateRGBD()
self.glWidget.updateGL()
# Update to next frame if we are live
QtCore.QTimer.singleShot(10, self.updateInput)
class GLWidget(QtOpenGL.QGLWidget):
def __init__(self, parent=None):
QtOpenGL.QGLWidget.__init__(self, parent)
self.object = 0
self.xRot = 5040
self.yRot = 40
self.zRot = 0
self.zoomLevel = 9
self.lastPos = QtCore.QPoint()
self.green = QtGui.QColor.fromCmykF(0.0, 0.0, 0.0, 1.0)
self.black = QtGui.QColor.fromCmykF(0.0, 0.0, 0.0, 1.0)
# Precompute for world coordinates
self.xx, self.yy = self.worldCoords(width=width_rgb, height=height_rgb)
self.rgb = np.zeros((480, 640, 3), dtype=np.uint8)
self.depth = np.zeros((480, 640), dtype=np.float32)
self.col_vbo = None
self.pos_vbo = None
        if self.rgb.any() and self.depth.any():
self.updateRGBD()
def xRotation(self):
return self.xRot
def yRotation(self):
return self.yRot
def zRotation(self):
return self.zRot
def minimumSizeHint(self):
return QtCore.QSize(640, 480)
def sizeHint(self):
return QtCore.QSize(640, 480)
def setXRotation(self, angle):
if angle != self.xRot:
self.xRot = angle
self.emit(QtCore.SIGNAL("xRotationChanged(int)"), angle)
self.updateGL()
def setYRotation(self, angle):
if angle != self.yRot:
self.yRot = angle
self.emit(QtCore.SIGNAL("yRotationChanged(int)"), angle)
self.updateGL()
def setZRotation(self, angle):
if angle != self.zRot:
self.zRot = angle
self.emit(QtCore.SIGNAL("zRotationChanged(int)"), angle)
self.updateGL()
def resizeGL(self, width, height):
GL.glViewport(0, 0, width, height)
def mousePressEvent(self, event):
self.lastPos = QtCore.QPoint(event.pos())
def mouseMoveEvent(self, event):
dx = -(event.x() - self.lastPos.x())
dy = (event.y() - self.lastPos.y())
if event.buttons() & QtCore.Qt.LeftButton:
self.setXRotation(self.xRot + dy)
self.setYRotation(self.yRot + dx)
elif event.buttons() & QtCore.Qt.RightButton:
self.setXRotation(self.xRot + dy)
self.setZRotation(self.zRot + dx)
self.lastPos = QtCore.QPoint(event.pos())
def wheelEvent(self, event):
numDegrees = event.delta() / 8
numSteps = numDegrees / 15
self.zoomLevel = self.zoomLevel + numSteps
event.accept()
self.updateGL()
def initializeGL(self):
self.qglClearColor(self.black.darker())
GL.glShadeModel(GL.GL_FLAT)
GL.glEnable(GL.GL_DEPTH_TEST)
GL.glEnable(GL.GL_CULL_FACE)
VERTEX_SHADER = shaders.compileShader("""#version 330
layout(location = 0) in vec3 position;
layout(location = 1) in vec3 color;
uniform mat4 mvp; out vec4 frag_color;
void main() {gl_Position = mvp * vec4(position, 1.0);frag_color = vec4(color, 1.0);}""", GL.GL_VERTEX_SHADER)
FRAGMENT_SHADER = shaders.compileShader("""#version 330
in vec4 frag_color; out vec4 out_color;
void main() {out_color = frag_color;}""", GL.GL_FRAGMENT_SHADER)
self.shaderProgram = shaders.compileProgram(VERTEX_SHADER, FRAGMENT_SHADER)
self.UNIFORM_LOCATIONS = {
'position': GL.glGetAttribLocation(self.shaderProgram, 'position'),
'color': GL.glGetAttribLocation(self.shaderProgram, 'color'),
'mvp': GL.glGetUniformLocation(self.shaderProgram, 'mvp'),
}
shaders.glUseProgram(self.shaderProgram)
def paintGL(self):
if self.rgb.any() and self.depth.any():
GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)
self.drawObject()
def worldCoords(self, width, height):
cx, cy = width / 2, height / 2
fx = 518.8579
fy = 518.8579
xx, yy = np.tile(range(width), height), np.repeat(range(height), width)
xx = (xx - cx) / fx
yy = (yy - cy) / fy
return xx, yy
def posFromDepth(self, depth):
length = depth.shape[0] * depth.shape[1]
depth[edges(depth) > 0.3] = 1e6 # Hide depth edges
z = depth.reshape(length)
return np.dstack((self.xx * z, self.yy * z, z)).reshape((length, 3))
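    # Pinhole back-projection used by the two methods above: worldCoords()
    # precomputes normalised ray directions x = (u - cx) / fx, y = (v - cy) / fy
    # once, so posFromDepth() only needs the per-pixel products (x*Z, y*Z, Z) to
    # turn a depth map into a point cloud; pixels on strong depth edges are
    # pushed to 1e6 so that stretched boundary points stay hidden.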
def createPointCloudVBOfromRGBD(self):
# Create position and color VBOs
self.pos_vbo = vbo.VBO(data=self.pos, usage=GL.GL_DYNAMIC_DRAW, target=GL.GL_ARRAY_BUFFER)
self.col_vbo = vbo.VBO(data=self.col, usage=GL.GL_DYNAMIC_DRAW, target=GL.GL_ARRAY_BUFFER)
def updateRGBD(self):
# RGBD dimensions
width, height = self.depth.shape[1], self.depth.shape[0]
# Reshape
points = self.posFromDepth(self.depth.copy())
colors = resize(self.rgb, (height, width)).reshape((height * width, 3))
# Flatten and convert to float32
self.pos = points.astype('float32')
self.col = colors.reshape(height * width, 3).astype('float32')
# Move center of scene
self.pos = self.pos + glm.vec3(0, -0.06, -0.3)
# Create VBOs
if not self.col_vbo:
self.createPointCloudVBOfromRGBD()
def drawObject(self):
# Update camera
model, view, proj = glm.mat4(1), glm.mat4(1), glm.perspective(45, self.width() / self.height(), 0.01, 100)
center, up, eye = glm.vec3(0, -0.075, 0), glm.vec3(0, -1, 0), glm.vec3(0, 0, -0.4 * (self.zoomLevel / 10))
view = glm.lookAt(eye, center, up)
model = glm.rotate(model, self.xRot / 160.0, glm.vec3(1, 0, 0))
model = glm.rotate(model, self.yRot / 160.0, glm.vec3(0, 1, 0))
model = glm.rotate(model, self.zRot / 160.0, glm.vec3(0, 0, 1))
mvp = proj * view * model
GL.glUniformMatrix4fv(self.UNIFORM_LOCATIONS['mvp'], 1, False, glm.value_ptr(mvp))
# Update data
self.pos_vbo.set_array(self.pos)
self.col_vbo.set_array(self.col)
# Point size
GL.glPointSize(2)
# Position
self.pos_vbo.bind()
GL.glEnableVertexAttribArray(0)
GL.glVertexAttribPointer(0, 3, GL.GL_FLOAT, GL.GL_FALSE, 0, None)
# Color
self.col_vbo.bind()
GL.glEnableVertexAttribArray(1)
GL.glVertexAttribPointer(1, 3, GL.GL_FLOAT, GL.GL_FALSE, 0, None)
# Draw
GL.glDrawArrays(GL.GL_POINTS, 0, self.pos.shape[0])
if __name__ == '__main__':
app = QtWidgets.QApplication(sys.argv)
window = Window()
window.show()
res = app.exec_() | 17,345 | 34.4 | 148 | py |
LDU | LDU-main/monocular_depth_estimation/pytorch/bts_main.py | # Copyright (C) 2019 Jin Han Lee
#
# This file is a part of BTS.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
import time
import argparse
import sys
import os
import torch
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.multiprocessing as mp
from tensorboardX import SummaryWriter
import matplotlib
import matplotlib.cm
from tqdm import tqdm
from bts_dataloader import *
from bts_ldu import *
def convert_arg_line_to_args(arg_line):
for arg in arg_line.split():
if not arg.strip():
continue
yield arg
parser = argparse.ArgumentParser(description='BTS PyTorch implementation.', fromfile_prefix_chars='@')
parser.convert_arg_line_to_args = convert_arg_line_to_args
parser.add_argument('--mode', type=str, help='train or test', default='train')
parser.add_argument('--model_name', type=str, help='model name', default='bts_eigen_v2')
parser.add_argument('--encoder',                type=str,   help='type of encoder, densenet121_bts, densenet161_bts, '
'resnet101_bts, resnet50_bts, resnext50_bts or resnext101_bts',
default='densenet161_bts')
# Dataset
parser.add_argument('--dataset', type=str, help='dataset to train on, kitti or nyu', default='nyu')
parser.add_argument('--data_path', type=str, help='path to the data', required=True)
parser.add_argument('--gt_path', type=str, help='path to the groundtruth data', required=True)
parser.add_argument('--filenames_file', type=str, help='path to the filenames text file', required=True)
parser.add_argument('--input_height', type=int, help='input height', default=480)
parser.add_argument('--input_width', type=int, help='input width', default=640)
parser.add_argument('--max_depth', type=float, help='maximum depth in estimation', default=10)
# Log and save
parser.add_argument('--log_directory', type=str, help='directory to save checkpoints and summaries', default='')
parser.add_argument('--checkpoint_path', type=str, help='path to a checkpoint to load', default='')
parser.add_argument('--log_freq', type=int, help='Logging frequency in global steps', default=100)
parser.add_argument('--save_freq', type=int, help='Checkpoint saving frequency in global steps', default=500)
# Training
parser.add_argument('--fix_first_conv_blocks', help='if set, will fix the first two conv blocks', action='store_true')
parser.add_argument('--fix_first_conv_block', help='if set, will fix the first conv block', action='store_true')
parser.add_argument('--bn_no_track_stats', help='if set, will not track running stats in batch norm layers', action='store_true')
parser.add_argument('--weight_decay', type=float, help='weight decay factor for optimization', default=1e-2)
parser.add_argument('--bts_size', type=int, help='initial num_filters in bts', default=512)
parser.add_argument('--retrain', help='if used with checkpoint_path, will restart training from step zero', action='store_true')
parser.add_argument('--adam_eps', type=float, help='epsilon in Adam optimizer', default=1e-6)
parser.add_argument('--batch_size', type=int, help='batch size', default=4)
parser.add_argument('--num_epochs', type=int, help='number of epochs', default=50)
parser.add_argument('--learning_rate', type=float, help='initial learning rate', default=1e-4)
parser.add_argument('--end_learning_rate', type=float, help='end learning rate', default=-1)
parser.add_argument('--variance_focus', type=float, help='lambda in paper: [0, 1], higher value more focus on minimizing variance of error', default=0.85)
parser.add_argument('--nb_proto', type=int, help='initial num_proto in bts', default=30)
parser.add_argument('--loss_lambda', type=float, help='weight of the additional losses', default=0.1)
# Preprocessing
parser.add_argument('--do_random_rotate', help='if set, will perform random rotation for augmentation', action='store_true')
parser.add_argument('--degree', type=float, help='random rotation maximum degree', default=2.5)
parser.add_argument('--do_kb_crop', help='if set, crop input images as kitti benchmark images', action='store_true')
parser.add_argument('--use_right', help='if set, will randomly use right images when train on KITTI', action='store_true')
# Multi-gpu training
parser.add_argument('--num_threads', type=int, help='number of threads to use for data loading', default=1)
parser.add_argument('--world_size', type=int, help='number of nodes for distributed training', default=1)
parser.add_argument('--rank', type=int, help='node rank for distributed training', default=0)
parser.add_argument('--dist_url', type=str, help='url used to set up distributed training', default='tcp://127.0.0.1:1234')
parser.add_argument('--dist_backend', type=str, help='distributed backend', default='nccl')
parser.add_argument('--gpu', type=int, help='GPU id to use.', default=None)
parser.add_argument('--multiprocessing_distributed', help='Use multi-processing distributed training to launch '
'N processes per node, which has N GPUs. This is the '
'fastest way to use PyTorch for either single node or '
'multi node data parallel training', action='store_true',)
# Online eval
parser.add_argument('--do_online_eval', help='if set, perform online eval in every eval_freq steps', action='store_true')
parser.add_argument('--data_path_eval', type=str, help='path to the data for online evaluation', required=False)
parser.add_argument('--gt_path_eval', type=str, help='path to the groundtruth data for online evaluation', required=False)
parser.add_argument('--filenames_file_eval', type=str, help='path to the filenames text file for online evaluation', required=False)
parser.add_argument('--min_depth_eval', type=float, help='minimum depth for evaluation', default=1e-3)
parser.add_argument('--max_depth_eval', type=float, help='maximum depth for evaluation', default=80)
parser.add_argument('--eigen_crop', help='if set, crops according to Eigen NIPS14', action='store_true')
parser.add_argument('--garg_crop', help='if set, crops according to Garg ECCV16', action='store_true')
parser.add_argument('--eval_freq', type=int, help='Online evaluation frequency in global steps', default=500)
parser.add_argument('--eval_summary_directory', type=str, help='output directory for eval summary,'
'if empty outputs to checkpoint folder', default='')
if sys.argv.__len__() == 2:
arg_filename_with_prefix = '@' + sys.argv[1]
args = parser.parse_args([arg_filename_with_prefix])
else:
args = parser.parse_args()
inv_normalize = transforms.Normalize(
mean=[-0.485/0.229, -0.456/0.224, -0.406/0.225],
std=[1/0.229, 1/0.224, 1/0.225]
)
eval_metrics = ['silog', 'abs_rel', 'log10', 'rms', 'sq_rel', 'log_rms', 'd1', 'd2', 'd3']
def compute_errors(gt, pred):
thresh = np.maximum((gt / pred), (pred / gt))
d1 = (thresh < 1.25).mean()
d2 = (thresh < 1.25 ** 2).mean()
d3 = (thresh < 1.25 ** 3).mean()
rms = (gt - pred) ** 2
rms = np.sqrt(rms.mean())
log_rms = (np.log(gt) - np.log(pred)) ** 2
log_rms = np.sqrt(log_rms.mean())
abs_rel = np.mean(np.abs(gt - pred) / gt)
sq_rel = np.mean(((gt - pred) ** 2) / gt)
err = np.log(pred) - np.log(gt)
silog = np.sqrt(np.mean(err ** 2) - np.mean(err) ** 2) * 100
err = np.abs(np.log10(pred) - np.log10(gt))
log10 = np.mean(err)
return [silog, abs_rel, log10, rms, sq_rel, log_rms, d1, d2, d3]
def block_print():
sys.stdout = open(os.devnull, 'w')
def enable_print():
sys.stdout = sys.__stdout__
def get_num_lines(file_path):
f = open(file_path, 'r')
lines = f.readlines()
f.close()
return len(lines)
def colorize(value, vmin=None, vmax=None, cmap='Greys'):
value = value.cpu().numpy()[:, :, :]
value = np.log10(value)
vmin = value.min() if vmin is None else vmin
vmax = value.max() if vmax is None else vmax
if vmin != vmax:
value = (value - vmin) / (vmax - vmin)
else:
value = value*0.
cmapper = matplotlib.cm.get_cmap(cmap)
value = cmapper(value, bytes=True)
img = value[:, :, :3]
return img.transpose((2, 0, 1))
def normalize_result(value, vmin=None, vmax=None):
value = value.cpu().numpy()[0, :, :]
vmin = value.min() if vmin is None else vmin
vmax = value.max() if vmax is None else vmax
if vmin != vmax:
value = (value - vmin) / (vmax - vmin)
else:
value = value * 0.
return np.expand_dims(value, 0)
def set_misc(model):
if args.bn_no_track_stats:
print("Disabling tracking running stats in batch norm layers")
model.apply(bn_init_as_tf)
if args.fix_first_conv_blocks:
if 'resne' in args.encoder:
fixing_layers = ['base_model.conv1', 'base_model.layer1.0', 'base_model.layer1.1', '.bn']
else:
fixing_layers = ['conv0', 'denseblock1.denselayer1', 'denseblock1.denselayer2', 'norm']
print("Fixing first two conv blocks")
elif args.fix_first_conv_block:
if 'resne' in args.encoder:
fixing_layers = ['base_model.conv1', 'base_model.layer1.0', '.bn']
else:
fixing_layers = ['conv0', 'denseblock1.denselayer1', 'norm']
print("Fixing first conv block")
else:
if 'resne' in args.encoder:
fixing_layers = ['base_model.conv1', '.bn']
else:
fixing_layers = ['conv0', 'norm']
print("Fixing first conv layer")
for name, child in model.named_children():
if not 'encoder' in name:
continue
for name2, parameters in child.named_parameters():
# print(name, name2)
if any(x in name2 for x in fixing_layers):
parameters.requires_grad = False
def online_eval(model, dataloader_eval, gpu, ngpus):
eval_measures = torch.zeros(10).cuda(device=gpu)
for _, eval_sample_batched in enumerate(tqdm(dataloader_eval.data)):
with torch.no_grad():
image = torch.autograd.Variable(eval_sample_batched['image'].cuda(gpu, non_blocking=True))
focal = torch.autograd.Variable(eval_sample_batched['focal'].cuda(gpu, non_blocking=True))
gt_depth = eval_sample_batched['depth']
has_valid_depth = eval_sample_batched['has_valid_depth']
if not has_valid_depth:
continue
pred_depth, _ = model(image, focal)
pred_depth = pred_depth.cpu().numpy().squeeze()
gt_depth = gt_depth.cpu().numpy().squeeze()
if args.do_kb_crop:
height, width = gt_depth.shape
top_margin = int(height - 352)
left_margin = int((width - 1216) / 2)
pred_depth_uncropped = np.zeros((height, width), dtype=np.float32)
pred_depth_uncropped[top_margin:top_margin + 352, left_margin:left_margin + 1216] = pred_depth
pred_depth = pred_depth_uncropped
pred_depth[pred_depth < args.min_depth_eval] = args.min_depth_eval
pred_depth[pred_depth > args.max_depth_eval] = args.max_depth_eval
pred_depth[np.isinf(pred_depth)] = args.max_depth_eval
pred_depth[np.isnan(pred_depth)] = args.min_depth_eval
valid_mask = np.logical_and(gt_depth > args.min_depth_eval, gt_depth < args.max_depth_eval)
if args.garg_crop or args.eigen_crop:
gt_height, gt_width = gt_depth.shape
eval_mask = np.zeros(valid_mask.shape)
if args.garg_crop:
eval_mask[int(0.40810811 * gt_height):int(0.99189189 * gt_height), int(0.03594771 * gt_width):int(0.96405229 * gt_width)] = 1
elif args.eigen_crop:
if args.dataset == 'kitti':
eval_mask[int(0.3324324 * gt_height):int(0.91351351 * gt_height), int(0.0359477 * gt_width):int(0.96405229 * gt_width)] = 1
else:
eval_mask[45:471, 41:601] = 1
valid_mask = np.logical_and(valid_mask, eval_mask)
measures = compute_errors(gt_depth[valid_mask], pred_depth[valid_mask])
eval_measures[:9] += torch.tensor(measures).cuda(device=gpu)
eval_measures[9] += 1
if args.multiprocessing_distributed:
group = dist.new_group([i for i in range(ngpus)])
dist.all_reduce(tensor=eval_measures, op=dist.ReduceOp.SUM, group=group)
if not args.multiprocessing_distributed or gpu == 0:
eval_measures_cpu = eval_measures.cpu()
cnt = eval_measures_cpu[9].item()
eval_measures_cpu /= cnt
print('Computing errors for {} eval samples'.format(int(cnt)))
print("{:>7}, {:>7}, {:>7}, {:>7}, {:>7}, {:>7}, {:>7}, {:>7}, {:>7}".format('silog', 'abs_rel', 'log10', 'rms',
'sq_rel', 'log_rms', 'd1', 'd2',
'd3'))
for i in range(8):
print('{:7.3f}, '.format(eval_measures_cpu[i]), end='')
print('{:7.3f}'.format(eval_measures_cpu[8]))
return eval_measures_cpu
return None
def main_worker(gpu, ngpus_per_node, args):
args.gpu = gpu
if args.gpu is not None:
print("Use GPU: {} for training".format(args.gpu))
if args.distributed:
if args.dist_url == "env://" and args.rank == -1:
args.rank = int(os.environ["RANK"])
if args.multiprocessing_distributed:
args.rank = args.rank * ngpus_per_node + gpu
dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url, world_size=args.world_size, rank=args.rank)
# Create model
model = BtsModel(args)
model.train()
model.decoder.apply(weights_init_xavier)
set_misc(model)
num_params = sum([np.prod(p.size()) for p in model.parameters()])
print("Total number of parameters: {}".format(num_params))
num_params_update = sum([np.prod(p.shape) for p in model.parameters() if p.requires_grad])
print("Total number of learning parameters: {}".format(num_params_update))
if args.distributed:
if args.gpu is not None:
torch.cuda.set_device(args.gpu)
model.cuda(args.gpu)
args.batch_size = int(args.batch_size / ngpus_per_node)
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu], find_unused_parameters=True)
else:
model.cuda()
model = torch.nn.parallel.DistributedDataParallel(model, find_unused_parameters=True)
else:
model = torch.nn.DataParallel(model)
model.cuda()
if args.distributed:
print("Model Initialized on GPU: {}".format(args.gpu))
else:
print("Model Initialized")
global_step = 0
best_eval_measures_lower_better = torch.zeros(6).cpu() + 1e3
best_eval_measures_higher_better = torch.zeros(3).cpu()
best_eval_steps = np.zeros(9, dtype=np.int32)
# Training parameters
optimizer = torch.optim.AdamW([{'params': model.module.encoder.parameters(), 'weight_decay': args.weight_decay},
{'params': model.module.decoder.parameters(), 'weight_decay': 0}],
lr=args.learning_rate, eps=args.adam_eps)
model_just_loaded = False
if args.checkpoint_path != '':
if os.path.isfile(args.checkpoint_path):
print("Loading checkpoint '{}'".format(args.checkpoint_path))
if args.gpu is None:
checkpoint = torch.load(args.checkpoint_path)
else:
loc = 'cuda:{}'.format(args.gpu)
checkpoint = torch.load(args.checkpoint_path, map_location=loc)
global_step = checkpoint['global_step']
model.load_state_dict(checkpoint['model'])
optimizer.load_state_dict(checkpoint['optimizer'])
try:
best_eval_measures_higher_better = checkpoint['best_eval_measures_higher_better'].cpu()
best_eval_measures_lower_better = checkpoint['best_eval_measures_lower_better'].cpu()
best_eval_steps = checkpoint['best_eval_steps']
except KeyError:
print("Could not load values for online evaluation")
print("Loaded checkpoint '{}' (global_step {})".format(args.checkpoint_path, checkpoint['global_step']))
else:
print("No checkpoint found at '{}'".format(args.checkpoint_path))
model_just_loaded = True
if args.retrain:
global_step = 0
cudnn.benchmark = True
dataloader = BtsDataLoader(args, 'train')
dataloader_eval = BtsDataLoader(args, 'online_eval')
# Logging
if not args.multiprocessing_distributed or (args.multiprocessing_distributed and args.rank % ngpus_per_node == 0):
writer = SummaryWriter(args.log_directory + '/' + args.model_name + '/summaries', flush_secs=30)
if args.do_online_eval:
if args.eval_summary_directory != '':
eval_summary_path = os.path.join(args.eval_summary_directory, args.model_name)
else:
eval_summary_path = os.path.join(args.log_directory, 'eval')
eval_summary_writer = SummaryWriter(eval_summary_path, flush_secs=30)
criterion = silog_loss(args.variance_focus)
criterion_uncer = uncertainty_loss(args)
criterion_entro = entropy_loss()
criterion_dissi = dissimilar_loss()
start_time = time.time()
duration = 0
num_log_images = args.batch_size
end_learning_rate = args.end_learning_rate if args.end_learning_rate != -1 else 0.1 * args.learning_rate
steps_per_epoch = len(dataloader.data)
num_total_steps = args.num_epochs * steps_per_epoch
epoch = global_step // steps_per_epoch
while epoch < args.num_epochs:
if args.distributed:
dataloader.train_sampler.set_epoch(epoch)
for step, sample_batched in enumerate(dataloader.data):
optimizer.zero_grad()
before_op_time = time.time()
image = torch.autograd.Variable(sample_batched['image'].cuda(args.gpu, non_blocking=True))
focal = torch.autograd.Variable(sample_batched['focal'].cuda(args.gpu, non_blocking=True))
depth_gt = torch.autograd.Variable(sample_batched['depth'].cuda(args.gpu, non_blocking=True))
final_depth, final_uncer, omega, embedding_ = model(image, focal)
if args.dataset == 'nyu':
mask = depth_gt > 0.1
else:
mask = depth_gt > 1.0
mask = mask.to(torch.bool)
loss_depth = criterion.forward(final_depth, depth_gt, mask)
loss_uncer = criterion_uncer.forward(final_uncer, final_depth, depth_gt, mask)
loss_omega = criterion_entro.forward(embedding_)
loss_dissi = criterion_dissi.forward(omega)
            loss = loss_depth + (loss_uncer + loss_omega + loss_dissi) * args.loss_lambda  # SILog depth term + weighted LDU auxiliary terms
loss.backward()
            for param_group in optimizer.param_groups:
                # polynomial decay (power 0.9) of the learning rate from learning_rate
                # towards end_learning_rate over the full training schedule
                current_lr = (args.learning_rate - end_learning_rate) * (1 - global_step / num_total_steps) ** 0.9 + end_learning_rate
                param_group['lr'] = current_lr
optimizer.step()
if not args.multiprocessing_distributed or (args.multiprocessing_distributed and args.rank % ngpus_per_node == 0):
print('[epoch][s/s_per_e/gs]: [{}][{}/{}/{}], lr: {:.12f}, loss: {:.12f}'.format(epoch, step, steps_per_epoch, global_step, current_lr, loss))
if np.isnan(loss.cpu().item()):
print('NaN in loss occurred. Aborting training.')
return -1
duration += time.time() - before_op_time
if global_step and global_step % args.log_freq == 0 and not model_just_loaded:
examples_per_sec = args.batch_size / duration * args.log_freq
duration = 0
time_sofar = (time.time() - start_time) / 3600
training_time_left = (num_total_steps / global_step - 1.0) * time_sofar
if not args.multiprocessing_distributed or (args.multiprocessing_distributed and args.rank % ngpus_per_node == 0):
print("{}".format(args.model_name))
print_string = 'GPU: {} | examples/s: {:4.2f} | loss: {:.5f} | time elapsed: {:.2f}h | time left: {:.2f}h'
print(print_string.format(args.gpu, examples_per_sec, loss, time_sofar, training_time_left))
if not args.multiprocessing_distributed or (args.multiprocessing_distributed
and args.rank % ngpus_per_node == 0):
writer.add_scalar('total_loss', loss, global_step)
writer.add_scalar('loss_depth', loss_depth, global_step)
writer.add_scalar('loss_uncer', loss_uncer, global_step)
writer.add_scalar('learning_rate', current_lr, global_step)
for i in range(num_log_images):
writer.add_image('depth_mean/image/{}'.format(i), normalize_result(1/(final_depth)[i, :, :, :].data), global_step)
writer.add_image('depth_var/image/{}'.format(i), normalize_result((final_uncer.detach().sigmoid())[i, :, :, :].data), global_step)
writer.add_image('image/image/{}'.format(i), inv_normalize(image[i, :, :, :]).data, global_step)
writer.flush()
if not args.do_online_eval and global_step and global_step % args.save_freq == 0:
if not args.multiprocessing_distributed or (args.multiprocessing_distributed and args.rank % ngpus_per_node == 0):
checkpoint = {'global_step': global_step,
'model': model.state_dict(),
'optimizer': optimizer.state_dict()}
torch.save(checkpoint, args.log_directory + '/' + args.model_name + '/model-{}'.format(global_step))
if args.do_online_eval and global_step and global_step % args.eval_freq == 0 and not model_just_loaded:
time.sleep(0.1)
model.eval()
eval_measures = online_eval(model, dataloader_eval, gpu, ngpus_per_node)
if eval_measures is not None:
for i in range(9):
eval_summary_writer.add_scalar(eval_metrics[i], eval_measures[i].cpu(), int(global_step))
measure = eval_measures[i]
is_best = False
if i < 6 and measure < best_eval_measures_lower_better[i]:
old_best = best_eval_measures_lower_better[i].item()
best_eval_measures_lower_better[i] = measure.item()
is_best = True
elif i >= 6 and measure > best_eval_measures_higher_better[i-6]:
old_best = best_eval_measures_higher_better[i-6].item()
best_eval_measures_higher_better[i-6] = measure.item()
is_best = True
if is_best:
old_best_step = best_eval_steps[i]
old_best_name = '/model-{}-best_{}_{:.5f}'.format(old_best_step, eval_metrics[i], old_best)
model_path = args.log_directory + '/' + args.model_name + old_best_name
if os.path.exists(model_path):
command = 'rm {}'.format(model_path)
os.system(command)
best_eval_steps[i] = global_step
model_save_name = '/model-{}-best_{}_{:.5f}'.format(global_step, eval_metrics[i], measure)
print('New best for {}. Saving model: {}'.format(eval_metrics[i], model_save_name))
checkpoint = {'global_step': global_step,
'model': model.state_dict(),
'best_eval_measures_higher_better': best_eval_measures_higher_better,
'best_eval_measures_lower_better': best_eval_measures_lower_better,
'best_eval_steps': best_eval_steps
}
torch.save(checkpoint, args.log_directory + '/' + args.model_name + model_save_name)
eval_summary_writer.flush()
model.train()
block_print()
set_misc(model)
enable_print()
model_just_loaded = False
global_step += 1
epoch += 1
if not args.multiprocessing_distributed or (args.multiprocessing_distributed and args.rank % ngpus_per_node == 0):
writer.close()
if args.do_online_eval:
eval_summary_writer.close()
def main():
if args.mode != 'train':
print('bts_main.py is only for training. Use bts_test.py instead.')
return -1
model_filename = args.model_name + '.py'
command = 'mkdir ' + args.log_directory + '/' + args.model_name
os.system(command)
args_out_path = args.log_directory + '/' + args.model_name + '/' + sys.argv[1]
command = 'cp ' + sys.argv[1] + ' ' + args_out_path
os.system(command)
if args.checkpoint_path == '':
model_out_path = args.log_directory + '/' + args.model_name + '/' + model_filename
command = 'cp bts.py ' + model_out_path
os.system(command)
aux_out_path = args.log_directory + '/' + args.model_name + '/.'
command = 'cp bts_main.py ' + aux_out_path
os.system(command)
command = 'cp bts_dataloader.py ' + aux_out_path
os.system(command)
else:
loaded_model_dir = os.path.dirname(args.checkpoint_path)
loaded_model_name = os.path.basename(loaded_model_dir)
loaded_model_filename = loaded_model_name + '.py'
model_out_path = args.log_directory + '/' + args.model_name + '/' + model_filename
command = 'cp ' + loaded_model_dir + '/' + loaded_model_filename + ' ' + model_out_path
os.system(command)
torch.cuda.empty_cache()
args.distributed = args.world_size > 1 or args.multiprocessing_distributed
ngpus_per_node = torch.cuda.device_count()
if ngpus_per_node > 1 and not args.multiprocessing_distributed:
print("This machine has more than 1 gpu. Please specify --multiprocessing_distributed, or set \'CUDA_VISIBLE_DEVICES=0\'")
return -1
if args.do_online_eval:
print("You have specified --do_online_eval.")
print("This will evaluate the model every eval_freq {} steps and save best models for individual eval metrics."
.format(args.eval_freq))
if args.multiprocessing_distributed:
args.world_size = ngpus_per_node * args.world_size
mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
else:
main_worker(args.gpu, ngpus_per_node, args)
if __name__ == '__main__':
main()
| 28,993 | 47.976351 | 165 | py |
LDU | LDU-main/monocular_depth_estimation/pytorch/bts_ldu.py | # Copyright (C) 2019 Jin Han Lee
#
# This file is a part of BTS.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
import torch
import torch.nn as nn
import torch.nn.functional as torch_nn_func
import math
import numpy as np
# This sets the batch norm layers in pytorch as if {'is_training': False, 'scale': True} in tensorflow
def bn_init_as_tf(m):
if isinstance(m, nn.BatchNorm2d):
m.track_running_stats = True # These two lines enable using stats (moving mean and var) loaded from pretrained model
m.eval() # or zero mean and variance of one if the batch norm layer has no pretrained values
m.affine = True
m.requires_grad = True
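# Typically applied to a whole module tree, e.g. model.apply(bn_init_as_tf);
# see set_misc() in bts_main.py, which does this when --bn_no_track_stats is set.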
def weights_init_xavier(m):
if isinstance(m, nn.Conv2d):
torch.nn.init.xavier_uniform_(m.weight)
if m.bias is not None:
torch.nn.init.zeros_(m.bias)
#_______________________________________________________________________________________#
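# silog_loss: scale-invariant log error used as the depth training loss.
# With d_i = log(depth_est_i) - log(depth_gt_i) over the valid-pixel mask,
#   loss = 10 * sqrt(mean(d^2) - variance_focus * mean(d)^2)
# where variance_focus in [0, 1] shifts the emphasis towards minimising the
# variance of the log error rather than its absolute magnitude.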
class silog_loss(nn.Module):
def __init__(self, variance_focus):
super(silog_loss, self).__init__()
self.variance_focus = variance_focus
def forward(self, depth_est, depth_gt, mask):
d = torch.log(depth_est[mask]) - torch.log(depth_gt[mask])
return torch.sqrt((d ** 2).mean() - self.variance_focus * (d.mean() ** 2)) * 10.0
class entropy_loss(nn.Module):
def __init__(self):
super(entropy_loss, self).__init__()
def forward(self, embedding):
embedding = nn.Softmax(dim=1)(embedding)
minus_entropy = embedding * torch.log(embedding)
minus_entropy = torch.sum(minus_entropy, dim=1)
return minus_entropy.mean()
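# uncertainty_loss: the target for the uncertainty head is the absolute error of
# the detached depth prediction, normalised by max_depth and clipped to [0, 1];
# the uncertainty logits are regressed onto it with BCEWithLogitsLoss
# (pos_weight = 5), so large predicted uncertainty should line up with large
# depth error.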
class uncertainty_loss(nn.Module):
def __init__(self, args):
super(uncertainty_loss, self).__init__()
self.max_depth = args.max_depth
def forward(self, uncer, final_depth, depth_gt, mask):
abs_error = abs(final_depth.detach() - depth_gt)/self.max_depth
abs_error[abs_error>1] = 1
abs_error = abs_error[mask].detach()
loss = nn.BCEWithLogitsLoss(pos_weight = torch.tensor([5.0]).cuda(), reduction='mean')(uncer[mask], abs_error)
return loss
class dissimilar_loss(nn.Module):
def __init__(self):
super(dissimilar_loss, self).__init__()
def forward(self, protos):
loss = -1 * torch.mean(torch.cdist(protos, protos))
return loss
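# dissimilar_loss maximises the mean pairwise distance between prototypes (note
# the negative sign), keeping the learned prototype vectors spread apart.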
#_______________________________________________________________________________________#
class atrous_conv(nn.Sequential):
def __init__(self, in_channels, out_channels, dilation, apply_bn_first=True):
super(atrous_conv, self).__init__()
self.atrous_conv = torch.nn.Sequential()
if apply_bn_first:
self.atrous_conv.add_module('first_bn', nn.BatchNorm2d(in_channels, momentum=0.01, affine=True, track_running_stats=True, eps=1.1e-5))
self.atrous_conv.add_module('aconv_sequence', nn.Sequential(nn.ReLU(),
nn.Conv2d(in_channels=in_channels, out_channels=out_channels*2, bias=False, kernel_size=1, stride=1, padding=0),
nn.BatchNorm2d(out_channels*2, momentum=0.01, affine=True, track_running_stats=True),
nn.ReLU(),
nn.Conv2d(in_channels=out_channels * 2, out_channels=out_channels, bias=False, kernel_size=3, stride=1,
padding=(dilation, dilation), dilation=dilation)))
def forward(self, x):
return self.atrous_conv.forward(x)
class upconv(nn.Module):
def __init__(self, in_channels, out_channels, ratio=2):
super(upconv, self).__init__()
self.elu = nn.ELU()
self.conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, bias=False, kernel_size=3, stride=1, padding=1)
self.ratio = ratio
def forward(self, x):
up_x = torch_nn_func.interpolate(x, scale_factor=self.ratio, mode='nearest')
out = self.conv(up_x)
out = self.elu(out)
return out
class reduction_1x1(nn.Sequential):
def __init__(self, num_in_filters, num_out_filters, max_depth, is_final=False):
super(reduction_1x1, self).__init__()
self.max_depth = max_depth
self.is_final = is_final
self.sigmoid = nn.Sigmoid()
self.reduc = torch.nn.Sequential()
while num_out_filters >= 4:
if num_out_filters < 8:
if self.is_final:
self.reduc.add_module('final', torch.nn.Sequential(nn.Conv2d(num_in_filters, out_channels=1, bias=False,
kernel_size=1, stride=1, padding=0),
nn.Sigmoid()))
else:
self.reduc.add_module('plane_params', torch.nn.Conv2d(num_in_filters, out_channels=3, bias=False,
kernel_size=1, stride=1, padding=0))
break
else:
self.reduc.add_module('inter_{}_{}'.format(num_in_filters, num_out_filters),
torch.nn.Sequential(nn.Conv2d(in_channels=num_in_filters, out_channels=num_out_filters,
bias=False, kernel_size=1, stride=1, padding=0),
nn.ELU()))
num_in_filters = num_out_filters
num_out_filters = num_out_filters // 2
def forward(self, net):
net = self.reduc.forward(net)
if not self.is_final:
theta = self.sigmoid(net[:, 0, :, :]) * math.pi / 3
phi = self.sigmoid(net[:, 1, :, :]) * math.pi * 2
dist = self.sigmoid(net[:, 2, :, :]) * self.max_depth
n1 = torch.mul(torch.sin(theta), torch.cos(phi)).unsqueeze(1)
n2 = torch.mul(torch.sin(theta), torch.sin(phi)).unsqueeze(1)
n3 = torch.cos(theta).unsqueeze(1)
n4 = dist.unsqueeze(1)
net = torch.cat([n1, n2, n3, n4], dim=1)
return net
class local_planar_guidance(nn.Module):
def __init__(self, upratio):
super(local_planar_guidance, self).__init__()
self.upratio = upratio
self.u = torch.arange(self.upratio).reshape([1, 1, self.upratio]).float()
self.v = torch.arange(int(self.upratio)).reshape([1, self.upratio, 1]).float()
self.upratio = float(upratio)
def forward(self, plane_eq, focal):
plane_eq_expanded = torch.repeat_interleave(plane_eq, int(self.upratio), 2)
plane_eq_expanded = torch.repeat_interleave(plane_eq_expanded, int(self.upratio), 3)
n1 = plane_eq_expanded[:, 0, :, :]
n2 = plane_eq_expanded[:, 1, :, :]
n3 = plane_eq_expanded[:, 2, :, :]
n4 = plane_eq_expanded[:, 3, :, :]
u = self.u.repeat(plane_eq.size(0), plane_eq.size(2) * int(self.upratio), plane_eq.size(3)).cuda()
u = (u - (self.upratio - 1) * 0.5) / self.upratio
v = self.v.repeat(plane_eq.size(0), plane_eq.size(2), plane_eq.size(3) * int(self.upratio)).cuda()
v = (v - (self.upratio - 1) * 0.5) / self.upratio
return n4 / (n1 * u + n2 * v + n3)
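# local_planar_guidance turns coarse per-cell plane coefficients into full-
# resolution depth: each cell's plane (unit normal n1..n3, distance n4) is
# evaluated at the normalised sub-pixel offsets (u, v) of its upratio x upratio
# block as depth = n4 / (n1*u + n2*v + n3).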
class Distanceminimi_Layer_learned(nn.Module):
def __init__(self, in_features=0, out_features=0, dist='lin'):
super(Distanceminimi_Layer_learned, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.dist=dist
self.omega = nn.Parameter(torch.Tensor(1, out_features, in_features, 1, 1))
self.reset_parameters()
def reset_parameters(self):
nn.init.normal_(self.omega, mean=0, std=1)#/self.out_features)
def forward(self, x):
x = x.unsqueeze(1)
out = torch_nn_func.cosine_similarity(x, self.omega, dim=2, eps=1e-30)
return out, self.omega
class bts(nn.Module):
def __init__(self, params, feat_out_channels, num_features=512, nb_prototype = 80):
super(bts, self).__init__()
self.params = params
self.upconv5 = upconv(feat_out_channels[4], num_features)
self.bn5 = nn.BatchNorm2d(num_features, momentum=0.01, affine=True, eps=1.1e-5)
self.conv5 = torch.nn.Sequential(nn.Conv2d(num_features + feat_out_channels[3], num_features, 3, 1, 1, bias=False),
nn.ELU())
self.upconv4 = upconv(num_features, num_features // 2)
self.bn4 = nn.BatchNorm2d(num_features // 2, momentum=0.01, affine=True, eps=1.1e-5)
self.conv4 = torch.nn.Sequential(nn.Conv2d(num_features // 2 + feat_out_channels[2], num_features // 2, 3, 1, 1, bias=False),
nn.ELU())
self.bn4_2 = nn.BatchNorm2d(num_features // 2, momentum=0.01, affine=True, eps=1.1e-5)
self.daspp_3 = atrous_conv(num_features // 2, num_features // 4, 3, apply_bn_first=False)
self.daspp_6 = atrous_conv(num_features // 2 + num_features // 4 + feat_out_channels[2], num_features // 4, 6)
self.daspp_12 = atrous_conv(num_features + feat_out_channels[2], num_features // 4, 12)
self.daspp_18 = atrous_conv(num_features + num_features // 4 + feat_out_channels[2], num_features // 4, 18)
self.daspp_24 = atrous_conv(num_features + num_features // 2 + feat_out_channels[2], num_features // 4, 24)
self.daspp_conv = torch.nn.Sequential(nn.Conv2d(num_features + num_features // 2 + num_features // 4, num_features // 4, 3, 1, 1, bias=False),
nn.ELU())
self.reduc8x8 = reduction_1x1(num_features // 4, num_features // 4, self.params.max_depth)
self.lpg8x8 = local_planar_guidance(8)
self.upconv3 = upconv(num_features // 4, num_features // 4)
self.bn3 = nn.BatchNorm2d(num_features // 4, momentum=0.01, affine=True, eps=1.1e-5)
self.conv3 = torch.nn.Sequential(nn.Conv2d(num_features // 4 + feat_out_channels[1] + 1, num_features // 4, 3, 1, 1, bias=False),
nn.ELU())
self.reduc4x4 = reduction_1x1(num_features // 4, num_features // 8, self.params.max_depth)
self.lpg4x4 = local_planar_guidance(4)
self.upconv2 = upconv(num_features // 4, num_features // 8)
self.bn2 = nn.BatchNorm2d(num_features // 8, momentum=0.01, affine=True, eps=1.1e-5)
self.conv2 = torch.nn.Sequential(nn.Conv2d(num_features // 8 + feat_out_channels[0] + 1, num_features // 8, 3, 1, 1, bias=False),
nn.ELU())
self.reduc2x2 = reduction_1x1(num_features // 8, num_features // 16, self.params.max_depth)
self.lpg2x2 = local_planar_guidance(2)
self.upconv1 = upconv(num_features // 8, num_features // 16)
self.reduc1x1 = reduction_1x1(num_features // 16, num_features // 32, self.params.max_depth, is_final=True)
self.conv1 = torch.nn.Sequential(nn.Conv2d(num_features // 16 + 4, num_features // 16, 3, 1, 1, bias=False),
nn.ELU())
self.DMlayer = Distanceminimi_Layer_learned(in_features=(num_features // 16), out_features = nb_prototype, dist='cos')
self.DMBN = nn.BatchNorm2d(nb_prototype)
self.get_uncer = nn.Conv2d(nb_prototype, 1, 1)
self.get_depth = nn.Sequential(nn.Conv2d(nb_prototype, 1, 1), nn.Sigmoid())
def forward(self, features, focal):
skip0, skip1, skip2, skip3 = features[0], features[1], features[2], features[3]
dense_features = torch.nn.ReLU()(features[4])
upconv5 = self.upconv5(dense_features) # H/16
upconv5 = self.bn5(upconv5)
concat5 = torch.cat([upconv5, skip3], dim=1)
iconv5 = self.conv5(concat5)
upconv4 = self.upconv4(iconv5) # H/8
upconv4 = self.bn4(upconv4)
concat4 = torch.cat([upconv4, skip2], dim=1)
iconv4 = self.conv4(concat4)
iconv4 = self.bn4_2(iconv4)
daspp_3 = self.daspp_3(iconv4)
concat4_2 = torch.cat([concat4, daspp_3], dim=1)
daspp_6 = self.daspp_6(concat4_2)
concat4_3 = torch.cat([concat4_2, daspp_6], dim=1)
daspp_12 = self.daspp_12(concat4_3)
concat4_4 = torch.cat([concat4_3, daspp_12], dim=1)
daspp_18 = self.daspp_18(concat4_4)
concat4_5 = torch.cat([concat4_4, daspp_18], dim=1)
daspp_24 = self.daspp_24(concat4_5)
concat4_daspp = torch.cat([iconv4, daspp_3, daspp_6, daspp_12, daspp_18, daspp_24], dim=1)
daspp_feat = self.daspp_conv(concat4_daspp)
reduc8x8 = self.reduc8x8(daspp_feat)
plane_normal_8x8 = reduc8x8[:, :3, :, :]
plane_normal_8x8 = torch_nn_func.normalize(plane_normal_8x8, 2, 1)
plane_dist_8x8 = reduc8x8[:, 3, :, :]
plane_eq_8x8 = torch.cat([plane_normal_8x8, plane_dist_8x8.unsqueeze(1)], 1)
depth_8x8 = self.lpg8x8(plane_eq_8x8, focal)
depth_8x8_scaled = depth_8x8.unsqueeze(1) / self.params.max_depth
depth_8x8_scaled_ds = torch_nn_func.interpolate(depth_8x8_scaled, scale_factor=0.25, mode='nearest')
upconv3 = self.upconv3(daspp_feat) # H/4
upconv3 = self.bn3(upconv3)
concat3 = torch.cat([upconv3, skip1, depth_8x8_scaled_ds], dim=1)
iconv3 = self.conv3(concat3)
reduc4x4 = self.reduc4x4(iconv3)
plane_normal_4x4 = reduc4x4[:, :3, :, :]
plane_normal_4x4 = torch_nn_func.normalize(plane_normal_4x4, 2, 1)
plane_dist_4x4 = reduc4x4[:, 3, :, :]
plane_eq_4x4 = torch.cat([plane_normal_4x4, plane_dist_4x4.unsqueeze(1)], 1)
depth_4x4 = self.lpg4x4(plane_eq_4x4, focal)
depth_4x4_scaled = depth_4x4.unsqueeze(1) / self.params.max_depth
depth_4x4_scaled_ds = torch_nn_func.interpolate(depth_4x4_scaled, scale_factor=0.5, mode='nearest')
upconv2 = self.upconv2(iconv3) # H/2
upconv2 = self.bn2(upconv2)
concat2 = torch.cat([upconv2, skip0, depth_4x4_scaled_ds], dim=1)
iconv2 = self.conv2(concat2)
reduc2x2 = self.reduc2x2(iconv2)
plane_normal_2x2 = reduc2x2[:, :3, :, :]
plane_normal_2x2 = torch_nn_func.normalize(plane_normal_2x2, 2, 1)
plane_dist_2x2 = reduc2x2[:, 3, :, :]
plane_eq_2x2 = torch.cat([plane_normal_2x2, plane_dist_2x2.unsqueeze(1)], 1)
depth_2x2 = self.lpg2x2(plane_eq_2x2, focal)
depth_2x2_scaled = depth_2x2.unsqueeze(1) / self.params.max_depth
upconv1 = self.upconv1(iconv2)
reduc1x1 = self.reduc1x1(upconv1)
concat1 = torch.cat([upconv1, reduc1x1, depth_2x2_scaled, depth_4x4_scaled, depth_8x8_scaled], dim=1)
feature_output = self.conv1(concat1)
        # Before the last layer, the DM layer is added: decoder features are matched against the
        # learned prototypes, and the resulting embedding feeds both the depth and uncertainty heads.
embedding_, omega = self.DMlayer(feature_output)
embedding = torch.exp(-embedding_)
out = self.DMBN(embedding)
final_uncer = self.get_uncer(out)
final_depth = self.get_depth(out) * self.params.max_depth
if self.training:
return final_depth, final_uncer, omega.squeeze(), embedding_
else:
return final_depth, torch.sigmoid(final_uncer)
class encoder(nn.Module):
def __init__(self, params):
super(encoder, self).__init__()
self.params = params
import torchvision.models as models
if params.encoder == 'densenet121_bts':
self.base_model = models.densenet121(pretrained=True).features
self.feat_names = ['relu0', 'pool0', 'transition1', 'transition2', 'norm5']
self.feat_out_channels = [64, 64, 128, 256, 1024]
elif params.encoder == 'densenet161_bts':
self.base_model = models.densenet161(pretrained=True).features
self.feat_names = ['relu0', 'pool0', 'transition1', 'transition2', 'norm5']
self.feat_out_channels = [96, 96, 192, 384, 2208]
elif params.encoder == 'resnet50_bts':
self.base_model = models.resnet50(pretrained=True)
self.feat_names = ['relu', 'layer1', 'layer2', 'layer3', 'layer4']
self.feat_out_channels = [64, 256, 512, 1024, 2048]
elif params.encoder == 'resnet101_bts':
self.base_model = models.resnet101(pretrained=True)
self.feat_names = ['relu', 'layer1', 'layer2', 'layer3', 'layer4']
self.feat_out_channels = [64, 256, 512, 1024, 2048]
elif params.encoder == 'resnext50_bts':
self.base_model = models.resnext50_32x4d(pretrained=True)
self.feat_names = ['relu', 'layer1', 'layer2', 'layer3', 'layer4']
self.feat_out_channels = [64, 256, 512, 1024, 2048]
elif params.encoder == 'resnext101_bts':
self.base_model = models.resnext101_32x8d(pretrained=True)
self.feat_names = ['relu', 'layer1', 'layer2', 'layer3', 'layer4']
self.feat_out_channels = [64, 256, 512, 1024, 2048]
elif params.encoder == 'mobilenetv2_bts':
self.base_model = models.mobilenet_v2(pretrained=True).features
self.feat_inds = [2, 4, 7, 11, 19]
self.feat_out_channels = [16, 24, 32, 64, 1280]
self.feat_names = []
else:
print('Not supported encoder: {}'.format(params.encoder))
def forward(self, x):
feature = x
skip_feat = []
i = 1
for k, v in self.base_model._modules.items():
if 'fc' in k or 'avgpool' in k:
continue
feature = v(feature)
if self.params.encoder == 'mobilenetv2_bts':
if i == 2 or i == 4 or i == 7 or i == 11 or i == 19:
skip_feat.append(feature)
else:
if any(x in k for x in self.feat_names):
skip_feat.append(feature)
i = i + 1
return skip_feat
class BtsModel(nn.Module):
def __init__(self, params):
super(BtsModel, self).__init__()
self.encoder = encoder(params)
self.decoder = bts(params, self.encoder.feat_out_channels, params.bts_size, params.nb_proto)
def forward(self, x, focal):
skip_feat = self.encoder(x)
return self.decoder(skip_feat, focal) | 19,379 | 47.693467 | 180 | py |
LDU | LDU-main/monocular_depth_estimation/pytorch/bts_test_kitti_ldu.py | # Copyright (C) 2019 Jin Han Lee
#
# This file is a part of BTS.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
from __future__ import absolute_import, division, print_function
import os
import argparse
import time
import numpy as np
import sys
import torch
from torch.autograd import Variable
from tqdm import tqdm
from bts_dataloader import *
from sparsification import sparsification_error_gpu
from bts_ldu import BtsModel
def convert_arg_line_to_args(arg_line):
for arg in arg_line.split():
if not arg.strip():
continue
yield arg
parser = argparse.ArgumentParser(description='BTS PyTorch implementation.', fromfile_prefix_chars='@')
parser.convert_arg_line_to_args = convert_arg_line_to_args
parser.add_argument('--model_name', type=str, help='model name', default='bts_nyu_v2')
parser.add_argument('--encoder', type=str, help='type of encoder, vgg or densenet121_bts or densenet161_bts',
default='densenet161_bts')
parser.add_argument('--data_path_eval', type=str, help='path to the data', required=True)
parser.add_argument('--gt_path_eval', type=str, help='path to the ground truth data', required=True)
parser.add_argument('--filenames_file_eval', type=str, help='path to the filenames text file', required=True)
parser.add_argument('--input_height', type=int, help='input height', default=480)
parser.add_argument('--input_width', type=int, help='input width', default=640)
parser.add_argument('--max_depth', type=float, help='maximum depth in estimation', default=80)
parser.add_argument('--checkpoint_path', type=str, help='path to a specific checkpoint to load', default='')
parser.add_argument('--dataset', type=str, help='dataset to train on, make3d or nyudepthv2', default='nyu')
parser.add_argument('--do_kb_crop', help='if set, crop input images as kitti benchmark images', action='store_true')
parser.add_argument('--garg_crop', help='if set, crops according to Garg ECCV16', action='store_true')
parser.add_argument('--bts_size', type=int, help='initial num_filters in bts', default=512)
parser.add_argument('--clip_gt', help='if set, clipping the ground truth to the min-max depth', action='store_true')
parser.add_argument('--min_depth_eval', type=float, help='minimum depth for evaluation', default=1e-3)
parser.add_argument('--max_depth_eval', type=float, help='maximum depth for evaluation', default=80)
parser.add_argument('--nb_proto', type=int, help='initial num_proto in bts', default=30)
if sys.argv.__len__() == 2:
arg_filename_with_prefix = '@' + sys.argv[1]
args = parser.parse_args([arg_filename_with_prefix])
else:
args = parser.parse_args()
def get_num_lines(file_path):
f = open(file_path, 'r')
lines = f.readlines()
f.close()
return len(lines)
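# Standard monocular depth evaluation metrics: threshold accuracies d1-d3 (max(gt/pred, pred/gt) < 1.25, 1.25^2, 1.25^3),
# RMSE and log-RMSE, absolute and squared relative errors, scale-invariant log error (SILog) and log10 error.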
def compute_errors(gt, pred):
thresh = np.maximum((gt / pred), (pred / gt))
d1 = (thresh < 1.25).mean()
d2 = (thresh < 1.25 ** 2).mean()
d3 = (thresh < 1.25 ** 3).mean()
rmse = (gt - pred) ** 2
rmse = np.sqrt(rmse.mean())
rmse_log = (np.log(gt) - np.log(pred)) ** 2
rmse_log = np.sqrt(rmse_log.mean())
abs_rel = np.mean(np.abs(gt - pred) / gt)
sq_rel = np.mean(((gt - pred)**2) / gt)
err = np.log(pred) - np.log(gt)
silog = np.sqrt(np.mean(err ** 2) - np.mean(err) ** 2) * 100
err = np.abs(np.log10(pred) - np.log10(gt))
log10 = np.mean(err)
return silog, log10, abs_rel, sq_rel, rmse, rmse_log, d1, d2, d3
inv_normalize = transforms.Normalize(
mean=[-0.485/0.229, -0.456/0.224, -0.406/0.225],
std=[1/0.229, 1/0.224, 1/0.225]
)
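# inv_normalize undoes the ImageNet normalization applied in the dataloader, mapping normalized
# image tensors back to RGB, e.g. for visualization.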
def test():
"""Test function."""
args.mode = 'online_eval'
args.distributed = False
dataloader = BtsDataLoader(args, 'online_eval')
model = BtsModel(params=args)
model = torch.nn.DataParallel(model)
if os.path.exists(args.checkpoint_path):
checkpoint = torch.load(args.checkpoint_path)
else:
print('Wrong checkpoint path. Exit.')
exit()
model.load_state_dict(checkpoint['model'])
model.eval()
model.cuda()
num_params = sum([np.prod(p.size()) for p in model.parameters()])
print("Total number of parameters: {}".format(num_params))
num_test_samples = get_num_lines(args.filenames_file_eval)
print('now testing {} files with {}'.format(num_test_samples, args.checkpoint_path))
start_time = time.time()
with torch.no_grad():
num_samples = len(dataloader.data)
print(num_samples)
nb_valid = 0
silog = np.zeros(num_samples, np.float32)
log10 = np.zeros(num_samples, np.float32)
rms = np.zeros(num_samples, np.float32)
log_rms = np.zeros(num_samples, np.float32)
abs_rel = np.zeros(num_samples, np.float32)
sq_rel = np.zeros(num_samples, np.float32)
d1 = np.zeros(num_samples, np.float32)
d2 = np.zeros(num_samples, np.float32)
d3 = np.zeros(num_samples, np.float32)
        hist_pred_rmses = 0
        hist_oracle_rmses = 0
        nb_remain_rmses = 0
ausc_rmse = np.zeros(num_samples, np.float32)
        hist_pred_absrels = 0
        hist_oracle_absrels = 0
        nb_remain_absrels = 0
ausc_absrel = np.zeros(num_samples, np.float32)
spar_rmse = 0
spar_absr = 0
        for i, sample in enumerate(tqdm(dataloader.data)):
is_valid = sample['has_valid_depth']
if not is_valid: continue
else: nb_valid += 1
image = Variable(sample['image'].cuda())
focal = Variable(sample['focal'].cuda())
depth_gt = Variable(sample['depth'].cuda())
# Predict
depth_gt = depth_gt.cpu().numpy().squeeze()
depth_est, uncertainty = model(image, focal)
depth_est = depth_est.cpu().numpy().squeeze()
uncertainty = uncertainty.cpu().numpy().squeeze()
if args.clip_gt:
valid_mask = np.logical_and(depth_gt > args.min_depth_eval, depth_gt < args.max_depth_eval)
else:
valid_mask = (depth_gt > args.min_depth_eval)
            # We are using online_eval here; the following reproduces the kb-crop handling used by the test case in the original work.
if args.do_kb_crop:
height, width = depth_gt.shape
top_margin = int(height - 352)
left_margin = int((width - 1216) / 2)
pred_depth_uncropped = np.zeros((height, width), dtype=np.float32)
pred_depth_uncropped[top_margin:top_margin + 352, left_margin:left_margin + 1216] = depth_est
depth_est = pred_depth_uncropped
pred_depth_uncropped = np.zeros((height, width), dtype=np.float32)
pred_depth_uncropped[top_margin:top_margin + 352, left_margin:left_margin + 1216] = uncertainty
uncertainty = pred_depth_uncropped
if args.clip_gt:
depth_est[depth_est < args.min_depth_eval] = args.min_depth_eval
depth_est[depth_est > args.max_depth_eval] = args.max_depth_eval
depth_est[np.isinf(depth_est)] = args.max_depth_eval
depth_gt[np.isinf(depth_gt)] = args.max_depth_eval
depth_gt[np.isnan(depth_gt)] = args.min_depth_eval
if args.garg_crop:
gt_height, gt_width = depth_gt.shape
eval_mask = np.zeros(valid_mask.shape)
eval_mask[int(0.40810811 * gt_height):int(0.99189189 * gt_height), int(0.03594771 * gt_width):int(0.96405229 * gt_width)] = 1
valid_mask = np.logical_and(valid_mask, eval_mask)
uncertainty = torch.tensor(uncertainty).cuda()
depth_est = torch.tensor(depth_est).cuda()
depth_gt = torch.tensor(depth_gt).cuda()
valid_mask = torch.tensor(valid_mask).cuda()
hist_pred_rmse, hist_oracle_rmse, nb_remain_rmse, ausc_rmse[i] = sparsification_error_gpu(unc_tensor = uncertainty[valid_mask], pred_tensor = depth_est[valid_mask], gt_tensor = depth_gt[valid_mask], is_rmse = True)
hist_pred_rmses += hist_pred_rmse
hist_oracle_rmses += hist_oracle_rmse
nb_remain_rmses += nb_remain_rmse
spar_rmse += np.trapz((hist_pred_rmse - hist_oracle_rmse), x = list(np.arange(start=0.0, stop=1.0, step=(1/100))))
hist_pred_absrel, hist_oracle_absrel, nb_remain_absrel, ausc_absrel[i] = sparsification_error_gpu(unc_tensor = uncertainty[valid_mask], pred_tensor = depth_est[valid_mask], gt_tensor = depth_gt[valid_mask], is_rmse = False)
hist_pred_absrels += hist_pred_absrel
hist_oracle_absrels += hist_oracle_absrel
nb_remain_absrels += nb_remain_absrel
spar_absr += np.trapz((hist_pred_absrel - hist_oracle_absrel), x = list(np.arange(start=0.0, stop=1.0, step=(1/100))))
depth_est = depth_est.cpu().numpy()
depth_gt = depth_gt.cpu().numpy()
valid_mask = valid_mask.cpu().numpy()
silog[i], log10[i], abs_rel[i], sq_rel[i], rms[i], log_rms[i], d1[i], d2[i], d3[i] = compute_errors(depth_gt[valid_mask], depth_est[valid_mask])
print(nb_valid)
print("{:>7}, {:>7}, {:>7}, {:>7}, {:>7}, {:>7}, {:>7}, {:>7}, {:>7}".format(
'd1', 'd2', 'd3', 'AbsRel', 'SqRel', 'RMSE', 'RMSElog', 'SILog', 'log10'))
print("{:7.3f}, {:7.3f}, {:7.3f}, {:7.3f}, {:7.3f}, {:7.3f}, {:7.3f}, {:7.3f}, {:7.3f}".format(
d1.sum()/nb_valid, d2.sum()/nb_valid, d3.sum()/nb_valid,
abs_rel.sum()/nb_valid, sq_rel.sum()/nb_valid, rms.sum()/nb_valid,
log_rms.sum()/nb_valid, silog.sum()/nb_valid, log10.sum()/nb_valid))
hist_pred_rmses = hist_pred_rmses/nb_valid
hist_oracle_rmses = hist_oracle_rmses/nb_valid
nb_remain_rmses = nb_remain_rmses/nb_valid
hist_pred_absrels = hist_pred_absrels/nb_valid
hist_oracle_absrels = hist_oracle_absrels/nb_valid
nb_remain_absrels = nb_remain_absrels/nb_valid
spar_rmse = spar_rmse/nb_valid
spar_absr = spar_absr/nb_valid
# to verify that the averages obtained by the two different methods are consistent.
print('ausc_rmse', np.trapz((hist_pred_rmses - hist_oracle_rmses), x = list(np.arange(start=0.0, stop=1.0, step=(1/100)))))
print('ausc_abrel', np.trapz((hist_pred_absrels - hist_oracle_absrels), x = list(np.arange(start=0.0, stop=1.0, step=(1/100)))))
print('ausc_rmse', spar_rmse)
print('ausc_abrel', spar_absr)
    elapsed_time = time.time() - start_time
    print('Elapsed time: %s' % str(elapsed_time))
    print('Done.')
if __name__ == '__main__':
test() | 11,451 | 40.492754 | 235 | py |
LDU | LDU-main/monocular_depth_estimation/pytorch/sparsification.py | import numpy as np
import torch
"""Calculate the sparsification error.
Calculate the sparsification error for a given array according to a reference array.
Args:
    unc_tensor: Flattened estimated uncertainty tensor.
    pred_tensor: Flattened depth prediction tensor.
    gt_tensor: Flattened ground truth tensor.
    nb_bins: Number of bins used for the sparsification curve. At each step, 1/nb_bins * 100% of the items with the highest uncertainty are removed.
    return_hist: if True, return the histograms needed to draw the sparsification curve; otherwise, directly return the sparsification error.
    is_rmse: if True, use RMSE as the error metric; otherwise, use the absolute relative error.
Returns:
    If return_hist is False, the sparsification error (area between the uncertainty-ordered and oracle-ordered error curves).
    If return_hist is True (default), the two curve histograms, the number of remaining items per step, and the sparsification error.
"""
def sparsification_error_gpu(unc_tensor, pred_tensor, gt_tensor, nb_bins = 100, return_hist=True, is_rmse = True):
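    # Example (a minimal usage sketch, not part of the original repo): assuming `unc`, `pred` and
    # `gt` are flattened 1-D CUDA tensors over the valid pixels of one image, the sparsification
    # curve and its area w.r.t. RMSE can be obtained with:
    #   hist_pred, hist_oracle, nb_remain, ausc = sparsification_error_gpu(
    #       unc_tensor=unc, pred_tensor=pred, gt_tensor=gt, nb_bins=100, is_rmse=True)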
hist_pred = []
hist_oracle = []
nb_remain = []
# From small to big
argsorted_U = torch.argsort(unc_tensor)
err_tensor = abs(pred_tensor - gt_tensor)
if not is_rmse:
err_tensor = err_tensor/gt_tensor
else:
err_tensor = err_tensor**2
argsorted_E = torch.argsort(err_tensor)
total_len = len(unc_tensor)
sigma_pred_curves = []
error_curves = []
fractions = list(torch.arange(start=0.0, end=1.0, step=(1/nb_bins)))
for fraction in fractions:
if is_rmse:
sigma_pred_curve = torch.mean(err_tensor[argsorted_U[0:int((1.0-fraction)*total_len)]])
error_curve = torch.mean(err_tensor[argsorted_E[0:int((1.0-fraction)*total_len)]])
sigma_pred_curve = torch.sqrt(sigma_pred_curve)
error_curve = torch.sqrt(error_curve)
else:
sigma_pred_curve = torch.mean(err_tensor[argsorted_U[0:int((1.0-fraction)*total_len)]])
error_curve = torch.mean(err_tensor[argsorted_E[0:int((1.0-fraction)*total_len)]])
sigma_pred_curves.append(sigma_pred_curve)
error_curves.append(error_curve)
nb_remain.append(int((1.0-fraction)*total_len))
hist_oracle = torch.tensor(error_curves)/error_curves[0].cpu()
hist_pred = torch.tensor(sigma_pred_curves)/sigma_pred_curves[0].cpu()
nb_remain = torch.tensor(nb_remain)
sparsification_errors_pred = torch.trapz((hist_pred - hist_oracle), torch.arange(start=0.0, end=1.0, step=(1/nb_bins)))
    # Without normalization (kept for reference); in our paper we use the normalized version above.
# hist_oracle = torch.tensor(error_curves)
# hist_pred = torch.tensor(sigma_pred_curves)
# nb_remain = torch.tensor(nb_remain)
# sparsification_errors_pred = torch.trapz((hist_pred), torch.arange(start=0.0, end=1.0, step=(1/nb_bins))) - torch.trapz((hist_oracle), torch.arange(start=0.0, end=1.0, step=(1/nb_bins)))
if return_hist:
return hist_pred, hist_oracle, nb_remain, sparsification_errors_pred
else:
return sparsification_errors_pred | 3,034 | 42.357143 | 192 | py |
LDU | LDU-main/monocular_depth_estimation/pytorch/bts_dataloader.py | # Copyright (C) 2019 Jin Han Lee
#
# This file is a part of BTS.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
import torch.utils.data.distributed
from torchvision import transforms
from PIL import Image
import os
import random
from distributed_sampler_no_evenly_divisible import *
def _is_pil_image(img):
return isinstance(img, Image.Image)
def _is_numpy_image(img):
return isinstance(img, np.ndarray) and (img.ndim in {2, 3})
def preprocessing_transforms(mode):
return transforms.Compose([
ToTensor(mode=mode)
])
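# BtsDataLoader wraps three modes: 'train' (optionally distributed, shuffled, with augmentation),
# 'online_eval' (single-worker loader over the evaluation split, keeping ground truth when available)
# and 'test' (images only, no ground truth).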
class BtsDataLoader(object):
def __init__(self, args, mode):
if mode == 'train':
self.training_samples = DataLoadPreprocess(args, mode, transform=preprocessing_transforms(mode))
if args.distributed:
self.train_sampler = torch.utils.data.distributed.DistributedSampler(self.training_samples)
else:
self.train_sampler = None
self.data = DataLoader(self.training_samples, args.batch_size,
shuffle=(self.train_sampler is None),
num_workers=args.num_threads,
pin_memory=True,
sampler=self.train_sampler)
elif mode == 'online_eval':
self.testing_samples = DataLoadPreprocess(args, mode, transform=preprocessing_transforms(mode))
if args.distributed:
# self.eval_sampler = torch.utils.data.distributed.DistributedSampler(self.testing_samples, shuffle=False)
self.eval_sampler = DistributedSamplerNoEvenlyDivisible(self.testing_samples, shuffle=False)
else:
self.eval_sampler = None
self.data = DataLoader(self.testing_samples, 1,
shuffle=False,
num_workers=1,
pin_memory=True,
sampler=self.eval_sampler)
elif mode == 'test':
self.testing_samples = DataLoadPreprocess(args, mode, transform=preprocessing_transforms(mode))
self.data = DataLoader(self.testing_samples, 1, shuffle=False, num_workers=1)
else:
print('mode should be one of \'train, test, online_eval\'. Got {}'.format(mode))
class DataLoadPreprocess(Dataset):
def __init__(self, args, mode, transform=None, is_for_online_eval=False):
self.args = args
if mode == 'online_eval':
with open(args.filenames_file_eval, 'r') as f:
self.filenames = f.readlines()
else:
with open(args.filenames_file, 'r') as f:
self.filenames = f.readlines()
self.mode = mode
self.transform = transform
self.to_tensor = ToTensor
self.is_for_online_eval = is_for_online_eval
def __getitem__(self, idx):
sample_path = self.filenames[idx]
focal = float(sample_path.split()[2])
if self.mode == 'train':
if self.args.dataset == 'kitti' and self.args.use_right is True and random.random() > 0.5:
image_path = os.path.join(self.args.data_path, "./" + sample_path.split()[3])
depth_path = os.path.join(self.args.gt_path, "./" + sample_path.split()[4])
else:
image_path = os.path.join(self.args.data_path, "./" + sample_path.split()[0])
depth_path = os.path.join(self.args.gt_path, "./" + sample_path.split()[1])
image = Image.open(image_path)
depth_gt = Image.open(depth_path)
if self.args.do_kb_crop is True:
height = image.height
width = image.width
top_margin = int(height - 352)
left_margin = int((width - 1216) / 2)
depth_gt = depth_gt.crop((left_margin, top_margin, left_margin + 1216, top_margin + 352))
image = image.crop((left_margin, top_margin, left_margin + 1216, top_margin + 352))
# To avoid blank boundaries due to pixel registration
if self.args.dataset == 'nyu':
depth_gt = depth_gt.crop((43, 45, 608, 472))
image = image.crop((43, 45, 608, 472))
if self.args.do_random_rotate is True:
random_angle = (random.random() - 0.5) * 2 * self.args.degree
image = self.rotate_image(image, random_angle)
depth_gt = self.rotate_image(depth_gt, random_angle, flag=Image.NEAREST)
image = np.asarray(image, dtype=np.float32) / 255.0
depth_gt = np.asarray(depth_gt, dtype=np.float32)
depth_gt = np.expand_dims(depth_gt, axis=2)
if self.args.dataset == 'nyu':
depth_gt = depth_gt / 1000.0
else:
depth_gt = depth_gt / 256.0
image, depth_gt = self.random_crop(image, depth_gt, self.args.input_height, self.args.input_width)
image, depth_gt = self.train_preprocess(image, depth_gt)
sample = {'image': image, 'depth': depth_gt, 'focal': focal}
else:
if self.mode == 'online_eval':
data_path = self.args.data_path_eval
else:
data_path = self.args.data_path
image_path = os.path.join(data_path, "./" + sample_path.split()[0])
image = np.asarray(Image.open(image_path), dtype=np.float32) / 255.0
if self.mode == 'online_eval':
gt_path = self.args.gt_path_eval
depth_path = os.path.join(gt_path, "./" + sample_path.split()[1])
has_valid_depth = False
try:
depth_gt = Image.open(depth_path)
has_valid_depth = True
except IOError:
depth_gt = False
# print('Missing gt for {}'.format(image_path))
if has_valid_depth:
depth_gt = np.asarray(depth_gt, dtype=np.float32)
depth_gt = np.expand_dims(depth_gt, axis=2)
if self.args.dataset == 'nyu':
depth_gt = depth_gt / 1000.0
else:
depth_gt = depth_gt / 256.0
if self.args.do_kb_crop is True:
height = image.shape[0]
width = image.shape[1]
top_margin = int(height - 352)
left_margin = int((width - 1216) / 2)
image = image[top_margin:top_margin + 352, left_margin:left_margin + 1216, :]
if self.mode == 'online_eval' and has_valid_depth:
depth_gt = depth_gt[top_margin:top_margin + 352, left_margin:left_margin + 1216, :]
if self.mode == 'online_eval':
sample = {'image': image, 'depth': depth_gt, 'focal': focal, 'has_valid_depth': has_valid_depth}
else:
sample = {'image': image, 'focal': focal}
if self.transform:
sample = self.transform(sample)
return sample
def rotate_image(self, image, angle, flag=Image.BILINEAR):
result = image.rotate(angle, resample=flag)
return result
def random_crop(self, img, depth, height, width):
assert img.shape[0] >= height
assert img.shape[1] >= width
assert img.shape[0] == depth.shape[0]
assert img.shape[1] == depth.shape[1]
x = random.randint(0, img.shape[1] - width)
y = random.randint(0, img.shape[0] - height)
img = img[y:y + height, x:x + width, :]
depth = depth[y:y + height, x:x + width, :]
return img, depth
def train_preprocess(self, image, depth_gt):
# Random flipping
do_flip = random.random()
if do_flip > 0.5:
image = (image[:, ::-1, :]).copy()
depth_gt = (depth_gt[:, ::-1, :]).copy()
# Random gamma, brightness, color augmentation
do_augment = random.random()
if do_augment > 0.5:
image = self.augment_image(image)
return image, depth_gt
def augment_image(self, image):
# gamma augmentation
gamma = random.uniform(0.9, 1.1)
image_aug = image ** gamma
# brightness augmentation
if self.args.dataset == 'nyu':
brightness = random.uniform(0.75, 1.25)
else:
brightness = random.uniform(0.9, 1.1)
image_aug = image_aug * brightness
# color augmentation
colors = np.random.uniform(0.9, 1.1, size=3)
white = np.ones((image.shape[0], image.shape[1]))
color_image = np.stack([white * colors[i] for i in range(3)], axis=2)
image_aug *= color_image
image_aug = np.clip(image_aug, 0, 1)
return image_aug
def __len__(self):
return len(self.filenames)
class ToTensor(object):
def __init__(self, mode):
self.mode = mode
self.normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
def __call__(self, sample):
image, focal = sample['image'], sample['focal']
image = self.to_tensor(image)
image = self.normalize(image)
if self.mode == 'test':
return {'image': image, 'focal': focal}
depth = sample['depth']
if self.mode == 'train':
depth = self.to_tensor(depth)
return {'image': image, 'depth': depth, 'focal': focal}
else:
has_valid_depth = sample['has_valid_depth']
return {'image': image, 'depth': depth, 'focal': focal, 'has_valid_depth': has_valid_depth}
def to_tensor(self, pic):
if not (_is_pil_image(pic) or _is_numpy_image(pic)):
raise TypeError(
'pic should be PIL Image or ndarray. Got {}'.format(type(pic)))
if isinstance(pic, np.ndarray):
img = torch.from_numpy(pic.transpose((2, 0, 1)))
return img
# handle PIL Image
if pic.mode == 'I':
img = torch.from_numpy(np.array(pic, np.int32, copy=False))
elif pic.mode == 'I;16':
img = torch.from_numpy(np.array(pic, np.int16, copy=False))
else:
img = torch.ByteTensor(torch.ByteStorage.from_buffer(pic.tobytes()))
# PIL image mode: 1, L, P, I, F, RGB, YCbCr, RGBA, CMYK
if pic.mode == 'YCbCr':
nchannel = 3
elif pic.mode == 'I;16':
nchannel = 1
else:
nchannel = len(pic.mode)
img = img.view(pic.size[1], pic.size[0], nchannel)
img = img.transpose(0, 1).transpose(0, 2).contiguous()
if isinstance(img, torch.ByteTensor):
return img.float()
else:
return img
| 11,674 | 38.982877 | 122 | py |
LDU | LDU-main/monocular_depth_estimation/pytorch/bts_eval.py | # Copyright (C) 2019 Jin Han Lee
#
# This file is a part of BTS.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
from __future__ import absolute_import, division, print_function
import os
import argparse
import time
import numpy as np
import cv2
import sys
import torch
import torch.nn as nn
import torch.nn.utils as utils
import torchvision.utils as vutils
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
from tensorboardX import SummaryWriter
from bts_dataloader import *
def convert_arg_line_to_args(arg_line):
for arg in arg_line.split():
if not arg.strip():
continue
yield arg
parser = argparse.ArgumentParser(description='BTS PyTorch implementation.', fromfile_prefix_chars='@')
parser.convert_arg_line_to_args = convert_arg_line_to_args
parser.add_argument('--model_name', type=str, help='model name', default='bts_v0_0_1')
parser.add_argument('--encoder', type=str, help='type of encoder, densenet121_bts or densenet161_bts',
default='densenet161_bts')
parser.add_argument('--data_path', type=str, help='path to the data', required=True)
parser.add_argument('--gt_path', type=str, help='path to the groundtruth data', required=False)
parser.add_argument('--filenames_file', type=str, help='path to the filenames text file', required=True)
parser.add_argument('--input_height', type=int, help='input height', default=480)
parser.add_argument('--input_width', type=int, help='input width', default=640)
parser.add_argument('--max_depth', type=float, help='maximum depth in estimation', default=80)
parser.add_argument('--output_directory', type=str,
help='output directory for summary, if empty outputs to checkpoint folder', default='')
parser.add_argument('--checkpoint_path', type=str, help='path to a specific checkpoint to load', default='')
parser.add_argument('--dataset', type=str, help='dataset to train on, make3d or nyudepthv2', default='nyu')
parser.add_argument('--eigen_crop', help='if set, crops according to Eigen NIPS14', action='store_true')
parser.add_argument('--garg_crop', help='if set, crops according to Garg ECCV16', action='store_true')
parser.add_argument('--min_depth_eval', type=float, help='minimum depth for evaluation', default=1e-3)
parser.add_argument('--max_depth_eval', type=float, help='maximum depth for evaluation', default=80)
parser.add_argument('--do_kb_crop', help='if set, crop input images as kitti benchmark images', action='store_true')
parser.add_argument('--bts_size', type=int, help='initial num_filters in bts', default=512)
if sys.argv.__len__() == 2:
arg_filename_with_prefix = '@' + sys.argv[1]
args = parser.parse_args([arg_filename_with_prefix])
else:
args = parser.parse_args()
model_dir = os.path.dirname(args.checkpoint_path)
sys.path.append(model_dir)
for key, val in vars(__import__(args.model_name)).items():
if key.startswith('__') and key.endswith('__'):
continue
vars()[key] = val
def compute_errors(gt, pred):
thresh = np.maximum((gt / pred), (pred / gt))
d1 = (thresh < 1.25).mean()
d2 = (thresh < 1.25 ** 2).mean()
d3 = (thresh < 1.25 ** 3).mean()
rmse = (gt - pred) ** 2
rmse = np.sqrt(rmse.mean())
rmse_log = (np.log(gt) - np.log(pred)) ** 2
rmse_log = np.sqrt(rmse_log.mean())
abs_rel = np.mean(np.abs(gt - pred) / gt)
sq_rel = np.mean(((gt - pred) ** 2) / gt)
err = np.log(pred) - np.log(gt)
silog = np.sqrt(np.mean(err ** 2) - np.mean(err) ** 2) * 100
err = np.abs(np.log10(pred) - np.log10(gt))
log10 = np.mean(err)
return silog, log10, abs_rel, sq_rel, rmse, rmse_log, d1, d2, d3
def get_num_lines(file_path):
f = open(file_path, 'r')
lines = f.readlines()
f.close()
return len(lines)
def test(params):
global gt_depths, is_missing, missing_ids
gt_depths = []
is_missing = []
missing_ids = set()
write_summary = False
steps = set()
if os.path.isdir(args.checkpoint_path):
import glob
models = [f for f in glob.glob(args.checkpoint_path + "/model*")]
for model in models:
step = model.split('-')[-1]
steps.add('{:06d}'.format(int(step)))
lines = []
if os.path.exists(args.checkpoint_path + '/evaluated_checkpoints'):
with open(args.checkpoint_path + '/evaluated_checkpoints') as file:
lines = file.readlines()
for line in lines:
if line.rstrip() in steps:
steps.remove(line.rstrip())
steps = sorted(steps)
if args.output_directory != '':
summary_path = os.path.join(args.output_directory, args.model_name)
else:
summary_path = os.path.join(args.checkpoint_path, 'eval')
write_summary = True
else:
steps.add('{:06d}'.format(int(args.checkpoint_path.split('-')[-1])))
if len(steps) == 0:
print('No new model to evaluate. Abort.')
return
args.mode = 'test'
    dataloader = BtsDataLoader(args, 'test')
model = BtsModel(params=params)
model = torch.nn.DataParallel(model)
cudnn.benchmark = True
if write_summary:
summary_writer = SummaryWriter(summary_path, flush_secs=30)
for step in steps:
if os.path.isdir(args.checkpoint_path):
checkpoint = torch.load(os.path.join(args.checkpoint_path, 'model-' + str(int(step))))
model.load_state_dict(checkpoint['model'])
else:
checkpoint = torch.load(args.checkpoint_path)
model.load_state_dict(checkpoint['model'])
model.eval()
model.cuda()
num_test_samples = get_num_lines(args.filenames_file)
with open(args.filenames_file) as f:
lines = f.readlines()
print('now testing {} files for step {}'.format(num_test_samples, step))
pred_depths = []
start_time = time.time()
with torch.no_grad():
for _, sample in enumerate(dataloader.data):
image = Variable(sample['image'].cuda())
focal = Variable(sample['focal'].cuda())
# image = Variable(sample['image'])
# focal = Variable(sample['focal'])
# Predict
lpg8x8, lpg4x4, lpg2x2, reduc1x1, depth_est = model(image, focal)
pred_depths.append(depth_est.cpu().numpy().squeeze())
        elapsed_time = time.time() - start_time
        print('Elapsed time: %s' % str(elapsed_time))
        print('Done.')
if len(gt_depths) == 0:
for t_id in range(num_test_samples):
gt_depth_path = os.path.join(args.gt_path, lines[t_id].split()[1])
depth = cv2.imread(gt_depth_path, -1)
if depth is None:
print('Missing: %s ' % gt_depth_path)
missing_ids.add(t_id)
continue
if args.dataset == 'nyu':
depth = depth.astype(np.float32) / 1000.0
else:
depth = depth.astype(np.float32) / 256.0
gt_depths.append(depth)
print('Computing errors')
silog, log10, abs_rel, sq_rel, rms, log_rms, d1, d2, d3 = eval(pred_depths, int(step))
if write_summary:
summary_writer.add_scalar('silog', silog.mean(), int(step))
summary_writer.add_scalar('abs_rel', abs_rel.mean(), int(step))
summary_writer.add_scalar('log10', log10.mean(), int(step))
summary_writer.add_scalar('sq_rel', sq_rel.mean(), int(step))
summary_writer.add_scalar('rms', rms.mean(), int(step))
summary_writer.add_scalar('log_rms', log_rms.mean(), int(step))
summary_writer.add_scalar('d1', d1.mean(), int(step))
summary_writer.add_scalar('d2', d2.mean(), int(step))
summary_writer.add_scalar('d3', d3.mean(), int(step))
summary_writer.flush()
with open(os.path.dirname(args.checkpoint_path) + '/evaluated_checkpoints', 'a') as file:
file.write(step + '\n')
print('Evaluation done')
def eval(pred_depths, step):
num_samples = get_num_lines(args.filenames_file)
pred_depths_valid = []
for t_id in range(num_samples):
if t_id in missing_ids:
continue
pred_depths_valid.append(pred_depths[t_id])
num_samples = num_samples - len(missing_ids)
silog = np.zeros(num_samples, np.float32)
log10 = np.zeros(num_samples, np.float32)
rms = np.zeros(num_samples, np.float32)
log_rms = np.zeros(num_samples, np.float32)
abs_rel = np.zeros(num_samples, np.float32)
sq_rel = np.zeros(num_samples, np.float32)
d1 = np.zeros(num_samples, np.float32)
d2 = np.zeros(num_samples, np.float32)
d3 = np.zeros(num_samples, np.float32)
for i in range(num_samples):
gt_depth = gt_depths[i]
pred_depth = pred_depths_valid[i]
if args.do_kb_crop:
height, width = gt_depth.shape
top_margin = int(height - 352)
left_margin = int((width - 1216) / 2)
pred_depth_uncropped = np.zeros((height, width), dtype=np.float32)
pred_depth_uncropped[top_margin:top_margin + 352, left_margin:left_margin + 1216] = pred_depth
pred_depth = pred_depth_uncropped
pred_depth[pred_depth < args.min_depth_eval] = args.min_depth_eval
pred_depth[pred_depth > args.max_depth_eval] = args.max_depth_eval
pred_depth[np.isinf(pred_depth)] = args.max_depth_eval
pred_depth[np.isnan(pred_depth)] = args.min_depth_eval
valid_mask = np.logical_and(gt_depth > args.min_depth_eval, gt_depth < args.max_depth_eval)
if args.garg_crop or args.eigen_crop:
gt_height, gt_width = gt_depth.shape
eval_mask = np.zeros(valid_mask.shape)
if args.garg_crop:
eval_mask[int(0.40810811 * gt_height):int(0.99189189 * gt_height), int(0.03594771 * gt_width):int(0.96405229 * gt_width)] = 1
elif args.eigen_crop:
if args.dataset == 'kitti':
eval_mask[int(0.3324324 * gt_height):int(0.91351351 * gt_height), int(0.0359477 * gt_width):int(0.96405229 * gt_width)] = 1
else:
eval_mask[45:471, 41:601] = 1
valid_mask = np.logical_and(valid_mask, eval_mask)
silog[i], log10[i], abs_rel[i], sq_rel[i], rms[i], log_rms[i], d1[i], d2[i], d3[i] = compute_errors(
gt_depth[valid_mask], pred_depth[valid_mask])
print("{:>7}, {:>7}, {:>7}, {:>7}, {:>7}, {:>7}, {:>7}, {:>7}, {:>7}".format('silog', 'abs_rel', 'log10', 'rms',
'sq_rel', 'log_rms', 'd1', 'd2', 'd3'))
print("{:7.4f}, {:7.4f}, {:7.3f}, {:7.3f}, {:7.3f}, {:7.3f}, {:7.3f}, {:7.3f}, {:7.3f}".format(
silog.mean(), abs_rel.mean(), log10.mean(), rms.mean(), sq_rel.mean(), log_rms.mean(), d1.mean(), d2.mean(),
d3.mean()))
return silog, log10, abs_rel, sq_rel, rms, log_rms, d1, d2, d3
if __name__ == '__main__':
test(args) | 12,104 | 38.819079 | 143 | py |
LDU | LDU-main/monocular_depth_estimation/pytorch/bts_test.py | # Copyright (C) 2019 Jin Han Lee
#
# This file is a part of BTS.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
from __future__ import absolute_import, division, print_function
import os
import argparse
import time
import numpy as np
import cv2
import sys
import torch
import torch.nn as nn
from torch.autograd import Variable
from bts_dataloader import *
import errno
import matplotlib.pyplot as plt
from tqdm import tqdm
from bts_dataloader import *
def convert_arg_line_to_args(arg_line):
for arg in arg_line.split():
if not arg.strip():
continue
yield arg
parser = argparse.ArgumentParser(description='BTS PyTorch implementation.', fromfile_prefix_chars='@')
parser.convert_arg_line_to_args = convert_arg_line_to_args
parser.add_argument('--model_name', type=str, help='model name', default='bts_nyu_v2')
parser.add_argument('--encoder', type=str, help='type of encoder, vgg or densenet121_bts or densenet161_bts',
default='densenet161_bts')
parser.add_argument('--data_path', type=str, help='path to the data', required=True)
parser.add_argument('--filenames_file', type=str, help='path to the filenames text file', required=True)
parser.add_argument('--input_height', type=int, help='input height', default=480)
parser.add_argument('--input_width', type=int, help='input width', default=640)
parser.add_argument('--max_depth', type=float, help='maximum depth in estimation', default=80)
parser.add_argument('--checkpoint_path', type=str, help='path to a specific checkpoint to load', default='')
parser.add_argument('--dataset', type=str, help='dataset to train on, make3d or nyudepthv2', default='nyu')
parser.add_argument('--do_kb_crop', help='if set, crop input images as kitti benchmark images', action='store_true')
parser.add_argument('--save_lpg', help='if set, save outputs from lpg layers', action='store_true')
parser.add_argument('--bts_size', type=int, help='initial num_filters in bts', default=512)
if sys.argv.__len__() == 2:
arg_filename_with_prefix = '@' + sys.argv[1]
args = parser.parse_args([arg_filename_with_prefix])
else:
args = parser.parse_args()
model_dir = os.path.dirname(args.checkpoint_path)
sys.path.append(model_dir)
for key, val in vars(__import__(args.model_name)).items():
if key.startswith('__') and key.endswith('__'):
continue
vars()[key] = val
def get_num_lines(file_path):
f = open(file_path, 'r')
lines = f.readlines()
f.close()
return len(lines)
def test(params):
"""Test function."""
args.mode = 'test'
dataloader = BtsDataLoader(args, 'test')
model = BtsModel(params=args)
model = torch.nn.DataParallel(model)
checkpoint = torch.load(args.checkpoint_path)
model.load_state_dict(checkpoint['model'])
model.eval()
model.cuda()
num_params = sum([np.prod(p.size()) for p in model.parameters()])
print("Total number of parameters: {}".format(num_params))
num_test_samples = get_num_lines(args.filenames_file)
with open(args.filenames_file) as f:
lines = f.readlines()
print('now testing {} files with {}'.format(num_test_samples, args.checkpoint_path))
pred_depths = []
pred_8x8s = []
pred_4x4s = []
pred_2x2s = []
pred_1x1s = []
start_time = time.time()
with torch.no_grad():
for _, sample in enumerate(tqdm(dataloader.data)):
image = Variable(sample['image'].cuda())
focal = Variable(sample['focal'].cuda())
# Predict
lpg8x8, lpg4x4, lpg2x2, reduc1x1, depth_est = model(image, focal)
pred_depths.append(depth_est.cpu().numpy().squeeze())
pred_8x8s.append(lpg8x8[0].cpu().numpy().squeeze())
pred_4x4s.append(lpg4x4[0].cpu().numpy().squeeze())
pred_2x2s.append(lpg2x2[0].cpu().numpy().squeeze())
pred_1x1s.append(reduc1x1[0].cpu().numpy().squeeze())
    elapsed_time = time.time() - start_time
    print('Elapsed time: %s' % str(elapsed_time))
    print('Done.')
save_name = 'result_' + args.model_name
print('Saving result pngs..')
if not os.path.exists(os.path.dirname(save_name)):
try:
os.mkdir(save_name)
os.mkdir(save_name + '/raw')
os.mkdir(save_name + '/cmap')
os.mkdir(save_name + '/rgb')
os.mkdir(save_name + '/gt')
except OSError as e:
if e.errno != errno.EEXIST:
raise
for s in tqdm(range(num_test_samples)):
if args.dataset == 'kitti':
date_drive = lines[s].split('/')[1]
filename_pred_png = save_name + '/raw/' + date_drive + '_' + lines[s].split()[0].split('/')[-1].replace(
'.jpg', '.png')
filename_cmap_png = save_name + '/cmap/' + date_drive + '_' + lines[s].split()[0].split('/')[
-1].replace('.jpg', '.png')
filename_image_png = save_name + '/rgb/' + date_drive + '_' + lines[s].split()[0].split('/')[-1]
elif args.dataset == 'kitti_benchmark':
filename_pred_png = save_name + '/raw/' + lines[s].split()[0].split('/')[-1].replace('.jpg', '.png')
filename_cmap_png = save_name + '/cmap/' + lines[s].split()[0].split('/')[-1].replace('.jpg', '.png')
filename_image_png = save_name + '/rgb/' + lines[s].split()[0].split('/')[-1]
else:
scene_name = lines[s].split()[0].split('/')[0]
filename_pred_png = save_name + '/raw/' + scene_name + '_' + lines[s].split()[0].split('/')[1].replace(
'.jpg', '.png')
filename_cmap_png = save_name + '/cmap/' + scene_name + '_' + lines[s].split()[0].split('/')[1].replace(
'.jpg', '.png')
filename_gt_png = save_name + '/gt/' + scene_name + '_' + lines[s].split()[0].split('/')[1].replace(
'.jpg', '.png')
filename_image_png = save_name + '/rgb/' + scene_name + '_' + lines[s].split()[0].split('/')[1]
rgb_path = os.path.join(args.data_path, './' + lines[s].split()[0])
image = cv2.imread(rgb_path)
if args.dataset == 'nyu':
gt_path = os.path.join(args.data_path, './' + lines[s].split()[1])
gt = cv2.imread(gt_path, -1).astype(np.float32) / 1000.0 # Visualization purpose only
gt[gt == 0] = np.amax(gt)
pred_depth = pred_depths[s]
pred_8x8 = pred_8x8s[s]
pred_4x4 = pred_4x4s[s]
pred_2x2 = pred_2x2s[s]
pred_1x1 = pred_1x1s[s]
if args.dataset == 'kitti' or args.dataset == 'kitti_benchmark':
pred_depth_scaled = pred_depth * 256.0
else:
pred_depth_scaled = pred_depth * 1000.0
pred_depth_scaled = pred_depth_scaled.astype(np.uint16)
cv2.imwrite(filename_pred_png, pred_depth_scaled, [cv2.IMWRITE_PNG_COMPRESSION, 0])
if args.save_lpg:
cv2.imwrite(filename_image_png, image[10:-1 - 9, 10:-1 - 9, :])
if args.dataset == 'nyu':
plt.imsave(filename_gt_png, np.log10(gt[10:-1 - 9, 10:-1 - 9]), cmap='Greys')
pred_depth_cropped = pred_depth[10:-1 - 9, 10:-1 - 9]
plt.imsave(filename_cmap_png, np.log10(pred_depth_cropped), cmap='Greys')
pred_8x8_cropped = pred_8x8[10:-1 - 9, 10:-1 - 9]
filename_lpg_cmap_png = filename_cmap_png.replace('.png', '_8x8.png')
plt.imsave(filename_lpg_cmap_png, np.log10(pred_8x8_cropped), cmap='Greys')
pred_4x4_cropped = pred_4x4[10:-1 - 9, 10:-1 - 9]
filename_lpg_cmap_png = filename_cmap_png.replace('.png', '_4x4.png')
plt.imsave(filename_lpg_cmap_png, np.log10(pred_4x4_cropped), cmap='Greys')
pred_2x2_cropped = pred_2x2[10:-1 - 9, 10:-1 - 9]
filename_lpg_cmap_png = filename_cmap_png.replace('.png', '_2x2.png')
plt.imsave(filename_lpg_cmap_png, np.log10(pred_2x2_cropped), cmap='Greys')
pred_1x1_cropped = pred_1x1[10:-1 - 9, 10:-1 - 9]
filename_lpg_cmap_png = filename_cmap_png.replace('.png', '_1x1.png')
plt.imsave(filename_lpg_cmap_png, np.log10(pred_1x1_cropped), cmap='Greys')
else:
plt.imsave(filename_cmap_png, np.log10(pred_depth), cmap='Greys')
filename_lpg_cmap_png = filename_cmap_png.replace('.png', '_8x8.png')
plt.imsave(filename_lpg_cmap_png, np.log10(pred_8x8), cmap='Greys')
filename_lpg_cmap_png = filename_cmap_png.replace('.png', '_4x4.png')
plt.imsave(filename_lpg_cmap_png, np.log10(pred_4x4), cmap='Greys')
filename_lpg_cmap_png = filename_cmap_png.replace('.png', '_2x2.png')
plt.imsave(filename_lpg_cmap_png, np.log10(pred_2x2), cmap='Greys')
filename_lpg_cmap_png = filename_cmap_png.replace('.png', '_1x1.png')
plt.imsave(filename_lpg_cmap_png, np.log10(pred_1x1), cmap='Greys')
return
if __name__ == '__main__':
test(args)
| 9,732 | 43.040724 | 116 | py |
LDU | LDU-main/monocular_depth_estimation/pytorch/run_bts_eval_schedule.py | # Copyright (C) 2019 Jin Han Lee
#
# This file is a part of BTS.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
import os
import datetime
from apscheduler.schedulers.blocking import BlockingScheduler
scheduler = BlockingScheduler()
@scheduler.scheduled_job('interval', minutes=1, start_date=datetime.datetime.now() + datetime.timedelta(0,3))
def run_eval():
command = 'export CUDA_VISIBLE_DEVICES=0; ' \
'/usr/bin/python ' \
'bts_eval.py ' \
'--encoder densenet161_bts ' \
'--dataset kitti ' \
'--data_path ../../dataset/kitti_dataset/ ' \
'--gt_path ../../dataset/kitti_dataset/data_depth_annotated/ ' \
'--filenames_file ../train_test_inputs/eigen_test_files_with_gt.txt ' \
'--input_height 352 ' \
'--input_width 1216 ' \
'--garg_crop ' \
'--max_depth 80 ' \
'--max_depth_eval 80 ' \
'--output_directory ./models/eval-eigen/ ' \
'--model_name bts_eigen_v0_0_1 ' \
'--checkpoint_path ./models/bts_eigen_v0_0_1/ ' \
'--do_kb_crop '
print('Executing: %s' % command)
os.system(command)
print('Finished: %s' % datetime.datetime.now())
scheduler.configure()
scheduler.start() | 1,900 | 39.446809 | 109 | py |
LDU | LDU-main/monocular_depth_estimation/pytorch/bts.py | # Copyright (C) 2019 Jin Han Lee
#
# This file is a part of BTS.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
import torch
import torch.nn as nn
import torch.nn.functional as torch_nn_func
import math
from collections import namedtuple
# This sets the batch norm layers in pytorch as if {'is_training': False, 'scale': True} in tensorflow
def bn_init_as_tf(m):
if isinstance(m, nn.BatchNorm2d):
m.track_running_stats = True # These two lines enable using stats (moving mean and var) loaded from pretrained model
m.eval() # or zero mean and variance of one if the batch norm layer has no pretrained values
m.affine = True
m.requires_grad = True
def weights_init_xavier(m):
if isinstance(m, nn.Conv2d):
torch.nn.init.xavier_uniform_(m.weight)
if m.bias is not None:
torch.nn.init.zeros_(m.bias)
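# silog_loss: scale-invariant log loss used for depth supervision; with d = log(depth_est) - log(depth_gt)
# over the masked (valid) pixels, it computes sqrt(mean(d^2) - variance_focus * mean(d)^2) * 10.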
class silog_loss(nn.Module):
def __init__(self, variance_focus):
super(silog_loss, self).__init__()
self.variance_focus = variance_focus
def forward(self, depth_est, depth_gt, mask):
d = torch.log(depth_est[mask]) - torch.log(depth_gt[mask])
return torch.sqrt((d ** 2).mean() - self.variance_focus * (d.mean() ** 2)) * 10.0
class atrous_conv(nn.Sequential):
def __init__(self, in_channels, out_channels, dilation, apply_bn_first=True):
super(atrous_conv, self).__init__()
self.atrous_conv = torch.nn.Sequential()
if apply_bn_first:
self.atrous_conv.add_module('first_bn', nn.BatchNorm2d(in_channels, momentum=0.01, affine=True, track_running_stats=True, eps=1.1e-5))
self.atrous_conv.add_module('aconv_sequence', nn.Sequential(nn.ReLU(),
nn.Conv2d(in_channels=in_channels, out_channels=out_channels*2, bias=False, kernel_size=1, stride=1, padding=0),
nn.BatchNorm2d(out_channels*2, momentum=0.01, affine=True, track_running_stats=True),
nn.ReLU(),
nn.Conv2d(in_channels=out_channels * 2, out_channels=out_channels, bias=False, kernel_size=3, stride=1,
padding=(dilation, dilation), dilation=dilation)))
def forward(self, x):
return self.atrous_conv.forward(x)
class upconv(nn.Module):
def __init__(self, in_channels, out_channels, ratio=2):
super(upconv, self).__init__()
self.elu = nn.ELU()
self.conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, bias=False, kernel_size=3, stride=1, padding=1)
self.ratio = ratio
def forward(self, x):
up_x = torch_nn_func.interpolate(x, scale_factor=self.ratio, mode='nearest')
out = self.conv(up_x)
out = self.elu(out)
return out
class reduction_1x1(nn.Sequential):
def __init__(self, num_in_filters, num_out_filters, max_depth, is_final=False):
super(reduction_1x1, self).__init__()
self.max_depth = max_depth
self.is_final = is_final
self.sigmoid = nn.Sigmoid()
self.reduc = torch.nn.Sequential()
while num_out_filters >= 4:
if num_out_filters < 8:
if self.is_final:
self.reduc.add_module('final', torch.nn.Sequential(nn.Conv2d(num_in_filters, out_channels=1, bias=False,
kernel_size=1, stride=1, padding=0),
nn.Sigmoid()))
else:
self.reduc.add_module('plane_params', torch.nn.Conv2d(num_in_filters, out_channels=3, bias=False,
kernel_size=1, stride=1, padding=0))
break
else:
self.reduc.add_module('inter_{}_{}'.format(num_in_filters, num_out_filters),
torch.nn.Sequential(nn.Conv2d(in_channels=num_in_filters, out_channels=num_out_filters,
bias=False, kernel_size=1, stride=1, padding=0),
nn.ELU()))
num_in_filters = num_out_filters
num_out_filters = num_out_filters // 2
def forward(self, net):
net = self.reduc.forward(net)
if not self.is_final:
theta = self.sigmoid(net[:, 0, :, :]) * math.pi / 3
phi = self.sigmoid(net[:, 1, :, :]) * math.pi * 2
dist = self.sigmoid(net[:, 2, :, :]) * self.max_depth
n1 = torch.mul(torch.sin(theta), torch.cos(phi)).unsqueeze(1)
n2 = torch.mul(torch.sin(theta), torch.sin(phi)).unsqueeze(1)
n3 = torch.cos(theta).unsqueeze(1)
n4 = dist.unsqueeze(1)
net = torch.cat([n1, n2, n3, n4], dim=1)
return net
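# Local planar guidance: the 4-channel plane coefficients (n1, n2, n3, n4) estimated at a coarse
# scale are expanded by `upratio` and converted to per-pixel depth as depth = n4 / (n1*u + n2*v + n3),
# where (u, v) are normalized sub-pixel coordinates within each upsampled cell.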
class local_planar_guidance(nn.Module):
def __init__(self, upratio):
super(local_planar_guidance, self).__init__()
self.upratio = upratio
self.u = torch.arange(self.upratio).reshape([1, 1, self.upratio]).float()
self.v = torch.arange(int(self.upratio)).reshape([1, self.upratio, 1]).float()
self.upratio = float(upratio)
def forward(self, plane_eq, focal):
plane_eq_expanded = torch.repeat_interleave(plane_eq, int(self.upratio), 2)
plane_eq_expanded = torch.repeat_interleave(plane_eq_expanded, int(self.upratio), 3)
n1 = plane_eq_expanded[:, 0, :, :]
n2 = plane_eq_expanded[:, 1, :, :]
n3 = plane_eq_expanded[:, 2, :, :]
n4 = plane_eq_expanded[:, 3, :, :]
u = self.u.repeat(plane_eq.size(0), plane_eq.size(2) * int(self.upratio), plane_eq.size(3)).cuda()
u = (u - (self.upratio - 1) * 0.5) / self.upratio
v = self.v.repeat(plane_eq.size(0), plane_eq.size(2), plane_eq.size(3) * int(self.upratio)).cuda()
v = (v - (self.upratio - 1) * 0.5) / self.upratio
return n4 / (n1 * u + n2 * v + n3)
class bts(nn.Module):
def __init__(self, params, feat_out_channels, num_features=512):
super(bts, self).__init__()
self.params = params
self.upconv5 = upconv(feat_out_channels[4], num_features)
self.bn5 = nn.BatchNorm2d(num_features, momentum=0.01, affine=True, eps=1.1e-5)
self.conv5 = torch.nn.Sequential(nn.Conv2d(num_features + feat_out_channels[3], num_features, 3, 1, 1, bias=False),
nn.ELU())
self.upconv4 = upconv(num_features, num_features // 2)
self.bn4 = nn.BatchNorm2d(num_features // 2, momentum=0.01, affine=True, eps=1.1e-5)
self.conv4 = torch.nn.Sequential(nn.Conv2d(num_features // 2 + feat_out_channels[2], num_features // 2, 3, 1, 1, bias=False),
nn.ELU())
self.bn4_2 = nn.BatchNorm2d(num_features // 2, momentum=0.01, affine=True, eps=1.1e-5)
self.daspp_3 = atrous_conv(num_features // 2, num_features // 4, 3, apply_bn_first=False)
self.daspp_6 = atrous_conv(num_features // 2 + num_features // 4 + feat_out_channels[2], num_features // 4, 6)
self.daspp_12 = atrous_conv(num_features + feat_out_channels[2], num_features // 4, 12)
self.daspp_18 = atrous_conv(num_features + num_features // 4 + feat_out_channels[2], num_features // 4, 18)
self.daspp_24 = atrous_conv(num_features + num_features // 2 + feat_out_channels[2], num_features // 4, 24)
self.daspp_conv = torch.nn.Sequential(nn.Conv2d(num_features + num_features // 2 + num_features // 4, num_features // 4, 3, 1, 1, bias=False),
nn.ELU())
self.reduc8x8 = reduction_1x1(num_features // 4, num_features // 4, self.params.max_depth)
self.lpg8x8 = local_planar_guidance(8)
self.upconv3 = upconv(num_features // 4, num_features // 4)
self.bn3 = nn.BatchNorm2d(num_features // 4, momentum=0.01, affine=True, eps=1.1e-5)
self.conv3 = torch.nn.Sequential(nn.Conv2d(num_features // 4 + feat_out_channels[1] + 1, num_features // 4, 3, 1, 1, bias=False),
nn.ELU())
self.reduc4x4 = reduction_1x1(num_features // 4, num_features // 8, self.params.max_depth)
self.lpg4x4 = local_planar_guidance(4)
self.upconv2 = upconv(num_features // 4, num_features // 8)
self.bn2 = nn.BatchNorm2d(num_features // 8, momentum=0.01, affine=True, eps=1.1e-5)
self.conv2 = torch.nn.Sequential(nn.Conv2d(num_features // 8 + feat_out_channels[0] + 1, num_features // 8, 3, 1, 1, bias=False),
nn.ELU())
self.reduc2x2 = reduction_1x1(num_features // 8, num_features // 16, self.params.max_depth)
self.lpg2x2 = local_planar_guidance(2)
self.upconv1 = upconv(num_features // 8, num_features // 16)
self.reduc1x1 = reduction_1x1(num_features // 16, num_features // 32, self.params.max_depth, is_final=True)
self.conv1 = torch.nn.Sequential(nn.Conv2d(num_features // 16 + 4, num_features // 16, 3, 1, 1, bias=False),
nn.ELU())
self.get_depth = torch.nn.Sequential(nn.Conv2d(num_features // 16, 1, 3, 1, 1, bias=False),
nn.Sigmoid())
def forward(self, features, focal):
skip0, skip1, skip2, skip3 = features[0], features[1], features[2], features[3]
dense_features = torch.nn.ReLU()(features[4])
upconv5 = self.upconv5(dense_features) # H/16
upconv5 = self.bn5(upconv5)
concat5 = torch.cat([upconv5, skip3], dim=1)
iconv5 = self.conv5(concat5)
upconv4 = self.upconv4(iconv5) # H/8
upconv4 = self.bn4(upconv4)
concat4 = torch.cat([upconv4, skip2], dim=1)
iconv4 = self.conv4(concat4)
iconv4 = self.bn4_2(iconv4)
daspp_3 = self.daspp_3(iconv4)
concat4_2 = torch.cat([concat4, daspp_3], dim=1)
daspp_6 = self.daspp_6(concat4_2)
concat4_3 = torch.cat([concat4_2, daspp_6], dim=1)
daspp_12 = self.daspp_12(concat4_3)
concat4_4 = torch.cat([concat4_3, daspp_12], dim=1)
daspp_18 = self.daspp_18(concat4_4)
concat4_5 = torch.cat([concat4_4, daspp_18], dim=1)
daspp_24 = self.daspp_24(concat4_5)
concat4_daspp = torch.cat([iconv4, daspp_3, daspp_6, daspp_12, daspp_18, daspp_24], dim=1)
daspp_feat = self.daspp_conv(concat4_daspp)
reduc8x8 = self.reduc8x8(daspp_feat)
plane_normal_8x8 = reduc8x8[:, :3, :, :]
plane_normal_8x8 = torch_nn_func.normalize(plane_normal_8x8, 2, 1)
plane_dist_8x8 = reduc8x8[:, 3, :, :]
plane_eq_8x8 = torch.cat([plane_normal_8x8, plane_dist_8x8.unsqueeze(1)], 1)
depth_8x8 = self.lpg8x8(plane_eq_8x8, focal)
depth_8x8_scaled = depth_8x8.unsqueeze(1) / self.params.max_depth
depth_8x8_scaled_ds = torch_nn_func.interpolate(depth_8x8_scaled, scale_factor=0.25, mode='nearest')
upconv3 = self.upconv3(daspp_feat) # H/4
upconv3 = self.bn3(upconv3)
concat3 = torch.cat([upconv3, skip1, depth_8x8_scaled_ds], dim=1)
iconv3 = self.conv3(concat3)
reduc4x4 = self.reduc4x4(iconv3)
plane_normal_4x4 = reduc4x4[:, :3, :, :]
plane_normal_4x4 = torch_nn_func.normalize(plane_normal_4x4, 2, 1)
plane_dist_4x4 = reduc4x4[:, 3, :, :]
plane_eq_4x4 = torch.cat([plane_normal_4x4, plane_dist_4x4.unsqueeze(1)], 1)
depth_4x4 = self.lpg4x4(plane_eq_4x4, focal)
depth_4x4_scaled = depth_4x4.unsqueeze(1) / self.params.max_depth
depth_4x4_scaled_ds = torch_nn_func.interpolate(depth_4x4_scaled, scale_factor=0.5, mode='nearest')
upconv2 = self.upconv2(iconv3) # H/2
upconv2 = self.bn2(upconv2)
concat2 = torch.cat([upconv2, skip0, depth_4x4_scaled_ds], dim=1)
iconv2 = self.conv2(concat2)
reduc2x2 = self.reduc2x2(iconv2)
plane_normal_2x2 = reduc2x2[:, :3, :, :]
plane_normal_2x2 = torch_nn_func.normalize(plane_normal_2x2, 2, 1)
plane_dist_2x2 = reduc2x2[:, 3, :, :]
plane_eq_2x2 = torch.cat([plane_normal_2x2, plane_dist_2x2.unsqueeze(1)], 1)
depth_2x2 = self.lpg2x2(plane_eq_2x2, focal)
depth_2x2_scaled = depth_2x2.unsqueeze(1) / self.params.max_depth
upconv1 = self.upconv1(iconv2)
reduc1x1 = self.reduc1x1(upconv1)
concat1 = torch.cat([upconv1, reduc1x1, depth_2x2_scaled, depth_4x4_scaled, depth_8x8_scaled], dim=1)
iconv1 = self.conv1(concat1)
final_depth = self.params.max_depth * self.get_depth(iconv1)
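        # For KITTI, rescale the predicted depth by the ratio of the image focal length to a
        # reference focal length (715.0873 is assumed here to be that reference value, in pixels)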
if self.params.dataset == 'kitti':
final_depth = final_depth * focal.view(-1, 1, 1, 1).float() / 715.0873
return depth_8x8_scaled, depth_4x4_scaled, depth_2x2_scaled, reduc1x1, final_depth
class encoder(nn.Module):
def __init__(self, params):
super(encoder, self).__init__()
self.params = params
import torchvision.models as models
if params.encoder == 'densenet121_bts':
self.base_model = models.densenet121(pretrained=True).features
self.feat_names = ['relu0', 'pool0', 'transition1', 'transition2', 'norm5']
self.feat_out_channels = [64, 64, 128, 256, 1024]
elif params.encoder == 'densenet161_bts':
self.base_model = models.densenet161(pretrained=True).features
self.feat_names = ['relu0', 'pool0', 'transition1', 'transition2', 'norm5']
self.feat_out_channels = [96, 96, 192, 384, 2208]
elif params.encoder == 'resnet50_bts':
self.base_model = models.resnet50(pretrained=True)
self.feat_names = ['relu', 'layer1', 'layer2', 'layer3', 'layer4']
self.feat_out_channels = [64, 256, 512, 1024, 2048]
elif params.encoder == 'resnet101_bts':
self.base_model = models.resnet101(pretrained=True)
self.feat_names = ['relu', 'layer1', 'layer2', 'layer3', 'layer4']
self.feat_out_channels = [64, 256, 512, 1024, 2048]
elif params.encoder == 'resnext50_bts':
self.base_model = models.resnext50_32x4d(pretrained=True)
self.feat_names = ['relu', 'layer1', 'layer2', 'layer3', 'layer4']
self.feat_out_channels = [64, 256, 512, 1024, 2048]
elif params.encoder == 'resnext101_bts':
self.base_model = models.resnext101_32x8d(pretrained=True)
self.feat_names = ['relu', 'layer1', 'layer2', 'layer3', 'layer4']
self.feat_out_channels = [64, 256, 512, 1024, 2048]
elif params.encoder == 'mobilenetv2_bts':
self.base_model = models.mobilenet_v2(pretrained=True).features
self.feat_inds = [2, 4, 7, 11, 19]
self.feat_out_channels = [16, 24, 32, 64, 1280]
self.feat_names = []
else:
print('Not supported encoder: {}'.format(params.encoder))
def forward(self, x):
feature = x
skip_feat = []
i = 1
for k, v in self.base_model._modules.items():
if 'fc' in k or 'avgpool' in k:
continue
feature = v(feature)
if self.params.encoder == 'mobilenetv2_bts':
if i == 2 or i == 4 or i == 7 or i == 11 or i == 19:
skip_feat.append(feature)
else:
if any(x in k for x in self.feat_names):
skip_feat.append(feature)
i = i + 1
return skip_feat
class BtsModel(nn.Module):
def __init__(self, params):
super(BtsModel, self).__init__()
self.encoder = encoder(params)
self.decoder = bts(params, self.encoder.feat_out_channels, params.bts_size)
def forward(self, x, focal):
skip_feat = self.encoder(x)
return self.decoder(skip_feat, focal)
| 17,122 | 50.575301 | 180 | py |
ros-sharp | ros-sharp-master/ROS/unity_simulation_scene/scripts/mouse_to_joy.py | #!/usr/bin/env python
# Siemens AG, 2018
# Author: Berkay Alp Cakal ([email protected])
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# <http://www.apache.org/licenses/LICENSE-2.0>.
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import rospy
import numpy
from sensor_msgs.msg import Joy
from geometry_msgs.msg import Twist
from Xlib import display
from Xlib.ext import randr
def mouseToJoy():
# initialize node
rospy.init_node('mouseToJoy', anonymous = True)
#### Setup MouseToJoy Publisher
mouseToJoyPublisher = rospy.Publisher("joy", Joy, queue_size = 5)
rate = rospy.Rate(10) # 10hz
msg = Joy()
while not rospy.is_shutdown():
#### Initialize joy msg every loop
msg.axes = []
msg.buttons = []
pos_x = 0.0
pos_y = 0.0
#### Get Display Dependent Parameters
d = display.Display()
screen = d.screen()
window = screen.root.create_window(0, 0, 1, 1, 1, screen.root_depth)
res = randr.get_screen_resources(window)
resolution_x = res.modes[0].width # i.e. 1920
resolution_y = res.modes[0].height # i.e. 1080
middlePoint_x = resolution_x / 2.0
middlePoint_y = resolution_y / 2.0
#### Start Getting Postion of Mouse
MouseData = display.Display().screen().root.query_pointer()._data
pos_x = MouseData["root_x"]
pos_y = MouseData["root_y"]
#### Start Mapping from Mouse Position to Joy
vel_linear = (pos_y - middlePoint_y) / resolution_y * (2)
vel_angular = (pos_x - middlePoint_x) / resolution_x * (2)
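        # e.g. on a 1920x1080 display, a cursor at (1440, 810) maps to
        # vel_linear = 0.5 and vel_angular = 0.5; both axes stay within [-1, 1]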
msg.axes.append(vel_linear)
msg.axes.append(vel_angular)
#### Publish msg
#rospy.loginfo([pos_x, pos_y])
rospy.loginfo(msg)
mouseToJoyPublisher.publish(msg)
#rate.sleep()
if __name__ == '__main__':
mouseToJoy()
| 2,113 | 28.774648 | 74 | py |
ros-sharp | ros-sharp-master/ROS/gazebo_simulation_scene/scripts/joy_to_twist.py | #!/usr/bin/env python
# Siemens AG, 2018
# Author: Berkay Alp Cakal ([email protected])
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# <http://www.apache.org/licenses/LICENSE-2.0>.
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import rospy
import numpy
from sensor_msgs.msg import Joy
from geometry_msgs.msg import Twist
def JoyToTwist():
# initialize node
rospy.init_node('JoyToTwist', anonymous=True)
# setup joy topic subscription
joy_subscriber = rospy.Subscriber("joy", Joy, handleJoyMsg, queue_size=10)
# spin() simply keeps python from exiting until this node is stopped
rospy.spin()
def handleJoyMsg(data):
#### Initialize Speed Parameter
# axes [l.x l.y l.z a.x a.y a.z]
scalers = [0.7, 0.7, 0.7, -3.14, -3.14, -3.14]
#### Setup Twist Publisher
    twist_publisher = rospy.Publisher("cmd_vel_mux/input/navi", Twist, queue_size=10)
msg = Twist()
#### Start Mapping from Joy to Twist
if len(data.axes) >= 6 :
msg.angular.x = data.axes[5] * scalers[3]
if len(data.axes) >= 5 :
msg.linear.z = data.axes[4] * scalers[2]
if len(data.axes) >= 4 :
msg.angular.y = data.axes[3] * scalers[4]
if len(data.axes) >= 3 :
msg.linear.y = data.axes[2] * scalers[1]
if len(data.axes) >= 2 :
msg.angular.z = data.axes[1] * scalers[5]
if len(data.axes) >= 1 :
msg.linear.x = data.axes[0] * scalers[0]
#### Publish msg
    rate = rospy.Rate(100) # 100hz
rospy.loginfo(msg)
twist_publisher.publish(msg)
rate.sleep()
if __name__ == '__main__':
JoyToTwist()
| 1,921 | 26.855072 | 75 | py |
PSE | PSE-master/PSEv1/variant.py | ## \package PSEv1.variant
# classes representing the variant class to facilitate box_resize
from hoomd.PSEv1 import _PSEv1
from hoomd.PSEv1 import shear_function
from hoomd import variant
from hoomd import _hoomd
import hoomd
import sys
## Variant class holding a functional form of shear field
# Used as an argument for box_resize class to deform the box
class shear_variant(hoomd.variant._variant):
## Specify shear field represented by a function form with a limited timesteps
#
# \param function_form the functional form of the sinusoidal shear
# \param total_timestep the total timesteps of the shear, equal to shear_end_timestep - shear_start_timestep, must be positive
# \param max_strain the maximum absolute value of the strain, use 0.5 in almost all the cases
def __init__(self, function_form, total_timestep, max_strain = 0.5):
        # initialize the base class
        hoomd.variant._variant.__init__(self)
        # check total_timestep is positive
        if total_timestep <= 0:
            hoomd.context.msg.error("Cannot create a shear_variant with a zero or negative total_timestep\n")
            raise RuntimeError('Error creating variant')
# create the c++ mirror class
self.cpp_variant = _PSEv1.VariantShearFunction(function_form.cpp_function, int(total_timestep), -max_strain, max_strain)
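# Minimal usage sketch (illustrative only, not part of the original file; assumes a HOOMD 2.x
# context with the PSEv1 plugin built, and that hoomd.update.box_resize accepts an xy variant --
# check the signature of your HOOMD version):
#
#   fn = hoomd.PSEv1.shear_function.sine(dt=1e-3, shear_rate=1.0, shear_freq=1.0)
#   xy = hoomd.PSEv1.variant.shear_variant(fn, total_timestep=10000, max_strain=0.5)
#   hoomd.update.box_resize(xy=xy, period=1)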
| 1,327 | 39.242424 | 130 | py |
PSE | PSE-master/PSEv1/integrate.py | # First, we need to import the C++ module. It has the same name as this module (plugin_template) but with an underscore
# in front
from hoomd.PSEv1 import _PSEv1
from hoomd.PSEv1 import shear_function
# Next, since we are extending an integrator, we need to bring in the base class integrator and some other parts from
# hoomd_script
import hoomd
from hoomd import _hoomd
from hoomd import compute
from hoomd.md import _md
import math
## One step overdamped integration with hydrodynamic interactions
class PSEv1(hoomd.md.integrate._integration_method):
## Specifies the Stokes integrator
#
# \param group Group of particles on which to apply this method.
# \param T Temperature of the simulation (in energy units)
# \param seed Random seed to use for the run. Simulations that are identical, except for the seed, will follow
# different trajectories.
# \param xi Ewald splitting parameter
# \param error Relative error for all calculations
# \param function_form Functional form for shear
# \param max_strain Maximum box deformation for shear
#
#
# T can be a variant type, allowing for temperature ramps in simulation runs.
#
# Internally, a compute.thermo is automatically specified and associated with \a group.
def __init__(self, group, T, seed=0, xi = 0.5, error = 0.001, function_form = None, max_strain = 0.5, nlist_type = "cell" ):
# Print the status of the initialization
hoomd.util.print_status_line();
# initialize base class
hoomd.md.integrate._integration_method.__init__(self);
# setup the variant inputs
T = hoomd.variant._setup_variant_input(T);
# create the compute thermo
compute._get_unique_thermo(group=group);
# Real space neighborlist cutoff based on error estimate for spectral sums
self.rcut = math.sqrt( - math.log( error ) ) / xi;
# If this line is changed, remember to change in C++ code as well!!
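        # e.g. with the defaults error = 1E-3 and xi = 0.5 this gives rcut = sqrt(-ln(1E-3)) / 0.5, about 5.26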
# initialize the reflected c++ class
if not hoomd.context.exec_conf.isCUDAEnabled():
hoomd.context.msg.error("Sorry, we have not written CPU code for PSE RPY simulation. \n");
raise RuntimeError('Error creating Stokes');
else:
# Create a neighborlist exclusively for real space interactions. Use cell lists by
# default, but also allow the user to specify
if ( nlist_type.upper() == "CELL" ):
cl_stokes = _hoomd.CellListGPU(hoomd.context.current.system_definition);
hoomd.context.current.system.addCompute(cl_stokes, "stokes_cl")
self.neighbor_list = _md.NeighborListGPUBinned(hoomd.context.current.system_definition, self.rcut, 0.4, cl_stokes);
elif ( nlist_type.upper() == "TREE" ):
self.neighbor_list = _md.NeighborListGPUTree(hoomd.context.current.system_definition, self.rcut, 0.4)
elif ( nlist_type.upper() == "STENCIL" ):
cl_stokes = _hoomd.CellListGPU(hoomd.context.current.system_definition)
hoomd.context.current.system.addCompute(cl_stokes, "stokes_cl")
cls_stokes = _hoomd.CellListStencil( hoomd.context.current.system_definition, cl_stokes )
hoomd.context.current.system.addCompute( cls_stokes, "stokes_cls")
self.neighbor_list = _md.NeighborListGPUStencil(hoomd.context.current.system_definition, self.rcut, 0.4, cl_stokes, cls_stokes)
else:
hoomd.context.msg.error("Invalid neighborlist method specified. Valid options are: cell, tree, stencil. \n");
raise RuntimeError('Error constructing neighborlist');
# Set neighborlist properties
self.neighbor_list.setEvery(1, True);
hoomd.context.current.system.addCompute(self.neighbor_list, "stokes_nlist")
self.neighbor_list.countExclusions();
# Call the stokes integrator
self.cpp_method = _PSEv1.Stokes(hoomd.context.current.system_definition, group.cpp_group, T.cpp_variant, seed, self.neighbor_list, xi, error);
self.cpp_method.validateGroup()
if function_form is not None:
self.cpp_method.setShear(function_form.cpp_function, max_strain)
else:
no_shear_function = shear_function.steady(dt = 0)
self.cpp_method.setShear(no_shear_function.cpp_function, max_strain)
self.cpp_method.setParams()
## Changes parameters of an existing integrator
    # \param self self
    # \param T Temperature
    # \param function_form Functional form for shear
    # \param max_strain Maximum box deformation for shear
    #
    # To change the parameters of an existing integrator, you must save it in a variable when it is
    # specified, like so:
    # \code
    # pse = integrate.PSEv1(group=all, T=1.0)
    # pse.set_params(T=2.0)
    # \endcode
def set_params(self, T=None, function_form = None, max_strain=0.5):
        hoomd.util.print_status_line();
self.check_initialization();
if T is not None:
# setup the variant inputs
T = hoomd.variant._setup_variant_input(T);
self.cpp_method.setT(T.cpp_variant);
if function_form is not None:
self.cpp_method.setShear(function_form.cpp_function, max_strain)
## Stop any shear
def stop_shear(self, max_strain = 0.5):
no_shear_function = shear_function.steady(dt = 0)
self.cpp_method.setShear(no_shear_function.cpp_function, max_strain)
| 5,578 | 43.277778 | 154 | py |
PSE | PSE-master/PSEv1/__init__.py | # this file exists to mark this directory as a python module
# need to import all submodules defined in this directory
from hoomd.PSEv1 import integrate
from hoomd.PSEv1 import shear_function
from hoomd.PSEv1 import variant
| 224 | 36.5 | 60 | py |
PSE | PSE-master/PSEv1/shear_function.py | ## \package PSEv1.shear_function
# classes representing shear functions, which can be input of an integrator and variant
# to shear the box of a simulation
from hoomd.PSEv1 import _PSEv1
import hoomd
## shear function interface representing shear flow field described by a function
class _shear_function:
## Constructor and check the validity of zero param
# \param zero Specify absolute time step number location for 0 in \a points. Use 'now' to indicate the current step.
def __init__(self, zero = 'now'):
self.cpp_function = None
if zero == 'now':
self._offset = hoomd.context.current.system.getCurrentTimeStep()
else:
# validate zero
if zero < 0:
hoomd.context.msg.error("Cannot create a shear_function variant with a negative zero\n")
raise RuntimeError('Error creating shear function')
if zero > hoomd.context.current.system.getCurrentTimeStep():
hoomd.context.msg.error("Cannot create a shear_function variant with a zero in the future\n")
raise RuntimeError('Error creating shear function')
self._offset = zero
## Get shear rate at a certain time step, might be useful when switching strain field
# \param timestep the timestep
def get_shear_rate(self, timestep):
return self.cpp_function.getShearRate(timestep)
## Get the strain at a certain time step. The strain is not wrapped
# \param timestep the timestep
def get_strain(self, timestep):
return self.cpp_function.getStrain(timestep)
## Get the offset of this shear function
def get_offset(self):
return self.cpp_function.getOffset()
## concrete class representing steady shear, no shear by default if shear_rate is not provided
class steady(_shear_function):
## Constructor of steady shear function
# \param dt the time interval between each timestep, must be the same with the global timestep
# \param shear_rate the shear rate of the shear, default is zero, should be zero or positive
# \param zero the time offset
def __init__(self, dt, shear_rate = 0, zero = 'now'):
_shear_function.__init__(self, zero)
self.cpp_function = _PSEv1.SteadyShearFunction(shear_rate, self._offset, dt)
## concrete class representing simple sinusoidal oscillatory shear
class sine(_shear_function):
## Constructor of simple sinusoidal oscillatory shear
# \param dt the time interval between each timestep, must be the same with the global timestep
# \param shear_rate the maximum shear rate of the ocsillatory shear, must be positive
# \param shear_freq the frequency (real frequency, not angular frequency) of the ocsillatory shear, must be positive
# \param zero the time offset
def __init__(self, dt, shear_rate, shear_freq, zero = 'now'):
if shear_rate <= 0:
hoomd.context.msg.error("Shear rate must be positive (use steady class instead for zero shear)\n")
raise RuntimeError("Error creating shear function")
if shear_freq <= 0:
hoomd.context.msg.error("Shear frequency must be positive (use steady class instead for steady shear)\n")
raise RuntimeError("Error creating shear function")
_shear_function.__init__(self, zero)
self.cpp_function = _PSEv1.SinShearFunction(shear_rate, shear_freq, self._offset, dt)
## concrete class representing chirp oscillatory shear
class chirp(_shear_function):
## Constructor of chirp oscillatory shear
# \param dt the time interval between each timestep, must be the same with the global timestep
# \param amplitude the strain amplitude of Chirp oscillatory shear, must be positive
# \param omega_0 minimum angular frequency, must be positive
# \param omega_f maximum angular frequency, must be positive and larger than omega_0
# \param periodT final time of chirp
# \param zero the time offset
def __init__(self, dt, amplitude, omega_0, omega_f, periodT, zero = 'now'):
_shear_function.__init__(self, zero)
self.cpp_function = _PSEv1.ChirpShearFunction(amplitude, omega_0, omega_f, periodT, self._offset, dt)
## concrete class representing Tukey window function
class tukey_window(_shear_function):
## Constructor of Tukey window function
# \param dt the time interval between each timestep, must be the same with the global timestep
# \param periodT time length of the Tukey window function
# \param tukey_param Tukey window function parameter, must be within (0, 1]
# \param zero the time offset
def __init__(self, dt, periodT, tukey_param, zero = 'now'):
if tukey_param <= 0 or tukey_param > 1:
hoomd.context.msg.error("Tukey parameter must be within (0, 1]")
raise RuntimeError("Error creating Tukey window function")
_shear_function.__init__(self, zero)
self.cpp_function = _PSEv1.TukeyWindowFunction(periodT, tukey_param, self._offset, dt)
## concrete class representing a windowed shear function
class windowed(_shear_function):
## Constructor of a windowed shear function
# The strain of the resulting windowed shear function will be the product of the original shear function and
# the provided window function
# \param function_form the original shear function
# \param window the window function. It is recommended to make sure the offset (zero) of the window function is the same with shear function
def __init__(self, function_form, window):
_shear_function.__init__(self, 'now') # zero parameter is not used in windowed class anyways
self.cpp_function = _PSEv1.WindowedFunction(function_form.cpp_function, window.cpp_function)
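# Minimal usage sketch (illustrative only, not part of the original file; argument values are made up):
#
#   base = sine(dt=1e-3, shear_rate=1.0, shear_freq=1.0)
#   window = tukey_window(dt=1e-3, periodT=10.0, tukey_param=0.5)
#   shear = windowed(base, window)   # strain is the product of the base and window strains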
| 5,763 | 49.121739 | 144 | py |
PSE | PSE-master/examples/run.py | import hoomd;
from hoomd import _hoomd
from hoomd.md import _md
import hoomd.PSEv1
import os;
import math
hoomd.context.initialize('');
# Time stepping information
dt = 1e-3 # time step
tf = 1e0 # the final time of the simulation (in units of bare particle diffusion time)
nrun = tf / dt # number of steps
# Particle size
#
# Changing this won't change the PSE hydrodynamics, which assumes that all particles
# have radius = 1.0, and ignores HOOMD's size data. However, might be necessary if
# hydrodynamic radius is different from other radii needed.
radius = 1.0
diameter = 2.0 * radius
# File output location
loc = 'Data/'
if not os.path.isdir( loc ):
os.mkdir( loc )
# Simple cubic crystal of 1000 particles
N = 1000;
L = 64
n = math.ceil(N ** (1.0/3.0)) # number of particles along 1D
a = L / n # spacing between particles
# Create the box and particles
hoomd.init.create_lattice(unitcell=hoomd.lattice.sc(a=a),n=n)
# Shear function form, using sinusoidal oscillatory shear as example
#
# Options are: none (no shear. default if left unspecified in integrator call)
# steady (steady shear)
# sine (sinusoidal oscillatory shear)
# chirp (chirp frequency sweep)
function_form = hoomd.PSEv1.shear_function.sine( dt = dt, shear_rate = 1.0, shear_freq = 1.0 )
# Set up PSE integrator
#
# Arguments to PSE integrator (default values given in parentheses):
# group -- group of particle to act on (should be all)
# seed (1) -- Seed for the random number generator used in Brownian calculations
# T (1.0) -- Temperature
# xi (0.5) -- Ewald splitting parameter. Changing value will not affect results, only speed.
# error (1E-3) -- Calculation error tolerance
# function_form (none) -- Functional form for shearing. See above (or source code) for valid options.
hoomd.md.integrate.mode_standard(dt=dt)
pse = hoomd.PSEv1.integrate.PSEv1( group = hoomd.group.all(), seed = 1, T = 1.0, xi = 0.5, error = 1E-3, function_form = function_form )
# Run the simulation
hoomd.run( nrun )
| 2,068 | 33.483333 | 136 | py |
online-active-model-selection | online-active-model-selection-master/__init__.py | from . import *
| 16 | 7.5 | 15 | py |
online-active-model-selection | online-active-model-selection-master/src/__init__.py | 0 | 0 | 0 | py |
|
online-active-model-selection | online-active-model-selection-master/src/methods/model_picker.py | import numpy as np
"""This code runs stream based model picker (proposed algorithm)."""
def model_picker(data, idx_budget, streaming_data_indices, tuning_par, mode):
"""
:param data:
:param streaming_data_indices:
:param tuning_par:
:param mode: modes include {predictive}
:return:
"""
# Set params
eta_0 = np.sqrt(np.log(data._num_models)/2)
if idx_budget == 'tuning mode':
budget = data._num_instances
else:
budget = data._budgets[idx_budget]
# Edit the input data accordingly with the indices of streaming data
predictions = data._predictions[streaming_data_indices, :]
oracle = data._oracle[streaming_data_indices]
# Initialize
loss_t = np.zeros(data._num_models) # loss per models
z_t_log = np.zeros(data._num_instances, dtype=int) # binary query decision
z_t_budget = np.zeros(data._num_instances, dtype=int) # binary query decision
posterior_t_log = np.zeros((data._num_instances, data._num_models)) # posterior log
mp_oracle = np.zeros(data._num_instances)
hidden_loss_log = np.zeros(data._num_instances, dtype=int)
It_log = np.zeros(data._num_instances, dtype=int)
posterior_t = np.ones(data._num_models)/data._num_models
# For each streaming data instance
for t in np.arange(1, data._num_instances+1, 1):
# Edit eta
eta = eta_0 / np.sqrt(t)
posterior_t = np.exp(-eta * (loss_t-np.min(loss_t)))
# Note that above equation is equivalent to np.exp(-eta * loss_t).
# `-np.min(loss_t)` is applied only to avoid entries being near zero for large eta*loss_t values before the normalization
posterior_t /= np.sum(posterior_t) # normalize
# Log posterior_t
posterior_t_log[t-1, :] = posterior_t
# Compute u_t
u_t = _compute_u_t(data, posterior_t, predictions[t-1, :], tuning_par)
# Sanity checks for sampling probability
if u_t > 1:
u_t = 1
if np.logical_and(u_t>=0, u_t<=1):
u_t = u_t
else:
u_t = 0
# Is x_t in the region of disagreement? yes if dis_t>1, no otherwise
dist_t = len(np.unique(predictions[t-1, :]))
        # If x_t is in the region of agreement, don't query anything
if dist_t == 1:
u_t = 0
z_t = 0
z_t_log[t-1] = z_t
else:
#Else, make a random query decision
if u_t>0:
u_t = np.maximum(u_t, eta)
if u_t>1:
u_t=1
z_t = np.random.binomial(size=1, n=1, p=u_t)
z_t_log[t-1] = z_t
if z_t == 1:
loss_t += (np.array((predictions[t-1, :] != oracle[t-1]) * 1) / u_t)
loss_t = loss_t.reshape(data._num_models, 1)
loss_t = np.squeeze(np.asarray(loss_t))
m_star = np.random.choice(list(range(data._num_models)), p=posterior_t)
# Incur hidden loss
hidden_loss_log[t-1] = (predictions[t-1, m_star] != oracle[t-1]) * 1
# print(z_t)
# print(loss_t)
# Terminate if it exceeds the budget
if np.sum(z_t_log) < budget:
z_t_budget[t-1] = z_t_log[t-1]
# Labelling decisions as 0's and 1's
labelled_instances = z_t_log
ct_log = np.ones(data._num_instances, dtype=int)
return (labelled_instances, ct_log, z_t_budget, hidden_loss_log, posterior_t_log)
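# Worked example (illustrative only): with posterior [0.5, 0.3, 0.2] over three models and a
# candidate label c on which only the first model errs, loss_c = [1, 0, 0], so
# term1 = <posterior, loss_c> = 0.5 and the variance-style score is term1 * (1 - term1) = 0.25;
# u_t is tuning_par times the maximum of this score over candidate labels (see _compute_u_t below).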
##
def _compute_u_t(data, posterior_t, predictions_c, tuning_par):
# Initialize possible u_t's
u_t_list = np.zeros(data._num_classes)
# Repeat for each class
for c in range(data._num_classes):
# Compute the loss of models if the label of the streamed data is "c"
loss_c = np.array(predictions_c != c)*1
#
# Compute the respective u_t value (conditioned on class c)
term1 = np.inner(posterior_t, loss_c)
u_t_list[c] = term1*(1-term1)
# Return the final u_t
u_t = tuning_par * np.max(u_t_list)
return u_t | 4,004 | 32.375 | 129 | py |
online-active-model-selection | online-active-model-selection-master/src/methods/query_by_committee.py | import numpy as np
import scipy.stats as stats
def query_by_committee(data, idx_budget, streaming_data_indices, tuning_par):
# Set vals, params
if idx_budget == 'tuning mode':
budget = data._num_instances
else:
budget = data._budgets[idx_budget]
# Edit the input data accordingly with the indices of streaming data
predictions = data._predictions[streaming_data_indices, :]
# Initialize
prior = np.ones(data._num_models) / data._num_models
posterior = prior
z_i_log = np.zeros(data._num_instances, dtype=int)
z_t_budget = np.zeros(data._num_instances, dtype=int)
# If the strategy is adaptive,
for i in range(data._num_instances):
# Measure the normalized entropy of the incoming data
hist, bin_edges = np.histogram(predictions[i, :], bins=data._num_classes)
prob_i = hist/np.sum(hist)
entropy_i = stats.entropy(prob_i, base=2) / np.log2(data._num_classes) * tuning_par
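        # Worked example (illustrative only): if 10 committee members split 6/4 between two classes,
        # prob_i = [0.6, 0.4] and the base-2 entropy is about 0.971; with data._num_classes = 2 the
        # normalized entropy is also about 0.971 before scaling by tuning_par.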
# Check if the normalized entropy is greater than 1
if entropy_i > 1:
entropy_i = 1
if entropy_i < 0:
entropy_i = 0
# Randomly decide whether to query z_i or not
z_i = np.random.binomial(size=1, n=1, p=entropy_i)
# Log the value
z_i_log[i] = z_i
# Terminate if budget is exceeded
if np.sum(z_i_log) <= budget:
z_t_budget[i] = z_i_log[i]
# Labelling decisions as 0's and 1's
labelled_instances = z_i_log
ct_log = np.ones(data._num_instances, dtype=int)
return (labelled_instances, ct_log, z_t_budget) | 1,601 | 31.693878 | 91 | py |
online-active-model-selection | online-active-model-selection-master/src/methods/efficient_active_learning.py | import numpy as np
import sys
import mpmath
sys.modules['sympy.mpmath'] = mpmath
from sympy.solvers.solvers import *
def efficient_active_learning(data, idx_budget, streaming_data_indices, c0, constant_efal):
# Set vals, params
c1 = 1
c2 = c1
if idx_budget == 'tuning mode':
budget = data._num_instances
else:
budget = data._budgets[idx_budget]
# Edit the input data accordingly with the indices of streaming data
predictions = data._predictions[streaming_data_indices, :]
oracle = data._oracle[streaming_data_indices]
# Initialize
p_t_log = np.zeros(data._num_instances)
z_t_log = np.zeros(data._num_instances)
z_t_budget = np.zeros(data._num_instances, dtype=int)
# Repeat for each streaming instance
for t in range(data._num_instances):
# if no data streamed in before, set err to 0
if t == 0:
err = 0
else: # Else, compute the error of models
err = _compute_err(data, predictions[:t, :], oracle[:t], t, z_t_log[:t], p_t_log[:t])
# Is x_t in the region of disagreement?
dis_t = len(np.unique(predictions[t, :]))
# Query decision only if x_t is in the region of disagreement
if dis_t != 1:
# Find the errors of best and the second best model
#
# The best model
h_t = np.min(err)
#
# The second best model
if len(np.unique(err)) == 1:
h_t_ = h_t
else:
h_t_ = np.flip(sorted(set(err)))[-2]
# Compute G[t]
G_t = h_t_ - h_t
# Compute the threshold
if t == 0:
threshold = 1000
else:
threshold = _compute_threshold(t, c0, constant_efal)
# Compute P[t]
if G_t <= threshold:
p_t = 1
else:
s = _compute_s(G_t, data._num_models, t, c0, c1, c2)
p_t = s
if p_t > 1:
p_t = 1
elif p_t < 0:
p_t = 0
else:
p_t = p_t
# Toss a coin
z_t = np.random.binomial(size=1, n=1, p=float(p_t))
# Log the result
else: # If x_t is not in the region of disagreement, do not query
p_t = 0
z_t = 0
p_t_log[t] += p_t
z_t_log[t] += z_t
# Terminate if budget is exceeded
if np.sum(z_t_log) <= budget:
z_t_budget[t] = z_t_log[t]
# Assign z[t]'s to labelled instances
p_t_log[p_t_log==0] = 1
c_t_log = np.divide(1, p_t_log)
return (z_t_log, c_t_log, z_t_budget)
def _compute_err(data, predictions_s, oracle_s, t, z_t_s, p_t_s):
# Compute the error
#
# Compute the loss
oracle_replicated = np.matlib.repmat(oracle_s.reshape(t, 1), 1, data._num_models)
loss_s = np.asarray(predictions_s != oracle_replicated)*1
#
# Compute the weights
p_t_s[p_t_s==0] = 1
ratio = np.divide(z_t_s, p_t_s)
ratio_replicated = np.matlib.repmat(ratio.reshape(t, 1), 1, data._num_models)
#
    # Importance-weighted error estimate for each model
err = np.mean(np.multiply(ratio_replicated, loss_s), axis=0)
return err
#
def _compute_threshold(t, c0, constant_efal):
# num_streamed[t] = t+1
t = t+1
# Set params
#c0 = 16 * np.log(num_models * 2 * (3 + t * np.log2(t)) * t * (t+1) / delta) / np.log(t+1)
# Set terms
term2 = c0 * np.log(t) / (t - 1)
term1 = np.sqrt(term2)
# Compute the threshold
threshold = term1 + term2
return threshold*constant_efal
#
def _compute_s(G_t, num_models, t, c0, c1, c2):
# num_streamed[t] = t+1
t = t + 1
# Set terms
term2 = c0 * np.log(t) / (t - 1)
term1 = np.sqrt(term2)
# # Set variable
# x = Symbol('x')
#
# # c1 = 5
# # c2 = 5
# Solve the equation
# s = solve(term1 * (c1/sqrt(x) - c1 + 1) + term2 * (c2/x - c2 + 1) - G_t, x)
#
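    # With c1 = c2 = 1 the commented equation reduces to term1/sqrt(x) + term2/x = G_t.
    # Substituting y = 1/sqrt(x) gives the quadratic term2*y**2 + term1*y - G_t = 0, and since
    # term1 = sqrt(term2) its roots lead to the two closed-form candidates x_1 and x_2 below.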
term_1 = 2 * G_t * term2
term_2 = term2 * np.sqrt(4 * G_t + 1)
term_3 = 2 * G_t**2
s = []
x_1 = (term_1 - term_2 + term2)/term_3
x_2 = (term_1 + term_2 + term2) / term_3
s.append(x_1)
s.append(x_2)
# Find the ind of positive solution
s = np.array(s)
ind_pos = np.where(np.logical_and((s > 0), (s < 1)))[0]
if len(ind_pos) == 0:
p_t = 0
elif len(ind_pos) == 1:
p_t = s[ind_pos]
else:
p_t = np.mean(s)
p_t = p_t
return p_t | 4,531 | 24.60452 | 97 | py |
online-active-model-selection | online-active-model-selection-master/src/methods/importance_weighted_active_learning.py | import numpy as np
import numpy.matlib
def importance_weighted_active_learning(data, idx_budget, streaming_data_indices, tuning_par, constant_iwal):
# Set vals, params
if idx_budget == 'tuning mode':
budget = data._num_instances
else:
budget = data._budgets[idx_budget]
# Edit the input data accordingly with the indices of streaming data
predictions = data._predictions[streaming_data_indices, :]
oracle = data._oracle[streaming_data_indices]
# Initialize
p_t_log = np.zeros(data._num_instances) # probability of being queried for the streaming data
c_t_log = np.zeros(data._num_instances) # weight of each streaming instance: 1/p
z_t_log = np.zeros(data._num_instances) # query decision
z_t_budget = np.zeros(data._num_instances, dtype=int)
models_t = np.ones(data._num_models, dtype=int) # the ensemble at epoch t: 1 if the model is in the ensembele, 0 otherwise
L_t_log = np.zeros(data._num_models, dtype=float) # error of models at epoch t
# For each streaming instance
for t in np.arange(data._num_instances):
# Is x_t in the region of disagreement?
dis_t = len(np.unique(predictions[t, :]))
# Query decision only if x_t is in the region of disagreement
if dis_t != 1:
# Set the rejection threshold
(p_t, models_t_updated) = _loss_weighting(predictions[t, :], t, data._num_classes, 0.1, L_t_log, models_t)
# #print('pt='+str(p_t))
p_t = p_t * tuning_par
# Update the ensemble
models_t = models_t_updated
if p_t > 1:
p_t = 1
if p_t < 0:
p_t = 0
# Log the rejection threshold/probability of being queried
p_t_log[t] = p_t
# Randomly decide whether to query its label or not
z_t = np.random.binomial(size=1, n=1, p=p_t)
z_t_log[t] = z_t
# Log c_t's
if p_t != 0:
c_t = 1/p_t
else:
c_t = 0
c_t_log[t] = c_t
# Update L[t] log
oracle_replicated = np.matlib.repmat(oracle.reshape(data._num_instances, 1), 1, data._num_models)
loss_accumulated = np.asarray(predictions[:t+1, :] != oracle_replicated[:t+1, :])*1
ratio = np.multiply(z_t_log[:t+1], c_t_log[:t+1]).reshape(t+1,1)
ratio_replicated = np.matlib.repmat(ratio, 1, data._num_models)
L_t_log = np.mean(np.multiply(loss_accumulated, ratio_replicated), axis=0)
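            # Weighting each queried loss by z_t * c_t = z_t / p_t makes L_t_log an
            # importance-weighted (unbiased) estimate of each model's error, even though
            # only the queried labels contribute to the sum.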
else:
z_t_log[t] = 0
c_t_log[t] = 1
# Terminate if budget is exceeded
if np.sum(z_t_log) <= budget:
z_t_budget[t] = z_t_log[t]
# Labelling decisions as 0's and 1's
labelled_instances = z_t_log
return (labelled_instances, c_t_log, z_t_budget)
#
def _loss_weighting(predictions_t, t, num_classes, delta, L_t_log, models_t):
# Find the ensemble: the models that have survived so far
models_t_ind = np.where(models_t.reshape(np.size(models_t), 1) == 1)[0]
# Find the relative L[t-1]
L_t = np.min(L_t_log[models_t_ind])
# Compute delta[t-1]
num_models_t = len(models_t_ind)
delta_t = _rejection_threshold(t, num_models_t, delta)
# Compute the upper bound for ensemble learning
ensemble_threshold = L_t + delta_t
# Find the hypothesis below the ensemble threshold
models_t_next = (L_t_log <= ensemble_threshold)
# Find the overlapping models with already survived ones
models_t_updated = np.logical_and(models_t_next, models_t)
num_models = np.size(predictions_t)
models_t_updated_ind = np.where(models_t_updated.reshape(num_models, 1) == 1)[0]
# Compute p[t]
#
# Initialize the introspective losses
introspective_losses = np.zeros(num_classes)
# For each possible label of y_t
for c in np.arange(num_classes):
###
# Log the number of models in this epoch
num_models_t = np.size(models_t_updated_ind)
# Compute the loss of models.
loss_models = np.asarray(predictions_t[models_t_updated_ind] != c) * 1
# Compute the introspective loss.
introspective_losses[c] = np.max(loss_models) - np.min(loss_models)
# Set p_t the maximum among all possible pairwise losses
p_t = np.max(introspective_losses)
# Check if p_t is outside of [0, 1]
if p_t > 1:
p_t = 1
# Return p_t
return (p_t, models_t_updated)
def _rejection_threshold(t, num_models_t, delta):
# Set delta[t] to 0 if no instance has streamed before the current one yet
if t == 0:
delta_t = 0
else:
t +=1
# Compute delta_t
delta = 0.01
term1 = 8/t
term2 = np.log(2*t*(t+1)*num_models_t**2 / delta)
delta_t = np.sqrt(term1*term2)
return delta_t | 4,912 | 31.322368 | 126 | py |
online-active-model-selection | online-active-model-selection-master/src/methods/random_sampling_disagreement.py | import numpy as np
"""Random sampling code for stream based model selection."""
def random_sampling_disagreement(data, idx_budget, streaming_data_indices, tuning_par_rs):
"""
:param data:
:param streaming_data_indices:
:return:
"""
# Set params
num_instances = data._num_instances
budget = data._budgets[idx_budget]
# Initialize
z_t_log = np.zeros(num_instances, dtype=int)
z_t_budget = np.zeros(num_instances)
ct_log = np.ones(data._num_instances, dtype=int)
# Set probability of querying specific to the given budget
p_budget = tuning_par_rs
# Identify the instances in the region of disagreement
predictions_sample = data._predictions[streaming_data_indices, :]
loc_disagreement, num_disagreement = measure_disagreement(predictions_sample)
idx_disagreement = np.squeeze(np.asarray(np.nonzero(loc_disagreement))).astype(int)
# Randomly select queries
z_temp = np.random.binomial(1, p=p_budget, size=num_disagreement)
z_t_log[idx_disagreement] += z_temp
# Set the budgeted indices variables
for i in np.arange(num_instances):
if np.sum(z_t_log[:i+1]) <= budget:
z_t_budget[i] += z_t_log[i]
# print(np.sum(z_t_log))
return (z_t_log, ct_log, z_t_budget)
def measure_disagreement(predictions):
"""This function counts the number of instances in the region of disagreement."""
# Set params
n, m = predictions.shape
# Initialize
loc_disagreement = np.zeros(n)
# For each instance, count the number of non-unique elements
for i in np.arange(n):
num_uniques = len(np.unique(predictions[i, :]))
if num_uniques != 1: # If models have different predictions, set the respective index to one
loc_disagreement[i] += 1
# Count the total number of instances in the region of disagreement
num_disagreement = np.sum(loc_disagreement).astype(int)
return loc_disagreement, num_disagreement
| 1,975 | 32.491525 | 100 | py |
online-active-model-selection | online-active-model-selection-master/src/methods/__init__.py | from . import *
| 16 | 7.5 | 15 | py |
online-active-model-selection | online-active-model-selection-master/src/methods/random_sampling.py | import numpy as np
"""Random sampling code for stream based model selection (unused)."""
def random_sampling(data, idx_budget, streaming_data_indices):
"""
:param data:
:param streaming_data_indices:
:return:
"""
# Set params
num_instances = data._num_instances
budget = data._budgets[idx_budget]
p_budget = budget/num_instances
# Randomly select queries
z_t_log = np.random.binomial(1, p=p_budget, size=num_instances)
# Set other variables
z_t_budget = z_t_log
ct_log = np.ones(data._num_instances, dtype=int)
return (z_t_log, ct_log, z_t_budget) | 612 | 25.652174 | 69 | py |
online-active-model-selection | online-active-model-selection-master/src/methods/structural_query_by_committee.py | import numpy as np
import scipy.stats as stats
def structural_query_by_committee(data, idx_budget, streaming_data_indices, tuning_par, constant_sqbc):
# Set vals, params
if idx_budget == 'tuning mode':
budget = data._num_instances
else:
budget = data._budgets[idx_budget]
# Edit the input data accordingly with the indices of streaming data
predictions = data._predictions[streaming_data_indices, :]
oracle = data._oracle[streaming_data_indices]
# Initialize
prior = np.ones(data._num_models) / data._num_models
posterior = prior
z_t_log = np.zeros(data._num_instances, dtype=int)
z_t_budget = np.zeros(data._num_instances, dtype=int)
loss_acc = np.zeros(data._num_models)
loss_t = 0
# If the strategy is adaptive,
for t in range(data._num_instances):
if len(np.unique(predictions[t, :])) != 1: # If the instance is in the region of disagreement
# Randomly sample two models from the posterior
#posterior[posterior<0.01] = 0.01
posterior = posterior / np.sum(posterior)
g1, g2 = np.random.choice(data._num_models, p=posterior, size=2, replace=True)
disagreement = (predictions[:t+1, g1] != predictions[:t+1, g2]).astype(int)
p_t = np.mean(disagreement) * constant_sqbc
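            # Worked example (illustrative only): if the two sampled models disagree on 3 of the
            # first 10 streamed instances, the empirical disagreement is 0.3 and
            # p_t = 0.3 * constant_sqbc (clipped to [0, 1] below).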
if p_t > 1:
p_t = 1
if np.logical_and(p_t>=0, p_t<=1):
p_t = p_t
else:
p_t = 0
z_t = np.random.binomial(size=1, n=1, p=p_t)
# If queried, update the loss
if z_t == 1:
loss_t = (predictions[t, :] != oracle[t]).astype(int)
# Accumulate the loss
loss_acc += loss_t
# Update posterior
beta = tuning_par
exp_loss_t = np.exp(-beta * loss_t)
posterior = np.multiply(posterior, exp_loss_t)
posterior = posterior / np.sum(posterior) # normalize posterior
else:
z_t = 0
z_t_log[t] = z_t
# Terminate if budget is exceeded
if np.sum(z_t_log) <= budget:
z_t_budget[t] = z_t_log[t]
# Labelling decisions as 0's and 1's
labelled_instances = z_t_log
ct_log = np.ones(data._num_instances, dtype=int)
return (labelled_instances, ct_log, z_t_budget) | 2,392 | 32.704225 | 103 | py |
online-active-model-selection | online-active-model-selection-master/src/evaluation/evaluate_base.py | """Base class for the evaluations."""
from src.evaluation.evaluation_pipeline.evaluate_main import *
class Evals:
def __init__(self, data, client=None):
"""Evaluate methods"""
eval_results = evaluate_main(data, client=client)
"""Assigns evaluations to the self"""
self._prob_succ = eval_results['prob_succ']
self._acc = eval_results['acc']
self._regret = eval_results['regret']
self._sampled_regret = eval_results['sampled_regret']
self._num_queries = eval_results['num_queries']
self._log_acc = eval_results['log_acc']
self._true_acc = eval_results['true_acc']
self._num_queries_t = eval_results['num_queries_t']
self._regret_time = eval_results['regret_time']
self._sampled_regret_time = eval_results['sampled_regret_time']
| 842 | 31.423077 | 71 | py |
online-active-model-selection | online-active-model-selection-master/src/evaluation/__init__.py | from . import *
| 16 | 7.5 | 15 | py |
online-active-model-selection | online-active-model-selection-master/src/evaluation/evaluation_pipeline/evaluate_realizations.py | from src.evaluation.aux.compute_precision_measures import *
import tqdm
import zlib, cloudpickle
def evaluate_realizations(log_slice, predictions, oracle, freq_window_size, method):
"""
This function evaluates the method in interest for given realization of the pool/streaming instances
Parameters:
:param predictions: predictions on the streaming instances (specific to the realization in interest)
:param oracle: ground truth for the streaming instances (specific to the realization in interest)
:param streaming_instances_i: instances that were part of a stream
:param zt_real: {0, 1} vectors where 1's indicate the instances that are querid
:param ct_real: Respective importance weights for the zt_real
:param posterior_real: the posterior of the model picker only
:param freq_window_size: the sliding window size where frequency of each model showing up is computed
:param method: the method in interest
Returns:
Realization specific evaluations
prob_succ_real, acc_real, regret_real, post_ratio_real,
freq_models_real, gap_star_freqs_real, gap_freqs_rea
"""
streaming_instances_i, zt_real, ct_real, posterior_real = log_slice
# predictions = cloudpickle.loads(zlib.decompress(predictions))
# oracle = cloudpickle.loads(zlib.decompress(oracle))
# Extract predictions from models and oracle for given streaming instances.
predictions = predictions[streaming_instances_i, :]
oracle = oracle[streaming_instances_i]
# Set params
num_instances, num_models = predictions.shape
    # Compute each model's true precision and identify the best model(s).
true_precisions = compute_precisions(predictions, oracle, num_models)
true_winner = np.where(np.equal(true_precisions, np.max(true_precisions)))[0]
winner_randint = np.random.randint(len(true_winner))
true_winner_random = true_winner[winner_randint]
true_acc = true_precisions[true_winner_random]
# Squeeze the unit dimensions of posterior real and streaming instance indices.
streaming_instances_i = np.squeeze(streaming_instances_i).astype(int)
posterior_real = np.squeeze(np.asarray(posterior_real))
# Convert z[t] to indices format
# labelled_ins = np.squeeze(np.asarray(zt_real.nonzero())) # the indices whose labels are queried
labelled_ins = np.ravel(np.asarray(zt_real.nonzero())) # the indices whose labels are queried
num_labelled = np.size(labelled_ins) # number of queries for this realization ~budget in interest
if num_labelled == 0:
labelled_ins = 0
num_labelled = 1
# Evaluate the methods upon seeing all the streamed instances
# Compute the weighted loss
weighted_losses = compute_weighted_loss(predictions[labelled_ins, :], oracle[labelled_ins], ct_real[labelled_ins], num_models)
weighted_accuracies = compute_weighted_accuracy(predictions[labelled_ins, :], oracle[labelled_ins], ct_real[labelled_ins], num_models)
# Declare the winners
if method == 'mp': # If model picker, declare the winner through its posterior
arg_winners_t = np.where(np.equal(posterior_real[-1, :].reshape(num_models, 1), np.max(posterior_real[-1, :])))[0]
else: # else, through the weighted losses
if np.size(weighted_losses) > 1:
arg_winners_t = np.where(np.equal(weighted_losses.reshape(num_models, 1), np.min(weighted_losses)))[0] # Winners of the round
else:
arg_winners_t = np.ones(num_models)
# If multi winners, choose randomly
len_winners = np.size(arg_winners_t)
if len_winners > 1:
idx_winner_t = np.random.choice(len_winners, 1)
winner_t = arg_winners_t[idx_winner_t]
winner_t = winner_t.astype(int)
else:
winner_t = arg_winners_t.astype(int)
# Probability of success
if winner_t in true_winner:
prob_succ_real = 1
else:
prob_succ_real = 0
# prob_succ_real = (winner_t == true_winner).astype(int)
# Accuracy of the returned model
acc_real = true_precisions[winner_t]
# Log posterior
if method == 'mp': # If MP, use its own posterior
posterior = posterior_real[-1, :]
else: # Else, form a posterior from weighted losses
if np.sum(weighted_accuracies) == 0:
posterior = np.ones(num_models)/num_models
else:
posterior = weighted_accuracies / np.sum(weighted_accuracies)
if len(np.unique(posterior)) == 1:
post_ratio_real = 0
else:
best_posterior = np.max(posterior)
second_best_posterior = float((sorted(set(posterior)))[-2])
if second_best_posterior == 0:
post_ratio_real= 0
else:
post_ratio_real = np.log(best_posterior/second_best_posterior)
# Regret
# Initialize
loss_true = 0
loss_winner = 0
regret_real = 0
sampled_regret_real = 0
regret_t = np.zeros(num_instances)
sampled_regret_t = np.zeros(num_instances)
num_queries_t_real = np.zeros(num_instances)
# losses_models = np.zeros(num_models)
# Compute hidden regret at each instance (not only queried!)
for t in np.arange(num_instances):
# losses_winners += (predictions[t, :] != oracle[t]).astype(int)
if t == 0:
num_queries_t_real[t] = zt_real[t]
else:
num_queries_t_real[t] = num_queries_t_real[t-1]+zt_real[t]
# Set posterior
if method == 'mp': # If MP, use its own posterior
posterior_t = posterior_real[t, :]
arg_winners_t = np.where(np.equal(posterior_t, np.max(posterior_t)))[0]
else: # else, check the weighted losses
posterior_t = np.ones(num_models)/num_models
if num_labelled == 1:
labelled_instances_t = 0
else:
idx_labelled_instances_transient = np.where(labelled_ins.reshape(num_labelled, 1) < t)[0] # find the location of labelled points that are smaller than t
labelled_instances_t = labelled_ins[idx_labelled_instances_transient] # find all labelled points so far
weighted_losses_t = compute_loss(predictions[labelled_instances_t, :], oracle[labelled_instances_t], num_models)
if np.size(labelled_instances_t)>1:
if np.sum(weighted_losses_t) == 0: # if no true positive yet, set the posterior uniform
arg_winners_t = np.arange(num_models)
else:
arg_winners_t = np.where(np.equal(weighted_losses_t.reshape(num_models, 1), np.min(weighted_losses_t)))[0]
else:
arg_winners_t = np.arange(num_models)
# If multi winners, choose randomly
len_winners = np.size(arg_winners_t)
if len_winners > 1:
idx_winner_t = np.random.choice(len_winners, 1)
winner_t = arg_winners_t[idx_winner_t]
else:
winner_t = arg_winners_t
        # Loss of the returned model on instance t
        loss_winner = int((predictions[t, int(winner_t)] != oracle[t])*1)
        # Loss of the true winner on instance t
        loss_true = int((predictions[t, int(true_winner_random)] != oracle[t])*1)
# Sampled regret time
m_star = np.random.choice(list(range(num_models)), p=posterior_t)
# Incur hidden loss
loss_sampled = (predictions[t, m_star] != oracle[t]) * 1
regret_real += (loss_winner - loss_true)
sampled_regret_real += (loss_sampled - loss_true)
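        # regret_real accumulates the loss gap between the currently declared winner and the
        # best model in hindsight, while sampled_regret_real instead draws a model from the
        # running posterior at each step and accumulates its loss gap.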
# print(regret_real)
regret_t[t] = regret_real
sampled_regret_t[t] = sampled_regret_real
#
# Return all
return (true_acc, acc_real, prob_succ_real, regret_real, regret_t, sampled_regret_real, sampled_regret_t, num_queries_t_real)
#
| 7,747 | 39.778947 | 168 | py |
online-active-model-selection | online-active-model-selection-master/src/evaluation/evaluation_pipeline/evaluate_main.py | # from src.evaluation.evaluation_pipeline.evaluate_method import *
from src.evaluation.evaluation_pipeline.evaluate_realizations import *
from src.evaluation.aux.load_results import *
from dask.distributed import Client, as_completed
from tqdm.auto import tqdm, trange
import cloudpickle, zlib
def evaluate_main(data, client=None):
"""
This function evaluates the streaming methods one by one, and saves the evaluation results.
Parameters:
:param data:
Returns:
"""
# Set params
len_budgets = len(data._budgets)
# Initialization
num_queries = np.zeros((len(data._methods), len_budgets))
# Set params
num_reals = data._num_reals # number of realizations which the evaluation will be averaged over
freq_window_size = data._eval_window # the window which model frequencies will be calculated over
num_instances = data._num_instances # number of instances per realization
predictions = data._predictions
oracle = data._oracle
# Initialize the evaluations
prob_succ = np.zeros((len_budgets, len(data._methods)))
acc = np.zeros((len_budgets, len(data._methods)))
#
regret = np.zeros((len_budgets, len(data._methods)))
sampled_regret = np.zeros((len_budgets, len(data._methods)))
#
# Initialize the log accuracies
log_acc = np.zeros((len_budgets, num_reals, len(data._methods)))
true_acc = np.zeros((len_budgets, num_reals))
# Regret over time
regret_time = np.zeros((len_budgets, num_instances, len(data._methods)))
sampled_regret_time = np.zeros((len_budgets, num_instances, len(data._methods)))
num_queries_t = np.zeros((len_budgets, num_instances, len(data._methods)))
if client is not None:
tqdm.write("Broadcasting model and oracle predictions to workers.")
[predictions_future] = client.scatter([predictions], broadcast=True)
[oracle_future] = client.scatter([oracle], broadcast=True)
# For each budget, repeat the experiment
for idx_budget in trange(len_budgets, desc="Evaluating Budgets"):
desc = "Evaluating Methods (Budget: %d)" % data._budgets[idx_budget]
# Load results
(idx_all, ct_all, streaming_instances_log, idx_queries, posterior_t_log) = load_results(data, idx_budget)
# idx_budgeted_queries = idx_queries
# idx_queries = idx_all
if client is not None:
tqdm.write("Broadcasting experiment logs to workers.")
arg_futures = [None for _ in range(len(data._methods))]
for i in range(len(data._methods)):
arg_futures[i] = client.scatter([
(streaming_instances_log[:, j], idx_queries[:, j, i], ct_all[:, j, i], posterior_t_log[:, :, j])
for j in range(num_reals)]
)
tqdm.write("Submitting tasks.")
method_result_futures = []
for i in range(len(data._methods)):
realization_result_futures = []
for j in range(num_reals):
realization_result_futures.append(client.submit(evaluate_realizations, arg_futures[i][j], predictions_future, oracle_future, freq_window_size, data._methods[i], pure=False, priority=-i))
method_result_futures.append(client.submit(
lambda realizations: (
np.array(realizations)[:, 0],
np.array(realizations)[:, 1],
np.array(realizations)[:, 1:].mean(axis=0).tolist()
),
realization_result_futures))
if client is None:
# Evaluate each method
for i in trange(len(data._methods), desc=desc):
method_result = []
# For each realization for the method of interest, evaluate the realization and add accumulate the results (normalized by number of realizations)
for j in trange(num_reals, desc="Realizations (Method: %s)" % data._methods[i]):
method_result.append(evaluate_realizations((streaming_instances_log[:, j], idx_queries[:, j, i],
ct_all[:, j, i], posterior_t_log[:, :, j]),
predictions, oracle, freq_window_size, data._methods[i]))
(true_acc_method, log_acc_method, prob_succ_real, regret_real, regret_t, sampled_regret_real, sampled_regret_t, num_queries_t_real, ) = zip(*method_result)
# print('round: '+str(i))
# print('true_acc_method:'+str(true_acc_method))
# print('log_acc_method:'+str(log_acc_method))
# print('prob_succ_real:'+str(prob_succ_real))
# Raw x-axis
prob_succ[idx_budget, i] = np.mean(prob_succ_real)
acc[idx_budget, i] = np.mean(log_acc_method)
regret[idx_budget, i] = np.mean(regret_real)
sampled_regret[idx_budget, i] = np.mean(sampled_regret_real)
# print('freq_models_real:'+str(np.size(freq_models_real)))
#
log_acc[idx_budget, :, i] = log_acc_method
#
true_acc[idx_budget, :] = true_acc_method
# Calculate the plain budget usage
num_queries[i, idx_budget] = np.sum(idx_all[:, :, i]) / data._num_reals
# print(regret_t)
regret_time[idx_budget, :, i] = np.mean(regret_t, axis=0)
sampled_regret_time[idx_budget, :, i] = np.mean(sampled_regret_t, axis=0)
num_queries_t[idx_budget, :, i] = np.mean(num_queries_t_real, axis=0)
else:
for i, future in enumerate(tqdm(method_result_futures, total=len(data._methods), desc=desc)):
true_acc_method, log_acc_method, mean_values = future.result()
log_acc_method_m, prob_succ_real_m, regret_real_m, regret_t, sampled_regret_real_m, sampled_regret_t, num_queries_t_real = mean_values
# print('round: '+str(i))
# print('true_acc_method:'+str(true_acc_method))
# print('log_acc_method:'+str(log_acc_method))
# print('log_acc_method_frequent:'+str(log_acc_method_frequent))
# print('log_acc_method_m:'+str(log_acc_method_m))
# print('prob_succ_real_m:'+str(prob_succ_real_m))
# Raw x-axis
prob_succ[idx_budget, i] = prob_succ_real_m
acc[idx_budget, i] = log_acc_method_m
regret[idx_budget, i] = regret_real_m
sampled_regret[idx_budget, i] = sampled_regret_real_m
# print('freq_models_real_m:'+str(np.size(freq_models_real_m)))
#
log_acc[idx_budget, :, i] = log_acc_method
true_acc[idx_budget, :] = true_acc_method
#
regret_time[idx_budget, :, i] = regret_t
sampled_regret_time[idx_budget, :, i] = sampled_regret_t
num_queries_t[idx_budget, :, i] = num_queries_t_real
# Calculate the plain budget usage
num_queries[i, idx_budget] = np.sum(idx_all[:, :, i]) / data._num_reals
"""Save evaluations"""
np.savez(str(data._resultsdir) + '/eval_results.npz',
prob_succ=prob_succ, acc=acc, regret=regret,
sampled_regret=sampled_regret,
#
num_queries=num_queries,
#
log_acc=log_acc,
true_acc=true_acc,
idx_queries = idx_queries,
regret_time = regret_time,
sampled_regret_time=sampled_regret_time,
num_queries_t = num_queries_t,
)
"""Form the dictionary"""
eval_results = {
'prob_succ':prob_succ,
'acc':acc,
'regret':regret,
'sampled_regret':sampled_regret,
#
'num_queries': num_queries,
#
'log_acc':log_acc,
'true_acc':true_acc,
#
'idx_queries':idx_queries,
#
'regret_time':regret_time,
'sampled_regret_time':sampled_regret_time,
'num_queries_t':num_queries_t,
}
return eval_results | 8,318 | 41.015152 | 206 | py |
online-active-model-selection | online-active-model-selection-master/src/evaluation/evaluation_pipeline/__init__.py | 0 | 0 | 0 | py |
|
online-active-model-selection | online-active-model-selection-master/src/evaluation/aux/load_results.py | import numpy as np
def load_results(data, idx_budget):
"""
This function loads the experiment results in the results folder for a given budget
"""
# Load data
experiment_results = np.load(str(data._resultsdir) + '/experiment_results_'+ 'budget'+str(data._budgets[idx_budget]) + '.npz')
# Extract vars
idx_log = experiment_results['idx_log']
idx_budget_log = experiment_results['idx_budget_log']
ct_log = experiment_results['ct_log']
streaming_instances_log = experiment_results['streaming_instances_log']
hidden_loss_log = experiment_results['hidden_loss_log']
posterior_log = experiment_results['posterior_log']
return (idx_log, ct_log, streaming_instances_log, idx_budget_log, posterior_log)
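
if __name__ == "__main__":
    # Hedged usage sketch (an addition for illustration, not part of the original module).
    # `DummyData` only carries the two attributes read by load_results; the results
    # directory is assumed to already contain files written by experiments_base.
    class DummyData:
        _resultsdir = "resources/results/example_run"
        _budgets = [50, 100]
    idx_log, ct_log, streaming_log, idx_budget_log, posterior_log = load_results(DummyData(), 0)
    print(idx_log.shape)  # (num_instances, num_reals, num_methods)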
| 749 | 38.473684 | 130 | py |
online-active-model-selection | online-active-model-selection-master/src/evaluation/aux/compute_precision_measures.py | import numpy as np
import numpy.matlib
def compute_precisions(pred, orac, num_models):
"""
This function computes the agreements
"""
# Replicate oracle realization
orac_rep = np.matlib.repmat(orac.reshape(np.size(orac), 1), 1, num_models)
# Compute errors
true_pos = (pred == orac_rep) * 1
# Compute the weighted loss
precisions = np.mean(true_pos, axis=0)
# Squeeze precision
precisions = np.squeeze(np.asarray(precisions))
return precisions
"""
This function computes the agreements between two methods
"""
def compute_agreements(pred, orac, num_models):
"""
This function computes the agreements
"""
# Replicate oracle realization
orac_rep = np.matlib.repmat(orac.reshape(np.size(orac), 1), 1, num_models)
# Compute errors
true_pos = (pred == orac_rep) * 1
# Compute the weighted loss
agreements = np.sum(true_pos, axis=0)
# Reduce the extra dimension
agreements = np.squeeze(np.asarray(agreements))
return agreements
def compute_weighted_loss(pred, orac, ct_method, num_models):
#ct_method = np.asarray(ct_method)
"""
This function computes the weighted loss
"""
# Replicate oracle realization
orac_rep = np.matlib.repmat(orac.reshape(np.size(orac), 1), 1, num_models)
# Compute errors
errors = (pred != orac_rep)*1
# Replicate the weights
# print('ct shape:'+str(ct_method.shape))
# print('orac shape:' + str(orac)[0])
# print('ctshape = ' + str(np.size(ct_method.shape)))
# print('oracshape = ' + str(ct_method.shape))
if ct_method.shape != ():
ct_method_rep = np.matlib.repmat(ct_method.reshape(np.size(orac), 1), 1, np.size(pred, 1))
# Compute the weighted errors
weighted_errors = np.multiply(errors, ct_method_rep)
# Compute the weighted loss
weighted_loss = np.mean(weighted_errors, axis=0)
weighted_loss = np.squeeze(np.asarray(weighted_loss))
else:
weighted_loss = 0
#weighted_loss = np.mean(errors, axis=0)
return weighted_loss
def compute_weighted_accuracy(pred, orac, ct_method, num_models):
#ct_method = np.asarray(ct_method)
"""
This function computes the weighted loss
"""
# Replicate oracle realization
orac_rep = np.matlib.repmat(orac.reshape(np.size(orac), 1), 1, num_models)
# Compute errors
true_positives = (pred == orac_rep)*1
# Replicate the weights
# print('ct shape:'+str(ct_method.shape))
# print('orac shape:' + str(orac)[0])
# print('ctshape = ' + str(np.size(ct_method.shape)))
# print('oracshape = ' + str(ct_method.shape))
if ct_method.shape != ():
ct_method_rep = np.matlib.repmat(ct_method.reshape(np.size(orac), 1), 1, np.size(pred, 1))
# Compute the weighted errors
weighted_true_positives = np.multiply(true_positives, ct_method_rep)
# Compute the weighted loss
weighted_true_positives = np.mean(weighted_true_positives, axis=0)
weighted_true_positives = np.squeeze(np.asarray(weighted_true_positives))
else:
weighted_true_positives = 0
#weighted_loss = np.mean(errors, axis=0)
return weighted_true_positives
def compute_loss(pred, orac, num_models):
"""
This function computes the weighted loss
"""
# Replicate oracle realization
orac_rep = np.matlib.repmat(orac.reshape(np.size(orac), 1), 1, num_models)
# Compute errors
errors = (pred != orac_rep)*1
# Compute the weighted loss
loss = np.mean(errors, axis=0)
loss = np.squeeze(np.asarray(loss))
return loss
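
if __name__ == "__main__":
    # Minimal self-check added for illustration (not part of the original file):
    # three models scored against a four-instance oracle.
    toy_pred = np.array([[0, 1, 1],
                         [1, 1, 0],
                         [0, 0, 0],
                         [1, 0, 1]])
    toy_orac = np.array([0, 1, 0, 1])
    print(compute_precisions(toy_pred, toy_orac, 3))  # -> [1.  0.5 0.5]
    print(compute_loss(toy_pred, toy_orac, 3))        # -> [0.  0.5 0.5]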
| 3,628 | 26.08209 | 98 | py |
online-active-model-selection | online-active-model-selection-master/src/publish_evals/publish_evals.py | import matplotlib.pyplot as plt
plt.rc('text', usetex=True)
plt.style.use('classic')
plt.style.use('default')
import seaborn as sns
import numpy as np
sns.set()
"""This function plots the evaluation results for the streaming setting."""
def publish_evals(resultsdir):
"""
:param resultsdir:
:return:
"""
"""Load experiments"""
# experiment_results = np.load(str(resultsdir) + '/experiment_results.npz')
"""Extract params, vals"""
# Load data specific details
data = np.load(str(resultsdir) + "/data.npz")
methods = data['methods']
methods_fullname = data['methods_fullname']
num_instances = data['num_instances']
num_models = data['num_models']
num_reals = data['num_reals']
eval_window = data['eval_window']
    methods = methods_fullname  # use the full method names for printing
budget = data['budgets']
"""Load evaluations"""
eval_results = np.load(str(resultsdir) + '/eval_results.npz')
"""Extract evaluations"""
prob_succ = eval_results['prob_succ']
acc = eval_results['acc']
regret = eval_results['regret']
regret_time = eval_results['regret_time']
#
num_queries = eval_results['num_queries']
log_acc = eval_results['log_acc']
true_acc = eval_results['true_acc']
#
num_queries_t = eval_results['num_queries_t']
print(num_queries_t.shape)
# Determine for which budget point to monitor regret over the stream
idx_regret = int(round(len(budget)/2))
"""Compute expected and worst-case accuracy gaps:"""
# Compute the gaps per realization, per budget and per method
log_gap = np.zeros(log_acc.shape)
log_gap_frequent = np.zeros(log_gap.shape)
for i in range(np.size(log_acc, 2)):
log_gap[:, :, i] = true_acc - np.squeeze(log_acc[:, :, i])
# Compute the expected accuracy gap
mean_acc_gap = 100 * np.mean(log_gap, axis=1) # percentage
worst_acc_gap = 100 * np.percentile(log_gap, 90, axis=1) # percentage
"""Print the evaluation results."""
for i in np.arange(np.size(log_acc, 2)):
print('\nMethod: ' + str(methods_fullname[i]) + ' \n|| Number of Queries: ' + str(
num_queries[i, :]) + ' \n|| Budget: ' + str(budget) + ' \n|| Identification Probability: ' + str(prob_succ[:, i]) +
' \n|| Accuracy gap: ' + str(mean_acc_gap[:, i]) + '\n|| Worst case accuracy gap: ' + str(
worst_acc_gap[:, i]))
# Note: If you would like to monitor regret (over stream), please uncomment below. We omit this to avoid printing a huge matrix
# print('\nMethod: '+str(methods_fullname[i]) + ' \n|| Regret over time: ' +str(regret_time[idx_regret, :, i]))
return [regret_time, num_queries_t, prob_succ, budget]
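
if __name__ == "__main__":
    # Hedged usage sketch (an addition, not part of the original module): summarize an
    # existing results directory. The path below is an illustrative assumption.
    publish_evals("resources/results/example_run")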
| 2,742 | 34.166667 | 135 | py |
online-active-model-selection | online-active-model-selection-master/dev/cluster-up.py | #!/usr/bin/env python3
import os
import paramiko
import sys
print(os.environ.keys())
SCHEDULER_HOST = os.environ.get("SCHEDULER_HOST", None)
if not SCHEDULER_HOST:
raise ValueError("The variable SCHEDULER_HOST not defined.")
print("SCHEDULER_HOST=%s" % SCHEDULER_HOST)
WORKER_HOSTS = os.environ.get("WORKER_HOSTS", None)
if not WORKER_HOSTS:
raise ValueError("The variable WORKER_HOSTS not defined.")
print("WORKER_HOSTS=%s" % WORKER_HOSTS)
SSH_WORKINGDIR = os.environ.get("SSH_WORKINGDIR", None)
if not SSH_WORKINGDIR:
raise ValueError("The variable SSH_WORKINGDIR not defined.")
print("SSH_WORKINGDIR=%s" % SSH_WORKINGDIR)
SSH_USERNAME = os.environ.get("SSH_USERNAME", None)
if not SSH_USERNAME:
raise ValueError("The variable SSH_USERNAME not defined.")
print("SSH_USERNAME=%s" % SSH_USERNAME)
SSH_PASSWORD = os.environ.get("SSH_PASSWORD", None)
# Start scheduler.
print("Starting scheduler on: %s" % SCHEDULER_HOST)
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.load_system_host_keys()
client.connect(SCHEDULER_HOST, username=SSH_USERNAME, password=SSH_PASSWORD)
stdin, stdout, stderr = client.exec_command("bash %s" % os.path.join(SSH_WORKINGDIR, "dev/src/start-dask-scheduler"))
print(stdout.read().decode())
err = stderr.read().decode()
if err:
print(err)
sys.exit(-1)
# Start the workers.
for WORKER_HOST in WORKER_HOSTS.split(","):
WORKER_HOST.strip()
print("Starting worker on: %s" % WORKER_HOST)
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.load_system_host_keys()
client.connect(WORKER_HOST, username=SSH_USERNAME, password=SSH_PASSWORD)
stdin, stdout, stderr = client.exec_command("bash %s" % os.path.join(SSH_WORKINGDIR, "dev/src/start-dask-worker"))
print(stdout.read().decode())
err = stderr.read().decode()
if err:
print(err)
sys.exit(-1)
| 1,951 | 32.084746 | 118 | py |
online-active-model-selection | online-active-model-selection-master/dev/cluster-down.py | #!/usr/bin/env python3
import os
import paramiko
import sys
print(os.environ.keys())
SCHEDULER_HOST = os.environ.get("SCHEDULER_HOST", None)
if not SCHEDULER_HOST:
raise ValueError("The variable SCHEDULER_HOST not defined.")
print("SCHEDULER_HOST=%s" % SCHEDULER_HOST)
WORKER_HOSTS = os.environ.get("WORKER_HOSTS", None)
if not WORKER_HOSTS:
raise ValueError("The variable WORKER_HOSTS not defined.")
print("WORKER_HOSTS=%s" % WORKER_HOSTS)
SSH_WORKINGDIR = os.environ.get("SSH_WORKINGDIR", None)
if not SSH_WORKINGDIR:
raise ValueError("The variable SSH_WORKINGDIR not defined.")
print("SSH_WORKINGDIR=%s" % SSH_WORKINGDIR)
SSH_USERNAME = os.environ.get("SSH_USERNAME", None)
if not SSH_USERNAME:
raise ValueError("The variable SSH_USERNAME not defined.")
print("SSH_USERNAME=%s" % SSH_USERNAME)
SSH_PASSWORD = os.environ.get("SSH_PASSWORD", None)
# Stop the scheduler.
print("Stopping scheduler on: %s" % SCHEDULER_HOST)
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.load_system_host_keys()
client.connect(SCHEDULER_HOST, username=SSH_USERNAME, password=SSH_PASSWORD)
stdin, stdout, stderr = client.exec_command("bash %s" % os.path.join(SSH_WORKINGDIR, "dev/src/stop-dask-scheduler"))
print(stdout.read().decode())
err = stderr.read().decode()
if err:
print(err)
sys.exit(-1)
# Stop the workers.
for WORKER_HOST in WORKER_HOSTS.split(","):
WORKER_HOST.strip()
print("Stopping worker on: %s" % WORKER_HOST)
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.load_system_host_keys()
client.connect(WORKER_HOST, username=SSH_USERNAME, password=SSH_PASSWORD)
stdin, stdout, stderr = client.exec_command("bash %s" % os.path.join(SSH_WORKINGDIR, "dev/src/stop-dask-worker"))
print(stdout.read().decode())
err = stderr.read().decode()
if err:
print(err)
sys.exit(-1)
| 1,949 | 32.050847 | 117 | py |
online-active-model-selection | online-active-model-selection-master/experiments/run_experiment.py | from experiments.base.tune_hyperpar_base import *
from experiments.base.experiments_base import *
from src.evaluation.evaluate_base import *
from experiments.base.set_data import *
from src.publish_evals.publish_evals import *
from datetime import datetime
import time
import os
import shelve
import sys
from dask.distributed import Client, LocalCluster
from dask_cloudprovider import FargateCluster
def run_experiment(dataset, stream_size, stream_setting, budgets, num_reals, eval_window, num_reals_tuning, grid_size, load_hyperparameters, hyperparameter_bounds, which_methods, constants, cluster=None, aws_workers=32):
"""
The main script for running online model selection experiments.
"""
start_time = time.time()
# Initialize dask cluster if it was specified. Otherwise, we will not use any parallelism.
if cluster is not None:
if cluster == "localhost":
cluster = LocalCluster(processes=True, threads_per_worker=1, memory_limit="4GB", n_workers=8)
elif cluster == "fargate":
cluster = FargateCluster(n_workers=aws_workers, image="<here>",
worker_extra_args=["--nprocs", "4", "--nthreads", "1"],
scheduler_timeout="55 minutes")
raise ValueError("Please upload repo image to dockerhub and paste link <here>.")
print("Connecting to client.")
client = Client(address=cluster)
print("\n >>> Monitoring dashboard: %s \n" % client.dashboard_link)
else:
print("No cluster specified. Running in single-process mode.")
client = None
"""Create a results directory."""
now = datetime.now().strftime("_Date-%Y-%m-%d_Time-%H-%M-%S") # get datetime
which_methods_print = ''.join(map(str, which_methods))
os.mkdir('resources/results/'+dataset+'_streamsize'+str(stream_size)+'_numreals'+str(num_reals)+str(now)+'_which_methods'+str(which_methods_print)) # create the folder
results_dir = Path('resources/results/'+dataset+'_streamsize'+str(stream_size)+'_numreals'+str(num_reals)+str(now)+'_which_methods'+str(which_methods_print)) # assign it to the results directory var
"""Set data."""
# Set the data
data = SetData(dataset, stream_size, stream_setting, budgets, num_reals, eval_window, results_dir, num_reals_tuning, grid_size, load_hyperparameters, hyperparameter_bounds, which_methods, constants) # data class
# Save
data.save_data()
# Load the cache.
# with shelve.open("run_experiment.cache") as cachedb:
#
"""Hyperparameter tuning."""
print("\n# Tuning Hyperparameters\n")
hyperparams = tune_hyperpar_base(data, client=client, cache=None)
data._hyperparams = hyperparams # assign hyperpars to the data
"""Run experiments."""
print("\n# Running Experiments\n")
experiments_base(data, client=client, cache=None)
#
#
"""Evaluate data."""
print("\n# Running Final Evaluation\n")
Evals(data, client=client)
"""Announce the evaluation results."""
publish_evals(results_dir)
elapsed_time = time.time() - start_time
print("\n# Work Completed...\n")
print("Elapsed time: %s" % time.strftime("%H:%M:%S", time.gmtime(elapsed_time)))
print("The experiment results can be found at: %s" % str(results_dir))
# Close the client connection if it was opened.
if client is not None:
client.close()
print("Client closed.")
if __name__ == "__main__":
args = sys.argv[1:]
if len(args) < 12:
print("Missing arguments (or too many)")
print("Usage: python -m experiments.run_experiments [dataset_name] [stream_size] [stream_setting] [budgets] [num_reals] [winner_eval_window] [num_reals_tuning] [grid_size] [load_hyperparameters (true/false)] [cluster]")
exit(1)
else:
if args[8] == "true":
load_hyperparameters = True
elif args[8] == "false":
load_hyperparameters = None
else:
raise ValueError("Incorrect value for load hyperparameters")
if load_hyperparameters:
hyperparameter_bounds = list(map(lambda x: float(x), args[9].split(",")))
else:
if args[9] != "empty":
raise ValueError("Load hyper parameters must be empty")
hyperparameter_bounds = []
if len(args) == 12:
run_experiment(
args[0], # dataset,
int(args[1]), # pool_size,
args[2], # pool_setting,
list(map(lambda x: int(x), args[3].split(","))), # budgets,
int(args[4]), # num_reals,
int(args[5]), # eval_window,
int(args[6]), # num_reals_tuning,
int(args[7]), # grid_size,
load_hyperparameters,
hyperparameter_bounds, # hyperparameter_bounds,
list(map(lambda x: int(x), args[10].split(","))), # which_methods
            list(map(lambda x: float(x), args[11].split(","))),  # constants [sqbc, iwal, efal]
)
if len(args) == 13:
cluster = args[12]
run_experiment(
args[0], # dataset,
int(args[1]), # pool_size,
args[2], # pool_setting,
list(map(lambda x: int(x), args[3].split(","))), # budgets,
int(args[4]), # num_reals,
int(args[5]), # eval_window,
int(args[6]), # num_reals_tuning,
int(args[7]), # grid_size,
load_hyperparameters,
hyperparameter_bounds, # hyperparameter_bounds,
list(map(lambda x: int(x), args[10].split(","))), # which_methods
            list(map(lambda x: float(x), args[11].split(","))),  # constants [sqbc, iwal, efal]
cluster
)
if len(args) == 14:
cluster = args[12]
aws_workers = int(args[13])
run_experiment(
args[0], # dataset,
int(args[1]), # pool_size,
args[2], # pool_setting,
list(map(lambda x: int(x), args[3].split(","))), # budgets,
int(args[4]), # num_reals,
int(args[5]), # eval_window,
int(args[6]), # num_reals_tuning,
int(args[7]), # grid_size,
load_hyperparameters,
hyperparameter_bounds, # hyperparameter_bounds,
list(map(lambda x: int(x), args[10].split(","))), # which_methods
            list(map(lambda x: float(x), args[11].split(","))),  # constants [sqbc, iwal, efal]
cluster,
aws_workers
)
if len(args) > 14:
raise ValueError("Too many arguments")
| 6,687 | 39.533333 | 227 | py |
online-active-model-selection | online-active-model-selection-master/experiments/reproduce_experiment.py | from experiments.run_experiment import *
from dask.distributed import LocalCluster
def main(dataset_name, cluster=None):
experiment = dataset_name
load_hyperparameters = 'true'
if experiment == 'EmoContext':
# Emotion Detection
DatasetName = 'emotion_detection'
#
StreamSize = 1000
#
hyper_mp = 60
hyper_qbc = 4
hyper_sqbc = 4
hyper_iwal = 5
hyper_efal = 0.00005
#
constants = [150, 0.1, 6]
#
budgets = list(np.arange(10, 200, 20))
#
grid_size = 250
num_reals_tuning = 100
NumReals = 500
which_methods = list([1, 1, 1, 1, 1, 1]) # In order, mp, qbc, sqbc, rs, iwal, efal
elif experiment == 'DomainDrift':
# Emotion Detection
DatasetName = 'domain_drift'
#
StreamSize = 2500
#
hyper_mp = 60
hyper_qbc = 4
hyper_sqbc = 4
hyper_iwal = 5
hyper_efal = 0.00005
#
constants = [150, 0.1, 6]
#
budgets = [50, 100, 150, 200, 250, 300, 350, 400, 450, 500, 550, 600, 650, 700, 750, 800, 850, 900, 950, 1000]
#
grid_size = 250
num_reals_tuning = 100
NumReals = 500
which_methods = list([1, 1, 1, 1, 1, 0]) # In order, mp, qbc, sqbc, rs, iwal, efal
#
elif experiment == 'CIFAR10':
# CIFAR10 55-92
DatasetName = 'cifar10_5592'
#
#
StreamSize = 5000
#
hyper_mp = 2900
hyper_qbc = 1.47
hyper_sqbc = 4.54
hyper_iwal = 0.9
hyper_efal = 0.00002
#
constant_sqbc = 1.4
constants = [150, 0.1, 5]
#
budgets = [50,250,500,750,1000,1250,1500,2000,2500,3000,3500,4000]
#
grid_size = 250
num_reals_tuning = 100
NumReals = 500
which_methods = list([1, 1, 1, 1, 1, 1]) # In order, mp, qbc, sqbc, rs, iwal, efal
#
elif experiment == 'CIFAR10 V2':
# CIFAR10 40-70
DatasetName = 'cifar10_4070'
#
StreamSize = 5000
hyper_mp = 50000
hyper_qbc = 3
hyper_sqbc = 10
hyper_iwal = 1
hyper_efal = 0.004
constants = [10, 0.1, 6] # sqbc: increase
#
budgets = [50, 250, 500, 750, 1000, 1250, 1500, 2000, 2500, 3000, 3500, 4000]
grid_size = 250
num_reals_tuning = 100
NumReals = 500
which_methods = list([1, 1, 1, 1, 1, 0]) # In order, mp, qbc, sqbc, rs, iwal, efal
#
elif experiment == 'ImageNet':
DatasetName = 'imagenet'
#
#
StreamSize = 10000
#
hyper_mp = 135
hyper_qbc = 22
hyper_sqbc = 20
hyper_iwal = 1
hyper_efal = 0.003
#
constant_sqbc = 2
constants = [constant_sqbc, 0.1, 3]
#
budgets = [50, 300, 600, 900, 1200, 1500, 1750, 2000, 2250, 2500, 3000, 4000, 5000, 7000, 10000]
#
grid_size = 250
num_reals_tuning = 100
NumReals = 500
which_methods = list([1, 1, 1, 1, 1, 0]) # In order, mp, qbc, sqbc, rs, iwal, efal
else:
raise ValueError('The model collection does not exist')
"""Set common params."""
StreamSetting = 'floating'
WinnerEvalWindow = 15
#
hyperparameter_bounds = [hyper_mp, hyper_qbc, hyper_sqbc, hyper_iwal, hyper_efal] # budget:160max
hyperparameter_bounds_experiment = []
for i in np.arange(6):
if i < 3:
if which_methods[i] == 1:
hyperparameter_bounds_experiment.append(hyperparameter_bounds[i])
elif i > 3:
if which_methods[i] == 1:
hyperparameter_bounds_experiment.append(hyperparameter_bounds[i-1])
"""Run experiment."""
cluster = None
# cluster = 'localhost'
aws_workers = 32
run_experiment(DatasetName, StreamSize, StreamSetting, budgets, NumReals, WinnerEvalWindow, num_reals_tuning, grid_size, load_hyperparameters, hyperparameter_bounds_experiment, which_methods, constants, cluster, aws_workers)
if __name__ == '__main__':
args = sys.argv[1:]
if len(args) != 1 and len(args) != 2:
print("Missing arguments (or too many)")
print("Usage: python -m experiments.reproduce_experiment [dataset_name] [cluster (optional)]")
exit(1)
else:
if len(args) == 1:
main(
args[0]
)
else:
main(
args[0],
args[1]
)
| 4,600 | 27.937107 | 228 | py |
online-active-model-selection | online-active-model-selection-master/experiments/__init__.py | from . import *
| 16 | 7.5 | 15 | py |
online-active-model-selection | online-active-model-selection-master/experiments/base/tune_hyperpar_base.py | import numpy as np
from src.methods.model_picker import *
from src.methods.random_sampling import *
from src.methods.query_by_committee import *
from src.methods.efficient_active_learning import *
from src.evaluation.aux.compute_precision_measures import *
from src.methods.structural_query_by_committee import *
from pathlib import Path
from src.methods.importance_weighted_active_learning import *
from dask.distributed import Client, as_completed
from tqdm.auto import tqdm, trange
import cloudpickle, zlib
from scipy import optimize
def tune_hyperpar_base(data, client=None, cache=None):
"""
The base function for the experiments.
Parameters:
:param data: attributes to data
:param num_processes:
:param chunksize:
Returns:
hyperparameters for each budget and for each method (excluding random sampling)
"""
# Set params
budgets = data._budgets
hyperparameter_bounds = data._hyperparameter_bounds
if cache is None:
cache = {}
# Initialize
which_methods = data._which_methods
idx_all_methods_exclude_random = np.asarray([0, 1, 2, 4, 5])
which_methods = np.asarray(which_methods)
idx_methods_exclude_random = np.asarray(which_methods[idx_all_methods_exclude_random]).nonzero()
num_models_exclude_random = np.sum(np.asarray(which_methods[idx_all_methods_exclude_random])) # count every method that will run except random sampling
hyperpars = np.zeros((len(budgets), num_models_exclude_random))
    if data._load_hyperparameters in (True, 'true'):  # load the hyperparameters (accepts the bool from run_experiment or the string from reproduce_experiment)
# Load data
hyper_dir = Path(r'resources/hyperparameters/'+str(data._data_set_name)+'/hyperparameters')
hyperparameter_data = np.load(str(hyper_dir) + '.npz')
# Assign hyperparameters and query degrees
grids = hyperparameter_data['grids']
num_labels = hyperparameter_data['num_labels']
grids = np.squeeze(grids[:, idx_methods_exclude_random])
num_labels = np.squeeze(num_labels[:, idx_methods_exclude_random])
hyperpars_rs = np.zeros(len(budgets))
else: # If not 'true', tune the hyperparameters
# Set params
num_reals_tuning = data._num_reals_tuning # number of realizations over which hyperparameters will be tuned
# Set grids
# Initialization
grid_size = data._grid_size
grids = np.zeros((grid_size, num_models_exclude_random))
        # Note: only the methods other than random sampling need a hyperparameter grid; random sampling does not require tuning.
for grid_method in range(num_models_exclude_random):
min_grid = 0.00000000001
max_grid = float(hyperparameter_bounds[grid_method])
if np.logical_or(np.logical_or(which_methods[0]==1, which_methods[2]==1), which_methods[5]==1): # for some methods, place grid points logarithmically
max_grid = np.log2(max_grid)
min_grid = -12 # good for cifar10 40-70 for all
if np.logical_or(np.logical_and(which_methods[0]==1, which_methods[2]==1), which_methods[4]==1):
min_grid = -6
grids[:, grid_method] = np.logspace(min_grid, max_grid, num=grid_size, base=2)
else:
grids[:, grid_method] = np.linspace(min_grid, max_grid, num=grid_size)
hyperpars_rs = np.zeros(len(budgets))
# Initialize the inputs
num_labels = np.zeros((grid_size, num_models_exclude_random)) # remove the coordinate of random sampling
# If client was specified, we can already transfer the data to all workers.
if client is not None:
tqdm.write("Broadcasting data to workers.")
[data_future] = client.scatter([data], broadcast=True)
# We can also submit all the jobs.
tqdm.write("Submitting tasks.")
futures = []
for i in range(grid_size):
required_realizations = num_reals_tuning #- len(cache.get(i, []))
futures.append([client.submit(run_realization, data_future, grids[i, :], pure=False, priority=-i) for _ in range(required_realizations)])
# Run for each grid point.
for i in trange(grid_size, desc="Hyperparameter Tuning Grid"):
tuning_parameters = grids[i, :] # set tuning parameters of methods to a grid coordinate
desc = "Realizations (Grid point: %d/%d)" % (i+1, grid_size)
result = []
# Check if some grid points were cached.
# result.extend(cache.get(i, []))
if len(result) > 0:
tqdm.write("(Grid point: %d/%d) Found %d realizations in the cache." % (i+1, grid_size, len(result)))
if len(result) < num_reals_tuning:
if client is None:
# If no cluster was specified, we do a simple loop over all realizations, using tqdm to track progress.
required_realizations = num_reals_tuning - len(cache.get(i, []))
for _ in trange(required_realizations, desc=desc):
result.append(run_realization(data, tuning_parameters))
cache.setdefault(i, []).append(result[-1])
else:
# All jobs were submitted so we just collect results as they arrive and append them to the result list.
for future in tqdm(as_completed(futures[i]), total=len(futures[i]), desc=desc):
result.append(future.result())
cache.setdefault(i, []).append(result[-1])
# Assemble results of the experiment.
idx_log_all = list(zip(*result))
idx_log = np.stack(idx_log_all, axis=1)
# # Calculate the average number of queries
# tuned_methods = list(range(len(data._methods))) # list coordinate of methods
# del tuned_methods[3] # remove random sampling
# For each method, measure the expected number of queries throughout the streaming instances (over all realizations)
for j in np.arange(num_models_exclude_random):
num_labels[i, j] = np.sum(idx_log[:, :, j]) / num_reals_tuning
for method_id in np.arange(num_models_exclude_random):
        # For each budget, find the hyperparameter whose expected query count is closest to that budget (every method except random sampling)
for i in np.arange(np.size(budgets)):
if num_models_exclude_random != 1:
idx_closest = np.argmin(abs(num_labels[:, method_id] - budgets[i]))
                # Assign the closest grid point as the hyperparameter
hyperpars[i, method_id] = grids[idx_closest, method_id]
else:
idx_closest = np.argmin(abs(num_labels - budgets[i]))
                # Assign the closest grid point as the hyperparameter
hyperpars[i] = grids[idx_closest]
# Hyperparameter for random sampling
if which_methods[3] == 1:
num_disagreements_real = measure_disagreement(data._predictions) * data._num_instances / data._size_entire_pool # resize the disagreements on the entire pool to the streaming instances
hyperpars_rs = budgets/num_disagreements_real # hyperparameters for the random sampling
        hyperpars_rs[hyperpars_rs>1] = 1  # clip at 1, since the hyperparameter is used as a query probability
data._hyperpars_rs = hyperpars_rs
# Save the results.
np.savez(str(data._resultsdir) + '/hyperparameters.npz', hyperpars=hyperpars, budgets=data._budgets, grids=grids, num_labels=num_labels, hyperpars_rs=hyperpars_rs)
# print('Grids= '+str(grids))
# print(' Number of labels= ' + str(num_labels))
return hyperpars
def run_realization(data, tuning_parameters):
# Initialize
which_methods = data._which_methods
which_methods = np.asarray(which_methods)
idx_all_methods_exclude_random = np.asarray([0, 1, 2, 4, 5])
idx_methods_exclude_random = np.asarray(which_methods[idx_all_methods_exclude_random]).nonzero()
num_models_exclude_random = np.sum(np.asarray(which_methods[idx_all_methods_exclude_random])) # count every method that will run except random sampling
# data = cloudpickle.loads(zlib.decompress(data))
# Set the mode of operation
mode = 'tuning mode'
# Initialize the query decision for instances
idx_log_i = np.zeros((data._num_instances, num_models_exclude_random))
"""Set the streaming instances"""
# If the stream is floating, draw streaming instances uniformly at random
if data._pool_setting == 'floating':
# Set the streaming instances for this realization
streaming_data_instances = np.random.permutation(int(data._size_entire_pool)) # shuffle the entire pool
streaming_data_instances_real = streaming_data_instances[:data._num_instances] # select first n instance
else:
streaming_data_instances_fixed = np.random.permutation(
int(data._size_entire_pool)) # shuffle the entire pool
streaming_data_instances_fixed = streaming_data_instances_fixed[
:data._num_instances] # select first n instances
random_perm = np.random.permutation(data._num_instances) # shuffle the instances
streaming_data_instances_real = streaming_data_instances_fixed[random_perm] # update the streaming order
#
num_runing_models = 0
constant_sqbc = data._constant_sqbc
constant_iwal = data._constant_iwal
constant_efal = data._constant_efal
if 'mp' in data._methods:
# MODEL PICKER
tuning_par_mp = tuning_parameters[num_runing_models]
(idx_mp, ct_mp, idx_budget_mp, hidden_loss_log_i, posterior_t_log_i) = model_picker(data, mode, streaming_data_instances_real, tuning_par_mp, 'Variance')
# Logging
idx_log_i[:, num_runing_models] = idx_mp
num_runing_models += 1
if 'qbc' in data._methods:
# QUERY BY COMMITTEE
tuning_par_qbc = tuning_parameters[num_runing_models]
(idx_qbc, ct_qbc, idx_budget_qbc) = query_by_committee(data, mode, streaming_data_instances_real, tuning_par_qbc)
# Logging
idx_log_i[:, num_runing_models] = idx_qbc
num_runing_models += 1
if 'sqbc' in data._methods:
# STRUCTURAL QUERY BY COMMITTEE
tuning_par_sqbc = tuning_parameters[num_runing_models]
(idx_sqbc, ct_sqbc, idx_budget_sqbc) = structural_query_by_committee(data, mode, streaming_data_instances_real, tuning_par_sqbc, constant_sqbc)
# Logging
idx_log_i[:, num_runing_models] = idx_sqbc
num_runing_models += 1
if 'iwal' in data._methods:
# IMPORTANCE WEIGHTED ACTIVE LEARNING
tuning_par_iwal = tuning_parameters[num_runing_models]
(idx_iwal, ct_iwal, idx_budget_iwal) = importance_weighted_active_learning(data, mode, streaming_data_instances_real, tuning_par_iwal, constant_iwal)
# Logging
idx_log_i[:, num_runing_models] = idx_iwal
num_runing_models += 1
#
if 'efal' in data._methods:
# EFFICIENT ACTIVE LEARNING
c0 = tuning_parameters[num_runing_models] # threshold on the efal, increasing means use of more labelling budget
(idx_efal, ct_efal, idx_budget_efal) = efficient_active_learning(data, mode, streaming_data_instances_real, c0, constant_efal)
# Logging
idx_log_i[:, num_runing_models] = idx_efal
num_runing_models += 1
return idx_log_i
def measure_disagreement(predictions):
"""This function counts the number of instances in the region of disagreement."""
# Set params
n, m = predictions.shape
# Initialize
idx_disagreement = np.zeros(n)
# For each instance, count the number of non-unique elements
for i in np.arange(n):
num_uniques = len(np.unique(predictions[i, :]))
if num_uniques != 1: # If models have different predictions, set the respective index to one
idx_disagreement[i] += 1
# Count the total number of instances in the region of disagreement
num_disagreement = np.sum(idx_disagreement)
return num_disagreement
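
if __name__ == "__main__":
    # Illustrative sketch (an addition, not part of the original file): three instances,
    # two of which lie in the region of disagreement between the three models.
    toy_predictions = np.array([[0, 0, 0],
                                [0, 1, 0],
                                [2, 2, 1]])
    print(measure_disagreement(toy_predictions))  # -> 2.0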
| 12,220 | 43.60219 | 192 | py |
online-active-model-selection | online-active-model-selection-master/experiments/base/set_data.py | """Preprocess the model predictions"""
from src.evaluation.aux.compute_precision_measures import *
from pathlib import Path
import numpy as np
class SetData():
def __init__(self, data_set_name, pool_size, pool_setting, budgets, num_reals, eval_window_size, resultsdir, num_reals_tuning, grid_size, load_hyperparameters, hyperparameter_bounds, which_methods, constants):
"""
Base class to set data for the experiments.
Parameters:
        :param data_set_name: Options include {'emotion_detection', 'cifar10_5592', 'cifar10_4070', 'domain_drift', 'imagenet'}
        :param pool_size: Size of the stream of instances in a single realization
        :param pool_setting: Defaults to 'floating'. Options include {'floating', 'fixed'}
        :param budgets: A list of budgets at which the model selection methods will be evaluated
        :param num_reals: Number of realizations over which the evaluations are averaged. Set it to a few thousand at least
        :param eval_window_size: Size of the sliding window over which winner frequencies are measured (3 by default)
:param num_reals_tuning: Number of realizations over which the hyperparameters will be tuned
:param grid_size: Size of grid which will be used to create a training data set to learn the mapping between the hyperparameters and number of labels each method queries throughout the streaming instances
Attributes:
:num_instances:
:pool_setting:
:eval_window:
:num_classes:
:methods_fullname:
:methods:
:num_models:
:num_reals:
:budgets:
:resultsdir:
:data_set_name:
:size_entire_pool:
:num_reals_tuning:
:grid_size:
"""
# Attribution to the self
if data_set_name == 'emotion_detection':
# Data path
path_emotiondata = Path(r'resources/datasets/emotion_detection/')
# Preprocess
predictions = np.load(str(path_emotiondata) + "/predictions.npy")
oracle = np.load(str(path_emotiondata) + "/oracle.npy")
# Dataset specific attributes
self._predictions = predictions
self._oracle = oracle
self._num_classes = 4
self._num_models = np.size(predictions, 1)
self._size_entire_pool = np.size(predictions, 0)
elif data_set_name == 'imagenet':
# Load and preprocess data
path_domain = Path(r'resources/datasets/imagenet/')
# Preprocess data
predictions = np.load(str(path_domain) + "/predictions.npy")
predictions -= 1 # Correct the predicted labels to be in between 0 - C-1
predictions[predictions==-1] = 1001 # Set background labels to C+1
# Remove the identical models
idx_range = list(np.arange(np.size(predictions, 1)))
# Delete the identical models
del idx_range[92]
del idx_range[55]
del idx_range[22]
predictions = predictions[:, idx_range]
# process oracle
oracle = np.load(str(path_domain) + "/oracle.npy")
oracle -= 1 # Correct the true labels to be in between 0 - C-1
precs = compute_precisions(predictions, oracle, np.size(predictions, 1))
# Dataset specific attributes
self._predictions = predictions
self._oracle = oracle
self._num_classes = 1000
self._num_models = np.size(predictions, 1)
self._size_entire_pool = np.size(predictions, 0)
elif data_set_name == 'cifar10_5592':
# Load and preprocess data
path_cifar10data5592 = Path(r'resources/datasets/cifar10_5592/')
# Preprocess data
predictions = np.load(str(path_cifar10data5592) + "/predictions.npy")
oracle = np.load(str(path_cifar10data5592) + "/oracle.npy")
# Dataset specific attributes
self._predictions = predictions
self._oracle = oracle
self._num_classes = 10
self._num_models = np.size(predictions, 1)
self._size_entire_pool = np.size(predictions, 0)
elif data_set_name == 'cifar10_4070':
# Load and preprocess data
path_cifar10data4070 = Path(r'resources/datasets/cifar10_4070/')
# Preprocess data
predictions = np.load(str(path_cifar10data4070) + "/predictions.npy")
oracle = np.load(str(path_cifar10data4070) + "/oracle.npy")
# Dataset specific attributes
self._predictions = predictions
self._oracle = oracle
self._num_classes = 10
self._num_models = np.size(predictions, 1)
self._size_entire_pool = np.size(predictions, 0)
elif data_set_name == 'domain_drift':
# Load and preprocess data
path_domain = Path(r'resources/datasets/domain_drift/')
# Preprocess data
predictions = np.load(str(path_domain) + "/predictions.npy")
predictions -= 1
oracle = np.load(str(path_domain) + "/oracle.npy")
oracle -= 1
# Dataset specific attributes
self._predictions = predictions
self._oracle = oracle
self._num_classes = 6
self._num_models = np.size(predictions, 1)
self._size_entire_pool = np.size(predictions, 0)
else:
            raise ValueError('Dataset name has not been specified!')
# Assign constants
constant_sqbc = constants[0]
constant_iwal = constants[1]
constant_efal = constants[2]
# Attribute other values to self
self._budgets = budgets
self._num_reals = num_reals
        self._num_instances = pool_size  # experiment-dependent; typically smaller than the size of the entire pool
self._resultsdir = resultsdir
self._eval_window = eval_window_size
self._pool_setting = pool_setting
self._data_set_name = data_set_name
self._num_reals_tuning = num_reals_tuning
self._grid_size = grid_size
self._which_methods = which_methods
self._load_hyperparameters = load_hyperparameters
self._hyperparameter_bounds = hyperparameter_bounds
self._constant_sqbc = constant_sqbc
self._constant_iwal = constant_iwal
self._constant_efal = constant_efal
# Attribute the set of methods and their full names
all_methods = list(['mp', 'qbc', 'sqbc', 'rs', 'iwal', 'efal'])
all_methods_fullname = list(
['Model Picker', 'Query by Committee', 'Structural Query by Committee', 'Random Sampling',
'Importance Weighted Active Learning', 'Efficient Active Learning'])
methods = []
methods_fullname = []
for i in range(len(which_methods)):
if which_methods[i] == 1:
methods.append(all_methods[i])
methods_fullname.append(all_methods_fullname[i])
self._methods = methods
self._methods_fullname = methods_fullname
def save_data(self):
"""
This function saves the setting details.
"""
# Extract variables
num_instances = self._num_instances
pool_setting = self._pool_setting
eval_window = self._eval_window
num_classes = self._num_classes
methods_fullname = self._methods_fullname
methods = self._methods
num_models = self._num_models
num_reals = self._num_reals
budgets = self._budgets
resultsdir = self._resultsdir
data_set_name = self._data_set_name
size_entire_pool = self._size_entire_pool
num_reals_tuning = self._num_reals_tuning
grid_size = self._grid_size
load_hyperparameters = self._load_hyperparameters
hyperparameter_bounds = self._hyperparameter_bounds
which_methods = self._which_methods
constant_sqbc = self._constant_sqbc
constant_iwal = self._constant_iwal
constant_efal = self._constant_efal
# Save data
np.savez(str(resultsdir)+"/data.npz", num_instances=num_instances, pool_setting=pool_setting, eval_window=eval_window, num_classes=num_classes, num_models=num_models, num_reals=num_reals, budgets=budgets, methods=methods, methods_fullname=methods_fullname, data_set_name=data_set_name, size_entire_pool=size_entire_pool, num_reals_tuning=num_reals_tuning, grid_size=grid_size, load_hyperparameters=load_hyperparameters, hyperparameter_bounds=hyperparameter_bounds, which_methods=which_methods, constant_iwal=constant_iwal, constant_sqbc=constant_sqbc, constant_efal=constant_efal)
| 8,799 | 43 | 588 | py |
online-active-model-selection | online-active-model-selection-master/experiments/base/experiments_base.py | from src.methods.model_picker import *
from src.methods.random_sampling import *
from src.methods.query_by_committee import *
from src.methods.efficient_active_learning import *
from src.methods.random_sampling_disagreement import *
from src.methods.importance_weighted_active_learning import *
from src.methods.structural_query_by_committee import *
from dask.distributed import Client, as_completed
from tqdm.auto import tqdm, trange
import cloudpickle, zlib
def experiments_base(data, client=None, cache=None):
"""
The base function for the experiments.
Parameters:
:param data: Data attributes
:param num_processes:
:param chunksize:
Returns:
resources/results/resultsdir/experiment_results.npz
"""
# Set params
num_reals = data._num_reals # number of realizations over which the results will be averaged over
if cache is None:
cache = {}
# If client was specified, we can already transfer the data to all workers.
if client is not None:
tqdm.write("Broadcasting data to workers.")
[data_future] = client.scatter([data], broadcast=True)
# We can also submit all the jobs.
tqdm.write("Submitting tasks.")
futures = []
for i in range(len(data._budgets)):
required_realizations = num_reals - len(cache.get(i, []))
futures.append([client.submit(run_realization, data_future, i, pure=False, priority=-i) for _ in range(required_realizations)])
# For each budget, run the experiment (many realizations)
for i in trange(len(data._budgets), desc="Iterating over Budgets"):
desc="Realizations (Budget: %d)" % data._budgets[i]
result = []
        # Check whether some realizations for this budget were cached.
result.extend(cache.get(i, []))
if len(result) > 0:
tqdm.write("(Budget: %d) Found %d realizations in the cache." % (data._budgets[i], len(result)))
if len(result) < num_reals:
if client is None:
# If no cluster was specified, we do a simple loop over all realizations, using tqdm to track progress.
required_realizations = num_reals - len(cache.get(i, []))
for _ in trange(required_realizations, desc=desc):
result.append(run_realization(data, i))
cache.setdefault(i, []).append(result[-1])
else:
# All jobs were submitted so we just collect results as they arrive and append them to the result list.
for future in tqdm(as_completed(futures[i]), total=len(futures[i]), desc=desc):
result.append(future.result())
cache.setdefault(i, []).append(result[-1])
# Assemble results of the experiment.
idx_log_all, idx_budget_log_all, ct_log_all, streaming_instances_log_all, hidden_loss_log_all, posterior_log_all = zip(
*result)
idx_log = np.stack(idx_log_all, axis=1)
idx_budget_log = np.stack(idx_budget_log_all, axis=1)
ct_log = np.stack(ct_log_all, axis=1)
streaming_instances_log = np.stack(streaming_instances_log_all, axis=1)
hidden_loss_log = np.stack(hidden_loss_log_all, axis=1)
posterior_log = np.stack(posterior_log_all, axis=2)
# Prints
tqdm.write("\nExperiment Measurements: ")
for j in np.arange(len(data._methods_fullname)):
dummy = np.asarray(np.squeeze(idx_log[:, :, j]))
dummy = np.sum(dummy)/num_reals
tqdm.write("Method: %-10s Budget: %-10d; Number of Queried Instances: %-10d" % (data._methods[j], data._budgets[i], dummy))
tqdm.write("")
"""Save the results"""
np.savez(str(data._resultsdir) + '/experiment_results_'+ 'budget'+str(data._budgets[i]) + '.npz', idx_log=idx_log, idx_budget_log=idx_budget_log,
ct_log=ct_log, streaming_instances_log=streaming_instances_log, hidden_loss_log=hidden_loss_log,
posterior_log=posterior_log)
def run_realization(data, budget_idx):
# data = cloudpickle.loads(zlib.decompress(data))
# Set params
budget = data._budgets[budget_idx]
hyperparameters = data._hyperparams[budget_idx, :]
hyperpars_rs = data._hyperpars_rs[budget_idx]
constant_sqbc = data._constant_sqbc
constant_iwal = data._constant_iwal
constant_efal = data._constant_efal
num_methods = np.sum(np.asarray(data._which_methods))
# Initialize the Boolean instance logs and the weights c's for this realization.
idx_log_i = np.zeros((data._num_instances, num_methods))
idx_budget_log_i = np.zeros((data._num_instances, num_methods))
ct_log_i = np.zeros((data._num_instances, num_methods))
"""Set the streaming instances"""
    # If the pool is floating, sample the streaming instances uniformly at random
if data._pool_setting == 'floating':
# Set the streaming instances for this realization
streaming_data_instances = np.random.permutation(int(data._size_entire_pool)) # shuffle the entire pool
streaming_data_instances_real = streaming_data_instances[:data._num_instances] # select first n instance
else:
streaming_data_instances_fixed = np.random.permutation(
int(data._size_entire_pool)) # shuffle the entire pool
streaming_data_instances_fixed = streaming_data_instances_fixed[
:data._num_instances] # select first n instances
random_perm = np.random.permutation(data._num_instances) # shuffle the instances
streaming_data_instances_real = streaming_data_instances_fixed[random_perm] # update the streaming order
"""Run the model selection methods"""
# Input streaming data to the model selection methods
num_runing_models = 0
if 'mp' in data._methods:
# MODEL PICKER
tuning_par_mp = hyperparameters[num_runing_models]
(idx_mp, ct_mp, idx_budget_mp, hidden_loss_log_i, posterior_t_log_i) = model_picker(data, budget_idx, streaming_data_instances_real, tuning_par_mp, 'Variance')
# Logging
idx_log_i[:, num_runing_models] = idx_mp
ct_log_i[:, num_runing_models] = ct_mp
idx_budget_log_i[:, num_runing_models] = idx_budget_mp
num_runing_models += 1
if 'qbc' in data._methods:
# QUERY BY COMMITTEE
tuning_par_qbc = hyperparameters[num_runing_models]
(idx_qbc, ct_qbc, idx_budget_qbc) = query_by_committee(data, budget_idx, streaming_data_instances_real, tuning_par_qbc)
# Logging
idx_log_i[:, num_runing_models] = idx_qbc
ct_log_i[:, num_runing_models] = ct_qbc
idx_budget_log_i[:, num_runing_models] = idx_budget_qbc
num_runing_models += 1
if 'sqbc' in data._methods:
# STRUCTURAL QUERY BY COMMITTEE
tuning_par_qbc = hyperparameters[num_runing_models]
(idx_qbc, ct_qbc, idx_budget_qbc) = structural_query_by_committee(data, budget_idx, streaming_data_instances_real, tuning_par_qbc, constant_sqbc)
# Logging
idx_log_i[:, num_runing_models] = idx_qbc
ct_log_i[:, num_runing_models] = ct_qbc
idx_budget_log_i[:, num_runing_models] = idx_budget_qbc
num_runing_models += 1
num_runing_models_hyper = num_runing_models
if 'rs' in data._methods:
# RANDOM SAMPLING
tuning_par_rs = hyperpars_rs
(idx_rs, ct_rs, idx_budget_rs) = random_sampling_disagreement(data, budget_idx, streaming_data_instances_real, tuning_par_rs)
# Logging
idx_log_i[:, num_runing_models] = idx_rs
ct_log_i[:, num_runing_models] = ct_rs
idx_budget_log_i[:, num_runing_models] = idx_budget_rs
num_runing_models += 1
if 'iwal' in data._methods:
# IMPORTANCE WEIGHTED ACTIVE LEARNING
tuning_par_iwal = hyperparameters[num_runing_models_hyper]
(idx_iwal, ct_iwal, idx_budget_iwal) = importance_weighted_active_learning(data, budget_idx, streaming_data_instances_real, tuning_par_iwal, constant_iwal)
# Logging
idx_log_i[:, num_runing_models] = idx_iwal
ct_log_i[:, num_runing_models] = ct_iwal
idx_budget_log_i[:, num_runing_models] = idx_budget_iwal
num_runing_models += 1
num_runing_models_hyper += 1
if 'efal' in data._methods:
# EFFICIENT ACTIVE LEARNING
c0 = hyperparameters[num_runing_models_hyper] # threshold on the efal, increasing means use of more labelling budget
(idx_efal, ct_efal, idx_budget_efal) = efficient_active_learning(data, budget_idx, streaming_data_instances_real, c0, constant_efal)
# Logging
idx_log_i[:, num_runing_models] = idx_efal
ct_log_i[:, num_runing_models] = ct_efal
idx_budget_log_i[:, num_runing_models] = idx_budget_efal
    if 'mp' not in data._methods:
        # Model Picker was not run, so fill its logs with zeros
        hidden_loss_log_i = np.zeros(data._num_instances)
        posterior_t_log_i = np.zeros((data._num_instances, data._num_models))
return idx_log_i, idx_budget_log_i, ct_log_i, streaming_data_instances_real, hidden_loss_log_i, posterior_t_log_i
| 9,226 | 43.574879 | 167 | py |
online-active-model-selection | online-active-model-selection-master/experiments/base/__init__.py | from . import *
| 16 | 7.5 | 15 | py |
online-active-model-selection | online-active-model-selection-master/resources/__init__.py | from . import *
| 16 | 7.5 | 15 | py |
Turkish-Word2Vec | Turkish-Word2Vec-master/trainCorpus.py | from __future__ import print_function
import logging
import sys
import multiprocessing
from gensim.models import Word2Vec
from gensim.models.word2vec import LineSentence
if __name__ == '__main__':
if len(sys.argv) < 3:
print("Please provide two arguments, first one is path to the revised corpus, second one is path to the output file for model.")
print("Example command: python3 word2vec.py wiki.tr.txt trmodel")
sys.exit()
inputFile = sys.argv[1]
outputFile = sys.argv[2]
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
model = Word2Vec(LineSentence(inputFile), size=400, window=5, min_count=5, workers=multiprocessing.cpu_count())
model.wv.save_word2vec_format(outputFile, binary=True)
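    # Hedged verification sketch (an addition, not part of the original script): reload the
    # vectors that were just written and print the nearest neighbours of an example word.
    # The query word is an illustrative assumption.
    from gensim.models import KeyedVectors
    word_vectors = KeyedVectors.load_word2vec_format(outputFile, binary=True)
    example_word = 'ankara'
    if example_word in word_vectors:
        print(word_vectors.most_similar(example_word, topn=5))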
| 770 | 29.84 | 130 | py |
Turkish-Word2Vec | Turkish-Word2Vec-master/preprocess.py | from __future__ import print_function
import os.path
import sys
from gensim.corpora import WikiCorpus
import xml.etree.ElementTree as etree
import warnings
import logging
import string
from gensim import utils
def tokenize_tr(content,token_min_len=2,token_max_len=50,lower=True):
if lower:
        lowerMap = {ord(u'A'): u'a',ord(u'B'): u'b',ord(u'C'): u'c',ord(u'Ç'): u'ç',ord(u'D'): u'd',ord(u'E'): u'e',ord(u'F'): u'f',ord(u'G'): u'g',ord(u'Ğ'): u'ğ',ord(u'H'): u'h',ord(u'I'): u'ı',ord(u'İ'): u'i',ord(u'J'): u'j',ord(u'K'): u'k',ord(u'L'): u'l',ord(u'M'): u'm',ord(u'N'): u'n',ord(u'O'): u'o',ord(u'Ö'): u'ö',ord(u'P'): u'p',ord(u'R'): u'r',ord(u'S'): u's',ord(u'Ş'): u'ş',ord(u'T'): u't',ord(u'U'): u'u',ord(u'Ü'): u'ü',ord(u'V'): u'v',ord(u'Y'): u'y',ord(u'Z'): u'z'}
content = content.translate(lowerMap)
return [
utils.to_unicode(token) for token in utils.tokenize(content, lower=False, errors='ignore')
if token_min_len <= len(token) <= token_max_len and not token.startswith('_')
]
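# Hedged usage sketch (an addition, not part of the original script): the Turkish-specific
# lowercase map handles dotted/dotless I before tokenization, e.g.
#   tokenize_tr(u'İstanbul Boğazı')  ->  ['istanbul', 'boğazı']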
if __name__ == '__main__':
if len(sys.argv) < 3:
print("Please provide two arguments, first one is path to the wikipedia dump, second one is path to the output file")
print("Example command: python3 preprocess.py trwiki-20180101-pages-articles.xml.bz2 wiki.tr.txt")
sys.exit()
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
inputFile = sys.argv[1]
outputFile = sys.argv[2]
wiki = WikiCorpus(inputFile, lemmatize=False,tokenizer_func = tokenize_tr)
logging.info("Wikipedia dump is opened.")
output = open(outputFile,"w",encoding="utf-8")
logging.info("Output file is created.")
i = 0
for text in wiki.get_texts():
output.write(" ".join(text)+"\n")
i+=1
if (i % 10000 == 0):
logging.info("Saved " +str(i) + " articles.")
output.close()
| 1,844 | 37.4375 | 494 | py |
DelayResolvedRL | DelayResolvedRL-main/Gym(Stochastic)/env_stochasticdelay.py | import gym
# import gym_minigrid
import numpy as np
import random
from collections import deque
import copy
class Environment:
def __init__(self, seed, game_name, gamma, use_stochastic_delay, delay, min_delay):
"""Initialize Environment"""
self.game_name = game_name
self.env = gym.make(self.game_name)
self.env.seed(seed)
np.random.seed(seed)
random.seed(seed)
self.number_of_actions = self.env.action_space.n
self.delay = delay
if 'MiniGrid' in self.game_name:
self.state_space = self.env.observation_space['image']
else:
self.state_space = self.env.observation_space
        self.use_stochastic_delay = use_stochastic_delay
        self.no_action = 0
        self.index = 0
if self.use_stochastic_delay:
self.min_delay = min_delay
self.delay = self.min_delay
self.max_delay = delay
else:
self.min_delay = delay
self.delay = delay
self.max_delay = delay
self.state_buffer = deque(maxlen=self.max_delay + 2)
self.reward_buffer = deque(maxlen=self.max_delay + 2)
self.done_buffer = deque(maxlen=self.max_delay + 2)
self.turn_limit = 200
self.state = self.reset()
self.update_delay()
self.train = True
self.step_count = 0
self.delayed_action = 0
self.gamma = gamma
def process_state(self, observation):
"""Pre-process state if required"""
if 'MiniGrid' in self.game_name:
return np.array(observation['image'], dtype='float32') # Using only image as state (7x7x3)
else:
return observation
def reset(self):
state = self.env.reset()
if 'MiniGrid' in self.game_name:
return self.process_state(state)
else:
return state
def update_delay(self):
if self.use_stochastic_delay:
self.delay = random.randint(self.min_delay, self.max_delay)
else:
self.delay = self.max_delay
def step(self, state, action):
if self.max_delay != 0:
self.train = True
if True not in self.done_buffer:
next_state, rewards, done, _ = self.env.step(action)
else:
next_state = state
rewards = 0
done = True
if len(self.state_buffer) < self.delay: # delay is greater than the number of unobserved states
self.state_buffer.append(next_state)
self.reward_buffer.append(rewards)
self.done_buffer.append(done)
self.train = False
return state, 0, False
elif len(self.state_buffer) > self.delay: # delay is less than the number of unobserved states
self.state_buffer.append(next_state)
self.reward_buffer.append(rewards)
self.done_buffer.append(done)
rewards = 0
no_observed_states = len(self.state_buffer) - self.delay
for i in range(no_observed_states):
next_state = self.state_buffer.popleft()
gamma = np.power(self.gamma, no_observed_states-(i+1))
rewards += gamma * self.reward_buffer.popleft() # add all unobserved rewards
done = self.done_buffer.popleft()
self.update_delay()
if done:
self.state_buffer.clear()
self.reward_buffer.clear()
self.done_buffer.clear()
return next_state, rewards, done
else:
self.state_buffer.append(next_state)
self.reward_buffer.append(rewards)
self.done_buffer.append(done)
delayed_next_state = self.state_buffer.popleft()
delayed_rewards = self.reward_buffer.popleft()
delayed_done = self.done_buffer.popleft()
self.update_delay()
if delayed_done:
self.state_buffer.clear()
self.reward_buffer.clear()
self.done_buffer.clear()
return delayed_next_state, delayed_rewards, delayed_done
else:
next_state, rewards, done, _ = self.env.step(action)
return next_state, rewards, done
def render(self):
return self.env.render()
def close(self):
return self.env.close()
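
if __name__ == "__main__":
    # Hedged usage sketch (an addition, not part of the original module). 'CartPole-v0' and
    # the delay settings are illustrative assumptions; any discrete-action Gym id works.
    env = Environment(seed=0, game_name='CartPole-v0', gamma=0.99,
                      use_stochastic_delay=False, delay=3, min_delay=0)
    state = env.reset()
    for _ in range(10):
        action = env.env.action_space.sample()
        # Observations and rewards arrive with a constant delay of 3 steps.
        state, reward, done = env.step(state, action)
        if done:
            state = env.reset()
    env.close()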
| 4,618 | 37.491667 | 108 | py |
DelayResolvedRL | DelayResolvedRL-main/Gym(Stochastic)/agent.py | import tensorflow as tf
import numpy as np
import random
import copy
from statistics import mean
from collections import deque
GPUs = tf.config.experimental.list_physical_devices('GPU')
if GPUs:
try:
for gpu in GPUs:
tf.config.experimental.set_memory_growth(gpu, True)
except RuntimeError as e:
print(e)
def to_onehot(size, value):
"""1 hot encoding for observed state"""
return np.eye(size)[value]
class Model(tf.keras.Model):
"""DQN Model"""
def __init__(self, num_states, hidden_units, num_actions, alg, use_stochastic_delay, max_dimension):
super(Model, self).__init__()
if alg == 'IS':
if use_stochastic_delay:
self.input_layer = tf.keras.layers.InputLayer(input_shape=(num_states + 1 + max_dimension,))
else:
self.input_layer = tf.keras.layers.InputLayer(input_shape=(num_states + max_dimension,))
else:
self.input_layer = tf.keras.layers.InputLayer(input_shape=(num_states,))
self.hidden_layers = []
for i in hidden_units:
self.hidden_layers.append(tf.keras.layers.Dense(
i, activation='tanh', kernel_initializer='RandomNormal'))
self.output_layer = tf.keras.layers.Dense(
num_actions, activation='linear', kernel_initializer='RandomNormal')
@tf.function
def call(self, inputs):
z = self.input_layer(inputs)
for layer in self.hidden_layers:
z = layer(z)
output = self.output_layer(z)
return output
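
# Hedged shape-check sketch (an addition, not part of the original file): a 'normal'
# (undelayed) network over a 4-dimensional state with 2 actions; the layer sizes are
# illustrative assumptions.
#   net = Model(num_states=4, hidden_units=[32, 32], num_actions=2,
#               alg='normal', use_stochastic_delay=False, max_dimension=0)
#   q_values = net(np.zeros((1, 4), dtype='float32'))  # tensor of shape (1, 2)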
class DQN:
def __init__(self, num_states, num_actions, model_params, alg_params):
np.random.seed(alg_params['seed'])
tf.random.set_seed(alg_params['seed'])
random.seed(alg_params['seed'])
self.num_actions = num_actions
self.alg = alg_params['algorithm']
self.batch_size = alg_params['batch_size']
self.optimizer = tf.optimizers.Adam(alg_params['learning_rate'])
self.use_stochastic_delay = alg_params['use_stochastic_delay']
self.max_dimension = model_params['max_dimension']
hidden_units = model_params['hidden_units']
self.delay = alg_params['delay']
self.gamma = alg_params['gamma']
        self.model = Model(num_states, hidden_units, num_actions, self.alg, self.use_stochastic_delay,
                           self.max_dimension)
self.experience = {'s': [], 'a': [], 'r': [], 's2': [], 'done': []}
self.max_experiences = model_params['max_buffer_size']
self.min_experiences = model_params['min_buffer_size']
if self.alg != 'normal':
self.action_buffer = deque(maxlen=self.max_dimension + 1)
self.action_buffer_padded = deque(maxlen=self.max_dimension + 1)
def predict(self, inputs):
return self.model(np.atleast_2d(inputs.astype('float32')))
def fill_up_buffer(self):
self.action_buffer_padded.clear()
for _ in range(self.max_dimension):
self.action_buffer_padded.append(0)
def buffer_padding(self):
current_length = len(self.action_buffer)
self.action_buffer_padded = copy.deepcopy(self.action_buffer)
for _ in range(0, self.max_dimension - current_length):
self.action_buffer_padded.append(0)
def train(self, TargetNet):
if len(self.experience['s']) < self.min_experiences:
return 0
ids = np.random.randint(low=0, high=len(self.experience['s']), size=self.batch_size)
states = np.asarray([self.experience['s'][i] for i in ids])
actions = np.asarray([self.experience['a'][i] for i in ids])
rewards = np.asarray([self.experience['r'][i] for i in ids])
states_next = np.asarray([self.experience['s2'][i] for i in ids])
dones = np.asarray([self.experience['done'][i] for i in ids])
value_next = np.max(TargetNet.predict(states_next), axis=1)
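        # one-step TD target: r + gamma * max_a' Q_target(s', a'), with bootstrapping disabled on terminal steps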
actual_values = np.where(dones, rewards, rewards + self.gamma * value_next)
with tf.GradientTape() as tape:
selected_action_values = tf.math.reduce_sum(
self.predict(states) * tf.one_hot(actions, self.num_actions), axis=1)
loss = tf.math.reduce_mean(tf.square(actual_values - selected_action_values))
variables = self.model.trainable_variables
gradients = tape.gradient(loss, variables)
self.optimizer.apply_gradients(zip(gradients, variables))
return loss
def get_action(self, states, epsilon):
if np.random.random() < epsilon:
return np.random.choice(self.num_actions)
else:
return np.argmax(self.predict(np.atleast_2d(states))[0])
def add_experience(self, exp):
if len(self.experience['s']) >= self.max_experiences:
for key in self.experience.keys():
self.experience[key].pop(0)
for key, value in exp.items():
self.experience[key].append(value)
def copy_weights(self, TrainNet):
variables1 = self.model.trainable_variables
variables2 = TrainNet.model.trainable_variables
for v1, v2 in zip(variables1, variables2):
v1.assign(v2.numpy())
def play_game(global_step, env, TrainNet, TargetNet, epsilon, copy_step):
rewards = 0
episode_step = 0
last_state_observed = 0
done = False
observations = env.reset()
observations_original = observations
if env.game_name.startswith('Frozen'):
observations = to_onehot(env.state_space.n, observations)
if TrainNet.alg != 'normal':
TrainNet.fill_up_buffer()
losses = list()
clear = False
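    # Interaction loop: the 'normal' agent acts on the (possibly delayed) raw observation, while the 'delay' and
    # 'IS' agents also maintain the buffer of actions whose outcomes have not yet been observed; 'IS' selects
    # actions from the resulting information state.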
while not done:
delay = env.delay
len_buffer = len(env.state_buffer)
if TrainNet.alg == 'normal':
action = TrainNet.get_action(observations, epsilon)
prev_observations = observations
observations, reward, done = env.step(observations_original, action)
observations_original = observations
if env.game_name.startswith('Frozen'):
observations = to_onehot(env.state_space.n, observations)
else:
if episode_step == 0:
if env.use_stochastic_delay:
last_state_observed = (episode_step - env.turn_limit / 2) / env.turn_limit
action_state = np.append(last_state_observed, TrainNet.action_buffer_padded)
information_state = np.append(observations, action_state)
# information_state = np.append(observations, TrainNet.action_buffer_padded)
else:
information_state = np.append(observations, TrainNet.action_buffer_padded)
if TrainNet.alg == 'IS':
action = TrainNet.get_action(information_state, epsilon)
else:
action = TrainNet.get_action(observations, epsilon)
prev_observations = observations
prev_information_state = information_state
observations, reward, done = env.step(observations_original, action)
observations_original = observations
if env.game_name.startswith('Frozen'):
observations = to_onehot(env.state_space.n, observations)
episode_step += 1
if env.train:
last_state_observed = (episode_step - 1 - env.turn_limit / 2) / env.turn_limit
TrainNet.action_buffer.append(action + 1)
                for _ in range(len_buffer + 1 - delay):
                    # drop pending actions whose outcomes have just been observed
                    TrainNet.action_buffer.popleft()
TrainNet.buffer_padding()
else:
# delayed_action = random.randint(0, TrainNet.num_actions)
TrainNet.action_buffer.append(action + 1)
TrainNet.buffer_padding()
if env.delay == 0:
delayed_action = action
else:
if not TrainNet.action_buffer:
                    delayed_action = random.randint(0, TrainNet.num_actions - 1)
else:
                    delayed_action = TrainNet.action_buffer[0] - 1  # buffer stores actions shifted by +1
if delay == 0:
delayed_action = action
if len(TrainNet.action_buffer) == TrainNet.max_dimension + 1:
TrainNet.action_buffer.clear()
TrainNet.buffer_padding()
observations = env.state_buffer.pop()
env.state_buffer.clear()
reward = np.sum(env.reward_buffer)
done = env.done_buffer.pop()
env.done_buffer.clear()
env.reward_buffer.clear()
clear = True
if env.use_stochastic_delay:
action_state = np.append(last_state_observed, TrainNet.action_buffer_padded)
information_state = np.append(observations, action_state)
# information_state = np.append(observations, TrainNet.action_buffer_padded)
else:
information_state = np.append(observations, TrainNet.action_buffer_padded)
rewards += reward
if done:
episode_step = 0
env.reset()
if TrainNet.alg != 'normal':
TrainNet.action_buffer.clear()
TrainNet.buffer_padding()
global_step += 1
if TrainNet.alg == 'normal':
exp = {'s': prev_observations, 'a': action, 'r': reward, 's2': observations, 'done': done}
if TrainNet.alg == 'delay':
exp = {'s': prev_observations, 'a': delayed_action, 'r': reward, 's2': observations, 'done': done}
if TrainNet.alg == 'IS':
exp = {'s': prev_information_state, 'a': action, 'r': reward, 's2': information_state, 'done': done}
TrainNet.add_experience(exp)
loss = TrainNet.train(TargetNet)
if isinstance(loss, int):
losses.append(loss)
else:
losses.append(loss.numpy())
if global_step % copy_step == 0:
TargetNet.copy_weights(TrainNet)
return global_step, rewards, mean(losses)
def test(env, TrainNet, logs, num_episodes):
for _ in range(num_episodes):
observation = env.reset()
rewards = 0
steps = 0
done = False
while not done:
action = TrainNet.get_action(observation, 0)
observation, reward, done, _ = env.step(action)
steps += 1
rewards += reward
with open(logs['log_file_name'], "a") as f:
print("Testing steps: {} rewards :{} ".format(steps, rewards), file=f)
print("Testing steps: {} rewards :{} ".format(steps, rewards))
def train_agent(env, num_frames, model_params, algorithm_params, logs, verbose):
num_actions = env.number_of_actions
try:
state_space = len(env.state_space.sample())
except TypeError:
state_space = env.state_space.n
copy_step = model_params['copy_step']
TrainNet = DQN(state_space, num_actions, model_params, algorithm_params)
TargetNet = DQN(state_space, num_actions, model_params, algorithm_params)
# N = num_episodes
total_rewards_list = []
total_losses_list = []
epsilon_start = algorithm_params['start_epsilon']
decay = algorithm_params['epsilon_decay']
min_epsilon = algorithm_params['stop_epsilon']
global_step = 1
n = 0
while True:
epsilon = min_epsilon + (epsilon_start - min_epsilon) * np.exp(-decay * global_step)
global_step, total_reward, losses = play_game(global_step, env, TrainNet, TargetNet, epsilon, copy_step)
total_rewards_list.append(total_reward)
total_losses_list.append(losses)
total_rewards = np.array(total_rewards_list)
total_losses = np.array(total_losses_list)
avg_rewards = total_rewards[max(0, n - 100):(n + 1)].mean()
avg_losses = total_losses[max(0, n - 100):(n + 1)].mean()
if n % logs['log_interval'] == 0:
if verbose:
with open(logs['log_file_name'], "a") as f:
print("episode:{}, eps:{:.3f}, avg reward (last 100):{:.2f}, avg loss:{:.2f}"
.format(n, epsilon, avg_rewards, avg_losses), file=f)
if not verbose:
print("episode:{}, eps:{:.3f}, avg reward (last 100):{:.2f}"
.format(n, epsilon, avg_rewards))
# test(env, TrainNet, logs, 100)
n += 1
if global_step > num_frames:
break
env.close()
return total_rewards, total_losses
| 12,554 | 41.849829 | 112 | py |
DelayResolvedRL | DelayResolvedRL-main/Gym(Stochastic)/plot.py | import numpy as np
import matplotlib.pyplot as plt
# import matplotlib.ticker as mtick
import os
import matplotlib
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
matplotlib.rcParams.update({'font.size': 13})
# ver = '6.0'
def running_mean(x, n):
cumulative_sum = np.cumsum(np.insert(x, 0, 0))
return (cumulative_sum[n:] - cumulative_sum[:-n]) / float(n)
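# e.g. running_mean([1, 2, 3, 4], 2) -> array([1.5, 2.5, 3.5])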
def get_file(index, ver):
save_dir = os.getcwd() + '/Results/Results-{}/'.format(ver) # Save Directory
files_list = os.listdir(save_dir)
if ver == '6.12':
with open(save_dir + files_list[index] + '/log.txt', 'r') as f:
env_name = f.readline().split(',')[0].split(':')[1] # Gets the environment name
else:
with open(save_dir + files_list[index] + '/log_sd.txt', 'r') as f:
env_name = f.readline().split(',')[0].split(':')[1] # Gets the environment name
file_name = save_dir + files_list[index] # Final files directory
return env_name, file_name
def plot_reward(index, runs, delays, n, ver):
    env_name, file_name = get_file(index, ver)
plt.figure()
if index == -1:
plt.title('DQN')
save_file = os.getcwd() + '/Plots/v{}/rewards_normal.pdf'.format(ver)
if index == -2:
plt.title('DQN+IS')
save_file = os.getcwd() + '/Plots/v{}/rewards_IS.pdf'.format(ver)
if index == -3:
plt.title('delay-DQN')
save_file = os.getcwd() + '/Plots/v{}/rewards_delay.pdf'.format(ver)
for delay in delays:
episodes = 10000
X_axis = np.arange(episodes)
rewards_plot = np.zeros([runs, episodes])
for run in range(runs):
if delay == 'stochastic':
                rewards = np.load(file_name + '/rewards_delay_20_sd_run_{}.npy'.format(run), allow_pickle=True)[()]
else:
rewards = np.load(file_name + '/rewards_delay_{}_run_{}.npy'.format(delay, run), allow_pickle=True)[()]
# plt.plot(running_mean(rewards, n), alpha=0.25, linestyle='-.', color='blue')
rewards_plot[run] = rewards[0:episodes]
rewards_mean = np.mean(rewards_plot, axis=0)
rewards_deviation = np.std(rewards_plot, axis=0) / np.sqrt(runs)
plt.xlabel('Episodes')
plt.ylabel('Rewards')
# plt.ylim(0, 210)
plt.plot(running_mean(rewards_mean, n), label='delay={}'.format(delay))
# plt.fill_between(X_axis, rewards_mean+rewards_deviation, rewards_mean-rewards_deviation, alpha=1.5)
plt.legend(title='Delays', bbox_to_anchor=(1.05, 1), loc='upper left')
plt.savefig(save_file, bbox_inches="tight")
plt.savefig(file_name + '/rewards.pdf', bbox_inches="tight")
plt.tight_layout()
plt.show()
def compare_learning_curves(indices, label, ver, runs, delay, n=1000):
env_name = []
file_name = []
for index in indices:
env, file = get_file(index, ver)
env_name.append(env)
file_name.append(file)
# if not env_name.count(env_name[0]) == len(env_name): # Check if all the environments are same
# raise Exception('Environments are different')
plt.figure()
plt.title(env_name[0])
plt.xlabel('Episodes')
plt.ylabel('Rewards')
colors = ['blue', 'tab:orange', 'green']
for index in range(len(indices)):
# episodes = 10000
# X_axis = np.arange(episodes)
# rewards_plot = np.zeros([runs, episodes])
for run in range(runs):
if delay == 'stochastic':
rewards = \
np.load(file_name[index] + '/rewards_delay_10_sd_run_{}.npy'.format(run), allow_pickle=True)[()]
# rewards_plot[run] = np.mean(rewards)
# print('Algorithm: {} Delay: Stochastic Run: {} Reward: {}'.format(index, run, rewards_plot[run]))
else:
rewards = \
np.load(file_name[index] + '/rewards_delay_{}_run_{}.npy'.format(delay, run), allow_pickle=True)[()]
# rewards_plot[run] = np.mean(rewards)
# print('Algorithm: {} Delay: {} Run: {} Reward: {}'.format(index, delay, run, rewards_plot[run]))
plt.plot(running_mean(rewards, n), label=label[index] if run == 0 else '', color=colors[index], alpha=0.5)
plt.legend(title='Algorithms', bbox_to_anchor=(1.05, 1), loc='upper left')
save_dir = os.getcwd() + '/Plots/v{}'.format(ver)
try:
plt.savefig(save_dir + '/rewards_curves.pdf', bbox_inches="tight")
except FileNotFoundError:
os.makedirs(os.getcwd() + '/Plots/v{}'.format(ver))
plt.savefig(save_dir + '/rewards_curves.pdf', bbox_inches="tight")
plt.tight_layout()
plt.show()
def plot_losses(index, runs, n, ver):
    env_name, file_name = get_file(index, ver)
plt.figure()
plt.title(env_name)
losses = np.load(file_name + '/loss.npy', allow_pickle=True)[()]
episodes = len(losses[0])
X_axis = np.arange(episodes)
losses_plot = np.zeros([runs, episodes])
for run in range(runs):
losses_plot[run] = losses[run]
losses_mean = np.mean(losses_plot, axis=0)
losses_deviation = np.std(losses_plot, axis=0) / np.sqrt(runs)
plt.xlabel('Episodes')
plt.ylabel('Losses')
plt.plot(running_mean(losses_mean, n))
plt.savefig(file_name + '/losses.pdf')
plt.show()
def compare_algorithms(indices, label, runs, delays, ver, colors):
env_name = []
file_name = []
file_name_sd = []
for index in indices:
env, file = get_file(index, ver)
env_name.append(env)
file_name.append(file)
for index in indices:
_, file_sd = get_file(index, ver)
file_name_sd.append(file_sd)
# if not env_name.count(env_name[0]) == len(env_name): # Check if all the environments are same
# raise Exception('Environments are different')
plt.figure()
plt.title(env_name[0], fontsize=20)
for index in range(len(indices)):
count = 0
X_axis = list(map(str, delays))
r_mean = np.zeros(len(delays))
r_std = np.zeros(len(delays))
episodes = 10000
# rewards_plot = np.zeros([runs, episodes])
rewards_plot = np.zeros(runs)
for delay in delays:
for run in range(runs):
if delay == 'stochastic':
rewards = \
np.load(file_name_sd[index] + '/rewards_delay_10_sd_run_{}.npy'.format(run), allow_pickle=True)[
()]
rewards_plot[run] = np.mean(rewards)
print('Algorithm: {} Delay: Stochastic Run: {} Reward: {}'.format(index, run, rewards_plot[run]))
else:
rewards = \
np.load(file_name[index] + '/rewards_delay_{}_run_{}.npy'.format(delay, run), allow_pickle=True)[()]
rewards_plot[run] = np.mean(rewards)
print('Algorithm: {} Delay: {} Run: {} Reward: {}'.format(index, delay, run, rewards_plot[run]))
rewards_mean = rewards_plot
# rewards_mean = np.mean(rewards_plot, axis=0)
rewards_deviation = np.std(rewards_plot, axis=0) / np.sqrt(runs)
r_mean[count] = np.mean(rewards_mean)
r_std[count] = np.mean(rewards_deviation)
count += 1
if label[index] == 'DQN+IS':
alg = 'DRDQN'
else:
alg = label[index]
# plt.plot(X_axis, r_mean, marker='o', label=alg, color=colors[label[index]])
plt.errorbar(X_axis, r_mean, marker='o', yerr=r_std, label=alg, color=colors[label[index]], uplims=True, lolims=True)
plt.legend()
plt.xlabel('Delays', fontsize=16)
plt.xticks(fontsize=16)
plt.ylabel('Rewards', fontsize=16)
plt.yticks(fontsize=16)
save_dir = os.getcwd() + '/CartPole/Plots/v{}'.format(ver)
try:
plt.savefig(save_dir + '/rewards_comparison.pdf', bbox_inches="tight")
except FileNotFoundError:
os.makedirs(os.getcwd() + '/CartPole/Plots/v{}'.format(ver))
plt.savefig(save_dir + '/rewards_comparison.pdf', bbox_inches="tight")
plt.tight_layout()
plt.show()
def plot_time(indices, labels, delays, ver):
plt.figure()
env_name = []
file_name = []
file_name_sd = []
for index in indices:
env, file = get_file(index, ver)
env_name.append(env)
file_name.append(file)
for index in indices:
_, file_sd = get_file(index, ver)
file_name_sd.append(file_sd)
for index in range(len(indices)):
time = np.zeros([len(delays)])
X_axis = list(map(str, delays))
for delay in range(len(delays)):
if delays[delay] == 'stochastic':
time[delay] = np.mean(np.load(file_name_sd[index] + '/time_delay_10_sd.npy'))
else:
time[delay] = np.mean(np.load(file_name[index] + '/time_delay_{}.npy'.format(delays[delay])))
plt.plot(X_axis, time / 3600, label=labels[index], marker='o')
plt.title(env_name[0])
plt.xlabel('Delays')
plt.ylabel('Average Hours per run')
plt.legend()
save_dir = os.getcwd() + '/CartPole/Plots/v{}/'.format(ver)
try:
plt.savefig(save_dir + '/time_comparison.pdf', bbox_inches="tight")
except FileNotFoundError:
os.makedirs(os.getcwd() + '/CartPole/Plots/v{}'.format(ver))
plt.savefig(save_dir + '/time_comparison.pdf', bbox_inches="tight")
plt.tight_layout()
plt.show()
if __name__ == "__main__":
runs = 10
delays = [2, 4, 6, 8, 10, 'stochastic']
ver = '6.12'
compare_indices = [-2, -1]#, -3]
labels = ['DQN+IS', 'DQN']#, 'delay-DQN']
colors = {'DQN+IS': u'#1f77b4', 'Delay-DQN': 'red', 'DQN': u'#2ca02c'}
compare_algorithms(compare_indices, labels, runs, delays, ver, colors)
plot_time(compare_indices, labels, delays, ver)
delay = 'stochastic'
compare_learning_curves(compare_indices, labels, ver, runs, delay)
| 9,961 | 40.508333 | 125 | py |
DelayResolvedRL | DelayResolvedRL-main/Gym(Stochastic)/train.py | import datetime
import os
import argparse
import time
os.environ['TF_CPP_MIN_LOG_LEVEL'] = "3" # Suppress Tensorflow Messages
os.environ["CUDA_VISIBLE_DEVICES"] = "-1" # Set CPU/GPU
import numpy as np
from agent import *
from env_stochasticdelay import Environment
parser = argparse.ArgumentParser()
parser.add_argument("--algorithm", help="algorithm")
parser.add_argument("--stochastic", help="use stochastic delays")
parser.add_argument("--delay", help="environment delay")
parser.add_argument("--verbose", help="log files")
args = parser.parse_args()
algorithm = args.algorithm
delay = int(args.delay)
if args.verbose == 'True':
verbose = True
else:
verbose = False
if args.stochastic == 'True':
use_stochastic_delay = True
else:
use_stochastic_delay = False
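# Example invocation (illustrative values):
#   python train.py --algorithm IS --stochastic True --delay 5 --verbose True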
'''Log directory'''
# verbose = False
if verbose:
save_dir = os.getcwd() + '/Results/Results-1.0/Results-'+algorithm
if not os.path.exists(save_dir):
os.makedirs(save_dir)
if use_stochastic_delay:
log_file_name = save_dir + '/log_sd.txt'
else:
log_file_name = save_dir + '/log.txt'
reward_file_name = save_dir + '/rewards'
loss_file_name = save_dir + '/loss'
else:
log_file_name = ""
'''Environment Parameters'''
game = 'CartPole-v0'
seed = 0 # Seed for Env, TF, Numpy
num_frames = 1e6 # Million Frames
num_episodes = 10000
logs = {'log_interval': 100, # Number of Episodes after which to print output/save batch output
'log_file_name': log_file_name
}
'''Parameters of Algorithm'''
algorithm_params = {'algorithm': algorithm, # normal, delay, IS
'batch_size': 32,
'gamma': 0.99,
'learning_rate': 1e-3,
'start_epsilon': 1.0,
'stop_epsilon': 1e-3,
'epsilon_decay': 1e-4,
'use_stochastic_delay': use_stochastic_delay,
'delay': delay,
'min_delay': 0,
'seed': seed
}
model_params = {'hidden_units': [200], # model architecture
'max_buffer_size': 1000,
'min_buffer_size': 100,
'copy_step': 25, # 1 means no target network
'max_dimension': 10
}
'''Runs'''
runs = 10
rewards = {}
losses = {}
time_taken = np.zeros(runs)
model_params['max_dimension'] = min(delay, model_params['max_dimension'])
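# the padded action buffer in the information state never needs more slots than the delay itself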
if verbose:
if use_stochastic_delay:
time_file_name = save_dir + '/time_delay_{}_sd'.format(delay)
if algorithm == 'IS':
delay_avg = (delay + algorithm_params['min_delay'])/2
# algorithm_params['gamma'] = np.power(algorithm_params['gamma'], 1/delay_avg)
else:
time_file_name = save_dir + '/time_delay_{}'.format(delay)
for run in range(runs):
if verbose:
if use_stochastic_delay:
reward_file_name_cur = reward_file_name + '_delay_{}_sd_run_{}'.format(delay, run)
loss_file_name_cur = loss_file_name + '_delay_{}_sd_run_{}'.format(delay, run)
else:
reward_file_name_cur = reward_file_name + '_delay_{}_run_{}'.format(delay, run)
loss_file_name_cur = loss_file_name + '_delay_{}_run_{}'.format(delay, run)
'''Set seed'''
seed = run
algorithm_params['seed'] = seed
algorithm_params['delay'] = delay
'''Write Parameters to log_file'''
if verbose:
with open(log_file_name, "a") as f:
f.write('Environment: {}, Frames: {}\n'.format(game, num_frames))
f.write('Algorithm Parameters: {} \n'.format(algorithm_params))
f.write('Model Parameters: {} \n'.format(model_params))
f.write('Run: {} \n'.format(run))
f.flush()
'''Initialize Environment & Model'''
env = Environment(seed, game, algorithm_params['gamma'],
algorithm_params['use_stochastic_delay'], algorithm_params['delay'], algorithm_params['min_delay'])
'''Train the Agent'''
start_time = time.time()
reward_history, loss_history = train_agent(env, num_frames, model_params, algorithm_params, logs, verbose)
end_time = time.time()
time_taken[run] = end_time - start_time
if verbose:
with open(log_file_name, "a") as f:
f.write('Time taken: {}\n'.format(time_taken))
f.flush()
'''Store the results'''
rewards = reward_history
losses = loss_history
'''Save Rewards and Losses'''
if verbose:
np.save(reward_file_name_cur, rewards)
np.save(loss_file_name_cur, losses)
np.save(time_file_name, time_taken)
| 4,643 | 35.28125 | 121 | py |
DelayResolvedRL | DelayResolvedRL-main/Gym(Stochastic)/env.py | import gym
# import gym_minigrid
import numpy as np
from collections import deque
class Environment:
def __init__(self, game_name, delay, seed):
"""Initialize Environment"""
self.game_name = game_name
self.env = gym.make(self.game_name)
self.env.seed(seed)
np.random.seed(seed)
self.number_of_actions = self.env.action_space.n
self.delay = delay
if 'MiniGrid' in self.game_name:
self.state_space = self.env.observation_space['image']
else:
self.state_space = self.env.observation_space
self.actions_in_buffer = deque(maxlen=self.delay)
self.fill_up_buffer()
self.delayed_action = 0
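        # actions_in_buffer implements a constant action delay: the action executed at time t is the one chosen
        # `delay` steps earlier; the buffer is pre-filled with random actions on reset.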
def process_state(self, observation):
"""Pre-process state if required"""
if 'MiniGrid' in self.game_name:
return np.array(observation['image'], dtype='float32') # Using only image as state (7x7x3)
else:
return observation
def fill_up_buffer(self):
for _ in range(self.delay):
action = np.random.choice(self.number_of_actions)
self.actions_in_buffer.append(action)
def reset(self):
state = self.env.reset()
self.fill_up_buffer()
if 'MiniGrid' in self.game_name:
return self.process_state(state)
else:
return state
def step(self, action):
if self.delay != 0:
chosen_action = action
self.delayed_action = self.actions_in_buffer.popleft()
self.actions_in_buffer.append(chosen_action)
else:
self.delayed_action = action
if 'MiniGrid' in self.game_name:
next_state, reward, done, info = self.env.step(self.delayed_action)
return self.process_state(next_state), reward, done, info
else:
return self.env.step(self.delayed_action)
def render(self):
return self.env.render()
def close(self):
return self.env.close()
| 2,014 | 31.5 | 103 | py |
DelayResolvedRL | DelayResolvedRL-main/W-Maze/DQN/env_stochasticdelay.py | import numpy as np
from collections import deque
import copy
import random
class Environment:
"""Initialize Environment"""
def __init__(self, seed, gamma, use_stochastic_delay, delay, min_delay):
np.random.seed(seed)
random.seed(seed)
self.call = 0
self.breadth = 7
self.length = 11
self.state_space = np.empty([self.breadth, self.length], dtype='<U1')
self.state_space[:] = 'E'
self.state_space[0] = 'X'
self.state_space[1:4, self.length // 2 - 2] = 'X'
self.state_space[1:4, self.length // 2 + 2] = 'X'
self.state_space[0, self.length // 2 - 1:self.length // 2 + 2] = 'G'
self.state_space[self.breadth - 1, 0] = 'P'
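        # Grid legend: 'E' empty cell, 'X' wall, 'G' goal cells (top row, centre), 'P' player position.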
self.actions = [1, 2, 3, 4] # UP, DOWN, LEFT, RIGHT
self.no_action = 0
self.index = 0
self.number_of_actions = len(self.actions)
self.turn_limit = 300
self.min_delay = min_delay
self.delay = self.min_delay
self.max_delay = delay
self.use_stochastic_delay = use_stochastic_delay
self.state_buffer = deque(maxlen=self.max_delay+2)
self.reward_buffer = deque(maxlen=self.max_delay+2)
self.done_buffer = deque(maxlen=self.max_delay+2)
self.state = self.reset()
self.update_delay()
self.train = True
self.step_count = 0
self.delayed_action = 0
self.gamma = gamma
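        # state_buffer / reward_buffer / done_buffer hold transitions the environment has generated but the agent
        # has not yet observed; step() releases them once the (possibly random) observation delay has elapsed.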
def reset(self):
x = random.randint(0, self.breadth - 1)
y = 0
starting_state = [x, y]
self.state_space[x, y] = 'P'
self.step_count = 0
return starting_state
def update_delay(self):
if self.use_stochastic_delay:
self.delay = random.randint(self.min_delay, self.max_delay)
else:
self.delay = self.max_delay
def step(self, state, action):
if self.max_delay != 0:
self.train = True
if True not in self.done_buffer:
next_state, rewards, done = self.env_step(action)
else:
next_state = state
rewards = 0
done = True
if len(self.state_buffer) < self.delay: # delay is greater than the number of unobserved states
self.state_buffer.append(next_state)
self.reward_buffer.append(rewards)
self.done_buffer.append(done)
self.train = False
return state, 0, False
elif len(self.state_buffer) > self.delay: # delay is smaller than the number of unobserved states
self.state_buffer.append(next_state)
self.reward_buffer.append(rewards)
self.done_buffer.append(done)
rewards = 0
no_observed_states = len(self.state_buffer) - self.delay
for i in range(no_observed_states):
next_state = self.state_buffer.popleft()
gamma = np.power(self.gamma, no_observed_states - (i + 1))
rewards += gamma*self.reward_buffer.popleft() # add all unobserved rewards
done = self.done_buffer.popleft()
self.update_delay()
if done:
self.state_buffer.clear()
self.reward_buffer.clear()
self.done_buffer.clear()
return next_state, rewards, done
else:
self.state_buffer.append(next_state)
self.reward_buffer.append(rewards)
self.done_buffer.append(done)
delayed_next_state = self.state_buffer.popleft()
delayed_rewards = self.reward_buffer.popleft()
delayed_done = self.done_buffer.popleft()
self.update_delay()
if delayed_done:
self.state_buffer.clear()
self.reward_buffer.clear()
self.done_buffer.clear()
return delayed_next_state, delayed_rewards, delayed_done
else:
return self.env_step(action)
def env_step(self, action):
action += 1 # (0,3) -> (1,4)
self.step_count += 1
done = False
player_position = self.state
reward = -1
"""UP"""
if action == 1:
if player_position[0] - 1 >= 0 and self.state_space[player_position[0] - 1, player_position[1]] != 'X':
self.state_space[player_position[0], player_position[1]] = 'E'
if self.state_space[player_position[0] - 1, player_position[1]] == 'G':
done = True
self.state = self.reset()
reward = 11
else:
self.state = [player_position[0] - 1, player_position[1]]
self.state_space[player_position[0] - 1, player_position[1]] = 'P'
"""DOWN"""
if action == 2:
if player_position[0] + 1 < self.breadth \
and self.state_space[player_position[0] + 1, player_position[1]] != 'X':
self.state_space[player_position[0], player_position[1]] = 'E'
if self.state_space[player_position[0] + 1, player_position[1]] == 'G':
done = True
self.state = self.reset()
reward = 11
else:
self.state = [player_position[0] + 1, player_position[1]]
self.state_space[player_position[0] + 1, player_position[1]] = 'P'
"""LEFT"""
if action == 3:
if player_position[1] - 1 >= 0 and self.state_space[player_position[0], player_position[1] - 1] != 'X':
self.state_space[player_position[0], player_position[1]] = 'E'
if self.state_space[player_position[0], player_position[1] - 1] == 'G':
done = True
self.state = self.reset()
reward = 11
else:
self.state = [player_position[0], player_position[1] - 1]
self.state_space[player_position[0], player_position[1] - 1] = 'P'
"""RIGHT"""
if action == 4:
if player_position[1] + 1 < self.length \
and self.state_space[player_position[0], player_position[1] + 1] != 'X':
self.state_space[player_position[0], player_position[1]] = 'E'
if self.state_space[player_position[0], player_position[1] + 1] == 'G':
done = True
self.state = self.reset()
reward = 11
else:
self.state = [player_position[0], player_position[1] + 1]
self.state_space[player_position[0], player_position[1] + 1] = 'P'
if self.step_count == self.turn_limit:
done = True
return self.state, reward, done
| 6,966 | 43.094937 | 115 | py |
DelayResolvedRL | DelayResolvedRL-main/W-Maze/DQN/agent.py | import tensorflow as tf
import numpy as np
import random
import copy
from statistics import mean
from collections import deque
GPUs = tf.config.experimental.list_physical_devices('GPU')
if GPUs:
try:
for gpu in GPUs:
tf.config.experimental.set_memory_growth(gpu, True)
except RuntimeError as e:
print(e)
'''DQN Model'''
class Model(tf.keras.Model):
def __init__(self, state_space_shape, hidden_units, num_actions, use_stochastic_delay, max_dimension, alg):
super(Model, self).__init__()
        input_shape = len(state_space_shape)  # number of coordinates in the grid position, e.g. 2 for (x, y)
if alg == 'IS':
if use_stochastic_delay:
self.input_layer = tf.keras.layers.InputLayer(input_shape=(input_shape + 1 + max_dimension,))
else:
self.input_layer = tf.keras.layers.InputLayer(input_shape=(input_shape + max_dimension,))
else:
self.input_layer = tf.keras.layers.InputLayer(input_shape=(input_shape,))
self.hidden_layers = []
for i in hidden_units:
self.hidden_layers.append(tf.keras.layers.Dense(
i, activation='tanh', kernel_initializer='RandomNormal'))
self.output_layer = tf.keras.layers.Dense(
num_actions, activation='linear', kernel_initializer='RandomNormal')
@tf.function
def call(self, inputs):
z = self.input_layer(inputs)
for layer in self.hidden_layers:
z = layer(z)
output = self.output_layer(z)
return output
class DQN:
def __init__(self, state_space_shape, num_actions, model_params, alg_params):
self.num_actions = num_actions
self.actions = np.linspace(1, self.num_actions, num=self.num_actions, dtype=np.int32)
self.alg = alg_params['algorithm']
self.batch_size = alg_params['batch_size']
self.optimizer = tf.optimizers.Adam(alg_params['learning_rate'])
self.delay = alg_params['delay']
self.gamma = alg_params['gamma']
self.use_stochastic_delay = alg_params['use_stochastic_delay']
self.max_dimension = model_params['max_dimension']
hidden_units = model_params['hidden_units']
self.model = Model(state_space_shape, hidden_units, num_actions, self.use_stochastic_delay, self.max_dimension, self.alg)
self.experience = {'s': [], 'a': [], 'r': [], 's2': [], 'done': []}
self.max_experiences = model_params['max_buffer_size']
self.min_experiences = model_params['min_buffer_size']
if self.alg != 'normal':
self.action_buffer = deque(maxlen=self.max_dimension+1)
self.action_buffer_padded = deque(maxlen=self.max_dimension+1)
def predict(self, inputs):
return self.model(np.atleast_2d(inputs.astype('float32')))
def fill_up_buffer(self):
self.action_buffer_padded.clear()
for _ in range(self.max_dimension):
self.action_buffer_padded.append(0)
def buffer_padding(self):
current_length = len(self.action_buffer)
self.action_buffer_padded = copy.deepcopy(self.action_buffer)
for _ in range(0, self.max_dimension - current_length):
self.action_buffer_padded.append(0)
def train(self, TargetNet):
if len(self.experience['s']) < self.min_experiences:
return 0
ids = np.random.randint(low=0, high=len(self.experience['s']), size=self.batch_size)
states = np.asarray([self.experience['s'][i] for i in ids])
actions = np.asarray([self.experience['a'][i] for i in ids])
rewards = np.asarray([self.experience['r'][i] for i in ids])
states_next = np.asarray([self.experience['s2'][i] for i in ids])
dones = np.asarray([self.experience['done'][i] for i in ids])
value_next = np.max(TargetNet.predict(states_next), axis=1)
actual_values = np.where(dones, rewards, rewards + self.gamma * value_next)
with tf.GradientTape() as tape:
selected_action_values = tf.math.reduce_sum(
self.predict(states) * tf.one_hot(actions, self.num_actions), axis=1)
loss = tf.math.reduce_mean(tf.square(actual_values - selected_action_values))
variables = self.model.trainable_variables
gradients = tape.gradient(loss, variables)
self.optimizer.apply_gradients(zip(gradients, variables))
return loss
def get_action(self, states, epsilon):
if np.random.random() < epsilon:
return np.random.choice(self.num_actions)
else:
return np.argmax(self.predict(np.atleast_2d(states))[0])
def add_experience(self, exp):
if len(self.experience['s']) >= self.max_experiences:
for key in self.experience.keys():
self.experience[key].pop(0)
for key, value in exp.items():
self.experience[key].append(value)
def copy_weights(self, TrainNet):
variables1 = self.model.trainable_variables
variables2 = TrainNet.model.trainable_variables
for v1, v2 in zip(variables1, variables2):
v1.assign(v2.numpy())
def play_game(global_step, env, TrainNet, TargetNet, epsilon, copy_step):
rewards = 0
episode_step = 0
last_state_observed = 0
done = False
observations = env.reset()
if TrainNet.alg != 'normal':
TrainNet.fill_up_buffer()
losses = list()
clear = False
while not done:
delay = env.delay
len_buffer = len(env.state_buffer)
if TrainNet.alg == 'normal':
action = TrainNet.get_action(observations, epsilon)
prev_observations = observations
observations, reward, done = env.step(observations, action)
else:
if episode_step == 0:
if env.use_stochastic_delay: # append the last time this state was observed normalized by the max step of the episode
last_state_observed = (episode_step-env.turn_limit/2)/env.turn_limit
action_state = np.append(last_state_observed, TrainNet.action_buffer_padded)
information_state = np.append(observations, action_state)
else:
information_state = np.append(observations, TrainNet.action_buffer_padded)
if TrainNet.alg == 'IS':
action = TrainNet.get_action(information_state, epsilon)
else:
action = TrainNet.get_action(observations, epsilon)
prev_observations = observations
prev_information_state = information_state
observations, reward, done = env.step(observations, action)
episode_step += 1
if env.delay == 0:
delayed_action = action
else:
if not TrainNet.action_buffer: # buffer empty
                    delayed_action = random.randint(0, TrainNet.num_actions - 1)
else:
                    delayed_action = TrainNet.action_buffer[0] - 1  # buffer stores actions shifted by +1
if env.train:
last_state_observed = (episode_step-env.turn_limit/2)/env.turn_limit
TrainNet.action_buffer.append(action + 1)
                for _ in range(len_buffer + 1 - delay):
                    # drop pending actions whose outcomes have just been observed
                    TrainNet.action_buffer.popleft()
TrainNet.buffer_padding()
else:
TrainNet.action_buffer.append(action + 1)
TrainNet.buffer_padding()
if len(TrainNet.action_buffer) == TrainNet.max_dimension+1:
TrainNet.action_buffer.clear()
TrainNet.buffer_padding()
observations = env.state_buffer.pop()
env.state_buffer.clear()
reward = np.sum(env.reward_buffer)
done = env.done_buffer.pop()
env.done_buffer.clear()
env.reward_buffer.clear()
clear = True
if env.use_stochastic_delay: # append the last time this state was observed normalized by the max step of the episode
action_state = np.append(last_state_observed, TrainNet.action_buffer_padded)
information_state = np.append(observations, action_state)
else:
information_state = np.append(observations, TrainNet.action_buffer_padded)
rewards += reward
if done:
episode_step = 0
env.reset()
if TrainNet.alg != 'normal':
TrainNet.action_buffer.clear()
TrainNet.buffer_padding()
global_step += 1
if TrainNet.alg == 'normal':
exp = {'s': prev_observations, 'a': action, 'r': reward, 's2': observations, 'done': done}
if TrainNet.alg == 'delay':
exp = {'s': prev_observations, 'a': delayed_action, 'r': reward, 's2': observations, 'done': done}
if TrainNet.alg == 'IS':
exp = {'s': prev_information_state, 'a': action, 'r': reward, 's2': information_state, 'done': done}
TrainNet.add_experience(exp)
loss = TrainNet.train(TargetNet)
if isinstance(loss, int):
losses.append(loss)
else:
losses.append(loss.numpy())
if global_step % copy_step == 0:
TargetNet.copy_weights(TrainNet)
return global_step, rewards, mean(losses)
def test(env, TrainNet, logs, num_episodes):
for _ in range(num_episodes):
observation = env.reset()
rewards = 0
steps = 0
done = False
while not done:
action = TrainNet.get_action(observation, 0)
observation, reward, done, _ = env.step(action)
steps += 1
rewards += reward
with open(logs['log_file_name'], "a") as f:
print("Testing steps: {} rewards :{} ".format(steps, rewards), file=f)
print("Testing steps: {} rewards :{} ".format(steps, rewards))
def train_agent(env, num_frames, model_params, algorithm_params, logs, verbose):
np.random.seed(algorithm_params['seed'])
tf.random.set_seed(algorithm_params['seed'])
random.seed(algorithm_params['seed'])
num_actions = env.number_of_actions
state_space = env.state_space.shape
copy_step = model_params['copy_step']
TrainNet = DQN(state_space, num_actions, model_params, algorithm_params)
TargetNet = DQN(state_space, num_actions, model_params, algorithm_params)
# N = num_episodes
total_rewards_list = []
total_losses_list = []
epsilon_start = algorithm_params['start_epsilon']
decay = algorithm_params['epsilon_decay']
min_epsilon = algorithm_params['stop_epsilon']
global_step = 1
n = 0
while True:
epsilon = min_epsilon + (epsilon_start - min_epsilon) * np.exp(-decay * global_step)
global_step, total_reward, losses = play_game(global_step, env, TrainNet, TargetNet, epsilon, copy_step)
total_rewards_list.append(total_reward)
total_losses_list.append(losses)
total_rewards = np.array(total_rewards_list)
total_losses = np.array(total_losses_list)
avg_rewards = total_rewards[max(0, n - 100):(n + 1)].mean()
avg_losses = total_losses[max(0, n - 100):(n + 1)].mean()
if n % logs['log_interval'] == 0:
if verbose:
with open(logs['log_file_name'], "a") as f:
print("episode:{}, eps:{:.4f}, avg reward (last 100):{:.2f}, avg loss:{:.2f}"
.format(n, epsilon, avg_rewards, avg_losses), file=f)
if not verbose:
print("episode:{}, eps:{:.3f}, avg reward (last 100):{:.2f}"
.format(n, epsilon, avg_rewards))
# test(env, TrainNet, logs, 100)
n += 1
if global_step > num_frames:
break
# env.close()
return total_rewards, total_losses
| 11,830 | 42.818519 | 134 | py |
DelayResolvedRL | DelayResolvedRL-main/W-Maze/DQN/plot.py | import numpy as np
import matplotlib.pyplot as plt
# import matplotlib.ticker as mtick
import os
import matplotlib
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
matplotlib.rcParams.update({'font.size': 13})
def running_mean(x, n):
cumulative_sum = np.cumsum(np.insert(x, 0, 0))
return (cumulative_sum[n:] - cumulative_sum[:-n]) / float(n)
def get_file(index, ver):
save_dir = os.getcwd() + '/Results/Results-{}/'.format(ver) # Save Directory
files_list = os.listdir(save_dir)
if ver == '6.8':
with open(save_dir + files_list[index] + '/log.txt', 'r') as f:
env_name = f.readline().split(',')[0].split(':')[1] # Gets the environment name
else:
with open(save_dir + files_list[index] + '/log_sd.txt', 'r') as f:
env_name = f.readline().split(',')[0].split(':')[1] # Gets the environment name
file_name = save_dir + files_list[index] # Final files directory
return env_name, file_name
def plot_reward(index, runs, delays, n, ver):
    env_name, file_name = get_file(index, ver)
plt.figure()
if index == -1:
plt.title('DQN')
save_file = os.getcwd() + '/Plots/v{}/rewards_normal.pdf'.format(ver)
if index == -2:
plt.title('DQN+IS')
save_file = os.getcwd() + '/Plots/v{}/rewards_IS.pdf'.format(ver)
if index == -3:
plt.title('delay-DQN')
save_file = os.getcwd() + '/Plots/v{}/rewards_delay.pdf'.format(ver)
for delay in delays:
episodes = 10000
X_axis = np.arange(episodes)
rewards_plot = np.zeros([runs, episodes])
for run in range(runs):
if delay == 'stochastic':
                rewards = np.load(file_name + '/rewards_delay_20_sd_run_{}.npy'.format(run), allow_pickle=True)[()]
else:
rewards = np.load(file_name + '/rewards_delay_{}_run_{}.npy'.format(delay, run), allow_pickle=True)[()]
# plt.plot(running_mean(rewards, n), alpha=0.25, linestyle='-.', color='blue')
rewards_plot[run] = rewards[0:episodes]
rewards_mean = np.mean(rewards_plot, axis=0)
rewards_deviation = np.std(rewards_plot, axis=0) / np.sqrt(runs)
plt.xlabel('Episodes')
plt.ylabel('Rewards')
# plt.ylim(0, 210)
plt.plot(running_mean(rewards_mean, n), label='delay={}'.format(delay))
# plt.fill_between(X_axis, rewards_mean+rewards_deviation, rewards_mean-rewards_deviation, alpha=1.5)
plt.legend(title='Delays', bbox_to_anchor=(1.05, 1), loc='upper left')
plt.savefig(save_file, bbox_inches="tight")
plt.savefig(file_name + '/rewards.pdf', bbox_inches="tight")
plt.tight_layout()
plt.show()
def compare_learning_curves(indices, label, ver, runs, delay, n=1000):
env_name = []
file_name = []
for index in indices:
env, file = get_file(index, ver)
env_name.append(env)
file_name.append(file)
# if not env_name.count(env_name[0]) == len(env_name): # Check if all the environments are same
# raise Exception('Environments are different')
plt.figure()
plt.title(env_name[0])
plt.xlabel('Episodes')
plt.ylabel('Rewards')
colors = ['blue', 'tab:orange', 'green']
for index in range(len(indices)):
for run in range(runs):
if delay == 'stochastic':
rewards = \
np.load(file_name[index] + '/rewards_delay_10_sd_run_{}.npy'.format(run), allow_pickle=True)[()]
else:
rewards = \
np.load(file_name[index] + '/rewards_delay_{}_run_{}.npy'.format(delay, run), allow_pickle=True)[()]
plt.plot(running_mean(rewards, n), label=label[index] if run == 0 else '', color=colors[index], alpha=0.5)
plt.legend(title='Algorithms', bbox_to_anchor=(1.05, 1), loc='upper left')
save_dir = os.getcwd() + '/Plots/v{}/'.format(ver)
try:
plt.savefig(save_dir + '/rewards_curves.pdf', bbox_inches="tight")
except FileNotFoundError:
os.makedirs(os.getcwd() + '/Plots/v{}'.format(ver))
plt.savefig(save_dir + '/rewards_curves.pdf', bbox_inches="tight")
plt.tight_layout()
plt.show()
def plot_losses(index, runs, n, ver):
    env_name, file_name = get_file(index, ver)
plt.figure()
plt.title(env_name)
losses = np.load(file_name + '/loss.npy', allow_pickle=True)[()]
episodes = len(losses[0])
X_axis = np.arange(episodes)
losses_plot = np.zeros([runs, episodes])
for run in range(runs):
losses_plot[run] = losses[run]
losses_mean = np.mean(losses_plot, axis=0)
losses_deviation = np.std(losses_plot, axis=0) / np.sqrt(runs)
plt.xlabel('Episodes')
plt.ylabel('Losses')
plt.plot(running_mean(losses_mean, n))
plt.savefig(file_name + '/losses.pdf')
plt.show()
def compare_algorithms(indices, label, runs, delays, ver, colors):
env_name = []
file_name = []
file_name_sd = []
for index in indices:
env, file = get_file(index, ver)
env_name.append(env)
file_name.append(file)
for index in indices:
_, file_sd = get_file(index, ver)
file_name_sd.append(file_sd)
# if not env_name.count(env_name[0]) == len(env_name): # Check if all the environments are same
# raise Exception('Environments are different')
plt.figure()
plt.title(env_name[0], fontsize=20)
plt.xlabel('Delay')
plt.ylabel('Rewards')
for index in range(len(indices)):
count = 0
X_axis = list(map(str, delays))
r_mean = np.zeros(len(delays))
r_std = np.zeros(len(delays))
episodes = 10000
# rewards_plot = np.zeros([runs, episodes])
rewards_plot = np.zeros(runs)
for delay in delays:
for run in range(runs):
if delay == 'stochastic':
rewards = \
np.load(file_name_sd[index] + '/rewards_delay_10_sd_run_{}.npy'.format(run), allow_pickle=True)[
()]
rewards_plot[run] = np.mean(rewards)
print('Algorithm: {} Delay: Stochastic Run: {} Reward: {}'.format(index, run, rewards_plot[run]))
else:
rewards = \
np.load(file_name[index] + '/rewards_delay_{}_run_{}.npy'.format(delay, run),
allow_pickle=True)[()]
rewards_plot[run] = np.mean(rewards)
print('Algorithm: {} Delay: {} Run: {} Reward: {}'.format(index, delay, run, rewards_plot[run]))
rewards_mean = rewards_plot
rewards_mean = np.mean(rewards_plot, axis=0)
rewards_deviation = np.std(rewards_plot, axis=0) / np.sqrt(runs)
r_mean[count] = np.mean(rewards_mean)
r_std[count] = np.mean(rewards_deviation)
count += 1
if label[index] == 'DQN+IS':
alg = 'DRDQN'
else:
alg = label[index]
# plt.plot(X_axis, r_mean, marker='o', label=alg, color=colors[label[index]])
plt.errorbar(X_axis, r_mean, marker='o', yerr=r_std, label=alg, color=colors[label[index]], uplims=True,
lolims=True)
# plt.legend(title='Algorithms', bbox_to_anchor=(1.05, 1), loc='upper left')
plt.legend()
plt.xlabel('Delays', fontsize=16)
plt.xticks(fontsize=16)
plt.ylabel('Rewards', fontsize=16)
plt.yticks(fontsize=16)
save_dir = os.getcwd() + '/Plots/v{}/'.format(ver)
try:
plt.savefig(save_dir + '/rewards_comparison.pdf', bbox_inches="tight")
except FileNotFoundError:
os.makedirs(os.getcwd() + '/Plots/v{}'.format(ver))
plt.savefig(save_dir + '/rewards_comparison.pdf', bbox_inches="tight")
plt.tight_layout()
plt.show()
def plot_time(indices, labels, delays, ver):
plt.figure()
env_name = []
file_name = []
file_name_sd = []
for index in indices:
env, file = get_file(index, ver)
env_name.append(env)
file_name.append(file)
for index in indices:
_, file_sd = get_file(index, ver)
file_name_sd.append(file_sd)
for index in range(len(indices)):
time = np.zeros([len(delays)])
X_axis = list(map(str, delays))
for delay in range(len(delays)):
if delays[delay] == 'stochastic':
time[delay] = np.mean(np.load(file_name_sd[index] + '/time_delay_6_sd.npy'))
else:
time[delay] = np.mean(np.load(file_name[index] + '/time_delay_{}.npy'.format(delays[delay])))
plt.plot(X_axis, time / 3600, label=labels[index], marker='o')
plt.title(env_name[0])
plt.xlabel('Delays')
plt.ylabel('Average Hours per run')
plt.legend()
save_dir = os.getcwd() + '/Plots/v{}/'.format(ver)
try:
plt.savefig(save_dir + '/time_comparison.pdf', bbox_inches="tight")
except FileNotFoundError:
os.makedirs(os.getcwd() + '/Plots/v{}'.format(ver))
plt.savefig(save_dir + '/time_comparison.pdf', bbox_inches="tight")
plt.tight_layout()
plt.show()
if __name__ == "__main__":
runs = 10
delays = [2, 4, 6, 8, 10] # , 'stochastic']
ver = '5.5'
compare_indices = [-2, -1, -3]
labels = ['DQN+IS', 'DQN', 'delay-DQN']
colors = {'DQN+IS': u'#1f77b4', 'delay-DQN': 'red', 'DQN': u'#2ca02c'}
compare_algorithms(compare_indices, labels, runs, delays, ver, colors)
plot_time(compare_indices, labels, delays, ver)
delay = 'stochastic'
compare_learning_curves(compare_indices, labels, ver, runs, delay)
| 9,639 | 39.504202 | 120 | py |
DelayResolvedRL | DelayResolvedRL-main/W-Maze/DQN/train.py | import datetime
import os
import argparse
import time
'''Hyperparameters (W-Maze)'''
# Number of Runs: 10
# Number of Frames: 1 Million
# Batch Size: 32
# gamma: 0.99
# Learning Rate: 1e-3
# epsilon-Start: 1.0
# epsilon-Stop: 1e-4
# epsilon-Decay: 1e-5
# Hidden Units: [200]
# Replay Buffer Size: 1000
# Target Network Frequency Update: 25
os.environ['TF_CPP_MIN_LOG_LEVEL'] = "3" # Suppress Tensorflow Messages
os.environ["CUDA_VISIBLE_DEVICES"] = "-1" # Set CPU/GPU
import numpy as np
from agent import *
from env_stochasticdelay import Environment
parser = argparse.ArgumentParser()
parser.add_argument("--algorithm", help="algorithm") # normal, delay (https://ieeexplore.ieee.org/document/5650345), IS (DRDQN)
parser.add_argument("--stochastic", help="use stochastic delays")
parser.add_argument("--delay", help="environment delay")
parser.add_argument("--verbose", help="log files")
args = parser.parse_args()
algorithm = args.algorithm
delay = int(args.delay)
if args.verbose == 'True':
verbose = True
else:
verbose = False
if args.stochastic == 'True':
use_stochastic_delay = True
else:
use_stochastic_delay = False
'''Log directory'''
# verbose = False
if verbose:
save_dir = os.getcwd() + '/Results-1.0/Results-'+algorithm
if not os.path.exists(save_dir):
os.makedirs(save_dir)
if use_stochastic_delay:
log_file_name = save_dir + '/log_sd.txt'
else:
log_file_name = save_dir + '/log.txt'
reward_file_name = save_dir + '/rewards'
loss_file_name = save_dir + '/loss'
else:
log_file_name = ""
'''Environment Parameters'''
game = 'W-Maze'
seed = 0 # Seed for Env, TF, Numpy
num_frames = 1e6 # Number of Frames
# num_episodes = 1e5
logs = {'log_interval': 100, # Number of Episodes after which to print output/save batch output
'log_file_name': log_file_name
}
'''Parameters of Algorithm'''
algorithm_params = {'algorithm': algorithm, # normal, delay, IS
'batch_size': 32,
'gamma': 0.99,
'learning_rate': 1e-3,
'start_epsilon': 1.0,
'stop_epsilon': 1e-4,
'epsilon_decay': 1e-5,
'use_stochastic_delay': use_stochastic_delay,
'delay': delay,
'min_delay': 0,
'seed': seed
}
model_params = {'hidden_units': [200, 200], # model architecture
'max_buffer_size': 1000,
'min_buffer_size': 100,
'copy_step': 25, # 1 means no target network
'max_dimension': 10
}
'''Runs'''
runs = 10
rewards = {}
losses = {}
time_taken = np.zeros(runs)
model_params['max_dimension'] = min(delay, model_params['max_dimension'])
if verbose:
if use_stochastic_delay:
time_file_name = save_dir + '/time_delay_{}_sd'.format(delay)
if algorithm == 'IS':
delay_avg = (delay + algorithm_params['min_delay'])/2
# algorithm_params['gamma'] = np.power(algorithm_params['gamma'], 1/delay_avg)
else:
time_file_name = save_dir + '/time_delay_{}'.format(delay)
for run in range(runs):
if verbose:
if use_stochastic_delay:
reward_file_name_cur = reward_file_name + '_delay_{}_sd_run_{}'.format(delay, run)
loss_file_name_cur = loss_file_name + '_delay_{}_sd_run_{}'.format(delay, run)
else:
reward_file_name_cur = reward_file_name + '_delay_{}_run_{}'.format(delay, run)
loss_file_name_cur = loss_file_name + '_delay_{}_run_{}'.format(delay, run)
'''Set seed'''
seed = run
algorithm_params['seed'] = seed
'''Write Parameters to log_file'''
if verbose:
with open(log_file_name, "a") as f:
f.write('Environment: {}, Frames: {}\n'.format(game, num_frames))
f.write('Algorithm Parameters: {} \n'.format(algorithm_params))
f.write('Model Parameters: {} \n'.format(model_params))
f.write('Run: {} \n'.format(run))
f.flush()
'''Initialize Environment & Model'''
env = Environment(seed, algorithm_params['gamma'], algorithm_params['use_stochastic_delay'],
algorithm_params['delay'], algorithm_params['min_delay'])
'''Train the Agent'''
start_time = time.time()
reward_history, loss_history = train_agent(env, num_frames, model_params, algorithm_params, logs, verbose)
end_time = time.time()
time_taken[run] = end_time - start_time
if verbose:
with open(log_file_name, "a") as f:
f.write('Time taken: {}\n'.format(time_taken))
f.flush()
'''Store the results'''
rewards = reward_history
losses = loss_history
'''Save Rewards and Losses'''
if verbose:
np.save(reward_file_name_cur, rewards)
np.save(loss_file_name_cur, losses)
if verbose:
np.save(time_file_name, time_taken)
| 5,018 | 33.854167 | 128 | py |
DelayResolvedRL | DelayResolvedRL-main/W-Maze/DQN/env.py | import numpy as np
from collections import deque
import random
class Environment:
"""Initialize Environment"""
def __init__(self, seed, delay):
np.random.seed(seed)
random.seed(seed)
self.call = 0
self.breadth = 7
self.length = 11
self.state_space = np.empty([self.breadth, self.length], dtype='<U1')
self.state_space[:] = 'E'
self.state_space[0] = 'X'
self.state_space[1:4, self.length // 2 - 2] = 'X'
self.state_space[1:4, self.length // 2 + 2] = 'X'
self.state_space[0, self.length // 2 - 1:self.length // 2 + 2] = 'G'
self.state_space[self.breadth - 1, 0] = 'P'
self.actions = [1, 2, 3, 4] # UP, DOWN, LEFT, RIGHT
self.number_of_actions = len(self.actions)
self.turn_limit = 300
self.delay = delay
self.actions_in_buffer = deque(maxlen=self.delay)
self.fill_up_buffer()
self.delayed_action = 0
self.state = self.reset()
self.step_count = 0
def reset(self):
x = random.randint(0, self.breadth-1)
y = 0
starting_state = [x, y]
self.state_space[x, y] = 'P'
self.fill_up_buffer()
self.step_count = 0
return starting_state
def fill_up_buffer(self):
for _ in range(self.delay):
action = 0
self.actions_in_buffer.append(action)
def step(self, state, action):
self.step_count += 1
if self.delay != 0:
chosen_action = action
self.delayed_action = self.actions_in_buffer.popleft() # get delayed action from buffer
self.actions_in_buffer.append(chosen_action) # append undelayed action to buffer
action = self.delayed_action
else:
self.delayed_action = action
done = False
player_position = state
self.state = player_position
reward = -1
"""UP"""
if action == 1:
if player_position[0] - 1 >= 0 and self.state_space[player_position[0] - 1, player_position[1]] != 'X':
self.state_space[player_position[0], player_position[1]] = 'E'
if self.state_space[player_position[0] - 1, player_position[1]] == 'G':
done = True
self.state = self.reset()
reward = 11
else:
self.state = [player_position[0] - 1, player_position[1]]
self.state_space[player_position[0] - 1, player_position[1]] = 'P'
"""DOWN"""
if action == 2:
if player_position[0] + 1 < self.breadth \
and self.state_space[player_position[0] + 1, player_position[1]] != 'X':
self.state_space[player_position[0], player_position[1]] = 'E'
if self.state_space[player_position[0] + 1, player_position[1]] == 'G':
done = True
self.state = self.reset()
reward = 11
else:
                    self.state = [player_position[0] + 1, player_position[1]]
self.state_space[player_position[0] + 1, player_position[1]] = 'P'
"""LEFT"""
if action == 3:
if player_position[1] - 1 >= 0 and self.state_space[player_position[0], player_position[1] - 1] != 'X':
self.state_space[player_position[0], player_position[1]] = 'E'
if self.state_space[player_position[0], player_position[1] - 1] == 'G':
done = True
self.state = self.reset()
reward = 11
else:
                    self.state = [player_position[0], player_position[1] - 1]
self.state_space[player_position[0], player_position[1] - 1] = 'P'
"""RIGHT"""
if action == 4:
if player_position[1] + 1 < self.length \
and self.state_space[player_position[0], player_position[1] + 1] != 'X':
self.state_space[player_position[0], player_position[1]] = 'E'
if self.state_space[player_position[0], player_position[1] + 1] == 'G':
done = True
self.state = self.reset()
reward = 11
else:
self.state = [player_position[0], player_position[1] + 1]
self.state_space[player_position[0], player_position[1] + 1] = 'P'
if self.step_count == self.turn_limit:
done = True
return self.state, reward, done
| 4,592 | 41.925234 | 115 | py |
DelayResolvedRL | DelayResolvedRL-main/W-Maze/Tabular-Q/dr_agent.py | import numpy as np
from collections import deque
'''Q-learning agent for the augmented agent'''
class Agent:
def __init__(self, state_space, num_actions, delay):
self.epsilon = 1.0
self.num_actions = num_actions
self.delay = delay
self.actions_in_buffer = deque(maxlen=self.delay)
self.actions_in_buffer_prev = deque(maxlen=self.delay)
tabular_value_shape = [state_space.shape[0]] + [state_space.shape[1]] + \
[num_actions for _ in range(self.delay + 1)]
self.Q_values = np.zeros(tabular_value_shape)
# self.E = np.zeros(tabular_value_shape)
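        # The Q-table is indexed by the augmented (information) state: grid position (x, y) plus the `delay`
        # pending actions in the buffer, with one further axis for the action being evaluated.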
@staticmethod
def randargmax(b, **kw):
""" a random tie-breaking argmax"""
return np.argmax(np.random.random(b.shape) * (b == b.max()), **kw)
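    # ties are broken uniformly at random among maximal entries instead of always returning the first index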
def update_epsilon(self, epsilon):
self.epsilon = epsilon
"""fill up action buffer with the action from the current state"""
def fill_up_buffer(self, state):
for _ in range(self.delay):
action = self.act(state)
self.actions_in_buffer.append(action)
def choose_action(self, state):
        if self.delay == 0:
            action = self.act(state)
            return action, action  # no delay: the executed action and the newly selected action coincide
next_action = self.act(state)
self.actions_in_buffer_prev = np.copy(self.actions_in_buffer)
action = self.actions_in_buffer.popleft() # get delayed action
self.actions_in_buffer.append(next_action) # put undelayed action into the buffer
return action, next_action
def act(self, state):
        if self.epsilon < np.random.random():  # exploit: greedy w.r.t. Q over the information state
            action = self.randargmax(self.Q_values[(state[0], state[1]) + tuple(self.actions_in_buffer)])
        else:
            action = np.random.randint(self.num_actions)  # explore: uniform random action
return action
| 1,859 | 38.574468 | 105 | py |