id (stringlengths 3–8) | content (stringlengths 100–981k)
---|---|
43467
|
from __future__ import absolute_import
__author__ = '<NAME>'
import time
import struct
try:
from pebble import pulse2
except ImportError:
pass
from . import BaseTransport, MessageTargetWatch
from libpebble2.exceptions import ConnectionError, PebbleError
class PULSETransport(BaseTransport):
"""
Represents a direct connection to a physical/virtual Pebble using the PULSEv2 interface.
This transport expects to be given a PULSE2 Link object.
:param link: A PULSE2 Link object to tunnel Pebble Protocol over.
:type link: pulse2.link.Link
"""
must_initialise = True
PPOPULSE_PORT = 0x3e22
OPCODE_PROTOCOL_DATA = 0x1
OPCODE_PROTOCOL_OPEN = 0x2
OPCODE_PROTOCOL_CLOSE = 0x3
def __init__(self, link):
self.link = link
self.connection = None
self.buffer = b''
@staticmethod
def _chunks(list_items, chunk_length):
for i in xrange(0, len(list_items), chunk_length):
yield list_items[i:i+chunk_length]
def connect(self):
self.connection = self.link.open_socket('reliable', self.PPOPULSE_PORT)
if not self.connection:
raise ConnectionError('Failed to open PPoPULSE socket')
self._send_with_opcode(self.OPCODE_PROTOCOL_OPEN)
start_time = time.time()
while time.time() < start_time + 10.0:
opcode, _ = self._recv_with_opcode()
if opcode == self.OPCODE_PROTOCOL_OPEN:
break
else:
raise ConnectionError('Timeout waiting for PPoPULSE open ACK')
def disconnect(self):
if self.connected:
try:
self._send_with_opcode(self.OPCODE_PROTOCOL_CLOSE)
except pulse2.exceptions.SocketClosed:
pass
self.connection.close()
self.connection = None
@property
def connected(self):
return self.connection is not None
def read_packet(self):
while self.connected:
if len(self.buffer) >= 2:
length, = struct.unpack('!H', self.buffer[:2])
length += 4
if len(self.buffer) >= length:
msg_data = self.buffer[:length]
self.buffer = self.buffer[length:]
return MessageTargetWatch(), msg_data
opcode, data = self._recv_with_opcode()
if opcode == self.OPCODE_PROTOCOL_DATA:
self.buffer += data
def send_packet(self, message, target=MessageTargetWatch()):
assert isinstance(target, MessageTargetWatch)
for chunk in self._chunks(message, self.connection.mtu - 1):
self._send_with_opcode(self.OPCODE_PROTOCOL_DATA, chunk)
def _recv_with_opcode(self):
try:
packet = self.connection.receive(block=True)
except (AttributeError, pulse2.exceptions.SocketClosed):
self.connection = None
raise ConnectionError('PULSE transport closed')
assert len(packet) >= 1
opcode = ord(packet[0])
data = packet[1:]
return opcode, data
def _send_with_opcode(self, opcode, body=None):
assert self.connected
data = chr(opcode)
if body:
data += body
self.connection.send(data)
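# Framing note (derived from the code above): every PPoPULSE frame is one
# opcode byte followed by an optional payload. send_packet() splits outgoing
# Pebble Protocol messages into (mtu - 1)-byte chunks, each prefixed with
# OPCODE_PROTOCOL_DATA, while read_packet() buffers incoming DATA payloads
# until a full Pebble Protocol message (2-byte big-endian payload length +
# 2-byte endpoint + payload, hence `length + 4` bytes) is available.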
|
43490
|
import symjax
import symjax.tensor as T
import matplotlib.pyplot as plt
import numpy as np
J = 5
Q = 4
scales = T.power(2, T.linspace(0.1, J - 1, J * Q))
scales = scales[:, None]
print(scales.get())
wavelet = symjax.tensor.signal.complex_morlet(5 * scales, np.pi / scales)
waveletw = symjax.tensor.signal.fourier_complex_morlet(
5 * scales, np.pi / scales, wavelet.shape[-1]
)
waveletlp = symjax.tensor.signal.littewood_paley_normalization(
waveletw, down=np.pi / scales[-1, 0]
)
wavelet = wavelet.get()
waveletw = waveletw.get()
waveletlp = waveletlp.get()
plt.subplot(321)
for i in range(J * Q):
fr = np.real(np.fft.fft(np.fft.ifftshift(wavelet[i])))
fi = np.imag(np.fft.fft(np.fft.ifftshift(wavelet[i])))
plt.plot(i + fr, "--b")
plt.plot(i + fi, "--r")
plt.subplot(322)
for i in range(J * Q):
plt.plot(2 * i + wavelet[i].real, c="b")
plt.plot(2 * i + wavelet[i].imag, c="r")
plt.subplot(324)
for i in range(J * Q):
fr = np.real(np.fft.fftshift(np.fft.ifft(waveletw[i])))
fi = np.imag(np.fft.fftshift(np.fft.ifft(waveletw[i])))
plt.plot(2 * i + fr / fr.max(), "--b")
plt.plot(2 * i + fi / fi.max(), "--r")
plt.subplot(323)
for i in range(J * Q):
plt.plot(i + waveletw[i].real, c="b")
plt.plot(i + waveletw[i].imag, c="r")
plt.subplot(325)
for i in range(J * Q):
plt.plot(i + waveletlp[i].real, c="b")
plt.plot(i + waveletlp[i].imag, c="r")
plt.plot(np.abs(waveletlp).sum(0), c="g")
plt.subplot(326)
for i in range(J * Q):
fr = np.real(np.fft.fftshift(np.fft.ifft(waveletlp[i])))
fi = np.imag(np.fft.fftshift(np.fft.ifft(waveletlp[i])))
plt.plot(2 * i + fr / fr.max(), "--b")
plt.plot(2 * i + fi / fi.max(), "--r")
# plt.show()
plt.savefig("wavelets.png")
|
43526
|
import torchvision.transforms as T
from torchvision.datasets import ImageFolder
class WHURS19(ImageFolder):
""" WHU-RS19 dataset from'Structural High-resolution Satellite Image Indexing', Xia at al. (2010)
https://hal.archives-ouvertes.fr/file/index/docid/458685/filename/structural_satellite_indexing_XYDG.pdf
"""
def __init__(
self,
root: str = ".data/WHU-RS19",
transform: T.Compose = T.Compose([T.ToTensor()])
):
super().__init__(
root=root,
transform=transform
)
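# Usage sketch (assumes the WHU-RS19 images are already extracted under
# .data/WHU-RS19/<class_name>/*.jpg, the layout ImageFolder expects):
#
#     ds = WHURS19()
#     image, label = ds[0]              # image is a CxHxW float tensor via ToTensor()
#     class_name = ds.classes[label]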
|
43534
|
from lcu_driver import Connector
connector = Connector()
@connector.ready
async def connect(connection):
print('LCU API is ready to be used.')
@connector.close
async def disconnect(connection):
print('Finished task')
connector.start()
|
43568
|
import theano.tensor as tt
from theano import scan
from . import multivariate
from . import continuous
from . import distribution
__all__ = [
'AR1',
'GaussianRandomWalk',
'GARCH11',
'EulerMaruyama',
'MvGaussianRandomWalk',
'MvStudentTRandomWalk'
]
class AR1(distribution.Continuous):
"""
Autoregressive process with 1 lag.
Parameters
----------
k : tensor
effect of lagged value on current value
tau_e : tensor
precision for innovations
"""
def __init__(self, k, tau_e, *args, **kwargs):
super(AR1, self).__init__(*args, **kwargs)
self.k = k
self.tau_e = tau_e
self.tau = tau_e * (1 - k ** 2)
self.mode = 0.
def logp(self, x):
k = self.k
tau_e = self.tau_e
x_im1 = x[:-1]
x_i = x[1:]
boundary = continuous.Normal.dist(0, tau_e).logp
innov_like = continuous.Normal.dist(k * x_im1, tau_e).logp(x_i)
return boundary(x[0]) + tt.sum(innov_like) + boundary(x[-1])
class GaussianRandomWalk(distribution.Continuous):
"""
Random Walk with Normal innovations
Parameters
----------
tau : tensor
tau > 0, innovation precision
sd : tensor
sd > 0, innovation standard deviation (alternative to specifying tau)
mu: tensor
innovation drift, defaults to 0.0
init : distribution
distribution for initial value (Defaults to Flat())
"""
def __init__(self, tau=None, init=continuous.Flat.dist(), sd=None, mu=0.,
*args, **kwargs):
super(GaussianRandomWalk, self).__init__(*args, **kwargs)
self.tau = tau
self.sd = sd
self.mu = mu
self.init = init
self.mean = 0.
def logp(self, x):
tau = self.tau
sd = self.sd
mu = self.mu
init = self.init
x_im1 = x[:-1]
x_i = x[1:]
innov_like = continuous.Normal.dist(mu=x_im1 + mu, tau=tau, sd=sd).logp(x_i)
return init.logp(x[0]) + tt.sum(innov_like)
class GARCH11(distribution.Continuous):
"""
GARCH(1,1) with Normal innovations. The model is specified by
y_t = sigma_t * z_t
sigma_t^2 = omega + alpha_1 * y_{t-1}^2 + beta_1 * sigma_{t-1}^2
with z_t iid and Normal with mean zero and unit standard deviation.
Parameters
----------
omega : distribution
omega > 0, distribution for mean variance
alpha_1 : distribution
alpha_1 >= 0, distribution for autoregressive term
beta_1 : distribution
beta_1 >= 0, alpha_1 + beta_1 < 1, distribution for moving
average term
initial_vol : distribution
initial_vol >= 0, distribution for initial volatility, sigma_0
"""
def __init__(self, omega=None, alpha_1=None, beta_1=None,
initial_vol=None, *args, **kwargs):
super(GARCH11, self).__init__(*args, **kwargs)
self.omega = omega
self.alpha_1 = alpha_1
self.beta_1 = beta_1
self.initial_vol = initial_vol
self.mean = 0
def get_volatility(self, x):
x = x[:-1]
def volatility_update(x, vol, w, a, b):
return tt.sqrt(w + a * tt.square(x) + b * tt.square(vol))
vol, _ = scan(fn=volatility_update,
sequences=[x],
outputs_info=[self.initial_vol],
non_sequences=[self.omega, self.alpha_1,
self.beta_1])
return tt.concatenate([[self.initial_vol], vol])
def logp(self, x):
vol = self.get_volatility(x)
return tt.sum(continuous.Normal.dist(0, sd=vol).logp(x))
class EulerMaruyama(distribution.Continuous):
"""
Stochastic differential equation discretized with the Euler-Maruyama method.
Parameters
----------
dt : float
time step of discretization
sde_fn : callable
function returning the drift and diffusion coefficients of SDE
sde_pars : tuple
parameters of the SDE, passed as *args to sde_fn
"""
def __init__(self, dt, sde_fn, sde_pars, *args, **kwds):
super(EulerMaruyama, self).__init__(*args, **kwds)
self.dt = dt
self.sde_fn = sde_fn
self.sde_pars = sde_pars
def logp(self, x):
xt = x[:-1]
f, g = self.sde_fn(x[:-1], *self.sde_pars)
mu = xt + self.dt * f
sd = tt.sqrt(self.dt) * g
return tt.sum(continuous.Normal.dist(mu=mu, sd=sd).logp(x[1:]))
class MvGaussianRandomWalk(distribution.Continuous):
"""
Multivariate Random Walk with Normal innovations
Parameters
----------
mu : tensor
innovation drift, defaults to 0.0
cov : tensor
pos def matrix, innovation covariance matrix
tau : tensor
pos def matrix, innovation precision (alternative to specifying cov)
init : distribution
distribution for initial value (Defaults to Flat())
"""
def __init__(self, mu=0., cov=None, tau=None, init=continuous.Flat.dist(),
*args, **kwargs):
super(MvGaussianRandomWalk, self).__init__(*args, **kwargs)
tau, cov = multivariate.get_tau_cov(mu, tau=tau, cov=cov)
self.tau = tau
self.cov = cov
self.mu = mu
self.init = init
self.mean = 0.
def logp(self, x):
tau = self.tau
mu = self.mu
init = self.init
x_im1 = x[:-1]
x_i = x[1:]
innov_like = multivariate.MvNormal.dist(mu=x_im1 + mu, tau=tau).logp(x_i)
return init.logp(x[0]) + tt.sum(innov_like)
class MvStudentTRandomWalk(distribution.Continuous):
"""
Multivariate Random Walk with StudentT innovations
Parameters
----------
nu : degrees of freedom
mu : tensor
innovation drift, defaults to 0.0
cov : tensor
pos def matrix, innovation covariance matrix
tau : tensor
pos def matrix, innovation precision (alternative to specifying cov)
init : distribution
distribution for initial value (Defaults to Flat())
"""
def __init__(self, nu, mu=0., cov=None, tau=None, init=continuous.Flat.dist(),
*args, **kwargs):
super(MvStudentTRandomWalk, self).__init__(*args, **kwargs)
tau, cov = multivariate.get_tau_cov(mu, tau=tau, cov=cov)
self.tau = tau
self.cov = cov
self.mu = mu
self.nu = nu
self.init = init
self.mean = 0.
def logp(self, x):
cov = self.cov
mu = self.mu
nu = self.nu
init = self.init
x_im1 = x[:-1]
x_i = x[1:]
innov_like = multivariate.MvStudentT.dist(nu, cov, mu=x_im1 + mu).logp(x_i)
return init.logp(x[0]) + tt.sum(innov_like)
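# Illustrative sketch (not part of the original module): the volatility
# recursion that GARCH11.get_volatility expresses with theano.scan, written in
# plain numpy for a synthetic series; parameter values are arbitrary.
if __name__ == "__main__":
    import numpy as np

    omega, alpha_1, beta_1, sigma_0 = 0.2, 0.3, 0.6, 1.0
    rng = np.random.RandomState(0)
    n = 100
    y = np.zeros(n)
    sigma = np.zeros(n)
    sigma[0] = sigma_0
    y[0] = sigma[0] * rng.randn()
    for t in range(1, n):
        # sigma_t^2 = omega + alpha_1 * y_{t-1}^2 + beta_1 * sigma_{t-1}^2
        sigma[t] = np.sqrt(omega + alpha_1 * y[t - 1] ** 2 + beta_1 * sigma[t - 1] ** 2)
        y[t] = sigma[t] * rng.randn()
    print("mean simulated volatility:", sigma.mean())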
|
43571
|
import pytest
from monty.serialization import MontyDecoder
from monty.serialization import loadfn
from emmet.core.thermo import ThermoDoc
@pytest.fixture(scope="session")
def Fe3O4_structure(test_dir):
structure = loadfn(test_dir / "thermo/Fe3O4_structure.json")
return structure
@pytest.fixture(scope="session")
def Fe2O3a_structure(test_dir):
structure = loadfn(test_dir / "thermo/Fe2O3a_structure.json")
return structure
@pytest.fixture(scope="session")
def Fe2O3b_structure(test_dir):
structure = loadfn(test_dir / "thermo/Fe2O3b_structure.json")
return structure
@pytest.fixture(scope="session")
def Fe_structure(test_dir):
structure = loadfn(test_dir / "thermo/Fe_structure.json")
return structure
@pytest.fixture(scope="session")
def O_structure(test_dir):
structure = loadfn(test_dir / "thermo/O_structure.json")
return structure
@pytest.fixture
def entries(
Fe3O4_structure, Fe2O3a_structure, Fe2O3b_structure, Fe_structure, O_structure
):
return MontyDecoder().process_decoded(
[
{
"@module": "pymatgen.entries.computed_entries",
"@class": "ComputedStructureEntry",
"correction": 0.0,
"structure": Fe3O4_structure.as_dict(),
"entry_id": "mp-1",
"energy": -382.146593528,
"composition": {"Fe": 24.0, "O": 32.0},
"name": "Fe3O4",
"attribute": None,
"@version": "2020.4.29",
},
{
"@module": "pymatgen.entries.computed_entries",
"@class": "ComputedStructureEntry",
"correction": 0.0,
"structure": Fe2O3a_structure.as_dict(),
"entry_id": "mp-2",
"energy": -270.38765404,
"composition": {"Fe": 16.0, "O": 24.0},
"name": "Fe2O3",
"attribute": None,
"@version": "2020.4.29",
},
{
"@module": "pymatgen.entries.computed_entries",
"@class": "ComputedStructureEntry",
"correction": 0.0,
"structure": O_structure.as_dict(),
"entry_id": "mp-3",
"energy": -92.274692568,
"composition": {"O": 24.0},
"name": "O",
"attribute": None,
"@version": "2020.4.29",
},
{
"@module": "pymatgen.entries.computed_entries",
"@class": "ComputedStructureEntry",
"correction": 0.0,
"structure": Fe_structure.as_dict(),
"entry_id": "mp-4",
"energy": -13.00419661,
"composition": {"Fe": 2.0},
"name": "Fe",
"attribute": None,
"@version": "2020.4.29",
},
{
"@module": "pymatgen.entries.computed_entries",
"@class": "ComputedStructureEntry",
"correction": 0.0,
"structure": Fe2O3b_structure.as_dict(),
"entry_id": "mp-5",
"energy": -1080.82678592,
"composition": {"Fe": 64.0, "O": 96.0},
"name": "Fe2O3",
"attribute": None,
"@version": "2020.4.29",
},
]
)
def test_from_entries(entries):
docs, pd = ThermoDoc.from_entries(entries, deprecated=False)
assert len(docs) == len(entries)
assert all([d.energy_type == "Unknown" for d in docs])
unstable_doc = next(d for d in docs if d.material_id == "mp-5")
assert unstable_doc.is_stable is False
assert all([d.is_stable for d in docs if d != unstable_doc])
|
43591
|
from colorama import Fore, Style
def console_log(text, _type=None, title=None, space=False, space_number=0):
# Checking text instance is string
if isinstance(text, str):
if title is None:
if _type == 'success':
return print(Style.DIM + Fore.GREEN + '[SUCCESS]'
+ Style.RESET_ALL + ' ' + text)
elif _type == 'warning':
return print(Style.DIM + Fore.YELLOW + '[WARNING]'
+ Style.RESET_ALL + ' ' + text)
elif _type == 'error':
return print(Style.DIM + Fore.RED + '[ERROR]'
+ Style.RESET_ALL + ' ' + text)
else:
return print(text)
elif title is not None \
and isinstance(title, str) and not space:
if _type == 'success':
return print(Style.DIM + Fore.GREEN + '[SUCCESS]'
+ Style.RESET_ALL + ' ' + Fore.WHITE + title
+ ': ' + Style.RESET_ALL + text)
elif _type == 'warning':
return print(Style.DIM + Fore.YELLOW + '[WARNING]'
+ Style.RESET_ALL + ' ' + Fore.WHITE + title
+ ': ' + Style.RESET_ALL + text)
elif _type == 'error':
return print(Style.DIM + Fore.RED + '[ERROR]'
+ Style.RESET_ALL + ' ' + Fore.WHITE + title
+ ': ' + Style.RESET_ALL + text)
else:
return print(Fore.WHITE + title
+ ': ' + Style.RESET_ALL + text)
elif title is not None \
and isinstance(title, str) and space:
if _type == 'success':
return print(Style.DIM + Fore.GREEN + ' '
+ Style.RESET_ALL + ' ' + Fore.WHITE + title
+ ': ' + Style.RESET_ALL + text)
elif _type == 'warning':
return print(Style.DIM + Fore.YELLOW + ' '
+ Style.RESET_ALL + ' ' + Fore.WHITE + title
+ ': ' + Style.RESET_ALL + text)
elif _type == 'error':
return print(Style.DIM + Fore.RED + ' '
+ Style.RESET_ALL + ' ' + Fore.WHITE + title
+ ': ' + Style.RESET_ALL + text)
else:
if space_number == 0:
return print(Fore.WHITE + '' + title
+ ': ' + Style.RESET_ALL + text)
else:
return print(Fore.WHITE + ' ' * space_number + title
+ ': ' + Style.RESET_ALL + text)
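# Usage sketch (hypothetical calls illustrating the flags above):
#
#     console_log("build finished", _type='success')
#     console_log("disk almost full", _type='warning', title='Storage')
#     console_log("details follow", title='Storage', space=True, space_number=9)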
|
43611
|
import torch
import torch.nn.functional as F
from exptune.exptune import ExperimentSettings, Metric, TrialResources
from exptune.hyperparams import (
ChoiceHyperParam,
LogUniformHyperParam,
UniformHyperParam,
)
from exptune.search_strategies import GridSearchStrategy
from exptune.summaries.final_run_summaries import TestMetricSummaries, TrialCurvePlotter
from exptune.utils import PatientStopper
from ogb.graphproppred.evaluate import Evaluator
from ray.tune.schedulers import AsyncHyperBandScheduler
from torch.optim import Adam
from torch.optim.lr_scheduler import ReduceLROnPlateau
from experiments.exp_config import BaseGraphConfig, Extra
from experiments.mol.pna_style_models import (
EgcHIVNet,
GatHIVNet,
GcnHIVNet,
GinHIVNet,
MpnnHIVNet,
)
from experiments.mol.utils import mol_data
from experiments.utils import data_location, load_pretrained, print_model_parameters
REPEATS = 10
ITERS = 100
NUM_LAYERS = 4
PRETRAINED_CONF = {
"gcn": (240, "https://www.dropbox.com/s/wn0wpmko8vl5aq1/gcn.pt?dl=1"),
"gat": (240, "https://www.dropbox.com/s/ohihapt36lykekw/gat.pt?dl=1"),
"gin": (240, "https://www.dropbox.com/s/0rjxeixit6jtinq/gin.pt?dl=1"),
"mpnn_max": (180, "https://www.dropbox.com/s/jxuams6l82tdb1v/mpnn_max.pt?dl=1"),
"mpnn_add": (180, "https://www.dropbox.com/s/op0takj73p1qwzy/mpnn_sum.pt?dl=1"),
"egc_s": (236, "https://www.dropbox.com/s/hrohxtt9vlps9sf/mcn_s.pt?dl=1"),
"egc_m": (224, "https://www.dropbox.com/s/hnbtmzka1r1t2hk/mcn_m.pt?dl=1"),
}
def train(model, optimizer, data, device, loss_fn):
model = model.to(device)
model.train()
num_batches = 0
loss_total = 0.0
for batch in data["train"]:
batch = batch.to(device)
optimizer.zero_grad()
out = model(batch)
# nan targets (unlabeled) should be ignored when computing training loss
is_labeled = batch.y == batch.y
loss = loss_fn(
out.to(torch.float32)[is_labeled], batch.y.to(torch.float32)[is_labeled]
)
loss.backward()
optimizer.step()
loss_total += loss.item()
num_batches += 1
return {"train_loss": loss_total / num_batches}
@torch.no_grad()
def evaluate(model, data, device, split, evaluator, metric_key, loss_fn):
model = model.to(device)
model.eval()
y_true = []
y_pred = []
loss_total = 0.0
num_batches = 0
for batch in data[split]:
batch = batch.to(device)
pred = model(batch)
is_labeled = batch.y == batch.y
loss_total += loss_fn(
pred.to(torch.float32)[is_labeled], batch.y.to(torch.float32)[is_labeled]
).item()
y_true.append(batch.y.view(pred.shape).detach().cpu())
y_pred.append(pred.detach().cpu())
num_batches += 1
y_true = torch.cat(y_true, dim=0).numpy()
y_pred = torch.cat(y_pred, dim=0).numpy()
input_dict = {"y_true": y_true, "y_pred": y_pred}
return {
f"{split}_metric": evaluator.eval(input_dict)[metric_key],
f"{split}_loss": loss_total / num_batches,
}
class MolConfig(BaseGraphConfig):
def __init__(
self,
dataset,
hidden,
) -> None:
super().__init__(debug_mode=False)
assert dataset in ["hiv", "pcba"]
self.dataset = dataset
self.hidden = hidden
if self.dataset == "hiv":
self.loss_fn = F.binary_cross_entropy_with_logits
self.num_tasks = 1
self.eval_metric_key = "rocauc"
elif self.dataset == "pcba":
self.loss_fn = F.binary_cross_entropy_with_logits
self.num_tasks = 128
self.eval_metric_key = "ap"
self.evaluator = Evaluator(f"ogbg-mol{self.dataset}")
def settings(self) -> ExperimentSettings:
return ExperimentSettings(
f"mol-{self.dataset}",
final_repeats=REPEATS,
final_max_iterations=ITERS,
)
def resource_requirements(self) -> TrialResources:
return TrialResources(cpus=2, gpus=0.25)
def search_strategy(self):
return GridSearchStrategy({"lr": 5, "wd": 2, "dropout": 2})
def trial_scheduler(self):
metric = self.trial_metric()
return AsyncHyperBandScheduler(
metric=metric.name, mode=metric.mode, max_t=ITERS, grace_period=30
)
def trial_metric(self) -> Metric:
return Metric("valid_metric", "max")
def stoppers(self):
metric = self.trial_metric()
return [
PatientStopper(
metric=metric.name, mode=metric.mode, patience=20, max_iters=ITERS
)
]
def optimizer(self, model, hparams):
return Adam(model.parameters(), lr=hparams["lr"], weight_decay=hparams["wd"])
def extra_setup(self, model, optimizer, hparams):
print_model_parameters(model)
return Extra(
device=torch.device("cuda" if torch.cuda.is_available() else "cpu"),
lr_scheduler=ReduceLROnPlateau(
optimizer, mode="max", factor=0.5, patience=10, min_lr=1e-5
),
)
def data(self, pinned_objs, hparams):
return mol_data(
data_location(), dataset=self.dataset, batch_size=hparams["batch_size"]
)
def hyperparams(self):
return {
"lr": LogUniformHyperParam(0.0001, 0.01, default=0.001),
"batch_size": ChoiceHyperParam([32, 64], default=32),
"wd": LogUniformHyperParam(0.0001, 0.001, default=0.0005),
"dropout": UniformHyperParam(0.0, 0.2, default=0.2),
}
def train(self, model, optimizer, data, extra, iteration: int):
return train(model, optimizer, data, extra.device, self.loss_fn), None
def val(self, model, data, extra, iteration: int):
v_metrics = evaluate(
model,
data,
extra.device,
"valid",
self.evaluator,
self.eval_metric_key,
self.loss_fn,
)
t_metrics = evaluate(
model,
data,
extra.device,
"test",
self.evaluator,
self.eval_metric_key,
self.loss_fn,
)
extra.lr_scheduler.step(v_metrics["valid_metric"])
return {**v_metrics, **t_metrics}, None
def test(self, model, data, extra):
return (
evaluate(
model,
data,
extra.device,
"test",
self.evaluator,
self.eval_metric_key,
self.loss_fn,
),
None,
)
def persist_trial(self, checkpoint_dir, model, optimizer, hparams, extra):
out = {
"model": model.state_dict(),
"opt": optimizer.state_dict(),
"lr_scheduler": extra.lr_scheduler.state_dict(),
"hparams": hparams,
}
torch.save(out, str(checkpoint_dir / "checkpoint.pt"))
def restore_trial(self, checkpoint_dir, map_location=None):
checkpoint = torch.load(
str(checkpoint_dir / "checkpoint.pt"), map_location=map_location
)
hparams = checkpoint["hparams"]
model = self.model(hparams)
model.load_state_dict(checkpoint["model"])
opt = self.optimizer(model, hparams)
opt.load_state_dict(checkpoint["opt"])
extra = self.extra_setup(model, opt, hparams)
extra.lr_scheduler.load_state_dict(checkpoint["lr_scheduler"])
return model, opt, hparams, extra
def final_runs_summaries(self):
return [
TrialCurvePlotter(
[
"train_loss",
"valid_metric",
"test_metric",
"valid_loss",
"test_loss",
],
name="loss_curves",
),
TestMetricSummaries(),
]
class GcnMolConfig(MolConfig):
def model(self, hparams):
return GcnHIVNet(
hidden_dim=self.hidden,
num_graph_layers=NUM_LAYERS,
in_feat_drop=hparams["dropout"],
residual=True,
)
def pretrained(self, model_dir):
return load_pretrained(
self,
dataset_name="hiv",
model_name="gcn",
hidden=self.hidden,
model_dir=model_dir,
pretrained_conf=PRETRAINED_CONF,
)
class GatMolConfig(MolConfig):
def model(self, hparams):
return GatHIVNet(
hidden_dim=self.hidden,
num_graph_layers=NUM_LAYERS,
in_feat_drop=hparams["dropout"],
residual=True,
)
def pretrained(self, model_dir):
return load_pretrained(
self,
dataset_name="hiv",
model_name="gat",
hidden=self.hidden,
model_dir=model_dir,
pretrained_conf=PRETRAINED_CONF,
)
class GinMolConfig(MolConfig):
def model(self, hparams):
return GinHIVNet(
hidden_dim=self.hidden,
num_graph_layers=NUM_LAYERS,
in_feat_drop=hparams["dropout"],
residual=True,
)
def pretrained(self, model_dir):
return load_pretrained(
self,
dataset_name="hiv",
model_name="gin",
hidden=self.hidden,
model_dir=model_dir,
pretrained_conf=PRETRAINED_CONF,
)
class EgcMolConfig(MolConfig):
def __init__(self, dataset, hidden, softmax, num_bases, num_heads, aggrs) -> None:
super().__init__(dataset=dataset, hidden=hidden)
self.softmax = softmax
self.num_bases = num_bases
self.num_heads = num_heads
self.aggrs = aggrs.split(",")
def model(self, hparams):
return EgcHIVNet(
hidden_dim=self.hidden,
num_graph_layers=NUM_LAYERS,
in_feat_drop=hparams["dropout"],
residual=True,
readout="mean",
softmax=self.softmax,
bases=self.num_bases,
heads=self.num_heads,
aggrs=self.aggrs,
)
def pretrained(self, model_dir):
assert not self.softmax
if len(self.aggrs) == 1:
assert "symadd" in self.aggrs
assert self.hidden == 236 and self.num_heads == 4 and self.num_bases == 4
model = "egc_s"
elif len(self.aggrs) == 3:
assert set(self.aggrs).issuperset({"add", "max", "mean"})
assert self.hidden == 224 and self.num_heads == 4 and self.num_bases == 4
model = "egc_m"
else:
raise ValueError
return load_pretrained(
self,
dataset_name="hiv",
model_name=model,
hidden=self.hidden,
model_dir=model_dir,
pretrained_conf=PRETRAINED_CONF,
)
class MpnnMolConfig(MolConfig):
def __init__(self, dataset, hidden, aggr) -> None:
super().__init__(dataset, hidden)
self.aggr = aggr
def model(self, hparams):
return MpnnHIVNet(
hidden_dim=self.hidden,
num_graph_layers=NUM_LAYERS,
in_feat_drop=hparams["dropout"],
residual=True,
aggr=self.aggr,
)
def pretrained(self, model_dir):
return load_pretrained(
self,
dataset_name="hiv",
model_name=f"mpnn_{self.aggr}",
hidden=self.hidden,
model_dir=model_dir,
pretrained_conf=PRETRAINED_CONF,
)
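# Construction sketch (hidden sizes follow PRETRAINED_CONF above; keyword
# arguments mirror the config signatures defined in this file):
#
#     config = GcnMolConfig(dataset="hiv", hidden=240)
#     config = MpnnMolConfig("hiv", hidden=180, aggr="max")
#     config = EgcMolConfig("hiv", hidden=224, softmax=False, num_bases=4,
#                           num_heads=4, aggrs="add,max,mean")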
|
43614
|
import angr
######################################
# accept (but not really)
######################################
class accept(angr.SimProcedure):
#pylint:disable=arguments-differ
def run(self, sockfd, addr, addrlen):
conc_addrlen = self.state.mem[addrlen].int.concrete
addr_data = self.state.solver.BVS('accept_addr', conc_addrlen*8, key=('api', 'accept', 'addr'))
self.state.memory.store(addr, addr_data)
ident = 'unknown'
if not sockfd.symbolic:
sockfd = self.state.solver.eval(sockfd)
if sockfd in self.state.posix.fd:
simsockfd = self.state.posix.fd[sockfd]
for potential_ident in self.state.posix.sockets:
if self.state.posix.sockets[potential_ident][0] is simsockfd.read_storage and \
self.state.posix.sockets[potential_ident][1] is simsockfd.write_storage:
ident = potential_ident
break
ident_counters = dict(self.state.globals.get('accept_idents', {}))
ident_counters[ident] = ident_counters.get(ident, 0) + 1
self.state.globals['accept_idents'] = ident_counters
fd = self.state.posix.open_socket(('accept', ident, ident_counters[ident]))
return fd
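# Hooking sketch (standard angr usage; the binary path is a placeholder):
#
#     import angr
#     proj = angr.Project("/path/to/server_binary", auto_load_libs=False)
#     proj.hook_symbol("accept", accept())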
|
43627
|
import panel as pn
def test_alert():
my_alert = pn.pane.Alert("foo", alert_type="primary")
my_button = pn.widgets.Button(name="Toggle")
def toggle(event):
if my_alert.alert_type == "primary":
my_alert.alert_type == "success"
else:
my_alert.alert_type = "primary"
my_alert.object = my_alert.alert_type
my_button.on_click(toggle)
pn.Row(my_alert, my_button).show()
test_alert()
|
43653
|
import torch
import torch.nn as nn
import argparse
from torch.utils.data import Dataset
import sys
'''
Block of net
'''
def net_block(n_in, n_out):
block = nn.Sequential(nn.Linear(n_in, n_out),
nn.BatchNorm1d(n_out),
nn.ReLU())
return block
class Model(nn.Module):
def __init__(self, n_input, n_hidden, num_class, opt, toplevel=False):
super(Model, self).__init__()
self.opt = opt
self.toplevel = toplevel
self.block1 = net_block(n_input, n_hidden)
self.dropout = nn.Dropout(p=0.1)
if (opt.glove or opt.sift or opt.prefix10m):
#if include skip connection:
#self.block_mid = net_block(n_hidden + n_input, n_hidden)
self.block_mid = net_block(n_hidden, n_hidden)
if toplevel:
self.block2 = net_block(n_hidden, n_hidden)
self.fc1 = nn.Linear(n_hidden, num_class)
self.softmax = nn.Softmax(dim=-1)
for m in self.modules():
if isinstance(m, nn.Linear):
nn.init.xavier_normal_(m.weight)
nn.init.constant_(m.bias, 0.01)
def forward(self, x):
y = self.block1(x)
#y = self.dropout(x1)
if self.opt.glove or self.opt.sift or self.opt.prefix10m:
#if include skip connection:
#y = self.block_mid(torch.cat([x, y], dim=1))
y = self.block_mid(y)
if self.toplevel:
y = self.block2(y)
y = self.dropout(y)
out = self.fc1(y)
out = self.softmax(out)
return out
def get_dataset(data, shuffle, param_batch_size):
X, y = data
dset = torch.utils.data.TensorDataset(X.float(), y)
loader = torch.utils.data.DataLoader(dataset=dset, batch_size=param_batch_size,
shuffle=shuffle)
return loader
def write_results(result, output):
result = (-result.detach().numpy()).argsort(axis=1)
for i in range(result.shape[0]):
output.write(" ".join([str(x) for x in result[i]]) + "\n")
def run(param_feat, param_lr, param_batch_size):
print("RUNNING WITH: features="+str(param_feat)+"; lr="+str(param_lr)+"; batch_size="+str(param_batch_size))
input_dim = 100
# read data
X, y = torch.load('./data/parts64/data.path')
import numpy as np
dataset = np.load('./data/parts64/dataset.npy')
queries = np.load('./data/parts64/queries.npy')
n_data = X.size(0)
split = int(n_data * 0.95)
trainloader = get_dataset((X[:split], y[:split]), shuffle=True, param_batch_size=param_batch_size)
valloader = get_dataset((X[split:], y[split:]), shuffle=False, param_batch_size=param_batch_size)
# build model
opt = argparse.Namespace(glove=False, sift=False, prefix10m=False)  # minimal stand-in for the argparse options Model checks
model = Model(n_input=input_dim, n_hidden=param_feat, num_class=64, opt=opt, toplevel=True).cuda()
# criterion
crit = nn.CrossEntropyLoss().cuda()
# optimizer
# optimizer = torch.optim.RMSprop(model.parameters(), args.lr)
lr = param_lr
optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=10**(-4))
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[20, 30, 35, 38, 39], gamma=0.1)
# start training!
losses = []
iterations = 40
for ep in range(1, iterations + 1):
print("==="+str(ep)+"===")
loss_sum = 0.
train_acc_tot = 0
train_n_tot = 0
scheduler.step()
for i, (X, y) in enumerate(trainloader):
y_pred = model(X.cuda())
loss = crit(y_pred, y.cuda())
optimizer.zero_grad()
loss.backward()
optimizer.step()
loss_sum += loss.item()
train_acc_tot += (y_pred.argmax(dim=1).cpu() == y).sum().item()
train_n_tot += X.size(0)
print("loss:", loss_sum)
print("train acc:", train_acc_tot*1. / train_n_tot)
losses.append(loss_sum / len(trainloader))
acc_tot = 0
n_tot = 0.
for i, (X, y) in enumerate(valloader):
y_pred = model(X.cuda())
acc_tot += (y_pred.argmax(dim=1).cpu() == y).sum().item()
n_tot += X.size(0)
print("val acc:", acc_tot / n_tot)
print("Doing inference and writing result files...")
# inference on data
batch_size = 10000
param_str = "_".join(sys.argv[1:])
with open("./data/parts64/data_prediction"+param_str+".txt","w") as output:
for b in range(0, n_data, batch_size):
data_batch_results = model(torch.from_numpy(dataset[b:b+batch_size]).float().cuda()).cpu()
write_results(data_batch_results, output)
# inference on queries
query_results = model(torch.from_numpy(queries).float().cuda()).cpu()
with open("./data/parts64/queries_prediction"+param_str+".txt","w") as output:
write_results(query_results, output)
if __name__ == "__main__":
run(int(sys.argv[1]), float(sys.argv[2]), int(sys.argv[3]))
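# Invocation sketch (script name is a placeholder; positional args map to
# features, lr and batch_size in run(), and ./data/parts64/ must exist):
#
#     python train_parts64.py 512 0.001 256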
|
43688
|
import copy
import torch as th
from torch.optim import Adam
import torch.nn.functional as F
from torch.nn.utils import clip_grad_norm_
class SACLearner:
def __init__(self, mac, args):
self.args = args
self.mac = mac
self.params = list(mac.parameters())
self.learn_cnt= 0
self.optimiser = Adam(self.params, lr=args.lr)
self.clip_grad_param = 1
# a little wasteful to deepcopy (e.g. duplicates action selector), but should work for any MAC
self.target_mac = copy.deepcopy(mac)
self.target_param = list(self.target_mac.parameters())
self.last_target_update_episode = 0
self.tau = self.args.tau
self.gpu_enable = True
def soft_update(self, local_model, target_model):
for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):
target_param.data.copy_(self.tau*local_param.data + (1.0-self.tau)*target_param.data)
def _update_targets(self):
for target_param, local_param in zip(self.target_mac.parameters(), self.mac.parameters()):
target_param.data.copy_(self.tau*local_param.data + (1.0-self.tau)*target_param.data)
def train(self, batch):
# Get the relevant quantities
obs = batch['obs']
feat = batch['feat']
avail = batch['avail']
action_list = batch['action']
reward_list = batch['reward']
next_obs = batch['next_obs']
next_feat = batch['next_feat']
mask_list = 1 - batch['done']
next_avail = batch['next_avail']
y_list = [0]
obs = th.FloatTensor(obs)
feat = th.FloatTensor(feat)
avail = th.FloatTensor(avail)
a = th.LongTensor(action_list)#batch.action))
rew = th.FloatTensor(reward_list)##batch.reward))
rew = rew.view(-1)#, 1)
mask = th.LongTensor(mask_list)#batch.mask))
mask = mask.view(-1)#, 1)
next_obs = th.FloatTensor(next_obs)
next_feat = th.FloatTensor(next_feat)
next_avail = th.FloatTensor(next_avail)
ind = th.arange(a.shape[0])
if self.gpu_enable:
obs = obs.cuda()
feat = feat.cuda()
avail = avail.cuda()
a = a.cuda()
rew = rew.cuda()
mask = mask.cuda()
next_obs = next_obs.cuda()
next_feat = next_feat.cuda()
next_avail = next_avail.cuda()
ind = ind.cuda()
# Calculate estimated Q-Values
mac_out, _ = self.mac.forward([[obs, feat], avail])
# Pick the Q-Values for the actions taken by each agent
chosen_action_qvals = mac_out[ind, a] # Remove the last dim
target_mac_out = self.target_mac.forward([[next_obs, next_feat], next_avail])[0]
# ---------------------------- update actor ---------------------------- #
current_alpha = copy.deepcopy(self.mac.agent.alpha)
_, action_probs, log_pis = self.mac.get_act_probs([[obs, feat], avail])
q1 = self.mac.agent.critic1([obs, feat])
V1 = (action_probs.cuda() * q1.cuda()).sum(1)
q2 = self.mac.agent.critic2([obs, feat])
V2 = (action_probs.cuda() * q2.cuda()).sum(1)
min_Q = action_probs.cuda() * th.min(q1,q2)
actor_loss = (self.mac.agent.alpha.cuda() * log_pis.cuda() - min_Q).sum(1).mean()
self.mac.agent.actor_optimizer.zero_grad()
actor_loss.backward(retain_graph=True)
self.mac.agent.actor_optimizer.step()
# Compute alpha loss
entropy = (log_pis * action_probs).sum(1)
alpha_loss = - (self.mac.agent.log_alpha.exp() * (entropy.cpu() + self.mac.agent.target_entropy).detach().cpu()).mean()
self.mac.agent.alpha_optimizer.zero_grad()
alpha_loss.backward(retain_graph=True)
self.mac.agent.alpha_optimizer.step()
self.mac.agent.alpha = self.mac.agent.log_alpha.exp().detach()
# ---------------------------- update critic ---------------------------- #
# Get predicted next-state actions and Q values from target models
with th.no_grad():
idx = th.eq(mask, 1)
Q_targets = rew
_, action_probs_next, log_pis_next = self.mac.get_act_probs\
([[next_obs[idx],next_feat[idx]],None])
Q_target1_next = self.mac.agent.critic1_target([next_obs[idx],next_feat[idx]])
Q_target2_next = self.mac.agent.critic2_target([next_obs[idx],next_feat[idx]])
V1_next = (action_probs_next.cuda() * Q_target1_next).sum(1)
V2_next = (action_probs_next.cuda() * Q_target2_next).sum(1)
V_target_next = th.min(V1_next,V2_next) - (self.mac.agent.alpha.cuda()* log_pis_next.cuda()).sum(1)
# Compute Q targets for current states (y_i)
Q_targets[idx] = rew[idx] + (self.args.gamma * V_target_next)
# Compute critic loss
critic1_loss = 0.5 * F.mse_loss(V1, Q_targets)
critic2_loss = 0.5 * F.mse_loss(V2, Q_targets)
# Update critics
# critic 1
self.mac.agent.critic1_optimizer.zero_grad()
critic1_loss.backward(retain_graph=True)
clip_grad_norm_(self.mac.agent.critic1.parameters(), self.clip_grad_param)
self.mac.agent.critic1_optimizer.step()
# critic 2
self.mac.agent.critic2_optimizer.zero_grad()
critic2_loss.backward()
clip_grad_norm_(self.mac.agent.critic2.parameters(), self.clip_grad_param)
self.mac.agent.critic2_optimizer.step()
self.learn_cnt += 1
if self.learn_cnt / self.args.target_update_interval >= 1.0:
self._update_targets()
self.soft_update(self.mac.agent.critic1, self.mac.agent.critic1_target)
self.soft_update(self.mac.agent.critic2, self.mac.agent.critic2_target)
self.learn_cnt = 0
return {
'actor_loss': actor_loss.item(),
'alpha_loss': alpha_loss.item(),
'critic1_loss': critic1_loss.item(),
'critic2_loss': critic2_loss.item()
}
def cuda(self):
self.mac.cuda()
self.target_mac.cuda()
def save_models(self, path):
self.mac.save_models(path)
th.save(self.optimiser.state_dict(), "{}/opt.th".format(path))
def load_models(self, path):
self.mac.load_models(path)
# Not quite right but I don't want to save target networks
self.target_mac.load_models(path)
self.optimiser.load_state_dict(th.load("{}/opt.th".format(path), map_location=lambda storage, loc: storage))
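# Note: soft_update() and _update_targets() both apply the Polyak update
#     theta_target <- tau * theta_local + (1 - tau) * theta_target,
# which train() triggers once learn_cnt reaches args.target_update_interval.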
|
43697
|
import numpy as np
import pytest
import pytoolkit as tk
def test_load_voc_od_split(data_dir):
ds = tk.datasets.load_voc_od_split(data_dir / "od", split="train")
assert len(ds) == 3
assert tuple(ds.metadata["class_names"]) == ("~", "〇")
ann = ds.labels[0]
assert ann.path == (data_dir / "od" / "JPEGImages" / "無題.jpg")
assert ann.width == 768
assert ann.height == 614
assert len(ann.classes) == 1
assert ann.classes[0] == 0
assert (ann.difficults == np.array([False])).all()
assert ann.bboxes[0] == pytest.approx(
np.array([203 - 1, 255 - 1, 601 - 1, 355 - 1]) / [768, 614, 768, 614]
)
|
43700
|
try:
from . import generic as g
except BaseException:
import generic as g
class AdjacencyTest(g.unittest.TestCase):
def test_radius(self):
for radius in [0.1, 1.0, 3.1459, 29.20]:
m = g.trimesh.creation.cylinder(
radius=radius, height=radius * 10)
# remove the cylinder cap
signs = (g.np.sign(m.vertices[:, 2]) < 0)[m.faces]
not_cap = ~g.np.logical_or(
signs.all(axis=1), ~signs.any(axis=1))
m.update_faces(not_cap)
# compare the calculated radius
radii = m.face_adjacency_radius
radii = radii[g.np.isfinite(radii)]
assert g.np.allclose(radii, radius, atol=radius / 100)
if __name__ == '__main__':
g.trimesh.util.attach_to_log()
g.unittest.main()
|
43711
|
import datetime
import io
import json
import zipfile
from pathlib import Path
import pyrsistent
import pytest
import yaml
from aiohttp import web
from openapi_core.shortcuts import create_spec
from yarl import URL
from rororo import (
BaseSettings,
get_openapi_context,
get_openapi_schema,
get_openapi_spec,
openapi_context,
OperationTableDef,
setup_openapi,
setup_settings_from_environ,
)
from rororo.annotations import DictStrAny
from rororo.openapi import get_validated_data
from rororo.openapi.exceptions import (
ConfigurationError,
OperationError,
validation_error_context,
ValidationError,
)
ROOT_PATH = Path(__file__).parent
INVALID_OPENAPI_JSON_PATH = ROOT_PATH / "invalid-openapi.json"
INVALID_OPENAPI_YAML_PATH = ROOT_PATH / "invalid-openapi.yaml"
OPENAPI_JSON_PATH = ROOT_PATH / "openapi.json"
OPENAPI_YAML_PATH = ROOT_PATH / "openapi.yaml"
TEST_NESTED_OBJECT = {
"uid": "6fccda1b-0873-4c8a-bceb-a2acfe5851da",
"type": "nested-object",
"data": {
"data_item": {"key": "value1", "any_data": {}},
"data_items": [
{"key": "value2", "any_data": {"two": 2}},
{"key": "value3", "any_data": {"three": 3}},
],
"str_items": ["1", "2", "3"],
},
"any_data": {"key1": "value1", "key2": "value2", "list": [1, 2, 3]},
}
operations = OperationTableDef()
invalid_operations = OperationTableDef()
def custom_json_loader(content: bytes) -> DictStrAny:
return json.load(io.BytesIO(content))
def custom_yaml_loader(content: bytes) -> DictStrAny:
return yaml.load(content, Loader=yaml.SafeLoader)
@invalid_operations.register("does-not-exist")
async def does_not_exist(request: web.Request) -> web.Response:
return web.Response(text="Hello, world!")
@operations.register("create-post")
async def create_post(request: web.Request) -> web.Response:
data = get_validated_data(request)
published_at: datetime.datetime = data["published_at"]
with validation_error_context("body", "published_at"):
if published_at.tzinfo is None:
raise ValidationError(message="Invalid value")
return web.json_response(
{**data, "id": 1, "published_at": data["published_at"].isoformat()},
status=201,
)
@operations.register
async def hello_world(request: web.Request) -> web.Response:
with openapi_context(request) as context:
name = context.parameters.query.get("name") or "world"
email = context.parameters.query.get("email") or "<EMAIL>"
return web.json_response(
{"message": f"Hello, {name}!", "email": email}
)
@operations.register
async def retrieve_any_object_from_request_body(
request: web.Request,
) -> web.Response:
return web.json_response(pyrsistent.thaw(get_validated_data(request)))
@operations.register
async def retrieve_array_from_request_body(
request: web.Request,
) -> web.Response:
with openapi_context(request) as context:
return web.json_response(pyrsistent.thaw(context.data))
@operations.register
async def retrieve_empty(request: web.Request) -> web.Response:
context = get_openapi_context(request)
return web.Response(
status=204, headers={"X-API-Key": context.security.get("apiKey") or ""}
)
@operations.register
async def retrieve_invalid_response(request: web.Request) -> web.Response:
return web.json_response({})
@operations.register
async def retrieve_post(request: web.Request) -> web.Response:
context = get_openapi_context(request)
return web.json_response(
{"id": context.parameters.path["post_id"], "title": "The Post"}
)
@operations.register
async def retrieve_nested_object_from_request_body(
request: web.Request,
) -> web.Response:
with openapi_context(request) as context:
data = pyrsistent.thaw(context.data)
data["uid"] = str(data["uid"])
return web.json_response(
data,
headers={
"X-Data-Type": str(type(context.data)),
"X-Data-Data-Data-Items-Type": str(
type(context.data["data"]["data_items"])
),
"X-Data-Data-Str-Items-Type": str(
type(context.data["data"]["str_items"])
),
"X-Data-UID-Type": str(type(context.data["uid"])),
},
)
@operations.register
async def retrieve_zip(request: web.Request) -> web.Response:
output = io.BytesIO()
with zipfile.ZipFile(output, "w") as handler:
handler.writestr("hello.txt", "Hello, world!")
output.seek(0)
return web.Response(
body=output,
content_type="application/zip",
headers={"Content-Disposition": "attachment; filename=hello.zip"},
)
@operations.register
async def upload_image(request: web.Request) -> web.Response:
return web.Response(
body=get_openapi_context(request).data,
content_type=request.content_type,
status=201,
)
@operations.register
async def upload_text(request: web.Request) -> web.Response:
return web.Response(
text=get_openapi_context(request).data,
content_type=request.content_type,
status=201,
)
@pytest.mark.parametrize("schema_path", (OPENAPI_JSON_PATH, OPENAPI_YAML_PATH))
async def test_any_object_request_body(aiohttp_client, schema_path):
app = setup_openapi(
web.Application(), schema_path, operations, server_url=URL("/api/")
)
client = await aiohttp_client(app)
response = await client.post("/api/any-object", json=TEST_NESTED_OBJECT)
assert response.status == 200
assert await response.json() == TEST_NESTED_OBJECT
@pytest.mark.parametrize(
"data, expected_status, expected_response",
(
(
{},
422,
{"detail": [{"loc": ["body"], "message": "[] is too short"}]},
),
(
[],
422,
{"detail": [{"loc": ["body"], "message": "[] is too short"}]},
),
(
[""],
422,
{"detail": [{"loc": ["body", 0], "message": "'' is too short"}]},
),
(["Hello", "world!"], 200, ["Hello", "world!"]),
),
)
async def test_array_request_body(
aiohttp_client, data, expected_status, expected_response
):
app = setup_openapi(
web.Application(),
OPENAPI_YAML_PATH,
operations,
server_url=URL("/api"),
)
client = await aiohttp_client(app)
response = await client.post("/api/array", json=data)
assert response.status == expected_status
assert await response.json() == expected_response
@pytest.mark.parametrize("schema_path", (OPENAPI_JSON_PATH, OPENAPI_YAML_PATH))
async def test_create_post_201(aiohttp_client, schema_path):
app = setup_openapi(
web.Application(), schema_path, operations, server_url="/api/"
)
published_at = "2020-04-01T12:00:00+02:00"
client = await aiohttp_client(app)
response = await client.post(
"/api/create-post",
json={
"title": "Post",
"slug": "post",
"content": "Post Content",
"published_at": published_at,
},
)
assert response.status == 201
assert await response.json() == {
"id": 1,
"title": "Post",
"slug": "post",
"content": "Post Content",
"published_at": published_at,
}
@pytest.mark.parametrize(
"schema_path, invalid_data, expected_detail",
(
(
OPENAPI_JSON_PATH,
{},
[
{"loc": ["body", "title"], "message": "Field required"},
{"loc": ["body", "slug"], "message": "Field required"},
{"loc": ["body", "content"], "message": "Field required"},
{"loc": ["body", "published_at"], "message": "Field required"},
],
),
(
OPENAPI_YAML_PATH,
{"title": "Title"},
[
{"loc": ["body", "slug"], "message": "Field required"},
{"loc": ["body", "content"], "message": "Field required"},
{"loc": ["body", "published_at"], "message": "Field required"},
],
),
(
OPENAPI_JSON_PATH,
{"title": "Title", "slug": "slug"},
[
{"loc": ["body", "content"], "message": "Field required"},
{"loc": ["body", "published_at"], "message": "Field required"},
],
),
(
OPENAPI_YAML_PATH,
{"title": "Title", "slug": "slug", "content": "Content"},
[{"loc": ["body", "published_at"], "message": "Field required"}],
),
),
)
async def test_create_post_422(
aiohttp_client, schema_path, invalid_data, expected_detail
):
app = setup_openapi(
web.Application(),
schema_path,
operations,
server_url=URL("/dev-api"),
)
client = await aiohttp_client(app)
response = await client.post("/dev-api/create-post", json=invalid_data)
assert response.status == 422
assert (await response.json())["detail"] == expected_detail
@pytest.mark.parametrize(
"schema_path, schema_loader",
(
(OPENAPI_JSON_PATH, custom_json_loader),
(OPENAPI_YAML_PATH, custom_yaml_loader),
),
)
def test_custom_schema_loader(schema_path, schema_loader):
app = setup_openapi(
web.Application(),
schema_path,
operations,
server_url="/api/",
schema_loader=schema_loader,
)
assert isinstance(get_openapi_schema(app), dict)
@pytest.mark.parametrize("schema_path", (OPENAPI_JSON_PATH, OPENAPI_YAML_PATH))
async def test_email_format(aiohttp_client, schema_path):
app = setup_openapi(
web.Application(), schema_path, operations, server_url="/api/"
)
client = await aiohttp_client(app)
response = await client.get(
"/api/hello", params={"email": "<EMAIL>"}
)
assert response.status == 200
assert (await response.json())["email"] == "<EMAIL>"
@pytest.mark.parametrize("schema_path", (OPENAPI_JSON_PATH, OPENAPI_YAML_PATH))
async def test_invalid_parameter_format(aiohttp_client, schema_path):
app = setup_openapi(
web.Application(), schema_path, operations, server_url="/api/"
)
client = await aiohttp_client(app)
response = await client.get("/api/posts/not-an-integer")
assert response.status == 422
assert await response.json() == {
"detail": [
{
"loc": ["parameters", "post_id"],
"message": "'not-an-integer' is not a type of 'integer'",
}
]
}
@pytest.mark.parametrize("schema_path", (OPENAPI_JSON_PATH, OPENAPI_YAML_PATH))
async def test_invalid_parameter_value(aiohttp_client, schema_path):
app = setup_openapi(
web.Application(), schema_path, operations, server_url="/api/"
)
client = await aiohttp_client(app)
response = await client.get("/api/posts/0")
assert response.status == 422
assert await response.json() == {
"detail": [
{
"loc": ["parameters", "post_id"],
"message": "0 is less than the minimum of 1",
}
]
}
def test_get_openapi_schema_no_schema():
with pytest.raises(ConfigurationError):
get_openapi_schema(web.Application())
def test_get_openapi_spec_no_spec():
with pytest.raises(ConfigurationError):
get_openapi_spec(web.Application())
@pytest.mark.parametrize("schema_path", (OPENAPI_JSON_PATH, OPENAPI_YAML_PATH))
async def test_multiple_request_errors(aiohttp_client, schema_path):
app = setup_openapi(
web.Application(), schema_path, operations, server_url="/api/"
)
client = await aiohttp_client(app)
response = await client.get("/api/hello?name=&email=")
assert response.status == 422
assert await response.json() == {
"detail": [
{
"loc": ["parameters", "name"],
"message": "Empty parameter value",
},
{
"loc": ["parameters", "email"],
"message": "Empty parameter value",
},
]
}
@pytest.mark.parametrize(
"schema_path, query_string, expected_message",
(
(OPENAPI_JSON_PATH, None, "Hello, world!"),
(OPENAPI_JSON_PATH, "?name=Name", "Hello, Name!"),
(str(OPENAPI_JSON_PATH), None, "Hello, world!"),
(str(OPENAPI_JSON_PATH), "?name=Name", "Hello, Name!"),
(OPENAPI_YAML_PATH, None, "Hello, world!"),
(OPENAPI_YAML_PATH, "?name=Name", "Hello, Name!"),
(str(OPENAPI_YAML_PATH), None, "Hello, world!"),
(str(OPENAPI_YAML_PATH), "?name=Name", "Hello, Name!"),
),
)
async def test_openapi(
aiohttp_client, schema_path, query_string, expected_message
):
app = setup_openapi(
web.Application(), schema_path, operations, server_url="/api"
)
client = await aiohttp_client(app)
url = "/api/hello"
response = await client.get(
f"{url}{query_string}" if query_string is not None else url
)
assert response.status == 200
assert (await response.json())["message"] == expected_message
@pytest.mark.parametrize("is_enabled", (False, True))
async def test_openapi_validate_response(aiohttp_client, is_enabled):
app = web.Application()
setup_openapi(
app,
OPENAPI_YAML_PATH,
operations,
server_url="/api",
is_validate_response=is_enabled,
)
client = await aiohttp_client(app)
response = await client.get("/api/hello")
assert response.status == 200
assert await response.json() == {
"message": "Hello, world!",
"email": "<EMAIL>",
}
@pytest.mark.parametrize(
"has_openapi_schema_handler, url, expected_status",
(
(True, "/api/openapi.json", 200),
(False, "/api/openapi.yaml", 404),
(True, "/api/openapi.yaml", 200),
(False, "/api/openapi.yaml", 404),
(True, "/api/openapi.txt", 500),
(False, "/api/openapi.txt", 404),
),
)
async def test_openapi_schema_handler(
aiohttp_client, has_openapi_schema_handler, url, expected_status
):
app = web.Application()
setup_openapi(
app,
OPENAPI_YAML_PATH,
operations,
server_url=URL("/api"),
has_openapi_schema_handler=has_openapi_schema_handler,
)
client = await aiohttp_client(app)
response = await client.get(url)
assert response.status == expected_status
@pytest.mark.parametrize(
"schema_path, headers, expected",
(
(OPENAPI_JSON_PATH, {}, ""),
(OPENAPI_JSON_PATH, {"X-API-Key": "apiKey"}, "apiKey"),
(OPENAPI_YAML_PATH, {}, ""),
(OPENAPI_YAML_PATH, {"X-API-Key": "apiKey"}, "apiKey"),
),
)
async def test_optional_security_scheme(
aiohttp_client, schema_path, headers, expected
):
app = setup_openapi(
web.Application(), schema_path, operations, server_url="/api/"
)
client = await aiohttp_client(app)
response = await client.get("/api/empty", headers=headers)
assert response.status == 204
assert response.headers["X-API-Key"] == expected
@pytest.mark.parametrize("schema_path", (OPENAPI_JSON_PATH, OPENAPI_YAML_PATH))
async def test_request_body_nested_object(aiohttp_client, schema_path):
app = setup_openapi(
web.Application(), schema_path, operations, server_url="/api/"
)
client = await aiohttp_client(app)
response = await client.post("/api/nested-object", json=TEST_NESTED_OBJECT)
assert response.status == 200
assert response.headers["X-Data-Type"] == "<class 'pyrsistent._pmap.PMap'>"
assert (
response.headers["X-Data-Data-Data-Items-Type"]
== "<class 'pvectorc.PVector'>"
)
assert (
response.headers["X-Data-Data-Str-Items-Type"]
== "<class 'pvectorc.PVector'>"
)
assert response.headers["X-Data-UID-Type"] == "<class 'uuid.UUID'>"
assert await response.json() == TEST_NESTED_OBJECT
@pytest.mark.parametrize(
"schema_path, loader",
(
(OPENAPI_JSON_PATH, custom_json_loader),
(OPENAPI_YAML_PATH, custom_yaml_loader),
),
)
async def test_setup_openapi_schema_and_spec(
aiohttp_client, schema_path, loader
):
schema = loader(schema_path.read_bytes())
spec = create_spec(schema)
app = setup_openapi(
web.Application(),
operations,
schema=schema,
spec=spec,
server_url="/api/",
)
client = await aiohttp_client(app)
response = await client.get("/api/hello")
assert response.status == 200
assert await response.json() == {
"message": "Hello, world!",
"email": "<EMAIL>",
}
@pytest.mark.parametrize(
"schema_path, loader",
(
(OPENAPI_JSON_PATH, custom_json_loader),
(OPENAPI_YAML_PATH, custom_yaml_loader),
),
)
async def test_setup_openapi_schema_and_path_ignore_invalid_schema_path(
aiohttp_client, schema_path, loader
):
schema = loader(schema_path.read_bytes())
spec = create_spec(schema)
setup_openapi(
web.Application(),
INVALID_OPENAPI_JSON_PATH,
operations,
schema=schema,
spec=spec,
server_url="/api/",
)
@pytest.mark.parametrize("schema_path", (OPENAPI_JSON_PATH, OPENAPI_YAML_PATH))
def test_setup_openapi_invalid_operation(schema_path):
with pytest.raises(OperationError):
setup_openapi(
web.Application(),
schema_path,
invalid_operations,
server_url="/api",
)
def test_setup_openapi_invalid_path():
with pytest.raises(ConfigurationError):
setup_openapi(
web.Application(), ROOT_PATH / "does-not-exist.yaml", operations
)
def test_setup_openapi_invalid_file():
with pytest.raises(ConfigurationError):
setup_openapi(web.Application(), ROOT_PATH / "settings.py", operations)
@pytest.mark.parametrize(
"schema_path", (INVALID_OPENAPI_JSON_PATH, INVALID_OPENAPI_YAML_PATH)
)
def test_setup_openapi_invalid_spec(schema_path):
with pytest.raises(ConfigurationError):
setup_openapi(web.Application(), schema_path, operations)
@pytest.mark.parametrize(
"schema_path, level, url, expected_status",
(
(OPENAPI_JSON_PATH, "test", "/api/hello", 200),
(OPENAPI_JSON_PATH, "test", "/dev-api/hello", 404),
(OPENAPI_YAML_PATH, "test", "/api/hello", 200),
(OPENAPI_YAML_PATH, "test", "/dev-api/hello", 404),
(OPENAPI_JSON_PATH, "dev", "/api/hello", 404),
(OPENAPI_JSON_PATH, "dev", "/dev-api/hello", 200),
(OPENAPI_YAML_PATH, "dev", "/api/hello", 404),
(OPENAPI_YAML_PATH, "dev", "/dev-api/hello", 200),
),
)
async def test_setup_openapi_server_url_from_settings(
monkeypatch, aiohttp_client, schema_path, level, url, expected_status
):
monkeypatch.setenv("LEVEL", level)
app = setup_openapi(
setup_settings_from_environ(web.Application(), BaseSettings),
schema_path,
operations,
)
client = await aiohttp_client(app)
response = await client.get(url)
assert response.status == expected_status
@pytest.mark.parametrize("schema_path", (OPENAPI_JSON_PATH, OPENAPI_YAML_PATH))
def test_setup_openapi_server_url_invalid_level(monkeypatch, schema_path):
monkeypatch.setenv("LEVEL", "prod")
with pytest.raises(ConfigurationError):
setup_openapi(
setup_settings_from_environ(web.Application(), BaseSettings),
schema_path,
operations,
)
@pytest.mark.parametrize("schema_path", (OPENAPI_JSON_PATH, OPENAPI_YAML_PATH))
def test_setup_openapi_server_url_does_not_set(schema_path):
with pytest.raises(ConfigurationError):
setup_openapi(web.Application(), schema_path, operations)
@pytest.mark.parametrize("schema_path", (OPENAPI_JSON_PATH, OPENAPI_YAML_PATH))
async def test_upload_image(aiohttp_client, schema_path):
blank_png = (Path(__file__).parent / "data" / "blank.png").read_bytes()
app = setup_openapi(
web.Application(), schema_path, operations, server_url="/api"
)
client = await aiohttp_client(app)
response = await client.post(
"/api/upload-image",
data=blank_png,
headers={"Content-Type": "image/png"},
)
assert response.status == 201
assert await response.read() == blank_png
@pytest.mark.parametrize("schema_path", (OPENAPI_JSON_PATH, OPENAPI_YAML_PATH))
async def test_upload_text(aiohttp_client, schema_path):
text = "Hello, world! And other things..."
app = setup_openapi(
web.Application(), schema_path, operations, server_url="/api"
)
client = await aiohttp_client(app)
response = await client.post(
"/api/upload-text",
data=text.encode("utf-8"),
headers={"Content-Type": "text/plain"},
)
assert response.status == 201
assert await response.text() == text
@pytest.mark.parametrize("schema_path", (OPENAPI_JSON_PATH, OPENAPI_YAML_PATH))
async def test_validate_binary_response(aiohttp_client, schema_path):
app = setup_openapi(
web.Application(),
schema_path,
operations,
server_url="/api",
is_validate_response=True,
)
client = await aiohttp_client(app)
response = await client.get("/api/download.zip")
assert response.status == 200
assert response.content_type == "application/zip"
content = io.BytesIO(await response.read())
with zipfile.ZipFile(content) as handler:
with handler.open("hello.txt") as item:
assert item.read() == b"Hello, world!"
@pytest.mark.parametrize("schema_path", (OPENAPI_JSON_PATH, OPENAPI_YAML_PATH))
async def test_validate_empty_response(aiohttp_client, schema_path):
app = setup_openapi(
web.Application(),
schema_path,
operations,
server_url="/api",
is_validate_response=True,
)
client = await aiohttp_client(app)
response = await client.get("/api/empty")
assert response.status == 204
@pytest.mark.parametrize(
"schema_path, is_validate_response, expected_status",
(
(OPENAPI_JSON_PATH, False, 200),
(OPENAPI_JSON_PATH, True, 422),
(OPENAPI_YAML_PATH, False, 200),
(OPENAPI_YAML_PATH, True, 422),
),
)
async def test_validate_response(
aiohttp_client, schema_path, is_validate_response, expected_status
):
app = setup_openapi(
web.Application(),
schema_path,
operations,
server_url="/api",
is_validate_response=is_validate_response,
)
client = await aiohttp_client(app)
response = await client.get("/api/invalid-response")
assert response.status == expected_status
@pytest.mark.parametrize("schema_path", (OPENAPI_JSON_PATH, OPENAPI_YAML_PATH))
async def test_validate_response_error(aiohttp_client, schema_path):
app = setup_openapi(
web.Application(),
schema_path,
operations,
server_url="/api",
is_validate_response=True,
)
client = await aiohttp_client(app)
response = await client.get("/api/invalid-response")
assert response.status == 422
assert await response.json() == {
"detail": [
{"loc": ["response", "uid"], "message": "Field required"},
{"loc": ["response", "type"], "message": "Field required"},
{"loc": ["response", "data"], "message": "Field required"},
{"loc": ["response", "any_data"], "message": "Field required"},
]
}
|
43714
|
import gensim
import sys
import glob
from nltk.tokenize import RegexpTokenizer
class CorpusReader():
"""
Reads a corpus of plain-text files and yields tokenized sentences.
"""
def __init__(self, files):
if isinstance(files, str):
self.files = [files]
else:
self.files = files
self.tokenizer = RegexpTokenizer(r'\w+')
def __iter__(self):
"""
Generator that returns a list of tokens for each sentence.
:return: list of tokens
"""
for f in self.files:
print "Processing ", f
for line in open(f, "r"):
try:
yield self.tokenizer.tokenize(line.decode("utf-8"))
except:
pass
print "Starting W2V training..."
files = glob.glob(sys.argv[1])
outfile_name = sys.argv[2]
dataset = CorpusReader(files)
model = gensim.models.Word2Vec(dataset, size=500, window=5, min_count=3, negative=5, workers=15)
model.save(outfile_name)
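# Hedged usage note (not part of the original script; a gensim 3.x API is assumed
# because of the `size=` keyword above): the trained vectors can later be queried with
#   model = gensim.models.Word2Vec.load(outfile_name)
#   model.wv.most_similar("some_word", topn=10)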
|
43792
|
from bokeh.models.sources import ColumnDataSource
from bokeh.transform import cumsum
from functools import partial
from typing import List, Type
from jira_analysis.chart.base import Axis, IChart, Chart
from jira_analysis.defect_rate.issue import Issue
from .plot.donut import DefectRateDonut
def generate_defect_chart(
issues: List[Issue], chart_class: Type[IChart] = Chart
) -> None:
chart = chart_class(
label=None,
x=Axis(label="", values=None, size=600),
y=Axis(label="", values=None, size=300),
tooltips="@value: @defect_rate{0.1f}%",
)
DefectRateDonut(
issues=issues,
data_source=ColumnDataSource,
no_defects_transform=partial(cumsum, include_zero=True),
defects_transform=cumsum,
).draw(chart)
chart.render()
|
43799
|
import soundfile as sf
import torch
import torch.nn.functional as F
from fairseq.data import Dictionary
from bol.utils.helper_functions import move_to_cuda
def get_feature(filepath):
def postprocess(feats, sample_rate):
        if feats.dim() == 2:
feats = feats.mean(-1)
assert feats.dim() == 1, feats.dim()
with torch.no_grad():
feats = F.layer_norm(feats, feats.shape)
return feats
wav, sample_rate = sf.read(filepath)
feats = torch.from_numpy(wav).float()
feats = postprocess(feats, sample_rate)
return feats
def post_process(sentence: str, symbol: str):
if symbol == "sentencepiece":
sentence = sentence.replace(" ", "").replace("\u2581", " ").strip()
elif symbol == "wordpiece":
sentence = sentence.replace(" ", "").replace("_", " ").strip()
elif symbol == "letter":
sentence = sentence.replace(" ", "").replace("|", " ").strip()
elif symbol == "_EOW":
sentence = sentence.replace(" ", "").replace("_EOW", " ").strip()
elif symbol is not None and symbol != "none":
sentence = (sentence + " ").replace(symbol, "").rstrip()
return sentence
def get_results_for_single_file(
wav_path, dict_path, generator, model, use_cuda=False, half=None
):
sample = dict()
net_input = dict()
target_dict = Dictionary.load(dict_path)
feature = get_feature(wav_path)
model.eval()
if half:
net_input["source"] = feature.unsqueeze(0).half()
else:
net_input["source"] = feature.unsqueeze(0)
padding_mask = (
torch.BoolTensor(net_input["source"].size(1)).fill_(False).unsqueeze(0)
)
net_input["padding_mask"] = padding_mask
sample["net_input"] = net_input
sample = move_to_cuda(sample) if use_cuda else sample
with torch.no_grad():
hypo = generator.generate(model, sample, prefix_tokens=None)
hyp_pieces = target_dict.string(hypo[0][0]["tokens"].int().cpu())
text = post_process(hyp_pieces, "letter")
return text
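# Hedged usage sketch (the generator and model objects are built elsewhere; the file
# names below are placeholders, not from this file):
#   text = get_results_for_single_file("audio.wav", "dict.ltr.txt", generator, model,
#                                      use_cuda=torch.cuda.is_available())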
|
43844
|
from datetime import datetime
from getopt import GetoptError, getopt
from socket import AF_INET, SOCK_STREAM, socket as Socket
from sys import stderr
from typing import List, NoReturn
from ..io import DVRIPClient
from ..message import EPOCH
from . import EX_USAGE, guard, prog_connect
def usage() -> NoReturn:
print('Usage: {} log [-s START] [-e END]'.format(prog_connect()),
file=stderr)
exit(EX_USAGE)
def run(host: str,
serv: int,
username: str,
password: str,
args: List[str]
) -> None:
try:
opts, args = getopt(args, 's:e:')
except GetoptError:
usage()
if args:
usage()
start = EPOCH
end = datetime.now()
for opt, arg in opts:
if opt == '-s':
from dateparser import parse # type: ignore
start = parse(arg)
if start is None:
usage()
if opt == '-e':
from dateparser import parse # type: ignore
end = parse(arg)
if end is None:
usage()
conn = DVRIPClient(Socket(AF_INET, SOCK_STREAM))
conn.connect((host, serv), username, password)
try:
for entry in conn.log(start=start, end=end):
print('{:>8} {} {:>12} {}'
.format(entry.number,
entry.time.isoformat(),
entry.type.name.lower(),
entry.data))
finally:
conn.logout()
def main() -> None:
from sys import argv
from . import host, serv, username, password
if host() is None:
usage()
guard(run, host(), serv(), username(), password(), argv[1:])
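# Hedged example invocation (the program name and dates are placeholders; -s/-e accept
# anything dateparser understands):
#   <prog> log -s "2 days ago" -e "now"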
|
43858
|
import torch.utils.data as data
import os
import numpy as np
import cv2
#/mnt/lustre/share/dingmingyu/new_list_lane.txt
class MyDataset(data.Dataset):
def __init__(self, file, dir_path, new_width, new_height, label_width, label_height):
imgs = []
fw = open(file, 'r')
lines = fw.readlines()
for line in lines:
words = line.strip().split()
imgs.append((words[0], words[1]))
self.imgs = imgs
self.dir_path = dir_path
self.height = new_height
self.width = new_width
self.label_height = label_height
self.label_width = label_width
def __getitem__(self, index):
path, label = self.imgs[index]
path = os.path.join(self.dir_path, path)
img = cv2.imread(path).astype(np.float32)
img = img[:,:,:3]
img = cv2.resize(img, (self.width, self.height))
img -= [104, 117, 123]
img = img.transpose(2, 0, 1)
gt = cv2.imread(label,-1)
gt = cv2.resize(gt, (self.label_width, self.label_height), interpolation = cv2.INTER_NEAREST)
if len(gt.shape) == 3:
gt = gt[:,:,0]
gt_num_list = list(np.unique(gt))
gt_num_list.remove(0)
target_ins = np.zeros((4, gt.shape[0],gt.shape[1])).astype('uint8')
for index, ins in enumerate(gt_num_list):
target_ins[index,:,:] += (gt==ins)
return img, target_ins, len(gt_num_list)
def __len__(self):
return len(self.imgs)
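# Hedged usage sketch (paths and sizes are placeholders, not from this file):
#   dataset = MyDataset("train_list.txt", "/path/to/images", 976, 208, 976, 208)
#   loader = data.DataLoader(dataset, batch_size=8, shuffle=True, num_workers=4)
# Each batch then yields (image, instance target stack, number of lane instances).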
|
43859
|
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="workdown",
version="0.0.4",
author="<NAME>",
author_email="<EMAIL>",
description="Write Markdown and have it published and hosted on Cloudflare Workers",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/eldridgea/workdown",
packages=setuptools.find_packages(),
install_requires=['markdown'],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
entry_points={
'console_scripts': [
'workdown = workdown.workdown:main'
]
},
)
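# Hedged usage note (not part of the original setup script): installing the package,
# e.g. with `pip install .`, exposes a `workdown` console command via the
# console_scripts entry point declared above.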
|
43873
|
from ChipseqReport import *
import xml.etree.ElementTree
import Glam2
def computeMastCurve(evalues):
'''compute a MAST curve.
see http://www.nature.com/nbt/journal/v26/n12/extref/nbt.1508-S1.pdf
returns a tuple of arrays (evalues, with_motifs, explained )
'''
if len(evalues) == 0:
raise ValueError("no data")
mi, ma = math.floor(min(evalues)), math.ceil(max(evalues))
if mi == ma:
raise ValueError("not enough data")
hist, bin_edges = numpy.histogram(evalues, bins=numpy.arange(mi, ma, 1.0))
with_motifs = numpy.cumsum(hist)
explained = numpy.array(with_motifs)
for x, evalue in enumerate(bin_edges[:-1]):
explained[x] -= evalue
explained[explained < 0] = 0
return bin_edges[:-1], with_motifs, explained
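# Hedged reading of the return value (illustrative, inferred from the code above, not
# from a real run): for 1-wide E-value bins, with_motifs[x] counts intervals whose MAST
# E-value falls below bin edge x+1, and explained[x] subtracts the bin's E-value itself
# as a rough estimate of hits expected by chance, floored at zero.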
def getFDR(samples, control, num_bins=1000):
'''return the score cutoff at a certain FDR threshold using scores and
control scores. Note that this method assumes that a higher score
is a better result.
The FDR is defined as fdr = expected number of false positives
(FP) / number of positives (P)
Given a certain score threshold s , the following will be used as
approximations:
    FP: the number of controls with a score greater than or equal to
    s. These are all assumed to be false positives. Both samples
    and control should contain a roughly equal number of entries, but
    FP is scaled to be equivalent to P.
    P: the number of samples with a score greater than or equal to
    s. These are a mixture of both true and false positives.
returns the score cutoff at FDR threshold.
'''
if len(samples) == 0 or len(control) == 0:
return None, None
mi1, ma1 = min(samples), max(samples)
mi2, ma2 = min(control), max(control)
mi, ma = min(mi1, mi2), max(ma1, ma2)
hist_samples, bin_edges_samples = numpy.histogram(
samples, range=(mi, ma), bins=num_bins)
hist_control, bin_edges_control = numpy.histogram(
control, range=(mi, ma), bins=num_bins)
hist_samples = hist_samples[::-1].cumsum()
hist_control = hist_control[::-1].cumsum()
bin_edges = bin_edges_samples[::-1]
# correct for unequal size in the two sets
correction = float(len(samples)) / len(control)
fdrs = []
m = 0
for s, p, fp in zip(bin_edges[:-1], hist_samples, hist_control):
if p != 0:
fdr = min(1.0, correction * float(fp) / p)
m = max(m, fdr)
fdrs.append(m)
return bin_edges[:-1][::-1], fdrs[::-1]
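# Worked mini-example for getFDR (hypothetical numbers, not from a real run): with
# samples = [9, 8, 8, 3] and control = [8.5, 2, 2, 1], a score threshold of 8 keeps
# 3 samples (P) and 1 control (FP), giving an FDR estimate of about 1/3 at that score;
# the running max over thresholds makes the returned curve monotone.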
def getMastEvalueCutoff(evalues, control_evalues, fdr=0.1):
'''return the E-Value cutoff at a certain FDR threshold
using the control tracks.
returns the evalue cutoff at FDR threshold.
'''
bin_edges, fdrs = getFDR(
[-x for x in evalues], [-x for x in control_evalues])
for bin, f in zip(bin_edges, fdrs):
if f < fdr:
return -bin
return 0
def getGlamScoreCutoff(scores, controls, fdr=0.1):
'''return the score cutoff at a certain FDR threshold
using the control tracks.
1000 is returned as a score cutoff <infinity> if the
fdr can not be obtained.
returns the score cutoff at FDR threshold.
'''
bin_edges, fdrs = getFDR(scores, controls)
for bin, f in zip(bin_edges, fdrs):
if f < fdr:
return bin
return 1000
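# Hedged usage sketch of the two cutoff helpers (values are made up):
#   evalue_cutoff = getMastEvalueCutoff(evalues, control_evalues, fdr=0.1)
#   score_cutoff = getGlamScoreCutoff(scores, control_scores, fdr=0.1)
# Both walk the FDR curve from getFDR and return the first threshold whose estimated
# FDR drops below the requested level (0 and 1000 act as "no usable cutoff" sentinels).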
##########################################################################
##########################################################################
##########################################################################
# Base class for mast analysis
##########################################################################
class Mast(DefaultTracker):
pattern = "(.*)_mast$"
def getSlices(self, subset=None):
if subset:
return subset
if MOTIFS:
return MOTIFS
return self.getValues("SELECT DISTINCT motif FROM motif_info")
##########################################################################
##########################################################################
##########################################################################
##
##########################################################################
class MastFDR(Mast):
"""return arrays of glam2scan scores
"""
mPattern = "_mast$"
def __call__(self, track, slice=None):
evalues = self.getValues(
"SELECT -evalue FROM %(track)s_mast WHERE motif = '%(slice)s'" % locals())
control_evalues = self.getValues(
"SELECT -min_evalue FROM %(track)s_mast WHERE motif = '%(slice)s'" % locals())
bin_edges, fdrs = getFDR(evalues, control_evalues)
if bin_edges is None:
return odict()
bin_edges = [-x for x in bin_edges]
print(len(bin_edges), len(fdrs))
return odict((("score", bin_edges),
("fdr", fdrs)))
##########################################################################
##########################################################################
##########################################################################
# Annotation of bases with SNPs
##########################################################################
class MastSummary(Mast):
"""return summary of mast results.
Return for each track the number of intervals in total,
the number of intervals submitted to mast,
    the E-value used as the MAST curve cutoff, and the number and % of intervals explained at that cutoff.
"""
mEvalueCutoff = 1
mFDR = 0.1
def __call__(self, track, slice=None):
data = []
nintervals = self.getValue(
"SELECT COUNT(*) FROM %(track)s_intervals" % locals())
data.append(("nintervals", nintervals))
data.append(("nmast", self.getValue(
"SELECT COUNT(*) FROM %(track)s_mast WHERE motif = '%(slice)s'" % locals())))
evalues = self.getValues(
"SELECT evalue FROM %(track)s_mast WHERE motif = '%(slice)s'" % locals())
if len(evalues) <= 1:
return odict()
try:
bin_edges, with_motifs, explained = computeMastCurve(evalues)
except ValueError as msg:
return odict((("msg", msg),))
if len(explained) == 0:
return odict((("msg", "no data"), ))
am = numpy.argmax(explained)
evalue = bin_edges[am]
intervals_and_peakvals = \
self.get("""
SELECT peakval, evalue
FROM %(track)s_mast AS m, %(track)s_intervals AS i
WHERE i.interval_id = m.id AND motif = '%(slice)s'
ORDER BY peakval""" % locals())
intervals_with_motifs = len(
[x for x in intervals_and_peakvals if x[1] <= evalue])
        ntop = nintervals // 4
top25_with_motifs = len(
[x for x in intervals_and_peakvals[-ntop:] if x[1] <= evalue])
bottom25_with_motifs = len(
[x for x in intervals_and_peakvals[:ntop] if x[1] <= evalue])
data.append(("MC-Evalue", bin_edges[am]))
data.append(("MC-explained", explained[am]))
data.append(
("MC-explained / %", "%5.2f" % (100.0 * explained[am] / nintervals)))
data.append(("MC-with-motif", intervals_with_motifs))
data.append(("MC-with-motif / %", "%5.2f" %
(100.0 * intervals_with_motifs / nintervals)))
data.append(("MC-top25-with-motif", top25_with_motifs))
if ntop == 0:
ntop = 1
data.append(("MC-top25-with-motif / %", "%5.2f" %
(100.0 * top25_with_motifs / ntop)))
data.append(("MC-bottom25-with-motif", bottom25_with_motifs))
data.append(("MC-bottom25-with-motif / %", "%5.2f" %
(100.0 * bottom25_with_motifs / ntop)))
# use control intervals to compute FDR
control_evalues = self.getValues(
"SELECT min_evalue FROM %(track)s_mast WHERE motif = '%(slice)s'" % locals())
evalue_cutoff = getMastEvalueCutoff(
evalues, control_evalues, self.mFDR)
nexplained = len([x for x in evalues if x <= evalue_cutoff])
data.append(("FDR-Evalue", evalue_cutoff))
data.append(("FDR-explained", nexplained))
data.append(
("FDR-explained / %", "%5.2f" % (100.0 * nexplained / nintervals)))
# use a pre-defined E-value threshold
threshold = self.mEvalueCutoff
data.append(("Evalue", self.mEvalueCutoff))
n = self.getValue(
"SELECT COUNT(*) FROM %(track)s_mast WHERE motif = '%(slice)s' AND evalue <= %(threshold)f" % locals())
data.append(("threshold-explained", n))
data.append(
("threshold-explained / %", "%5.2f" % (100.0 * n / nintervals)))
# use no threshold (nmatches > 1)
n = self.getValue(
"SELECT COUNT(*) FROM %(track)s_mast WHERE motif = '%(slice)s' AND nmatches > 0" % locals())
data.append(("nmatches-explained", n))
data.append(
("nmatches-explained / %", "%5.2f" % (100.0 * n / nintervals)))
intervals_and_peakvals = \
self.get("""
SELECT peakval, nmatches
FROM %(track)s_mast AS m, %(track)s_intervals AS i
WHERE i.interval_id = m.id AND motif = '%(slice)s'
ORDER BY peakval""" % locals())
intervals_with_motifs = len(
[x for x in intervals_and_peakvals if x[1] > 0])
        ntop = nintervals // 4
top25_with_motifs = len(
[x for x in intervals_and_peakvals[-ntop:] if x[1] > 0])
bottom25_with_motifs = len(
[x for x in intervals_and_peakvals[:ntop] if x[1] > 0])
if ntop == 0:
ntop = 1
data.append(("nmatches-top25-with-motif", top25_with_motifs))
data.append(("nmatches-top25-with-motif / %", "%5.2f" %
(100.0 * top25_with_motifs / ntop)))
data.append(("nmatches-bottom25-with-motif", bottom25_with_motifs))
data.append(("nmatches-bottom25-with-motif / %", "%5.2f" %
(100.0 * bottom25_with_motifs / ntop)))
return odict(data)
class MastNumberOfMotifs(Mast):
'''number of motifs matching within intervals.'''
def __call__(self, track, slice=None):
data = self.getValues(
"SELECT nmatches FROM %(track)s_mast WHERE motif = '%(slice)s'" % locals())
return odict((("nmatches", data), ))
class MastAllCorrelations(Mast):
'''correlating all measures.'''
def __call__(self, track, slice=None):
field = "length"
data = self.get("""SELECT m.evalue, m.nmatches, i.length, i.peakval, i.avgval
FROM %(track)s_mast as m, %(track)s_intervals as i
WHERE i.interval_id = m.id AND motif = '%(slice)s'
ORDER BY i.%(field)s DESC"""
% locals())
return odict(list(zip(("evalue", "nmatches", "length", "peakval", "avgval"),
list(zip(*data)))))
class MastPairwiseCorrelation(Mast):
'''base class for correlating two measures.'''
def __call__(self, track, slice=None):
field1 = self.mField1
field2 = self.mField2
data = self.get("""SELECT %(field1)s as a, %(field2)s AS b
FROM %(track)s_mast as m, %(track)s_intervals as i
WHERE i.interval_id = m.id AND motif = '%(slice)s'"""
% locals())
return odict(list(zip((field1, field2), list(zip(*data)))))
class MastEvalueVersusLength(MastPairwiseCorrelation):
'''correlate evalue with interval length.'''
mField1 = "evalue"
mField2 = "i.length"
class MastEvalueVersusNumberOfMatches(MastPairwiseCorrelation):
'''correlate evalue with number of motifs found.'''
mField1 = "evalue"
mField2 = "nmatches"
class MastEvalueVersusPeakVal(MastPairwiseCorrelation):
'''correlate evalue with peak value.'''
mField2 = "evalue"
mField1 = "peakval"
class MastNMatchesVersusPeakVal(MastPairwiseCorrelation):
'''correlate evalue with peak value.'''
mField2 = "nmatches"
mField1 = "peakval"
class MastPeakValPerNMatches(MastPairwiseCorrelation):
'''correlate evalue with peak value.'''
mField1 = "nmatches"
mField2 = "peakval"
def __call__(self, track, slice=None):
data = MastPairwiseCorrelation.__call__(self, track, slice)
n = odict()
for x in sorted(data["nmatches"]):
n[x] = []
for nmatches, peakval in sorted(
zip(data["nmatches"], data["peakval"])):
n[nmatches].append(peakval)
return odict(n)
class MastMotifLocation(Mast):
'''plot median position of motifs versus the peak location.'''
def __call__(self, track, slice=None):
data = self.getValues(
"""SELECT (i.peakcenter - (m.start + (m.end - m.start) / 2)) / ((CAST(i.length AS FLOAT) - (m.end - m.start)) / 2)
FROM %(track)s_mast as m, %(track)s_intervals as i
WHERE i.interval_id = m.id AND motif = '%(slice)s'
AND m.nmatches = 1"""
% locals())
return odict((("distance", data),))
class MastMotifLocationMiddle(Mast):
'''plot median position of motifs versus the center of the interval.'''
def __call__(self, track, slice=None):
# difference between
# middle of interval: i.start + i.length / 2
# middle of motif: m.start + (m.end - m.start) / 2
# divide by (intervalsize - motifsize) / 2
#
# only take single matches (multiple matches need not be centered)
data = self.getValues(
"""SELECT ((i.start + i.length / 2) - (m.start + (m.end - m.start) / 2))
/ ((CAST(i.length AS FLOAT) - (m.end - m.start))/2)
FROM %(track)s_mast as m, %(track)s_intervals as i
WHERE i.interval_id = m.id AND motif = '%(slice)s'
AND m.nmatches = 1"""
% locals())
return odict((("distance", data),))
class MastControlLocationMiddle(Mast):
'''plot median position of controls versus the center of the interval.'''
def __call__(self, track, slice=None):
data1 = self.getValues(
"""SELECT ( (m.r_length / 2) - (m.r_start + (m.r_end - m.r_start) / 2) ) / ((CAST( m.r_length as float) - (m.r_end - m.r_start))/2)
FROM %(track)s_mast as m, %(track)s_intervals as i
WHERE i.interval_id = m.id AND motif = '%(slice)s'
AND m.r_nmatches = 1"""
% locals())
data2 = self.getValues(
"""SELECT ( (m.l_length / 2) - (m.l_start + (m.l_end - m.l_start) / 2) ) / ((CAST( m.l_length as float) - (m.l_end - m.l_start))/2)
FROM %(track)s_mast as m, %(track)s_intervals as i
WHERE i.interval_id = m.id AND motif = '%(slice)s'
AND m.l_nmatches = 1"""
% locals())
return odict((("distance", data1 + data2),))
class MastCurve(Mast):
"""Summary stats of mast results.
"""
mPattern = "_mast$"
def __call__(self, track, slice=None):
evalues = self.getValues(
"SELECT evalue FROM %(track)s_mast WHERE motif = '%(slice)s'" % locals())
if len(evalues) == 0:
return odict()
try:
bin_edges, with_motifs, explained = computeMastCurve(evalues)
except ValueError as msg:
return odict()
data = odict()
data["with_motifs"] = odict(
(("evalue", bin_edges), ("with_motifs", with_motifs)))
data["explained"] = odict(
(("evalue", bin_edges), ("explained", explained)))
return data
class MastROC(Mast):
'''return a ROC curve. The ROC tests various peak parameters
whether they are good descriptors of a motif.
True/false positives are identified by the presence/absence
of a motif. The presence is tested using the E-value of
a motif.
'''
mPattern = "_mast$"
mFields = ("peakval", "avgval", "length")
def __call__(self, track, slice=None):
data = []
# obtain evalue distribution
evalues = self.getValues(
"SELECT evalue FROM %(track)s_mast WHERE motif = '%(slice)s'" % locals())
if len(evalues) == 0:
return odict()
try:
bin_edges, with_motifs, explained = computeMastCurve(evalues)
except ValueError as msg:
return odict()
# determine the e-value cutoff as the maximum of "explained"
cutoff = bin_edges[numpy.argmax(explained)]
# retrieve values of interest together with e-value
result = odict()
for field in self.mFields:
values = self.get("""SELECT i.%(field)s, m.evalue
FROM %(track)s_mast as m, %(track)s_intervals as i
WHERE i.interval_id = m.id AND motif = '%(slice)s'
ORDER BY i.%(field)s DESC"""
% locals())
try:
roc = Stats.computeROC(
[(x[0], x[1] <= cutoff) for x in values])
except ValueError as msg:
# ignore results where there are no positives among values.
continue
result[field] = odict(list(zip(("FPR", "TPR"), list(zip(*roc)))))
return result
class MastROCNMatches(Mast):
'''return a ROC curve. The ROC tests various peak parameters
whether they are good descriptors of a motif.
True/false positives are identified by the presence/absence
of a motif using the field nmatches
'''
mPattern = "_mast$"
mFields = ("peakval", "avgval", "length")
def __call__(self, track, slice=None):
data = []
# retrieve values of interest together with e-value
result = odict()
for field in self.mFields:
values = self.get(
"""SELECT i.%(field)s, m.nmatches
FROM %(track)s_mast as m, %(track)s_intervals as i
WHERE i.interval_id = m.id AND motif = '%(slice)s'
ORDER BY i.%(field)s DESC"""
% locals())
try:
roc = Stats.computeROC([(x[0], x[1] > 0) for x in values])
except ValueError as msg:
# ignore results where there are no positives among values.
continue
result[field] = odict(list(zip(("FPR", "TPR"), list(zip(*roc)))))
return result
class MastAUC(MastROC):
'''return AUC for a ROC curve. The ROC tests various peak parameters
whether they are good descriptors of a motif.
True/false positives are identified by the presence/absence
of a motif.
'''
mPattern = "_mast$"
mFields = ("peakval", "avgval", "length")
def __call__(self, track, slice=None):
data = MastROC.__call__(self, track, slice)
for k, d in data.items():
data[k] = Stats.getAreaUnderCurve(d['FPR'], d['TPR'])
return data
class MastEvalues(Mast):
"""return arrays of mast evalues.
"""
mPattern = "_mast$"
def __call__(self, track, slice=None):
return odict((("evalue", self.getValues("SELECT evalue FROM %(track)s_mast WHERE motif = '%(slice)s'" % locals())),
("evalue - control", self.getValues("SELECT min_evalue FROM %(track)s_mast WHERE motif = '%(slice)s'" % locals()))))
class MastPeakValWithMotif(Mast):
'''return for each peakval the proportion of intervals
that have a motif.
This tracker uses the nmatches indicator as cutoff.
'''
def __call__(self, track, slice=None):
# obtain evalue distribution
data = self.get('''
SELECT i.peakval, m.nmatches
FROM %(track)s_intervals AS i,
%(track)s_mast AS m
WHERE m.id = i.interval_id \
AND m.motif = '%(slice)s' ORDER BY i.peakval DESC''' % locals())
result = Stats.getSensitivityRecall(
[(int(x[0]), x[1] > 0) for x in data])
return odict(list(zip(("peakval", "proportion with motif", "recall"), list(zip(*result)))))
class MastPeakValWithMotifEvalue(Mast):
'''return for each peakval the proportion of intervals
that have a motif.
This class uses the ROC Evalue as cutoff.
'''
def __call__(self, track, slice=None):
# obtain evalue distribution
evalues = self.getValues(
"SELECT evalue FROM %(track)s_mast WHERE motif = '%(slice)s'" % locals())
if len(evalues) == 0:
return odict()
try:
bin_edges, with_motifs, explained = computeMastCurve(evalues)
except ValueError as msg:
return odict()
# determine the e-value cutoff as the maximum of "explained"
cutoff = bin_edges[numpy.argmax(explained)]
data = self.get('''
SELECT i.peakval, m.evalue
FROM %(track)s_intervals AS i,
%(track)s_mast AS m
WHERE m.id = i.interval_id \
AND m.motif = '%(slice)s' ORDER BY i.peakval DESC''' % locals())
result = Stats.getSensitivityRecall(
[(int(x[0]), x[1] < cutoff) for x in data])
return odict(list(zip(("peakval", "proportion with motif", "recall"), list(zip(*result)))))
class MemeInputSequenceComposition(DefaultTracker):
'''distribution of sequence composition in sequences
submitted to motif searches.'''
pattern = "(.*)_motifseq_stats"
slices = ('nA', 'nAT', 'nC', 'nG', 'nGC', 'nN', 'nT',
'nUnk', 'pA', 'pAT', 'pC', 'pG', 'pGC', 'pN', 'pT')
def __call__(self, track, slice):
return self.getValues('''SELECT %(slice)s FROM %(track)s_motifseq_stats''')
class MemeRuns(DefaultTracker):
def getTracks(self):
return self.getValues("SELECT DISTINCT motif FROM motif_info")
def __call__(self, track, slice=None):
resultsdir = os.path.abspath(
os.path.join(EXPORTDIR, "meme", "%s.meme" % track))
if not os.path.exists(resultsdir):
return None
data = []
tree = xml.etree.ElementTree.ElementTree()
tree.parse(os.path.join(resultsdir, "meme.xml"))
model = tree.find("model")
data.append(("nsequences", int(model.find("num_sequences").text)))
data.append(("nbases", int(model.find("num_positions").text)))
motifs = tree.find("motifs")
nmotifs = 0
for motif in motifs.getiterator("motif"):
nmotifs += 1
data.append(("nmotifs", nmotifs))
data.append(
("link", "`meme_%s <%s/meme.html>`_" % (track, resultsdir)))
return odict(data)
class MemeResults(DefaultTracker):
tracks = list(EXPERIMENTS)
def __call__(self, track, slice=None):
resultsdir = os.path.abspath(
os.path.join(EXPORTDIR, "meme", "%s.meme" % track.asFile()))
if not os.path.exists(resultsdir):
return []
tree = xml.etree.ElementTree.ElementTree()
tree.parse(os.path.join(resultsdir, "meme.xml"))
model = tree.find("model")
# data.append( ("nsequences", int(model.find( "num_sequences" ).text) ) )
# data.append( ("nbases", int(model.find( "num_positions" ).text) ) )
motifs = tree.find("motifs")
nmotif = 0
result = odict()
for motif in motifs.getiterator("motif"):
nmotif += 1
result[str(nmotif)] = odict((
("width", motif.get("width")),
("evalue", motif.get("e_value")),
("information content", motif.get("ic")),
("sites", motif.get("sites")),
("link", "`meme_%s_%i <%s/meme.html#summary%i>`_" %
(track, nmotif, resultsdir, nmotif))))
return result
class TomTomResults(DefaultTracker):
pattern = "(.*)_tomtom$"
def __call__(self, track, slice=None):
data = self.get("""SELECT query_id, target_id,
optimal_offset,pvalue,qvalue,overlap,query_consensus,
target_consensus, orientation FROM %(track)s_tomtom""" % locals())
headers = ("query_id", "target_id",
"optimal_offset", "pvalue", "qvalue", "overlap",
"query_consensus",
"target_consensus", "orientation")
result = odict()
for x, y in enumerate(data):
result[str(x)] = odict(list(zip(headers, y)))
return result
class AnnotationsMatrix(DefaultTracker):
def getSlices(self, subset=None):
if subset:
return subset
return []
def __call__(self, track, slice=None):
result = odict()
rows = ("intergenic", "intronic", "upstream",
"downstream", "utr", "cds", "other")
statement = self.getStatement(slice)
data = self.get(statement % locals())
levels = sorted(list(set([x[7] for x in data])))
for row in rows:
m = odict()
for l in levels:
m[l] = 0
result[row] = m
map_level2col = dict([(y, x) for x, y in enumerate(levels)])
for intergenic, intronic, upstream, downstream, utr, coding, ambiguous, level in data:
col = level
for x, v in enumerate((intergenic, intronic, upstream,
downstream, utr, coding, ambiguous)):
if v:
row = rows[x]
break
else:
row = rows[-1]
result[row][col] += 1
return result
class AnnotationsMotifs(AnnotationsMatrix):
'''return a matrix with intervals stratified by motif presence
and location of the interval.
'''
mPattern = "_mast$"
def getStatement(self, slice=None):
statement = '''
SELECT a.is_intergenic, a.is_intronic, a.is_upstream, a.is_downstream, a.is_utr, a.is_cds, a.is_ambiguous,
CASE WHEN m.nmatches > 0 THEN motif || '+' ELSE motif || '-' END
FROM %(track)s_intervals AS i,
%(track)s_annotations AS a ON a.gene_id = i.interval_id,
%(track)s_mast AS m ON m.id = i.interval_id'''
if slice is not None:
statement += " AND motif = '%(slice)s'"
return statement
class AnnotationsPeakVal(AnnotationsMatrix):
'''return a matrix with intervals stratified by peakval
and location of the interval.
'''
mPattern = "_annotations$"
def getStatement(self, slice=None):
statement = '''
SELECT a.is_intergenic, a.is_intronic, a.is_upstream, a.is_downstream, a.is_utr, a.is_cds, a.is_ambiguous,
peakval
FROM %(track)s_intervals AS i,
%(track)s_annotations AS a ON a.gene_id = i.interval_id'''
return statement
class AnnotationsPeakValData(DefaultTracker):
'''return peakval for intervals falling into various regions.'''
def getSlices(self, subset=None):
if subset:
return subset
return ("intergenic", "intronic", "upstream", "downstream", "utr", "cds", "other")
def __call__(self, track, slice=None):
if slice == "other":
slice = "ambiguous"
statement = '''
SELECT peakval
FROM %(track)s_intervals AS i,
%(track)s_annotations AS a ON a.gene_id = i.interval_id AND a.is_%(slice)s ''' % locals()
return odict((("peakval", self.getValues(statement)),))
##########################################################################
##########################################################################
##########################################################################
##########################################################################
##########################################################################
##########################################################################
# Base class for glam analysis
##########################################################################
class Glam(DefaultTracker):
pattern = "(.*)_glam$"
def getSlices(self, subset=None):
if subset:
return subset
return MOTIFS
class GlamScores(Glam):
"""return arrays of glam2scan scores
"""
mPattern = "_glam$"
def __call__(self, track, slice=None):
return odict((("score", self.getValues("SELECT score FROM %(track)s_glam WHERE motif = '%(slice)s'" % locals())),
("score - control", self.getValues("SELECT max_controls FROM %(track)s_glam WHERE motif = '%(slice)s' AND max_controls != '' " % locals()))))
##########################################################################
##########################################################################
##########################################################################
##
##########################################################################
def getGlamFDR(samples, control, num_bins=100):
'''return the score cutoff at a certain FDR threshold
using scores and control scores.
The FDR is defined as fdr = expected number of false positives
(FP) / number of positives (P)
Given a certain score threshold s , the following will be used as
approximations:
    FP: the number of controls with a score greater than or equal to
    s. These are all assumed to be false positives.
    P: the number of samples with a score greater than or equal to
    s. These are a mixture of both true and false positives.
    Both samples and control should contain a roughly equal number of entries.
returns the score cutoff at FDR threshold.
'''
if len(samples) == 0 or len(control) == 0:
return None, None
mi1, ma1 = min(samples), max(samples)
mi2, ma2 = min(control), max(control)
mi, ma = min(mi1, mi2), max(ma1, ma2)
hist_samples, bin_edges_samples = numpy.histogram(
samples, range=(mi, ma), bins=num_bins)
hist_control, bin_edges_control = numpy.histogram(
control, range=(mi, ma), bins=num_bins)
hist_samples = hist_samples[::-1].cumsum()
hist_control = hist_control[::-1].cumsum()
bin_edges = bin_edges_samples[::-1]
# correct for unequal size in the two sets
correction = float(len(samples)) / len(control)
fdrs = []
m = 0
    for s, p, fp in zip(bin_edges[:-1], hist_samples, hist_control):
        if p != 0:
            fdr = min(1.0, correction * float(fp) / p)
            m = max(m, fdr)
        fdrs.append(m)
return bin_edges[:-1], fdrs
class GlamFDR(Glam):
"""return arrays of glam2scan scores
"""
mPattern = "_glam$"
def __call__(self, track, slice=None):
scores = self.getValues(
"SELECT score FROM %(track)s_glam WHERE motif = '%(slice)s'" % locals())
control_scores = self.getValues(
"SELECT max_controls FROM %(track)s_glam WHERE motif = '%(slice)s' and max_controls is not null" % locals())
bin_edges, fdrs = getFDR(scores, control_scores)
if bin_edges is None:
return odict()
return odict((("score", bin_edges),
("fdr", fdrs)))
##########################################################################
##########################################################################
##########################################################################
# Annotation of bases with SNPs
##########################################################################
class GlamSummary(Glam):
"""return summary of glam results.
Return for each track the number of intervals in total, the number
of intervals submitted to mast, ...
"""
mEvalueCutoff = 1
mFDR = 0.2
def __call__(self, track, slice=None):
data = []
nintervals = self.getValue(
"SELECT COUNT(*) FROM %(track)s_intervals" % locals())
data.append(("nintervals", nintervals))
data.append(("nglam", self.getValue(
"SELECT COUNT(*) FROM %(track)s_glam WHERE motif = '%(slice)s' AND score is not null" % locals())))
scores = self.getValues(
"SELECT score FROM %(track)s_glam WHERE motif = '%(slice)s'" % locals())
if len(scores) == 0:
return odict()
control_scores = self.getValues(
"SELECT max_controls FROM %(track)s_glam WHERE motif = '%(slice)s' AND max_controls is not null" % locals())
score_cutoff = getGlamScoreCutoff(scores, control_scores, self.mFDR)
nexplained = len([x for x in scores if x >= score_cutoff])
data.append(("FDR-Score", score_cutoff))
data.append(("FDR-explained", nexplained))
data.append(
("FDR-explained / %", "%5.2f" % (100.0 * nexplained / nintervals)))
return odict(data)
##########################################################################
##########################################################################
##########################################################################
# Glam 2 results
##########################################################################
class Glam2Results(DefaultTracker):
'''return a table with information about the glam2 results.'''
mPattern = "_glam$"
def __call__(self, track, slice=None):
resultsdir = os.path.join(EXPORTDIR, "%s.glam2" % track)
if not os.path.exists(resultsdir):
return None
data = []
glam2 = Glam2.parse(open(os.path.join(resultsdir, "glam2.txt")))
result = odict()
for x, m in enumerate(glam2.motifs):
result[str(x)] = odict((("score", m.score),
("columns", m.columns),
("sequences", m.sequences),
("link", "`glam2_%s <%s/glam2.html>`_" % (track, resultsdir))))
return result
|
43874
|
import unittest
from django.apps import apps
from tethys_config.apps import TethysPortalConfig
class TethysConfigAppsTest(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_TethysPortalConfig(self):
app_config = apps.get_app_config('tethys_config')
name = app_config.name
verbose_name = app_config.verbose_name
self.assertEqual('tethys_config', name)
self.assertEqual('Tethys Portal', verbose_name)
self.assertTrue(isinstance(app_config, TethysPortalConfig))
|
43910
|
import fileinput as fin
# funcs:
def findValWithFormat(line):
lines.append(line)
taken = line.split(" ")
raw_val = taken[-1]
val = raw_val.split("/")[-1]
val = val[0:-2]
if 'us' in val:
val = float(val[0:val.find('us')])
val = val/1000
else:
val = float(val[0:val.find('ms')])
return val
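# Note on the assumed input format (the profiler log itself is not shown in this file):
# each relevant line of timing_memory.log is expected to end in a token shaped like
# "<total>/<self>us.." or "<total>/<self>ms.."; findValWithFormat keeps the part after
# the last "/", drops the final two characters, and normalises microseconds to ms.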
def getCellNum(line):
cell_num = line[line.find(rnn_cell_string):line.find(rnn_cell_string) + len(rnn_cell_string) + 1]
return cell_num
def profRNNCell(line, rnncell_prof):
cell_num = getCellNum(line)
val = findValWithFormat(line)
rnncell_prof[cell_num] += val
# variables:
lines = []
module_rnncell = "CustomRNNCell2"
module_grad = 'gradients'
num_rnn_layer = 7
rnn_cell_string = "cell_"
module_rnn = 'rnn'
module_conv1 = 'conv1'
module_conv2 = 'conv2'
module_softmax = 'softmax_linear'
module_ctc = ['ctc_loss', 'CTCLoss']
module_bn = 'bn2'
rnn_cells = [rnn_cell_string+str(i) for i in range(num_rnn_layer)]
rnncell_f_prof = dict.fromkeys(rnn_cells)
rnncell_b_prof = dict.fromkeys(rnn_cells)
# prf estimator:
for el in rnncell_f_prof:
rnncell_f_prof[el] = 0.0
for el in rnncell_b_prof:
rnncell_b_prof[el] = 0.0
overall_cost = 0.0
profs ={\
'rnn_trans_f_prof': 0.0, \
'rnn_trans_b_prof': 0.0, \
'rnn_reshape_f_prof': 0.0, \
'rnn_reshape_b_prof': 0.0, \
'rnn_ReverseSequence_f_prof': 0.0, \
'rnn_ReverseSequence_b_prof': 0.0, \
'conv1_f_prof': 0.0, \
'conv1_b_prof': 0.0, \
'bn1_f_prof': 0.0, \
'bn1_b_prof': 0.0, \
'relu1_f_prof': 0.0, \
'relu1_b_prof': 0.0, \
'conv2_f_prof': 0.0, \
'conv2_b_prof': 0.0, \
'bn2_f_prof': 0.0, \
'bn2_b_prof': 0.0, \
'relu2_f_prof': 0.0, \
'relu2_b_prof': 0.0, \
'softmax_f_prof': 0.0, \
'softmax_b_prof': 0.0, \
'ctc_f_prof': 0.0, \
'ctc_b_prof': 0.0 \
}
with open('timing_memory.log', 'r') as f:
for line in f:
if len(line) > 3:
if ((line[3] != ' ') or 'Adam/update_' in line) and ('flops' not in line):
# flops is not considered
# conv1
if (module_grad not in line) and (module_conv1 in line) and ('Minimum' not in line) and ('Relu' not in line) and (module_bn not in line):
val = findValWithFormat(line)
profs['conv1_f_prof'] += val
if (module_grad in line) and (module_conv1 in line) and ('Minimum' not in line) and ('Relu' not in line) and (module_bn not in line):
val = findValWithFormat(line)
profs['conv1_b_prof'] += val
# BN1
if (module_grad not in line) and (module_conv1 in line) and ('Minimum' not in line) and ('Relu' not in line) and (module_bn in line):
val = findValWithFormat(line)
profs['bn1_f_prof'] += val
if (module_grad in line) and (module_conv1 in line) and ('Minimum' not in line) and ('Relu' not in line) and (module_bn in line):
val = findValWithFormat(line)
profs['bn1_b_prof'] += val
# Relu1
if (module_grad not in line) and (module_conv1 in line) and ('Minimum' in line or 'Relu' in line) and (module_bn not in line):
val = findValWithFormat(line)
profs['relu1_f_prof'] += val
if (module_grad in line) and (module_conv1 in line) and ('Minimum' in line or 'Relu' in line) and (module_bn not in line):
val = findValWithFormat(line)
profs['relu1_b_prof'] += val
# conv2
if (module_grad not in line) and (module_conv2 in line) and ('Minimum' not in line) and ('Relu' not in line) and (module_bn not in line):
val = findValWithFormat(line)
profs['conv2_f_prof'] += val
if (module_grad in line) and (module_conv2 in line) and ('Minimum' not in line) and ('Relu' not in line) and (module_bn not in line):
val = findValWithFormat(line)
profs['conv2_b_prof'] += val
# BN2
if (module_grad not in line) and (module_conv2 in line) and ('Minimum' not in line) and ('Relu' not in line) and (module_bn in line):
val = findValWithFormat(line)
profs['bn2_f_prof'] += val
if (module_grad in line) and (module_conv2 in line) and ('Minimum' not in line) and ('Relu' not in line) and (module_bn in line):
val = findValWithFormat(line)
profs['bn2_b_prof'] += val
# Relu2
if (module_grad not in line) and (module_conv2 in line) and ('Minimum' in line or 'Relu' in line) and (module_bn not in line):
val = findValWithFormat(line)
profs['relu2_f_prof'] += val
if (module_grad in line) and (module_conv2 in line) and ('Minimum' in line or 'Relu' in line) and (module_bn not in line):
val = findValWithFormat(line)
profs['relu2_b_prof'] += val
#rnn transpose
if (module_grad not in line) and (module_rnn in line) and ('transpose' in line) and (module_rnncell not in line):
val = findValWithFormat(line)
profs['rnn_trans_f_prof'] += val
if (module_grad in line) and (module_rnn in line) and ('transpose' in line) and (module_rnncell not in line):
val = findValWithFormat(line)
profs['rnn_trans_b_prof'] += val
#rnn reshape
if (module_grad not in line) and (module_rnn in line) and ('rnn/Reshape' in line) and (module_rnncell not in line):
val = findValWithFormat(line)
profs['rnn_reshape_f_prof'] += val
if (module_grad in line) and (module_rnn in line) and ('rnn/Reshape' in line) and (module_rnncell not in line):
val = findValWithFormat(line)
profs['rnn_reshape_b_prof'] += val
#rnn reshape
if (module_grad not in line) and (module_rnn in line) and ('ReverseSequence' in line):
val = findValWithFormat(line)
profs['rnn_ReverseSequence_f_prof'] += val
if (module_grad in line) and (module_rnn in line) and ('ReverseSequence' in line):
val = findValWithFormat(line)
profs['rnn_ReverseSequence_b_prof'] += val
# rnn forward profiling by cell
if (module_grad not in line) and (module_rnncell in line):
profRNNCell(line, rnncell_f_prof)
# rnn backward profiling by cell
if (module_grad in line) and (module_rnncell in line):
profRNNCell(line, rnncell_b_prof)
# softmax
if (module_grad not in line) and (module_softmax in line):
val = findValWithFormat(line)
profs['softmax_f_prof'] += val
if (module_grad in line) and (module_softmax in line):
val = findValWithFormat(line)
profs['softmax_b_prof'] += val
# ctc
for c in module_ctc:
if (c in line) and (module_grad not in line):
val = findValWithFormat(line)
profs['ctc_f_prof'] += val
if (c in line) and (module_grad in line):
val = findValWithFormat(line)
profs['ctc_b_prof'] +=val
for key, val in rnncell_f_prof.items():
    overall_cost += val
    print("(RNN forward by cell) {}: {}ms".format(key, val))
for key, val in rnncell_b_prof.items():
    overall_cost += val
    print("(RNN backward by cell) {}: {}ms".format(key, val))
# Profiling result
for k in profs:
    overall_cost += profs[k]
    print("{}: {}ms".format(k, profs[k]))
print("overall: {}ms".format(overall_cost))
prf_file1 = open('prf1.txt', 'w')
for k in profs:
prf_file1.write("%s:%f\n" % (k, profs[k]))
prf_file1.close()
# write including modules
prf_file2 = open('prf2.txt', 'w')
for el in lines:
prf_file2.write("%s\n" % el)
prf_file2.close()
|
43945
|
from overrides import overrides
from allennlp.data import Instance
from allennlp.common.util import JsonDict
from allennlp.predictors.predictor import Predictor
@Predictor.register('nfh_classification')
class NfhDetectorPredictor(Predictor):
""""Predictor wrapper for the NfhDetector"""
@overrides
def _json_to_instance(self, json_dict: JsonDict) -> Instance:
# def _json_to_instance(self, json_dict: JsonDict) -> JsonDict:
sentence = json_dict['tokens']
anchor_span = json_dict['anchors_indices']
label = json_dict['label'] if 'label' in json_dict else None
instance = self._dataset_reader.text_to_instance(tokens=sentence, anchors_indices=anchor_span, head=label)
# span_d = self._setting_output_span_indices(1,
# ['YEAR', 'AGE', 'CURRENCY', 'PEOPLE', 'TIME', 'OTHER'])
# label_dict = {v: k for k, v in span_d.items()}
#return {'instance': instance, 'label_dict': label_dict}
return instance
def _setting_output_span_indices(self, span_len, additional_classes):
"""
        create a dictionary mapping the labels (implicit classes such as year and age, plus span index strings) to integers
:param span_len: the maximum possible span length
:param additional_classes: the `Implicit' classes described in the paper (year, age etc.)
:return: the mapping dictionary
"""
span_dic = {}
counter = 0
for c in additional_classes:
span_dic[c] = counter
counter += 1
# 10000 is a random large number
for i in range(10000):
for j in range(1, span_len + 1):
s = str(i) + ':' + str(i + j)
span_dic[s] = counter
counter += 1
return dict(span_dic)
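# Hedged illustration of _setting_output_span_indices (values derived from the code
# above, not from the paper): with span_len=1 and the commented-out class list, the
# mapping would start as
#   {'YEAR': 0, 'AGE': 1, 'CURRENCY': 2, 'PEOPLE': 3, 'TIME': 4, 'OTHER': 5,
#    '0:1': 6, '1:2': 7, '2:3': 8, ...}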
|
43947
|
import numpy as np
from unityagents import UnityEnvironment
"""UnityEnv is a wrapper around UnityEnvironment
The main purpose of this wrapper is to expose the common interface that most environments provide
"""
class UnityEnv:
def __init__(self,
env_path,
train_mode = True
):
self.brain = None
self.brain_name = None
self.train_mode = train_mode
self.env = self.create_unity_env(env_path)
#env details
self.action_space = self.brain.vector_action_space_size
self.observation_space = self.brain.vector_observation_space_size
print(f'Action space {self.action_space}')
print(f'State space {self.observation_space}')
#backwards compatibility
self.action_dim = self.action_space
#self.observation_space = self.env.observation_space
self.state_dim = int(np.prod(self.observation_space))
def extract_env_details(self, env_info):
next_state = env_info.vector_observations # get the next state
reward = env_info.rewards # get the reward
done = env_info.local_done # see if episode has finished
return next_state, reward, done
def create_unity_env(self, env_path):
env = UnityEnvironment(file_name=env_path)
self.brain_name = env.brain_names[0]
self.brain = env.brains[self.brain_name]
return env
def reset(self):
env_info = self.env.reset(train_mode=self.train_mode)[self.brain_name]
return self.extract_env_details(env_info)[0]
def step(self, actions):
actions = np.clip(actions, -1, 1)
# torch.clamp(actions, min=-1, max=1)
        env_info = self.env.step(actions)[self.brain_name]
next_states, rewards, dones = self.extract_env_details(env_info)
return next_states, rewards, np.array(dones)
# return next_state, reward, np.array([done])
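# Hedged usage sketch (the environment path and the zero action are placeholders, not
# from this file):
#   env = UnityEnv("path/to/env_binary", train_mode=True)
#   states = env.reset()
#   next_states, rewards, dones = env.step(np.zeros(env.action_dim))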
|
43949
|
import torch
from torch import nn
import pdb, os
from shapely.geometry import *
from maskrcnn_benchmark.structures.boxlist_ops import cat_boxlist
import time
import matplotlib.pyplot as plt
import numpy as np
from scipy.signal import argrelextrema
import random
import string
all_types = [[1,2,3,4],[1,2,4,3],[1,3,2,4],[1,3,4,2],[1,4,2,3],[1,4,3,2],\
[2,1,3,4],[2,1,4,3],[2,3,1,4],[2,3,4,1],[2,4,1,3],[2,4,3,1],\
[3,1,2,4],[3,1,4,2],[3,2,1,4],[3,2,4,1],[3,4,1,2],[3,4,2,1],\
[4,1,2,3],[4,1,3,2],[4,2,1,3],[4,2,3,1],[4,3,1,2],[4,3,2,1]]
class kePostProcessor(nn.Module):
def __init__(self, keer=None, cfg=None):
super(kePostProcessor, self).__init__()
self.keer = keer
self.cfg = cfg
def forward(self, ft_x, ft_y, mty, boxes):
ke_prob_x = ft_x
ke_prob_y = ft_y
mty_prob = mty
boxes_per_image = [box.bbox.size(0) for box in boxes]
ke_prob_x = ke_prob_x.split(boxes_per_image, dim=0)
ke_prob_y = ke_prob_y.split(boxes_per_image, dim=0)
mty_prob = mty_prob.split(boxes_per_image, dim=0)
results = []
for prob_x, prob_y, prob_mty, box in zip(ke_prob_x, ke_prob_y, mty_prob, boxes):
bbox = BoxList(box.bbox, box.size, mode='xyxy')
for field in box.fields():
bbox.add_field(field, box.get_field(field))
if self.keer:
prob_x, rescores_x = self.keer(prob_x, box)
prob_y, rescores_y = self.keer(prob_y, box)
rescores = (rescores_x+rescores_y)*0.5
if self.cfg.MODEL.ROI_KE_HEAD.RESCORING:
bbox.add_field('scores', rescores)
prob = torch.cat((prob_x,prob_y), dim = -2)
prob = prob[..., :1]
prob = textKES(prob, box.size)
bbox.add_field('ke', prob)
bbox.add_field('mty', prob_mty)
results.append(bbox)
return results
# TODO remove and use only the keer
import cv2
def scores_to_probs(scores):
"""Transforms CxHxW of scores to probabilities spatially."""
channels = scores.shape[0]
for c in range(channels):
temp = scores[c, :, :]
max_score = temp.max()
temp = np.exp(temp - max_score) / np.sum(np.exp(temp - max_score))
scores[c, :, :] = temp
return scores
def kes_decode(kes):
# BDN decode
for ix, i in enumerate(kes):
mnd = i[0, 0]
nkes = i.shape[1]-2
kes[ix][0, 1:5] = kes[ix][0, 1:5]*2 - mnd
return kes
def heatmaps_to_kes(maps, rois, scores, cfg):
"""Extract predicted ke locations from heatmaps. Output has shape
(#rois, 4, #kes) with the 4 rows corresponding to (x, y, logit, prob)
for each ke.
"""
# This function converts a discrete image coordinate in a HEATMAP_SIZE x
# HEATMAP_SIZE image to a continuous ke coordinate. We maintain
# consistency with kes_to_heatmap_labels by using the conversion from
# Heckbert 1990: c = d + 0.5, where d is a discrete coordinate and c is a
# continuous coordinate.
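    # Worked example of the convention (hypothetical numbers): a peak at discrete
    # heatmap column d = 13 corresponds to the continuous coordinate c = 13.5, which
    # is then scaled by width_correction and shifted by the RoI offset further below.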
offset_x = rois[:, 0]
offset_y = rois[:, 1]
widths = rois[:, 2] - rois[:, 0]
heights = rois[:, 3] - rois[:, 1]
widths = np.maximum(widths, 1)
heights = np.maximum(heights, 1)
widths_ceil = np.ceil(widths)
heights_ceil = np.ceil(heights)
    resol = cfg.MODEL.ROI_KE_HEAD.RESOLUTION  # heatmap resolution, typically 56
if maps.shape[-2:] == (1, resol):
xory_mode = 0 # x mode
elif maps.shape[-2:] == (resol, 1):
xory_mode = 1 # y mode
else:
assert(0), 'invalid mode.'
# print("maps", maps.shape, maps[0,0], maps[0,1])
# NCHW to NHWC for use with OpenCV
maps = np.transpose(maps, [0, 2, 3, 1])
min_size = 0 # cfg
num_kes = int(cfg.MODEL.ROI_KE_HEAD.NUM_KES/2)+2
d_preds = np.zeros(
(len(rois), 2, num_kes), dtype=np.float32)
d_scores = np.zeros(scores.shape, dtype=np.float32)
assert(len(rois) == maps.shape[0]), 'shape mismatch {}, {}, {}, {}'.format(str(len(rois)), \
str(rois.shape), \
str(maps.shape[0]), \
str(maps.shape))
normal = 0
innormal = 0
for i in range(len(rois)):
if min_size > 0:
roi_map_width = int(np.maximum(widths_ceil[i], min_size))
roi_map_height = int(np.maximum(heights_ceil[i], min_size))
else:
roi_map_width = widths_ceil[i]
roi_map_height = heights_ceil[i]
width_correction = widths[i] / roi_map_width
height_correction = heights[i] / roi_map_height
np.set_printoptions(suppress=True)
# print(i, "stop", maps.shape, np.around(maps[i][0, :, :], decimals=2))
if not xory_mode:
roi_map = cv2.resize(
maps[i], (roi_map_width, 1), interpolation=cv2.INTER_CUBIC)
else:
roi_map = cv2.resize(
maps[i], (1, roi_map_height), interpolation=cv2.INTER_CUBIC)
# print(roi_map.shape, np.around(roi_map[0, :, :], decimals=2))
# Bring back to CHW
roi_map = np.transpose(roi_map, [2, 0, 1])
roi_map_probs = scores_to_probs(roi_map.copy())
# kescore visulize.
map_vis = np.transpose(maps[i], [2, 0, 1])
map_vis = scores_to_probs(map_vis.copy())
sum_score = []
if cfg.MODEL.ROI_KE_HEAD.RESCORING:
for k in range(num_kes):
if map_vis[k].shape[0] == 1:
x = np.arange(0, len(map_vis[k][0]), 1)
y = map_vis[k][0]
else:
x = np.arange(0, len(map_vis[k][:, 0]), 1)
y = map_vis[k][:, 0]
top = y.max()
atop = y.argmax()
# lf2&1
lf2 = max(atop-2, 0)
lf1 = max(atop-1, 0)
rt2 = min(atop+2, 55)
rt1 = min(atop+1, 55)
sum_score.append(top+y[lf2]+y[lf1]+y[rt1]+y[rt2])
kes_score_mean = sum(sum_score)*1.0/len(sum_score)
gama = cfg.MODEL.ROI_KE_HEAD.RESCORING_GAMA
final_score = (scores[i]*(2.0-gama)+gama*kes_score_mean)*0.5
# rescore
d_scores[i] = final_score
else:
d_scores[i] = scores[i]
w = roi_map.shape[2]
for k in range(num_kes):
pos = roi_map[k, :, :].argmax()
x_int = pos % w
y_int = (pos - x_int) // w
assert (roi_map_probs[k, y_int, x_int] ==
roi_map_probs[k, :, :].max())
x = (x_int + 0.5) * width_correction
y = (y_int + 0.5) * height_correction
if not xory_mode:
d_preds[i, 0, k] = x + offset_x[i]
d_preds[i, 1, k] = roi_map_probs[k, y_int, x_int]
else:
d_preds[i, 0, k] = y + offset_y[i]
d_preds[i, 1, k] = roi_map_probs[k, y_int, x_int]
out_kes_d = kes_decode(d_preds)
return np.transpose(out_kes_d, [0, 2, 1]), d_scores
from maskrcnn_benchmark.structures.bounding_box import BoxList
from maskrcnn_benchmark.structures.ke import textKES
class KEer(object):
"""
    Projects a set of masks onto the locations in an image
    specified by the bounding boxes
"""
def __init__(self, padding=0, cfg =None):
self.padding = padding
self.cfg =cfg
def compute_flow_field_cpu(self, boxes):
im_w, im_h = boxes.size
boxes_data = boxes.bbox
num_boxes = len(boxes_data)
device = boxes_data.device
TO_REMOVE = 1
boxes_data = boxes_data.int()
box_widths = boxes_data[:, 2] - boxes_data[:, 0] + TO_REMOVE
box_heights = boxes_data[:, 3] - boxes_data[:, 1] + TO_REMOVE
box_widths.clamp_(min=1)
box_heights.clamp_(min=1)
boxes_data = boxes_data.tolist()
box_widths = box_widths.tolist()
box_heights = box_heights.tolist()
flow_field = torch.full((num_boxes, im_h, im_w, 2), -2)
# TODO maybe optimize to make it GPU-friendly with advanced indexing
# or dedicated kernel
for i in range(num_boxes):
w = box_widths[i]
h = box_heights[i]
if w < 2 or h < 2:
continue
x = torch.linspace(-1, 1, w)
y = torch.linspace(-1, 1, h)
# meshogrid
x = x[None, :].expand(h, w)
y = y[:, None].expand(h, w)
b = boxes_data[i]
x_0 = max(b[0], 0)
x_1 = min(b[2] + 0, im_w)
y_0 = max(b[1], 0)
y_1 = min(b[3] + 0, im_h)
flow_field[i, y_0:y_1, x_0:x_1, 0] = x[(y_0 - b[1]):(y_1 - b[1]),(x_0 - b[0]):(x_1 - b[0])]
flow_field[i, y_0:y_1, x_0:x_1, 1] = y[(y_0 - b[1]):(y_1 - b[1]),(x_0 - b[0]):(x_1 - b[0])]
return flow_field.to(device)
def compute_flow_field(self, boxes):
return self.compute_flow_field_cpu(boxes)
# TODO make it work better for batches
def forward_single_image(self, masks, boxes):
boxes = boxes.convert('xyxy')
if self.padding:
boxes = BoxList(boxes.bbox.clone(), boxes.size, boxes.mode)
masks, scale = expand_masks(masks, self.padding)
boxes.bbox = expand_boxes(boxes.bbox, scale)
flow_field = self.compute_flow_field(boxes)
result = torch.nn.functional.grid_sample(masks, flow_field)
return result
def to_points(self, masks):
height, width = masks.shape[-2:]
m = masks.view(masks.shape[:2] + (-1,))
scores, pos = m.max(-1)
x_int = pos % width
y_int = (pos - x_int) // width
result = torch.stack([x_int.float(), y_int.float(), torch.ones_like(x_int, dtype=torch.float32)], dim=2)
return result
def __call__(self, masks, boxes):
# TODO do this properly
if isinstance(boxes, BoxList):
boxes = [boxes]
if isinstance(masks, list):
masks = torch.stack(masks, dim=0)
assert(len(masks.size()) == 4)
scores = boxes[0].get_field("scores")
result, rescores = heatmaps_to_kes(masks.detach().cpu().numpy(), boxes[0].bbox.cpu().numpy(), scores.cpu().numpy(), self.cfg)
return torch.from_numpy(result).to(masks.device), torch.from_numpy(rescores).to(masks.device)
def make_roi_ke_post_processor(cfg):
if cfg.MODEL.ROI_KE_HEAD.POSTPROCESS_KES:
keer = KEer(padding=0, cfg=cfg)
else:
keer = None
ke_post_processor = kePostProcessor(keer,cfg)
return ke_post_processor
|
43950
|
import streamlit as st
import numpy as np
import matplotlib.pyplot as plt
perc_heads = st.number_input(label='Chance of Coins Landing on Heads', min_value=0.0, max_value=1.0, value=.5)
graph_title = st.text_input(label='Graph Title')
binom_dist = np.random.binomial(1, perc_heads, 1000)
list_of_means = []
for i in range(0, 1000):
list_of_means.append(np.random.choice(binom_dist, 100, replace=True).mean())
fig, ax = plt.subplots()
plt.hist(list_of_means, range=[0,1])
plt.title(graph_title)
st.pyplot(fig)
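# Hedged usage note (not part of the original app): save this script and launch it with
#   streamlit run <script_name>.py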
|
43953
|
import sys
import emailprotectionslib.dmarc as dmarc
from MaltegoTransform import *
mt = MaltegoTransform()
mt.parseArguments(sys.argv)
domain = mt.getValue()
mt = MaltegoTransform()
try:
dmarc_record = dmarc.DmarcRecord.from_domain(domain)
    # print(dmarc_record)
    mt.addEntity("maltego.Phrase", "DMARC Record: " + str(dmarc_record))
except Exception:
    mt.addUIMessage("Exception Occurred", messageType="PartialError")
mt.returnOutput()
|
44005
|
import tkinter as tk
class ScrolledFrame(tk.Frame):
def __init__(self, parent, vertical=True, horizontal=False):
super().__init__(parent)
# canvas for inner frame
self._canvas = tk.Canvas(self)
self._canvas.grid(row=0, column=0, sticky='news') # changed
# create right scrollbar and connect to canvas Y
self._vertical_bar = tk.Scrollbar(self, orient='vertical', command=self._canvas.yview)
if vertical:
self._vertical_bar.grid(row=0, column=1, sticky='ns')
self._canvas.configure(yscrollcommand=self._vertical_bar.set)
# create bottom scrollbar and connect to canvas X
self._horizontal_bar = tk.Scrollbar(self, orient='horizontal', command=self._canvas.xview)
if horizontal:
self._horizontal_bar.grid(row=1, column=0, sticky='we')
self._canvas.configure(xscrollcommand=self._horizontal_bar.set)
# inner frame for widgets
self.inner = tk.Frame(self._canvas)
self._window = self._canvas.create_window((0, 0), window=self.inner, anchor='nw')
# autoresize inner frame
self.columnconfigure(0, weight=1) # changed
self.rowconfigure(0, weight=1) # changed
# resize when configure changed
self.inner.bind('<Configure>', self.resize)
# resize inner frame to canvas size
self.resize_width = False
self.resize_height = False
self._canvas.bind('<Configure>', self.inner_resize)
def resize(self, event=None):
self._canvas.configure(scrollregion=self._canvas.bbox('all'))
def inner_resize(self, event):
# resize inner frame to canvas size
if self.resize_width:
self._canvas.itemconfig(self._window, width=event.width)
if self.resize_height:
self._canvas.itemconfig(self._window, height=event.height)
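# Hedged usage sketch (window size, row count and labels are made up, not from the
# original widget): embed child widgets in `inner` and let the canvas handle scrolling.
if __name__ == "__main__":
    root = tk.Tk()
    root.geometry("300x200")
    frame = ScrolledFrame(root, vertical=True, horizontal=True)
    frame.pack(fill="both", expand=True)
    for i in range(30):
        tk.Label(frame.inner, text="row {}".format(i)).grid(row=i, column=0, sticky="w")
    root.mainloop()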
|
44012
|
import os
import sys
import unittest
from test.support import run_unittest, import_module
# Skip tests if we don't have threading.
import_module('threading')
# Skip tests if we don't have concurrent.futures.
import_module('concurrent.futures')
def suite():
tests = unittest.TestSuite()
loader = unittest.TestLoader()
for fn in os.listdir(os.path.dirname(__file__)):
if fn.startswith("test") and fn.endswith(".py"):
mod_name = 'test.test_asyncio.' + fn[:-3]
try:
__import__(mod_name)
except unittest.SkipTest:
pass
else:
mod = sys.modules[mod_name]
tests.addTests(loader.loadTestsFromModule(mod))
return tests
def test_main():
run_unittest(suite())
|
44029
|
import ipaddress
from collections import defaultdict
from autonetkit.design.utils import filters
from autonetkit.design.utils.general import group_by
from autonetkit.network_model.types import LAYER3_DEVICES
def assign_loopbacks(topology):
"""
@param topology:
"""
layer3_nodes = [n for n in topology.nodes()
if n.type in LAYER3_DEVICES]
loopback_block = ipaddress.IPv4Network("172.16.0.0/16")
loopback_subnets = loopback_block.subnets(new_prefix=24)
grouped_l3 = group_by(layer3_nodes, "asn")
allocated_loopbacks = defaultdict(list)
for asn, nodes in grouped_l3.items():
# can repeat the loopbacks in each asn
subnet = next(loopback_subnets)
allocated_loopbacks[asn].append(subnet)
host_ips = subnet.hosts()
for node in nodes:
host_ip = next(host_ips)
lo0 = node.loopback_zero()
lo0.set("ip", host_ip)
# also map onto node for debugging/vis
topology.set("loopbacks_by_asn", allocated_loopbacks)
def assign_bc_subnets(topology):
"""
@param topology:
"""
# the network to use to address end hosts and for inter-domain connections
allocated_blocks = defaultdict(list)
global_advertise_network = ipaddress.IPv4Network("10.0.0.0/8")
global_subnets = global_advertise_network.subnets(new_prefix=16)
bc_nodes = filters.broadcast_domains(topology)
grouped_bc = group_by(bc_nodes, "asn")
for asn, nodes in grouped_bc.items():
asn_block = next(global_subnets)
allocated_blocks[asn].append(asn_block)
# quick method: allocate a /24 to each broadcast domain
# Note: this could be significantly optimised in the future
# Note: could allocate different block to internal infrastructure too
external_blocks = asn_block.subnets(new_prefix=24)
for bc in nodes:
bc_block = next(external_blocks)
bc.set("network", bc_block)
topology.set("infrastructure_by_asn", allocated_blocks)
|
44041
|
from django import VERSION
if VERSION < (1, 6):
# Before django 1.6, Django was not able to find tests in tests/tests.py
from .tests import *
|
44051
|
import discord
from discord.ext import commands
class UserBlacklisted(commands.CommandError):
"""
An exception when the user is blacklisted from the bot
"""
pass
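# Illustrative usage sketch (the check below and BLACKLISTED_IDS are hypothetical placeholders,
# not part of the original code):
#
#   def blacklist_check(ctx):
#       if ctx.author.id in BLACKLISTED_IDS:
#           raise UserBlacklisted("You are blacklisted from using this bot.")
#       return True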
|
44082
|
from django.core.files.uploadedfile import SimpleUploadedFile
from easy_tenants import tenant_context_disabled
from easy_tenants.storage import TenantFileSystemStorage
def test_default_storage(tenant_ctx, settings):
tenant_id = str(tenant_ctx.id)
s = TenantFileSystemStorage()
file = SimpleUploadedFile("test.txt", b"any content")
s.save("test.txt", file)
assert s.exists("test.txt")
assert s.path("test.txt") == f"{settings.MEDIA_ROOT}/{tenant_id}/test.txt"
assert s.url("test.txt") == f"{settings.MEDIA_URL}{tenant_id}/test.txt"
def test_default_storage_without_tenant(settings):
with tenant_context_disabled():
s = TenantFileSystemStorage()
file = SimpleUploadedFile("test.txt", b"any content")
s.save("test.txt", file)
assert s.exists("test.txt")
assert s.path("test.txt") == f"{settings.MEDIA_ROOT}/test.txt"
assert s.url("test.txt") == f"{settings.MEDIA_URL}test.txt"
def test_custom_base_location(tenant_ctx, settings):
location = f"{settings.MEDIA_ROOT}/2"
s = TenantFileSystemStorage(location=location, base_url="custom_url")
file = SimpleUploadedFile("test.txt", b"any content")
s.save("test.txt", file)
assert s.exists("test.txt")
assert s.path("test.txt") == f"{location}/test.txt"
assert s.url("test.txt") == "custom_url/test.txt"
|
44105
|
import os
import sys
import imp
import argparse
import time
import math
import numpy as np
from utils import utils
from utils.imageprocessing import preprocess
from utils.dataset import Dataset
from network import Network
from evaluation.lfw import LFWTest
def main(args):
paths = [
r'F:\data\face-recognition\lfw\lfw-112-mxnet\Abdoulaye_Wade\Abdoulaye_Wade_0002.jpg',
r'F:\data\face-recognition\lfw\lfw-112-mxnet\Abdoulaye_Wade\Abdoulaye_Wade_0003.jpg',
r'F:\data\face-recognition\realsense\data-labeled-clean-strict2-112-mxnet\rgb\001-chenkai\a-000013.jpg',
r'F:\data\face-recognition\realsense\data-labeled-clean-strict2-112-mxnet\rgb\001-chenkai\rgb_2.jpg',
r'F:\data\face-recognition\lfw\lfw-112-mxnet\Abdoulaye_Wade\Abdoulaye_Wade_0002.jpg',
r'F:\data\face-recognition\realsense\data-labeled-clean-strict2-112-mxnet\rgb\001-chenkai\rgb_2.jpg',
r'F:\data\face-recognition\lfw\lfw-112-mxnet\Abdoulaye_Wade\Abdoulaye_Wade_0003.jpg',
r'F:\data\face-recognition\realsense\data-labeled-clean-strict2-112-mxnet\rgb\001-chenkai\rgb_2.jpg',
]
print('%d images to load.' % len(paths))
assert(len(paths)>0)
# Load model files and config file
network = Network()
network.load_model(args.model_dir)
# network.config.preprocess_train = []
# network.config.preprocess_test = []
images = preprocess(paths, network.config, False)
import cv2
# images = np.array([cv2.resize(img, (96, 96)) for img in images])
# images = (images - 128.) / 128.
# images = images[..., ::-1]
print(images.shape)
# print(images[0,:5,:5,0])
# Run forward pass to calculate embeddings
mu, sigma_sq = network.extract_feature(images, args.batch_size, verbose=True)
print(mu.shape, sigma_sq.shape)
print('sigma_sq', np.max(sigma_sq), np.min(sigma_sq), np.mean(sigma_sq), np.exp(np.mean(np.log(sigma_sq))))
log_sigma_sq = np.log(sigma_sq)
print('log_sigma_sq', np.max(log_sigma_sq), np.min(log_sigma_sq), np.mean(log_sigma_sq))
# print('sigma_sq', sigma_sq)
feat_pfe = np.concatenate([mu, sigma_sq], axis=1)
score = utils.pair_cosin_score(mu[::2], mu[1::2])
print(score)
score = utils.pair_MLS_score(feat_pfe[::2], feat_pfe[1::2])
print(score)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--model_dir", help="The path to the pre-trained model directory",
type=str,
default=r'D:\chenkai\Probabilistic-Face-Embeddings-master\log\resface64_relu_msarcface_am_PFE/20191229-172304-iter15')
parser.add_argument("--batch_size", help="Number of images per mini batch",
type=int, default=128)
args = parser.parse_args()
main(args)
|
44144
|
from collections import namedtuple
from turtle import fd, heading, lt, pd, position, pu, rt, setheading, setposition # pylint: disable=no-name-in-module
from pudzu.utils import weighted_choice
class LSystem:
Rule = namedtuple("Rule", "predecessor successor weight", defaults=(1.0,))
def __init__(self, axiom, rules, angle=4):
self.axiom = axiom
self.angle = 360 / angle
self.rules = {}
self.weights = {}
for rule in rules:
pr = self.Rule(*rule)
self.rules.setdefault(pr.predecessor, []).append(pr.successor)
self.weights.setdefault(pr.predecessor, []).append(pr.weight)
def expand(self, iterations):
state = self.axiom
for _ in range(iterations):
state = "".join([weighted_choice(self.rules.get(c, [c]), self.weights.get(c, [1])) for c in state])
return state
def plot(self, screen, iterations, size, reset=True, tracer=(0, 0)):
if reset:
screen.clearscreen()
screen.tracer(*tracer)
stack = []
for c in self.expand(iterations):
if c == "F":
fd(size)
elif c == "G":
pu()
fd(size)
pd()
elif c == "+":
rt(self.angle)
elif c == "-":
lt(self.angle)
elif c == "[":
stack.append((position(), heading()))
elif c == "]":
p, h = stack.pop()
pu()
setposition(p)
setheading(h)
pd()
screen.update()
Koch = LSystem("F--F--F", [("F", "F+F--F+F")], 6)
Dragon = LSystem("FX", [("F", ""), ("Y", "+FX--FY+"), ("X", "-FX++FY-")], 8)
Plant07 = LSystem("Z", [("Z", "ZFX[+Z][-Z]"), ("X", "X[-FFF][+FFF]FX")], 14)
Plant08 = LSystem("SLFFF", [("S", "[+++Z][---Z]TS"), ("Z", "+H[-Z]L"), ("H", "-Z[+H]L"), ("T", "TL"), ("L", "[-FFF][+FFF]F")], 20)
Sierpinski = LSystem("AF", [("A", "BF+AF+BF"), ("B", "AF-BF-AF"), ("F", "")], 6)
Barnsley = LSystem("X", [("X", "F+[[X]-X]-F[-FX]+X"), ("F", "FF")], 14.4)
RandomWalk = LSystem("F", [("F", "FF"), ("F", "F+F"), ("F", "F++F"), ("F", "F-F")], 4)
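# Illustrative usage sketch (assumes a Tk display is available; the iteration count and step
# size are arbitrary): expand the Koch curve three times and draw it with the turtle module.
if __name__ == "__main__":
    import turtle
    screen = turtle.Screen()
    Koch.plot(screen, iterations=3, size=5)
    turtle.done()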
|
44155
|
from starling_sim.basemodel.output.kpis import KPI
import logging
import pandas as pd
class KpiOutput:
def __init__(self, population_names, kpi_list, kpi_name=None):
# name of the kpi, will compose the kpi filename : <kpi_name>.csv
if kpi_name is None:
if isinstance(population_names, list):
self.name = "_&_".join(population_names) + "_kpi"
else:
self.name = population_names + "_kpi"
else:
self.name = kpi_name
# population of agent to evaluate
self.population_names = population_names
self.populations = None
# list of kpi to evaluate the given agents
self.kpi_list = kpi_list
# output file
self.filename = None
self.folder = None
def setup(self, filename, folder, simulation_model):
"""
Setup method called during simulation setup.
Sets the values of out file and folder, and call setup for KPIs.
:param filename: .csv file
:param folder:
:param simulation_model:
:return:
"""
self.filename = filename
self.folder = folder
for kpi in self.kpi_list:
kpi.setup(simulation_model)
if isinstance(self.population_names, list):
self.populations = [simulation_model.agentPopulation[population_name]
for population_name in self.population_names]
else:
self.populations = [simulation_model.agentPopulation[self.population_names]]
def agent_kpi_dict(self, agent):
"""
Computes the KPIs for the given agent
by calling their update method for all its trace
:param agent:
:return:
"""
indicators_dict = dict()
# get agent trace
events = agent.trace.eventList
# evaluate all indicators in a single pass
for event in events:
for kpi in self.kpi_list:
kpi.update(event, agent)
# merge all completed indicators
for kpi in self.kpi_list:
indicators_dict.update(kpi.indicator_dict)
# raising a warning with sphinx
# indicators_dict = {**indicators_dict, **kpi.indicator_dict}
# reset kpi values
kpi.new_indicator_dict()
# return complete indicator dict
return indicators_dict
def write_kpi_table(self):
"""
Write the KPI of the population in the csv file
obtained from out file attributes
The KPIs evaluated are defined by the kpi_list attribute
"""
# first row is always agent's id, then we add the kpi_list keys
header_list = [KPI.KEY_ID]
for kpi in self.kpi_list:
header_list += kpi.keys
path = self.folder + self.filename
kpi_table = pd.DataFrame()
# compute the kpi table for each population dict
for population in self.populations:
kpi_table = pd.concat([kpi_table, self.compute_population_kpi_table(population)])
# do not generate a kpi output if the kpi table is empty
if kpi_table.empty:
return
# generate kpi output
logging.info("Generating KPI output in file " + path)
try:
# write the dataframe into a csv file
kpi_table.to_csv(path, sep=";", index=False, columns=header_list)
except KeyError as e:
logging.warning("Could not generate kpi output {}, "
"error occurred : {}".format(path, e))
def compute_population_kpi_table(self, population):
"""
Compute a kpi table for the given population dict.
:param population: population dict {id: agent}
:return: DataFrame containing the KPI values
"""
df_output = pd.DataFrame()
for agent in population.values():
# create kpi dict for the agent
agent_indicators = self.agent_kpi_dict(agent)
# build a dataframe from the dict
if isinstance(agent_indicators[KPI.KEY_ID], list):
df = pd.DataFrame(agent_indicators)
else:
df = pd.DataFrame(agent_indicators, index=[0])
# append the dataframe to the total output
df_output = pd.concat([df_output, df])
return df_output
|
44156
|
import torch
from torch import nn as nn
from torch.nn import functional as F
from torch.autograd import Variable
from model.tensorized_layers.graphsage import BatchedGraphSAGE
class DiffPoolAssignment(nn.Module):
def __init__(self, nfeat, nnext):
super().__init__()
self.assign_mat = BatchedGraphSAGE(nfeat, nnext, use_bn=True)
def forward(self, x, adj, log=False):
s_l_init = self.assign_mat(x, adj)
s_l = F.softmax(s_l_init, dim=-1)
return s_l
|
44157
|
import os
import pyblish.api
from openpype.lib import OpenPypeMongoConnection
class IntegrateContextToLog(pyblish.api.ContextPlugin):
""" Adds context information to log document for displaying in front end"""
label = "Integrate Context to Log"
order = pyblish.api.IntegratorOrder - 0.1
hosts = ["webpublisher"]
def process(self, context):
self.log.info("Integrate Context to Log")
mongo_client = OpenPypeMongoConnection.get_mongo_client()
database_name = os.environ["OPENPYPE_DATABASE_NAME"]
dbcon = mongo_client[database_name]["webpublishes"]
for instance in context:
self.log.info("ctx_path: {}".format(instance.data.get("ctx_path")))
self.log.info("batch_id: {}".format(instance.data.get("batch_id")))
if instance.data.get("ctx_path") and instance.data.get("batch_id"):
self.log.info("Updating log record")
dbcon.update_one(
{
"batch_id": instance.data.get("batch_id"),
"status": "in_progress"
},
{"$set":
{
"path": instance.data.get("ctx_path")
}}
)
return
|
44182
|
from .model_action import ModelAction
class HVACTemplate(ModelAction):
# this shows the ip to si conversion rate
# if unit is 'ip', then multiply this rate.
# for window it is the U-value
# convert U-value IP to SI
# The conversion will change w/ft2 to w/m2 if ip shows
NUM_HVAC = 14
def __init__(self, unit="si"):
ModelAction.__init__(self, 'hvac_template', unit)
self._measure_name = 'HVAC'
self._default_list = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]
# DOAS + VRF as default
self._data = 10
self._lower_limit = 0
self._upper_limit = HVACTemplate.NUM_HVAC - 1
self._measure_help = '''
measure name: HVAC
Unit: not required
Minimum: 0
Maximum: 13
Type: categorical
This measure will change the HVAC system in the idf file
The HVAC system types are:
0. sys1: PTAC
1. sys2: PTHP
2. sys3: PSZ-AC
3. sys4: PSZ-HP
4. sys5: Packaged VAV with Reheat
5. sys6: Packaged VAV with PFP Boxes
6. sys7: VAV with Reheat
7. sys8: VAV with PFP Boxes
8. sys9: Warm air furnace, gas fired
9. sys10: Warm air furnace, electric
10. doasvrf: DOAS with variable refrigerant flow
11. doasfancoil: DOAS with Fan coils
12. doaswshp: DOAS with water source heat pump (ground as condenser)
13. doascbcb: DOAS with active cool beam + convective baseboard
14. vavfourpipebeam: VAV system with four pipe beam
'''
def _unit_convert_ratio(self):
return 1.0
|
44209
|
import matplotlib.pyplot as plt
import numpy as np
def plot_convolution(f, g):
fig, (ax1, ax2, ax3) = plt.subplots(3, 1)
ax1.set_yticklabels([])
ax1.set_xticklabels([])
ax1.plot(f, color='blue', label='f')
ax1.legend()
ax2.set_yticklabels([])
ax2.set_xticklabels([])
ax2.plot(g, color='red', label='g')
ax2.legend()
filtered = np.convolve(f, g, "same") / sum(g)
ax3.set_yticklabels([])
ax3.set_xticklabels([])
ax3.plot(filtered, color='green', label='f * g')
ax3.legend()
plt.show()
def plot_convolution_step_by_step(f, g):
fig, (ax1, ax2, ax3, ax4, ax5) = plt.subplots(5, 1)
ax1.set_yticklabels([])
ax1.set_xticklabels([])
ax1.plot(f, color='blue', label='f')
ax1.plot(np.roll(g, -10000), color='red', label='g')
ax2.set_yticklabels([])
ax2.set_xticklabels([])
ax2.plot(f, color='blue', label='f')
ax2.plot(np.roll(g, -5000), color='red', label='g')
ax3.set_yticklabels([])
ax3.set_xticklabels([])
ax3.plot(f, color='blue', label='f')
ax3.plot(g, color='red', label='g')
ax4.set_yticklabels([])
ax4.set_xticklabels([])
ax4.plot(f, color='blue', label='f')
ax4.plot(np.roll(g, 5000), color='red', label='g')
ax5.set_yticklabels([])
ax5.set_xticklabels([])
ax5.plot(f, color='blue', label='f')
ax5.plot(np.roll(g, 10000), color='red', label='g')
plt.show()
signal = np.zeros(30000)
signal[10000:20000] = 1
kernel = np.zeros(30000)
kernel[10000:20000] = np.linspace(1, 0, 10000)
plot_convolution(signal, kernel)
plot_convolution_step_by_step(signal, kernel)
|
44224
|
import logging
import math
import torch
import torch.nn as nn
from vedastr.models.bodies import build_sequence_decoder
from vedastr.models.utils import build_torch_nn
from vedastr.models.weight_init import init_weights
from .registry import HEADS
logger = logging.getLogger()
@HEADS.register_module
class TransformerHead(nn.Module):
def __init__(
self,
decoder,
generator,
embedding,
num_steps,
pad_id,
src_from,
src_mask_from=None,
):
super(TransformerHead, self).__init__()
self.decoder = build_sequence_decoder(decoder)
self.generator = build_torch_nn(generator)
self.embedding = build_torch_nn(embedding)
self.num_steps = num_steps
self.pad_id = pad_id
self.src_from = src_from
self.src_mask_from = src_mask_from
logger.info('TransformerHead init weights')
init_weights(self.modules())
def pad_mask(self, text):
pad_mask = (text == self.pad_id)
pad_mask[:, 0] = False
pad_mask = pad_mask.unsqueeze(1)
return pad_mask
def order_mask(self, text):
t = text.size(1)
order_mask = torch.triu(torch.ones(t, t), diagonal=1).bool()
order_mask = order_mask.unsqueeze(0).to(text.device)
return order_mask
def text_embedding(self, texts):
tgt = self.embedding(texts)
tgt *= math.sqrt(tgt.size(2))
return tgt
def forward(self, feats, texts):
src = feats[self.src_from]
if self.src_mask_from:
src_mask = feats[self.src_mask_from]
else:
src_mask = None
if self.training:
tgt = self.text_embedding(texts)
tgt_mask = (self.pad_mask(texts) | self.order_mask(texts))
out = self.decoder(tgt, src, tgt_mask, src_mask)
out = self.generator(out)
else:
out = None
for _ in range(self.num_steps):
tgt = self.text_embedding(texts)
tgt_mask = self.order_mask(texts)
out = self.decoder(tgt, src, tgt_mask, src_mask)
out = self.generator(out)
next_text = torch.argmax(out[:, -1:, :], dim=-1)
texts = torch.cat([texts, next_text], dim=-1)
return out
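# Note on the masks above (illustrative, not part of the original module): order_mask builds a
# causal mask so each position can only attend to itself and earlier positions. For t = 3,
# torch.triu(torch.ones(3, 3), diagonal=1).bool() gives
#   [[False,  True,  True],
#    [False, False,  True],
#    [False, False, False]]
# pad_mask additionally marks padding positions (except the first step) so they are ignored.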
|
44233
|
from django.db import models
__all__ = ('PostSaveImageField',)
class PostSaveImageField(models.ImageField):
def __init__(self, *args, **kwargs):
kwargs['null'] = True
kwargs['blank'] = True
super(PostSaveImageField, self).__init__(*args, **kwargs)
def contribute_to_class(self, cls, name):
super(PostSaveImageField, self).contribute_to_class(cls, name)
models.signals.post_save.connect(self.save_file, sender=cls)
def save_file(self, sender, instance, created, **kwargs):
file = super(PostSaveImageField, self).pre_save(instance, created)
if file:
instance.__class__.objects \
.filter(pk=instance.pk).update(**{self.attname: file.name})
def pre_save(self, model_instance, add):
pass
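# Hypothetical usage sketch (the model and upload_to path are placeholders, not from the
# original code):
#
#   class Article(models.Model):
#       cover = PostSaveImageField(upload_to='covers/')
#
# pre_save() returns None, so the column is left empty on the initial save; the post_save
# handler then saves the file and writes its name back with a queryset .update(), which avoids
# triggering a second save()/post_save cycle.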
|
44236
|
import sys
import soundcard
import numpy
import pytest
skip_if_not_linux = pytest.mark.skipif(sys.platform != 'linux', reason='Only implemented for PulseAudio so far')
ones = numpy.ones(1024)
signal = numpy.concatenate([[ones], [-ones]]).T
def test_speakers():
for speaker in soundcard.all_speakers():
assert isinstance(speaker.name, str)
assert hasattr(speaker, 'id')
assert isinstance(speaker.channels, int)
assert speaker.channels > 0
def test_microphones():
for microphone in soundcard.all_microphones():
assert isinstance(microphone.name, str)
assert hasattr(microphone, 'id')
assert isinstance(microphone.channels, int)
assert microphone.channels > 0
def test_default_playback():
soundcard.default_speaker().play(signal, 44100, channels=2)
def test_default_record():
recording = soundcard.default_microphone().record(1024, 44100)
    assert len(recording) == 1024
def test_default_blockless_record():
recording = soundcard.default_microphone().record(None, 44100)
@skip_if_not_linux
def test_name():
# The default is the application name, so when run from pytest,
# it’s “pytest” or “_jb_pytest_runner.py” or so.
assert 'pytest' in soundcard.get_name()
soundcard.set_name('testapp')
assert soundcard.get_name() == 'testapp'
@skip_if_not_linux
@pytest.mark.parametrize("argv,progname", [
(["./script.py"], "script.py"), # chmod +x script.py; ./script.py
(["path/to/script.py"], "script.py"), # python path/to/script.py or
# python -m path.to.script
(["module/__main__.py"], "module"), # python -m module
(["-m", "module.submodule"], "module.submodule"), # rare unresolved case
(["-c", "import soundcard; soundcard.foo()"], "import soundcard; soundcard.fo..."),
])
def test_infer_name(monkeypatch, argv, progname):
infer = soundcard.pulseaudio._PulseAudio._infer_program_name
monkeypatch.setattr(sys, "argv", argv)
assert infer() == progname
@pytest.fixture
def loopback_speaker():
import sys
if sys.platform == 'win32':
# must install https://www.vb-audio.com/Cable/index.htm
return soundcard.get_speaker('Cable')
elif sys.platform == 'darwin':
# must install soundflower
return soundcard.get_speaker('Soundflower64')
elif sys.platform == 'linux':
# pacmd load-module module-null-sink channels=6 rate=48000
return soundcard.get_speaker('Null')
else:
raise RuntimeError('Unknown platform {}'.format(sys.platform))
@pytest.fixture
def loopback_player(loopback_speaker):
with loopback_speaker.player(48000, channels=2, blocksize=512) as player:
yield player
@pytest.fixture
def loopback_microphone():
if sys.platform == 'win32':
# must install https://www.vb-audio.com/Cable/index.htm
return soundcard.get_microphone('Cable')
elif sys.platform == 'darwin':
# must install soundflower
return soundcard.get_microphone('Soundflower64')
elif sys.platform == 'linux':
return soundcard.get_microphone('Null', include_loopback=True)
else:
raise RuntimeError('Unknown platform {}'.format(sys.platform))
@pytest.fixture
def loopback_recorder(loopback_microphone):
with loopback_microphone.recorder(48000, channels=2, blocksize=512) as recorder:
yield recorder
def test_loopback_playback(loopback_player, loopback_recorder):
loopback_player.play(signal)
recording = loopback_recorder.record(1024*10)
assert recording.shape[1] == 2
left, right = recording.T
assert left.mean() > 0
assert right.mean() < 0
assert (left > 0.5).sum() == len(signal)
assert (right < -0.5).sum() == len(signal)
def test_loopback_reverse_recorder_channelmap(loopback_player, loopback_microphone):
with loopback_microphone.recorder(48000, channels=[1, 0], blocksize=512) as loopback_recorder:
loopback_player.play(signal)
recording = loopback_recorder.record(1024*12)
assert recording.shape[1] == 2
left, right = recording.T
assert right.mean() > 0
assert left.mean() < 0
assert (right > 0.5).sum() == len(signal)
assert (left < -0.5).sum() == len(signal)
def test_loopback_reverse_player_channelmap(loopback_speaker, loopback_recorder):
with loopback_speaker.player(48000, channels=[1, 0], blocksize=512) as loopback_player:
loopback_player.play(signal)
recording = loopback_recorder.record(1024*12)
assert recording.shape[1] == 2
left, right = recording.T
assert right.mean() > 0
assert left.mean() < 0
assert (right > 0.5).sum() == len(signal)
assert (left < -0.5).sum() == len(signal)
def test_loopback_mono_player_channelmap(loopback_speaker, loopback_recorder):
with loopback_speaker.player(48000, channels=[0], blocksize=512) as loopback_player:
loopback_player.play(signal[:,0])
recording = loopback_recorder.record(1024*12)
assert recording.shape[1] == 2
left, right = recording.T
assert left.mean() > 0
if sys.platform == 'linux':
# unmapped channels on linux are filled with the mean of other channels
assert right.mean() < left.mean()
else:
assert abs(right.mean()) < 0.01 # something like zero
assert (left > 0.5).sum() == len(signal)
def test_loopback_mono_recorder_channelmap(loopback_player, loopback_microphone):
with loopback_microphone.recorder(48000, channels=[0], blocksize=512) as loopback_recorder:
loopback_player.play(signal)
recording = loopback_recorder.record(1024*12)
assert len(recording.shape) == 1 or recording.shape[1] == 1
assert recording.mean() > 0
assert (recording > 0.5).sum() == len(signal)
def test_loopback_multichannel_channelmap(loopback_speaker, loopback_microphone):
with loopback_speaker.player(48000, channels=[2, 0], blocksize=512) as loopback_player:
with loopback_microphone.recorder(48000, channels=[2, 0], blocksize=512) as loopback_recorder:
loopback_player.play(signal)
recording = loopback_recorder.record(1024*12)
assert len(recording.shape) == 2
left, right = recording.T
assert left.mean() > 0
assert right.mean() < 0
assert (left > 0.5).sum() == len(signal)
assert (right < -0.5).sum() == len(signal)
|
44280
|
from pycoin.networks.bitcoinish import create_bitcoinish_network
network = create_bitcoinish_network(
network_name="Monacoin", symbol="MONA", subnet_name="mainnet",
wif_prefix_hex="b0", sec_prefix="MONASEC:", address_prefix_hex="32", pay_to_script_prefix_hex="37",
bip32_prv_prefix_hex="0488ade4", bip32_pub_prefix_hex="0488b21e", bech32_hrp="mona",
magic_header_hex="fbc0b6db", default_port=9401,
dns_bootstrap=["dnsseed.monacoin.org"])
|
44299
|
import sublime
def pkg_settings():
# NOTE: The sublime.load_settings(...) call has to be deferred to this function,
# rather than just being called immediately and assigning a module-level variable,
# because of: https://www.sublimetext.com/docs/3/api_reference.html#plugin_lifecycle
return sublime.load_settings("Git blame.sublime-settings")
PKG_SETTINGS_KEY_CUSTOMBLAMEFLAGS = "custom_blame_flags"
PKG_SETTINGS_KEY_INLINE_BLAME_ENABLED = "inline_blame_enabled"
PKG_SETTINGS_KEY_INLINE_BLAME_DELAY = "inline_blame_delay"
|
44308
|
import torch
from torch import nn, Tensor
from typing import Tuple
from ..components import ResidualRNN
__all__ = ['Encoder', 'RNNEncoder', 'GRUEncoder']
class Encoder(nn.Module):
def __init__(self, input_size, hidden_size, embedding_dim, num_layers, bidirectional, device, pad_token=0, drop_rate=0.1):
super(Encoder, self).__init__()
self._hidden_size = hidden_size
self._input_size = input_size
self._embedding_dim = embedding_dim
self._num_layers = num_layers
self._bidirectional = bidirectional
self._device = device
self._embedding = nn.Embedding(input_size, self._embedding_dim, padding_idx=pad_token)
self._dropout = nn.Dropout(drop_rate)
def forward(self, input: Tensor, states: Tuple[Tensor, ...]) -> Tuple[Tensor, Tuple[Tensor, ...]]:
"""
:param input: (seq_len, batch_size, input_dim)
:param states: internal states of the RNN, each having dimension
(num_layers * num_directions, batch_size, hidden_size)
:return:
output: (seq_len, batch, num_directions * hidden_size)
states: states at final time step, each having dimension
(num_layers * num_directions, batch_size, hidden_size)
"""
raise NotImplementedError
def init_hidden(self, batch_size: int) -> Tuple[Tensor, ...]:
"""
Initialize the first zero hidden state
:param batch_size:
:return: Initial internal states, each of dim (num_layers * num_directions, batch_size, hidden_size)
"""
raise NotImplementedError
class RNNEncoder(Encoder):
def __init__(
self, rnn, input_size, hidden_size, embedding_dim,
num_layers, bidirectional, device, pad_token=0, drop_rate=0.1
):
super(RNNEncoder, self).__init__(
input_size, hidden_size, embedding_dim, num_layers, bidirectional, device, pad_token, drop_rate
)
self.rnn = rnn
def forward(self, input: Tensor, states: Tuple[Tensor, ...]) -> Tuple[Tensor, Tuple[Tensor, ...]]:
embedded = self._dropout(self._embedding(input))
output, hidden = self.rnn(embedded, states)
return output, hidden
class GRUEncoder(RNNEncoder):
def __init__(
self, input_size, hidden_size, embedding_dim, device, bias=False,
num_layers=1, dropout=0, bidirectional=False, pad_token=0, drop_rate=0.1
):
super(GRUEncoder, self).__init__(
nn.GRU(
embedding_dim, hidden_size,
bias=bias, num_layers=num_layers,
dropout=dropout,
bidirectional=bidirectional
),
input_size, hidden_size, embedding_dim, num_layers, bidirectional, device, pad_token, drop_rate
)
def init_hidden(self, batch_size: int) -> Tuple[Tensor, ...]:
"""
Initialize the first zero hidden state
:param batch_size:
        :return: Initial hidden state, of dimension (num_layers * num_directions, batch_size, hidden_size)
"""
first_dim = self._num_layers
if self._bidirectional:
first_dim *= 2
return (torch.zeros(first_dim, batch_size, self._hidden_size, device=self._device),)
class LSTMEncoder(RNNEncoder):
def __init__(
self, input_size, hidden_size, embedding_dim, device, bias=False,
num_layers=1, dropout=0, bidirectional=False, pad_token=0, drop_rate=0.1
):
super(LSTMEncoder, self).__init__(
nn.LSTM(
embedding_dim, hidden_size,
bias=bias, num_layers=num_layers,
dropout=dropout,
bidirectional=bidirectional
),
input_size, hidden_size, embedding_dim, num_layers, bidirectional, device, pad_token, drop_rate
)
def init_hidden(self, batch_size: int) -> Tuple[Tensor, ...]:
"""
Initialize the first zero hidden state
:param batch_size:
:return: Initial hidden state and cell state, each of dim (num_layers * num_directions, batch_size, hidden_size)
"""
first_dim = self._num_layers
if self._bidirectional:
first_dim *= 2
return (
torch.zeros(first_dim, batch_size, self._hidden_size, device=self._device),
torch.zeros(first_dim, batch_size, self._hidden_size, device=self._device)
)
class ResidualRNNEncoder(RNNEncoder):
def __init__(
self, base_rnn, input_size, hidden_size, embedding_dim, device, bias=False,
num_layers=1, dropout=0, bidirectional=False, pad_token=0, drop_rate=0.1
):
super(ResidualRNNEncoder, self).__init__(
ResidualRNN(
base_rnn=base_rnn, input_size=embedding_dim,
bias=bias, num_layers=num_layers,
dropout=dropout,
bidirectional=bidirectional
),
input_size, hidden_size, embedding_dim, num_layers, bidirectional, device, pad_token, drop_rate
)
# class ResidualGRUEncoder(RNNEncoder, GRUEncoder):
# def __init__(
# self, input_size, hidden_size, embedding_dim, device, bias=False,
# num_layers=1, dropout=0, bidirectional=False, pad_token=0, drop_rate=0.1
# ):
# super(ResidualGRUEncoder, self).__init__(
# nn.GRU, input_size, hidden_size, embedding_dim, num_layers, bidirectional,
# device, bias, num_layers, dropout, bidirectional, pad_token, drop_rate
# )
#
# class GRUEncoder(Encoder):
# def __init__(
# self, input_size, hidden_size, embedding_dim, device, bias=False,
# num_layers=1, dropout=0, bidirectional=False, pad_token=0, drop_rate=0.1):
# super(GRUEncoder, self).__init__(
# input_size, hidden_size, embedding_dim,
# num_layers, bidirectional, device, pad_token, drop_rate)
# self._gru = nn.GRU(
# embedding_dim, hidden_size,
# bias=bias, num_layers=num_layers,
# dropout=dropout,
# bidirectional=bidirectional
# )
# # self._gru = ResidualRNN(
# # nn.GRU, input_size=hidden_size,
# # bias=bias, num_layers=num_layers,
# # dropout=dropout,
# # )
# self.to(device)
#
# def forward(self, input: Tensor, hidden: Tensor) -> Tuple[Tensor, Tensor]:
# embedded = self._dropout(self._embedding(input))
# output, hidden = self._gru(embedded, hidden)
# return output, hidden
#
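# Illustrative usage sketch (vocabulary size, dimensions and device below are arbitrary
# placeholders). Note that nn.GRU expects its initial hidden state as a plain tensor, so the
# single-element tuple returned by init_hidden is unpacked before the call:
#
#   device = torch.device('cpu')
#   enc = GRUEncoder(input_size=1000, hidden_size=64, embedding_dim=32, device=device)
#   tokens = torch.randint(0, 1000, (7, 4))      # (seq_len, batch_size)
#   h0, = enc.init_hidden(batch_size=4)
#   output, h_n = enc(tokens, h0)                # output: (7, 4, 64), h_n: (1, 4, 64)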
|
44309
|
import torch
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
class LabelSmoothingCrossEntropy(torch.nn.Module):
def __init__(self):
super(LabelSmoothingCrossEntropy, self).__init__()
def forward(self, x, target, smoothing=0.1):
confidence = 1. - smoothing
logprobs = F.log_softmax(x, dim=-1)
nll_loss = -logprobs.gather(dim=-1, index=target.unsqueeze(1))
nll_loss = nll_loss.squeeze(1)
smooth_loss = -logprobs.mean(dim=-1)
loss = confidence * nll_loss + smoothing * smooth_loss
return loss.mean()
class ConfidenceLabelSmoothingCrossEntropy(torch.nn.Module):
def __init__(self):
super(ConfidenceLabelSmoothingCrossEntropy, self).__init__()
# self.confidence = [0.7425, 0.9325, 0.965, 0.5395, 0.86025, 0.754, 0.66475, 0.618, 0.7925, 0.6525, 0.5415,
# 0.5705, 0.6525, 0.59625, 0.6145, 0.62125, 0.7755, 0.866, 0.83425, 0.64125, 0.986, 0.82225,
# 0.70525, 0.5625, 0.5145, 0.5275, 0.57775, 0.918, 0.9175, 0.69575, 0.6555, 0.867, 0.945,
# 0.5155, 0.593, 0.976, 0.963, 0.591, 0.749, 0.5575, 0.52625, 0.6125, 0.83725, 0.97225,
# 0.93725, 0.6415, 0.61225, 0.584, 0.69175, 0.60825, 0.63575, 0.756, 0.61375, 0.53575]
self.confidence = [0.713, 0.953, 0.947, 0.514, 0.933, 0.725, 0.6025, 0.5855, 0.821, 0.6175, 0.547, 0.5605, 0.7,
0.609, 0.5785, 0.638, 0.8005, 0.824, 0.834, 0.5155, 0.9775, 0.8615, 0.6305, 0.549, 0.517,
0.5915, 0.5285, 0.923, 0.855, 0.751, 0.675, 0.773, 0.9805, 0.53, 0.5255, 0.9685, 0.9535,
0.5515, 0.8795, 0.497, 0.529, 0.5335, 0.8645, 0.9595, 0.9245, 0.5265, 0.452, 0.6415, 0.696,
0.617, 0.683, 0.7255, 0.5995, 0.5815, 0.772, 0.912, 0.983, 0.565, 0.7875, 0.783, 0.727,
0.6505, 0.764, 0.6875, 0.536, 0.5805, 0.605, 0.5835, 0.6505, 0.6045, 0.7505, 0.908, 0.8345,
0.767, 0.9945, 0.783, 0.78, 0.576, 0.512, 0.4635, 0.627, 0.913, 0.98, 0.6405, 0.636, 0.961,
0.9095, 0.501, 0.6605, 0.9835, 0.9725, 0.6305, 0.6185, 0.618, 0.5235, 0.6915, 0.81, 0.985,
0.95, 0.7565, 0.7725, 0.5265, 0.6875, 0.5995, 0.5885, 0.7865, 0.628, 0.49, 0.985, 0.95,
0.7565, 0.7725, 0.5265, 0.6875, 0.5995, 0.5885, 0.7865, 0.628, 0.49
]
def forward(self, x, target, sid):
confidencemat = torch.zeros_like(target,dtype=torch.float32)
for i in range(len(target)):
confidencemat[i] = self.confidence[sid[i]]
smoothing = 1 - confidencemat
logprobs = F.log_softmax(x, dim=-1)
nll_loss = -logprobs.gather(dim=-1, index=target.unsqueeze(1))
nll_loss = nll_loss.squeeze(1)
smooth_loss = -logprobs.mean(dim=-1)
loss = torch.mul(confidencemat,nll_loss) + torch.mul(smoothing,smooth_loss)
return loss.mean()
class CroppedLoss:
def __init__(self, loss_function):
self.loss_function = loss_function
def __call__(self, preds, targets):
avg_preds = torch.mean(preds, dim=2)
avg_preds = avg_preds.squeeze(dim=1)
return self.loss_function(avg_preds, targets)
def train_crop(log_interval, model, device, train_loader, optimizer, scheduler, cuda, gpuidx, epoch=1):
criterion = torch.nn.NLLLoss()
lossfn = CroppedLoss(criterion)
model.train()
for batch_idx, datas in enumerate(train_loader):
data, target = datas[0].to(device), datas[1].to(device, dtype=torch.int64)
optimizer.zero_grad()
output = model(data)
output = model.embedding_net(data)
loss = lossfn(output, target)
loss.backward()
optimizer.step()
if batch_idx % log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.item()))
scheduler.step()
def eval_crop(model, device, test_loader):
model.eval()
test_loss = []
correct = []
with torch.no_grad():
for datas in test_loader:
data, target = datas[0].to(device), datas[1].to(device, dtype=torch.int64)
outputs = []
for i in range(2):
outputs.append(model(data[:, :, :, i * 125:i * 125 + 1000]))
result = torch.cat([outputs[0], outputs[1][:, :, model.out_size - 125:model.out_size]], dim=2)
y_preds_per_trial = result.mean(dim=2)
test_loss.append(F.nll_loss(y_preds_per_trial, target, reduction='sum').item()) # sum up batch loss
pred = y_preds_per_trial.argmax(dim=1, keepdim=True) # get the index of the max log-probability
correct.append(pred.eq(target.view_as(pred)).sum().item())
loss = sum(test_loss) / len(test_loader.dataset)
# print('{:.0f}'.format(100. * correct / len(test_loader.dataset)))
print('Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.4f}%)'.format(
loss, sum(correct), len(test_loader.dataset),
100. * sum(correct) / len(test_loader.dataset)))
return loss, 100. * sum(correct) / len(test_loader.dataset)
class MAE_loss(torch.nn.Module):
def __init__(self, device):
super(MAE_loss, self).__init__()
self.device = device
self.loss_function = torch.nn.L1Loss()
def __call__(self, preds, targets):
y_onehot = torch.FloatTensor(targets.size(0), 2).to(self.device)
y_onehot.zero_()
y_onehot.scatter_(1, targets.unsqueeze(1), 1)
return self.loss_function(preds, y_onehot)
import utils
import time
def train(log_interval, model, device, train_loader, optimizer, scheduler, cuda, gpuidx, epoch):
losses = utils.AverageMeter('Loss', ':.4e')
if isinstance(model, torch.nn.DataParallel):
lossfn = model.module.criterion
else:
lossfn = model.criterion
# lossfn = LabelSmoothingCrossEntropy()
# lossfn = ConfidenceLabelSmoothingCrossEntropy()
correct = []
start = time.time()
model.train()
t_data = []
t_model = []
t3 = time.time()
for batch_idx, datas in enumerate(train_loader):
data, target = datas[0].to(device), datas[1].to(device, dtype=torch.int64)
t2 = time.time()
t_data.append(t2 - t3)
# print(t2)
optimizer.zero_grad()
output = model(data.unsqueeze(dim=1))
pred = F.log_softmax(output, dim=1).argmax(dim=1, keepdim=True) # get the index of the max log-probability
correct.append(pred.eq(target.view_as(pred)).sum().item())
loss = lossfn(output, target)
loss.backward()
optimizer.step()
losses.update(loss.item(), data.size(0))
if batch_idx % log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.item()))
t3 = time.time()
t_model.append(t3 - t2)
print("time :", time.time() - start)
print(f"t_data : {sum(t_data)} , t_model : {sum(t_model)}")
print(f'Train set: Accuracy: {sum(correct)}/{len(train_loader.dataset)} ({100. * sum(correct) / len(train_loader.dataset):.4f}%)')
def train_mtl(log_interval, model, device, train_loader, optimizer, scheduler, cuda, gpuidx, epoch):
losses = utils.AverageMeter('Loss', ':.4e')
if isinstance(model, torch.nn.DataParallel):
lossfn = model.module.criterion
else:
lossfn = model.criterion
# lossfn = LabelSmoothingCrossEntropy()
# lossfn = ConfidenceLabelSmoothingCrossEntropy()
correct = []
start = time.time()
model.train()
t_data = []
t_model = []
t3 = time.time()
for batch_idx, datas in enumerate(train_loader):
data, target, subjid = datas[0].to(device), datas[1].to(device, dtype=torch.int64), datas[2].to(device, dtype=torch.int64)
t2 = time.time()
t_data.append(t2 - t3)
# print(t2)
optimizer.zero_grad()
output = model(data.unsqueeze(dim=1))
pred = F.log_softmax(output, dim=1).argmax(dim=1, keepdim=True) # get the index of the max log-probability
correct.append(pred.eq(target.view_as(pred)).sum().item())
loss = lossfn(output, 2*subjid+target)
loss.backward()
optimizer.step()
losses.update(loss.item(), data.size(0))
if batch_idx % log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.item()))
t3 = time.time()
t_model.append(t3 - t2)
print("time :", time.time() - start)
print(f"t_data : {sum(t_data)} , t_model : {sum(t_model)}")
print(f'Train set: Accuracy: {sum(correct)}/{len(train_loader.dataset)} ({100. * sum(correct) / len(train_loader.dataset):.4f}%)')
def train_gpu(log_interval, model, device, train_loader, optimizer, scheduler, cuda, gpuidx, epoch=1):
losses = utils.AverageMeter('Loss', ':.4e')
if isinstance(model, torch.nn.DataParallel):
lossfn = model.module.criterion
else:
lossfn = model.criterion
correct = []
import time
start = time.time()
model.train()
t_data = []
t_model = []
t3 = time.time()
for batch_idx, datas in enumerate(train_loader):
data, target = datas[0], datas[1]
t2 = time.time()
t_data.append(t2 - t3)
optimizer.zero_grad()
output = model(data.unsqueeze(dim=1))
pred = F.log_softmax(output, dim=1).argmax(dim=1, keepdim=True) # get the index of the max log-probability
correct.append(pred.eq(target.view_as(pred)).sum().item())
loss = lossfn(output, target)
loss.backward()
optimizer.step()
losses.update(loss.item(), data.size(0))
if batch_idx % log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.item()))
t3 = time.time()
t_model.append(t3 - t2)
print("time :", time.time() - start)
print(f"t_data : {sum(t_data)} , t_model : {sum(t_model)}")
scheduler.step(losses.avg)
print(f'Train set: Accuracy: {sum(correct)}/{len(train_loader.dataset)} ({100. * sum(correct) / len(train_loader.dataset):.4f}%)')
def eval(model, device, test_loader):
model.eval()
test_loss = []
correct = []
with torch.no_grad():
for datas in test_loader:
data, target = datas[0].to(device), datas[1].to(device, dtype=torch.int64)
output = model(data.unsqueeze(dim=1))
test_loss.append(F.cross_entropy(output, target, reduction='sum').item()) # sum up batch loss
pred = F.log_softmax(output, dim=1).argmax(dim=1, keepdim=True) # get the index of the max log-probability
correct.append(pred.eq(target.view_as(pred)).sum().item())
loss = sum(test_loss) / len(test_loader.dataset)
# print('{:.0f}'.format(100. * correct / len(test_loader.dataset)))
print('Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.4f}%)'.format(
loss, sum(correct), len(test_loader.dataset),
100. * sum(correct) / len(test_loader.dataset)))
return loss, 100. * sum(correct) / len(test_loader.dataset)
from sklearn.metrics import roc_curve
from sklearn.metrics import auc
def eval_cali(model, device, test_loader):
model.eval()
test_loss = []
correct = []
with torch.no_grad():
for datas in test_loader:
data, target = datas[0].to(device), datas[1].to(device, dtype=torch.int64)
output = model(data.unsqueeze(dim=1))
test_loss.append(F.cross_entropy(output, target, reduction='sum').item()) # sum up batch loss
pred = F.softmax(output, dim=1)
fpr, tpr, thresholds = roc_curve(target.cpu(), pred.cpu()[:,0])
AUC = auc(fpr, tpr)
correct.append(AUC)
loss = sum(test_loss) / len(test_loader.dataset)
# print('{:.0f}'.format(100. * correct / len(test_loader.dataset)))
print('Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.4f}%)'.format(
loss, sum(correct), len(test_loader.dataset),
100. * sum(correct) / len(test_loader.dataset)))
return loss, 100. * sum(correct) / len(test_loader.dataset)
def vote(output, target, topk=(1,)):
""" Computes the precision@k for the specified values of k """
maxk = max(topk)
batch_size = target.size(0)
output = F.log_softmax(output, dim=1)
_, pred = output.topk(maxk, 1, True, True)
# pred = pred.t()
# one-hot case
if target.ndimension() > 1:
target = target.max(1)[1]
modevalue = torch.mode(pred%2)[0]
return modevalue
def eval_mtl(model, device, test_loader):
model.eval()
test_loss = []
correct = []
with torch.no_grad():
for datas in test_loader:
data, target, subjid = datas[0].to(device), datas[1].to(device, dtype=torch.int64), datas[2].to(device,
dtype=torch.int64)
output = model(data.unsqueeze(dim=1))
pred = vote(output, subjid*2+target, (1,5))
test_loss.append(F.cross_entropy(output, subjid*2+target, reduction='sum').item()) # sum up batch loss
# pred_0 = F.log_softmax(output, dim=1).argmax(dim=1, keepdim=True) # get the index of the max log-probability
# pred = pred_0%2
correct.append(pred.eq(target.view_as(pred)).sum().item())
loss = sum(test_loss) / len(test_loader.dataset)
# print('{:.0f}'.format(100. * correct / len(test_loader.dataset)))
print('Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.4f}%)'.format(
loss, sum(correct), len(test_loader.dataset),
100. * sum(correct) / len(test_loader.dataset)))
return loss, 100. * sum(correct) / len(test_loader.dataset)
def eval_ensemble(models, device, test_loader):
for model in models:
model.eval()
test_loss = []
correct = []
with torch.no_grad():
for datas in test_loader:
data, target = datas[0].to(device), datas[1].to(device, dtype=torch.int64)
output = []
for model in models:
output.append(model(data.unsqueeze(dim=1)).unsqueeze(dim=2))
temp = torch.cat(output, dim=2)
temp2 = temp.mean(dim=2)
test_loss.append(F.cross_entropy(temp2, target, reduction='sum').item()) # sum up batch loss
pred = F.log_softmax(temp2, dim=1).argmax(dim=1, keepdim=True) # get the index of the max log-probability
correct.append(pred.eq(target.view_as(pred)).sum().item())
loss = sum(test_loss) / len(test_loader.dataset)
# print('{:.0f}'.format(100. * correct / len(test_loader.dataset)))
print('Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.4f}%)'.format(
loss, sum(correct), len(test_loader.dataset),
100. * sum(correct) / len(test_loader.dataset)))
return loss, 100. * sum(correct) / len(test_loader.dataset)
|
44312
|
import matplotlib
import numpy as np
import time
# matplotlib.use('Agg')
import matplotlib.pyplot as plt
VOC_BBOX_LABEL_NAMES = (
'fly',
'bike',
'bird',
'boat',
'pin',
'bus',
'c',
'cat',
'chair',
'cow',
'table',
'dog',
'horse',
'moto',
'p',
'plant',
'shep',
'sofa',
'train',
'tv',
)
def vis_img(img, ax=None):
"""Visualize a color image.
Args:
img (~numpy.ndarray): An array of shape :math:`(3, height, width)`.
This is in RGB format and the range of its value is
:math:`[0, 255]`.
ax (matplotlib.axes.Axis): The visualization is displayed on this
axis. If this is :obj:`None` (default), a new axis is created.
Returns:
~matploblib.axes.Axes:
Returns the Axes object with the plot for further tweaking.
"""
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
# CHW ==> HWC
img = img.transpose((1, 2, 0))
ax.imshow(img.astype(np.uint8))
return ax
def vis_bbox(img, bbox, label=None, score=None, ax=None):
"""
Visualize bounding boxes inside image.
:param img:
:param bbox:
:param label:
:param score:
:param ax:
:return:
"""
label_names = list(VOC_BBOX_LABEL_NAMES) + ['bg']
if label is not None and not len(bbox) == len(label):
raise ValueError('The length of label must be same as that of bbox')
if score is not None and not len(bbox) == len(score):
raise ValueError('The length of score must be same as that of bbox')
# Returns newly instantiated matplotlib.axes.Axes object if ax is None
ax = vis_img(img, ax=ax)
# If there is no bounding box to display, visualize the image and exit.
if len(bbox) == 0:
return ax
for i, bb in enumerate(bbox):
xy = (bb[1], bb[0])
height = bb[2] - bb[0]
width = bb[3] - bb[1]
ax.add_patch(plt.Rectangle(
xy, width, height, fill=False, edgecolor='red', linewidth=2))
caption = list()
if label is not None and label_names is not None:
lb = label[i]
if not (-1 <= lb < len(label_names)):
raise ValueError('No corresponding name is given')
caption.append(label_names[lb])
if score is not None:
sc = score[i]
caption.append('{:.2f}'.format(sc))
if len(caption) > 0:
ax.text(bb[1], bb[0],
':'.join(caption),
style='italic',
color='white',
bbox={'facecolor': (0.8, 0.2, 0.2), 'alpha': 0.9, 'pad': 1.5})
return ax
def fig2data(fig):
"""
    @brief Convert a Matplotlib figure to a 3D numpy array with RGBA
    channels and return it
@param fig: a matplotlib figure
@return a numpy 3D array of RGBA values
"""
# draw the renderer
fig.canvas.draw()
# Get the RGBA buffer from the figure
w, h = fig.canvas.get_width_height()
    buf = np.frombuffer(fig.canvas.tostring_argb(), dtype=np.uint8)
buf.shape = (w, h, 4)
# canvas.tostring_argb give pixmap in ARGB mode. Roll the ALPHA channel to have it in RGBA mode
buf = np.roll(buf, 3, axis=2)
return buf.reshape((h, w, 4))
def fig4vis(fig):
"""
convert figure to ndarray
"""
ax = fig.get_figure()
img_data = fig2data(ax).astype(np.int32)
plt.close()
# HWC ==> CHW
return img_data[:, :, :3].transpose((2, 0, 1)) / 255.
def visdom_bbox(*args, **kwargs):
fig = vis_bbox(*args, **kwargs)
data = fig4vis(fig)
return data
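# Illustrative usage sketch (the random image and the box/label/score values are placeholders):
#
#   img = np.random.randint(0, 255, (3, 224, 224))    # CHW, RGB, values in [0, 255]
#   bbox = np.array([[30., 40., 120., 160.]])          # (y_min, x_min, y_max, x_max)
#   vis_bbox(img, bbox, label=np.array([7]), score=np.array([0.9]))
#   plt.show()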
|
44332
|
from .abc import ABCTokenGenerator, Token
from .consistent import ConsistentTokenGenerator
from .single import SingleTokenGenerator
from .util import get_token_generator
|
44383
|
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('libretto', '0009_auto_20150423_2042'),
]
operations = [
migrations.AlterModelOptions(
name='pupitre',
options={'ordering': ('-soliste', 'partie'), 'verbose_name': 'pupitre', 'verbose_name_plural': 'pupitres'},
),
migrations.AlterField(
model_name='elementdeprogramme',
name='autre',
field=models.CharField(max_length=500, blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='engagement',
name='individus',
field=models.ManyToManyField(related_name='engagements', to='libretto.Individu'),
preserve_default=True,
),
migrations.AlterField(
model_name='evenement',
name='circonstance',
field=models.CharField(max_length=500, verbose_name='circonstance', blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='evenement',
name='debut_date_approx',
field=models.CharField(help_text='Ne remplir que si la date est impr\xe9cise.', max_length=60, verbose_name='date (approximative)', blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='evenement',
name='debut_heure_approx',
field=models.CharField(help_text='Ne remplir que si l\u2019heure est impr\xe9cise.', max_length=30, verbose_name='heure (approximative)', blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='evenement',
name='debut_lieu_approx',
field=models.CharField(help_text='Ne remplir que si le lieu (ou institution) est impr\xe9cis(e).', max_length=50, verbose_name='lieu (approximatif)', blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='evenement',
name='fin_date_approx',
field=models.CharField(help_text='Ne remplir que si la date est impr\xe9cise.', max_length=60, verbose_name='date (approximative)', blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='evenement',
name='fin_heure_approx',
field=models.CharField(help_text='Ne remplir que si l\u2019heure est impr\xe9cise.', max_length=30, verbose_name='heure (approximative)', blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='evenement',
name='fin_lieu_approx',
field=models.CharField(help_text='Ne remplir que si le lieu (ou institution) est impr\xe9cis(e).', max_length=50, verbose_name='lieu (approximatif)', blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='individu',
name='deces_date_approx',
field=models.CharField(help_text='Ne remplir que si la date est impr\xe9cise.', max_length=60, verbose_name='date (approximative)', blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='individu',
name='deces_lieu_approx',
field=models.CharField(help_text='Ne remplir que si le lieu (ou institution) est impr\xe9cis(e).', max_length=50, verbose_name='lieu (approximatif)', blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='individu',
name='naissance_date_approx',
field=models.CharField(help_text='Ne remplir que si la date est impr\xe9cise.', max_length=60, verbose_name='date (approximative)', blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='individu',
name='naissance_lieu_approx',
field=models.CharField(help_text='Ne remplir que si le lieu (ou institution) est impr\xe9cis(e).', max_length=50, verbose_name='lieu (approximatif)', blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='oeuvre',
name='creation_date_approx',
field=models.CharField(help_text='Ne remplir que si la date est impr\xe9cise.', max_length=60, verbose_name='date (approximative)', blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='oeuvre',
name='creation_heure_approx',
field=models.CharField(help_text='Ne remplir que si l\u2019heure est impr\xe9cise.', max_length=30, verbose_name='heure (approximative)', blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='oeuvre',
name='creation_lieu_approx',
field=models.CharField(help_text='Ne remplir que si le lieu (ou institution) est impr\xe9cis(e).', max_length=50, verbose_name='lieu (approximatif)', blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='partie',
name='professions',
field=models.ManyToManyField(related_name='parties', to='libretto.Profession', blank=True, help_text='La ou les profession(s) capable(s) de jouer ce r\xf4le ou cet instrument.', null=True, verbose_name='occupations'),
preserve_default=True,
),
migrations.AlterField(
model_name='personnel',
name='engagements',
field=models.ManyToManyField(related_name='personnels', to='libretto.Engagement'),
preserve_default=True,
),
migrations.AlterField(
model_name='pupitre',
name='quantite_max',
field=models.IntegerField(default=1, verbose_name='quantit\xe9 maximale'),
preserve_default=True,
),
migrations.AlterField(
model_name='pupitre',
name='quantite_min',
field=models.IntegerField(default=1, verbose_name='quantit\xe9 minimale'),
preserve_default=True,
),
migrations.AlterField(
model_name='source',
name='date_approx',
field=models.CharField(help_text='Ne remplir que si la date est impr\xe9cise.', max_length=60, verbose_name='date (approximative)', blank=True),
preserve_default=True,
),
]
|
44404
|
from system import System
from src.basic.sessions.cmd_session import CmdSession
class CmdSystem(System):
def __init__(self):
super(CmdSystem, self).__init__()
@classmethod
def name(cls):
return 'cmd'
def new_session(self, agent, kb):
return CmdSession(agent, kb)
|
44412
|
import re
from typing import List
from .consts import *
# =================== #
# INTERNALS FUNCTIONS #
# =================== #
def my_re_escape(text):
escape_char = r"[]"
returned_text = ""
for c in text:
if c in escape_char:
returned_text += "\\"
returned_text += c
return returned_text
ESCAPED_CSI = my_re_escape(CSI)
def remove_attributes_from_sgr(sgr_string: str, attributes: List[str]) -> str:
"""
    Remove unwanted attributes from an SGR sequence.
    SGR sequences always start with '\033[' and end with 'm'.
    Not all attributes have the same number of parameters.
    If all the attributes are removed, return an empty string (without the CSI and the trailing 'm').
    Args:
        sgr_string: SGR string
        attributes: attributes to remove
    Returns:
        SGR string without unwanted attributes
"""
# TODO: replace remove by a set to optimize lookup
# Remove the CSI in the beginning and the 'm' at the end
params = sgr_string[2:-1].split(";")
keep: List[str] = [] # list of params to keep, to return in the end
# Since we are going to jump some iterations we can't use
# for loop, but we will use while loop.
i = 0
while True:
if i >= len(params):
break
param = params[i]
# 38 48 58 are the only attributes that take more than one parameter
# https://en.wikipedia.org/wiki/ANSI_escape_code#SGR_parameters
if param not in ["38", "48", "58"]:
if param not in attributes:
keep.append(param)
else:
second_param = params[i + 1]
# if second param is 2 => the attribute take 5 param
if second_param == "2":
params_to_keep = params[i : i + 5]
i = i + 4
# if second param is 5 => the attribute take 3 param
elif second_param == "5":
params_to_keep = params[i : i + 3]
i = i + 2
else:
# FIXME: How to handle errors? if the attribute is wrong?
params_to_keep = []
if param not in attributes:
keep.extend(params_to_keep)
i += 1
# If keep is empty return an empty string without CSI and 'm'
if keep:
return f"{CSI}{';'.join(keep)}m"
else:
return ""
def repl_remove_attributes_from_sgr(matchobj, remove: List[str]) -> str:
"""Addapted remove_sequence_from_text function to be used with regex"""
return remove_attributes_from_sgr(matchobj.group(0), remove)
re_sgr = re.compile(
fr"""
{ESCAPED_CSI}
\d*
(?:
;\d*
)*
m
""",
re.VERBOSE,
)
def remove_attributes_from_string(string, remove: List[str]) -> str:
"""Remove unwanted attributes form a string"""
return re_sgr.sub(lambda x: repl_remove_attributes_from_sgr(x, remove), string)
# ======================== #
# REMOVE GRAPHIC FUNCTIONS #
# ======================== #
def rmgraphics(string: str) -> str:
"""Remove all graphics attributes (all SGR)"""
return re_sgr.sub("", string)
def rmcolor(text: str) -> str:
"""Remove all color attributes from a string"""
# remove all attributes from 30 to 39
# 30-37 => 8colors
# 38 => 24 bits colors
# 39 => reset colors
remove = [str(i) for i in range(30, 40)]
return remove_attributes_from_string(text, remove)
def rmbackground(text: str) -> str:
"""Remove all color attributes from a string"""
# remove all attributes from 40 to 49
# 40-47 => 8colors background
# 48 => 24 bits colors background
# 49 => reset background colors
attributes = [str(i) for i in range(40, 50)]
return remove_attributes_from_string(text, attributes)
def rmstyle(text: str) -> str:
# TODO: change list to set
# TODO: test rmstyle
# TODO: make the list outside of the function to optimize (not calculate the list each time)
attributes = [
N_UNDERLINE,
N_DOUBLE_UNDERLINE,
N_RESET_UNDERLINE,
N_ITALIC,
N_RESET_ITALIC,
N_CROSS,
N_RESET_CROSS,
N_BLINK,
N_RESET_BLINK,
N_BOLD,
N_DIM,
N_RESET_BOLD_AND_DIM,
]
return remove_attributes_from_string(text, attributes)
def rmunderline(text):
attributes = [N_UNDERLINE, N_DOUBLE_UNDERLINE, N_RESET_UNDERLINE]
return remove_attributes_from_string(text, attributes)
def rmitalic(text):
attributes = [N_ITALIC, N_RESET_ITALIC]
return remove_attributes_from_string(text, attributes)
def rmcross(text):
attributes = [N_CROSS, N_RESET_CROSS]
return remove_attributes_from_string(text, attributes)
def rmblink(text):
attributes = [N_BLINK, N_RESET_BLINK]
return remove_attributes_from_string(text, attributes)
def rmbold_and_dim(text):
attributes = [N_BOLD, N_DIM, N_RESET_BOLD_AND_DIM]
return remove_attributes_from_string(text, attributes)
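# Illustrative usage sketch (assumes CSI from consts is the usual '\033[' prefix; the escape
# sequence below is hand-written for the example):
#
#   colored = "\033[1;31mwarning\033[0m plain"
#   rmcolor(colored)     # drops the red foreground (31), keeps the bold: '\033[1mwarning\033[0m plain'
#   rmgraphics(colored)  # strips every SGR sequence: 'warning plain'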
|
44458
|
import torch
import torch.nn.functional as F
from ..models.progressive import ProGANGenerator, ProGANDiscriminator
from ..modules.gan_loss import ImprovedWGANLoss
from ..modules.instance_refiner import InstanceRefiner
from tools.utils import to_cuda
from models import load_network, save_network, print_network
class SegModel(torch.nn.Module):
def __init__(self, opt, is_train=True, is_main=True, logger=None):
super().__init__()
self.opt = opt
self.is_main = is_main
self.netG, self.netD = self.initialize_networks(is_train)
if is_train:
self.opt_g, self.opt_d = self.create_optimizers(self.opt)
self.gan_loss = ImprovedWGANLoss(self.netD)
self.logger = logger if self.is_main else None
self.ins_refiner = InstanceRefiner(self.opt)
def forward(self, data, fake_data={}, interpolate=False, alpha=None, mode='', log=False, hard=True,
global_iteration=None):
z, real_seg, real_cond = self.preprocess_input(data)
_, fake_seg, _ = self.preprocess_input(fake_data, is_fake=True)
if mode == 'generator':
g_loss, fake_seg = self.compute_generator_loss(real_cond, z, interpolate, alpha, hard, log, global_iteration)
fake_seg = self.postprocess_output(fake_seg)
return g_loss, fake_seg
elif mode == 'discriminator':
d_loss = self.compute_discriminator_loss(real_cond, real_seg, fake_seg, interpolate, alpha, log, global_iteration)
return d_loss
elif mode == 'inference':
fake_seg = self.generate_fake(real_cond, z, interpolate, alpha, hard, log, global_iteration)
fake_seg = self.postprocess_output(fake_seg)
return fake_seg
else:
raise ValueError(f"mode '{mode}' is invalid")
def postprocess_output(self, seg):
if self.opt.dim != self.opt.seg_dim:
size = (int(self.opt.dim), int(self.opt.aspect_ratio * self.opt.dim))
mode = 'bilinear' if self.opt.discretization == "none" or self.opt.bilimax else 'nearest'
seg = {k: self.resize(v, size, mode=mode) for k, v in seg.items()}
if self.opt.bilimax:
index = seg["sem_seg"].max(1, keepdim=True)[1]
seg["sem_seg"] = torch.zeros_like(seg["sem_seg"]).scatter_(1, index, 1.0)
return seg
def resize(self, t, size, mode='nearest'):
        if size is not None and 0 not in t.size():
return torch.nn.functional.interpolate(t, size=size, mode=mode)
else:
return t
def preprocess_input(self, data, is_fake=False):
size = (int(self.opt.seg_dim), int(self.opt.aspect_ratio * self.opt.seg_dim)) if self.opt.dim != self.opt.seg_dim else None
data["z_seg"] = to_cuda(data, "z_seg")
data["sem_seg"] = to_cuda(data, "sem_seg")
data["ins_center"] = to_cuda(data, "ins_center")
data["ins_offset"] = to_cuda(data, "ins_offset")
data["ins_edge"] = to_cuda(data, "ins_edge")
data["ins_density"] = to_cuda(data, "ins_density")
data["sem_cond"] = to_cuda(data, "sem_cond")
data["ins_cond"] = to_cuda(data, "ins_cond")
if is_fake:
data["sem_seg"] = data["sem_seg"].detach()
data["ins_center"] = data["ins_center"].detach()
data["ins_offset"] = data["ins_offset"].detach()
data["ins_edge"] = data["ins_edge"].detach()
data["ins_density"] = data["ins_density"].detach()
z = data["z_seg"]
seg = {'sem_seg': self.resize(data["sem_seg"], size),
'ins_center': self.resize(data["ins_center"], size),
'ins_offset': self.resize(data["ins_offset"], size),
'ins_edge': self.resize(data["ins_edge"], size),
'ins_density': self.resize(data["ins_density"], size)}
cond = {'sem_cond': data["sem_cond"],
'ins_cond': data["ins_cond"]}
return z, seg, cond
def initialize_networks(self, is_train):
if self.opt.model == 'progressive':
netG = ProGANGenerator(self.opt).cuda()
netD = ProGANDiscriminator(self.opt).cuda() if is_train else None
else:
raise ValueError
if self.is_main:
netG = load_network(netG, "seg_g", self.opt)
print_network(netG)
if is_train:
netD = load_network(netD, "seg_d", self.opt)
print_network(netD)
netG.res = self.opt.seg_dim
if netD:
netD.res = self.opt.seg_dim
return netG, netD
def save_model(self, global_iteration, latest):
save_network(self.netG, "seg_g", global_iteration, self.opt, latest=latest)
save_network(self.netD, "seg_d", global_iteration, self.opt, latest=latest)
def create_optimizers(self, opt):
if opt.optimizer == "adam":
opt_g = torch.optim.Adam(self.netG.parameters(), lr=opt.lr, betas=(opt.beta1, opt.beta2))
opt_d = torch.optim.Adam(self.netD.parameters(), lr=opt.lr, betas=(opt.beta1, opt.beta2))
else:
raise NotImplementedError
return opt_g, opt_d
def compute_generator_loss(self, real_cond, z, interpolate, alpha, hard, log, global_iteration):
if interpolate:
fake_segs = self.netG.interpolate(z, alpha, cond=real_cond)
else:
fake_segs = self.netG(z, cond=real_cond)
if not "inter" in self.opt.cond_mode:
fake_segs = [fake_segs[-1]]
x_fake_segs = [self.to_discrete(fake_seg) for fake_seg in fake_segs]
sem_for_dis = real_cond["sem_cond"] if "original_cgan" in self.opt.cond_mode else None
if interpolate:
score = self.netD.interpolate(x_fake_segs[-1], alpha, sem_cond=sem_for_dis)
else:
score = self.netD(x_fake_segs[-1], sem_cond=sem_for_dis)
loss_gen = self.gan_loss.generator_loss_logits(score).sum()
loss = loss_gen
spread = torch.tensor([])
fake_sem_mask = torch.tensor([])
fake_ins_cond = torch.tensor([])
pseudo_center_mask = torch.tensor([])
fake_raw_filtered_sem_seg = torch.tensor([])
real_sem_cond = torch.tensor([])
real_ins_cond = torch.tensor([])
pseudo_ins_center = torch.tensor([])
pseudo_ins_offset = torch.tensor([])
entropy = torch.tensor([])
fake_sem_cond = torch.tensor([])
fake_center_mask = torch.tensor([])
loss_sem_entropy = []
loss_sem_recover = []
loss_sem_d_recover = []
loss_ins_recover = []
loss_pseudo_center = []
loss_pseudo_offset = []
loss_sem_spread = []
loss_ova = []
for fake_seg, x_fake_seg in zip(fake_segs, x_fake_segs):
logprob = torch.log(fake_seg["sem_seg"] + 0.00001)
entropy = -torch.sum(torch.mul(fake_seg["sem_seg"], logprob), dim=1, keepdim=True)
loss_sem_entropy.append(torch.mean(entropy))
if self.opt.cond_seg:
cond_loss = 0
if self.opt.cond_seg in ["semantic", "panoptic"]:
real_sem_cond = real_cond["sem_cond"]
fake_sem_cond = torch.mean(fake_seg["sem_seg"], dim=(2, 3))
index = fake_seg["sem_seg"].max(1, keepdim=True)[1]
d_fake_sem_seg = torch.zeros_like(fake_seg["sem_seg"]).scatter_(1, index, 1.0)
d_fake_sem_cond = torch.mean(d_fake_sem_seg, dim=(2, 3))
logprob_cond = torch.log(fake_sem_cond + 0.00001)
d_logprob_cond = torch.log(d_fake_sem_cond + 0.00001)
loss_sem_recover.append(F.kl_div(logprob_cond, real_sem_cond, reduction='batchmean'))
loss_sem_d_recover.append(F.kl_div(d_logprob_cond, real_sem_cond, reduction='batchmean'))
if 'sem_recover' in self.opt.cond_mode:
cond_loss += loss_sem_recover[-1]
if self.opt.cond_seg in ["instance", "panoptic"] and "density" in self.opt.instance_type:
real_ins_cond = real_cond["ins_cond"]
fake_ins_cond = torch.sum(fake_seg["ins_density"], dim=(2, 3))
loss_ins_recover.append(F.l1_loss(fake_ins_cond, real_ins_cond))
if 'ins_recover' in self.opt.cond_mode:
cond_loss += loss_ins_recover[-1]
if 'sem_assisted' in self.opt.cond_mode:
fake_sem_mask = fake_seg["sem_mask"]
spread = torch.sum(fake_sem_mask, dim=1)
loss_sem_spread.append(torch.mean((spread - 1) ** 2))
if len(self.opt.ova_idx) > 0:
ova = 0
for idx in self.opt.ova_idx:
other_idx = [i for i in range(self.opt.num_semantics) if i != idx]
ova += torch.mean(torch.sum(fake_sem_mask[:, other_idx], dim=1) * fake_sem_mask[:, idx])
loss_ova.append(ova)
cond_loss += ova * self.opt.lambda_ova
if 'spread' in self.opt.cond_mode:
cond_loss += loss_sem_spread[-1]
if 'entropy' in self.opt.cond_mode:
cond_loss += loss_sem_entropy[-1]
loss += cond_loss
if self.opt.pseudo_supervision:
with torch.no_grad():
pseudo = self.ins_refiner.batch_transform(fake_seg["ins_center"], x_fake_seg["ins_offset"], x_fake_seg["sem_seg"])
pseudo_ins_center, pseudo_ins_offset = pseudo
loss_pseudo_center.append(F.mse_loss(fake_seg["ins_center"], pseudo_ins_center))
loss_pseudo_offset.append(F.mse_loss(x_fake_seg["ins_offset"], pseudo_ins_offset))
loss_pseudo = loss_pseudo_center[-1] + loss_pseudo_offset[-1]
loss += loss_pseudo
if self.logger:
# log scalars every step
self.logger.log_scalar("seg_generator/sem_entropy", loss_sem_entropy, global_iteration)
self.logger.log_scalar("seg_generator/gen", loss_gen, global_iteration)
self.logger.log_scalar("seg_generator/sem_cond_recover", loss_sem_recover, global_iteration)
self.logger.log_scalar("seg_generator/sem_cond_true_recover", loss_sem_d_recover, global_iteration)
self.logger.log_scalar("seg_generator/sem_ins_recover", loss_ins_recover, global_iteration)
self.logger.log_scalar("seg_generator/sem_cond_spread", loss_sem_spread, global_iteration)
self.logger.log_scalar("seg_generator/ins_pseudo_center", loss_pseudo_center, global_iteration)
self.logger.log_scalar("seg_generator/ins_pseudo_offset", loss_pseudo_offset, global_iteration)
self.logger.log_scalar("seg_generator/one_versus_all", loss_ova, global_iteration)
# log images every few steps
if log:
fake_seg = fake_segs[-1]
x_fake_seg = x_fake_segs[-1]
with torch.no_grad():
fake_raw_sem_seg = fake_seg["raw_sem_seg"]
if fake_raw_sem_seg.size(0) > 0:
fake_raw_filtered_sem_seg = torch.zeros(fake_raw_sem_seg[:16].cpu().shape)
fake_raw_filtered_sem_seg[real_sem_cond[:16].cpu()>0] = fake_raw_sem_seg[:16].cpu()[real_sem_cond[:16].cpu()>0]
if fake_seg["ins_center"].size(0) > 0:
fake_center_mask = self.ins_refiner.get_peak_mask(fake_seg["ins_center"][:16])
if pseudo_ins_center.size(0) > 0 and pseudo_center_mask.size(0) == 0:
pseudo_center_mask = self.ins_refiner.get_peak_mask(pseudo_ins_center[:16])
self.logger.log_semantic_seg("seg_generator/fake", fake_seg["sem_seg"][:16].cpu(), 4, global_iteration)
self.logger.log_semantic_seg("seg_generator/fake_gumbel", x_fake_seg["sem_seg"][:16].cpu(), 4, global_iteration)
self.logger.log_cond_distrib("seg_generator/semantic_distrib", real_sem_cond[:16].cpu(), fake_sem_cond[:16].cpu(), 4, 4, global_iteration)
self.logger.log_img("seg_generator/entropy", entropy[:16].cpu(), 4, global_iteration)
self.logger.log_spread("seg_generator/spread", spread[:16].cpu(), 4, global_iteration)
self.logger.log_semantic_mask("seg_generator/semantic_mask", fake_sem_mask[:1].cpu(), real_sem_cond[:1].cpu(), 16, 4, global_iteration)
self.logger.log_semantic_seg("seg_generator/fake_raw", fake_raw_sem_seg[:16].cpu(), 4, global_iteration)
self.logger.log_semantic_seg("seg_generator/fake_raw_filtered", fake_raw_filtered_sem_seg[:16].cpu(), 4, global_iteration)
self.logger.log_ins_center("seg_generator/fake_ins_center", fake_seg["ins_center"][:16].cpu(), 4, global_iteration)
self.logger.log_ins_center("seg_generator/pseudo_ins_center_gumbel", pseudo_ins_center[:16].cpu(), 4, global_iteration)
self.logger.log_img("seg_generator/fake_center_mask", fake_center_mask[:16].cpu(), 4, global_iteration)
self.logger.log_img("seg_generator/pseudo_center_mask_gumbel", pseudo_center_mask[:16].cpu(), 4, global_iteration)
self.logger.log_instance("seg_generator/fake_instance_gumbel", x_fake_seg["sem_seg"][:16].cpu(), fake_center_mask[:16].cpu(), x_fake_seg["ins_offset"][:16].cpu(), 4, global_iteration)
self.logger.log_instance("seg_generator/pseudo_instance_gumbel", x_fake_seg["sem_seg"][:16].cpu(), pseudo_center_mask[:16].cpu(), pseudo_ins_offset[:16].cpu(), 4, global_iteration)
self.logger.log_ins_offset("seg_generator/fake_ins_offset_gumbel", x_fake_seg["sem_seg"][:16].cpu(), x_fake_seg["ins_offset"][:16].cpu(), 4, global_iteration)
self.logger.log_ins_offset("seg_generator/pseudo_ins_offset_gumbel", x_fake_seg["sem_seg"][:16].cpu(), pseudo_ins_offset[:16].cpu(), 4, global_iteration)
self.logger.log_img("seg_generator/fake_ins_edge", fake_seg["ins_edge"][:16].cpu(), 4, global_iteration)
self.logger.log_ins_density("seg_generator/fake_ins_density", fake_seg["ins_density"][:16].cpu(), 4, global_iteration)
self.logger.log_cond_distrib("seg_generator/instance_distrib", real_ins_cond[:16].cpu(), fake_ins_cond[:16].cpu(), 4, 4, global_iteration)
if hard:
return loss, x_fake_segs[-1]
else:
return loss, fake_segs[-1]
def compute_discriminator_loss(self, real_cond, real_seg, fake_seg, interpolate, alpha, log, global_iteration):
sem_for_dis_real = torch.mean(real_seg["sem_seg"], dim=(2, 3)) if "original_cgan" in self.opt.cond_mode else None
sem_for_dis_fake = real_cond["sem_cond"] if "original_cgan" in self.opt.cond_mode else None
if interpolate:
real_score = self.netD.interpolate(real_seg, alpha, sem_cond=sem_for_dis_real)
fake_score = self.netD.interpolate(fake_seg, alpha, sem_cond=sem_for_dis_fake)
forward = lambda x: self.netD.interpolate(x, alpha, sem_cond=sem_for_dis_real)
else:
real_score = self.netD(real_seg, sem_cond=sem_for_dis_real)
fake_score = self.netD(fake_seg, sem_cond=sem_for_dis_fake)
forward = lambda x: self.netD(x, sem_cond=sem_for_dis_real)
if self.opt.panoptic:
real = torch.cat([real_seg["sem_seg"], real_seg["ins_center"], real_seg["ins_offset"], real_seg["ins_edge"],
real_seg["ins_density"]], dim=1)
fake = torch.cat([fake_seg["sem_seg"], fake_seg["ins_center"], fake_seg["ins_offset"], fake_seg["ins_edge"],
fake_seg["ins_density"]], dim=1)
else:
real = real_seg["sem_seg"]
fake = fake_seg["sem_seg"]
loss = self.gan_loss.discriminator_loss_logits(real, fake, real_score, fake_score, forward=forward)
if self.logger:
# log scalars every step
self.logger.log_scalar("seg_generator/dis", loss, global_iteration)
            # log images every few steps
if log:
real_center_mask = torch.Tensor([])
if self.opt.panoptic:
with torch.no_grad():
real_center_mask = self.ins_refiner.get_peak_mask(real_seg["ins_center"])
self.logger.log_semantic_seg("seg_generator/real", real_seg["sem_seg"][:16].cpu(), 4, global_iteration)
self.logger.log_ins_center("seg_generator/real_ins_center", real_seg["ins_center"][:16].cpu(), 4, global_iteration)
self.logger.log_ins_offset("seg_generator/real_ins_offset", real_seg["sem_seg"][:16].cpu(), real_seg["ins_offset"][:16].cpu(), 4, global_iteration)
self.logger.log_instance("seg_generator/real_instance", real_seg["sem_seg"][:16].cpu(), real_center_mask[:16].cpu(), real_seg["ins_offset"][:16].cpu(), 4, global_iteration)
self.logger.log_img("seg_generator/real_ins_edge", real_seg["ins_edge"][:16].cpu(), 4, global_iteration)
self.logger.log_ins_density("seg_generator/real_ins_density", real_seg["ins_density"][:16].cpu(), 4, global_iteration)
return loss
def generate_fake(self, real_cond, z, interpolate, alpha, hard, log, global_iteration):
with torch.no_grad():
if interpolate:
fake_seg = self.netG.interpolate(z, alpha, cond=real_cond)[-1]
else:
fake_seg = self.netG(z, cond=real_cond)[-1]
x_fake_seg = self.to_discrete(fake_seg)
fake_sem_cond = torch.mean(x_fake_seg["sem_seg"], dim=(2, 3))
if self.opt.cond_seg in ["semantic", "panoptic"]:
real_sem_cond = real_cond["sem_cond"]
else:
real_sem_cond = torch.Tensor([])
if log and self.logger:
self.logger.log_semantic_seg("seg_generator/fake", fake_seg["sem_seg"][:16].cpu(), 4, global_iteration)
self.logger.log_cond_distrib("seg_generator/semantic_distrib", real_sem_cond[:16].cpu(), fake_sem_cond[:16].cpu(), 4, 4, global_iteration)
if hard:
return x_fake_seg
else:
return fake_seg
def to_discrete(self, fake_seg):
fake_sem_seg = fake_seg["sem_seg"]
if self.opt.discretization == "gumbel":
x_fake_sem_seg = self.gumbel_sampler(fake_sem_seg)
elif self.opt.discretization == "max":
x_fake_sem_seg = self.max_sampler(fake_sem_seg)
elif self.opt.discretization == "none":
x_fake_sem_seg = self.none_sampler(fake_sem_seg)
else:
raise ValueError
fake_ins_center, fake_ins_offset = fake_seg["ins_center"], fake_seg["ins_offset"]
fake_ins_edge = fake_seg["ins_edge"]
fake_ins_density = fake_seg["ins_density"]
x_fake_ins_offset = self.ins_refiner.filter_offset(fake_ins_offset, x_fake_sem_seg)
x_fake_ins_density = self.ins_refiner.filter_density(fake_ins_density, x_fake_sem_seg)
x_fake_seg = {"sem_seg": x_fake_sem_seg, "ins_center": fake_ins_center, "ins_offset": x_fake_ins_offset,
"ins_edge": fake_ins_edge, "ins_density": x_fake_ins_density}
if self.opt.store_masks:
x_fake_seg["sem_mask"] = fake_seg["sem_mask"]
return x_fake_seg
def max_sampler(self, fake, hard=True, dim=1):
y_soft = fake
if hard:
# straight through.
index = y_soft.max(dim, keepdim=True)[1]
y_hard = torch.zeros_like(fake).scatter_(dim, index, 1.0)
return (y_hard - y_soft).detach() + y_soft
        else:
            # soft output: return the probabilities unchanged (no discretization).
            return y_soft
def gumbel_sampler(self, fake, hard=True, dim=1):
logits = torch.log(fake + 0.00001)
if torch.isnan(logits.max()).data:
print(fake.min(), fake.max())
gumbels = -(torch.empty_like(logits).exponential_()).log() # ~Gumbel(0, 1)
gumbels = (logits + gumbels) / self.opt.t # ~Gumbel(logits, tau)
y_soft = gumbels.softmax(dim)
if hard:
# straight through.
index = y_soft.max(dim, keepdim=True)[1]
y_hard = torch.zeros_like(logits).scatter_(dim, index, 1.0)
return (y_hard - y_soft).detach() + y_soft
else:
# reparametrization trick.
return y_soft
def none_sampler(self, fake, hard=True, dim=1):
return fake
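# Hypothetical sanity-check sketch for the straight-through estimators above
# (toy tensors only; no SegModel options object is constructed here):
if __name__ == "__main__":
    logits = torch.randn(2, 5, 4, 4, requires_grad=True)
    probs = torch.softmax(logits, dim=1)
    index = probs.max(1, keepdim=True)[1]
    hard = torch.zeros_like(probs).scatter_(1, index, 1.0)
    straight_through = (hard - probs).detach() + probs  # forward: one-hot, backward: soft gradient
    weight = torch.randn(2, 5, 4, 4)
    (straight_through * weight).sum().backward()
    print(bool(logits.grad.abs().sum() > 0))  # True: gradients flow through the soft probabilities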
|
44465
|
from chroma_core.lib.storage_plugin.api import attributes
from chroma_core.lib.storage_plugin.api.identifiers import GlobalId, ScopedId
from chroma_core.lib.storage_plugin.api.plugin import Plugin
from chroma_core.lib.storage_plugin.api import resources
from chroma_core.lib.storage_plugin.api import relations
version = 1
class Controller(resources.ScannableResource):
class Meta:
identifier = GlobalId("address")
address = attributes.String()
class Lun(resources.LogicalDrive):
class Meta:
identifier = ScopedId("lun_id")
lun_id = attributes.String()
class Presentation(resources.Resource):
lun_id = attributes.String()
path = attributes.String()
host_id = attributes.Integer()
class Meta:
identifier = ScopedId("lun_id", "host_id")
relations = [
relations.Provide(provide_to=resources.DeviceNode, attributes=["host_id", "path"]),
relations.Subscribe(subscribe_to=Lun, attributes=["lun_id"]),
]
class TestPlugin(Plugin):
pass
|
44469
|
from .functions import *
from .sources import *
from .sinks import *
from .transfers import *
from .discrete import *
from .linalg import *
from .displays import *
from .connections import *
url = "https://petercorke.github.io/bdsim/" + __package__
|
44502
|
import torchvision
__all__ = ["plot_compare"]
def plot_compare(sr, hr, baseline, filename):
"""
Plot Super-Resolution and High-Resolution image comparison
"""
sr, hr, baseline = sr.squeeze(), hr.squeeze(), baseline.squeeze()
grid = torchvision.utils.make_grid([hr, baseline, sr])
torchvision.utils.save_image(grid, filename)
|
44505
|
import numbers
import xnmt.tensor_tools as tt
import xnmt.modelparts.decoders as decoders
import xnmt.transducers.recurrent as recurrent
import xnmt.transducers.base as transducers_base
import xnmt.expression_seqs as expr_seq
import xnmt.vocabs as vocabs
class SimultaneousState(decoders.AutoRegressiveDecoderState):
"""
The read/write state used to determine the state of the SimultaneousTranslator.
"""
def __init__(self,
model,
encoder_state: recurrent.UniLSTMState,
context_state: decoders.AutoRegressiveDecoderState,
output_embed: tt.Tensor,
to_read:int = 0,
to_write:int = 0,
prev_written_word: numbers.Integral = None,
reset_attender:bool = True):
super().__init__(None, None)
self.model = model
self.encoder_state = encoder_state
self.context_state = context_state
self.output_embed = output_embed
self.has_been_read = to_read
self.has_been_written = to_write
self.prev_written_word = prev_written_word
self.reset_attender = reset_attender
def read(self, src):
src_embed = self.model.src_embedder.embed(src[self.has_been_read])
next_encoder_state = self.encoder_state.add_input(src_embed)
return SimultaneousState(self.model, next_encoder_state, self.context_state,
self.output_embed, self.has_been_read+1, self.has_been_written,
self.prev_written_word, True)
def calc_context(self, src_encoding):
# Generating h_t based on RNN(h_{t-1}, embed(e_{t-1}))
if self.prev_written_word is None:
final_transducer_state = [transducers_base.FinalTransducerState(h, c) \
for h, c in zip(self.encoder_state.h(), self.encoder_state.c())]
context_state = self.model.decoder.initial_state(final_transducer_state,
vocabs.Vocab.SS)
else:
context_state = self.model.decoder.add_input(self.context_state, self.prev_written_word)
# Reset attender if there is a read action
reset_attender = self.reset_attender
if reset_attender:
self.model.attender.init_sent(expr_seq.ExpressionSequence(expr_list=src_encoding))
reset_attender = False
# Calc context for decoding
context_state.context = self.model.attender.calc_context(context_state.rnn_state.output())
return SimultaneousState(self.model, self.encoder_state, context_state,
self.output_embed, self.has_been_read, self.has_been_written,
self.prev_written_word,
reset_attender)
def write(self, next_word):
return SimultaneousState(self.model, self.encoder_state, self.context_state,
self.model.decoder.embedder.embed(next_word), self.has_been_read,
self.has_been_written+1,
next_word,
self.reset_attender)
# These states are used for decoding
def as_vector(self):
return self.context_state.as_vector()
@property
def rnn_state(self):
return self.context_state.rnn_state
@property
def context(self):
return self.context_state.context
@context.setter
def context(self, value):
self.context_state.context = value
|
44512
|
TEST_TEMP_RAW = 529191
TEST_TEMP_CMP = 24.7894877676
TEST_PRES_RAW = 326816
TEST_PRES_CMP = 1006.61517564
TEST_ALT_CMP = 57.3174
def test_temperature():
from tools import SMBusFakeDevice
from bmp280 import BMP280
from calibration import BMP280Calibration
dev = SMBusFakeDevice(1)
# Load the fake temperature into the virtual registers
dev.regs[0xfc] = (TEST_TEMP_RAW & 0x0000F) << 4
dev.regs[0xfb] = (TEST_TEMP_RAW & 0x00FF0) >> 4
dev.regs[0xfa] = (TEST_TEMP_RAW & 0xFF000) >> 12
bmp280 = BMP280(i2c_dev=dev)
bmp280.setup()
# Replace the loaded calibration with our known values
bmp280.calibration = BMP280Calibration()
assert round(bmp280.get_temperature(), 4) == round(TEST_TEMP_CMP, 4)
def test_temperature_forced():
from tools import SMBusFakeDevice
from bmp280 import BMP280
from calibration import BMP280Calibration
dev = SMBusFakeDevice(1)
# Load the fake temperature into the virtual registers
dev.regs[0xfc] = (TEST_TEMP_RAW & 0x0000F) << 4
dev.regs[0xfb] = (TEST_TEMP_RAW & 0x00FF0) >> 4
dev.regs[0xfa] = (TEST_TEMP_RAW & 0xFF000) >> 12
bmp280 = BMP280(i2c_dev=dev)
bmp280.setup(mode="forced")
# Replace the loaded calibration with our known values
bmp280.calibration = BMP280Calibration()
assert round(bmp280.get_temperature(), 4) == round(TEST_TEMP_CMP, 4)
def test_pressure():
from tools import SMBusFakeDevice
from bmp280 import BMP280
from calibration import BMP280Calibration
dev = SMBusFakeDevice(1)
# Load the fake temperature values into the virtual registers
# Pressure is temperature compensated!!!
dev.regs[0xfc] = (TEST_TEMP_RAW & 0x0000F) << 4
dev.regs[0xfb] = (TEST_TEMP_RAW & 0x00FF0) >> 4
dev.regs[0xfa] = (TEST_TEMP_RAW & 0xFF000) >> 12
# Load the fake pressure values
dev.regs[0xf9] = (TEST_PRES_RAW & 0x0000F) << 4
dev.regs[0xf8] = (TEST_PRES_RAW & 0x00FF0) >> 4
dev.regs[0xf7] = (TEST_PRES_RAW & 0xFF000) >> 12
bmp280 = BMP280(i2c_dev=dev)
bmp280.setup()
# Replace the loaded calibration with our known values
bmp280.calibration = BMP280Calibration()
assert round(bmp280.get_pressure(), 4) == round(TEST_PRES_CMP, 4)
def test_altitude():
from tools import SMBusFakeDevice
from bmp280 import BMP280
from calibration import BMP280Calibration
dev = SMBusFakeDevice(1)
# Load the fake temperature values into the virtual registers
# Pressure is temperature compensated!!!
dev.regs[0xfc] = (TEST_TEMP_RAW & 0x0000F) << 4
dev.regs[0xfb] = (TEST_TEMP_RAW & 0x00FF0) >> 4
dev.regs[0xfa] = (TEST_TEMP_RAW & 0xFF000) >> 12
# Load the fake pressure values
dev.regs[0xf9] = (TEST_PRES_RAW & 0x0000F) << 4
dev.regs[0xf8] = (TEST_PRES_RAW & 0x00FF0) >> 4
dev.regs[0xf7] = (TEST_PRES_RAW & 0xFF000) >> 12
bmp280 = BMP280(i2c_dev=dev)
bmp280.setup()
# Replace the loaded calibration with our known values
bmp280.calibration = BMP280Calibration()
assert round(bmp280.get_altitude(), 4) == round(TEST_ALT_CMP, 4)
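# Hypothetical helper sketch (not used by the tests above): the 20-bit raw ADC value is split
# across the msb/lsb/xlsb registers exactly as done inline in each test.
def load_raw_into_regs(dev, msb_addr, raw_value):
    dev.regs[msb_addr] = (raw_value & 0xFF000) >> 12      # msb
    dev.regs[msb_addr + 1] = (raw_value & 0x00FF0) >> 4   # lsb
    dev.regs[msb_addr + 2] = (raw_value & 0x0000F) << 4   # xlsb (upper nibble)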
|
44555
|
import sys
sys.path.insert(0, '../')
from mocap.settings import get_amass_validation_files, get_amass_test_files
from mocap.math.amass_fk import rotmat2euclidean, exp2euclidean
from mocap.visualization.sequence import SequenceVisualizer
from mocap.math.mirror_smpl import mirror_p3d
from mocap.datasets.dataset import Limb
from mocap.datasets.combined import Combined
from mocap.datasets.framerate import AdaptFramerate
import mocap.datasets.h36m as H36M
import numpy as np
import numpy.linalg as la
from mocap.datasets.amass import AMASS_SMPL3d, AMASS_QUAT, AMASS_EXP
data_loc = '/mnt/Data/datasets/amass'
val = get_amass_validation_files()
test = get_amass_test_files()
ds = AMASS_SMPL3d(val, data_loc=data_loc)
print(ds.get_joints_for_limb(Limb.LEFT_LEG))
ds = AdaptFramerate(Combined(ds), target_framerate=50)
print(ds.get_joints_for_limb(Limb.LEFT_LEG))
ds_h36m = Combined(H36M.H36M_FixedSkeleton(actors=['S5'], actions=['walking'], remove_global_Rt=True))
seq3d = ds[0]
seq3d_h36m = ds_h36m[0]
seq3d = seq3d[0:200].reshape((200, 14, 3))
seq3d_h36m = seq3d_h36m[0:200].reshape((200, 14, 3))
a = np.array([[[0.4, 0, 0]]])
b = np.array([[[-0.4, 0, 0]]])
seq3d += a
seq3d_h36m += b
vis_dir = '../output/'
vis = SequenceVisualizer(vis_dir, 'vis_amass_vs_h36m',
to_file=True,
mark_origin=False)
vis.plot(seq1=seq3d, seq2=seq3d_h36m, parallel=True,
create_video=True,
noaxis=False,
plot_jid=False,
)
|
44558
|
from django.test import TestCase
from review.models import Review
class TestReviewModel(TestCase):
'''
    Test suite for the Review model.
'''
def setUp(self):
'''
Set up test data for the review model.
'''
Review.objects.create(
feedback='Test review',
riderReview='Test review content',
)
def tearDown(self):
'''
Clean up test data for the review model.
'''
Review.objects.all().delete()
def test_review_feedback(self):
'''
Test review model for feedback.
'''
review = Review.objects.get(feedback='Test review')
self.assertEqual(review.feedback, 'Test review')
def test_review_rider_review(self):
'''
Test review model for rider review.
'''
review = Review.objects.get(riderReview='Test review content')
self.assertEqual(review.riderReview, 'Test review content')
def test_review_verbose_name_plural(self):
'''
Test review model for verbose name plural.
'''
self.assertEqual(str(Review._meta.verbose_name_plural), 'Customer feedback')
|
44560
|
from openprocurement.tender.core.procedure.serializers.base import ListSerializer
from openprocurement.tender.core.procedure.serializers.document import ConfidentialDocumentSerializer
from openprocurement.tender.core.procedure.serializers.parameter import ParameterSerializer
from openprocurement.tender.esco.procedure.serializers.lot_value import LotValueSerializer
from openprocurement.tender.esco.procedure.serializers.value import ValueSerializer
from openprocurement.tender.openeu.procedure.serializers import BidSerializer as BaseBidSerializer
class BidSerializer(BaseBidSerializer):
serializers = {
"value": ValueSerializer,
"lotValues": ListSerializer(LotValueSerializer),
"documents": ListSerializer(ConfidentialDocumentSerializer),
"parameters": ListSerializer(ParameterSerializer),
}
|
44675
|
import networkx as nx
class Hierarchy:
def __init__(self, tree, column):
self.tree = tree
self.column = column
def _leaves_below(self, node):
leaves = sum(([vv for vv in v if self.tree.out_degree(vv) == 0]
for k, v in nx.dfs_successors(self.tree, node).items()),
[])
return sorted(leaves) or [node]
def __call__(self, *nodes):
"""Return process IDs below the given nodes in the tree"""
s = set()
for node in nodes:
if self.tree.in_degree(node) == 0:
return None # all
s.update(self._leaves_below(node))
if len(s) == 1:
query = '{} == "{}"'.format(self.column, s.pop())
else:
query = '{} in {}'.format(self.column, repr(sorted(s)))
return query
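# Hypothetical usage sketch (node names and the "pid" column are assumptions, not from the original module):
if __name__ == "__main__":
    tree = nx.DiGraph()
    tree.add_edges_from([("root", "a"), ("root", "b"), ("a", "a1"), ("a", "a2")])
    h = Hierarchy(tree, column="pid")
    print(h("a"))     # pid in ['a1', 'a2']
    print(h("b"))     # pid == "b"
    print(h("root"))  # None -> no restriction, select everything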
|
44688
|
import orderedset
def find_cycle(nodes, successors):
    """Depth-first search for a cycle; return (path, repeated_node) if one is found, else None."""
path = orderedset.orderedset()
visited = set()
def visit(node):
# If the node is already in the current path, we have found a cycle.
if not path.add(node):
return (path, node)
# If we have otherwise already visited this node, we don't need to visit
# it again.
if node in visited:
item = path.pop()
assert item == node
return
visited.add(node)
# Otherwise, visit all the successors.
for succ in successors(node):
cycle = visit(succ)
if cycle is not None:
return cycle
item = path.pop()
assert item == node
return None
for node in nodes:
cycle = visit(node)
if cycle is not None:
return cycle
else:
assert not path.items
return None
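# Hypothetical usage sketch (graph and node names are assumptions; it relies on the
# orderedset package imported above behaving as find_cycle expects):
if __name__ == "__main__":
    graph = {"a": ["b"], "b": ["c"], "c": ["a"], "d": []}
    cycle = find_cycle(graph, lambda n: graph[n])
    print(cycle)  # expected: (path holding a -> b -> c, "a")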
|
44757
|
from typing import Callable, AsyncGenerator, Generator
import asyncio
import httpx
import pytest
from asgi_lifespan import LifespanManager
from fastapi import FastAPI
from fastapi.testclient import TestClient
TestClientGenerator = Callable[[FastAPI], AsyncGenerator[httpx.AsyncClient, None]]
@pytest.fixture(scope="session")
def event_loop():
loop = asyncio.get_event_loop()
yield loop
loop.close()
@pytest.fixture
async def client(
request: pytest.FixtureRequest,
) -> AsyncGenerator[httpx.AsyncClient, None]:
marker = request.node.get_closest_marker("fastapi")
if marker is None:
raise ValueError("client fixture: the marker fastapi must be provided")
try:
app = marker.kwargs["app"]
except KeyError:
raise ValueError(
"client fixture: keyword argument app must be provided in the marker"
)
if not isinstance(app, FastAPI):
raise ValueError("client fixture: app must be a FastAPI instance")
dependency_overrides = marker.kwargs.get("dependency_overrides")
if dependency_overrides:
if not isinstance(dependency_overrides, dict):
raise ValueError(
"client fixture: dependency_overrides must be a dictionary"
)
app.dependency_overrides = dependency_overrides
run_lifespan_events = marker.kwargs.get("run_lifespan_events", True)
if not isinstance(run_lifespan_events, bool):
raise ValueError("client fixture: run_lifespan_events must be a bool")
test_client_generator = httpx.AsyncClient(app=app, base_url="http://app.io")
if run_lifespan_events:
async with LifespanManager(app):
async with test_client_generator as test_client:
yield test_client
else:
async with test_client_generator as test_client:
yield test_client
@pytest.fixture
def websocket_client(
request: pytest.FixtureRequest,
event_loop: asyncio.AbstractEventLoop,
) -> Generator[TestClient, None, None]:
asyncio.set_event_loop(event_loop)
marker = request.node.get_closest_marker("fastapi")
if marker is None:
raise ValueError("client fixture: the marker fastapi must be provided")
try:
app = marker.kwargs["app"]
except KeyError:
raise ValueError(
"client fixture: keyword argument app must be provided in the marker"
)
if not isinstance(app, FastAPI):
raise ValueError("client fixture: app must be a FastAPI instance")
dependency_overrides = marker.kwargs.get("dependency_overrides")
if dependency_overrides:
if not isinstance(dependency_overrides, dict):
raise ValueError(
"client fixture: dependency_overrides must be a dictionary"
)
app.dependency_overrides = dependency_overrides
with TestClient(app) as test_client:
yield test_client
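# Hypothetical usage sketch (the app, the route and the pytest-asyncio marker are assumptions,
# not part of the fixtures above):
#
# app = FastAPI()
#
# @app.get("/ping")
# async def ping():
#     return {"ping": "pong"}
#
# @pytest.mark.fastapi(app=app)
# @pytest.mark.asyncio
# async def test_ping(client: httpx.AsyncClient):
#     response = await client.get("/ping")
#     assert response.status_code == 200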
|
44767
|
from data_types.user import User
class PullRequestReview:
"""
GitHub Pull Request Review
https://developer.github.com/v3/pulls/reviews/
Attributes:
id: Review id
body: Review body text
html_url: Public URL for issue on github.com
state: approved|commented|changes_requested
user: Review author User object
submitted_at: Submitted time
        pull_request_url: If the review is linked to a pull request, stores its public URL
"""
def __init__(self, data):
# Internal GitHub id
self.id = data.get('id', 0)
        # Review author
self.user = None
if 'user' in data:
self.user = User(data['user'])
# Body
self.body = data.get('body', '')
# Dates
self.submitted_at = data.get('submitted_at', '')
self.html_url = data.get('html_url', '')
# Review result
self.state = data.get('state', '')
# Linked pull request
self.pull_request_url = ''
if 'pull_request' in data:
self.pull_request_url = data['pull_request'].get('html_url', '')
|
44811
|
from .base_dataset import BaseDataset
from .baseline_dataset import BaselineDataset
from .refinement_dataset import RefinementDataset
__all__ = [
'BaseDataset',
'BaselineDataset',
'RefinementDataset'
]
|
44814
|
import aiohttp
import asyncio
import json
import logging
import requests
from typing import cast, Iterable, List, Optional
from electrumsv.constants import TxFlags
logging.basicConfig(level=logging.DEBUG)
class TxStateWSClient:
def __init__(self, host: str="127.0.0.1", port: int=9999, wallet_name: str="worker1.sqlite",
wallet_password: str="<PASSWORD>", account: int=1) -> None:
self.host = host
self.port = port
self.url = f'http://{self.host}:{self.port}/v1/regtest/dapp/' \
f'wallets/{wallet_name}/{account}/txs/websocket/text-events'
self.wallet_name = wallet_name
        self.wallet_password = wallet_password
self.account = account
self.session = aiohttp.ClientSession()
self._ws: Optional[aiohttp.client.ClientWebSocketResponse] = None
self.msg_queue = asyncio.Queue()
self.logger = logging.getLogger("tx-state-ws-client")
async def __aenter__(self):
        # Normally the RESTAPI pulls the password out of the body, but `ws_connect` cannot be
        # passed a data/json parameter even if its method is changed to POST.
self._ws = await self.session.ws_connect(self.url,
headers={ "X-Wallet-Password": self.wallet_password })
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
await cast(aiohttp.client.ClientWebSocketResponse, self._ws).close()
await self.session.close()
async def send_str(self, msg: str):
await cast(aiohttp.client.ClientWebSocketResponse, self._ws).send_str(msg)
async def _receive_msgs(self):
try:
async for msg in cast(aiohttp.client.ClientWebSocketResponse, self._ws):
if json.loads(msg.data).get('code'):
self.logger.debug(f'Error message received from server: {msg.data}')
continue
self.logger.debug(f'Message received from server: {msg.data}')
self.msg_queue.put_nowait(msg.data)
await asyncio.sleep(0)
if msg.type in (aiohttp.WSMsgType.CLOSED,
aiohttp.WSMsgType.ERROR):
break
finally:
self.msg_queue.put_nowait(None) # poison pill
async def block_until_mempool(self, txids: Iterable[str]) -> None:
self._receive_msg_task = asyncio.create_task(self._receive_msgs())
subs = json.dumps({
"txids": list(txids)
})
txids_set = set(txids)
await self.send_str(subs)
while True:
msg = await self.msg_queue.get()
if not msg: # poison pill
break
msg = json.loads(msg)
txid = msg.get("txid")
if not txid:
continue
tx_flags = msg.get("tx_flags")
if msg.get("txid") in txids_set and \
(tx_flags & TxFlags.STATE_CLEARED) == TxFlags.STATE_CLEARED or \
(tx_flags & TxFlags.STATE_SETTLED) == TxFlags.STATE_SETTLED:
txids_set.remove(txid)
if len(txids_set) == 0:
break
async def block_until_confirmed(self, txids: List[str]) -> None:
self._receive_msg_task = asyncio.create_task(self._receive_msgs())
subs = json.dumps({
"txids": list(txids)
})
txids_set = set(txids)
await self.send_str(subs)
while True:
msg = await self.msg_queue.get()
if not msg: # poison pill
break
self.logger.debug(msg)
msg = json.loads(msg)
txid = msg.get("txid")
if not txid:
continue
tx_flags = msg.get("tx_flags")
if msg.get("txid") in txids_set and \
(tx_flags & TxFlags.STATE_SETTLED == TxFlags.STATE_SETTLED):
txids_set.remove(txid)
if len(txids_set) == 0:
break
if __name__ == "__main__":
logger = logging.getLogger("main")
logger_urllib3 = logging.getLogger("urllib3")
logger_urllib3.setLevel(logging.WARNING)
async def wait_for_mempool(txids):
async with TxStateWSClient() as ws_client:
await ws_client.block_until_mempool(txids)
async def wait_for_confirmation(txids):
async with TxStateWSClient() as ws_client:
await ws_client.block_until_confirmed(txids)
result1 = requests.post(f'http://127.0.0.1:9999/v1/regtest/dapp/wallets/'
f'worker1.sqlite/load_wallet')
result2 = requests.post(f'http://127.0.0.1:9999/v1/regtest/dapp/wallets/'
f'worker1.sqlite/1/topup_account')
if result2.status_code != 200:
raise requests.exceptions.HTTPError(result2.text)
txids = [result2.json()["txid"]]
logger.info("mine a block to observe the websocket receiving the push notification and "
"unblocking the thread")
asyncio.run(wait_for_confirmation(txids))
|
44821
|
from opendatatools.common import RestAgent, md5
from progressbar import ProgressBar
import json
import pandas as pd
import io
import hashlib
import time
index_map = {
'Barclay_Hedge_Fund_Index' : 'ghsndx',
'Convertible_Arbitrage_Index' : 'ghsca',
'Distressed_Securities_Index' : 'ghsds',
'Emerging_Markets_Index' : 'ghsem',
'Equity_Long_Bias_Index' : 'ghselb',
'Equity_Long_Short_Index' : 'ghsels',
'Equity_Market_Neutral_Index' : 'ghsemn',
'European_Equities_Index' : 'ghsee',
'Event_Driven_Index' : 'ghsed',
'Fixed_Income_Arbitrage_Index' : 'ghsfia',
'Fund_of_Funds_Index' : 'ghsfof',
'Global_Macro_Index' : 'ghsmc',
'Healthcare_&_Biotechnology_Index': 'ghsbio',
'Merger_Arbitrage_Index' : 'ghsma',
'Multi_Strategy_Index' : 'ghsms',
'Pacific_Rim_Equities_Index' : 'ghspre',
'Technology_Index' : 'ghstec',
}
class SimuAgent(RestAgent):
def __init__(self):
RestAgent.__init__(self)
self.user_info = None
self.df_fundlist = None
self.cookies = None
def login(self, username, password):
url = 'https://passport.simuwang.com/index.php?m=Passport&c=auth&a=login&type=login&name=%s&pass=%s&reme=1&rn=1' % (username, password)
self.add_headers({'Referer': 'https://dc.simuwang.com/'})
response = self.do_request(url)
if response is None:
            return None, 'Login failed'
jsonobj = json.loads(response)
suc = jsonobj['suc']
msg = jsonobj['msg']
if suc != 1:
return None, msg
self.cookies = self.get_cookies()
self.user_info = jsonobj['data']
return self.user_info, msg
def prepare_cookies(self, url):
response = self.do_request(url, None)
if response is not None:
cookies = self.get_cookies()
return cookies
else:
return None
def _get_rz_token(self, time):
mk = time * 158995555893
mtoken = md5(md5(str(mk))) + '.' + str(time)
return mtoken
def _get_fund_list_page(self, page_no):
url = 'https://dc.simuwang.com/ranking/get?page=%s&condition=fund_type:1,6,4,3,8,2;ret:9;rating_year:1;istiered:0;company_type:1;sort_name:profit_col2;sort_asc:desc;keyword:' % page_no
response = self.do_request(url)
if response is None:
            return None, 'Failed to fetch data', None
jsonobj = json.loads(response)
code = jsonobj['code']
msg = jsonobj['msg']
if code != 1000:
return None, msg, None
df = pd.DataFrame(jsonobj['data'])
pageinfo = jsonobj['pager']
return df, '', pageinfo
def load_data(self):
page_no = 1
df_list = []
df, msg, pageinfo = self._get_fund_list_page(page_no)
if df is None:
return None, msg
df_list.append(df)
page_count = pageinfo['pagecount']
process_bar = ProgressBar().start(max_value=page_count)
page_no = page_no + 1
while page_no <= page_count:
df, msg, pageinfo = self._get_fund_list_page(page_no)
if df is None:
return None, msg
df_list.append(df)
process_bar.update(page_no)
page_no = page_no + 1
self.df_fundlist = pd.concat(df_list)
return self.df_fundlist, ''
def get_fund_list(self):
if self.df_fundlist is None:
            return None, 'Please call load_data first to load the data'
return self.df_fundlist, ''
def _get_sign(self, url, params):
str = url
for k,v in params.items():
str = str + k + params[k]
sha1 = hashlib.sha1()
sha1.update(str.encode('utf8'))
sign = sha1.hexdigest()
return sign
def _get_token(self, fund_id):
sign = self._get_sign('https://dc.simuwang.com/Api/getToken', {'id' : fund_id})
url = 'https://dc.simuwang.com/Api/getToken?id=%s&sign=%s' % (fund_id, sign)
self.add_headers({'Referer': 'https://dc.simuwang.com/'})
response = self.do_request(url)
if response is None:
            return None, 'Failed to fetch data'
jsonobj = json.loads(response)
code = jsonobj['code']
msg = jsonobj['message']
if code != 1000 :
return code, msg
self.cookies.update(self.get_cookies())
salt = jsonobj['data']
muid = self.user_info['userid']
#str = 'id%smuid%spage%s%s' % (fund_id, muid, page_no, salt)
str = '%s%s' % (fund_id, salt)
sha1 = hashlib.sha1()
sha1.update(str.encode('utf8'))
token = sha1.hexdigest()
return token, ''
def _get_fund_nav_page(self, fund_id, page_no):
muid = self.user_info['userid']
token, msg = self._get_token(fund_id)
if token is None:
            return None, 'Failed to get token: ' + msg, ''
url = 'https://dc.simuwang.com/fund/getNavList.html'
self.add_headers({'Referer': 'https://dc.simuwang.com/product/%s.html' % fund_id})
data = {
'id' : fund_id,
'muid' : muid,
'page' : str(page_no),
'token': token,
}
response = self.do_request(url, param=data, cookies=self.cookies, encoding="utf8")
if response is None:
            return None, 'Failed to fetch data', ''
jsonobj = json.loads(response)
code = jsonobj['code']
msg = jsonobj['msg']
if code != 1000 :
return code, msg, ''
df = pd.DataFrame(jsonobj['data'])
pageinfo = jsonobj['pager']
return df, '', pageinfo
def _bit_encrypt(self, str, key):
cryText = ''
keyLen = len(key)
strLen = len(str)
for i in range(strLen):
k = i % keyLen
cryText = cryText + chr(ord(str[i]) - k)
return cryText
def _bit_encrypt2(self, str, key):
cryText = ''
keyLen = len(key)
strLen = len(str)
for i in range(strLen):
k = i % keyLen
cryText = cryText + chr(ord(str[i]) ^ ord(key[k]))
return cryText
def _decrypt_data(self, str, func, key):
# return self._bit_encrypt(str, 'cd0a8bee4c6b2f8a91ad5538dde2eb34')
# return self._bit_encrypt(str, '937ab03370497f2b4e8d0599ad25c44c')
# return self._bit_encrypt(str, '083975ce19392492bbccff21a52f1ace')
return func(str, key)
def _get_decrypt_info(self, fund_id):
url = 'https://dc.simuwang.com/product/%s.html' % fund_id
response = self.do_request(url, param=None, cookies=self.cookies, encoding="utf8")
if response is None:
            return None, 'Failed to fetch data', ''
if "String.fromCharCode(str.charCodeAt(i) - k)" in response:
decrypt_func = self._bit_encrypt
else:
decrypt_func = self._bit_encrypt2
if response.find("return xOrEncrypt(str, ")> 0:
tag = "return xOrEncrypt(str, "
else:
tag = "return bitEncrypt(str, "
pos = response.index(tag) + len(tag) + 1
key = response[pos:pos+32]
return decrypt_func, key
def get_fund_nav(self, fund_id, time_elapse = 0):
if self.user_info is None:
            return None, 'Please log in first'
page_no = 1
df_list = []
df, msg, pageinfo = self._get_fund_nav_page(fund_id, page_no)
if df is None:
return None, msg
df_list.append(df)
page_count = pageinfo['pagecount']
page_no = page_no + 1
while page_no <= page_count:
            try_times = 1
            while try_times <= 3:
                df, msg, pageinfo = self._get_fund_nav_page(fund_id, page_no)
                if df is None:
                    if try_times >= 3:
                        # give up on this page after three failed attempts
                        return None, msg
                    try_times = try_times + 1
                    continue
                else:
                    df_list.append(df)
                    break
page_no = page_no + 1
if time_elapse > 0:
time.sleep(time_elapse)
df_nav = pd.concat(df_list)
df_nav.drop('c', axis=1, inplace=True)
df_nav.rename(columns={'d': 'date', 'n': 'nav', 'cn' : 'accu_nav', 'cnw' : 'accu_nav_w'}, inplace=True)
        # This website has far too many little traps
func, key = self._get_decrypt_info(fund_id)
df_nav['nav'] = df_nav['nav'].apply(lambda x : self._decrypt_data(x, func, key))
df_nav['accu_nav'] = df_nav['accu_nav'].apply(lambda x : self._decrypt_data(x, func, key))
df_nav['accu_nav_w'] = df_nav['accu_nav_w'].apply(lambda x : self._decrypt_data(x, func, key))
#df_nav['nav'] = df_nav['nav'] - df_nav.index * 0.01 - 0.01
#df_nav['accu_nav'] = df_nav['accu_nav'].apply(lambda x: float(x) - 0.01)
#df_nav['accu_nav_w'] = df_nav['accu_nav_w'].apply(lambda x: float(x) - 0.02)
return df_nav, ''
class BarclayAgent(RestAgent):
def __init__(self):
RestAgent.__init__(self)
self.add_headers({'Referer': 'https://www.barclayhedge.com/research/indices/ghs/Equity_Long_Short_Index.html'})
        self.add_headers({'Content-Type': 'application/x-www-form-urlencoded'})
def get_data(self, index):
prog_cod = index_map[index]
url = "https://www.barclayhedge.com/cgi-bin/barclay_stats/ghsndx.cgi"
param = {
'dump': 'excel',
'prog_cod': prog_cod,
}
response = self.do_request(url, param=param, method='POST', type='binary')
if response is not None:
excel = pd.ExcelFile(io.BytesIO(response))
df = excel.parse('Sheet1').dropna(how='all').copy().reset_index().drop(0)
df.columns = ['year', 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec', 'YTD']
df = df.set_index('year')
return df, ''
return None, "获取数据失败"
|
44826
|
import networkx as nx
import matplotlib.pyplot as plt
from nxviz import GeoPlot
G = nx.read_gpickle("divvy.pkl")
print(list(G.nodes(data=True))[0])
G_new = G.copy()
for n1, n2, d in G.edges(data=True):
if d["count"] < 200:
G_new.remove_edge(n1, n2)
g = GeoPlot(
G_new,
node_lat="latitude",
node_lon="longitude",
node_color="dpcapacity",
node_size=0.005,
)
g.draw()
plt.show()
|
44838
|
import numpy as np
import pandas as pd
import torch
import src.configuration as C
import src.dataset as dataset
import src.models as models
import src.utils as utils
from pathlib import Path
from fastprogress import progress_bar
if __name__ == "__main__":
args = utils.get_sed_parser().parse_args()
config = utils.load_config(args.config)
global_params = config["globals"]
output_dir = Path(global_params["output_dir"])
output_dir.mkdir(exist_ok=True, parents=True)
utils.set_seed(global_params["seed"])
device = C.get_device(global_params["device"])
df, datadir = C.get_metadata(config)
splitter = C.get_split(config)
for i, (_, val_idx) in enumerate(splitter.split(df, y=df["ebird_code"])):
if i not in global_params["folds"]:
continue
val_df = df.loc[val_idx, :].reset_index(drop=True)
loader = C.get_sed_inference_loader(val_df, datadir, config)
model = models.get_model_for_inference(config,
global_params["weights"][i])
if not torch.cuda.is_available():
device = torch.device("cpu")
else:
device = torch.device("cuda")
model.to(device)
model.eval()
estimated_event_list = []
for batch in progress_bar(loader):
waveform = batch["waveform"]
ebird_code = batch["ebird_code"][0]
wav_name = batch["wav_name"][0]
target = batch["targets"].detach().cpu().numpy()[0]
global_time = 0.0
if waveform.ndim == 3:
waveform = waveform.squeeze(0)
batch_size = 32
whole_size = waveform.size(0)
if whole_size % batch_size == 0:
n_iter = whole_size // batch_size
else:
n_iter = whole_size // batch_size + 1
for index in range(n_iter):
iter_batch = waveform[index * batch_size:(index + 1) * batch_size]
if iter_batch.ndim == 1:
iter_batch = iter_batch.unsqueeze(0)
iter_batch = iter_batch.to(device)
with torch.no_grad():
prediction = model(iter_batch)
framewise_output = prediction["framewise_output"].detach(
).cpu().numpy()
thresholded = framewise_output >= args.threshold
target_indices = np.argwhere(target).reshape(-1)
for short_clip in thresholded:
for target_idx in target_indices:
if short_clip[:, target_idx].mean() == 0:
pass
else:
detected = np.argwhere(
short_clip[:, target_idx]).reshape(-1)
head_idx = 0
tail_idx = 0
while True:
if (tail_idx + 1 == len(detected)) or (
detected[tail_idx + 1] -
detected[tail_idx] != 1):
onset = 0.01 * detected[head_idx] + global_time
offset = 0.01 * detected[tail_idx] + global_time
estimated_event = {
"filename": wav_name,
"ebird_code": dataset.INV_BIRD_CODE[target_idx],
"onset": onset,
"offset": offset
}
estimated_event_list.append(estimated_event)
head_idx = tail_idx + 1
tail_idx = tail_idx + 1
if head_idx > len(detected):
break
else:
tail_idx = tail_idx + 1
global_time += 5.0
estimated_event_df = pd.DataFrame(estimated_event_list)
save_filename = global_params["save_path"].replace(".csv", "")
save_filename += f"_th{args.threshold}" + ".csv"
save_path = output_dir / save_filename
if save_path.exists():
event_level_labels = pd.read_csv(save_path)
estimated_event_df = pd.concat(
[event_level_labels, estimated_event_df], axis=0,
sort=False).reset_index(drop=True)
estimated_event_df.to_csv(save_path, index=False)
else:
estimated_event_df.to_csv(save_path, index=False)
|
44870
|
import torch
import torch.nn as nn
from torch.nn import init
from torchvision import models
from torch.autograd import Variable
from resnet import resnet50, resnet18
import torch.nn.functional as F
import math
from attention import IWPA, AVG, MAX, GEM
class Normalize(nn.Module):
def __init__(self, power=2):
super(Normalize, self).__init__()
self.power = power
def forward(self, x):
norm = x.pow(self.power).sum(1, keepdim=True).pow(1. / self.power)
out = x.div(norm)
return out
# #####################################################################
def weights_init_kaiming(m):
classname = m.__class__.__name__
# print(classname)
if classname.find('Conv') != -1:
init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
elif classname.find('Linear') != -1:
init.kaiming_normal_(m.weight.data, a=0, mode='fan_out')
init.zeros_(m.bias.data)
elif classname.find('BatchNorm1d') != -1:
init.normal_(m.weight.data, 1.0, 0.01)
init.zeros_(m.bias.data)
def weights_init_classifier(m):
classname = m.__class__.__name__
if classname.find('Linear') != -1:
init.normal_(m.weight.data, 0, 0.001)
        if m.bias is not None:
init.zeros_(m.bias.data)
# Defines the new fc layer and classification layer
# |--Linear--|--bn--|--relu--|--Linear--|
class FeatureBlock(nn.Module):
def __init__(self, input_dim, low_dim, dropout=0.5, relu=True):
super(FeatureBlock, self).__init__()
feat_block = []
feat_block += [nn.Linear(input_dim, low_dim)]
feat_block += [nn.BatchNorm1d(low_dim)]
feat_block = nn.Sequential(*feat_block)
feat_block.apply(weights_init_kaiming)
self.feat_block = feat_block
def forward(self, x):
x = self.feat_block(x)
return x
class ClassBlock(nn.Module):
def __init__(self, input_dim, class_num, dropout=0.5, relu=True):
super(ClassBlock, self).__init__()
classifier = []
if relu:
classifier += [nn.LeakyReLU(0.1)]
if dropout:
classifier += [nn.Dropout(p=dropout)]
classifier += [nn.Linear(input_dim, class_num)]
classifier = nn.Sequential(*classifier)
classifier.apply(weights_init_classifier)
self.classifier = classifier
def forward(self, x):
x = self.classifier(x)
return x
class visible_module(nn.Module):
def __init__(self, arch='resnet50'):
super(visible_module, self).__init__()
model_v = resnet50(pretrained=True,
last_conv_stride=1, last_conv_dilation=1)
# avg pooling to global pooling
self.visible = model_v
def forward(self, x):
x = self.visible.conv1(x)
x = self.visible.bn1(x)
x = self.visible.relu(x)
x = self.visible.maxpool(x)
x = self.visible.layer1(x)
return x
class thermal_module(nn.Module):
def __init__(self, arch='resnet50'):
super(thermal_module, self).__init__()
model_t = resnet50(pretrained=True,
last_conv_stride=1, last_conv_dilation=1)
# avg pooling to global pooling
self.thermal = model_t
def forward(self, x):
x = self.thermal.conv1(x)
x = self.thermal.bn1(x)
x = self.thermal.relu(x)
x = self.thermal.maxpool(x)
x = self.thermal.layer1(x)
return x
class base_resnet(nn.Module):
def __init__(self, arch='resnet50'):
super(base_resnet, self).__init__()
model_base = resnet50(pretrained=True,
last_conv_stride=1, last_conv_dilation=1)
# avg pooling to global pooling
model_base.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.base = model_base
def forward(self, x):
#x = self.base.layer1(x)
x = self.base.layer2(x)
x = self.base.layer3(x)
x = self.base.layer4(x)
return x
class embed_net(nn.Module):
def __init__(self, class_num, drop=0.2, part = 3, arch='resnet50', cpool = 'no', bpool = 'avg', fuse = 'sum'):
super(embed_net, self).__init__()
self.thermal_module = thermal_module(arch=arch)
self.visible_module = visible_module(arch=arch)
self.base_resnet = base_resnet(arch=arch)
pool_dim = 2048
pool_dim_att = 2048 if fuse == "sum" else 4096
self.dropout = drop
self.part = part
self.cpool = cpool
self.bpool = bpool
self.fuse = fuse
self.l2norm = Normalize(2)
self.bottleneck = nn.BatchNorm1d(pool_dim)
self.bottleneck.bias.requires_grad_(False) # no shift
self.classifier = nn.Linear(pool_dim, class_num, bias=False)
self.bottleneck.apply(weights_init_kaiming)
self.classifier.apply(weights_init_classifier)
if self.cpool == 'wpa':
self.classifier_att = nn.Linear(pool_dim_att, class_num, bias=False)
self.classifier_att.apply(weights_init_classifier)
self.cpool_layer = IWPA(pool_dim, part,fuse)
if self.cpool == 'avg':
self.classifier_att = nn.Linear(pool_dim_att, class_num, bias=False)
self.classifier_att.apply(weights_init_classifier)
self.cpool_layer = AVG(pool_dim,fuse)
if self.cpool == 'max':
self.classifier_att = nn.Linear(pool_dim_att, class_num, bias=False)
self.classifier_att.apply(weights_init_classifier)
self.cpool_layer = MAX(pool_dim,fuse)
if self.cpool == 'gem':
self.classifier_att = nn.Linear(pool_dim_att, class_num, bias=False)
self.classifier_att.apply(weights_init_classifier)
self.cpool_layer = GEM(pool_dim,fuse)
def forward(self, x1, x2, modal=0):
# domain specific block
if modal == 0:
x1 = self.visible_module(x1)
x2 = self.thermal_module(x2)
x = torch.cat((x1, x2), 0)
elif modal == 1:
x = self.visible_module(x1)
elif modal == 2:
x = self.thermal_module(x2)
# shared four blocks
x = self.base_resnet(x)
if self.bpool == 'gem':
b, c, _, _ = x.shape
x_pool = x.view(b, c, -1)
p = 3.0
x_pool = (torch.mean(x_pool**p, dim=-1) + 1e-12)**(1/p)
elif self.bpool == 'avg':
x_pool = F.adaptive_avg_pool2d(x,1)
x_pool = x_pool.view(x_pool.size(0), x_pool.size(1))
elif self.bpool == 'max':
x_pool = F.adaptive_max_pool2d(x,1)
x_pool = x_pool.view(x_pool.size(0), x_pool.size(1))
else:
print("wrong backbone pooling!!!")
exit()
feat = self.bottleneck(x_pool)
if self.cpool != 'no':
# intra-modality weighted part attention
if self.cpool == 'wpa':
feat_att, feat_att_bn = self.cpool_layer(x, feat, 1, self.part)
if self.cpool in ['avg', 'max', 'gem']:
feat_att, feat_att_bn = self.cpool_layer(x, feat)
if self.training:
return x_pool, self.classifier(feat), feat_att_bn, self.classifier_att(feat_att_bn)
else:
return self.l2norm(feat), self.l2norm(feat_att_bn)
else:
if self.training:
return x_pool, self.classifier(feat)
else:
return self.l2norm(feat)
|
44891
|
import logging
import collections
import json
import time
import string
import random
logger = logging.getLogger(__name__)
from schematics.types import BaseType
from schematics.exceptions import ValidationError
from nymms.utils import parse_time
import arrow
class TimestampType(BaseType):
def to_native(self, value, context=None):
if isinstance(value, arrow.arrow.Arrow):
return value
try:
return parse_time(value)
except ValueError:
return arrow.get(value)
def to_primitive(self, value, context=None):
return value.isoformat()
def _mock(self, context=None):
year = 86400 * 365
return arrow.get(time.time() + (random.randrange(-1 * 20 * year,
200 * year)))
class JSONType(BaseType):
def to_native(self, value, context=None):
if isinstance(value, basestring):
return json.loads(value)
return value
def to_primitive(self, value, context=None):
return json.dumps(value)
def _mock(self, context=None):
return dict(
[(random.choice(string.ascii_letters),
random.choice(string.printable)) for i in
range(random.randrange(4, 10))])
StateObject = collections.namedtuple('StateObject', ['name', 'code'])
STATE_OK = StateObject('ok', 0)
STATE_WARNING = STATE_WARN = StateObject('warning', 1)
STATE_CRITICAL = STATE_CRIT = StateObject('critical', 2)
STATE_UNKNOWN = StateObject('unknown', 3)
STATES = collections.OrderedDict([
('ok', STATE_OK),
('warning', STATE_WARNING),
('critical', STATE_CRITICAL),
('unknown', STATE_UNKNOWN)])
class StateType(BaseType):
def __init__(self, *args, **kwargs):
super(StateType, self).__init__(*args, choices=STATES.values(),
**kwargs)
def to_native(self, value, context=None):
if isinstance(value, StateObject):
return value
try:
int_value = int(value)
try:
return STATES.values()[int_value]
except IndexError:
return STATE_UNKNOWN
except ValueError:
try:
return STATES[value.lower()]
except KeyError:
raise ValidationError(self.messages['choices'].format(
unicode(self.choices)))
def to_primitive(self, value, context=None):
return value.code
class StateNameType(StateType):
def to_primitive(self, value, context=None):
return value.name
StateTypeObject = collections.namedtuple('StateTypeObject', ['name', 'code'])
STATE_TYPE_SOFT = StateTypeObject('soft', 0)
STATE_TYPE_HARD = StateTypeObject('hard', 1)
STATE_TYPES = collections.OrderedDict([
('soft', STATE_TYPE_SOFT),
('hard', STATE_TYPE_HARD)])
class StateTypeType(BaseType):
def __init__(self, *args, **kwargs):
super(StateTypeType, self).__init__(*args,
choices=STATE_TYPES.values(),
**kwargs)
def to_native(self, value, context=None):
if isinstance(value, StateTypeObject):
return value
try:
return STATE_TYPES.values()[int(value)]
except ValueError:
try:
return STATE_TYPES[value.lower()]
except KeyError:
raise ValidationError(self.messages['choices'].format(
unicode(self.choices)))
def to_primitive(self, value, context=None):
return value.code
class StateTypeNameType(StateTypeType):
def to_primitive(self, value, context=None):
return value.name
|
44906
|
from .kfold import PurgedKFold, CPKFold, generate_signals
from .score import cv_score
from .pipeline import Pipeline
from .hyper import clf_hyper_fit
from .distribution import LogUniformGen, log_uniform
from .utils import evaluate
|
44907
|
import tensorflow as tf
def touch(fname: str, times=None, create_dirs: bool = False):
import os
if create_dirs:
base_dir = os.path.dirname(fname)
if not os.path.exists(base_dir):
os.makedirs(base_dir)
with open(fname, 'a'):
os.utime(fname, times)
def touch_dir(base_dir: str) -> None:
import os
if not os.path.exists(base_dir):
os.makedirs(base_dir)
def now_int():
from datetime import datetime
epoch = datetime.utcfromtimestamp(0)
return (datetime.now() - epoch).total_seconds()
def bias_variable(shape, name=None):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial, name=name)
def entry_stop_gradients(target, mask):
mask_h = tf.logical_not(mask)
mask = tf.cast(mask, dtype=target.dtype)
mask_h = tf.cast(mask_h, dtype=target.dtype)
return tf.stop_gradient(mask_h * target) + mask * target
# Adapted from
# https://gist.github.com/kukuruza/03731dc494603ceab0c5#gistcomment-1879326
def on_grid(kernel, grid_side, pad=1):
"""Visualize conv. features as an image (mostly for the 1st layer).
Place kernel into a grid, with some paddings between adjacent filters.
Args:
kernel: tensor of shape [Y, X, NumChannels, NumKernels]
grid_side: side of the grid. Require: NumKernels == grid_side**2
pad: number of black pixels around each filter (between them)
Returns:
An image Tensor with shape [(Y+2*pad)*grid_side, (X+2*pad)*grid_side, NumChannels, 1].
"""
x_min = tf.reduce_min(kernel)
x_max = tf.reduce_max(kernel)
kernel1 = (kernel - x_min) / (x_max - x_min)
# pad X and Y
x1 = tf.pad(
kernel1,
tf.constant([[pad, pad], [pad, pad], [0, 0], [0, 0]]),
mode='CONSTANT')
# X and Y dimensions, w.r.t. padding
Y = kernel1.get_shape()[0] + 2 * pad
X = kernel1.get_shape()[1] + 2 * pad
channels = kernel1.get_shape()[2]
# put NumKernels to the 1st dimension
x2 = tf.transpose(x1, (3, 0, 1, 2))
# organize grid on Y axis
x3 = tf.reshape(x2,
tf.stack(
values=[grid_side, Y * grid_side, X, channels],
axis=0)) # 3
# switch X and Y axes
x4 = tf.transpose(x3, (0, 2, 1, 3))
# organize grid on X axis
x5 = tf.reshape(x4,
tf.stack(
values=[1, X * grid_side, Y * grid_side, channels],
axis=0)) # 3
# back to normal order (not combining with the next step for clarity)
x6 = tf.transpose(x5, (2, 1, 3, 0))
# to tf.image_summary order [batch_size, height, width, channels],
# where in this case batch_size == 1
x7 = tf.transpose(x6, (3, 0, 1, 2))
# scale to [0, 255] and convert to uint8
return tf.image.convert_image_dtype(x7, dtype=tf.uint8)
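# Example (a sketch, assuming TF 2.x eager execution): tile 16 random 5x5 RGB
# kernels into a 4x4 grid; with pad=1 the result has shape (1, 28, 28, 3).
#
#     kernels = tf.random.normal([5, 5, 3, 16])
#     grid = on_grid(kernels, grid_side=4)
#     grid.shape  # TensorShape([1, 28, 28, 3])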
def get_last_output(output, sequence_length, name):
"""Get the last value of the returned output of an RNN.
http://disq.us/p/1gjkgdr
output: [batch x number of steps x ... ] Output of the dynamic lstm.
sequence_length: [batch] Length of each of the sequence.
"""
rng = tf.range(0, tf.shape(sequence_length)[0])
indexes = tf.stack([rng, sequence_length - 1], 1)
    # Pass `name` as a keyword so this also works where gather_nd takes a
    # positional `batch_dims` argument (TF 2.x).
    return tf.gather_nd(output, indexes, name=name)
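# Example (a sketch, assuming TF 2.x eager execution): pick the last "real"
# step of each padded sequence.
#
#     output = tf.constant([[[1.0], [2.0], [3.0]],
#                           [[4.0], [5.0], [0.0]]])   # [batch=2, steps=3, 1]
#     lengths = tf.constant([3, 2])
#     get_last_output(output, lengths, name="last")   # -> [[3.0], [5.0]]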
|
44958
|
from typing import List
class Solution:
    def maxRotateFunction(self, nums: List[int]) -> int:
        total = sum(nums)
        n = len(nums)
        # F(0) = sum(i * nums[i])
        res = sum(i * num for i, num in enumerate(nums))
        ans = res
        # Recurrence: F(k) = F(k-1) + sum(nums) - n * nums[n - k]
        for i in range(1, n):
            res = res + total - n * nums[n - i]
            ans = max(res, ans)
        return ans
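if __name__ == "__main__":
    # Quick check of the recurrence F(k) = F(k-1) + sum(nums) - n * nums[n - k]:
    # the rotation values of [4, 3, 2, 6] are 25, 16, 23, 26, so the max is 26.
    assert Solution().maxRotateFunction([4, 3, 2, 6]) == 26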
|
44962
|
import os
import re
import shutil
from ._base import DanubeCloudCommand, CommandOption, CommandError, lcd
class Command(DanubeCloudCommand):
help = 'Generate documentation files displayed in GUI.'
DOC_REPO = 'https://github.com/erigones/esdc-docs.git'
DOC_TMP_DIR = '/var/tmp/esdc-docs'
options = (
CommandOption('--api', '--api-only', action='store_true', dest='api_only', default=False,
help='Generate only the API documentation.'),
CommandOption('--user-guide', '--user-guide-only', action='store_true', dest='user_guide_only', default=False,
help='Generate only the User Guide.'),
)
def gendoc_api(self):
"""Generate api documentation"""
with lcd(self.PROJECT_DIR):
doc_dir = self._path(self.PROJECT_DIR, 'doc', 'api')
doc_dst = self._path(self.PROJECT_DIR, 'api', 'static', 'api', 'doc')
bin_dst = self._path(self.PROJECT_DIR, 'api', 'static', 'api', 'bin')
# Build sphinx docs
with lcd(doc_dir):
self.local('make esdc-clean; make esdc ESDOCDIR="%s"' % doc_dst)
# Create es script suitable for download
es_src = self._path(self.PROJECT_DIR, 'bin', 'es')
es_dst = self._path(bin_dst, 'es')
es_current = os.path.join(self.settings.PROJECT_DIR, 'var', 'www', 'static', 'api', 'bin', 'es')
api_url = "API_URL = '%s'" % (self.settings.SITE_LINK + '/api')
if os.path.isfile(es_current):
with open(es_current, 'r') as es0:
for line in es0:
if line.startswith("API_URL = '"):
api_url = line
break
with open(es_src) as es1:
with os.fdopen(os.open(es_dst, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, 0o644), 'w') as es2:
es2.write(es1.read().replace("API_URL = 'http://127.0.0.1:8000/api'", api_url))
# Copy es_bash_completion.sh to download location
es_bc_src = self._path(doc_dir, 'es_bash_completion.sh')
self.local('cp %s %s' % (es_bc_src, bin_dst))
self.display('API documentation built successfully.', color='green')
def gendoc_user_guide(self, fallback_branch='master'):
"""Generate user guide"""
doc_dst = self._path(self.PROJECT_DIR, 'gui', 'static', 'user-guide')
with lcd(self.PROJECT_DIR):
try:
branch = self.get_git_version()[0] # Git tag or branch name
except CommandError:
self.display('Could not determine our branch or tag', color='yellow')
branch = fallback_branch
self.display('Falling back to "%s" branch' % branch, color='yellow')
else:
self.display('We are on branch "%s"' % branch)
if self._path_exists(self.DOC_TMP_DIR, 'user-guide', 'conf.py'):
existing_repo = True
self.display('%s already exists in %s' % (self.DOC_REPO, self.DOC_TMP_DIR), color='yellow')
with lcd(self.DOC_TMP_DIR):
self.local('git fetch')
self.display('%s has been successfully updated.' % self.DOC_REPO, color='green')
else:
if self._path_exists(self.DOC_TMP_DIR):
                    self.display('Removing stale %s' % self.DOC_TMP_DIR, color='yellow')
shutil.rmtree(self.DOC_TMP_DIR)
existing_repo = False
self.local('git clone %s %s' % (self.DOC_REPO, self.DOC_TMP_DIR))
                self.display('%s has been successfully cloned into %s.' % (self.DOC_REPO, self.DOC_TMP_DIR), color='green')
with lcd(self.DOC_TMP_DIR):
if self.local('git checkout %s' % branch, raise_on_error=False) != 0:
self.display('Could not checkout esdc-docs branch "%s"' % branch, color='yellow')
branch = fallback_branch
self.display('Falling back to "%s" branch' % branch, color='yellow')
self.local('git checkout %s' % branch)
self.display('Checked out esdc-docs branch "%s"' % branch, color='green')
            # If the branch is not a tag name, then we need to merge/pull
if existing_repo and not re.search('^v[0-9]', branch):
self.local('git merge --ff-only origin/%s' % branch)
self.display('Merged esdc-docs branch "%s"' % branch, color='green')
# Build sphinx docs
with lcd(self._path(self.DOC_TMP_DIR, 'user-guide')):
self.local('make esdc-clean; make esdc ESDOCDIR="%s"' % doc_dst)
self.display('User guide built successfully.', color='green')
def handle(self, api_only=False, user_guide_only=False, **options):
if api_only and user_guide_only:
pass
elif api_only:
self.gendoc_api()
return
elif user_guide_only:
self.gendoc_user_guide()
return
self.gendoc_api()
self.display('\n\n', stderr=True)
self.gendoc_user_guide()
|
44966
|
import json
import os
from eg import config
from eg import substitute
from eg import util
from mock import Mock
from mock import patch
PATH_UNSQUEEZED_FILE = os.path.join(
'test',
'assets',
'pwd_unsqueezed.md'
)
PATH_SQUEEZED_FILE = os.path.join(
'test',
'assets',
'pwd_squeezed.md'
)
def _create_config(
examples_dir=None,
custom_dir=None,
color_config=None,
use_color=True,
pager_cmd=None,
editor_cmd=None,
squeeze=False,
subs=None
):
"""
Create a config.Config object with default values for expediency in
testing.
"""
return config.Config(
examples_dir=examples_dir,
custom_dir=custom_dir,
color_config=color_config,
use_color=use_color,
pager_cmd=pager_cmd,
editor_cmd=editor_cmd,
squeeze=squeeze,
subs=subs
)
@patch('os.walk')
def test_get_file_paths_for_program_with_single(mock_walk):
program = 'cp'
examples_dir = '/Users/tyrion'
program_file = program + util.EXAMPLE_FILE_SUFFIX
expected = ['/Users/tyrion/cp.md']
mock_walk.return_value = [
[examples_dir, [], [program_file, 'cp.txt', 'other_file.md']],
]
actual = util.get_file_paths_for_program(program, examples_dir)
assert actual == expected
mock_walk.assert_called_once_with(examples_dir)
@patch('os.walk')
def test_get_file_paths_for_program_with_nested(mock_walk):
program = 'cp'
examples_dir = '/Users/tyrion'
program_file = 'cp.md'
mock_walk.return_value = [
[
examples_dir,
['dirA', 'dirB'],
[program_file, 'cp.txt', 'other_file.md'],
],
[
examples_dir + '/dirA',
['dirA-child'],
[program_file, 'bad.md'],
],
[
examples_dir + '/dirA/dirA-child',
[],
['bad.md', program_file, 'wtf.md'],
],
[
examples_dir + '/dirB',
[],
['foo.md', program_file],
],
]
expected = [
'/Users/tyrion/cp.md',
'/Users/tyrion/dirA/cp.md',
'/Users/tyrion/dirA/dirA-child/cp.md',
'/Users/tyrion/dirB/cp.md',
]
actual = util.get_file_paths_for_program(program, examples_dir)
assert actual == expected
mock_walk.assert_called_once_with(examples_dir)
@patch('os.walk')
def test_get_file_paths_for_program_with_none(mock_walk):
expected = []
mock_walk.return_value = []
actual = util.get_file_paths_for_program('cp', '/Users/tyrion')
assert actual == expected
mock_walk.assert_called_once_with('/Users/tyrion')
@patch('os.walk')
def test_get_file_paths_for_program_with_no_dir(mock_walk):
assert util.get_file_paths_for_program('cp', None) == []
@patch('eg.util.page_string')
@patch('eg.util.get_formatted_contents')
@patch('eg.util.get_contents_from_files')
@patch('eg.util.get_resolved_program')
def test_handle_program_no_entries(
mock_resolve_program,
mock_get_contents,
mock_format,
mock_page_string,
):
"""
We should do the right thing if there are no entries for a given program.
"""
program = 'cp'
test_config = _create_config()
mock_resolve_program.return_value = program
util.handle_program(program, test_config)
mock_resolve_program.assert_called_once_with(
program,
test_config
)
# We should have aborted and not called any of the
# other methods.
assert mock_get_contents.call_count == 0
assert mock_format.call_count == 0
assert mock_page_string.call_count == 0
@patch('eg.util.get_resolved_program')
@patch('eg.util.get_contents_from_files')
@patch('eg.util.get_file_paths_for_program')
@patch('eg.util.get_formatted_contents')
@patch('eg.util.page_string')
def test_handle_program_finds_paths_and_calls_open_pager_no_alias(
mock_page,
mock_format,
mock_get_paths,
mock_get_contents,
mock_resolve,
):
"""
If there are entries for the program, handle_program needs to get the
paths, get the contents, format the contents, and page the resulting
string.
"""
program = 'mv'
examples_dir = 'test-eg-dir'
custom_dir = 'test-custom-dir'
color_config = None
use_color = False
pager_cmd = 'foo bar'
squeeze = False
subs = ['foo', 'bar']
file_contents = 'I am the contents of mv.md.'
formatted_contents = 'and I am the formatted contents of mv.md.'
test_config = _create_config(
examples_dir=examples_dir,
custom_dir=custom_dir,
color_config=color_config,
use_color=use_color,
pager_cmd=pager_cmd,
squeeze=squeeze,
subs=subs
)
default_paths = ['test-eg-dir/mv.md', 'test-eg-dir/foo/mv.md']
custom_paths = ['test-custom-dir/mv.md', 'test-custom-dir/bar.md']
def return_correct_path(*args, **kwargs):
program_param = args[0]
dir_param = args[1]
if program_param != program:
raise NameError('expected ' + program + ', got ' + program_param)
if dir_param == examples_dir:
return default_paths
elif dir_param == custom_dir:
return custom_paths
else:
raise NameError(
'got ' +
dir_param +
', expected ' +
examples_dir +
' or ' +
custom_dir)
mock_format.return_value = formatted_contents
    mock_get_paths.side_effect = return_correct_path
mock_get_contents.return_value = file_contents
mock_resolve.return_value = program
util.handle_program(program, test_config)
mock_resolve.assert_called_once_with(
program,
test_config
)
mock_get_paths.assert_any_call(
program,
examples_dir
)
mock_get_paths.assert_any_call(
program,
custom_dir,
)
mock_get_contents.assert_called_once_with(
custom_paths[0],
custom_paths[1],
default_paths[0],
default_paths[1],
)
mock_format.assert_called_once_with(
file_contents,
use_color=test_config.use_color,
color_config=test_config.color_config,
squeeze=test_config.squeeze,
subs=test_config.subs
)
mock_page.assert_called_once_with(
formatted_contents,
test_config.pager_cmd
)
@patch('eg.util.get_resolved_program')
@patch('eg.util.get_contents_from_files')
@patch('eg.util.get_file_paths_for_program')
@patch('eg.util.get_formatted_contents')
@patch('eg.util.page_string')
def test_handle_program_finds_paths_and_calls_open_pager_with_alias(
mock_page,
mock_format,
mock_get_paths,
mock_get_contents,
mock_resolve,
):
"""
If there are entries for the program, handle_program needs to get the
paths, get the contents, format the contents, and page the resulting
string.
"""
alias_for_program = 'link'
resolved_program = 'ln'
examples_dir = 'test-eg-dir'
custom_dir = 'test-custom-dir'
color_config = None
use_color = False
pager_cmd = 'foo bar'
squeeze = False
subs = ['foo', 'bar']
file_contents = 'I am the contents of ln.md.'
formatted_contents = 'and I am the formatted contents of ln.md.'
test_config = _create_config(
examples_dir=examples_dir,
custom_dir=custom_dir,
color_config=color_config,
use_color=use_color,
pager_cmd=pager_cmd,
squeeze=squeeze,
subs=subs
)
default_paths = ['test-eg-dir/ln.md']
custom_paths = ['test-custom-dir/ln.md']
def return_correct_path(*args, **kwargs):
program_param = args[0]
dir_param = args[1]
if program_param != resolved_program:
raise NameError(
'expected ' +
resolved_program +
', got ' +
program_param
)
if dir_param == examples_dir:
return default_paths
elif dir_param == custom_dir:
return custom_paths
else:
raise NameError(
'got ' +
dir_param +
', expected ' +
examples_dir +
' or ' +
custom_dir)
mock_format.return_value = formatted_contents
mock_get_paths.side_effect = return_correct_path
mock_get_contents.return_value = file_contents
mock_resolve.return_value = resolved_program
util.handle_program(
alias_for_program,
test_config
)
mock_resolve.assert_called_once_with(
alias_for_program,
test_config
)
mock_get_paths.assert_any_call(
resolved_program,
examples_dir
)
mock_get_paths.assert_any_call(
resolved_program,
custom_dir,
)
mock_get_contents.assert_called_once_with(
custom_paths[0],
default_paths[0]
)
mock_format.assert_called_once_with(
file_contents,
use_color=test_config.use_color,
color_config=test_config.color_config,
squeeze=test_config.squeeze,
subs=test_config.subs
)
mock_page.assert_called_once_with(
formatted_contents,
test_config.pager_cmd
)
def test_get_list_of_all_supported_commands(tmpdir):
dir_example = tmpdir.mkdir('examples')
dir_custom = tmpdir.mkdir('custom')
config = _create_config(
examples_dir=str(dir_example),
custom_dir=str(dir_custom),
)
expected = [
'a-only-default',
'b-both *',
'c-only-custom +',
'd-only-custom-nested +',
'e-only-default-nested',
'f-default-custom-nested',
'g-both-different-levels *',
't-a-only-default-alias -> a-only-default',
'u-b-both-alias -> b-both *',
'v-c-only-custom-alias -> c-only-custom +'
]
aliases = {
't-a-only-default-alias': 'a-only-default',
'u-b-both-alias': 'b-both',
'v-c-only-custom-alias': 'c-only-custom'
}
# Make the directory structure we expect.
dir_example_nested = dir_example.mkdir('default-nested')
dir_custom_nested = dir_custom.mkdir('custom-nested')
dir_example.join('a-only-default.md').write('foo')
dir_example.join('b-both.md').write('foo')
dir_custom.join('b-both.md').write('foo')
dir_custom.join('c-only-custom.md').write('foo')
dir_custom_nested.join('d-only-custom-nested.md').write('foo')
dir_example_nested.join('e-only-default-nested.md').write('foo')
dir_example_nested.join('f-default-custom-nested.md').write('foo')
dir_example.join('g-both-different-levels.md').write('foo')
dir_custom_nested.join('g-both-different-levels.md').write('foo')
# Use the 'with' context manager rather than the @decorator, because the
# tmpdir fixture doesn't play nice with the decorator.
with patch('eg.util.get_alias_dict') as mock_get_alias:
mock_get_alias.return_value = aliases
actual = util.get_list_of_all_supported_commands(config)
assert actual == expected
mock_get_alias.assert_called_once_with(config)
def test_list_supported_programs_fails_gracefully_if_no_dirs():
test_config = _create_config()
actual = util.get_list_of_all_supported_commands(test_config)
target = []
assert actual == target
def test_calls_pipepager_if_not_less():
"""
We're special casing less a bit, as it is the default value, so if a custom
command has been set that is NOT less, we should call pipepager straight
away.
"""
_helper_assert_about_pager('page me plz', 'cat', False)
def test_calls_fallback_pager_if_none():
"""
If pager_cmd is None, we should just use the fallback pager.
"""
_helper_assert_about_pager('page me plz', None, True)
def test_calls_pipepager_if_less():
"""
We should call pipepager if we ask to use less and less is installed on the
machine.
"""
_helper_assert_about_pager('a fancy value to page', 'less -R', False)
def test_calls_fallback_if_cmd_is_flag_string():
"""
We are using a flag string to indicate if we should use the fallback pager.
"""
_helper_assert_about_pager(
'page via fallback',
util.FLAG_FALLBACK,
True
)
@patch('pydoc.pager')
@patch('pydoc.pipepager')
def _helper_assert_about_pager(
str_to_page,
pager_cmd,
use_fallback,
pipepager,
default_pager,
):
"""
Help with asserting about pager.
str_to_page: what you're paging
pager_cmd: the string you're passing to pipepager (or None)
    use_fallback: False if we should actually use pydoc.pipepager, True if we
        instead are going to fall back to pydoc.pager
"""
util.page_string(str_to_page, pager_cmd)
if use_fallback:
default_pager.assert_called_once_with(str_to_page)
assert pipepager.call_count == 0
else:
assert default_pager.call_count == 0
pipepager.assert_called_once_with(
str_to_page,
cmd=pager_cmd
)
@patch('eg.util.pydoc.pipepager', side_effect=KeyboardInterrupt)
def test_page_string_excepts_keyboard_interrupt_if_not_less(pipepager_mock):
"""
Do not fail when user hits ctrl-c while in pager.
"""
try:
util.page_string('page me plz', 'cat')
except KeyboardInterrupt:
raise AssertionError('Should not have got this far')
pipepager_mock.assert_called_once_with('page me plz', cmd='cat')
@patch('eg.util.pydoc.pager', side_effect=KeyboardInterrupt)
def test_page_string_excepts_keyboard_interrupt_if_none(pager_mock):
"""
Do not fail when user hits ctrl-c while in pipepager.
"""
try:
util.page_string('page me plz', None)
except KeyboardInterrupt:
raise AssertionError('Should not have got this far')
pager_mock.assert_called_once_with('page me plz')
def test_get_contents_from_files_handles_none():
"""
Empty string if no files.
"""
_helper_assert_file_contents(
[],
''
)
def test_get_contents_from_files_handles_one():
file_infos = [
{
'path': 'test/path',
'contents': 'contents of file'
}
]
combined_contents = 'contents of file'
_helper_assert_file_contents(
file_infos,
combined_contents
)
def test_get_contents_from_files_handles_multiple():
file_infos = [
{
'path': 'path/1',
'contents': 'foo\n'
},
{
'path': 'path/2/foo',
'contents': 'bar\n'
},
{
'path': 'another/path',
'contents': 'baz'
}
]
combined_contents = 'foo\nbar\nbaz'
_helper_assert_file_contents(
file_infos,
combined_contents
)
@patch('eg.util._get_contents_of_file')
def _helper_assert_file_contents(
file_infos,
target_contents,
get_contents_mock,
):
"""
Helper method to assert things about the get_contents_from_files method.
Does not actually hit the disk.
file_infos: array of { path, contents } dicts representing files. Array so
that we can assert proper order calling
target_contents: the final combined contents that should be returned by the
get_contents_from_files method.
"""
# This method will be used by the mock framework to return the right file
# contents based on the file name.
def return_file_contents(*args, **kwargs):
for file_info in file_infos:
if file_info['path'] == args[0]:
return file_info['contents']
raise TypeError('did not find path in test obj')
get_contents_mock.side_effect = return_file_contents
paths = [el['path'] for el in file_infos]
actual = util.get_contents_from_files(*paths)
assert actual == target_contents
@patch('eg.util.get_colorized_contents')
@patch('eg.util.get_squeezed_contents')
@patch('eg.util.get_substituted_contents')
def _helper_assert_formatted_contents(
starting_contents,
use_color,
color_config,
squeeze,
subs,
colorized_contents,
squeezed_contents,
subbed_contents,
formatted_result,
sub_method,
squeeze_method,
color_method,
):
"""
Helper method to assist in asserting things about the
get_formatted_contents method.
starting_contents: the starting string that we are working with
use_color: True if we should use color
color_config: the color config to be passed to get_colorized_contents
squeeze: True if we should squeeze
subs: the list of Substitutions that we should pass to
get_substituted_contents
    colorized_contents: the result of get_colorized_contents
    squeezed_contents: the result of get_squeezed_contents
    subbed_contents: the result of get_substituted_contents
formatted_result: the final, formatted string that should be returned
"""
sub_method.return_value = subbed_contents
squeeze_method.return_value = squeezed_contents
color_method.return_value = colorized_contents
actual = util.get_formatted_contents(
starting_contents,
use_color,
color_config,
squeeze,
subs
)
# We'll update the contents as they get formatted to make sure
# we pass the right thing to the various methods.
contents_thus_far = starting_contents
if use_color:
color_method.assert_called_once_with(
contents_thus_far,
color_config
)
contents_thus_far = colorized_contents
else:
assert color_method.call_count == 0
if squeeze:
squeeze_method.assert_called_once_with(contents_thus_far)
contents_thus_far = squeezed_contents
else:
assert squeeze_method.call_count == 0
if subs:
sub_method.assert_called_once_with(
contents_thus_far,
subs
)
contents_thus_far = subbed_contents
else:
assert sub_method.call_count == 0
assert actual == formatted_result
def test_get_formatted_contents_does_not_format_methods_if_all_falsey():
"""
We should invoke none of the formatter methods if the flags are false and
subs is not truthy.
"""
starting_contents = 'this is where we start'
_helper_assert_formatted_contents(
starting_contents,
False,
'some color config',
False,
None,
'this was colored',
'this was squeezed',
'these contents were subbed',
starting_contents
)
def test_get_formatted_contents_calls_colorize_if_use_color():
"""
Colorize the contents if use_color = True.
"""
starting_contents = 'this is where we start'
colorized_contents = 'COLORIZED: this is where we start'
_helper_assert_formatted_contents(
starting_contents,
True,
'some color config',
False,
None,
colorized_contents,
'this was squeezed',
'these contents were subbed',
colorized_contents
)
def test_get_formatted_contents_squeezes():
"""If squeeze, we need to squeeze."""
starting_contents = 'this is where we start'
squeezed_contents = 'this is the result of a squeezing'
_helper_assert_formatted_contents(
starting_contents,
False,
'some color config',
True,
None,
'this was colored',
squeezed_contents,
'these contents were subbed',
squeezed_contents
)
def test_get_formatted_contents_subsitutes():
"""If subs is truthy, get_substituted contents should be called."""
starting_contents = 'this is where we start'
subbed_contents = 'substituted like a teacher'
_helper_assert_formatted_contents(
starting_contents,
False,
'some color config',
False,
['truthy', 'list'],
'this was colored',
'this was squeezed',
subbed_contents,
subbed_contents
)
def test_perform_all_formatting():
"""
When use_color, squeeze, and subs are all truthy, all the formatting
should be applied in that order.
"""
starting_contents = 'the starting point for grand formatting'
subbed_contents = 'subbed is the last thing called so should be the result'
_helper_assert_formatted_contents(
starting_contents,
True,
'some color config',
True,
['truthy', 'list'],
'this was colored',
'this was squeezed',
subbed_contents,
subbed_contents
)
def _get_file_as_string(path):
"""Get the contents of the file as a string."""
with open(path, 'r') as f:
data = f.read()
return data
def test_get_squeezed_contents_correctly_squeezes():
"""
Our squeeze method should follow our convention, which is to remove the
blank line between a description and an example, to keep two blank lines
between sections, and otherwise have only single blank lines.
"""
unsqueezed = _get_file_as_string(PATH_UNSQUEEZED_FILE)
# the target squeezed output is a reference implementation in
# pwd_squeezed.md.
target = _get_file_as_string(PATH_SQUEEZED_FILE)
actual = util.get_squeezed_contents(unsqueezed)
assert actual == target
def test_get_substituted_contents_handles_empty_subs():
"""Nothing should be formatted if there are no substitutions."""
raw_contents = 'this should not be subbed'
actual = util.get_substituted_contents(raw_contents, [])
assert actual == raw_contents
def test_get_substituted_contents_substitutes_calls_correct_methods():
"""
The get_substituted_contents method calls things in the correct order.
"""
sub_one = Mock(auto_spec=substitute.Substitution)
sub_one_result = 'result of sub one'
sub_one.apply_and_get_result.return_value = sub_one_result
sub_two = Mock(auto_spec=substitute.Substitution)
sub_two_result = 'result of sub two'
sub_two.apply_and_get_result.return_value = sub_two_result
starting_contents = 'the string we should be substituting into'
target = sub_two_result
subs = [sub_one, sub_two]
actual = util.get_substituted_contents(starting_contents, subs)
sub_one.apply_and_get_result.assert_called_once_with(starting_contents)
sub_two.apply_and_get_result.assert_called_once_with(sub_one_result)
assert actual == target
def test_get_substituted_contents_substitutes_correctly():
"""
Basic test to make sure Substitutions can get applied correctly.
"""
sub_one = substitute.Substitution('foo', 'bar', False)
sub_two = substitute.Substitution('bar\n\n', 'baz\n', True)
start = 'foo\n\n something else\n\n bar\n\n'
target = 'baz\n something else\n\n baz\n'
subs = [sub_one, sub_two]
actual = util.get_substituted_contents(start, subs)
assert actual == target
@patch('eg.color.EgColorizer')
def test_get_colorized_contents_calls_methods(patched_colorizer_class):
"""
We should call the correct methods on the EgColorizer objects when we color
a file.
"""
raw_contents = 'these are uncolored contents'
colored_contents = 'COLORED: ' + raw_contents
color_config = 'some color config'
# The actual instance created by these calls is stored at return_value.
colorizer_instance = patched_colorizer_class.return_value
colorizer_instance.colorize_text.return_value = colored_contents
actual = util.get_colorized_contents(raw_contents, color_config)
assert actual == colored_contents
colorizer_instance.colorize_text.assert_called_once_with(raw_contents)
@patch('eg.util.get_alias_dict')
def _helper_assert_get_resolved_program(
program,
resolved_program,
config_obj,
alias_dict,
mock_dict,
):
"""
program: the program to resolved for as an alias
resolved_program: the result of the resolution.
    config_obj: the config_obj to use to resolve the alias path
alias_dict: the dict of aliases to be returned
"""
mock_dict.return_value = alias_dict
actual = util.get_resolved_program(program, config_obj)
assert actual == resolved_program
mock_dict.assert_called_once_with(config_obj)
def test_get_resolved_program_no_alias():
"""
A program that is not an alias should return itself.
"""
alias_dict = {
'link': 'ln',
'nc': 'netcat'
}
config_obj = 'a config'
    _helper_assert_get_resolved_program('cp', 'cp', config_obj, alias_dict)
def test_get_resolved_program_is_alias():
"""
A program that is an alias should return the resolved value.
"""
alias_dict = {
'link': 'ln',
'nc': 'netcat'
}
config_obj = 'some new config'
    _helper_assert_get_resolved_program('link', 'ln', config_obj, alias_dict)
def test_get_alias_dict_returns_contents_of_correct_file():
"""
get_alias_dict should read data from the file at the default path.
"""
alias_dict = {
'link': 'ln',
'nc': 'netcat'
}
config_obj = _create_config(
examples_dir='path/to/examples/dir',
)
alias_file_path = 'path/to/alias/file'
alias_dict_str = json.dumps(alias_dict)
_helper_assert_get_alias_dict(
alias_dict_str,
alias_dict,
config_obj,
alias_file_path,
True
)
def test_get_alias_dict_fails_gracefully_if_not_file():
"""
Since users can specify a directory for examples that might not contain the
aliases file, we want to fail gracefully if the file doesn't exist.
"""
contents_of_alias_dict_file = 'should never be reached'
config_obj = _create_config(
examples_dir='path/to/examples/dir',
)
alias_file_path = 'path/to/the/alias/file'
_helper_assert_get_alias_dict(
contents_of_alias_dict_file,
{},
config_obj,
alias_file_path,
False
)
@patch('eg.util._get_contents_of_file')
@patch('eg.util._get_alias_file_path')
@patch('os.path.isfile')
def _helper_assert_get_alias_dict(
contents_of_alias_dict_file,
target_alias_dict,
config_obj,
alias_file_path,
alias_file_path_is_file,
mock_is_file,
mock_get_alias_file_path,
mock_get_contents,
):
"""
contents_of_alias_dict_file: the string contents of the file storing the
dictionary of aliases
target_alias_dict: the target result of get_alias_dict
config_obj: the Config object
alias_file_path: the path to be returned by _get_alias_file_path
alias_file_path_is_file: True if the alias path is a file, else False
"""
mock_is_file.return_value = alias_file_path_is_file
mock_get_alias_file_path.return_value = alias_file_path
mock_get_contents.return_value = contents_of_alias_dict_file
actual = util.get_alias_dict(config_obj)
assert actual == target_alias_dict
mock_get_alias_file_path.assert_called_once_with(config_obj)
mock_is_file.assert_called_once_with(alias_file_path)
if alias_file_path_is_file:
mock_get_contents.assert_called_once_with(alias_file_path)
else:
assert mock_get_contents.call_count == 0
@patch('os.path.join')
def test_get_alias_file_path(mock_join):
"""
_get_alias_file_path should just join the example dir and the alias file
name, to make sure we look in the right place for the file.
"""
config_obj = _create_config(
examples_dir='handy/dandy/examples/dir',
)
join_result = 'joined path'
mock_join.return_value = join_result
actual = util._get_alias_file_path(config_obj)
assert actual == join_result
mock_join.assert_called_once_with(
config_obj.examples_dir,
util.ALIAS_FILE_NAME
)
def test_is_example_file_true_if_has_suffix():
"""
Should be true if ends in EXAMPLE_FILE_SUFFIX.
"""
file_name = 'find.md'
actual = util._is_example_file(file_name)
assert actual == True
def test_is_example_file_false_if_not_suffix():
"""
Should be false if the file does not end in EXAMPLE_FILE_SUFFIX.
"""
file_name = 'aliases.json'
actual = util._is_example_file(file_name)
assert actual == False
def test_can_parse_alias_file():
"""
Make sure aliases.json file can be parsed.
This is to make sure an edit doesn't accidentally corrupt it.
"""
# We'll have to hardcode this.
alias_file_path = os.path.join(
config.DEFAULT_EXAMPLES_DIR,
util.ALIAS_FILE_NAME
)
alias_file_contents = util._get_contents_of_file(alias_file_path)
alias_dict = json.loads(alias_file_contents)
# We'll check that link goes to ln, as we know that one will be present.
assert alias_dict['link'] == 'ln'
@patch('os.path.exists')
@patch('eg.util._inform_cannot_edit_no_custom_dir')
@patch('eg.util.get_resolved_program')
@patch('eg.util.get_file_paths_for_program')
@patch('subprocess.call')
def test_edit_custom_examples_correct_with_custom_dir(
mock_call,
mock_get_paths,
mock_get_program,
mock_inform,
mock_exists,
):
"""
We should resolve aliases, get the custom file path, and call subprocess.
"""
program = 'du'
resolved_program = 'alias for du'
config = _create_config(custom_dir='path/to/custom', editor_cmd='nano')
paths = ['path/to/custom/du.md', 'foo.md']
mock_get_program.return_value = resolved_program
mock_get_paths.return_value = paths
mock_exists.return_value = True
util.edit_custom_examples(program, config)
mock_get_program.assert_called_once_with(program, config)
mock_get_paths.assert_called_once_with(resolved_program, config.custom_dir)
mock_call.assert_called_once_with([config.editor_cmd, paths[0]])
assert mock_inform.call_count == 0
@patch('os.path.exists')
@patch('eg.util._inform_cannot_edit_no_custom_dir')
@patch('eg.util.get_resolved_program')
@patch('eg.util.get_file_paths_for_program')
@patch('subprocess.call')
def test_edit_custom_examples_creates_file_if_none_exist(
mock_call,
mock_get_paths,
mock_get_program,
mock_inform,
mock_exists,
):
program = 'du'
resolved_program = 'alias-for-du'
config = _create_config(custom_dir='path/to/custom', editor_cmd='nano')
paths = []
mock_get_program.return_value = resolved_program
mock_get_paths.return_value = paths
mock_exists.return_value = True
util.edit_custom_examples(program, config)
mock_get_program.assert_called_once_with(program, config)
mock_get_paths.assert_called_once_with(resolved_program, config.custom_dir)
mock_call.assert_called_once_with(
[config.editor_cmd, 'path/to/custom/alias-for-du.md'])
assert mock_inform.call_count == 0
@patch('os.path.exists')
@patch('eg.util._inform_cannot_edit_no_custom_dir')
@patch('eg.util.get_resolved_program')
@patch('eg.util.get_file_paths_for_program')
@patch('subprocess.call')
def test_edit_custom_examples_informs_if_no_custom_dir(
mock_call,
mock_get_paths,
mock_get_program,
mock_inform,
mock_exists,
):
"""
We should inform the user if they are trying to edit with no custom dir.
This should be true if it is not set and if the path does not exist.
"""
program = 'awk'
# First with no custom dir set.
config = _create_config(editor_cmd='vi -e')
mock_exists.return_value = True
util.edit_custom_examples(program, config)
assert mock_inform.call_count == 1
# And now with it set but a nonexistent path.
config = _create_config(custom_dir='/path/to/custom', editor_cmd='vi -e')
mock_exists.return_value = False
util.edit_custom_examples(program, config)
assert mock_inform.call_count == 2
assert mock_call.call_count == 0
assert mock_get_paths.call_count == 0
assert mock_get_program.call_count == 0
|
44968
|
def format_card(card_num):
"""
Formats card numbers to remove any spaces, unnecessary characters, etc
Input: Card number, integer or string
Output: Correctly formatted card number, string
"""
import re
card_num = str(card_num)
# Regex to remove any nondigit characters
return re.sub(r"\D", "", card_num)
def validate_card(card_num):
    """
    Check if a credit card number is valid using the Luhn algorithm
    Input: Card number, integer or string
    Output: Valid?, boolean
    """
    double = 0
    total = 0
    digits = str(card_num)
    # Walk the digits right to left, doubling every second digit and summing
    # the decimal digits of each (possibly doubled) value.
    for i in range(len(digits) - 1, -1, -1):
        for c in str((double + 1) * int(digits[i])):
            total += int(c)
        double = (double + 1) % 2
    # The number passes the Luhn check when the total is a multiple of 10.
    return (total % 10) == 0
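if __name__ == "__main__":
    # Quick sanity check: 4111111111111111 is a well-known test number that
    # passes the Luhn check; changing its last digit breaks it.
    print(format_card("4111 1111 1111 1111"))  # 4111111111111111
    print(validate_card("4111111111111111"))   # True
    print(validate_card("4111111111111112"))   # False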
|
45026
|
import typing as t
__all__ = ["Cell", "ListObject", ]
class Cell(t.NamedTuple):
"""Field data representation for html template"""
value: t.Any
url: t.Tuple[str, t.Dict[str, t.Union[str, int]]]
is_safe: bool = False
class ListObject(t.NamedTuple):
rows: t.List[t.List[Cell]]
has_next: bool
has_prev: bool
count: t.Optional[int]
active_page: t.Optional[int]
per_page: int
next_id: t.Optional[int]
|
45039
|
import FWCore.ParameterSet.Config as cms
BtagPerformanceESProducer_TTBARWPBTAGCSVL = cms.ESProducer("BtagPerformanceESProducer",
# this is what it makes available
ComponentName = cms.string('TTBARWPBTAGCSVL'),
# this is where it gets the payload from
PayloadName = cms.string('BTagTTBARWPBTAGCSVLtable_v8_offline'),
WorkingPointName = cms.string('BTagTTBARWPBTAGCSVLwp_v8_offline')
)
BtagPerformanceESProducer_TTBARWPBTAGCSVM = cms.ESProducer("BtagPerformanceESProducer",
# this is what it makes available
ComponentName = cms.string('TTBARWPBTAGCSVM'),
# this is where it gets the payload from
PayloadName = cms.string('BTagTTBARWPBTAGCSVMtable_v8_offline'),
WorkingPointName = cms.string('BTagTTBARWPBTAGCSVMwp_v8_offline')
)
BtagPerformanceESProducer_TTBARWPBTAGCSVT = cms.ESProducer("BtagPerformanceESProducer",
# this is what it makes available
ComponentName = cms.string('TTBARWPBTAGCSVT'),
# this is where it gets the payload from
PayloadName = cms.string('BTagTTBARWPBTAGCSVTtable_v8_offline'),
WorkingPointName = cms.string('BTagTTBARWPBTAGCSVTwp_v8_offline')
)
BtagPerformanceESProducer_TTBARWPBTAGJPL = cms.ESProducer("BtagPerformanceESProducer",
# this is what it makes available
ComponentName = cms.string('TTBARWPBTAGJPL'),
# this is where it gets the payload from
PayloadName = cms.string('BTagTTBARWPBTAGJPLtable_v8_offline'),
WorkingPointName = cms.string('BTagTTBARWPBTAGJPLwp_v8_offline')
)
BtagPerformanceESProducer_TTBARWPBTAGJPM = cms.ESProducer("BtagPerformanceESProducer",
# this is what it makes available
ComponentName = cms.string('TTBARWPBTAGJPM'),
# this is where it gets the payload from
PayloadName = cms.string('BTagTTBARWPBTAGJPMtable_v8_offline'),
WorkingPointName = cms.string('BTagTTBARWPBTAGJPMwp_v8_offline')
)
BtagPerformanceESProducer_TTBARWPBTAGJPT = cms.ESProducer("BtagPerformanceESProducer",
# this is what it makes available
ComponentName = cms.string('TTBARWPBTAGJPT'),
# this is where it gets the payload from
PayloadName = cms.string('BTagTTBARWPBTAGJPTtable_v8_offline'),
WorkingPointName = cms.string('BTagTTBARWPBTAGJPTwp_v8_offline')
)
BtagPerformanceESProducer_TTBARWPBTAGJBPL = cms.ESProducer("BtagPerformanceESProducer",
# this is what it makes available
ComponentName = cms.string('TTBARWPBTAGJBPL'),
# this is where it gets the payload from
PayloadName = cms.string('BTagTTBARWPBTAGJBPLtable_v8_offline'),
WorkingPointName = cms.string('BTagTTBARWPBTAGJBPLwp_v8_offline')
)
BtagPerformanceESProducer_TTBARWPBTAGJBPM = cms.ESProducer("BtagPerformanceESProducer",
# this is what it makes available
ComponentName = cms.string('TTBARWPBTAGJBPM'),
# this is where it gets the payload from
PayloadName = cms.string('BTagTTBARWPBTAGJBPMtable_v8_offline'),
WorkingPointName = cms.string('BTagTTBARWPBTAGJBPMwp_v8_offline')
)
BtagPerformanceESProducer_TTBARWPBTAGJBPT = cms.ESProducer("BtagPerformanceESProducer",
# this is what it makes available
ComponentName = cms.string('TTBARWPBTAGJBPT'),
# this is where it gets the payload from
PayloadName = cms.string('BTagTTBARWPBTAGJBPTtable_v8_offline'),
WorkingPointName = cms.string('BTagTTBARWPBTAGJBPTwp_v8_offline')
)
BtagPerformanceESProducer_TTBARWPBTAGSSVHEM = cms.ESProducer("BtagPerformanceESProducer",
# this is what it makes available
ComponentName = cms.string('TTBARWPBTAGSSVHEM'),
# this is where it gets the payload from
PayloadName = cms.string('BTagTTBARWPBTAGSSVHEMtable_v8_offline'),
WorkingPointName = cms.string('BTagTTBARWPBTAGSSVHEMwp_v8_offline')
)
BtagPerformanceESProducer_TTBARWPBTAGSSVHET = cms.ESProducer("BtagPerformanceESProducer",
# this is what it makes available
ComponentName = cms.string('TTBARWPBTAGSSVHET'),
# this is where it gets the payload from
PayloadName = cms.string('BTagTTBARWPBTAGSSVHETtable_v8_offline'),
WorkingPointName = cms.string('BTagTTBARWPBTAGSSVHETwp_v8_offline')
)
BtagPerformanceESProducer_TTBARWPBTAGSSVHPT = cms.ESProducer("BtagPerformanceESProducer",
# this is what it makes available
ComponentName = cms.string('TTBARWPBTAGSSVHPT'),
# this is where it gets the payload from
PayloadName = cms.string('BTagTTBARWPBTAGSSVHPTtable_v8_offline'),
WorkingPointName = cms.string('BTagTTBARWPBTAGSSVHPTwp_v8_offline')
)
BtagPerformanceESProducer_TTBARWPBTAGTCHEL = cms.ESProducer("BtagPerformanceESProducer",
# this is what it makes available
ComponentName = cms.string('TTBARWPBTAGTCHEL'),
# this is where it gets the payload from
PayloadName = cms.string('BTagTTBARWPBTAGTCHELtable_v8_offline'),
WorkingPointName = cms.string('BTagTTBARWPBTAGTCHELwp_v8_offline')
)
BtagPerformanceESProducer_TTBARWPBTAGTCHEM = cms.ESProducer("BtagPerformanceESProducer",
# this is what it makes available
ComponentName = cms.string('TTBARWPBTAGTCHEM'),
# this is where it gets the payload from
PayloadName = cms.string('BTagTTBARWPBTAGTCHEMtable_v8_offline'),
WorkingPointName = cms.string('BTagTTBARWPBTAGTCHEMwp_v8_offline')
)
BtagPerformanceESProducer_TTBARWPBTAGTCHET = cms.ESProducer("BtagPerformanceESProducer",
# this is what it makes available
ComponentName = cms.string('TTBARWPBTAGTCHET'),
# this is where it gets the payload from
PayloadName = cms.string('BTagTTBARWPBTAGTCHETtable_v8_offline'),
WorkingPointName = cms.string('BTagTTBARWPBTAGTCHETwp_v8_offline')
)
BtagPerformanceESProducer_TTBARWPBTAGTCHPL = cms.ESProducer("BtagPerformanceESProducer",
# this is what it makes available
ComponentName = cms.string('TTBARWPBTAGTCHPL'),
# this is where it gets the payload from
PayloadName = cms.string('BTagTTBARWPBTAGTCHPLtable_v8_offline'),
WorkingPointName = cms.string('BTagTTBARWPBTAGTCHPLwp_v8_offline')
)
BtagPerformanceESProducer_TTBARWPBTAGTCHPM = cms.ESProducer("BtagPerformanceESProducer",
# this is what it makes available
ComponentName = cms.string('TTBARWPBTAGTCHPM'),
# this is where it gets the payload from
PayloadName = cms.string('BTagTTBARWPBTAGTCHPMtable_v8_offline'),
WorkingPointName = cms.string('BTagTTBARWPBTAGTCHPMwp_v8_offline')
)
BtagPerformanceESProducer_TTBARWPBTAGTCHPT = cms.ESProducer("BtagPerformanceESProducer",
# this is what it makes available
ComponentName = cms.string('TTBARWPBTAGTCHPT'),
# this is where it gets the payload from
PayloadName = cms.string('BTagTTBARWPBTAGTCHPTtable_v8_offline'),
WorkingPointName = cms.string('BTagTTBARWPBTAGTCHPTwp_v8_offline')
)
|
45057
|
from datetime import datetime, timezone
from ..exceptions import *
class Orders(object):
def __init__(self, session, trading_types):
super(Orders, self).__init__()
self._session = session
self._trading_types = trading_types
def generateOrderObject(self, legacy_contract_id, issuer, quantity, order_type, trading_type, instrument_type, price=None):
"""
**Generate Order Object**
- legacy_contract_id (string): (required)
- issuer (string): (required)
- quantity (int): (required)
- order_type (enum): (required)
- trading_type (enum): (required)
- instrument_type (required)
- price (float): (optional)
"""
if trading_type == self._trading_types.Limited and not price:
raise OrderFormatError("If trading type is Limited, the price argument is required")
order_object = {
"algoTradingTypeId": trading_type.value,
"capitalOrderTypeId": order_type.value,
"instrumentType": instrument_type.value,
"issueId": issuer,
"quantity": quantity,
"hash": self.__generateHash(legacy_contract_id, issuer, quantity, instrument_type)
}
if price:
order_object["price"] = price
return order_object
    def __generateHash(self, legacy_contract_id, issuer, quantity, instrument_type):
        # Milliseconds since the Unix epoch plus the order details, used as a
        # per-request unique hash.
        now = datetime.now(timezone.utc).replace(tzinfo=None)
        millis = int((now - datetime(year=1970, day=1, month=1)).total_seconds() * 1000)
        ticker_name = issuer.replace(" ", "")
        return f"{millis}{legacy_contract_id}{ticker_name}{quantity}{instrument_type.value}"
def submitOrder(self, legacy_contract_id, duration, order):
"""
**Submit one order**
https://homebroker-api.gbm.com/GBMP/api/Operation/RegisterCapitalOrder
- legacy_contract_id (string): (required)
- duration (int): (required)
- order (object): (required)
"""
metadata = {
'tags': ['order', 'generate order'],
'operation': 'submitOrder'
}
resource = "https://homebroker-api.gbm.com/GBMP/api/Operation/RegisterCapitalOrder"
payload = {
"contractId": legacy_contract_id,
"duration": duration,
"algoTradingTypeId": order.get("algoTradingTypeId"),
"orders": [order]
}
return self._session.post(metadata, resource, payload)
def getOrders(self, legacy_contract_id):
"""
**Get submitted Orders**
https://homebroker-api.gbm.com/GBMP/api/Operation/GetBlotterOrders
- legacy_contract_id (string): (required)
"""
metadata = {
'tags': ['Get Orders'],
'operation': 'getOrders'
}
resource = "https://homebroker-api.gbm.com/GBMP/api/Operation/GetBlotterOrders"
payload = {
"contractId": legacy_contract_id,
"accountId": legacy_contract_id,
"instrumentTypes": [0, 2],
"processDate": datetime.utcnow().strftime('%Y-%m-%dT06:00:00.000Z')
}
return self._session.post(metadata, resource, payload)
|
45073
|
import hou
import husdoutputprocessors.base as base
import os
class StagingDirOutputProcessor(base.OutputProcessorBase):
"""Output all USD Rop file nodes into the Staging Directory
Ignore any folders and paths set in the Configured Layers
and USD Rop node, just take the filename and save into a
single directory.
"""
theParameters = None
parameter_prefix = "stagingdiroutputprocessor_"
stagingdir_parm_name = parameter_prefix + "stagingDir"
def __init__(self):
self.staging_dir = None
def displayName(self):
return 'StagingDir Output Processor'
def parameters(self):
if not self.theParameters:
parameters = hou.ParmTemplateGroup()
rootdirparm = hou.StringParmTemplate(
self.stagingdir_parm_name,
'Staging Directory', 1,
string_type=hou.stringParmType.FileReference,
file_type=hou.fileType.Directory
)
parameters.append(rootdirparm)
self.theParameters = parameters.asDialogScript()
return self.theParameters
def beginSave(self, config_node, t):
# Use the Root Directory parameter if it is set.
root_dir_parm = config_node.parm(self.stagingdir_parm_name)
if root_dir_parm:
self.staging_dir = root_dir_parm.evalAtTime(t)
if not self.staging_dir:
out_file_parm = config_node.parm('lopoutput')
if out_file_parm:
self.staging_dir = out_file_parm.evalAtTime(t)
if self.staging_dir:
(self.staging_dir, filename) = os.path.split(self.staging_dir)
def endSave(self):
self.staging_dir = None
def processAsset(self, asset_path,
asset_path_for_save,
referencing_layer_path,
asset_is_layer,
for_save):
"""
Args:
asset_path (str): The incoming file path you want to alter or not.
asset_path_for_save (bool): Whether the current path is a
referenced path in the USD file. When True, return the path
you want inside USD file.
referencing_layer_path (str): ???
asset_is_layer (bool): Whether this asset is a USD layer file.
If this is False, the asset is something else (for example,
a texture or volume file).
for_save (bool): Whether the asset path is for a file to be saved
out. If so, then return actual written filepath.
Returns:
The refactored asset path.
"""
# Treat save paths as being relative to the output path.
if for_save and self.staging_dir:
# Whenever we're processing a Save Path make sure to
# resolve it to the Staging Directory
filename = os.path.basename(asset_path)
return os.path.join(self.staging_dir, filename)
return asset_path
output_processor = StagingDirOutputProcessor()
def usdOutputProcessor():
return output_processor
|
45074
|
def useless_print(content):
    '''Print the argument received'''
print(content)
if __name__ == "__main__":
import sys
useless_print(sys.argv[1])
|
45090
|
from flask import Blueprint
order_api_blueprint = Blueprint('order_api', __name__)
from . import routes
|
45112
|
def version():
    """Function takes no arguments and returns a STR value of the current version of the library. This value should match
    the value in the setup.py
    :param None
    :return str value of the current version of the library
    :rtype str
    >>> version()
    '1.0.34'
    """
    return '1.0.34'
|
45120
|
import boto3
import os
import uuid
from urllib.parse import unquote_plus
from PIL import Image
s3_client = boto3.client('s3')
def resize_image(picture_file_path, crop_dimensions=None):
# get the profile pics store ready
image = Image.open(picture_file_path)
if crop_dimensions:
image = image.crop(crop_dimensions)
    width = int(os.environ.get('RESIZE_WIDTH'))
    height = int(os.environ.get('RESIZE_HEIGHT'))
    image = image.resize((width, height))
    # save and convert to jpg here; derive the output names from the input
    # path, dropping the original extension
    base_path = os.path.splitext(picture_file_path)[0]
    cropped_filename = "{}_cropped.jpg".format(base_path)
    thumbnail_filename = "{}_thumbnail.jpg".format(base_path)
    image.save(cropped_filename)
    thumbnail_width = int(os.environ.get('THUMBNAIL_WIDTH'))
    thumbnail_height = int(os.environ.get('THUMBNAIL_HEIGHT'))
    image = image.resize((thumbnail_width, thumbnail_height))
    image.save(thumbnail_filename)
return (cropped_filename, thumbnail_filename)
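# Local smoke-test sketch; the environment values and the input path are
# placeholders for what the Lambda runtime and S3 event normally provide:
#
#     os.environ.update({'RESIZE_WIDTH': '300', 'RESIZE_HEIGHT': '300',
#                        'THUMBNAIL_WIDTH': '100', 'THUMBNAIL_HEIGHT': '100'})
#     cropped, thumbnail = resize_image('/tmp/profile.jpg')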
def handler(event, context):
amplify_storage_bucket_name = os.environ.get('STORAGE_PLATELETSTORAGE_BUCKETNAME')
print(os.environ)
for record in event['Records']:
bucket = record['s3']['bucket']['name']
key = unquote_plus(record['s3']['object']['key'])
tmpkey = key.replace('/', '')
download_path = '/tmp/{}{}'.format(uuid.uuid4(), tmpkey)
print('Downloading {} from bucket {} to {}'.format(key, bucket, download_path))
s3_client.download_file(bucket, key, download_path)
(newImage, thumbnail) = resize_image(download_path)
base_key = key.split('.')[0]
s3_client.upload_file(newImage, amplify_storage_bucket_name, key)
s3_client.upload_file(thumbnail, amplify_storage_bucket_name, "{}_thumbnail.jpg".format(base_key))
s3_client.delete_object(Bucket=bucket, Key=key)
|
45126
|
from git import Repo
import subprocess
import os, shutil
# I use this later to lazily generate an error with a message
class CustomError(Exception):
pass
repo_path = "../../"
r = Repo(repo_path)
repo_heads = r.heads # or it's alias: r.branches
repo_heads_names = [h.name for h in repo_heads]
#kokkos_src = '/Users/bird/kokkos/'
#kokkos_install = '/Users/bird/kokkos/build/install'
#cabana_install = '/Users/bird/Cabana/build/build/install' # not a typo, it's in a dumb path
#platforms = ["Serial", "CPU", "GPU", "UVM"]
platforms = ["Serial", "CPU", "GPU"]
#platforms = ["CPU", "GPU"]
#platforms = ["GPU"]
#platforms = ["CPU"]
CXX = "g++"
#arch = 'Volta70'
arch = 'Kepler35'
subprocess.check_call(['./timing_lib.sh'])
this_build_dir = 'build'
kokkos_dirs = {}
cabana_dirs = {}
home_dir = os.environ['HOME']
# Build Dependencies
# TODO: make this configurable
kokkos_root = os.path.join(home_dir,'kokkos')
cabana_root = os.path.join(home_dir,'Cabana')
# Check we can find Kokkos and Cabana
if not os.path.isdir(kokkos_root):
raise CustomError("Can't find kokkos")
if not os.path.isdir(cabana_root):
raise CustomError("Can't find Cabana")
# Copy Kokkos and Cabana to be inside this dir
def copy_and_overwrite(from_path, to_path):
if os.path.exists(to_path):
shutil.rmtree(to_path)
shutil.copytree(from_path, to_path)
def copy_if_safe(from_path, to_path):
if not os.path.isdir(to_path):
shutil.copytree(from_path, to_path)
# only copy if they don't exist already
kokkos_new = os.path.join(this_build_dir,'kokkos')
copy_if_safe(kokkos_root, kokkos_new)
cabana_new = os.path.join(this_build_dir,'cabana')
copy_if_safe(cabana_root, cabana_new)
# Build Dependencies
for plat in platforms:
install_dir = "build-" + plat
# Do Build
print("build_kokkos.sh " + CXX + " " + kokkos_new + " " + install_dir + " " + plat + " " + arch)
subprocess.check_call(['./build_kokkos.sh', CXX, kokkos_new, install_dir, plat, arch])
print("./build_cabana.sh " + " " + CXX + " " + os.path.join(kokkos_new,install_dir,'install') + " " + cabana_new + " " + install_dir + " " + plat)
subprocess.check_call(['./build_cabana.sh', CXX, os.path.join(kokkos_new,install_dir,'install'), cabana_new, install_dir, plat])
# Save dirs, relative to root
cabana_dirs[plat] = install_dir
kokkos_dirs[plat] = install_dir
# Iterate over *local* git branches
for branch in repo_heads_names:
print("Working on branch " + branch)
for plat in platforms:
print(plat)
        # TODO: throughout these scripts we assume ./install is the install dir! abstract it.
cabana_install = os.path.join( cabana_dirs[plat], 'install')
kokkos_install = os.path.join( kokkos_dirs[plat], 'install')
# For each repo, check it out into a new folder and build it
#clone_path = './' + branch
clone_path = os.path.join('./', this_build_dir, branch)
print("!!!! WORKING ON " + clone_path)
# look to see if the folder already exists:
if not os.path.isdir(clone_path):
# if it does... delete it (!)
#print("Deleting " + clone_path)
# We need to delete where it will build only one platforms worth,
# or hoist the clone
#shutil.rmtree(clone_path + build??)
# OR if it does... skip
#continue
# clone it
cloned = Repo.clone_from(
repo_path,
clone_path,
branch=branch
)
pwd = os.getcwd()
kokkos_full_path = os.path.join(pwd, kokkos_new, kokkos_install)
cabana_full_path = os.path.join(pwd, cabana_new, cabana_install)
print("kk full path " + kokkos_full_path)
print("./build_and_run.sh " + clone_path + " g++ " + kokkos_full_path + " " + cabana_full_path + " " + plat)
subprocess.check_call(['./build_and_run.sh', clone_path, "g++", kokkos_full_path, cabana_full_path, plat])
|
45133
|
import json
from django.core.management.base import BaseCommand, CommandError
from users.models import User
class Command(BaseCommand):
help = "Exports a user information as a set of environment variables"
def add_arguments(self, parser):
parser.add_argument("user_id", type=int)
    def handle(self, *args, **options):
        user_id = options["user_id"]
        try:
            user = User.objects.get(id=user_id).bot_user()
        except User.DoesNotExist:
            user = None
        if not user:
            raise CommandError('User "%s" does not exist' % user_id)
print(
f"""
# user ID {user.id} for user {user.name}
USER_BINANCE_API_KEY="{user.binance_api_key}"
USER_BINANCE_SECRET_KEY="{user.binance_secret_key}"
USER_EXTERNAL_PORTFOLIO='{json.dumps(user.external_portfolio)}'
USER_PREFERENCES='{json.dumps(user.preferences)}'
"""
)
|
45171
|
import torch
import torchvision
from torchvision import transforms
def load_mnist_dataset(train_batch_size, test_batch_size=1):
train_set = torchvision.datasets.MNIST(".", train=True, transform=transforms.Compose([transforms.ToTensor()]), download=True)
test_set = torchvision.datasets.MNIST(".", train=False, transform=transforms.Compose([transforms.ToTensor()]), download=True)
train_loader = torch.utils.data.DataLoader(train_set, batch_size=train_batch_size, shuffle=True)
test_loader = torch.utils.data.DataLoader(test_set, batch_size=test_batch_size, shuffle=False)
return train_loader, test_loader
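if __name__ == "__main__":
    # Minimal usage sketch: fetch one training batch and inspect its shape.
    train_loader, test_loader = load_mnist_dataset(train_batch_size=64)
    images, labels = next(iter(train_loader))
    print(images.shape)  # torch.Size([64, 1, 28, 28])
    print(labels.shape)  # torch.Size([64])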
|
45176
|
from __future__ import absolute_import, print_function, unicode_literals
import os
import shutil
import stat
import sys
import tempfile
from io import StringIO, open
from subprocess import list2cmdline
from textwrap import dedent
import ksconf.ext.six as six
from ksconf.__main__ import cli
from ksconf.conf.parser import (GLOBAL_STANZA, PARSECONF_MID, parse_conf,
parse_conf_stream, write_conf)
from ksconf.util.file import file_hash
from ksconf.vc.git import git_cmd
# Some unittest fixup for various python versions
import tests.compat as _ # noqa
del _
# What to export
__all__ = [
"static_data",
"ksconf_cli",
"TestWorkDir",
"FakeStdin",
"GLOBAL_STANZA",
"parse_conf",
"parse_string",
"write_conf",
"_debug_file",
]
def _debug_file(flag, fn): # pragma: no cover
""" Dump file contents with a message string to the output. For quick'n'dirty unittest
debugging only """
with open(fn) as fp:
content = fp.read()
length = len(content)
hash = file_hash(fn)
print("\n{flag} {fn} len={length} hash={hash} \n{content}".format(**vars()))
del flag, hash, length
def static_data(path):
""" Get paths to files under the 'tests/data/*' location """
# Assume "/" for path separation for simplicity; but ultimately OS independent
parts = path.split("/")
return os.path.abspath(os.path.join(os.path.dirname(__file__), "data", *parts))
def parse_string(text, profile=None, **kwargs):
text = dedent(text)
f = StringIO(text)
if profile:
return parse_conf(f, profile)
else:
return parse_conf_stream(f, **kwargs)
'''
# Let's try to avoid launching external processes (makes coverage more difficult, and so on)
def ksconf_exec(args):
args = list(args)
args.insert(0, "ksconf.py")
from subprocess import call
args = list(args)
if True: # Coverage enabled
args = ["coverage", "run", "-a" ] + args
rc = call(args)
return KsconfOutput(rc, ...)
'''
class _KsconfCli():
"""
CLI Wrapper context management class for unit testing;
USAGE: Use the ksconf_cli() singleton in a context (with)
Unfortunately, we have to redirect stdout/stderr while this runs, not
very clean, but we try to make it as safe as possible.
tmpfile: os.tmpfile, or StringIO?
"""
class KsconfOutput(object):
""" Container for the results from a KsconfCli call."""
__slots__ = ("returncode", "stdout", "stderr")
def __init__(self, *args):
self.returncode, self.stdout, self.stderr = args
def get_conf(self, profile=None, **kwargs):
""" Parse stdout as a .conf file"""
f = StringIO(self.stdout)
if profile:
return parse_conf(f, profile)
else:
return parse_conf_stream(f, **kwargs)
@staticmethod
def _as_string(stream):
stream.seek(0)
return stream.read()
def __call__(self, *args):
# In later versions of Python (3.4), something like this could be considered:
# from contextlib import redirect_stdout
self._last_args = args
_stdout, _stderr = (sys.stdout, sys.stderr)
try:
# Capture all output written to stdout/stderr
temp_stdout = sys.stdout = StringIO()
temp_stderr = sys.stderr = StringIO()
try:
rc = cli(args, _unittest=True)
except SystemExit as e: # pragma: no cover
if hasattr(e, "code"): # PY3
rc = e.code
else:
rc = e.message
finally:
# This next step MUST be done!
(sys.stdout, sys.stderr) = _stdout, _stderr
stdout = self._as_string(temp_stdout)
stderr = self._as_string(temp_stderr)
output = self.KsconfOutput(rc, stdout, stderr)
self._last_output = output
return output
def __enter__(self):
self._last_args = None
self._last_output = None
return self
def __exit__(self, exc_type, exc_val, exc_tb):
# Don't worry with coverage here. It gets plenty of testing DURING unittest development ;-)
if exc_type is not None: # pragma: no cover
sys.stderr.write("Exception while running: ksconf {0}\n".
format(list2cmdline(self._last_args)))
ko = self._last_output
if ko:
if ko.stdout:
sys.stderr.write("STDOUT:\n{0}\n".format(ko.stdout))
if ko.stderr:
sys.stderr.write("STDERR:\n{0}\n".format(ko.stderr))
# Re-raise exception
return False
ksconf_cli = _KsconfCli()
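# --- Illustrative usage sketch (added for documentation; not part of the original
# module).  The singleton is used as a context manager and then called with CLI
# arguments; "--help" is just a smoke-test argument here.
def _example_ksconf_cli():  # pragma: no cover
    with ksconf_cli:
        ko = ksconf_cli("--help")
        return ko.returncode, ko.stdout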
class FakeStdin(object):
def __init__(self, content):
if isinstance(content, six.string_types):
content = StringIO(content)
self.stream = content
def __enter__(self):
self._real_stdin = sys.stdin
sys.stdin = self.stream
return self
def __exit__(self, exc_type, exc_val, exc_tb):
        # Don't worry about coverage here.  It gets plenty of testing DURING unittest development ;-)
sys.stdin = self._real_stdin
if exc_type is not None: # pragma: no cover
# Re-raise exception
return False
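# --- Illustrative usage sketch (added for documentation; not part of the original
# module).  FakeStdin temporarily swaps sys.stdin so code that reads from stdin
# (e.g. a "-" CLI argument) can be fed inline text; the conf content is hypothetical.
def _example_fake_stdin():  # pragma: no cover
    with FakeStdin(u"[stanza]\nkey = value\n"):
        return sys.stdin.read()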
class TestWorkDir(object):
""" Create a temporary working directory to create app-like structures and other supporting
file system artifacts necessary for many CLI tests. Cleanup is done automatically.
Can also be used as context manager (``with``) to temporarily change the directory and restore
the working directory upon completion.
"""
encoding = "utf-8"
def __init__(self, git_repo=False):
if git_repo:
self._path = tempfile.mkdtemp("-ksconftest-git")
self.git("init")
else:
self._path = tempfile.mkdtemp("-ksconftest")
self.git_repo = git_repo
self._working_dir = None
def __del__(self):
self.clean()
def __enter__(self):
self._working_dir = os.getcwd()
os.chdir(self._path)
def __exit__(self, exc_type, exc_val, exc_tb):
os.chdir(self._working_dir)
self._working_dir = None
def clean(self, force=False):
""" Explicitly clean/wipe the working directory. """
if not hasattr(self, "_path"):
return
if "KSCONF_KEEP_TEST_FILES" in os.environ and not force: # pragma: no cover
return
        # Remove read-only files (e.g. clean up .git/objects/xx/* files on Windows)
def del_rw(action, name, exc): # pragma: no cover (infrequently used)
# https://stackoverflow.com/a/21263493/315892
# Not checking for file vs dir, ...
os.chmod(name, stat.S_IWRITE)
os.remove(name)
del action, exc
shutil.rmtree(self._path, onerror=del_rw)
# Prevent the class from being used further
del self._path
def git(self, *args):
o = git_cmd(args, cwd=self._path)
if o.returncode != 0: # pragma: no cover
# Because, if we're using ksconf_cli, then we may be redirecting these...
stderr = sys.__stderr__
stderr.write("Git command 'git {0}' failed with exit code {1}\n{2}\n"
.format(" ".join(args), o.returncode, o.stderr))
raise RuntimeError("Failed git command (return code {0})".format(o.returncode))
def get_path(self, rel_path):
# Always using unix/URL style paths internally. But we want this to be OS agnostic
rel_parts = rel_path.split("/")
return os.path.join(self._path, *rel_parts)
def makedir(self, rel_path, path=None):
if path is None:
path = self.get_path(rel_path)
if not os.path.isdir(path):
os.makedirs(path)
return path
def write_file(self, rel_path, content):
path = self.get_path(rel_path)
self.makedir(None, path=os.path.dirname(path))
kw = {}
if isinstance(content, bytes):
kw["mode"] = "wb"
else:
kw["mode"] = "w"
kw["encoding"] = self.encoding
content = dedent(content)
with open(path, **kw) as stream:
stream.write(content)
return path
def read_file(self, rel_path, as_bytes=False):
path = self.get_path(rel_path)
kw = {}
if as_bytes:
kw["mode"] = "rb"
else:
kw["mode"] = "r"
kw["encoding"] = self.encoding
with open(path, **kw) as stream:
content = stream.read()
return content
def remove_file(self, rel_path):
path = self.get_path(rel_path)
os.unlink(path)
def write_conf(self, rel_path, conf):
path = self.get_path(rel_path)
self.makedir(None, path=os.path.dirname(path))
write_conf(path, conf)
return path
def read_conf(self, rel_path, profile=PARSECONF_MID):
path = self.get_path(rel_path)
return parse_conf(path, profile=profile)
def copy_static(self, static, rel_path):
src = static_data(static)
with open(src, "rb") as stream:
content = stream.read()
return self.write_file(rel_path, content)
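# --- Illustrative usage sketch (added for documentation; not part of the original
# module).  A typical test builds a throwaway app structure with TestWorkDir; the
# paths and conf content below are hypothetical.
def _example_test_work_dir():  # pragma: no cover
    twd = TestWorkDir()
    twd.write_file("default/props.conf", """
    [example_sourcetype]
    SHOULD_LINEMERGE = false
    """)
    conf = twd.read_conf("default/props.conf")
    twd.clean(force=True)
    return conf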
|
45184
|
from django.db import models
# from django.contrib.auth.models import User
from apps.users.models import CustomUser
# Import the Enum type
from enum import Enum
from enumfields import EnumIntegerField
class BillType(Enum):
    OUTGO = 0  # Bill type: expense
    INCOME = 1  # Bill type: income
class Categorys(models.Model):
    """
    Bill detail category table
    """
    is_default = models.BooleanField('Is default category', default=False)  # True: built-in default category, False: user-defined category
    user = models.ForeignKey(CustomUser, verbose_name='Owning user of custom category', blank=True, null=True, on_delete=models.CASCADE)
    bill_type = EnumIntegerField(BillType, verbose_name='Bill type', default=BillType.OUTGO)
    name = models.CharField('Category name', max_length=20, unique=True)
    parent = models.ForeignKey('self', verbose_name='Parent category', blank=True, null=True, on_delete=models.CASCADE)
    modify_time = models.DateTimeField('Modified time', auto_now=True)
    create_time = models.DateTimeField('Created time', auto_now_add=True)
    class Meta:
        db_table = "categorys"
        ordering = ['-modify_time']
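# --- Illustrative sketch (added for documentation; not part of the original
# module).  Creating a user-defined expense category could look like this; the
# ``owner`` argument is assumed to be an existing CustomUser instance and the
# category name is made up.
def _example_create_expense_category(owner):  # pragma: no cover - illustrative only
    return Categorys.objects.create(
        is_default=False,
        user=owner,
        bill_type=BillType.OUTGO,
        name='Groceries',
    )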
|
45189
|
from common import *
os.chdir(EXTERNAL_DIR)
PKGS = env('PKGS', 'all')
ninja_template = read_file(f'{SCRIPT_DIR}/install_deps_{OS_LOWER_CASE}.ninja.in')
write_file(f'install_deps_{OS_LOWER_CASE}.ninja', ninja_template.format(**ENV_PARAMS))
for key, val in ENV_PARAMS.items():
print(f'{key} = {val}')
shell(f'ninja -v -j 1 -f install_deps_{OS_LOWER_CASE}.ninja {PKGS}')
|
45208
|
from RecoEgamma.EgammaElectronProducers.gsfElectrons_cfi import ecalDrivenGsfElectrons
lowPtGsfElectronsPreRegression = ecalDrivenGsfElectrons.clone(gsfElectronCoresTag = "lowPtGsfElectronCores")
from Configuration.Eras.Modifier_fastSim_cff import fastSim
fastSim.toModify(lowPtGsfElectronsPreRegression,ctfTracksTag = "generalTracksBeforeMixing")
|
45275
|
explanations = {
'gamma': '''
Proportion of tree modifications that should use mutrel-informed choice for
node to move, rather than uniform choice
''',
'zeta': '''
Proportion of tree modifications that should use mutrel-informed choice for
destination to move node to, rather than uniform choice
''',
'iota': '''
Probability of initializing with mutrel-informed tree rather than fully
branching tree when beginning chain
'''
}
defaults = {
'gamma': 0.7,
'zeta': 0.7,
'iota': 0.7,
}
assert set(explanations.keys()) == set(defaults.keys())
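# --- Illustrative sketch (added for documentation; not part of the original
# module).  One plausible way to expose these knobs on a command line is to build
# argparse options from the two dicts above; this parser wiring is an assumption,
# not the project's actual CLI code.
def _build_example_parser():
    import argparse
    parser = argparse.ArgumentParser()
    for name in sorted(defaults):
        parser.add_argument(
            '--%s' % name,
            type=float,
            default=defaults[name],
            help=' '.join(explanations[name].split()),
        )
    return parser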
|
45292
|
import pytest
import numpy as np
from csgo.analytics.distance import (
bombsite_distance,
point_distance,
polygon_area,
area_distance,
)
from csgo.analytics.coords import Encoder
class TestCSGOAnalytics:
"""Class to test CSGO analytics"""
def test_bombsite_distance(self):
"""Test bombsite distance function."""
assert bombsite_distance([0, 0, 0]) == 35
assert bombsite_distance([0, 0, 0], bombsite="B") == 38
assert bombsite_distance([0, 0, 0], bombsite="A", map="de_inferno") == 30
def test_point_distance(self):
"""Test point distance function"""
assert point_distance([0, 0], [1, 1], type="euclidean") == 1.4142135623730951
assert point_distance([0, 0], [1, 1], type="manhattan") == 2
assert point_distance([0, 0], [1, 1], type="canberra") == 2.0
assert point_distance([-1, 5], [2, 1], type="cosine") == 0.7368825942078912
assert point_distance([0, 0, 0], [100, 100, 100]) == 4
assert point_distance([0, 0, 0], [100, 100, 100], map="de_vertigo") == 1
def test_polygon_area(self):
"""Test polygon area function"""
assert polygon_area([0, 1, 2], [0, 1, 0]) == 1.0
def test_bombsite_invalid_map(self):
"""
Test bombsite function with an invalid map.
"""
with pytest.raises(ValueError):
bombsite_distance([0, 0, 0], map="dust2")
def test_point_invalid_map(self):
"""
Test point distance function with an invalid map.
"""
with pytest.raises(ValueError):
point_distance([0, 0, 0], [1, 1, 1], map="dust2")
def test_area_invalid_map(self):
"""
Test area distance function with an invalid map.
"""
with pytest.raises(ValueError):
area_distance(26, 42, map="dust2")
def test_area_dist(self):
"""
Tests that area distance returns correct value.
"""
assert area_distance(26, 42, map="de_mirage") == 26
def test_place_encode(self):
"""
Tests that place encoding works for correct values
"""
e = Encoder()
assert np.sum(e.encode("place", "TSpawn")) == 1
assert np.sum(e.encode("place", "TSpawnnn")) == 0
assert np.sum(e.encode("map", "de_dust2")) == 1
assert np.sum(e.encode("map", "de_dust0")) == 0
|
45316
|
import logging
logger = logging.getLogger(__name__)
# Pump rate in mL/s (4.3 L/min)
_PUMP_RATE_ML_PER_SEC = 4300.0 / 60.0
# Default amount of water to add to the plant (in mL) when pump manager detects
# low soil moisture.
DEFAULT_PUMP_AMOUNT = 200
class Pump(object):
"""Wrapper for a Seaflo 12V water pump."""
def __init__(self, pi_io, clock, pump_pin):
"""Creates a new Pump wrapper.
Args:
pi_io: Raspberry Pi I/O interface.
clock: A clock interface.
pump_pin: Raspberry Pi pin to which the pump is connected.
"""
self._pi_io = pi_io
self._clock = clock
self._pump_pin = pump_pin
def pump_water(self, amount_ml):
"""Pumps the specified amount of water.
Args:
amount_ml: Amount of water to pump (in mL).
Raises:
ValueError: The amount of water to be pumped is invalid.
"""
if amount_ml == 0.0:
return
elif amount_ml < 0.0:
raise ValueError('Cannot pump a negative amount of water')
else:
logger.info('turning pump on (with GPIO pin %d)', self._pump_pin)
self._pi_io.turn_pin_on(self._pump_pin)
wait_time_seconds = amount_ml / _PUMP_RATE_ML_PER_SEC
self._clock.wait(wait_time_seconds)
logger.info('turning pump off (with GPIO pin %d)', self._pump_pin)
self._pi_io.turn_pin_off(self._pump_pin)
logger.info('pumped %.f mL of water', amount_ml)
return
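# --- Illustrative sketch (added for documentation; not part of the original
# module).  Pump run time is simply volume divided by flow rate: the default
# 200 mL at ~71.7 mL/s (4.3 L/min) keeps the pump on for roughly 2.8 seconds.
def _estimate_pump_seconds(amount_ml=DEFAULT_PUMP_AMOUNT):
    """Approximate number of seconds the pump stays on to move amount_ml of water."""
    return amount_ml / _PUMP_RATE_ML_PER_SEC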
class PumpManager(object):
"""Pump Manager manages the water pump."""
def __init__(self, pump, pump_scheduler, moisture_threshold, pump_amount,
timer):
"""Creates a PumpManager object, which manages a water pump.
Args:
pump: A pump instance, which supports water pumping.
pump_scheduler: A pump scheduler instance that controls the time
periods in which the pump can be run.
moisture_threshold: Soil moisture threshold. If soil moisture is
below this value, manager pumps water on pump_if_needed calls.
pump_amount: Amount (in mL) to pump every time the water pump runs.
timer: A timer that counts down until the next forced pump. When
this timer expires, the pump manager runs the pump once,
regardless of the moisture level.
"""
self._pump = pump
self._pump_scheduler = pump_scheduler
self._moisture_threshold = moisture_threshold
self._pump_amount = pump_amount
self._timer = timer
def pump_if_needed(self, moisture):
"""Run the water pump if there is a need to run it.
Args:
moisture: Soil moisture level
Returns:
The amount of water pumped, in mL.
"""
if self._should_pump(moisture):
self._pump.pump_water(self._pump_amount)
self._timer.reset()
return self._pump_amount
return 0
def _should_pump(self, moisture):
"""Returns True if the pump should be run."""
if not self._pump_scheduler.is_running_pump_allowed():
return False
return (moisture < self._moisture_threshold) or self._timer.expired()
class PumpScheduler(object):
"""Controls when the pump is allowed to run."""
def __init__(self, local_clock, sleep_windows):
"""Creates new PumpScheduler instance.
Args:
local_clock: A local clock interface
sleep_windows: A list of 2-tuples, each representing a sleep window.
Tuple items are datetime.time objects.
"""
self._local_clock = local_clock
self._sleep_windows = sleep_windows
def is_running_pump_allowed(self):
"""Returns True if OK to run pump, otherwise False.
Pump is not allowed to run from the start of a sleep window (inclusive)
to the end of a sleep window (exclusive).
"""
current_time = self._local_clock.now().time()
for sleep_time, wake_time in self._sleep_windows:
# Check if sleep window wraps midnight.
if wake_time < sleep_time:
if current_time >= sleep_time or current_time < wake_time:
return False
else:
if sleep_time <= current_time < wake_time:
return False
return True
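# --- Illustrative sketch (added for documentation; not part of the original
# module).  A sleep window that wraps midnight, e.g. 22:00 -> 06:00, blocks the
# pump at 23:30 and at 05:59 but allows it again at 06:00; the times here are
# hypothetical.
def _example_wrapping_window_blocks_pump():
    import datetime
    sleep_time = datetime.time(22, 0)
    wake_time = datetime.time(6, 0)
    current_time = datetime.time(23, 30)
    # Same comparison used by PumpScheduler.is_running_pump_allowed() for a
    # window that wraps midnight; True means the pump is blocked.
    return current_time >= sleep_time or current_time < wake_time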
|
45327
|
import datetime
import app.helpers.helpers
from app.controllers.api import record as record_api
from app.helpers import helpers
class TestRecord:
def get_record(self, records, type_):
for record in records:
if record["type"] == type_:
return record
def test_list_no_Record(self, client):
"""Test if db contains no record."""
headers = {"X-Api-Key": "123"}
res = client.get("/api/domain/list", headers=headers)
json_data = res.get_json()
assert json_data["code"] == 404
def test_add_record(self, client, mocker):
"""Test adding record from its endpoint.
- Create a User
- Create a domain (with default SOA,NS,CNAME created)
- Add a record
- Query the db to assure it's created
"""
mocker.patch("app.helpers.producer.kafka_producer")
mocker.patch("app.helpers.producer.send")
headers = {"X-Api-Key": "123"}
# create user
data = {"email": "<EMAIL>"}
post_res = client.post("/api/user/add", data=data, headers=headers)
json_data = post_res.get_json()
user_id = json_data["data"]["id"]
# add domain
data = {"zone": "company.com", "user_id": user_id}
res = client.post("/api/domain/add", data=data, headers=headers)
create_domain_data = res.get_json()
# add record
data = {
"zone": "company.com",
"owner": "host",
"rtype": "A",
"rdata": "1.1.1.1",
"ttl": 7200,
}
res = client.post("/api/record/add", data=data, headers=headers)
add_record_data = res.get_json()
# list record
res = client.get("/api/domain/list", headers=headers)
list_record_data = res.get_json()
assert create_domain_data["code"] == 201
assert create_domain_data["data"]["zone"] == "company.com"
assert add_record_data["code"] == 201
assert add_record_data["data"]["owner"] == "host"
assert add_record_data["data"]["rdata"] == "1.1.1.1"
assert list_record_data["code"] == 200
assert list_record_data["data"][0]["zone"] == "company.com"
assert list_record_data["data"][0]["user"]["email"] == "<EMAIL>"
def test_edit_record(self, client, mocker):
"""Test editing record from its endpoint.
- Create a User
- Create a domain (with default SOA,NS,CNAME created)
- Add a record
- Edit a record
- Query the db to assure it's edited
"""
mocker.patch("app.helpers.producer.kafka_producer")
mocker.patch("app.helpers.producer.send")
headers = {"X-Api-Key": "123"}
# create user
data = {"email": "<EMAIL>"}
post_res = client.post("/api/user/add", data=data, headers=headers)
json_data = post_res.get_json()
user_id = json_data["data"]["id"]
# add domain
data = {"zone": "company.com", "user_id": user_id}
client.post("/api/domain/add", data=data, headers=headers)
# list record
res = client.get("/api/domain/list", headers=headers)
list_record_data = res.get_json()
# edit record
records = list_record_data["data"][0]["records"]
cname_record = self.get_record(records, "CNAME")
cname_record_id = cname_record["id"]
data = {
"zone": "company.com",
"owner": "www_edit",
"rtype": "CNAME",
"rdata": "company_edited.com",
"ttl": 3600,
}
res = client.put(
f"/api/record/edit/{cname_record_id}", data=data, headers=headers
)
edit_record_data = res.get_json()
# list record
res = client.get("/api/domain/list", headers=headers)
list_record_data = res.get_json()
records = list_record_data["data"][0]["records"]
edited_record_data = self.get_record(records, "CNAME")
assert edit_record_data["code"] == 200
assert edit_record_data["data"]["owner"] == "www_edit"
assert list_record_data["code"] == 200
assert edited_record_data["rdata"] == "company_edited.com"
def test_edit_record_no_ttl_change(self, client, mocker):
"""Test editing record from its endpoint.
- Create a User
- Create a domain (with default SOA,NS,CNAME created)
- Edit a record with the same TTL
"""
mocker.patch("app.helpers.producer.kafka_producer")
mocker.patch("app.helpers.producer.send")
headers = {"X-Api-Key": "123"}
# create user
data = {"email": "<EMAIL>"}
post_res = client.post("/api/user/add", data=data, headers=headers)
json_data = post_res.get_json()
user_id = json_data["data"]["id"]
# add domain
data = {"zone": "company.com", "user_id": user_id}
client.post("/api/domain/add", data=data, headers=headers)
# list record
res = client.get("/api/domain/list", headers=headers)
list_record_data = res.get_json()
# edit record
records = list_record_data["data"][0]["records"]
cname_record = self.get_record(records, "CNAME")
cname_record_id = cname_record["id"]
data = {
"zone": "company.com",
"owner": "www",
"rtype": "CNAME",
"rdata": "company.com.",
"ttl": "3600",
}
res = client.put(
f"/api/record/edit/{cname_record_id}", data=data, headers=headers
)
edit_record_data = res.get_json()
assert edit_record_data["code"] == 409
assert edit_record_data["message"] == "The record already exists"
def test_edit_record_with_ttl_change(self, client, mocker):
"""Test editing record from its endpoint.
- Create a User
- Create a domain (with default SOA,NS,CNAME created)
- Edit a record with the different TTL
- Query the db to assure it's edited
"""
mocker.patch("app.helpers.producer.kafka_producer")
mocker.patch("app.helpers.producer.send")
headers = {"X-Api-Key": "123"}
# create user
data = {"email": "<EMAIL>"}
post_res = client.post("/api/user/add", data=data, headers=headers)
json_data = post_res.get_json()
user_id = json_data["data"]["id"]
# add domain
data = {"zone": "company.com", "user_id": user_id}
client.post("/api/domain/add", data=data, headers=headers)
# list record
res = client.get("/api/domain/list", headers=headers)
list_record_data = res.get_json()
# edit record
records = list_record_data["data"][0]["records"]
cname_record = self.get_record(records, "CNAME")
cname_record_id = cname_record["id"]
data = {
"zone": "company.com",
"owner": "www",
"rtype": "CNAME",
"rdata": "company.com.",
"ttl": "300",
}
res = client.put(
f"/api/record/edit/{cname_record_id}", data=data, headers=headers
)
edit_record_data = res.get_json()
# list record
res = client.get("/api/domain/list", headers=headers)
list_record_data = res.get_json()
records = list_record_data["data"][0]["records"]
edited_record_data = self.get_record(records, "CNAME")
assert edit_record_data["code"] == 200
assert edit_record_data["data"]["ttl"] == "300"
assert list_record_data["code"] == 200
assert edited_record_data["ttl"] == "300"
def test_delete_record(self, client, mocker):
"""Test deleting record from its endpoint.
- Create a User
- Create a domain (with default SOA,NS,CNAME created)
- List the default records
- Delete one of the record
- Query the db to assure it's deleted
"""
mocker.patch("app.helpers.producer.kafka_producer")
mocker.patch("app.helpers.producer.send")
headers = {"X-Api-Key": "123"}
# create user
data = {"email": "<EMAIL>"}
post_res = client.post("/api/user/add", data=data, headers=headers)
json_data = post_res.get_json()
user_id = json_data["data"]["id"]
# add domain
data = {"zone": "company.com", "user_id": user_id}
client.post("/api/domain/add", data=data, headers=headers)
# list record
res = client.get("/api/domain/list", headers=headers)
list_record_data = res.get_json()
# edit record
records = list_record_data["data"][0]["records"]
cname_record = self.get_record(records, "CNAME")
cname_record_id = cname_record["id"]
delete_res = client.delete(
f"/api/record/delete/{cname_record_id}", headers=headers
)
# list record
res = client.get("/api/domain/list", headers=headers)
list_record_data = res.get_json()
records = list_record_data["data"][0]["records"]
assert delete_res.status_code == 204
# it must be 3 after deletion
assert len(records) == 3
def test_edit_record_no_ttl_change_MX(self, client, mocker):
"""Test editing record from its endpoint.
- Create a User
- Create a domain (with default SOA,NS,CNAME created)
- Add MX record
- Edit a record with the same TTL
"""
mocker.patch("app.helpers.producer.kafka_producer")
mocker.patch("app.helpers.producer.send")
headers = {"X-Api-Key": "123"}
# create user
data = {"email": "<EMAIL>"}
post_res = client.post("/api/user/add", data=data, headers=headers)
json_data = post_res.get_json()
user_id = json_data["data"]["id"]
# add domain
data = {"zone": "company.com", "user_id": user_id}
client.post("/api/domain/add", data=data, headers=headers)
# add record
data = {
"zone": "company.com",
"owner": "mx1",
"rtype": "MX",
"rdata": "10 mail.example.com.",
"ttl": 7200,
}
res = client.post("/api/record/add", data=data, headers=headers)
json_data = res.get_json()
record_id = json_data["data"]["id"]
# edit record
data = {
"zone": "company.com",
"owner": "mx1",
"rtype": "MX",
"rdata": "10 mail.example.com.",
"ttl": 7200,
}
res = client.put(f"/api/record/edit/{record_id}", data=data, headers=headers)
edit_record_data = res.get_json()
assert edit_record_data["code"] == 409
assert edit_record_data["message"] == "The record already exists"
def test_edit_record_with_ttl_change_MX(self, client, mocker):
"""Test editing record from its endpoint.
- Create a User
- Create a domain (with default SOA,NS,CNAME created)
- Add MX record
- Edit a record with the different TTL
- Query the db to assure it's edited
"""
mocker.patch("app.helpers.producer.kafka_producer")
mocker.patch("app.helpers.producer.send")
headers = {"X-Api-Key": "123"}
# create user
data = {"email": "<EMAIL>"}
post_res = client.post("/api/user/add", data=data, headers=headers)
json_data = post_res.get_json()
user_id = json_data["data"]["id"]
# add domain
data = {"zone": "company.com", "user_id": user_id}
client.post("/api/domain/add", data=data, headers=headers)
# add record
data = {
"zone": "company.com",
"owner": "mx1",
"rtype": "MX",
"rdata": "10 mail.example.com.",
"ttl": 7200,
}
res = client.post("/api/record/add", data=data, headers=headers)
json_data = res.get_json()
record_id = json_data["data"]["id"]
# edit record
data = {
"zone": "company.com",
"owner": "mx1",
"rtype": "MX",
"rdata": "10 mail.example.com.",
"ttl": 14400,
}
res = client.put(f"/api/record/edit/{record_id}", data=data, headers=headers)
edit_record_data = res.get_json()
# list record
res = client.get("/api/domain/list", headers=headers)
list_record_data = res.get_json()
records = list_record_data["data"][0]["records"]
edited_record_data = self.get_record(records, "MX")
assert edit_record_data["code"] == 200
assert edit_record_data["data"]["ttl"] == "14400"
assert list_record_data["code"] == 200
assert edited_record_data["ttl"] == "14400"
def test_edit_record_respect_zone_limit(self, client, monkeypatch, mocker):
"""Test edit record respecting zone limit of 99
- Create a User
- Create a domain (with default SOA, NS, CNAME created)
- Add TXT record
- Edit a record with the different TXT value until it reaches a limit
- Edit a record with tomorrows date
"""
mocker.patch("app.helpers.producer.kafka_producer")
mocker.patch("app.helpers.producer.send")
headers = {"X-Api-Key": "123"}
# create user
data = {"email": "<EMAIL>"}
post_res = client.post("/api/user/add", data=data, headers=headers)
json_data = post_res.get_json()
user_id = json_data["data"]["id"]
# add domain
data = {"zone": "company.com", "user_id": user_id}
client.post("/api/domain/add", data=data, headers=headers)
# add record
data = {
"zone": "company.com",
"owner": "txt1",
"rtype": "TXT",
"rdata": "0",
"ttl": 7200,
}
res = client.post("/api/record/add", data=data, headers=headers)
json_data = res.get_json()
record_id = json_data["data"]["id"]
increment_serial = 0
        # Editing the record 50 times is enough to push the serial counter past 99,
        # since each record edit increments the serial twice.
while increment_serial < 50:
data = {
"zone": "company.com",
"owner": "txt1",
"rtype": "TXT",
"rdata": f"{increment_serial}",
"ttl": 7200,
}
res = client.put(
f"/api/record/edit/{record_id}", data=data, headers=headers
)
edit_record_data = res.get_json()
increment_serial += 1
assert edit_record_data["code"] == 429
assert edit_record_data["message"] == "Zone Change Limit Reached"
# ensure correct serial
serial_resource = record_api.get_serial_resource("company.com")
today_date = helpers.soa_time_set()
assert serial_resource["serial_counter"] == "98"
assert serial_resource["serial_date"] == today_date
assert serial_resource["serial"] == f"{today_date}98"
#
# if user waits until tomorrow
#
def fake_soa_time_set():
tomorrow_date = datetime.datetime.now() + datetime.timedelta(days=1)
return tomorrow_date.strftime("%Y%m%d")
monkeypatch.setattr(app.helpers.helpers, "soa_time_set", fake_soa_time_set)
data = {
"zone": "company.com",
"owner": "txt1",
"rtype": "TXT",
"rdata": "random text",
"ttl": 7200,
}
res = client.put(f"/api/record/edit/{record_id}", data=data, headers=headers)
edit_record_data = res.get_json()
assert edit_record_data["code"] == 200
# ensure correct serial
serial_resource = record_api.get_serial_resource("company.com")
today_date = helpers.soa_time_set()
assert serial_resource["serial_counter"] == "03"
assert serial_resource["serial_date"] == today_date
assert serial_resource["serial"] == f"{today_date}03"
|
45333
|
import subprocess
import os
def download_task_model(task):
m_path = os.path.join('/home/ubuntu/s3', "model_log_final", task,
"logs/model.permanent-ckpt")
dirs, fname = os.path.split(m_path)
dst_dir = dirs.replace('/home/ubuntu/s3', "s3://taskonomy-unpacked-oregon")
tmp_path = "/home/ubuntu/temp/{}".format(task)
subprocess.call('mkdir -p {}'.format(tmp_path), shell=True)
tmp_fname = os.path.join(tmp_path, fname)
aws_cp_command = "aws s3 cp {}.data-00000-of-00001 {}".format(os.path.join(dst_dir, fname), tmp_path)
subprocess.call(aws_cp_command, shell=True)
aws_cp_command = "aws s3 cp {}.meta {}".format(os.path.join(dst_dir, fname), tmp_path)
subprocess.call(aws_cp_command, shell=True)
aws_cp_command = "aws s3 cp {}.index {}".format(os.path.join(dst_dir, fname), tmp_path)
subprocess.call(aws_cp_command, shell=True)
list_of_tasks = 'autoencoder curvature denoise edge2d edge3d \
keypoint2d keypoint3d colorization jigsaw \
reshade rgb2depth rgb2mist rgb2sfnorm \
room_layout segment25d segment2d vanishing_point_well_defined \
segmentsemantic_rb class_1000 class_places impainting_whole'
list_of_tasks = 'impainting_whole'
list_of_tasks = list_of_tasks.split()
for t in list_of_tasks:
download_task_model(t)
|