# File: etools/apps/pcs/tests.py (repo: Igelinmist/etools)
from datetime import date, datetime, timedelta, time
from django.test import TestCase
from pcs.models.extern_data_models import Param, load_params
from pcs.utils import AddrCoder
class ParamTestCase(TestCase):
def setUp(self):
load_params()
def test_getting_params(self):
""" test params load from external db"""
params_cnt = Param.objects.count()
self.assertGreater(params_cnt, 0)
def test_getting_history_data(self):
""" test getting historical data """
param = Param.objects.get(pk=5324846)
hist = param.get_hist_data()
hist_data = hist['data']
hist_data_cnt = len(hist_data)
item = hist_data[0]
self.assertGreater(hist_data_cnt, 0)
self.assertNotEqual(item.dt, None)
def test_getting_slice_data(self):
""" test getting hours slices """
param = Param.objects.get(pk=5324846)
from_dttm = date.today() - timedelta(days=1)
from_dttm = datetime.combine(from_dttm, time())
to_dttm = from_dttm + timedelta(hours=12)
hist = param.get_hist_data(from_dttm, to_dttm)
hist_data_hr = hist['ctrl_h']
self.assertGreater(len(hist_data_hr), 0)
self.assertEqual(from_dttm + timedelta(hours=1) in hist_data_hr, True)
class AddrCoderTestCase(TestCase):
def test_encode_addr(self):
coder = AddrCoder
flat_addr = coder.encode_addr(1, 0x1141, 96)
self.assertEqual(flat_addr, 0x01114160)
def test_decode_addr(self):
coder = AddrCoder
syst, ctrl, chan = coder.decode_addr(0x01114160)
self.assertEqual((syst, ctrl, chan), (0x01, 0x1141, 0x60))
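
# Illustrative sketch (not from the original file): the AddrCoder expectations above are
# consistent with packing (system, controller, channel) into a single 32-bit integer.
# The real pcs.utils.AddrCoder may be implemented differently; this only reproduces the
# asserted values.
def _encode_addr_sketch(syst, ctrl, chan):
    return (syst << 24) | (ctrl << 8) | chan        # (1, 0x1141, 96) -> 0x01114160

def _decode_addr_sketch(flat_addr):
    return flat_addr >> 24, (flat_addr >> 8) & 0xFFFF, flat_addr & 0xFF

assert _encode_addr_sketch(1, 0x1141, 96) == 0x01114160
assert _decode_addr_sketch(0x01114160) == (0x01, 0x1141, 0x60)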
# File: Code/DataManager.py (repo: chaturanand/Voice-based-gender-recognition)
import os
import sys
import math
import tarfile
class DataManager:
def __init__(self, dataset_path):
self.dataset_path = dataset_path
def extract_dataset(self, compressed_dataset_file_name, dataset_directory):
try:
# extract files to dataset folder
tar = tarfile.open(compressed_dataset_file_name, "r:gz")
tar.extractall(dataset_directory)
tar.close()
print("Files extraction was successfull ...")
except:
print("Ecxception raised: No extraction was done ...")
def make_folder(self, folder_path):
try:
os.mkdir(folder_path)
print(folder_path, "was created ...")
except:
print("Ecxception raised: ", folder_path, "could not be created ...")
def move_files(self, src, dst, group):
for fname in group:
os.rename(src + '/' + fname, dst + '/' + fname)
def get_fnames_from_dict(self, dataset_dict, f_or_m):
training_data, testing_data = [], []
for i in range(1,5):
length_data = len(dataset_dict[f_or_m +"000" + str(i)])
length_separator = math.trunc(length_data*2/3)
training_data += dataset_dict[f_or_m + "000" + str(i)][:length_separator]
testing_data += dataset_dict[f_or_m + "000" + str(i)][length_separator:]
return training_data, testing_data
def manage(self):
# read config file and get path to compressed dataset
compressed_dataset_file_name = self.dataset_path
dataset_directory = compressed_dataset_file_name.split(".")[0]
# create a folder for the data
try:
os.mkdir(dataset_directory)
except:
pass
# extract dataset
self.extract_dataset(compressed_dataset_file_name, dataset_directory)
# select females files and males files
file_names = [fname for fname in os.listdir(dataset_directory) if ("f0" in fname or "m0" in fname)]
dataset_dict = {"f0001": [], "f0002": [], "f0003": [], "f0004": [], "f0005": [],
"m0001": [], "m0002": [], "m0003": [], "m0004": [], "m0005": [], }
# fill in dictionary
for fname in file_names:
dataset_dict[fname.split('_')[0]].append(fname)
# divide and group file names
training_set, testing_set = {},{}
training_set["females"], testing_set["females"] = self.get_fnames_from_dict(dataset_dict, "f")
training_set["males" ], testing_set["males" ] = self.get_fnames_from_dict(dataset_dict, "m")
# make training and testing folders
self.make_folder("TrainingData")
self.make_folder("TestingData")
self.make_folder("TrainingData/females")
self.make_folder("TrainingData/males")
self.make_folder("TestingData/females")
self.make_folder("TestingData/males")
# move files
self.move_files(dataset_directory, "TrainingData/females", training_set["females"])
self.move_files(dataset_directory, "TrainingData/males", training_set["males"])
self.move_files(dataset_directory, "TestingData/females", testing_set["females"])
self.move_files(dataset_directory, "TestingData/males", testing_set["males"])
if __name__== "__main__":
data_manager = DataManager("SLR45.tgz")
data_manager.manage()
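
# Illustrative note (not part of the original module): get_fnames_from_dict keeps the first
# 2/3 of each speaker's files for training and the rest for testing. For example, with 9
# files for speaker "f0001", math.trunc(9 * 2 / 3) == 6, so the first 6 files end up in the
# training set and the last 3 in the testing set. Also note that `for i in range(1, 5)`
# only covers speakers 0001-0004, so any *0005 files stay in the extracted dataset folder.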
# File: mongomantic/__init__.py (repo: techie-gg/Mongomantic)
# type: ignore[attr-defined]
"""A MongoDB Python ORM, built on Pydantic and PyMongo."""
try:
from importlib.metadata import PackageNotFoundError, version
except ImportError: # pragma: no cover
from importlib_metadata import PackageNotFoundError, version
try:
__version__ = version(__name__)
except PackageNotFoundError: # pragma: no cover
__version__ = "unknown"
from mongomantic.core.base_repository import BaseRepository
from mongomantic.core.database import connect, disconnect
from mongomantic.core.index import Index
from mongomantic.core.mongo_model import MongoDBModel
__all__ = ["BaseRepository", "MongoDBModel", "connect", "disconnect", "Index"]
# File: src/models/model.py (repo: rudyn2/tsad_v2)
import torch
import torch.nn as nn
from torch.distributions import Normal
from torch.distributions.transformed_distribution import TransformedDistribution
# from torch.distributions.transforms import TanhTransform
from src.utils.eps_scheduler import Epsilon
from src.utils.transforms import TanhTransform
class FullyConnectedNetwork(nn.Module):
def __init__(self, input_dim, output_dim, arch='256-256'):
super().__init__()
self.input_dim = input_dim
self.output_dim = output_dim
self.arch = arch
d = input_dim
modules = []
hidden_sizes = [int(h) for h in arch.split('-')]
for hidden_size in hidden_sizes:
fc = nn.Linear(d, hidden_size)
modules.append(fc)
modules.append(nn.ReLU())
d = hidden_size
last_fc = nn.Linear(d, output_dim)
modules.append(last_fc)
self.network = nn.Sequential(*modules)
def forward(self, input_tensor):
return self.network(input_tensor)
class ReparameterizedTanhGaussian(nn.Module):
def __init__(self, log_std_min=-20.0, log_std_max=2.0):
super().__init__()
self.log_std_min = log_std_min
self.log_std_max = log_std_max
def forward(self, mean, log_std, deterministic=False):
log_std = torch.clamp(log_std, self.log_std_min, self.log_std_max)
std = torch.exp(log_std)
action_distribution = TransformedDistribution(
Normal(mean, std), TanhTransform(cache_size=1)
)
if deterministic:
action_sample = torch.tanh(mean)
else:
action_sample = action_distribution.rsample()
log_prob = torch.sum(
action_distribution.log_prob(action_sample), dim=1
)
return action_sample, log_prob
class TanhGaussianPolicy(nn.Module):
def __init__(self, observation_dim, action_dim, arch='256-256',
log_std_multiplier=1.0, log_std_offset=-1.0):
super().__init__()
self.observation_dim = observation_dim
self.action_dim = action_dim
self.arch = arch
self.base_network = FullyConnectedNetwork(
observation_dim, 2 * action_dim, arch
)
self.log_std_multiplier = Scalar(log_std_multiplier)
self.log_std_offset = Scalar(log_std_offset)
self.tanh_gaussian = ReparameterizedTanhGaussian()
def forward(self, observations, deterministic=False):
base_network_output = self.base_network(observations)
mean, log_std = torch.split(base_network_output, self.action_dim, dim=1)
log_std = self.log_std_multiplier() * log_std + self.log_std_offset()
return self.tanh_gaussian(mean, log_std, deterministic)
class SamplerPolicy(object):
def __init__(self, policy, device):
self.policy = policy
self.device = device
def __call__(self, observations, deterministic=False):
with torch.no_grad():
single_action = len(observations.shape) == 1
new_observations = torch.tensor(
observations, dtype=torch.float32, device=self.device
)
if single_action:
new_observations = new_observations.unsqueeze(0)
actions, _ = self.policy(new_observations, deterministic)
if single_action:
actions = actions.squeeze(0)
actions = actions.cpu().numpy()
return actions
class FullyConnectedQFunction(nn.Module):
def __init__(self, observation_dim, action_dim, arch='256-256'):
super().__init__()
self.observation_dim = observation_dim
self.action_dim = action_dim
self.arch = arch
self.network = FullyConnectedNetwork(
observation_dim + action_dim, 1, arch
)
def forward(self, observations, actions):
input_tensor = torch.cat([observations, actions], dim=1)
return torch.squeeze(self.network(input_tensor), dim=1)
class FullyConnectedQFunctionHLC(nn.Module):
def __init__(self, observation_dim, action_dim, arch='256-256', hlcs=(0, 1, 2, 3)):
super().__init__()
self.observation_dim = observation_dim
self.action_dim = action_dim
self.arch = arch
self.hlcs = hlcs
self.networks = nn.ModuleDict({str(hlc): FullyConnectedNetwork(
observation_dim + action_dim, 1, arch
) for hlc in hlcs})
def forward(self, observations, actions, hlc):
input_tensor = torch.cat([observations, actions], dim=1)
output = []
indices_array = []
for i in self.hlcs:
mask = hlc == i
if len(mask.shape) == 0:
mask = mask.view(-1)
indices = torch.nonzero(mask)
filtered_input = input_tensor[hlc == i]
predicted_q = torch.squeeze(self.networks[str(i)](filtered_input), dim=1)
output.append(predicted_q)
indices_array.append(indices)
output = torch.cat(output, dim=0)
indices_array = torch.cat(indices_array, dim=0).squeeze(dim=1)
# inverse indices array
_, inv_indices = torch.sort(indices_array)
# reorder output as input
output = output[inv_indices]
return output
def transfer_learning(self, from_hlc: int = 3, to_hlcs: tuple = (0, 1, 2)):
if from_hlc in self.hlcs:
for hlc in to_hlcs:
if hlc in self.hlcs:
self.networks[str(hlc)].load_state_dict(self.networks[str(from_hlc)].state_dict())
print(f"Transferred from {from_hlc} to {hlc}")
else:
print("Origin network is not present in the policy.")
class Scalar(nn.Module):
def __init__(self, init_value):
super().__init__()
self.constant = nn.Parameter(
torch.tensor(init_value, dtype=torch.float32)
)
def forward(self):
return self.constant
class DDPGSamplerPolicy(object):
def __init__(self, policy, device, exploration_noise=0.1, max_steps=10000, action_low=-1, action_max=1):
self.policy = policy
self.device = device
self.exploration_noise = exploration_noise
self.eps_scheduler = Epsilon(max_steps, epsilon_max=0.5, epsilon_min=0.05)
self.action_low = action_low
self.action_max = action_max
def __call__(self, observations, hlc, deterministic=False):
with torch.no_grad():
observations = torch.from_numpy(observations).float().to(self.device)
hlc = torch.tensor(
hlc, dtype=torch.int8, device=self.device
)
if len(hlc.shape) == 1 or len(hlc.shape) == 0:
hlc.view(1, 1)
if len(observations.shape) == 1:
observations = observations.unsqueeze(0)
actions = self.policy(observations, hlc, deterministic=deterministic)
if not deterministic:
noise = torch.normal(0, self.eps_scheduler.step(), size=actions.shape, device=self.device)
actions = torch.clamp(actions + noise, self.action_low, self.action_max)
if len(observations.shape) == 1 or observations.shape[0] == 1:
actions = actions.squeeze(0)
actions = actions.cpu().numpy()
return actions
def get_epsilon(self):
return self.eps_scheduler.epsilon()
class FullyConnectedTanhPolicy(nn.Module):
def __init__(self, observation_dim, action_dim, arch='256-256'):
super().__init__()
self.observation_dim = observation_dim
self.action_dim = action_dim
self.arch = arch
self.network = FullyConnectedNetwork(
observation_dim, action_dim, arch
)
# deterministic parameter just for compatibility
def forward(self, observation, deterministic=True):
output = self.network(observation)
output = torch.tanh(output)
return output
class FullyConnectedTanhPolicyHLC(nn.Module):
def __init__(self, observation_dim, action_dim, arch='256-256', hlcs=(0, 1, 2, 3)):
super().__init__()
self.observation_dim = observation_dim
self.action_dim = action_dim
self.arch = arch
self.hlcs = hlcs
self.networks = nn.ModuleDict({str(hlc): FullyConnectedNetwork(
observation_dim, action_dim, arch
) for hlc in hlcs})
# deterministic parameter just for compatibility
def forward(self, observation, hlc, deterministic=True):
output = []
indices_array = []
for i in self.hlcs:
mask = hlc == i
if len(mask.shape) == 0:
mask = mask.view(-1)
indices = torch.nonzero(mask)
filtered_observation = observation[mask]
prediction = self.networks[str(i)](filtered_observation)
prediction = torch.tanh(prediction)
output.append(prediction)
indices_array.append(indices)
output = torch.cat(output, dim=0)
indices_array = torch.cat(indices_array, dim=0).squeeze(dim=1)
# inverse indices array
_, inv_indices = torch.sort(indices_array)
# reorder output as input
output = output[inv_indices]
return output
def transfer_learning(self, from_hlc: int = 3, to_hlcs: tuple = (0, 1, 2)):
if from_hlc in self.hlcs:
for hlc in to_hlcs:
if hlc in self.hlcs:
self.networks[str(hlc)].load_state_dict(self.networks[str(from_hlc)].state_dict())
print(f"Transferred from {from_hlc} to {hlc}")
else:
print("Origin network is not present in the policy.")
# File: pyalgorithm/search/linear_search.py (repo: AjithPanneerselvam/algo)
""" Linear search
Time complexity - O(n)
"""
def linear_search(ls, val):
for i in range(len(ls)):
if ls[i] == val:
            # Found! Return the 1-based position (index + 1)
return i + 1
# Not found
return None
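
# Example (added for illustration): the hit branch returns i + 1, i.e. a 1-based position
# that stays truthy even when the match is at index 0; None signals "not found".
if __name__ == "__main__":
    assert linear_search([7, 3, 9], 7) == 1
    assert linear_search([7, 3, 9], 9) == 3
    assert linear_search([7, 3, 9], 4) is None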
| """ Linear search
Time complexity - O(n)
"""
def linear_search(ls, val):
for i in range(len(ls)):
if ls[i] == val:
# Found! Returns the index
return i + 1
# Not found
return None | en | 0.641422 | Linear search
Time complexity - O(n) # Found! Returns the index # Not found | 3.743306 | 4 |
# File: src/careers.py (repo: nilund93/whfrp_generator)
# careers
human_careers={
1:"apothecary",
2:"engineer",
3:"lawyer",
4:"nun",
5:"nun",
6:"physician",
7:"priest",
8:"priest",
9:"priest",
10:"priest",
11:"priest",
12:"scholar",
13:"scholar",
14:"wizard",
15:"agitator",
16:"artisan",
17:"artisan",
18:"beggar",
19:"beggar",
20:"investigator",
21:"merchant",
22:"rat catcher",
23:"rat catcher",
24:"townsman",
25:"townsman",
26:"townsman",
27:"watchman",
28:"advisor",
29:"artist",
30:"duellist",
31:"envoy",
32:"noble",
33:"servant",
34:"servant",
35:"servant",
36:"spy",
37:"warden",
38:"bailiff",
39:"hed<NAME>",
40:"herbalist",
41:"hunter",
42:"hunter",
43:"miner",
44:"mystic",
45:"scout",
46:"villager",
47:"villager",
48:"villager",
49:"villager",
50:"villager",
51:"<NAME>",
52:"coachman",
53:"entertainer",
54:"entertainer",
55:"flagellant",
56:"flagellant",
57:"messenger",
58:"pedlar",
59:"road warden",
60:"witch hunter",
61:"boatman",
62:"boatman",
63:"huffer",
64:"riverwarden",
65:"riverwarden",
66:"riverwoman",
67:"riverwoman",
68:"riverwoman",
69:"seaman",
70:"seaman",
71:"smuggler",
72:"stevedore",
73:"stevedore",
74:"wrecker",
75:"bawd",
76:"bawd",
77:"charlatan",
78:"fence",
79:"grave robber",
80:"outlaw",
81:"outlaw",
82:"outlaw",
83:"outlaw",
84:"racketeer",
85:"thief",
86:"thief",
87:"thief",
88:"witch",
89:"cavalryman",
90:"cavalryman",
91:"guard",
92:"guard",
93:"knight",
94:"pit fighter",
95:"protagonist",
96:"soldier",
97:"soldier",
98:"soldier",
99:"soldier",
100:"warrior priest"
}
dwarf_careers={
1:"apothecary",
2:"engineer",
3:"engineer",
4:"engineer",
5:"lawyer",
6:"lawyer",
7:"physician",
8:"scholar",
9:"scholar",
10:"agitator",
11:"agitator",
12:"artisan",
13:"artisan",
14:"artisan",
15:"artisan",
16:"artisan",
17:"artisan",
18:"beggar",
19:"investigator",
20:"investigator",
21:"merchant",
22:"merchant",
23:"merchant",
24:"merchant",
25:"rat catcher",
26:"townsman",
27:"townsman",
28:"townsman",
29:"townsman",
30:"townsman",
31:"townsman",
32:"watchman",
33:"watchman",
34:"watchman",
35:"advisor",
36:"advisor",
37:"artist",
38:"duellist",
39:"envoy",
40:"envoy",
41:"noble",
42:"servant",
43:"spy",
44:"warden",
45:"warden",
46:"bailiff",
47:"bailiff",
48:"hunter",
49:"hunter",
50:"miner",
51:"miner",
52:"miner",
53:"miner",
54:"miner",
55:"scout",
56:"villager",
57:"bount<NAME>",
58:"bounty hunter",
59:"bounty hunter",
60:"bounty hunter",
61:"coachman",
62:"entertainer",
63:"entertainer",
64:"messenger",
65:"messenger",
66:"pedlar",
67:"pedlar",
68:"boatman",
69:"boatman",
70:"huffer",
71:"riverwoman",
72:"riverwoman",
73:"seaman",
74:"smuggler",
75:"smuggler",
76:"stevedore",
77:"stevedore",
78:"wrecker",
79:"fence",
80:"outlaw",
81:"outlaw",
82:"outlaw",
83:"racketeer",
84:"thief",
85:"guard",
86:"guard",
87:"guard",
88:"pit figher",
89:"pit figher",
90:"pit figher",
91:"protagonist",
92:"protagonist",
93:"protagonist",
94:"soldier",
95:"soldier",
96:"soldier",
97:"slayer",
98:"slayer",
99:"slayer",
100:"slayer"
}
halfling_careers={
1:"apothecary",
2:"engineer",
3:"lawyer",
4:"lawyer",
5:"physician",
6:"physician",
7:"scholar",
8:"scholar",
9:"agitator",
10:"agitator",
11:"artisan",
12:"artisan",
13:"artisan",
14:"artisan",
15:"artisan",
16:"beggar",
17:"beggar",
18:"beggar",
19:"beggar",
20:"investigator",
21:"investigator",
22:"merchant",
23:"merchant",
24:"merchant",
25:"merchant",
26:"rat catcher",
27:"rat catcher",
28:"rat catcher",
29:"townsman",
30:"townsman",
31:"townsman",
32:"watchman",
33:"watchman",
34:"advisor",
35:"artist",
36:"artist",
37:"envoy",
38:"servant",
39:"servant",
40:"servant",
41:"servant",
42:"servant",
43:"servant",
44:"spy",
45:"warden",
46:"warden",
47:"bailiff",
48:"herbalist",
49:"herbalist",
50:"herbalist",
51:"hunter",
52:"hunter",
53:"miner",
54:"scout",
55:"villager",
56:"villager",
57:"villager",
58:"bo<NAME>",
59:"coachman",
60:"coachman",
61:"entertainer",
62:"entertainer",
63:"entertainer",
64:"messenger",
65:"messenger",
66:"pedlar",
67:"pedlar",
68:"road warden",
69:"boatman",
70:"huffer",
71:"riverwarden",
72:"riverwoman",
73:"riverwoman",
74:"riverwoman",
75:"seaman",
76:"smuggler",
77:"smuggler",
78:"smuggler",
79:"smuggler",
80:"stevedore",
81:"stevedore",
82:"stevedore",
83:"bawd",
84:"bawd",
85:"bawd",
86:"charlatan",
87:"fence",
88:"grave robber",
89:"outlaw",
90:"racketeer",
91:"thief",
92:"thief",
93:"thief",
94:"thief",
95:"guard",
96:"guard",
97:"pit fighter",
98:"soldier",
99:"soldier",
100:"soldier"
}
highelf_careers={
1:"apothecary",
2:"apothecary",
3:"lawyer",
4:"lawyer",
5:"lawyer",
6:"lawyer",
7:"physician",
8:"physician",
9:"scholar",
10:"scholar",
11:"scholar",
12:"scholar",
13:"wizard",
14:"wizard",
15:"wizard",
16:"wizard",
17:"artisan",
18:"artisan",
19:"artisan",
20:"investigator",
21:"investigator",
22:"merchant",
23:"merchant",
24:"merchant",
25:"merchant",
26:"merchant",
27:"townsman",
28:"townsman",
29:"watchman",
30:"advisor",
31:"advisor",
32:"artist",
33:"duellist",
34:"duellist",
35:"envoy",
36:"envoy",
37:"envoy",
38:"noble",
39:"noble",
40:"noble",
41:"spy",
42:"spy",
43:"spy",
44:"warden",
45:"warden",
46:"herbalist",
47:"herbalist",
48:"hunter",
49:"hunter",
50:"hunter",
51:"scout",
52:"scout",
53:"scout",
54:"scout",
55:"scout",
56:"scout",
57:"<NAME>",
58:"bounty hunter",
59:"bounty hunter",
60:"entertainer",
61:"entertainer",
62:"entertainer",
63:"messenger",
64:"boatman",
65:"seaman",
66:"seaman",
67:"seaman",
68:"seaman",
69:"seaman",
70:"seaman",
71:"seaman",
72:"seaman",
73:"seaman",
74:"seaman",
75:"seaman",
76:"seaman",
77:"seaman",
78:"seaman",
79:"seaman",
80:"smuggler",
81:"bawd",
82:"bawd",
83:"charlatan",
84:"charlatan",
85:"charlatan",
86:"outlaw",
87:"outlaw",
88:"outlaw",
89:"cavalryman",
90:"cavalryman",
91:"cavalryman",
92:"cavalryman",
93:"guard",
94:"guard",
95:"knight",
96:"pit fighter",
97:"pit fighter",
98:"protagonist",
99:"soldier",
100:"soldier"
}
woodelf_careers={
1:"scholar",
2:"wizard",
3:"wizard",
4:"wizard",
5:"wizard",
6:"artisan",
7:"artisan",
8:"artisan",
9:"artisan",
10:"artisan",
11:"advisor",
12:"advisor",
13:"advisor",
14:"advisor",
15:"artist",
16:"artist",
17:"artist",
18:"artist",
19:"envoy",
20:"envoy",
21:"envoy",
22:"envoy",
23:"envoy",
24:"envoy",
25:"envoy",
26:"noble",
27:"noble",
28:"noble",
29:"noble",
30:"noble",
31:"noble",
32:"spy",
33:"spy",
34:"spy",
35:"spy",
36:"herbalist",
37:"herbalist",
38:"herbalist",
39:"herbalist",
40:"herbalist",
41:"herbalist",
42:"herbalist",
43:"hunter",
44:"hunter",
45:"hunter",
46:"hunter",
47:"hunter",
48:"hunter",
49:"hunter",
50:"hunter",
51:"hunter",
52:"hunter",
53:"mystic",
54:"mystic",
55:"mystic",
56:"mystic",
57:"messenger",
58:"scout",
59:"scout",
60:"scout",
61:"scout",
62:"scout",
63:"scout",
64:"scout",
65:"scout",
66:"scout",
67:"scout",
68:"scout",
69:"bounty hunter",
70:"bounty hunter",
71:"entertainer",
72:"entertainer",
73:"entertainer",
74:"entertainer",
75:"entertainer",
76:"messenger",
77:"messenger",
78:"messenger",
79:"wrecker",
80:"outlaw",
81:"outlaw",
82:"outlaw",
83:"outlaw",
84:"outlaw",
85:"outlaw",
86:"cavalryman",
87:"cavalryman",
88:"cavalryman",
89:"cavalryman",
90:"cavalryman",
91:"guard",
92:"guard",
93:"knight",
94:"knight",
95:"pit fighter",
96:"pit fighter",
97:"soldier",
98:"soldier",
99:"soldier",
100:"soldier"
}
# class
academics = ["apothecary", "engineer", "lawyer", "nun", "physician", "priest", "scholar", "wizard"]
burghers = ["agitator", "artisan", "beggar", "investigator", "merchant", "rat catcher", "townsman", "watchman"]
courtiers = ["advisor", "artist", "duelist", "envoy", "noble", "servant", "spy", "warden"]
peasants = ["bailiff", "hedge witch", "herbalist", "hunter", "miner", "mystic", "scout", "villager"]
rangers = ["bounty hunter", "coachman", "entertainer", "flagellant", "messenger", "pedlar", "road warden", "witch hunter"]
riverfolk = ["boatman", "huffer", "riverwarden", "riverwoman", "seaman", "smuggler", "stevedore", "wrecker"]
rogues = ["bawd", "charlatan", "fence", "grave robber", "outlaw", "racketeer", "theif", "witch"]
warriors = ["cavalryman", "guard", "knight", "pit fighter", "protagonist", "soldier", "slayer", "warrior priest"]
# names
male_reiklander = ["Aimar", "Askan", "Gerlach", "Rudolf", "Elmer",
"Siegfried", "Hans", "Wilmer", "Helmer", "Helmut",
"Wilbur", "Bruno", "Leonhard", "Lambert", "Reinhard",
"Gotthard", "Ludwig", "Otto", "Dagobert", "Theobald",
"Ralf", "Rolf", "Friedrich", "Dieter", "Adolf",
"Heinrich", "Ralf"]
female_reiklander = ["Clementia", "Erika", "Fryke", "Frederike",
"Gerthrud", "Helga", "Marieke", "Siegfreda",
"Frieda", "Ulrika", "Wilhelmina", "Liebtrud",
"Saskia", "Hildegard", "Hallgerd", "Ebba", "Hulda",
"Yvonne", "Charlotte", "Inge", "Kajsa", "Kirsa",
"Geraldine"]
reiklander_surname = ["Schaffenberger", "Adolphus", "Auerswalder",
"Wissenlander", "Averlander", "Baumer",
"Autlermann", "Nordlander", "Damstadter",
"Eshermann", "Esselmann", "Breuer",
"Bredermann", "Dasselmann", "Diermar",
"Donatus", "Eilhardt", "Ehrhard",
"Dunkelberg", "Erasmus", "Gustavus",
"Hergiger", "Henroth", "Hasselmann",
"Hausier", "Hahnbrandt", "Gundersohn",
"Heffengenger", "Heidmann", "Heinrich",
"Jaeger", "Bauer", "Hausier",
"Hetzer", "Horst", "Feldtmann",
"Feldt", "Garmann", "Frohlich",
"Gisel", "Grenz", "Grundenburger",
"Heinz", "Geldt", "Klein"] | # careers
# File: src/main/python/apache/thermos/observer/http/static_assets.py (repo: wickman/incubator-aurora)
#
# Copyright 2013 Apache Software Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import mimetypes
import os
from bottle import HTTPResponse
import pkg_resources
from twitter.common import log
from twitter.common.http.server import HttpServer
class StaticAssets(object):
"""
Serve the /assets directory.
"""
def __init__(self):
self._assets = {}
self._detect_assets()
def _detect_assets(self):
log.info('detecting assets...')
assets = pkg_resources.resource_listdir(__name__, 'assets')
cached_assets = {}
for asset in assets:
log.info(' detected asset: %s' % asset)
cached_assets[asset] = pkg_resources.resource_string(
__name__, os.path.join('assets', asset))
self._assets = cached_assets
@HttpServer.route("/favicon.ico")
def handle_favicon(self):
HttpServer.redirect("/assets/favicon.ico")
@HttpServer.route("/assets/:filename")
def handle_asset(self, filename):
# TODO(wickman) Add static_content to bottle.
if filename in self._assets:
mimetype, encoding = mimetypes.guess_type(filename)
headers = {}
if mimetype: headers['Content-Type'] = mimetype
if encoding: headers['Content-Encoding'] = encoding
return HTTPResponse(self._assets[filename], header=headers)
else:
HttpServer.abort(404, 'Unknown asset: %s' % filename)
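
# Note: mimetypes.guess_type drives the headers above -- for "favicon.ico" it returns an
# icon MIME type and no encoding (only Content-Type is set), while for a name such as
# "bundle.js.gz" it returns a JavaScript MIME type plus "gzip", so Content-Encoding is
# set as well.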
# File: Course02/Week3/12.5. Returning a value from a function.py (repo: skunkworksdev/Python_Programming_Michigan)
"""
12.5. Returning a value from a function
"""
# Not only can you pass a parameter value into a function, a function can also
# produce a value. You have already seen this in some previous functions that
# you have used. For example, len takes a list or string as a parameter value
# and returns a number, the length of that list or string. range takes an
# integer as a parameter value and returns a sequence containing all the numbers
# from 0 up to, but not including, that parameter value.
#
# Functions that return values are sometimes called fruitful functions. In many
# other languages, a function that doesn’t return a value is called a
# procedure, but we will stick here with the Python way of also calling it a
# function, or if we want to stress it, a non-fruitful function.
# https://youtu.be/LGOZyrRCJ1o
"""
How do we write our own fruitful function? Let’s start by creating a very
simple mathematical function that we will call square. The square function
will take one number as a parameter and return the result of squaring that
number. Here is the black-box diagram with the Python code following.
"""
def square(x):
y = x * x
return y
toSquare = 10
result = square(toSquare)
print("The result of {} squared is {}.".format(toSquare, result))
"""
The return statement is followed by an expression which is evaluated. Its
result is returned to the caller as the “fruit” of calling this function.
Because the return statement can contain any Python expression we could have
avoided creating the temporary variable y and simply used return x*x. Try
modifying the square function above to see that this works just the same.
On the other hand, using temporary variables like y in the program above makes
debugging easier. These temporary variables are referred to as local variables.
Notice something important here. The name of the variable we pass as an
argument — toSquare — has nothing to do with the name of the formal
parameter — x. It is as if x = toSquare is executed when square is called.
It doesn’t matter what the value was named in the caller (the place where
the function was invoked). Inside square, its name is x. You can see this
very clearly in codelens, where the global variables and the local variables
for the square function are in separate boxes. """
def square(x):
y = x * x
return y
toSquare = 10
squareResult = square(toSquare)
"""
There is one more aspect of function return values that should be noted. All
Python functions return the special value None unless there is an explicit
return statement with a value other than None. Consider the following common
mistake made by beginning Python programmers. As you step through this example,
pay very close attention to the return value in the local variables listing.
Then look at what is printed when the function is over.
"""
def square(x):
y = x * x
print(y) # Bad! This is confusing! Should use return instead!
toSquare = 10
squareResult = square(toSquare)
print("The result of {} squared is {}.".format(toSquare, squareResult))
"""
The problem with this function is that even though it prints the value of the
squared input, that value will not be returned to the place where the call
was done. Instead, the value None will be returned. Since line 6 uses the
return value as the right hand side of an assignment statement, squareResult
will have None as its value and the result printed in line 7 is incorrect.
Typically, functions will return values that can be printed or processed in
some other way by the caller.
A return statement, once executed, immediately terminates execution of a
function, even if it is not the last statement in the function. In the
following code, when line 3 executes, the value 5 is returned and assigned to
the variable x, then printed. Lines 4 and 5 never execute. Run the following
code and try making some modifications of it to make sure you understand why
“there” and 10 never print out.
"""
def weird():
print("here")
return 5
print("there")
return 10
x = weird()
print(x)
"""
The fact that a return statement immediately ends execution of the code block
inside a function is important to understand for writing complex programs, and
it can also be very useful. The following example is a situation where you can
use this to your advantage – and understanding this will help you understand
other people’s code better, and be able to walk through code more confidently.
Consider a situation where you want to write a function to find out, from a
class attendance list, whether anyone’s first name is longer than five letters,
called longer_than_five. If there is anyone in class whose first name is longer
than 5 letters, the function should return True. Otherwise, it should return
False.
In this case, you’ll be using conditional statements in the code that exists
in the function body, the code block indented underneath the function
definition statement (just like the code that starts with the line
print("here") in the example above – that’s the body of the function weird,
above).
Bonus challenge for studying: After you look at the explanation below, stop
looking at the code – just the description of the function above it, and try
to write the code yourself! Then test it on different lists and make sure that
it works. But read the explanation first, so you can be sure you have a solid
grasp on these function mechanics.
First, an English plan for this new function to define called longer_than_five:
You’ll want to pass in a list of strings (representing people’s first names)
to the function.
You’ll want to iterate over all the items in the list, each of the strings.
As soon as you get to one name that is longer than five letters, you know the
function should return True – yes, there is at least one name longer than five
letters!
And if you go through the whole list and there was no name longer than five
letters, then the function should return False.
Now, the code:
"""
def longer_than_five(list_of_names):
for name in list_of_names: # iterate over the list to look at each name
if len(name) > 5: # as soon as you see a name longer than 5 letters,
return True
# If Python executes that return statement, the function is over and
# the rest of the code will not run -- you already have your answer!
return False # You will only get to this line if you
# iterated over the whole list and did not get a name where
# the if expression evaluated to True, so at this point, it's correct to
# return False!
# Here are a couple sample calls to the function with different lists of names.
# Try running this code in Codelens a few times and make sure you understand
# exactly what is happening.
list1 = ["Sam", "Tera", "Sal", "Amita"]
list2 = ["Rey", "Ayo", "Lauren", "Natalie"]
print(longer_than_five(list1))
print(longer_than_five(list2))
"""
So far, we have just seen return values being assigned to variables. For
example, we had the line squareResult = square(toSquare). As with all
assignment statements, the right hand side is executed first. It invokes the
square function, passing in a parameter value 10 (the current value of
toSquare). That returns a value 100, which completes the evaluation of the
right-hand side of the assignment. 100 is then assigned to the variable
squareResult. In this case, the function invocation was the entire expression
that was evaluated.
Function invocations, however, can also be used as part of more complicated
expressions. For example, squareResult = 2 * square(toSquare). In this case,
the value 100 is returned and is then multiplied by 2 to produce the value 200.
When python evaluates an expression like x * 3, it substitutes the current
value of x into the expression and then does the multiplication. When python
evaluates an expression like 2 * square(toSquare), it substitutes the return
value 100 for the entire function invocation and then does the multiplication.
To reiterate, when executing a line of code
squareResult = 2 * square(toSquare), the python interpreter does these steps:
It’s an assignment statement, so evaluate the right-hand side
expression 2 * square(toSquare).
Look up the values of the variables square and toSquare: square is a function
object and toSquare is 10
Pass 10 as a parameter value to the function, get back the return value 100
Substitute 100 for square(toSquare), so that the expression now reads 2 * 100
Assign 200 to variable squareResult
Check your understanding
"""
# func-4-1: What is wrong with the following function definition:
def addEm(x, y, z):
return x + y + z
print('the answer is', x + y + z)
# A. You should never use a print statement in a function definition.
# B. You should not have any statements in a function after the return
# statement. Once the function gets to the return statement it will
# immediately stop executing the function. [x]
# C. You must calculate the value of x+y+z before you return it.
# D. A function cannot return a number.
"""This is a very common mistake so be sure to watch out for it when you write
your code!"""
# func-4-2: What will the following function return?
def addEm(x, y, z):
print(x + y + z)
# A. The value None [x]
# B. The value of x+y+z
# C. The string 'x+y+z'
"""
We have accidentally used print where we mean return. Therefore, the function
will return the value None by default. This is a VERY COMMON mistake so watch
out! This mistake is also particularly difficult to find because when you run
the function the output looks the same. It is not until you try to assign its
value to a variable that you can notice a difference.
"""
# func-4-3: What will the following code output?
def square(x):
y = x * x
return y
print(square(5) + square(5))
# A. 25
# B. 50 [x]
# C. 25 + 25
"""The two return values are added together."""
# func-4-4: What will the following code output?
def square(x):
y = x * x
return y
print(square(square(2)))
# A. 8
# B. 16 [x]
# C. Error: can't put a function invocation inside parentheses
"""It squares 2, yielding the value 4. 4 is then passed as a value to square
again, yielding 16."""
# func-4-5: What will the following code output?
def cyu2(s1, s2):
x = len(s1)
y = len(s2)
return x - y
z = cyu2("Yes", "no")
if z > 0:
print("First one was longer")
else:
print("Second one was at least as long")
# A. 1
# B. Yes
# C. First one was longer [x]
# D. Second one was at least as long
# E. Error
"""cyu2 returns the value 1, which is assigned to z."""
# func-4-6: Which will print out first, square, g, or a number?
def square(x):
print("square")
return x * x
def g(y):
print("g")
return y + 3
print(square(g(2)))
# A. square
# B. g [x]
# C. a number
"""g has to be executed and return a value in order to know what paramater
value to provide to x."""
# func-4-7: How many lines will the following code print?
def show_me_numbers(list_of_ints):
print(10)
print("Next we'll accumulate the sum")
accum = 0
for num in list_of_ints:
accum = accum + num
return accum
print("All done with accumulation!")
show_me_numbers([4, 2, 3])
# A. 3
# B. 2 [x]
# C. None
"""Yes! Two printed lines, and then the function body execution reaches a
return statement."""
# 8. Write a function named same that takes a string as input, and simply
# returns that string.
def same(string):
return string
# 9. Write a function called same_thing that returns the parameter, unchanged.
def same_thing(unchanged):
return unchanged
# 10. Write a function called subtract_three that takes an integer or any
# number as input, and returns that number minus three.
def subtract_three(x):
return x - 3
# 11. Write a function called change that takes one number as its input and
# returns that number, plus 7.
def change(x):
return x + 7
# 12. Write a function named intro that takes a string as input. Given the
# string “Becky” as input, the function should return: “Hello, my name is
# Becky and I love SI 106.”
def intro(stringV):
return "Hello, my name is " + stringV + " and I love SI 106."
intro("Becky") # Chame a função intro
# Passe o parâmetro de nome Becky
# 13. Write a function called s_change that takes one string as input and
# returns that string, concatenated with the string ” for fun.”.
def s_change(stringX):
return stringX + " for fun."
# 14. Write a function called decision that takes a string as input, and then
# checks the number of characters. If it has over 17 characters, return “This
# is a long string”, if it is shorter or has 17 characters, return “This is a
# short string”.
def decision(stringZ):
if len(stringZ) > 17:
return "This is a long string"
else:
return "This is a short string"
| """
12.5. Returning a value from a function
"""
# Not only can you pass a parameter value into a function, a function can also
# produce a value. You have already seen this in some previous functions that
# you have used. For example, len takes a list or string as a parameter value
# and returns a number, the length of that list or string. range takes an
# integer as a parameter value and returns a list containing all the numbers
# from 0 up to that parameter value.
#
# Functions that return values are sometimes called fruitful functions. In many
# other languages, a function that doesn’t return a value is called a
# procedure, but we will stick here with the Python way of also calling it a
# function, or if we want to stress it, a non-fruitful function.
# https://youtu.be/LGOZyrRCJ1o
"""
How do we write our own fruitful function? Let’s start by creating a very
simple mathematical function that we will call square. The square function
will take one number as a parameter and return the result of squaring that
number. Here is the black-box diagram with the Python code following.
"""
def square(x):
y = x * x
return y
toSquare = 10
result = square(toSquare)
print("The result of {} squared is {}.".format(toSquare, result))
"""
The return statement is followed by an expression which is evaluated. Its
result is returned to the caller as the “fruit” of calling this function.
Because the return statement can contain any Python expression we could have
avoided creating the temporary variable y and simply used return x*x. Try
modifying the square function above to see that this works just the same.
On the other hand, using temporary variables like y in the program above makes
debugging easier. These temporary variables are referred to as local variables.
Notice something important here. The name of the variable we pass as an
argument — toSquare — has nothing to do with the name of the formal
parameter — x. It is as if x = toSquare is executed when square is called.
It doesn’t matter what the value was named in the caller (the place where
the function was invoked). Inside square, its name is x. You can see this
very clearly in codelens, where the global variables and the local variables
for the square function are in separate boxes. """
def square(x):
y = x * x
return y
toSquare = 10
squareResult = square(toSquare)
"""
There is one more aspect of function return values that should be noted. All
Python functions return the special value None unless there is an explicit
return statement with a value other than None. Consider the following common
mistake made by beginning Python programmers. As you step through this example,
pay very close attention to the return value in the local variables listing.
Then look at what is printed when the function is over.
"""
def square(x):
y = x * x
print(y) # Bad! This is confusing! Should use return instead!
toSquare = 10
squareResult = square(toSquare)
print("The result of {} squared is {}.".format(toSquare, squareResult))
"""
The problem with this function is that even though it prints the value of the
squared input, that value will not be returned to the place where the call
was done. Instead, the value None will be returned. Since line 6 uses the
return value as the right hand side of an assignment statement, squareResult
will have None as its value and the result printed in line 7 is incorrect.
Typically, functions will return values that can be printed or processed in
some other way by the caller.
A return statement, once executed, immediately terminates execution of a
function, even if it is not the last statement in the function. In the
following code, when line 3 executes, the value 5 is returned and assigned to
the variable x, then printed. Lines 4 and 5 never execute. Run the following
code and try making some modifications of it to make sure you understand why
“there” and 10 never print out.
"""
def weird():
print("here")
return 5
print("there")
return 10
x = weird()
print(x)
"""
The fact that a return statement immediately ends execution of the code block
inside a function is important to understand for writing complex programs, and
it can also be very useful. The following example is a situation where you can
use this to your advantage – and understanding this will help you understand
other people’s code better, and be able to walk through code more confidently.
Consider a situation where you want to write a function to find out, from a
class attendance list, whether anyone’s first name is longer than five letters,
called longer_than_five. If there is anyone in class whose first name is longer
than 5 letters, the function should return True. Otherwise, it should return
False.
In this case, you’ll be using conditional statements in the code that exists
in the function body, the code block indented underneath the function
definition statement (just like the code that starts with the line
print("here") in the example above – that’s the body of the function weird,
above).
Bonus challenge for studying: After you look at the explanation below, stop
looking at the code – just the description of the function above it, and try
to write the code yourself! Then test it on different lists and make sure that
it works. But read the explanation first, so you can be sure you have a solid
grasp on these function mechanics.
First, an English plan for this new function to define called longer_than_five:
You’ll want to pass in a list of strings (representing people’s first names)
to the function.
You’ll want to iterate over all the items in the list, each of the strings.
As soon as you get to one name that is longer than five letters, you know the
function should return True – yes, there is at least one name longer than five
letters!
And if you go through the whole list and there was no name longer than five
letters, then the function should return False.
Now, the code:
"""
def longer_than_five(list_of_names):
for name in list_of_names: # iterate over the list to look at each name
if len(name) > 5: # as soon as you see a name longer than 5 letters,
return True
# If Python executes that return statement, the function is over and
# the rest of the code will not run -- you already have your answer!
return False # You will only get to this line if you
# iterated over the whole list and did not get a name where
# the if expression evaluated to True, so at this point, it's correct to
# return False!
# Here are a couple sample calls to the function with different lists of names.
# Try running this code in Codelens a few times and make sure you understand
# exactly what is happening.
list1 = ["Sam", "Tera", "Sal", "Amita"]
list2 = ["Rey", "Ayo", "Lauren", "Natalie"]
print(longer_than_five(list1))
print(longer_than_five(list2))
"""
So far, we have just seen return values being assigned to variables. For
example, we had the line squareResult = square(toSquare). As with all
assignment statements, the right hand side is executed first. It invokes the
square function, passing in a parameter value 10 (the current value of
toSquare). That returns a value 100, which completes the evaluation of the
right-hand side of the assignment. 100 is then assigned to the variable
squareResult. In this case, the function invocation was the entire expression
that was evaluated.
Function invocations, however, can also be used as part of more complicated
expressions. For example, squareResult = 2 * square(toSquare). In this case,
the value 100 is returned and is then multiplied by 2 to produce the value 200.
When python evaluates an expression like x * 3, it substitutes the current
value of x into the expression and then does the multiplication. When python
evaluates an expression like 2 * square(toSquare), it substitutes the return
value 100 for the entire function invocation and then does the multiplication.
To reiterate, when executing a line of code
squareResult = 2 * square(toSquare), the python interpreter does these steps:
It’s an assignment statement, so evaluate the right-hand side
expression 2 * square(toSquare).
Look up the values of the variables square and toSquare: square is a function
object and toSquare is 10
Pass 10 as a parameter value to the function, get back the return value 100
Substitute 100 for square(toSquare), so that the expression now reads 2 * 100
Assign 200 to variable squareResult
Check your understanding
"""
# func-4-1: What is wrong with the following function definition:
def addEm(x, y, z):
return x + y + z
print('the answer is', x + y + z)
# A. You should never use a print statement in a function definition.
# B. You should not have any statements in a function after the return
# statement. Once the function gets to the return statement it will
# immediately stop executing the function. [x]
# C. You must calculate the value of x+y+z before you return it.
# D. A function cannot return a number.
"""This is a very common mistake so be sure to watch out for it when you write
your code!"""
# func-4-2: What will the following function return?
def addEm(x, y, z):
print(x + y + z)
# A. The value None [x]
# B. The value of x+y+z
# C. The string 'x+y+z'
"""
We have accidentally used print where we mean return. Therefore, the function
will return the value None by default. This is a VERY COMMON mistake so watch
out! This mistake is also particularly difficult to find because when you run
the function the output looks the same. It is not until you try to assign its
value to a variable that you can notice a difference.
"""
# func-4-3: What will the following code output?
def square(x):
y = x * x
return y
print(square(5) + square(5))
# A. 25
# B. 50 [x]
# C. 25 + 25
"""The two return values are added together."""
# func-4-4: What will the following code output?
def square(x):
y = x * x
return y
print(square(square(2)))
# A. 8
# B. 16 [x]
# C. Error: can't put a function invocation inside parentheses
"""It squares 2, yielding the value 4. 4 is then passed as a value to square
again, yielding 16."""
# func-4-5: What will the following code output?
def cyu2(s1, s2):
x = len(s1)
y = len(s2)
return x - y
z = cyu2("Yes", "no")
if z > 0:
print("First one was longer")
else:
print("Second one was at least as long")
# A. 1
# B. Yes
# C. First one was longer [x]
# D. Second one was at least as long
# E. Error
"""cyu2 returns the value 1, which is assigned to z."""
# func-4-6: Which will print out first, square, g, or a number?
def square(x):
print("square")
return x * x
def g(y):
print("g")
return y + 3
print(square(g(2)))
# A. square
# B. g [x]
# C. a number
"""g has to be executed and return a value in order to know what paramater
value to provide to x."""
# func-4-7: How many lines will the following code print?
def show_me_numbers(list_of_ints):
print(10)
print("Next we'll accumulate the sum")
accum = 0
for num in list_of_ints:
accum = accum + num
return accum
print("All done with accumulation!")
show_me_numbers([4, 2, 3])
# A. 3
# B. 2 [x]
# C. None
"""Yes! Two printed lines, and then the function body execution reaches a
return statement."""
# 8. Write a function named same that takes a string as input, and simply
# returns that string.
def same(string):
return string
# 9. Write a function called same_thing that returns the parameter, unchanged.
def same_thing(unchanged):
return unchanged
# 10. Write a function called subtract_three that takes an integer or any
# number as input, and returns that number minus three.
def subtract_three(x):
return x - 3
# 11. Write a function called change that takes one number as its input and
# returns that number, plus 7.
def change(x):
return x + 7
# 12. Write a function named intro that takes a string as input. Given the
# string “Becky” as input, the function should return: “Hello, my name is
# Becky and I love SI 106.”
def intro(stringV):
return "Hello, my name is " + stringV + " and I love SI 106."
intro("Becky") # Chame a função intro
# Passe o parâmetro de nome Becky
# 13. Write a function called s_change that takes one string as input and
# returns that string, concatenated with the string ” for fun.”.
def s_change(stringX):
return stringX + " for fun."
# 14. Write a function called decision that takes a string as input, and then
# checks the number of characters. If it has over 17 characters, return “This
# is a long string”, if it is shorter or has 17 characters, return “This is a
# short string”.
def decision(stringZ):
if len(stringZ) > 17:
return "This is a long string"
else:
return "This is a short string"
| en | 0.89795 | 12.5. Returning a value from a function # Not only can you pass a parameter value into a function, a function can also # produce a value. You have already seen this in some previous functions that # you have used. For example, len takes a list or string as a parameter value # and returns a number, the length of that list or string. range takes an # integer as a parameter value and returns a list containing all the numbers # from 0 up to that parameter value. # # Functions that return values are sometimes called fruitful functions. In many # other languages, a function that doesn’t return a value is called a # procedure, but we will stick here with the Python way of also calling it a # function, or if we want to stress it, a non-fruitful function. # https://youtu.be/LGOZyrRCJ1o How do we write our own fruitful function? Let’s start by creating a very simple mathematical function that we will call square. The square function will take one number as a parameter and return the result of squaring that number. Here is the black-box diagram with the Python code following. The return statement is followed by an expression which is evaluated. Its result is returned to the caller as the “fruit” of calling this function. Because the return statement can contain any Python expression we could have avoided creating the temporary variable y and simply used return x*x. Try modifying the square function above to see that this works just the same. On the other hand, using temporary variables like y in the program above makes debugging easier. These temporary variables are referred to as local variables. Notice something important here. The name of the variable we pass as an argument — toSquare — has nothing to do with the name of the formal parameter — x. It is as if x = toSquare is executed when square is called. It doesn’t matter what the value was named in the caller (the place where the function was invoked). Inside square, it’s name is x. You can see this very clearly in codelens, where the global variables and the local variables for the square function are in separate boxes. There is one more aspect of function return values that should be noted. All Python functions return the special value None unless there is an explicit return statement with a value other than None. Consider the following common mistake made by beginning Python programmers. As you step through this example, pay very close attention to the return value in the local variables listing. Then look at what is printed when the function is over. # Bad! This is confusing! Should use return instead! The problem with this function is that even though it prints the value of the squared input, that value will not be returned to the place where the call was done. Instead, the value None will be returned. Since line 6 uses the return value as the right hand side of an assignment statement, squareResult will have None as its value and the result printed in line 7 is incorrect. Typically, functions will return values that can be printed or processed in some other way by the caller. A return statement, once executed, immediately terminates execution of a function, even if it is not the last statement in the function. In the following code, when line 3 executes, the value 5 is returned and assigned to the variable x, then printed. Lines 4 and 5 never execute. Run the following code and try making some modifications of it to make sure you understand why “there” and 10 never print out. 
The fact that a return statement immediately ends execution of the code block inside a function is important to understand for writing complex programs, and it can also be very useful. The following example is a situation where you can use this to your advantage – and understanding this will help you understand other people’s code better, and be able to walk through code more confidently. Consider a situation where you want to write a function to find out, from a class attendance list, whether anyone’s first name is longer than five letters, called longer_than_five. If there is anyone in class whose first name is longer than 5 letters, the function should return True. Otherwise, it should return False. In this case, you’ll be using conditional statements in the code that exists in the function body, the code block indented underneath the function definition statement (just like the code that starts with the line print("here") in the example above – that’s the body of the function weird, above). Bonus challenge for studying: After you look at the explanation below, stop looking at the code – just the description of the function above it, and try to write the code yourself! Then test it on different lists and make sure that it works. But read the explanation first, so you can be sure you have a solid grasp on these function mechanics. First, an English plan for this new function to define called longer_than_five: You’ll want to pass in a list of strings (representing people’s first names) to the function. You’ll want to iterate over all the items in the list, each of the strings. As soon as you get to one name that is longer than five letters, you know the function should return True – yes, there is at least one name longer than five letters! And if you go through the whole list and there was no name longer than five letters, then the function should return False. Now, the code: # iterate over the list to look at each name # as soon as you see a name longer than 5 letters, # If Python executes that return statement, the function is over and # the rest of the code will not run -- you already have your answer! # You will only get to this line if you # iterated over the whole list and did not get a name where # the if expression evaluated to True, so at this point, it's correct to # return False! # Here are a couple sample calls to the function with different lists of names. # Try running this code in Codelens a few times and make sure you understand # exactly what is happening. So far, we have just seen return values being assigned to variables. For example, we had the line squareResult = square(toSquare). As with all assignment statements, the right hand side is executed first. It invokes the square function, passing in a parameter value 10 (the current value of toSquare). That returns a value 100, which completes the evaluation of the right-hand side of the assignment. 100 is then assigned to the variable squareResult. In this case, the function invocation was the entire expression that was evaluated. Function invocations, however, can also be used as part of more complicated expressions. For example, squareResult = 2 * square(toSquare). In this case, the value 100 is returned and is then multiplied by 2 to produce the value 200. When python evaluates an expression like x * 3, it substitutes the current value of x into the expression and then does the multiplication. 
When python evaluates an expression like 2 * square(toSquare), it substitutes the return value 100 for entire function invocation and then does the multiplication. To reiterate, when executing a line of code squareResult = 2 * square(toSquare), the python interpreter does these steps: It’s an assignment statement, so evaluate the right-hand side expression 2 * square(toSquare). Look up the values of the variables square and toSquare: square is a function object and toSquare is 10 Pass 10 as a parameter value to the function, get back the return value 100 Substitute 100 for square(toSquare), so that the expression now reads 2 * 100 Assign 200 to variable squareResult Check your understanding # func-4-1: What is wrong with the following function definition: # A. You should never use a print statement in a function definition. # B. You should not have any statements in a function after the return # statement. Once the function gets to the return statement it will # immediately stop executing the function. [x] # C. You must calculate the value of x+y+z before you return it. # D. A function cannot return a number. This is a very common mistake so be sure to watch out for it when you write your code! # func-4-2: What will the following function return? # A. The value None [x] # B. The value of x+y+z # C. The string 'x+y+z' We have accidentally used print where we mean return. Therefore, the function will return the value None by default. This is a VERY COMMON mistake so watch out! This mistake is also particularly difficult to find because when you run the function the output looks the same. It is not until you try to assign its value to a variable that you can notice a difference. # func-4-3: What will the following code output? # A. 25 # B. 50 [x] # C. 25 + 25 The two return values are added together. # func-4-4: What will the following code output? # A. 8 # B. 16 [x] # C. Error: can't put a function invocation inside parentheses It squares 2, yielding the value 4. 4 is then passed as a value to square again, yeilding 16. # func-4-5: What will the following code output? # A. 1 # B. Yes # C. First one was longer [x] # D. Second one was at least as long # E. Error cyu2 returns the value 1, which is assigned to z. # func-4-6: Which will print out first, square, g, or a number? # A. square # B. g # C. a number g has to be executed and return a value in order to know what paramater value to provide to x. # func-4-7: How many lines will the following code print? # A. 3 # B. 2 [x] # C. None Yes! Two printed lines, and then the function body execution reaches a return statement. # 8. Write a function named same that takes a string as input, and simply # returns that string. # 9. Write a function called same_thing that returns the parameter, unchanged. # 10. Write a function called subtract_three that takes an integer or any # number as input, and returns that number minus three. # 11. Write a function called change that takes one number as its input and # returns that number, plus 7. # 12. Write a function named intro that takes a string as input. Given the # string “Becky” as input, the function should return: “Hello, my name is # Becky and I love SI 106.” # Chame a função intro # Passe o parâmetro de nome Becky # 13. Write a function called s_change that takes one string as input and # returns that string, concatenated with the string ” for fun.”. # 14. Write a function called decision that takes a string as input, and then # checks the number of characters. 
If it has over 17 characters, return “This # is a long string”, if it is shorter or has 17 characters, return “This is a # short string”. | 4.750952 | 5 |
metarunlog/util.py | tomsercu/metarunlog | 2 | 6632659 | <gh_stars>1-10
# Metarunlog, experiment management tool.
# Author: <NAME>
# Date: 2015-01-23
import datetime
import subprocess
def nowstring(sec=True, ms= False):
tstr = datetime.datetime.now().isoformat()
if not ms:
tstr = tstr.split('.')[0]
if not sec:
tstr = tstr.rsplit(':',1)[0]
return tstr
def sshify(cmd, sshHost, sshPass, vfh=None):
cleancmd = ''
if sshHost:
#cmd = 'ssh -t {} "{}"'.format(sshHost, cmd) #works but messes up terminal
#cmd = 'ssh {} "shopt -s huponexit; {}"'.format(sshHost, cmd) # doesnt work to kill job on exit
cmd = 'ssh {} "{}"'.format(sshHost, cmd)
#TODO use paramiko or pexpect see http://stackoverflow.com/questions/4669204/send-ctrl-c-to-remote-processes-started-via-subprocess-popen-and-ssh
if sshPass:
cleancmd = "sshpass -p '{}' {}".format('***', cmd)
cmd = "sshpass -p '{}' {}".format(sshPass, cmd)
# printing
if not cleancmd: cleancmd = cmd
if vfh: vfh.write(cleancmd + '\n')
return cmd
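# The two _decode_* helpers below recursively convert unicode keys and values
# (as produced by json.load under Python 2) into UTF-8 encoded byte strings.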
def _decode_list(data):
rv = []
for item in data:
if isinstance(item, unicode):
item = item.encode('utf-8')
elif isinstance(item, list):
item = _decode_list(item)
elif isinstance(item, dict):
item = _decode_dict(item)
rv.append(item)
return rv
def _decode_dict(data):
rv = {}
for key, value in data.iteritems():
if isinstance(key, unicode):
key = key.encode('utf-8')
if isinstance(value, unicode):
value = value.encode('utf-8')
elif isinstance(value, list):
value = _decode_list(value)
elif isinstance(value, dict):
value = _decode_dict(value)
rv[key] = value
return rv
def get_commit():
cline = subprocess.check_output("git log -n1 --oneline", shell=True)
#print "cline: ", cline
cline = cline.split()
return (cline[0], " ".join(cline[1:]))
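# Illustrative usage of the helpers above (an added sketch; the host below is
# a made-up placeholder, not a real deployment target):
if __name__ == '__main__':
    print(nowstring(sec=False))                        # e.g. '2015-01-23T14:30'
    print(sshify('ls -la', 'user@example.com', None))  # ssh user@example.com "ls -la"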
| # Metarunlog, experiment management tool.
# Author: <NAME>
# Date: 2015-01-23
import datetime
import subprocess
def nowstring(sec=True, ms= False):
tstr = datetime.datetime.now().isoformat()
if not ms:
tstr = tstr.split('.')[0]
if not sec:
tstr = tstr.rsplit(':',1)[0]
return tstr
def sshify(cmd, sshHost, sshPass, vfh=None):
cleancmd = ''
if sshHost:
#cmd = 'ssh -t {} "{}"'.format(sshHost, cmd) #works but messes up terminal
#cmd = 'ssh {} "shopt -s huponexit; {}"'.format(sshHost, cmd) # doesnt work to kill job on exit
cmd = 'ssh {} "{}"'.format(sshHost, cmd)
#TODO use paramiko or pexpect see http://stackoverflow.com/questions/4669204/send-ctrl-c-to-remote-processes-started-via-subprocess-popen-and-ssh
if sshPass:
cleancmd = "sshpass -p '{}' {}".format('***', cmd)
cmd = "sshpass -p '{}' {}".format(sshPass, cmd)
# printing
if not cleancmd: cleancmd = cmd
if vfh: vfh.write(cleancmd + '\n')
return cmd
def _decode_list(data):
rv = []
for item in data:
if isinstance(item, unicode):
item = item.encode('utf-8')
elif isinstance(item, list):
item = _decode_list(item)
elif isinstance(item, dict):
item = _decode_dict(item)
rv.append(item)
return rv
def _decode_dict(data):
rv = {}
for key, value in data.iteritems():
if isinstance(key, unicode):
key = key.encode('utf-8')
if isinstance(value, unicode):
value = value.encode('utf-8')
elif isinstance(value, list):
value = _decode_list(value)
elif isinstance(value, dict):
value = _decode_dict(value)
rv[key] = value
return rv
def get_commit():
cline = subprocess.check_output("git log -n1 --oneline", shell=True)
#print "cline: ", cline
cline = cline.split()
return (cline[0], " ".join(cline[1:])) | en | 0.561969 | # Metarunlog, experiment management tool. # Author: <NAME> # Date: 2015-01-23 #cmd = 'ssh -t {} "{}"'.format(sshHost, cmd) #works but messes up terminal #cmd = 'ssh {} "shopt -s huponexit; {}"'.format(sshHost, cmd) # doesnt work to kill job on exit #TODO use paramiko or pexpect see http://stackoverflow.com/questions/4669204/send-ctrl-c-to-remote-processes-started-via-subprocess-popen-and-ssh # printing #print "cline: ", cline | 2.256916 | 2 |
ex03 - key phrase problem.py | neong83/algorithm_practices | 0 | 6632660 | <reponame>neong83/algorithm_practices
from typing import Dict
text = (
"Suppose we have a set of English text documents "
"and wish to rank which document is most relevant to the query, "
"the brown cow . A simple way to start out is by eliminating "
"documents that do not contain all three words the brown, and cow, "
"but this still leaves many documents"
)
exclude_words = ["the", "a", "by", "to", "and", "of", ",", ".", "is"]
# first attempt
# key in dict is O(1)
def convert_list_into_dict(word_list) -> Dict:
exclusion = {}
for i in word_list:
exclusion[i] = ""
return exclusion
def get_most_repeated_phases(content, exclusion) -> (str, int):
word = None
max_count = 0
exclusion_dict = convert_list_into_dict(exclusion)
word_dicts = {}
for i in content.split(" "):
if i in exclusion_dict.keys():
continue
if i in word_dicts.keys():
word_dicts[i] += 1
else:
word_dicts[i] = 1
if word_dicts[i] > max_count:
max_count = word_dicts[i]
word = i
return (word, max_count)
print(get_most_repeated_phases(text, exclude_words))
# second attempts
word_cout = {}
splited_text = text.replace(",", "").replace(".", "").split(" ")
for w in splited_text:
if w in exclude_words or len(w) == 0: # this is O(n)
continue
key = w.strip().lower()
if key in word_cout.keys():
word_cout[key] += 1
else:
word_cout[key] = 1
for word, count in word_cout.items():
print(f"word={word}, count={count}")
| from typing import Dict
text = (
"Suppose we have a set of English text documents "
"and wish to rank which document is most relevant to the query, "
"the brown cow . A simple way to start out is by eliminating "
"documents that do not contain all three words the brown, and cow, "
"but this still leaves many documents"
)
exclude_words = ["the", "a", "by", "to", "and", "of", ",", ".", "is"]
# first attempt
# key in dict is O(1)
def convert_list_into_dict(word_list) -> Dict:
exclusion = {}
for i in word_list:
exclusion[i] = ""
return exclusion
def get_most_repeated_phases(content, exclusion) -> (str, int):
word = None
max_count = 0
exclusion_dict = convert_list_into_dict(exclusion)
word_dicts = {}
for i in content.split(" "):
if i in exclusion_dict.keys():
continue
if i in word_dicts.keys():
word_dicts[i] += 1
else:
word_dicts[i] = 1
if word_dicts[i] > max_count:
max_count = word_dicts[i]
word = i
return (word, max_count)
print(get_most_repeated_phases(text, exclude_words))
# second attempts
word_cout = {}
splited_text = text.replace(",", "").replace(".", "").split(" ")
for w in splited_text:
if w in exclude_words or len(w) == 0: # this is O(n)
continue
key = w.strip().lower()
if key in word_cout.keys():
word_cout[key] += 1
else:
word_cout[key] = 1
for word, count in word_cout.items():
print(f"word={word}, count={count}") | en | 0.969815 | # first attempt # key in dict is O(1) # second attempts # this is O(n) | 4.124248 | 4 |
torch/optimization/combine_optimization.py | jihuacao/Putil | 1 | 6632661 | <filename>torch/optimization/combine_optimization.py<gh_stars>1-10
# coding=utf-8
#from torch.optim import Optimizer
import torch
class CombineOptimization:
def __init__(self, **optimizations):
self._optimizations = optimizations
pass
def step(self, closure=None):
for index, (k, v) in enumerate(self._optimizations.items()):
v.step()
pass
pass
def load_state_dict(self, state_dict, unexisted_strategy):
for index, (k, v) in enumerate(self._optimizations.items()):
            if k in state_dict:
v.load_state_dict(state_dict[k])
pass
else:
pass
pass
pass | <filename>torch/optimization/combine_optimization.py<gh_stars>1-10
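# Hypothetical usage sketch for CombineOptimization (added illustration; the
# tiny models and learning rates are placeholders, not taken from this repo):
_model_a = torch.nn.Linear(4, 2)
_model_b = torch.nn.Linear(2, 1)
_combined = CombineOptimization(
    backbone=torch.optim.SGD(_model_a.parameters(), lr=0.01),
    head=torch.optim.Adam(_model_b.parameters(), lr=1e-3))
_loss = _model_b(_model_a(torch.randn(8, 4))).mean()
_loss.backward()
_combined.step()  # steps every wrapped optimizer in turn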
# coding=utf-8
#from torch.optim import Optimizer
import torch
class CombineOptimization:
def __init__(self, **optimizations):
self._optimizations = optimizations
pass
def step(self, closure=None):
for index, (k, v) in enumerate(self._optimizations.items()):
v.step()
pass
pass
def load_state_dict(self, state_dict, unexisted_strategy):
for index, (k, v) in enumerate(self._optimizations.items()):
            if k in state_dict:
v.load_state_dict(state_dict[k])
pass
else:
pass
pass
pass | en | 0.377287 | # coding=utf-8 #from torch.optim import Optimizer | 2.375397 | 2 |
scripts/teamforge-import.py | rohankumardubey/allura | 113 | 6632662 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import absolute_import
import logging
from getpass import getpass
from optparse import OptionParser
from tg import tmpl_context as c
import re
import os
from time import mktime
import time
import json
from six.moves.urllib.parse import urlparse
import six.moves.urllib.request
import six.moves.urllib.parse
import six.moves.urllib.error
from six.moves.http_cookiejar import CookieJar
from datetime import datetime
from six.moves.configparser import ConfigParser
import random
import string
import codecs
import sqlalchemy
from suds.client import Client
from ming.orm.ormsession import ThreadLocalORMSession
from ming.base import Object
from allura import model as M
from allura.lib import helpers as h
from allura.lib import utils
import six
from io import open
from six.moves import map
log = logging.getLogger('teamforge-import')
'''
http://help.collab.net/index.jsp?topic=/teamforge520/reference/api-services.html
http://www.open.collab.net/nonav/community/cif/csfe/50/javadoc/index.html?com/collabnet/ce/soap50/webservices/page/package-summary.html
'''
options = None
s = None # security token
client = None # main api client
users = {}
cj = CookieJar()
loggedInOpener = six.moves.urllib.request.build_opener(six.moves.urllib.request.HTTPCookieProcessor(cj))
def make_client(api_url, app):
return Client(api_url + app + '?wsdl', location=api_url + app)
def main():
global options, s, client, users
defaults = dict(
api_url=None,
attachment_url='/sf/%s/do/%s/',
default_wiki_text='PRODUCT NAME HERE',
username=None,
password=<PASSWORD>,
output_dir='teamforge-export/',
list_project_ids=False,
neighborhood=None,
neighborhood_shortname=None,
        skip_thread_import_id_when_reloading=False,
skip_wiki=False,
skip_frs_download=False,
skip_unsupported_check=False)
optparser = get_parser(defaults)
options, project_ids = optparser.parse_args()
if options.config_file:
config = ConfigParser()
config.read(options.config_file)
defaults.update(
(k, eval(v)) for k, v in config.items('teamforge-import'))
optparser = get_parser(defaults)
options, project_ids = optparser.parse_args()
# neither specified, so do both
if not options.extract and not options.load:
options.extract = True
options.load = True
if options.extract:
client = make_client(options.api_url, 'CollabNet')
api_v = client.service.getApiVersion()
if not api_v.startswith('5.4.'):
log.warning('Unexpected API Version %s. May not work correctly.' %
api_v)
s = client.service.login(
options.username, options.password or getpass('Password: '))
teamforge_v = client.service.getVersion(s)
if not teamforge_v.startswith('5.4.'):
log.warning(
'Unexpected TeamForge Version %s. May not work correctly.' %
teamforge_v)
if options.load:
if not options.neighborhood:
log.error('You must specify a neighborhood when loading')
return
try:
nbhd = M.Neighborhood.query.get(name=options.neighborhood)
except Exception:
log.exception('error querying mongo')
log.error(
'This should be run as "paster script production.ini ../scripts/teamforge-import.py -- ...options.."')
return
assert nbhd
if not project_ids:
if not options.extract:
log.error('You must specify project ids')
return
projects = client.service.getProjectList(s)
project_ids = [p.id for p in projects.dataRows]
if options.list_project_ids:
print(' '.join(project_ids))
return
if not os.path.exists(options.output_dir):
os.makedirs(options.output_dir)
for pid in project_ids:
if options.extract:
try:
project = client.service.getProjectData(s, pid)
log.info('Project: %s %s %s' %
(project.id, project.title, project.path))
out_dir = os.path.join(options.output_dir, project.id)
if not os.path.exists(out_dir):
os.mkdir(out_dir)
get_project(project)
get_files(project)
if not options.skip_wiki:
get_homepage_wiki(project)
get_discussion(project)
get_news(project)
if not options.skip_unsupported_check:
check_unsupported_tools(project)
with codecs.open(os.path.join(options.output_dir, 'users.json'), 'w', encoding='utf-8') as user_file:
json.dump(users, user_file, default=str)
except Exception:
log.exception('Error extracting %s' % pid)
if options.load:
try:
project = create_project(pid, nbhd)
except Exception:
log.exception('Error creating %s' % pid)
def load_users():
''' load the users data from file, if it hasn't been already '''
global users
user_filename = os.path.join(options.output_dir, 'users.json')
if not users and os.path.exists(user_filename):
with open(user_filename) as user_file:
# Object for attribute access
users = json.load(user_file, object_hook=Object)
def save_user(usernames):
if isinstance(usernames, six.string_types):
usernames = [usernames]
load_users()
for username in usernames:
if username not in users:
user_data = client.service.getUserData(s, username)
users[username] = Object(user_data)
if users[username].status != 'Active':
log.warn('user: %s status: %s' %
(username, users[username].status))
def get_project(project):
global client
cats = make_client(options.api_url, 'CategorizationApp')
data = client.service.getProjectData(s, project.id)
access_level = {1: 'public', 4: 'private', 3: 'gated community'}[
client.service.getProjectAccessLevel(s, project.id)
]
admins = client.service.listProjectAdmins(s, project.id).dataRows
members = client.service.getProjectMemberList(s, project.id).dataRows
groups = client.service.getProjectGroupList(s, project.id).dataRows
categories = cats.service.getProjectCategories(s, project.id).dataRows
save(json.dumps(dict(
data=dict(data),
access_level=access_level,
admins=list(map(dict, admins)),
members=list(map(dict, members)),
groups=list(map(dict, groups)),
categories=list(map(dict, categories)),
), default=str),
project, project.id + '.json')
if len(groups):
log.warn('Project has groups %s' % groups)
for u in admins:
        if u.status != 'Active':
log.warn('inactive admin %s' % u)
if u.superUser:
log.warn('super user admin %s' % u)
save_user(data.createdBy)
save_user(u.userName for u in admins)
save_user(u.userName for u in members)
def get_user(orig_username):
'returns an allura User object'
sf_username = make_valid_sf_username(orig_username)
u = M.User.by_username(sf_username)
if not u:
load_users()
user = users[orig_username]
if user.status != 'Active':
log.warn('Inactive user %s %s' % (orig_username, user.status))
if not 3 <= len(user.fullName) <= 32:
raise Exception('invalid fullName length: %s' % user.fullName)
if '@' not in user.email:
raise Exception('invalid email: %s' % user.email)
# FIXME: hardcoded SFX integration
from sfx.model import tables as T
nu = T.users.insert()
nu.execute(user_name=sf_username.encode('utf-8'),
email=user.email.lower().encode('utf-8'),
realname=user.fullName.encode('utf-8'),
status='A' if user.status == 'Active' else 'D',
language=275, # english trove id
timezone=user.timeZone,
user_pw=''.join(random.sample(string.printable, 32)),
unix_pw=''.join(random.sample(string.printable, 32)),
user_pw_modtime=int(time.time()),
mail_siteupdates=0,
add_date=int(time.time()),
)
user_id = sqlalchemy.select(
[T.users.c.user_id], T.users.c.user_name == sf_username).execute().fetchone().user_id
npref = T.user_preferences.insert()
npref.execute(user_id=user_id, preference_name='country',
preference_value='US')
npref.execute(user_id=user_id,
preference_name='opt_research', preference_value=0)
npref.execute(user_id=user_id,
preference_name='opt_thirdparty', preference_value=0)
new_audit = T.audit_trail_user.insert()
new_audit.execute(
date=int(time.time()),
username='nobody',
ip_address='(imported)',
operation_resource=user_id,
operation='%s user account created by TeamForge import script' % user.status,
operation_target='',
)
u = M.User.by_username(sf_username)
assert u
return u
def convert_project_shortname(teamforge_path):
'convert from TeamForge to SF, and validate early'
tf_shortname = teamforge_path.split('.')[-1]
sf_shortname = tf_shortname.replace('_', '-')
# FIXME hardcoded translations
sf_shortname = {
'i1': 'motorola-i1',
'i9': 'motorola-i9',
'devplatformforocap': 'ocap-dev-pltfrm',
'sitewide': '--init--',
}.get(sf_shortname, sf_shortname)
if not 3 <= len(sf_shortname) <= 15:
raise ValueError(
'Project name length must be between 3 & 15, inclusive: %s (%s)' %
(sf_shortname, len(sf_shortname)))
return sf_shortname
# FIXME hardcoded
skip_perms_usernames = set([
'username1', 'username2', 'username3'
])
def create_project(pid, nbhd):
M.session.artifact_orm_session._get().skip_mod_date = True
data = loadjson(pid, pid + '.json')
# pprint(data)
log.info('Loading: %s %s %s' % (pid, data.data.title, data.data.path))
shortname = convert_project_shortname(data.data.path)
project = M.Project.query.get(
shortname=shortname, neighborhood_id=nbhd._id)
if not project:
private = (data.access_level == 'private')
log.debug('Creating %s private=%s' % (shortname, private))
one_admin = [
u.userName for u in data.admins if u.status == 'Active'][0]
project = nbhd.register_project(shortname,
get_user(one_admin),
project_name=data.data.title,
private_project=private)
project.notifications_disabled = True
project.short_description = data.data.description
project.last_updated = datetime.strptime(
data.data.lastModifiedDate, '%Y-%m-%d %H:%M:%S')
M.main_orm_session.flush(project)
# TODO: push last_updated to gutenberg?
# TODO: try to set createdDate?
role_admin = M.ProjectRole.by_name('Admin', project)
admin_usernames = set()
for admin in data.admins:
# FIXME: skip non-active users
if admin.userName in skip_perms_usernames:
continue
admin_usernames.add(admin.userName)
user = get_user(admin.userName)
c.user = user
pr = M.ProjectRole.by_user(user, project=project, upsert=True)
pr.roles = [role_admin._id]
ThreadLocalORMSession.flush_all()
role_developer = M.ProjectRole.by_name('Developer', project)
for member in data.members:
# FIXME: skip non-active users
if member.userName in skip_perms_usernames:
continue
if member.userName in admin_usernames:
continue
user = get_user(member.userName)
pr = M.ProjectRole.by_user(user, project=project, upsert=True)
pr.roles = [role_developer._id]
ThreadLocalORMSession.flush_all()
project.labels = [cat.path.split('projects/categorization.root.')[1]
for cat in data.categories]
icon_file = 'emsignia-MOBILITY-red.png'
if 'nsn' in project.labels or 'msi' in project.labels:
icon_file = 'emsignia-SOLUTIONS-blue.gif'
if project.icon:
M.ProjectFile.remove(dict(project_id=project._id, category='icon'))
with open(os.path.join('..', 'scripts', icon_file), 'rb') as fp:
M.ProjectFile.save_image(
icon_file, fp, content_type=utils.guess_mime_type(icon_file),
square=True, thumbnail_size=(48, 48),
thumbnail_meta=dict(project_id=project._id, category='icon'))
ThreadLocalORMSession.flush_all()
dirs = os.listdir(os.path.join(options.output_dir, pid))
frs_mapping = loadjson(pid, 'frs_mapping.json')
if not options.skip_wiki and 'wiki' in dirs:
import_wiki(project, pid, nbhd)
if not options.skip_frs_download and not project.app_instance('downloads'):
project.install_app('Downloads', 'downloads')
if 'forum' in dirs:
import_discussion(project, pid, frs_mapping, shortname, nbhd)
if 'news' in dirs:
import_news(project, pid, frs_mapping, shortname, nbhd)
project.notifications_disabled = False
ThreadLocalORMSession.flush_all()
return project
def import_wiki(project, pid, nbhd):
from forgewiki import model as WM
def upload_attachments(page, pid, beginning):
dirpath = os.path.join(options.output_dir, pid, 'wiki', beginning)
if not os.path.exists(dirpath):
return
files = os.listdir(dirpath)
for f in files:
with open(os.path.join(options.output_dir, pid, 'wiki', beginning, f)) as fp:
page.attach(f, fp, content_type=utils.guess_mime_type(f))
pages = os.listdir(os.path.join(options.output_dir, pid, 'wiki'))
# handle the homepage content
if 'homepage_text.markdown' in pages:
home_app = project.app_instance('home')
h.set_context(project.shortname, 'home', neighborhood=nbhd)
# set permissions and config options
role_admin = M.ProjectRole.by_name('Admin')._id
role_anon = M.ProjectRole.by_name('*anonymous')._id
home_app.config.options['show_discussion'] = False
home_app.config.options['show_left_bar'] = False
home_app.config.options['show_right_bar'] = False
home_app.config.acl = [
M.ACE.allow(role_anon, 'read'),
M.ACE.allow(role_admin, 'create'),
M.ACE.allow(role_admin, 'edit'),
M.ACE.allow(role_admin, 'delete'),
M.ACE.allow(role_admin, 'moderate'),
M.ACE.allow(role_admin, 'configure'),
M.ACE.allow(role_admin, 'admin')]
p = WM.Page.upsert('Home')
p.text = wiki2markdown(load(pid, 'wiki', 'homepage_text.markdown'))
upload_attachments(p, pid, 'homepage')
if 'HomePage.json' in pages and 'HomePage.markdown' in pages:
wiki_app = project.app_instance('wiki')
if not wiki_app:
wiki_app = project.install_app('Wiki', 'wiki')
h.set_context(project.shortname, 'wiki', neighborhood=nbhd)
# set permissions and config options
role_admin = M.ProjectRole.by_name('Admin')._id
role_anon = M.ProjectRole.by_name('*anonymous')._id
wiki_app.config.options['show_discussion'] = False
wiki_app.config.options['show_left_bar'] = False
wiki_app.config.options['show_right_bar'] = False
wiki_app.config.acl = [
M.ACE.allow(role_anon, 'read'),
M.ACE.allow(role_admin, 'create'),
M.ACE.allow(role_admin, 'edit'),
M.ACE.allow(role_admin, 'delete'),
M.ACE.allow(role_admin, 'moderate'),
M.ACE.allow(role_admin, 'configure'),
M.ACE.allow(role_admin, 'admin')]
# make all the wiki pages
for page in pages:
ending = page[-5:]
beginning = page[:-5]
markdown_file = '%s.markdown' % beginning
if '.json' == ending and markdown_file in pages:
page_data = loadjson(pid, 'wiki', page)
content = load(pid, 'wiki', markdown_file)
if page == 'HomePage.json':
globals = WM.Globals.query.get(
app_config_id=wiki_app.config._id)
if globals is not None:
globals.root = page_data.title
else:
globals = WM.Globals(
app_config_id=wiki_app.config._id, root=page_data.title)
p = WM.Page.upsert(page_data.title)
p.text = wiki2markdown(content)
# upload attachments
upload_attachments(p, pid, beginning)
if not p.history().first():
p.commit()
ThreadLocalORMSession.flush_all()
def import_discussion(project, pid, frs_mapping, sf_project_shortname, nbhd):
from forgediscussion import model as DM
discuss_app = project.app_instance('discussion')
if not discuss_app:
discuss_app = project.install_app('Discussion', 'discussion')
h.set_context(project.shortname, 'discussion', neighborhood=nbhd)
assert c.app
# set permissions and config options
role_admin = M.ProjectRole.by_name('Admin')._id
role_developer = M.ProjectRole.by_name('Developer')._id
role_auth = M.ProjectRole.by_name('*authenticated')._id
role_anon = M.ProjectRole.by_name('*anonymous')._id
discuss_app.config.acl = [
M.ACE.allow(role_anon, 'read'),
M.ACE.allow(role_auth, 'post'),
M.ACE.allow(role_auth, 'unmoderated_post'),
M.ACE.allow(role_developer, 'moderate'),
M.ACE.allow(role_admin, 'configure'),
M.ACE.allow(role_admin, 'admin')]
ThreadLocalORMSession.flush_all()
DM.Forum.query.remove(
dict(app_config_id=discuss_app.config._id, shortname='general'))
forums = os.listdir(os.path.join(options.output_dir, pid, 'forum'))
for forum in forums:
ending = forum[-5:]
forum_name = forum[:-5]
if '.json' == ending and forum_name in forums:
forum_data = loadjson(pid, 'forum', forum)
fo = DM.Forum.query.get(
shortname=forum_name, app_config_id=discuss_app.config._id)
if not fo:
fo = DM.Forum(app_config_id=discuss_app.config._id,
shortname=forum_name)
fo.name = forum_data.title
fo.description = forum_data.description
fo_num_topics = 0
fo_num_posts = 0
topics = os.listdir(os.path.join(options.output_dir, pid, 'forum',
forum_name))
for topic in topics:
ending = topic[-5:]
topic_name = topic[:-5]
if '.json' == ending and topic_name in topics:
fo_num_topics += 1
topic_data = loadjson(pid, 'forum', forum_name, topic)
thread_query = dict(
subject=topic_data.title,
discussion_id=fo._id,
app_config_id=discuss_app.config._id)
if not options.skip_thread_import_id_when_reloading:
# temporary/transitional. Just needed the first time
# running with this new code against an existing import
# that didn't have import_ids
thread_query['import_id'] = topic_data.id
to = DM.ForumThread.query.get(**thread_query)
if not to:
to = DM.ForumThread.new(
subject=topic_data.title,
discussion_id=fo._id,
import_id=topic_data.id,
app_config_id=discuss_app.config._id)
to.import_id = topic_data.id
to_num_replies = 0
oldest_post = None
newest_post = None
posts = sorted(
os.listdir(os.path.join(options.output_dir, pid, 'forum', forum_name, topic_name)))
for post in posts:
ending = post[-5:]
post_name = post[:-5]
if '.json' == ending:
to_num_replies += 1
post_data = loadjson(pid, 'forum',
forum_name, topic_name, post)
p = DM.ForumPost.query.get(
_id='%s%s@import' % (
post_name, str(discuss_app.config._id)),
thread_id=to._id,
discussion_id=fo._id,
app_config_id=discuss_app.config._id)
if not p:
p = DM.ForumPost(
_id='%s%s@import' % (
post_name, str(
discuss_app.config._id)),
thread_id=to._id,
discussion_id=fo._id,
app_config_id=discuss_app.config._id)
create_date = datetime.strptime(
post_data.createdDate, '%Y-%m-%d %H:%M:%S')
p.timestamp = create_date
p.author_id = str(
get_user(post_data.createdByUserName)._id)
p.text = convert_post_content(
frs_mapping, sf_project_shortname, post_data.content, nbhd)
p.status = 'ok'
if post_data.replyToId:
p.parent_id = '%s%s@import' % (
post_data.replyToId, str(discuss_app.config._id))
slug, full_slug = p.make_slugs(
parent=p.parent, timestamp=create_date)
p.slug = slug
p.full_slug = full_slug
if oldest_post is None or oldest_post.timestamp > create_date:
oldest_post = p
if newest_post is None or newest_post.timestamp < create_date:
newest_post = p
ThreadLocalORMSession.flush_all()
to.num_replies = to_num_replies
to.first_post_id = oldest_post._id
to.last_post_date = newest_post.timestamp
to.mod_date = newest_post.timestamp
fo_num_posts += to_num_replies
fo.num_topics = fo_num_topics
fo.num_posts = fo_num_posts
ThreadLocalORMSession.flush_all()
def import_news(project, pid, frs_mapping, sf_project_shortname, nbhd):
from forgeblog import model as BM
posts = os.listdir(os.path.join(options.output_dir, pid, 'news'))
if len(posts):
news_app = project.app_instance('news')
if not news_app:
news_app = project.install_app('blog', 'news', mount_label='News')
h.set_context(project.shortname, 'news', neighborhood=nbhd)
# make all the blog posts
for post in posts:
if '.json' == post[-5:]:
post_data = loadjson(pid, 'news', post)
create_date = datetime.strptime(
post_data.createdOn, '%Y-%m-%d %H:%M:%S')
p = BM.BlogPost.query.get(title=post_data.title,
timestamp=create_date,
app_config_id=news_app.config._id)
if not p:
p = BM.BlogPost(title=post_data.title,
timestamp=create_date,
app_config_id=news_app.config._id)
p.text = convert_post_content(
frs_mapping, sf_project_shortname, post_data.body, nbhd)
p.mod_date = create_date
p.state = 'published'
if not p.slug:
p.make_slug()
if not p.history().first():
p.commit()
ThreadLocalORMSession.flush_all()
M.Thread.new(discussion_id=p.app_config.discussion_id,
ref_id=p.index_id(),
subject='%s discussion' % p.title)
user = get_user(post_data.createdByUsername)
p.history().first().author = dict(
id=user._id,
username=user.username,
display_name=user.get_pref('display_name'))
ThreadLocalORMSession.flush_all()
def check_unsupported_tools(project):
docs = make_client(options.api_url, 'DocumentApp')
doc_count = 0
for doc in docs.service.getDocumentFolderList(s, project.id, recursive=True).dataRows:
if doc.title == 'Root Folder':
continue
doc_count += 1
if doc_count:
log.warn('Migrating documents is not supported, but found %s docs' %
doc_count)
scm = make_client(options.api_url, 'ScmApp')
for repo in scm.service.getRepositoryList(s, project.id).dataRows:
log.warn('Migrating SCM repos is not supported, but found %s' %
repo.repositoryPath)
tasks = make_client(options.api_url, 'TaskApp')
task_count = len(
tasks.service.getTaskList(s, project.id, filters=None).dataRows)
if task_count:
log.warn('Migrating tasks is not supported, but found %s tasks' %
task_count)
tracker = make_client(options.api_url, 'TrackerApp')
tracker_count = len(
tracker.service.getArtifactList(s, project.id, filters=None).dataRows)
if tracker_count:
log.warn(
'Migrating trackers is not supported, but found %s tracker artifacts' %
            tracker_count)
def load(project_id, *paths):
in_file = os.path.join(options.output_dir, project_id, *paths)
with open(in_file, encoding='utf-8') as input:
content = input.read()
return content
def loadjson(*args):
# Object for attribute access
return json.loads(load(*args), object_hook=Object)
def save(content, project, *paths):
out_file = os.path.join(options.output_dir, project.id, *paths)
if not os.path.exists(os.path.dirname(out_file)):
os.makedirs(os.path.dirname(out_file))
with codecs.open(out_file, 'w', encoding='utf-8') as out:
        out.write(content)
def download_file(tool, url_path, *filepaths):
if tool == 'wiki':
action = 'viewAttachment'
elif tool == 'frs':
action = 'downloadFile'
else:
raise ValueError('tool %s not supported' % tool)
action_url = options.attachment_url % (tool, action)
out_file = os.path.join(options.output_dir, *filepaths)
if not os.path.exists(os.path.dirname(out_file)):
os.makedirs(os.path.dirname(out_file))
if '://' in url_path:
url = url_path
else:
hostname = urlparse(options.api_url).hostname
scheme = urlparse(options.api_url).scheme
url = scheme + '://' + hostname + action_url + six.moves.urllib.parse.quote(url_path)
log.debug('fetching %s' % url)
resp = loggedInOpener.open(url)
# if not logged in and this is private, you will get an html response instead of the file
# log in to make sure the file should really be html
if resp.headers.type == 'text/html':
# log in and save the file
resp = loggedInOpener.open(scheme + '://' + hostname + "/sf/sfmain/do/login", six.moves.urllib.parse.urlencode({
'username': options.username,
            'password': options.password,
'returnToUrl': url,
'sfsubmit': 'submit'
}))
    with open(out_file, 'wb') as out:
        out.write(resp.fp.read())
return out_file
bracket_macro = re.compile(r'\[(.*?)\]')
h1 = re.compile(r'^!!!', re.MULTILINE)
h2 = re.compile(r'^!!', re.MULTILINE)
h3 = re.compile(r'^!', re.MULTILINE)
re_stats = re.compile(r'#+ .* [Ss]tatistics\n+(.*\[sf:.*?Statistics\].*)+')
def wiki2markdown(markup):
'''
Partial implementation of http://help.collab.net/index.jsp?topic=/teamforge520/reference/wiki-wikisyntax.html
TODO: __ for bold
TODO: quote filenames with spaces, e.g. [[img src="foo bar.jpg"]]
'''
def bracket_handler(matchobj):
snippet = matchobj.group(1)
ext = snippet.rsplit('.')[-1].lower()
# TODO: support [foo|bar.jpg]
if snippet.startswith('sf:'):
# can't handle these macros
return matchobj.group(0)
elif ext in ('jpg', 'gif', 'png'):
filename = snippet.split('/')[-1]
return '[[img src=%s]]' % filename
elif '|' in snippet:
text, link = snippet.split('|', 1)
return '[%s](%s)' % (text, link)
else:
# regular link
return '<%s>' % snippet
markup = bracket_macro.sub(bracket_handler, markup or '')
markup = h1.sub('#', markup)
markup = h2.sub('##', markup)
markup = h3.sub('###', markup)
markup = re_stats.sub('', markup)
return markup
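# Quick reference for the conversions above (derived from the regexes in
# wiki2markdown; illustrative only, not a full TeamForge wiki-syntax spec):
#   '!!!Heading'           -> '#Heading'
#   '!!Heading'            -> '##Heading'
#   '!Heading'             -> '###Heading'
#   '[text |http://x.com]' -> '[text ](http://x.com)'
#   '[path/pic.jpg]'       -> '[[img src=pic.jpg]]'
#   '[http://example.com]' -> '<http://example.com>'
#   '[sf:someMacro]'       -> left unchanged (sf: macros cannot be converted)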
re_rel = re.compile(r'\b(rel\d+)\b')
def convert_post_content(frs_mapping, sf_project_shortname, text, nbhd):
def rel_handler(matchobj):
relno = matchobj.group(1)
path = frs_mapping.get(relno)
if path:
return '<a href="/projects/%s.%s/files/%s">%s</a>' % (
sf_project_shortname, nbhd.url_prefix.strip('/'), path, path)
else:
return relno
text = re_rel.sub(rel_handler, text or '')
return text
def find_image_references(markup):
'yields filenames'
for matchobj in bracket_macro.finditer(markup):
snippet = matchobj.group(1)
ext = snippet.rsplit('.')[-1].lower()
if ext in ('jpg', 'gif', 'png'):
yield snippet
def get_news(project):
'''
Extracts news posts
'''
app = make_client(options.api_url, 'NewsApp')
# find the forums
posts = app.service.getNewsPostList(s, project.id)
for post in posts.dataRows:
save(json.dumps(dict(post), default=str),
project, 'news', post.id + '.json')
save_user(post.createdByUsername)
def get_discussion(project):
'''
Extracts discussion forums and posts
'''
app = make_client(options.api_url, 'DiscussionApp')
# find the forums
forums = app.service.getForumList(s, project.id)
for forum in forums.dataRows:
forumname = forum.path.split('.')[-1]
log.info('Retrieving data for forum: %s' % forumname)
save(json.dumps(dict(forum), default=str), project, 'forum',
forumname + '.json')
# topic in this forum
topics = app.service.getTopicList(s, forum.id)
for topic in topics.dataRows:
save(json.dumps(dict(topic), default=str), project, 'forum',
forumname, topic.id + '.json')
# posts in this topic
posts = app.service.getPostList(s, topic.id)
for post in posts.dataRows:
save(json.dumps(dict(post), default=str), project, 'forum',
forumname, topic.id, post.id + '.json')
save_user(post.createdByUserName)
def get_homepage_wiki(project):
'''
Extracts home page and wiki pages
'''
wiki = make_client(options.api_url, 'WikiApp')
pages = {}
wiki_pages = wiki.service.getWikiPageList(s, project.id)
for wiki_page in wiki_pages.dataRows:
wiki_page = wiki.service.getWikiPageData(s, wiki_page.id)
pagename = wiki_page.path.split('/')[-1]
save(json.dumps(dict(wiki_page), default=str),
project, 'wiki', pagename + '.json')
if not wiki_page.wikiText:
log.debug('skip blank wiki page %s' % wiki_page.path)
continue
pages[pagename] = wiki_page.wikiText
# PageApp does not provide a useful way to determine the Project Home special wiki page
# so use some heuristics
homepage = None
if '$ProjectHome' in pages and options.default_wiki_text not in pages['$ProjectHome']:
homepage = pages.pop('$ProjectHome')
elif 'HomePage' in pages and options.default_wiki_text not in pages['HomePage']:
homepage = pages.pop('HomePage')
elif '$ProjectHome' in pages:
homepage = pages.pop('$ProjectHome')
elif 'HomePage' in pages:
homepage = pages.pop('HomePage')
else:
log.warn('did not find homepage')
if homepage:
save(homepage, project, 'wiki', 'homepage_text.markdown')
for img_ref in find_image_references(homepage):
filename = img_ref.split('/')[-1]
if '://' in img_ref:
img_url = img_ref
else:
img_url = project.path + '/wiki/' + img_ref
download_file('wiki', img_url, project.id,
'wiki', 'homepage', filename)
for path, text in six.iteritems(pages):
if options.default_wiki_text in text:
log.debug('skipping default wiki page %s' % path)
else:
save(text, project, 'wiki', path + '.markdown')
for img_ref in find_image_references(text):
filename = img_ref.split('/')[-1]
if '://' in img_ref:
img_url = img_ref
else:
img_url = project.path + '/wiki/' + img_ref
download_file('wiki', img_url, project.id,
'wiki', path, filename)
def _dir_sql(created_on, project, dir_name, rel_path):
assert options.neighborhood_shortname
if not rel_path:
parent_directory = "'1'"
else:
parent_directory = "(SELECT pfs_path FROM pfs_path WHERE path_name = '%s/')" % rel_path
sql = """
UPDATE pfs
SET file_crtime = '%s'
WHERE source_pk = (SELECT project.project FROM project WHERE project.project_name = '%s.%s')
AND source_table = 'project'
AND pfs_type = 'd'
AND pfs_name = '%s'
AND parent_directory = %s;
""" % (created_on, convert_project_shortname(project.path), options.neighborhood_shortname,
dir_name, parent_directory)
return sql
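# Illustrative example of the statement _dir_sql() renders; the project name,
# neighborhood shortname, directory name and timestamp below are made up, the
# real values come from the extract and the command-line options:
#
#   UPDATE pfs
#   SET file_crtime = '1325376000'
#   WHERE source_pk = (SELECT project.project FROM project WHERE project.project_name = 'myproj.motorola')
#   AND source_table = 'project'
#   AND pfs_type = 'd'
#   AND pfs_name = 'Release 1.0'
#   AND parent_directory = (SELECT pfs_path FROM pfs_path WHERE path_name = 'pkg1/');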
def get_files(project):
frs = make_client(options.api_url, 'FrsApp')
valid_pfs_filename = re.compile(
r'(?![. ])[-_ +.,=#~@!()\[\]a-zA-Z0-9]+(?<! )$')
pfs_output_dir = os.path.join(
os.path.abspath(options.output_dir), 'PFS', convert_project_shortname(project.path))
sql_updates = ''
def handle_path(obj, prev_path):
path_component = obj.title.strip().replace(
'/', ' ').replace('&', '').replace(':', '')
path = os.path.join(prev_path, path_component)
if not valid_pfs_filename.match(path_component):
log.error('Invalid filename: "%s"' % path)
save(json.dumps(dict(obj), default=str),
project, 'frs', path + '.json')
return path
frs_mapping = {}
for pkg in frs.service.getPackageList(s, project.id).dataRows:
pkg_path = handle_path(pkg, '')
pkg_details = frs.service.getPackageData(s, pkg.id) # download count
save(json.dumps(dict(pkg_details), default=str),
project, 'frs', pkg_path + '_details.json')
for rel in frs.service.getReleaseList(s, pkg.id).dataRows:
rel_path = handle_path(rel, pkg_path)
frs_mapping[rel['id']] = rel_path
# download count
rel_details = frs.service.getReleaseData(s, rel.id)
save(json.dumps(dict(rel_details), default=str),
project, 'frs', rel_path + '_details.json')
for file in frs.service.getFrsFileList(s, rel.id).dataRows:
details = frs.service.getFrsFileData(s, file.id)
file_path = handle_path(file, rel_path)
save(json.dumps(dict(file,
lastModifiedBy=details.lastModifiedBy,
lastModifiedDate=details.lastModifiedDate,
),
default=str),
project,
'frs',
file_path + '.json'
)
if not options.skip_frs_download:
download_file('frs', rel.path + '/' + file.id,
pfs_output_dir, file_path)
mtime = int(mktime(details.lastModifiedDate.timetuple()))
os.utime(os.path.join(pfs_output_dir, file_path),
(mtime, mtime))
# releases
created_on = int(mktime(rel.createdOn.timetuple()))
mtime = int(mktime(rel.lastModifiedOn.timetuple()))
if os.path.exists(os.path.join(pfs_output_dir, rel_path)):
os.utime(os.path.join(pfs_output_dir, rel_path),
(mtime, mtime))
sql_updates += _dir_sql(created_on, project,
rel.title.strip(), pkg_path)
# packages
created_on = int(mktime(pkg.createdOn.timetuple()))
mtime = int(mktime(pkg.lastModifiedOn.timetuple()))
if os.path.exists(os.path.join(pfs_output_dir, pkg_path)):
os.utime(os.path.join(pfs_output_dir, pkg_path), (mtime, mtime))
sql_updates += _dir_sql(created_on, project, pkg.title.strip(), '')
# save pfs update sql for this project
with open(os.path.join(options.output_dir, 'pfs_updates.sql'), 'a') as out:
out.write('/* %s */' % project.id)
out.write(sql_updates)
save(json.dumps(frs_mapping), project, 'frs_mapping.json')
def get_parser(defaults):
optparser = OptionParser(
usage=('%prog [--options] [projID projID projID]\n'
'If no project ids are given, all projects will be migrated'))
optparser.set_defaults(**defaults)
# Command-line-only options
optparser.add_option(
'--extract-only', action='store_true', dest='extract',
help='Store data from the TeamForge API on the local filesystem; not load into Allura')
optparser.add_option(
'--load-only', action='store_true', dest='load',
help='Load into Allura previously-extracted data')
optparser.add_option(
'--config-file', dest='config_file',
help='Load options from config file')
# Command-line options with defaults in config file
optparser.add_option(
'--api-url', dest='api_url', help='e.g. https://hostname/ce-soap50/services/')
optparser.add_option(
'--attachment-url', dest='attachment_url')
optparser.add_option(
'--default-wiki-text', dest='default_wiki_text',
help='used in determining if a wiki page text is default or changed')
optparser.add_option(
'-u', '--username', dest='username')
optparser.add_option(
'-p', '--password', dest='password')
optparser.add_option(
'-o', '--output-dir', dest='output_dir')
optparser.add_option(
'--list-project-ids', action='store_true', dest='list_project_ids')
optparser.add_option(
'-n', '--neighborhood', dest='neighborhood',
help='Neighborhood full name, to load in to')
optparser.add_option(
'--n-shortname', dest='neighborhood_shortname',
help='Neighborhood shortname, for PFS extract SQL')
optparser.add_option(
'--skip-thread-import-id-when-reloading', action='store_true',
dest='skip_thread_import_id_when_reloading'
)
optparser.add_option(
'--skip-frs-download', action='store_true', dest='skip_frs_download')
optparser.add_option(
'--skip-wiki', action='store_true', dest='skip_wiki')
optparser.add_option(
'--skip-unsupported-check', action='store_true', dest='skip_unsupported_check')
return optparser
re_username = re.compile(r"^[a-z\-0-9]+$")
def make_valid_sf_username(orig_username):
sf_username = orig_username.replace('_', '-').lower()
# FIXME username translation is hardcoded here:
sf_username = dict(
rlevy='ramilevy',
mkeisler='mkeisler',
bthale='bthale',
mmuller='mattjustmull',
MalcolmDwyer='slagheap',
tjyang='tjyang',
manaic='maniac76',
srinid='cnudav',
es='est016',
david_peyer='david-mmi',
okruse='ottokruse',
jvp='jvpmoto',
dmorelli='dmorelli',
).get(sf_username, sf_username + '-mmi')
if not re_username.match(sf_username):
adjusted_username = ''.join(
ch for ch in sf_username[:-4]
if ch.isalnum() or ch == '-') + '-mmi'
log.error('invalid sf_username characters: %s Changing it to %s',
sf_username, adjusted_username)
sf_username = adjusted_username
if len(sf_username) > 15:
adjusted_username = sf_username[0:15 - 4] + '-mmi'
log.error('invalid sf_username length: %s Changing it to %s',
sf_username, adjusted_username)
sf_username = adjusted_username
return sf_username
if __name__ == '__main__':
logging.basicConfig(level=logging.WARN)
log.setLevel(logging.DEBUG)
main()
def test_make_valid_sf_username():
tests = {
# basic
'foo': 'foo-mmi',
# lookup
'rlevy': 'ramilevy',
# too long
'u012345678901234567890': 'u0123456789-mmi',
'foo^213': 'foo213-mmi'
}
for k, v in six.iteritems(tests):
assert make_valid_sf_username(k) == v
def test_convert_post_content():
nbhd = Object()
nbhd.url_prefix = '/motorola/'
text = '''rel100? or ?rel101 or rel102 or rel103a or rel104'''
mapping = dict(
rel100='rel/100/',
rel101='rel/101/',
rel102='rel/102/',
rel103='rel/103/',
rel104='rel/104/')
converted = convert_post_content(mapping, 'foo', text, nbhd)
assert 'href="/projects/foo.motorola/files/rel/100' in converted, converted
assert 'href="/projects/foo.motorola/files/rel/101' in converted, converted
assert 'href="/projects/foo.motorola/files/rel/102' in converted, converted
assert 'href="/projects/foo.motorola/files/rel/103' not in converted, converted
assert 'href="/projects/foo.motorola/files/rel/104' in converted, converted
def test_convert_markup():
markup = '''
!this is the first headline
Please note that this project is for distributing, discussing, and supporting the open source software we release.
[http://www.google.com]
[SourceForge |http://www.sf.net]
[$ProjectHome/myimage.jpg]
[$ProjectHome/anotherimage.jpg]
!!! Project Statistics
|[sf:frsStatistics]|[sf:artifactStatistics]|
'''
new_markup = wiki2markdown(markup)
assert '\n[[img src=myimage.jpg]]\n[[img src=anotherimage.jpg]]\n' in new_markup
assert '\n###this is the first' in new_markup
assert '<http://www.google.com>' in new_markup
assert '[SourceForge ](http://www.sf.net)' in new_markup
assert '\n# Project Statistics' not in new_markup
assert '[sf:frsStatistics]' not in new_markup
| en | 0.737778 | # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. http://help.collab.net/index.jsp?topic=/teamforge520/reference/api-services.html http://www.open.collab.net/nonav/community/cif/csfe/50/javadoc/index.html?com/collabnet/ce/soap50/webservices/page/package-summary.html # security token # main api client # neither specified, so do both load the users data from file, if it hasn't been already # Object for attribute access # FIXME: hardcoded SFX integration # english trove id # FIXME hardcoded translations # FIXME hardcoded # pprint(data) # TODO: push last_updated to gutenberg? # TODO: try to set createdDate? # FIXME: skip non-active users # FIXME: skip non-active users # handle the homepage content # set permissions and config options # set permissions and config options # make all the wiki pages # upload attachments # set permissions and config options # temporary/transitional. Just needed the first time # running with this new code against an existing import # that didn't have import_ids # make all the blog posts # Object for attribute access # if not logged in and this is private, you will get an html response instead of the file # log in to make sure the file should really be html # log in and save the file Partial implementation of http://help.collab.net/index.jsp?topic=/teamforge520/reference/wiki-wikisyntax.html TODO: __ for bold TODO: quote filenames with spaces, e.g. [[img src="foo bar.jpg"]] # TODO: support [foo|bar.jpg] # can't handle these macros # regular link #', markup) ##', markup) Extracts news posts # find the forums Extracts discussion forums and posts # find the forums # topic in this forum # posts in this topic Extracts home page and wiki pages # PageApp does not provide a useful way to determine the Project Home special wiki page # so use some heuristics UPDATE pfs SET file_crtime = '%s' WHERE source_pk = (SELECT project.project FROM project WHERE project.project_name = '%s.%s') AND source_table = 'project' AND pfs_type = 'd' AND pfs_name = '%s' AND parent_directory = %s; #~@!()\[\]a-zA-Z0-9]+(?<! )$') # download count # download count # releases # packages # save pfs update sql for this project # Command-line-only options # Command-line options with defaults in config file # FIXME username translation is hardcoded here: # basic # lookup # too long rel100? or ?rel101 or rel102 or rel103a or rel104 !this is the first headline Please note that this project is for distributing, discussing, and supporting the open source software we release. [http://www.google.com] [SourceForge |http://www.sf.net] [$ProjectHome/myimage.jpg] [$ProjectHome/anotherimage.jpg] !!! Project Statistics |[sf:frsStatistics]|[sf:artifactStatistics]| ###this is the first' in new_markup # Project Statistics' not in new_markup | 1.52271 | 2 |
judger.py | SkyErnest/legal_basis | 0 | 6632663 | <reponame>SkyErnest/legal_basis<filename>judger.py<gh_stars>0
from math import log
import os
import json
import numpy as np
class Judger:
    # Initialize the Judger with the paths of the accusation list and the law article list
def __init__(self, accusation_path, law_path):
        self.accu_dic = {}
        self.task1_cnt = 0
        with open(accusation_path, "r", encoding='utf-8') as f:
            for line in f:
                self.task1_cnt += 1
                self.accu_dic[line[:-1]] = self.task1_cnt
        self.law_dic = {}
        self.task2_cnt = 0
        with open(law_path, "r", encoding='utf-8') as f:
            for line in f:
                self.task2_cnt += 1
                self.law_dic[int(line[:-1])] = self.task2_cnt
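    # Note on the input files (inferred from the parsing above, not from any
    # external spec): accu.txt is expected to hold one accusation name per line
    # and law.txt one article number per line; line order defines the 1-based
    # class ids stored in accu_dic / law_dic and used throughout the scoring.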
# Format the result generated by the Predictor class
@staticmethod
def format_result(result):
rex = {"accusation": [], "articles": [], "imprisonment": -3}
res_acc = []
for x in result["accusation"]:
if not (x is None):
res_acc.append(int(x))
rex["accusation"] = res_acc
if not (result["imprisonment"] is None):
rex["imprisonment"] = int(result["imprisonment"])
else:
rex["imprisonment"] = -3
res_art = []
for x in result["articles"]:
if not (x is None):
res_art.append(int(x))
rex["articles"] = res_art
return rex
    # Generate new results according to the truth and the user's output
def gen_new_result(self, result, truth, label):
s1 = set(label["accusation"])
s2 = set()
for name in truth["accusation"]:
s2.add(self.accu_dic[name.replace("[", "").replace("]", "")])
for a in range(0, self.task1_cnt):
in1 = (a + 1) in s1
in2 = (a + 1) in s2
if in1:
if in2:
result[0][a]["TP"] += 1
else:
result[0][a]["FP"] += 1
else:
if in2:
result[0][a]["FN"] += 1
else:
result[0][a]["TN"] += 1
s1 = set(label["articles"])
s2 = set()
for name in truth["relevant_articles"]:
s2.add(self.law_dic[name])
for a in range(0, self.task2_cnt):
in1 = (a + 1) in s1
in2 = (a + 1) in s2
if in1:
if in2:
result[1][a]["TP"] += 1
else:
result[1][a]["FP"] += 1
else:
if in2:
result[1][a]["FN"] += 1
else:
result[1][a]["TN"] += 1
result[2]["cnt"] += 1
sc = 0
if truth["term_of_imprisonment"]["death_penalty"]:
if label["imprisonment"] == -2:
sc = 1
elif truth["term_of_imprisonment"]["life_imprisonment"]:
if label["imprisonment"] == -1:
sc = 1
else:
if label["imprisonment"] < 0:
sc = 0
else:
v1 = truth["term_of_imprisonment"]["imprisonment"]
v2 = label["imprisonment"]
v = abs(log(v1 + 1) - log(v2 + 1))
if v <= 0.2:
sc = 1
elif v <= 0.4:
sc = 0.8
elif v <= 0.6:
sc = 0.6
elif v <= 0.8:
sc = 0.4
elif v <= 1.0:
sc = 0.2
else:
sc = 0
sc = sc * 1.0
result[2]["score"] += sc
return result
# Calculate precision, recall and f1 value
# According to https://github.com/dice-group/gerbil/wiki/Precision,-Recall-and-F1-measure
@staticmethod
def get_value(res):
if res["TP"] == 0:
if res["FP"] == 0 and res["FN"] == 0:
precision = 1.0
recall = 1.0
f1 = 1.0
else:
precision = 0.0
recall = 0.0
f1 = 0.0
else:
precision = 1.0 * res["TP"] / (res["TP"] + res["FP"])
recall = 1.0 * res["TP"] / (res["TP"] + res["FN"])
f1 = 2 * precision * recall / (precision + recall)
return precision, recall, f1
# Generate score for the first two subtasks
def gen_score(self, arr):
sumf = 0
y = {"TP": 0, "FP": 0, "FN": 0, "TN": 0}
for x in arr:
p, r, f = self.get_value(x)
sumf += f
for z in x.keys():
y[z] += x[z]
_, __, f_ = self.get_value(y)
return (f_ + sumf * 1.0 / len(arr)) / 2.0
    # Generate all scores
def get_score(self, result):
s1 = self.gen_score(result[0])
s2 = self.gen_score(result[1])
s3 = 1.0 * result[2]["score"] / result[2]["cnt"]
return [s1, s2, s3]
# Test with ground truth path and the user's output path
def test(self, truth_path, output_path):
cnt = 0
result = [[], [], {}]
for a in range(0, self.task1_cnt):
result[0].append({"TP": 0, "FP": 0, "TN": 0, "FN": 0})
for a in range(0, self.task2_cnt):
result[1].append({"TP": 0, "FP": 0, "TN": 0, "FN": 0})
result[2] = {"cnt": 0, "score": 0}
inf = open(truth_path, "r",encoding='utf-8')
ouf = open(output_path, "r",encoding='utf-8')
for line in inf:
ground_truth = json.loads(line)["meta"]
user_output = json.loads(ouf.readline())
cnt += 1
result = self.gen_new_result(result, ground_truth, user_output)
return result
if __name__ == '__main__':
J = Judger('accu.txt', 'law.txt')
res = J.test('data_test.json', 'data_test_predict.json')
total_score = 0
for task_idx in range(2):
TP_micro = 0
FP_micro = 0
FN_micro = 0
f1 = []
for class_idx in range(len(res[task_idx])):
if res[task_idx][class_idx]["TP"] == 0:
f1.append(0)
continue
TP_micro += res[task_idx][class_idx]["TP"]
FP_micro += res[task_idx][class_idx]["FP"]
FN_micro += res[task_idx][class_idx]["FN"]
precision = res[task_idx][class_idx]["TP"] * 1.0 / (res[task_idx][class_idx]["TP"] + res[task_idx][class_idx]["FP"])
recall = res[task_idx][class_idx]["TP"] * 1.0 / (res[task_idx][class_idx]["TP"] + res[task_idx][class_idx]["FN"])
f1.append(2 * precision * recall / (precision + recall))
precision_micro = TP_micro * 1.0 / (TP_micro + FP_micro)
recall_micro = TP_micro * 1.0 / (TP_micro + FN_micro)
F1_micro = 2 * precision_micro * recall_micro / (precision_micro + recall_micro)
F1_macro = np.sum(f1) / len(f1)
total_score += 100.0 * (F1_micro + F1_macro)/2
print('task id: {}, F1_micro: {}, F1_macro: {}, final score: {}'.format(task_idx + 1, F1_micro, F1_macro, 100.0 * (F1_micro + F1_macro)/2))
total_score += res[2]['score'] / res[2]['cnt'] * 100
print('task id: 3, score:{}'.format(res[2]['score'] / res[2]['cnt'] * 100))
print('total score:', total_score)
| from math import log
import os
import json
import numpy as np
class Judger:
# Initialize Judger, with the path of accusation list and law articles list
def __init__(self, accusation_path, law_path):
self.accu_dic = {}
f = open(accusation_path, "r",encoding='utf-8')
self.task1_cnt = 0
for line in f:
self.task1_cnt += 1
self.accu_dic[line[:-1]] = self.task1_cnt
self.law_dic = {}
f = open(law_path, "r",encoding='utf-8')
self.task2_cnt = 0
for line in f:
self.task2_cnt += 1
self.law_dic[int(line[:-1])] = self.task2_cnt
# Format the result generated by the Predictor class
@staticmethod
def format_result(result):
rex = {"accusation": [], "articles": [], "imprisonment": -3}
res_acc = []
for x in result["accusation"]:
if not (x is None):
res_acc.append(int(x))
rex["accusation"] = res_acc
if not (result["imprisonment"] is None):
rex["imprisonment"] = int(result["imprisonment"])
else:
rex["imprisonment"] = -3
res_art = []
for x in result["articles"]:
if not (x is None):
res_art.append(int(x))
rex["articles"] = res_art
return rex
# Gen new results according to the truth and users output
def gen_new_result(self, result, truth, label):
s1 = set(label["accusation"])
s2 = set()
for name in truth["accusation"]:
s2.add(self.accu_dic[name.replace("[", "").replace("]", "")])
for a in range(0, self.task1_cnt):
in1 = (a + 1) in s1
in2 = (a + 1) in s2
if in1:
if in2:
result[0][a]["TP"] += 1
else:
result[0][a]["FP"] += 1
else:
if in2:
result[0][a]["FN"] += 1
else:
result[0][a]["TN"] += 1
s1 = set(label["articles"])
s2 = set()
for name in truth["relevant_articles"]:
s2.add(self.law_dic[name])
for a in range(0, self.task2_cnt):
in1 = (a + 1) in s1
in2 = (a + 1) in s2
if in1:
if in2:
result[1][a]["TP"] += 1
else:
result[1][a]["FP"] += 1
else:
if in2:
result[1][a]["FN"] += 1
else:
result[1][a]["TN"] += 1
result[2]["cnt"] += 1
sc = 0
if truth["term_of_imprisonment"]["death_penalty"]:
if label["imprisonment"] == -2:
sc = 1
elif truth["term_of_imprisonment"]["life_imprisonment"]:
if label["imprisonment"] == -1:
sc = 1
else:
if label["imprisonment"] < 0:
sc = 0
else:
v1 = truth["term_of_imprisonment"]["imprisonment"]
v2 = label["imprisonment"]
v = abs(log(v1 + 1) - log(v2 + 1))
if v <= 0.2:
sc = 1
elif v <= 0.4:
sc = 0.8
elif v <= 0.6:
sc = 0.6
elif v <= 0.8:
sc = 0.4
elif v <= 1.0:
sc = 0.2
else:
sc = 0
sc = sc * 1.0
result[2]["score"] += sc
return result
# Calculate precision, recall and f1 value
# According to https://github.com/dice-group/gerbil/wiki/Precision,-Recall-and-F1-measure
@staticmethod
def get_value(res):
if res["TP"] == 0:
if res["FP"] == 0 and res["FN"] == 0:
precision = 1.0
recall = 1.0
f1 = 1.0
else:
precision = 0.0
recall = 0.0
f1 = 0.0
else:
precision = 1.0 * res["TP"] / (res["TP"] + res["FP"])
recall = 1.0 * res["TP"] / (res["TP"] + res["FN"])
f1 = 2 * precision * recall / (precision + recall)
return precision, recall, f1
# Generate score for the first two subtasks
def gen_score(self, arr):
sumf = 0
y = {"TP": 0, "FP": 0, "FN": 0, "TN": 0}
for x in arr:
p, r, f = self.get_value(x)
sumf += f
for z in x.keys():
y[z] += x[z]
_, __, f_ = self.get_value(y)
return (f_ + sumf * 1.0 / len(arr)) / 2.0
    # Generate all scores
def get_score(self, result):
s1 = self.gen_score(result[0])
s2 = self.gen_score(result[1])
s3 = 1.0 * result[2]["score"] / result[2]["cnt"]
return [s1, s2, s3]
# Test with ground truth path and the user's output path
def test(self, truth_path, output_path):
cnt = 0
result = [[], [], {}]
for a in range(0, self.task1_cnt):
result[0].append({"TP": 0, "FP": 0, "TN": 0, "FN": 0})
for a in range(0, self.task2_cnt):
result[1].append({"TP": 0, "FP": 0, "TN": 0, "FN": 0})
result[2] = {"cnt": 0, "score": 0}
inf = open(truth_path, "r",encoding='utf-8')
ouf = open(output_path, "r",encoding='utf-8')
for line in inf:
ground_truth = json.loads(line)["meta"]
user_output = json.loads(ouf.readline())
cnt += 1
result = self.gen_new_result(result, ground_truth, user_output)
return result
if __name__ == '__main__':
J = Judger('accu.txt', 'law.txt')
res = J.test('data_test.json', 'data_test_predict.json')
total_score = 0
for task_idx in range(2):
TP_micro = 0
FP_micro = 0
FN_micro = 0
f1 = []
for class_idx in range(len(res[task_idx])):
if res[task_idx][class_idx]["TP"] == 0:
f1.append(0)
continue
TP_micro += res[task_idx][class_idx]["TP"]
FP_micro += res[task_idx][class_idx]["FP"]
FN_micro += res[task_idx][class_idx]["FN"]
precision = res[task_idx][class_idx]["TP"] * 1.0 / (res[task_idx][class_idx]["TP"] + res[task_idx][class_idx]["FP"])
recall = res[task_idx][class_idx]["TP"] * 1.0 / (res[task_idx][class_idx]["TP"] + res[task_idx][class_idx]["FN"])
f1.append(2 * precision * recall / (precision + recall))
precision_micro = TP_micro * 1.0 / (TP_micro + FP_micro)
recall_micro = TP_micro * 1.0 / (TP_micro + FN_micro)
F1_micro = 2 * precision_micro * recall_micro / (precision_micro + recall_micro)
F1_macro = np.sum(f1) / len(f1)
total_score += 100.0 * (F1_micro + F1_macro)/2
print('task id: {}, F1_micro: {}, F1_macro: {}, final score: {}'.format(task_idx + 1, F1_micro, F1_macro, 100.0 * (F1_micro + F1_macro)/2))
total_score += res[2]['score'] / res[2]['cnt'] * 100
print('task id: 3, score:{}'.format(res[2]['score'] / res[2]['cnt'] * 100))
print('total score:', total_score) | en | 0.804314 | # Initialize Judger, with the path of accusation list and law articles list # Format the result generated by the Predictor class # Gen new results according to the truth and users output # Calculate precision, recall and f1 value # According to https://github.com/dice-group/gerbil/wiki/Precision,-Recall-and-F1-measure # Generate score for the first two subtasks # Generatue all scores # Test with ground truth path and the user's output path | 3.010003 | 3 |
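A quick aside on the scoring in the row above: its __main__ block mixes macro F1 (the mean of per-class F1) with micro F1 (F1 over the pooled counts). The following standalone sketch of that aggregation uses two made-up per-class count dicts and is an editor-added illustration, not part of the dataset row.

counts = [{"TP": 8, "FP": 2, "FN": 1}, {"TP": 3, "FP": 1, "FN": 3}]

def f1(tp, fp, fn):
    # per-class F1; zero when the class was never predicted correctly
    if tp == 0:
        return 0.0
    precision = tp / (tp + fp)
    recall = tp / (tp + fn)
    return 2 * precision * recall / (precision + recall)

macro = sum(f1(c["TP"], c["FP"], c["FN"]) for c in counts) / len(counts)
micro = f1(sum(c["TP"] for c in counts),
           sum(c["FP"] for c in counts),
           sum(c["FN"] for c in counts))
print(round(macro, 4), round(micro, 4))  # average of per-class F1 vs F1 of pooled counts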
check_data_quality/cc/removern.py | genejiang2012/ETL_tools | 0 | 6632664 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
import sys
import os
import codecs
def remove_wrapping_lines(filePath, fromCode='utf_8_sig', toCode='utf-8'):
"""
    Convert the newlines embedded in the CSV content into a literal \n and save the
    file re-encoded.
    Commonly used encodings:
    gb18030: Chinese encoding
    utf_8: the usual UTF-8 encoding
    utf_8_sig: UTF-8 with BOM
"""
fr = codecs.open(filePath, 'rb', fromCode)
fw = codecs.open(filePath+'.'+'csv', 'wb', toCode)
    # Flag marking whether the current line is still unfinished; False (the default) means it ended normally
needAppend = False
    # Accumulator for the full, reassembled line
res = ''
while True:
        # Read one line
line = fr.readline()
        # End of file reached
if not line:
if needAppend:
fw.write(res+'\n')
break
        # Strip surrounding whitespace and the trailing newline
line = line.strip()
if needAppend:
            # If the previous iteration flagged an unfinished line, append this line's content to it
res += line+'\\n'
else:
            # Otherwise treat this line as a fresh one
res = line
if res[-1:] == '"':
            # If the line ends with a double quote it is considered complete: reset the flag and write it out
needAppend = False
print(res)
fw.write(res+'\n')
else:
            # Otherwise mark the line as unfinished and wait for the next iteration to append to it
needAppend = True
def read_dir(dir_path, ext, fromCode='utf-8', toCode='utf-8'):
for (root, dirs, files) in os.walk(dir_path):
print(dirs)
for filename in files:
if filename[-len(ext):] == ext:
remove_wrapping_lines(os.path.join(root, filename),
fromCode, toCode)
        # os.walk already descends into subdirectories, so they need no separate handling here
if __name__ == "__main__":
if len(sys.argv) != 5:
print("usage: removern.py dirpath ext fromCode toCode ")
exit(1)
read_dir(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4])
print("done")
| #!/usr/bin/env python
# -*- coding:utf-8 -*-
import sys
import os
import codecs
def remove_wrapping_lines(filePath, fromCode='utf_8_sig', toCode='utf-8'):
"""
    Convert the newlines embedded in the CSV content into a literal \n and save the
    file re-encoded.
    Commonly used encodings:
    gb18030: Chinese encoding
    utf_8: the usual UTF-8 encoding
    utf_8_sig: UTF-8 with BOM
"""
fr = codecs.open(filePath, 'rb', fromCode)
fw = codecs.open(filePath+'.'+'csv', 'wb', toCode)
    # Flag marking whether the current line is still unfinished; False (the default) means it ended normally
needAppend = False
    # Accumulator for the full, reassembled line
res = ''
while True:
        # Read one line
line = fr.readline()
        # End of file reached
if not line:
if needAppend:
fw.write(res+'\n')
break
        # Strip surrounding whitespace and the trailing newline
line = line.strip()
if needAppend:
            # If the previous iteration flagged an unfinished line, append this line's content to it
res += line+'\\n'
else:
            # Otherwise treat this line as a fresh one
res = line
if res[-1:] == '"':
            # If the line ends with a double quote it is considered complete: reset the flag and write it out
needAppend = False
print(res)
fw.write(res+'\n')
else:
            # Otherwise mark the line as unfinished and wait for the next iteration to append to it
needAppend = True
def read_dir(dir_path, ext, fromCode='utf-8', toCode='utf-8'):
for (root, dirs, files) in os.walk(dir_path):
print(dirs)
for filename in files:
if filename[-len(ext):] == ext:
remove_wrapping_lines(os.path.join(root, filename),
fromCode, toCode)
        # os.walk already descends into subdirectories, so they need no separate handling here
if __name__ == "__main__":
if len(sys.argv) != 5:
print("usage: removern.py dirpath ext fromCode toCode ")
exit(1)
read_dir(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4])
print("done")
| zh | 0.942556 | #!/usr/bin/env python # -*- coding:utf-8 -*- 将csv文件中的内容中换行符转为\n,并转换编码保存 常用编码类型: gb18030: 中文编码 utf_8: 常用的utf8编码 utf_8_sig: utf8 with bom # 当前行是否未结束的标记,默认False代表正常结束 # 完整行内容变量 # 读取一行 # 如果文件内容结束 # 去除前后空格和行尾换行符 # 如果上一次循环标记行未结束,则把本次循环读取行内容附加到上一行后面 # 否则正常读取本行内容 # 如果当前行尾最后一个字符是双引号,认为是正常结束,标记符设置为False,并写入文件 # 否则标记行未完成,等待下一个循环附加下一行内容 | 3.031715 | 3 |
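The core of the row above is a line-joining rule: keep accumulating physical lines until one ends with a double quote, turning the embedded breaks into a literal \n. A simplified standalone sketch of that idea on a made-up input (the exact placement of the literal \n differs slightly from the original, and no files are touched):

raw = ['1,"first part', 'second part"', '2,"single line"']
fixed, buf = [], ""
for line in raw:
    # append to the pending line if one is open, otherwise start a new one
    buf = buf + "\\n" + line if buf else line
    if line.endswith('"'):
        fixed.append(buf)
        buf = ""
print(fixed)  # ['1,"first part\\nsecond part"', '2,"single line"']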
fastml_engine/exception/infer_exception.py | fast-mlops/fastml-engine | 1 | 6632665 | <reponame>fast-mlops/fastml-engine
class InferException(Exception):
def __init__(self, code=500, message="", *arg):
self.args = arg
self.message = message
self.code = code
if arg:
Exception.__init__(self, code, message, arg)
else:
Exception.__init__(self, code, message)
| class InferException(Exception):
def __init__(self, code=500, message="", *arg):
self.args = arg
self.message = message
self.code = code
if arg:
Exception.__init__(self, code, message, arg)
else:
Exception.__init__(self, code, message) | none | 1 | 3.066533 | 3 |
|
spotify_playlist_additions/playlists/abstract.py | CoolDudde4150/Spotify-Playlist-Additions | 0 | 6632666 | """Contains the abstract interface for a playlist addon"""
from abc import ABC, abstractmethod
from typing import Any
from spotipy import Spotify
class AbstractPlaylist(ABC):
"""An abstract class that a new playlist can inherit callback functions
from. Each frame, any of these may be invoked if the required state is
found.
"""
def __init__(self, spotify_client: Spotify, playlist: dict, user_id: str):
"""The most basic initializer that can be implemented. Any playlist
        implementation needs to take in a Spotify client from spotipy and a playlist
Args:
spotify_client: A client that can be used for making Spotify API
calls.
playlist: The playlist that this runtime has been configured to run on. In the same format as described on
the spotify API
user_id: The user ID that has connected to this runtime.
"""
self._spotify_client = spotify_client
self._playlist = playlist
self._user_id = user_id
@property
def scope(self) -> str:
"""The required scope of the playlist implementation. Should be a
simple property in the child class. Defaults to no scope
Returns:
str: The scope of the playlist.
"""
return ""
@abstractmethod
async def start(self) -> Any:
"""Method called at the start of runtime. Only called once.
"""
@abstractmethod
async def stop(self) -> Any:
"""Method called at the end of runtime. Only called once
"""
@abstractmethod
async def handle_skipped_track(self, track: dict) -> Any:
"""Called on each configured playlist when the main loop detects a
skipped track.
Args:
track: The skipped track retrieved from the Spotify API.
Retains the exact format that Spotify defines in their API.
"""
@abstractmethod
async def handle_fully_listened_track(self, track: dict) -> Any:
"""Called on each configured playlist when the main loop detects a
fully listened track (to within a degree of uncertainty)
Args:
track: The fully listened track retrieved from the Spotify API.
Retains the exact format that Spotify defines in their API
"""
| """Contains the abstract interface for a playlist addon"""
from abc import ABC, abstractmethod
from typing import Any
from spotipy import Spotify
class AbstractPlaylist(ABC):
"""An abstract class that a new playlist can inherit callback functions
from. Each frame, any of these may be invoked if the required state is
found.
"""
def __init__(self, spotify_client: Spotify, playlist: dict, user_id: str):
"""The most basic initializer that can be implemented. Any playlist
        implementation needs to take in a Spotify client from spotipy and a playlist
Args:
spotify_client: A client that can be used for making Spotify API
calls.
playlist: The playlist that this runtime has been configured to run on. In the same format as described on
the spotify API
user_id: The user ID that has connected to this runtime.
"""
self._spotify_client = spotify_client
self._playlist = playlist
self._user_id = user_id
@property
def scope(self) -> str:
"""The required scope of the playlist implementation. Should be a
simple property in the child class. Defaults to no scope
Returns:
str: The scope of the playlist.
"""
return ""
@abstractmethod
async def start(self) -> Any:
"""Method called at the start of runtime. Only called once.
"""
@abstractmethod
async def stop(self) -> Any:
"""Method called at the end of runtime. Only called once
"""
@abstractmethod
async def handle_skipped_track(self, track: dict) -> Any:
"""Called on each configured playlist when the main loop detects a
skipped track.
Args:
track: The skipped track retrieved from the Spotify API.
Retains the exact format that Spotify defines in their API.
"""
@abstractmethod
async def handle_fully_listened_track(self, track: dict) -> Any:
"""Called on each configured playlist when the main loop detects a
fully listened track (to within a degree of uncertainty)
Args:
track: The fully listened track retrieved from the Spotify API.
Retains the exact format that Spotify defines in their API
"""
| en | 0.910665 | Contains the abstract interface for a playlist addon An abstract class that a new playlist can inherit callback functions from. Each frame, any of these may be invoked if the required state is found. The most basic initializer that can be implemented. Any playlist implementation needs take in a Spotify client from spotipy and a playlist Args: spotify_client: A client that can be used for making Spotify API calls. playlist: The playlist that this runtime has been configured to run on. In the same format as described on the spotify API user_id: The user ID that has connected to this runtime. The required scope of the playlist implementation. Should be a simple property in the child class. Defaults to no scope Returns: str: The scope of the playlist. Method called at the start of runtime. Only called once. Method called at the end of runtime. Only called once Called on each configured playlist when the main loop detects a skipped track. Args: track: The skipped track retrieved from the Spotify API. Retains the exact format that Spotify defines in their API. Called on each configured playlist when the main loop detects a fully listened track (to within a degree of uncertainty) Args: track: The fully listened track retrieved from the Spotify API. Retains the exact format that Spotify defines in their API | 3.210902 | 3 |
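A hedged illustration of how the AbstractPlaylist interface above is meant to be filled in. The subclass name, the scope string, and the print-based bodies are all invented for this sketch (it assumes the AbstractPlaylist class above is importable); it is not part of the package:

class LoggingPlaylist(AbstractPlaylist):
    @property
    def scope(self) -> str:
        # example scope value; a real addon would return whatever access it needs
        return "user-read-playback-state"

    async def start(self):
        print("started on playlist", self._playlist.get("name"))

    async def stop(self):
        print("stopped")

    async def handle_skipped_track(self, track: dict):
        print("skipped:", track.get("name"))

    async def handle_fully_listened_track(self, track: dict):
        print("finished:", track.get("name"))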
setup.py | CaliDog/tachikoma | 21 | 6632667 | <filename>setup.py
from setuptools import setup, find_packages
import os
here = os.path.abspath(os.path.dirname(__file__))
with open('requirements.txt') as f:
dependencies = f.read().splitlines()
long_description = """
Tachikoma is a jobs pipeline for connecting to services, processing results, and sending notifications. It handles
all the magic bits like storage and diffing for you, and all you have to do is focus on the meat and potatos of
what you want to do!
"""
setup(
name='tachikoma',
version="1.5",
url='https://github.com/CaliDog/tachikoma/',
author='<NAME>',
install_requires=dependencies,
setup_requires=dependencies,
author_email='<EMAIL>',
description='Tachikoma is an alerting pipeline so smart it\'s scary',
long_description=long_description,
packages=find_packages(),
include_package_data=True,
license = "MIT",
classifiers = [
"Development Status :: 3 - Alpha",
"License :: OSI Approved :: MIT License",
"Topic :: Internet :: WWW/HTTP",
"Operating System :: MacOS :: MacOS X",
"Operating System :: POSIX",
"Programming Language :: Python :: 3",
"Framework :: AsyncIO"
],
) | <filename>setup.py
from setuptools import setup, find_packages
import os
here = os.path.abspath(os.path.dirname(__file__))
with open('requirements.txt') as f:
dependencies = f.read().splitlines()
long_description = """
Tachikoma is a jobs pipeline for connecting to services, processing results, and sending notifications. It handles
all the magic bits like storage and diffing for you, and all you have to do is focus on the meat and potatos of
what you want to do!
"""
setup(
name='tachikoma',
version="1.5",
url='https://github.com/CaliDog/tachikoma/',
author='<NAME>',
install_requires=dependencies,
setup_requires=dependencies,
author_email='<EMAIL>',
description='Tachikoma is an alerting pipeline so smart it\'s scary',
long_description=long_description,
packages=find_packages(),
include_package_data=True,
license = "MIT",
classifiers = [
"Development Status :: 3 - Alpha",
"License :: OSI Approved :: MIT License",
"Topic :: Internet :: WWW/HTTP",
"Operating System :: MacOS :: MacOS X",
"Operating System :: POSIX",
"Programming Language :: Python :: 3",
"Framework :: AsyncIO"
],
) | en | 0.961181 | Tachikoma is a jobs pipeline for connecting to services, processing results, and sending notifications. It handles all the magic bits like storage and diffing for you, and all you have to do is focus on the meat and potatos of what you want to do! | 1.620932 | 2 |
setup.py | rjgpinel/rlbc | 43 | 6632668 | <filename>setup.py<gh_stars>10-100
import shutil
from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
def read_requirements_file(filename):
req_file_path = path.join(path.dirname(path.realpath(__file__)), filename)
with open(req_file_path) as f:
return [line.strip() for line in f]
setup(
name='learning2manipulate',
version='1.0.0',
description='Learning long-horizon robotic manipulations',
packages=find_packages(),
install_requires=read_requirements_file('requirements.txt'))
shutil.copyfile('bc/settings_template.py', 'bc/settings.py')
shutil.copyfile('ppo/settings_template.py', 'ppo/settings.py')
print('In order to make the repo to work, modify bc/settings.py and ppo/settings.py')
| <filename>setup.py<gh_stars>10-100
import shutil
from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
def read_requirements_file(filename):
req_file_path = path.join(path.dirname(path.realpath(__file__)), filename)
with open(req_file_path) as f:
return [line.strip() for line in f]
setup(
name='learning2manipulate',
version='1.0.0',
description='Learning long-horizon robotic manipulations',
packages=find_packages(),
install_requires=read_requirements_file('requirements.txt'))
shutil.copyfile('bc/settings_template.py', 'bc/settings.py')
shutil.copyfile('ppo/settings_template.py', 'ppo/settings.py')
print('In order to make the repo to work, modify bc/settings.py and ppo/settings.py')
| none | 1 | 1.874821 | 2 |
|
pype/plugins/maya/publish/validate_ass_relative_paths.py | kalisp/pype | 0 | 6632669 | <gh_stars>0
import os
import types
import maya.cmds as cmds
import pyblish.api
import pype.api
import pype.hosts.maya.action
class ValidateAssRelativePaths(pyblish.api.InstancePlugin):
"""Ensure exporting ass file has set relative texture paths"""
order = pype.api.ValidateContentsOrder
hosts = ['maya']
families = ['ass']
label = "ASS has relative texture paths"
actions = [pype.api.RepairAction]
def process(self, instance):
        # we cannot ask for this until the user opens the render settings, as
        # `defaultArnoldRenderOptions` doesn't exist yet
try:
relative_texture = cmds.getAttr(
"defaultArnoldRenderOptions.absolute_texture_paths")
relative_procedural = cmds.getAttr(
"defaultArnoldRenderOptions.absolute_procedural_paths")
texture_search_path = cmds.getAttr(
"defaultArnoldRenderOptions.tspath"
)
procedural_search_path = cmds.getAttr(
"defaultArnoldRenderOptions.pspath"
)
except ValueError:
            assert False, ("Cannot validate: render settings were not opened "
                           "yet, so the Arnold settings cannot be validated")
scene_dir, scene_basename = os.path.split(cmds.file(q=True, loc=True))
scene_name, _ = os.path.splitext(scene_basename)
assert self.maya_is_true(relative_texture) is not True, \
("Texture path is set to be absolute")
assert self.maya_is_true(relative_procedural) is not True, \
("Procedural path is set to be absolute")
anatomy = instance.context.data["anatomy"]
# Use project root variables for multiplatform support, see:
# https://docs.arnoldrenderer.com/display/A5AFMUG/Search+Path
# ':' as path separator is supported by Arnold for all platforms.
keys = anatomy.root_environments().keys()
paths = []
for k in keys:
paths.append("[{}]".format(k))
self.log.info("discovered roots: {}".format(":".join(paths)))
assert ":".join(paths) in texture_search_path, (
"Project roots are not in texture_search_path"
)
assert ":".join(paths) in procedural_search_path, (
"Project roots are not in procedural_search_path"
)
@classmethod
def repair(cls, instance):
texture_path = cmds.getAttr("defaultArnoldRenderOptions.tspath")
procedural_path = cmds.getAttr("defaultArnoldRenderOptions.pspath")
# Use project root variables for multiplatform support, see:
# https://docs.arnoldrenderer.com/display/A5AFMUG/Search+Path
# ':' as path separator is supported by Arnold for all platforms.
anatomy = instance.context.data["anatomy"]
keys = anatomy.root_environments().keys()
paths = []
for k in keys:
paths.append("[{}]".format(k))
cmds.setAttr(
"defaultArnoldRenderOptions.tspath",
":".join([p for p in paths + [texture_path] if p]),
type="string"
)
cmds.setAttr(
"defaultArnoldRenderOptions.absolute_texture_paths",
False
)
cmds.setAttr(
"defaultArnoldRenderOptions.pspath",
":".join([p for p in paths + [procedural_path] if p]),
type="string"
)
cmds.setAttr(
"defaultArnoldRenderOptions.absolute_procedural_paths",
False
)
@staticmethod
def find_absolute_path(relative_path, all_root_paths):
for root_path in all_root_paths:
possible_path = os.path.join(root_path, relative_path)
if os.path.exists(possible_path):
return possible_path
def maya_is_true(self, attr_val):
"""
Whether a Maya attr evaluates to True.
When querying an attribute value from an ambiguous object the
Maya API will return a list of values, which need to be properly
handled to evaluate properly.
"""
if isinstance(attr_val, types.BooleanType):
return attr_val
elif isinstance(attr_val, (types.ListType, types.GeneratorType)):
return any(attr_val)
else:
return bool(attr_val)
| import os
import types
import maya.cmds as cmds
import pyblish.api
import pype.api
import pype.hosts.maya.action
class ValidateAssRelativePaths(pyblish.api.InstancePlugin):
"""Ensure exporting ass file has set relative texture paths"""
order = pype.api.ValidateContentsOrder
hosts = ['maya']
families = ['ass']
label = "ASS has relative texture paths"
actions = [pype.api.RepairAction]
def process(self, instance):
        # we cannot ask for this until the user opens the render settings, as
        # `defaultArnoldRenderOptions` doesn't exist yet
try:
relative_texture = cmds.getAttr(
"defaultArnoldRenderOptions.absolute_texture_paths")
relative_procedural = cmds.getAttr(
"defaultArnoldRenderOptions.absolute_procedural_paths")
texture_search_path = cmds.getAttr(
"defaultArnoldRenderOptions.tspath"
)
procedural_search_path = cmds.getAttr(
"defaultArnoldRenderOptions.pspath"
)
except ValueError:
            assert False, ("Cannot validate: render settings were not opened "
                           "yet, so the Arnold settings cannot be validated")
scene_dir, scene_basename = os.path.split(cmds.file(q=True, loc=True))
scene_name, _ = os.path.splitext(scene_basename)
assert self.maya_is_true(relative_texture) is not True, \
("Texture path is set to be absolute")
assert self.maya_is_true(relative_procedural) is not True, \
("Procedural path is set to be absolute")
anatomy = instance.context.data["anatomy"]
# Use project root variables for multiplatform support, see:
# https://docs.arnoldrenderer.com/display/A5AFMUG/Search+Path
# ':' as path separator is supported by Arnold for all platforms.
keys = anatomy.root_environments().keys()
paths = []
for k in keys:
paths.append("[{}]".format(k))
self.log.info("discovered roots: {}".format(":".join(paths)))
assert ":".join(paths) in texture_search_path, (
"Project roots are not in texture_search_path"
)
assert ":".join(paths) in procedural_search_path, (
"Project roots are not in procedural_search_path"
)
@classmethod
def repair(cls, instance):
texture_path = cmds.getAttr("defaultArnoldRenderOptions.tspath")
procedural_path = cmds.getAttr("defaultArnoldRenderOptions.pspath")
# Use project root variables for multiplatform support, see:
# https://docs.arnoldrenderer.com/display/A5AFMUG/Search+Path
# ':' as path separator is supported by Arnold for all platforms.
anatomy = instance.context.data["anatomy"]
keys = anatomy.root_environments().keys()
paths = []
for k in keys:
paths.append("[{}]".format(k))
cmds.setAttr(
"defaultArnoldRenderOptions.tspath",
":".join([p for p in paths + [texture_path] if p]),
type="string"
)
cmds.setAttr(
"defaultArnoldRenderOptions.absolute_texture_paths",
False
)
cmds.setAttr(
"defaultArnoldRenderOptions.pspath",
":".join([p for p in paths + [procedural_path] if p]),
type="string"
)
cmds.setAttr(
"defaultArnoldRenderOptions.absolute_procedural_paths",
False
)
@staticmethod
def find_absolute_path(relative_path, all_root_paths):
for root_path in all_root_paths:
possible_path = os.path.join(root_path, relative_path)
if os.path.exists(possible_path):
return possible_path
def maya_is_true(self, attr_val):
"""
Whether a Maya attr evaluates to True.
When querying an attribute value from an ambiguous object the
Maya API will return a list of values, which need to be properly
handled to evaluate properly.
"""
if isinstance(attr_val, types.BooleanType):
return attr_val
elif isinstance(attr_val, (types.ListType, types.GeneratorType)):
return any(attr_val)
else:
return bool(attr_val) | en | 0.827586 | Ensure exporting ass file has set relative texture paths # we cannot ask this until user open render settings as # `defaultArnoldRenderOptions` doesn't exists # Use project root variables for multiplatform support, see: # https://docs.arnoldrenderer.com/display/A5AFMUG/Search+Path # ':' as path separator is supported by Arnold for all platforms. # Use project root variables for multiplatform support, see: # https://docs.arnoldrenderer.com/display/A5AFMUG/Search+Path # ':' as path separator is supported by Arnold for all platforms. Whether a Maya attr evaluates to True. When querying an attribute value from an ambiguous object the Maya API will return a list of values, which need to be properly handled to evaluate properly. | 2.155078 | 2 |
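The process/repair pair above ultimately just builds a colon-separated Arnold search path out of "[root]" tokens prepended to the existing value. A standalone sketch of that string construction with made-up root names (no Maya or pype APIs involved):

roots = ["work", "publish"]
existing = "/some/old/path"
tokens = ["[{}]".format(k) for k in roots]
search_path = ":".join([p for p in tokens + [existing] if p])
print(search_path)  # [work]:[publish]:/some/old/path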
test/test_pypigeonhole_build/test_app_version_control.py | psilons/pypigeonhole-build | 0 | 6632670 | import unittest
import os
import shutil
import pypigeonhole_build.app_version_control as app_version_control
class FileEditorUtilsTest(unittest.TestCase):
def test_replace_line(self):
curr_path = os.path.dirname(os.path.abspath(__file__))
src_file = os.path.join(curr_path, 'sample_file_edit.txt')
target_file = '/tmp/target_file'
shutil.copyfile(src_file, target_file)
new_str = 'You DO! You LIKE US'
app_version_control.replace_line(target_file, 'I DO! I LIKE THEM', new_str)
with open(target_file, 'r') as f:
lines = f.read()
self.assertTrue(lines.find(new_str) > -1)
os.remove(target_file)
def test_version_inc(self):
self.assertTrue(app_version_control.version_inc_upto10('1.2.3') == '1.2.4')
self.assertTrue(app_version_control.version_inc_upto10('1.2.9') == '1.3.0')
self.assertTrue(app_version_control.version_inc_upto10('1.9.9') == '2.0.0')
self.assertTrue(app_version_control.version_inc_upto100('1.2.3') == '1.2.4')
self.assertTrue(app_version_control.version_inc_upto100('1.2.99') == '1.3.0')
self.assertTrue(app_version_control.version_inc_upto100('1.99.99') == '2.0.0')
self.assertTrue(app_version_control.version_inc_upto10('1.2.3') == '1.2.4')
self.assertTrue(app_version_control.version_inc_inf('1.9.9') == '1.9.10')
self.assertTrue(app_version_control.version_inc_inf('1.9.99') == '1.9.100')
def test_bump_version(self):
tmp_file = '/tmp/test1'
v = '1.2.3'
with open(tmp_file, 'w') as f: # open a tmp file, save 1.2.3 in it.
f.write('__app_version = ' + v)
app_version_control.bump_version_upto10(v, tmp_file) # bump 1.2.3 to 1.2.4
app_version_control.bump_version_upto100('1.2.4', tmp_file) # bump 1.2.4 to 1.2.5
app_version_control.bump_version_inf('1.2.5', tmp_file) # bump 1.2.5 to 1.2.6
with open(tmp_file, 'r') as f:
line = f.readline()
self.assertTrue(line.strip() == '__app_version = "1.2.6"')
# os.remove(tmp_file)
| import unittest
import os
import shutil
import pypigeonhole_build.app_version_control as app_version_control
class FileEditorUtilsTest(unittest.TestCase):
def test_replace_line(self):
curr_path = os.path.dirname(os.path.abspath(__file__))
src_file = os.path.join(curr_path, 'sample_file_edit.txt')
target_file = '/tmp/target_file'
shutil.copyfile(src_file, target_file)
new_str = 'You DO! You LIKE US'
app_version_control.replace_line(target_file, 'I DO! I LIKE THEM', new_str)
with open(target_file, 'r') as f:
lines = f.read()
self.assertTrue(lines.find(new_str) > -1)
os.remove(target_file)
def test_version_inc(self):
self.assertTrue(app_version_control.version_inc_upto10('1.2.3') == '1.2.4')
self.assertTrue(app_version_control.version_inc_upto10('1.2.9') == '1.3.0')
self.assertTrue(app_version_control.version_inc_upto10('1.9.9') == '2.0.0')
self.assertTrue(app_version_control.version_inc_upto100('1.2.3') == '1.2.4')
self.assertTrue(app_version_control.version_inc_upto100('1.2.99') == '1.3.0')
self.assertTrue(app_version_control.version_inc_upto100('1.99.99') == '2.0.0')
self.assertTrue(app_version_control.version_inc_upto10('1.2.3') == '1.2.4')
self.assertTrue(app_version_control.version_inc_inf('1.9.9') == '1.9.10')
self.assertTrue(app_version_control.version_inc_inf('1.9.99') == '1.9.100')
def test_bump_version(self):
tmp_file = '/tmp/test1'
v = '1.2.3'
with open(tmp_file, 'w') as f: # open a tmp file, save 1.2.3 in it.
f.write('__app_version = ' + v)
app_version_control.bump_version_upto10(v, tmp_file) # bump 1.2.3 to 1.2.4
app_version_control.bump_version_upto100('1.2.4', tmp_file) # bump 1.2.4 to 1.2.5
app_version_control.bump_version_inf('1.2.5', tmp_file) # bump 1.2.5 to 1.2.6
with open(tmp_file, 'r') as f:
line = f.readline()
self.assertTrue(line.strip() == '__app_version = "1.2.6"')
# os.remove(tmp_file)
| en | 0.687206 | # open a tmp file, save 1.2.3 in it. # bump 1.2.3 to 1.2.4 # bump 1.2.4 to 1.2.5 # bump 1.2.5 to 1.2.6 # os.remove(tmp_file) | 2.734776 | 3 |
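The tests above pin down a carrying rule for version strings (1.2.9 -> 1.3.0, 1.99.99 -> 2.0.0, and an "inf" variant with no carry). A small standalone re-implementation of that rule with a configurable base, written only to illustrate what the assertions check; it is not the library's actual code:

def version_inc(version, base):
    # increment the patch digit and carry into minor/major once it reaches `base`
    major, minor, patch = (int(x) for x in version.split("."))
    patch += 1
    if patch >= base:
        patch, minor = 0, minor + 1
    if minor >= base:
        minor, major = 0, major + 1
    return "{}.{}.{}".format(major, minor, patch)

assert version_inc("1.2.9", 10) == "1.3.0"
assert version_inc("1.99.99", 100) == "2.0.0"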
SpaceInvaders/SpaceInvaders.py | dimikave/Space-Invaders-Game-Pygame | 0 | 6632671 | <filename>SpaceInvaders/SpaceInvaders.py
import pygame
import os
from pygame.locals import *
from random import randint
from sys import exit
def CenterMessage(screen,surface):
return (screen.get_width() - surface.get_width())/2
def PrepareSound(filename):
sound = pygame.mixer.Sound(filename)
return sound
class Craft(object):
def __init__(self,imagefiles,coord):
self.shape = [pygame.image.load(imagefile) for imagefile in imagefiles]
self.ship_width = self.shape[0].get_width()
self.ship_height = self.shape[0].get_height()
self.rect = pygame.Rect(coord,(self.ship_width, self.ship_height))
self.ship_midwidth = self.ship_width/2
self.firecolor=(255,0,0)
self.firespeed = -800
self.shotlength = 20
def Show(self, surface,imageindex):
surface.blit(self.shape[imageindex],(self.rect[0],self.rect[1]))
def Move(self,speed_x,speed_y,time):
distance_x = time *speed_x
distance_y = time *speed_y
self.rect.move_ip(distance_x,distance_y)
def Fire(self):
shot = Laser((self.rect[0]+self.ship_midwidth,self.rect[1]),self.firecolor,self.shotlength,self.firespeed,self.rect[1],15)
return shot
class SpaceCraft(Craft):
def __init__(self, imagefile,coord,min_coord,max_coord,lasersound):
super(SpaceCraft,self).__init__(imagefile,coord)
self.min_coord = min_coord
self.max_coord = (max_coord[0] - self.ship_width,max_coord[1]-self.ship_height)
self.lasersound = lasersound
def Move(self,speed_x,speed_y,time):
super(SpaceCraft,self).Move(speed_x,speed_y,time)
for i in (0,1):
if self.rect[i]< self.min_coord[i]:
self.rect[i] = self.min_coord[i]
if self.rect[i] > self.max_coord[i]:
self.rect[i]= self.max_coord[i]
def Fire(self):
self.lasersound.play()
return super(SpaceCraft,self).Fire()
class SpaceBackground:
def __init__(self,screenheight,imagefile):
self.shape = pygame.image.load(imagefile)
self.coord = [0,0]
self.coord2 = [0, - screenheight]
self.y_original = self.coord[1]
self.y2_original = self.coord2[1]
def Show(self, surface):
surface.blit(self.shape,self.coord)
surface.blit(self.shape,self.coord2)
def Scroll(self, speed_y, time):
distance_y = speed_y *time
self.coord[1]+= distance_y
self.coord2[1]+=distance_y
if self.coord2[1]>=0:
self.coord[1] = self.y_original
self.coord2[1] = self.y2_original
class Alien(Craft):
def __init__(self, imagefile,coord, speed_x,speed_y):
imagefiles = (imagefile,)
super(Alien,self).__init__(imagefiles,coord)
self.speed_x = speed_x
self.speed_y = speed_y
self.shot_height = 10
self.firebaseline = self.ship_height
self.firecolor=(255,255,0)
self.firespeed = 200
def Move(self, time):
super(Alien, self).Move(self.speed_x,self.speed_x,time)
if self.rect[0]>= 440 or self.rect[0] <=10:
self.speed_x = -self.speed_x
if self.rect[1] <= 10 or self.rect[1]>=440:
self.speed_y = -self.speed_y
def Show(self, surface):
imageindex = 0
super(Alien,self).Show(surface,imageindex)
def Fire(self):
theshot = Laser((self.rect[0]+self.ship_midwidth,self.rect[1]+self.firebaseline),self.firecolor,self.shot_height, self.firespeed,self.rect[1]+self.firebaseline,0)
return theshot
class Laser:
def __init__(self, coord, color, size, speed,refline,voffset):
self.x1 = coord[0]
self.y1 = coord[1]+voffset
self.size = size
self.color= color
self.speed = speed
self.refline = refline
def DistanceTravelled(self):
return abs(self.refline-self.y1)
def Show(self, surface):
pygame.draw.line(surface,self.color,(self.x1, self.y1),(self.x1, self.y1-self.size),3)
def Move(self,time):
distance = self.speed * time
self.y1 += distance
def GoneAbove(self,y):
if self.y1<=y:
return True
else:
return False
def GoneBelow(self,y):
        # counterpart of GoneAbove: the shot has passed below the given y
        return self.y1 >= y
def GetXY(self):
return (self.x1 ,self.y1)
class ScoreBoard:
def __init__(self, x, y, font, fontsize):
self.x = x
self.y = y
self.font = pygame.font.SysFont(font,fontsize)
self.score = 0
def Change(self, amount):
self.score += amount
def Show(self, surface):
scoretext = self.font.render("Score: "+str(self.score), True,(0,0,255))
surface.blit(scoretext,(self.x, self.y))
def GetValue(self):
return self.score
def SetValue(self, score):
self.score = score
class ShieldMeter:
def __init__(self, x, y, maxvalue, warnvalue):
self.x = x
self.y = y
self.maxvalue = maxvalue
self.warnvalue = warnvalue
self.currentvalue = maxvalue
self.shieldcolor = (0,255,0)
def Show(self, surface):
if self.currentvalue < self.warnvalue:
self.shieldcolor = (255,0,0)
else:
self.shieldcolor = (0,255,0)
pygame.draw.rect(surface,self.shieldcolor,(self.x, self.y, self.currentvalue,25))
def Increase(self, amount):
self.currentvalue += amount
if self.currentvalue > self.maxvalue:
self.currentvalue = self.maxvalue
def Decrease(self, amount):
self.currentvalue -= amount
if self.currentvalue < 0:
self.currentvalue = 0
def GetValue(self):
return self.currentvalue
def SetValue(self,value):
self.currentvalue = value
if self.currentvalue > self.maxvalue:
            self.currentvalue = self.maxvalue
if self.currentvalue < 0:
self.currentvalue = 0
def GameOverShow(screen):
font = pygame.font.SysFont("impact",32)
gameovertext1 = font.render("Game Over!",True,(255,255,255))
text_x = CenterMessage(screen, gameovertext1)
screen.blit(gameovertext1,(text_x,280))
gameovertext2 = font.render("Press R to Restart", True,(255,255,255))
text_x = CenterMessage(screen, gameovertext2)
screen.blit(gameovertext2,(text_x,320))
return
def PlayMusic(soundfile):
pygame.mixer.music.load(soundfile)
pygame.mixer.music.play(-1)
def main():
pygame.init()
backspeed = 100
laser = PrepareSound("shoot.wav")
explosion = PrepareSound("invaderkilled.wav")
spaceship_low = (0,0)
screenwidth,screenheight=(480,640)
spaceship_high = (screenwidth,screenheight)
destroyed = PrepareSound("explosion.wav")
shield = ShieldMeter(200,10,250,75)
spaceship_pos = (240,540)
screen = pygame.display.set_mode((screenwidth,screenheight),DOUBLEBUF,32)
pygame.display.set_caption("Pygame Invaders")
pygame.key.set_repeat(1,1)
StarField = SpaceBackground(screenheight,"stars.jpg")
shipimages = ('spaceship2.png', 'spaceship3.png')
SpaceShip = SpaceCraft(shipimages,spaceship_pos,spaceship_low,spaceship_high,laser)
clock = pygame.time.Clock()
framerate = 60
PlayMusic("spaceinvaders.ogg")
firelist = []
alienimage = ('alien1.png','alien2.png','alien3.png','alien4.png','alien5.png')
numofaliens=8
AlienShips = []
alienfirelist = []
laserdownlimit = screenheight - 40
score = ScoreBoard(0,0,"impact",32)
GameOver = False
imageindex = 0
flashcount = 0
while True:
time = clock.tick(framerate)/1000.0
if not AlienShips:
AlienShips = [Alien(alienimage[randint(0,len(alienimage)-1)],[randint(20,screenwidth-80),randint(20,screenheight-140)],randint(100,150),randint(100,150)) for i in range(0,numofaliens)]
StarField.Scroll(backspeed,time)
shipspeed_x = 0
shipspeed_y = 0
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
exit()
if event.type == KEYDOWN:
key = pygame.key.get_pressed()
if key[K_q] :
pygame.quit()
exit()
if key[K_r] and GameOver:
GameOver = False
shield.SetValue(250)
score.SetValue(0)
if key[K_LEFT]:
shipspeed_x= -300
if key[K_RIGHT]:
shipspeed_x = 300
if key[K_UP]:
shipspeed_y= -300
if key[K_DOWN]:
shipspeed_y= 300
if key[K_SPACE] and not GameOver:
if firelist:
if firelist[-1].DistanceTravelled() >= 150 :
firelist.append(SpaceShip.Fire())
else:
firelist.append(SpaceShip.Fire())
if event.type == USEREVENT+1:
if flashcount < 10:
flashcount +=1
if imageindex == 1:
imageindex = 0
else:
imageindex = 1
else:
imageindex = 0
flashcount = 0
pygame.time.set_timer(USEREVENT+1,0)
SpaceShip.Move(shipspeed_x,shipspeed_y,time)
StarField.Show(screen)
SpaceShip.Show(screen,imageindex)
score.Show(screen)
shield.Show(screen)
for AlienShip in AlienShips:
AlienShip.Show(screen)
AlienShip.Move(time)
if randint(0,10)==9:
if alienfirelist:
if alienfirelist[-1].DistanceTravelled()>=100:
alienfirelist.append(AlienShip.Fire())
else:
alienfirelist.append(AlienShip.Fire())
for theshot in alienfirelist:
theshot.Move(time)
theshot.Show(screen)
if theshot.GoneBelow(laserdownlimit):
alienfirelist.remove(theshot)
else:
if SpaceShip.rect.collidepoint(theshot.GetXY()) and not GameOver:
destroyed.play()
shield.Decrease(25)
pygame.time.set_timer(USEREVENT+1,25)
if theshot in alienfirelist:
alienfirelist.remove(theshot)
for theshot in firelist:
theshot.Move(time)
theshot.Show(screen)
if theshot.GoneAbove(0):
firelist.remove(theshot)
else:
for AlienShip in AlienShips:
if AlienShip.rect.collidepoint(theshot.GetXY()):
score.Change(10)
explosion.play()
if score.GetValue()%100 == 0:
shield.Increase(25)
if theshot in firelist:
firelist.remove(theshot)
AlienShips.remove(AlienShip)
if shield.GetValue() ==0:
GameOverShow(screen)
GameOver = True
pygame.display.update()
if __name__=='__main__':
main()
| <filename>SpaceInvaders/SpaceInvaders.py
import pygame
import os
from pygame.locals import *
from random import randint
from sys import exit
def CenterMessage(screen,surface):
return (screen.get_width() - surface.get_width())/2
def PrepareSound(filename):
sound = pygame.mixer.Sound(filename)
return sound
class Craft(object):
def __init__(self,imagefiles,coord):
self.shape = [pygame.image.load(imagefile) for imagefile in imagefiles]
self.ship_width = self.shape[0].get_width()
self.ship_height = self.shape[0].get_height()
self.rect = pygame.Rect(coord,(self.ship_width, self.ship_height))
self.ship_midwidth = self.ship_width/2
self.firecolor=(255,0,0)
self.firespeed = -800
self.shotlength = 20
def Show(self, surface,imageindex):
surface.blit(self.shape[imageindex],(self.rect[0],self.rect[1]))
def Move(self,speed_x,speed_y,time):
distance_x = time *speed_x
distance_y = time *speed_y
self.rect.move_ip(distance_x,distance_y)
def Fire(self):
shot = Laser((self.rect[0]+self.ship_midwidth,self.rect[1]),self.firecolor,self.shotlength,self.firespeed,self.rect[1],15)
return shot
class SpaceCraft(Craft):
def __init__(self, imagefile,coord,min_coord,max_coord,lasersound):
super(SpaceCraft,self).__init__(imagefile,coord)
self.min_coord = min_coord
self.max_coord = (max_coord[0] - self.ship_width,max_coord[1]-self.ship_height)
self.lasersound = lasersound
def Move(self,speed_x,speed_y,time):
super(SpaceCraft,self).Move(speed_x,speed_y,time)
for i in (0,1):
if self.rect[i]< self.min_coord[i]:
self.rect[i] = self.min_coord[i]
if self.rect[i] > self.max_coord[i]:
self.rect[i]= self.max_coord[i]
def Fire(self):
self.lasersound.play()
return super(SpaceCraft,self).Fire()
class SpaceBackground:
def __init__(self,screenheight,imagefile):
self.shape = pygame.image.load(imagefile)
self.coord = [0,0]
self.coord2 = [0, - screenheight]
self.y_original = self.coord[1]
self.y2_original = self.coord2[1]
def Show(self, surface):
surface.blit(self.shape,self.coord)
surface.blit(self.shape,self.coord2)
def Scroll(self, speed_y, time):
distance_y = speed_y *time
self.coord[1]+= distance_y
self.coord2[1]+=distance_y
if self.coord2[1]>=0:
self.coord[1] = self.y_original
self.coord2[1] = self.y2_original
class Alien(Craft):
def __init__(self, imagefile,coord, speed_x,speed_y):
imagefiles = (imagefile,)
super(Alien,self).__init__(imagefiles,coord)
self.speed_x = speed_x
self.speed_y = speed_y
self.shot_height = 10
self.firebaseline = self.ship_height
self.firecolor=(255,255,0)
self.firespeed = 200
def Move(self, time):
super(Alien, self).Move(self.speed_x,self.speed_x,time)
if self.rect[0]>= 440 or self.rect[0] <=10:
self.speed_x = -self.speed_x
if self.rect[1] <= 10 or self.rect[1]>=440:
self.speed_y = -self.speed_y
def Show(self, surface):
imageindex = 0
super(Alien,self).Show(surface,imageindex)
def Fire(self):
theshot = Laser((self.rect[0]+self.ship_midwidth,self.rect[1]+self.firebaseline),self.firecolor,self.shot_height, self.firespeed,self.rect[1]+self.firebaseline,0)
return theshot
class Laser:
def __init__(self, coord, color, size, speed,refline,voffset):
self.x1 = coord[0]
self.y1 = coord[1]+voffset
self.size = size
self.color= color
self.speed = speed
self.refline = refline
def DistanceTravelled(self):
return abs(self.refline-self.y1)
def Show(self, surface):
pygame.draw.line(surface,self.color,(self.x1, self.y1),(self.x1, self.y1-self.size),3)
def Move(self,time):
distance = self.speed * time
self.y1 += distance
def GoneAbove(self,y):
if self.y1<=y:
return True
else:
return False
def GoneBelow(self,y):
        # counterpart of GoneAbove: the shot has passed below the given y
        return self.y1 >= y
def GetXY(self):
return (self.x1 ,self.y1)
class ScoreBoard:
def __init__(self, x, y, font, fontsize):
self.x = x
self.y = y
self.font = pygame.font.SysFont(font,fontsize)
self.score = 0
def Change(self, amount):
self.score += amount
def Show(self, surface):
scoretext = self.font.render("Score: "+str(self.score), True,(0,0,255))
surface.blit(scoretext,(self.x, self.y))
def GetValue(self):
return self.score
def SetValue(self, score):
self.score = score
class ShieldMeter:
def __init__(self, x, y, maxvalue, warnvalue):
self.x = x
self.y = y
self.maxvalue = maxvalue
self.warnvalue = warnvalue
self.currentvalue = maxvalue
self.shieldcolor = (0,255,0)
def Show(self, surface):
if self.currentvalue < self.warnvalue:
self.shieldcolor = (255,0,0)
else:
self.shieldcolor = (0,255,0)
pygame.draw.rect(surface,self.shieldcolor,(self.x, self.y, self.currentvalue,25))
def Increase(self, amount):
self.currentvalue += amount
if self.currentvalue > self.maxvalue:
self.currentvalue = self.maxvalue
def Decrease(self, amount):
self.currentvalue -= amount
if self.currentvalue < 0:
self.currentvalue = 0
def GetValue(self):
return self.currentvalue
def SetValue(self,value):
self.currentvalue = value
if self.currentvalue > self.maxvalue:
            self.currentvalue = self.maxvalue
if self.currentvalue < 0:
self.currentvalue = 0
def GameOverShow(screen):
font = pygame.font.SysFont("impact",32)
gameovertext1 = font.render("Game Over!",True,(255,255,255))
text_x = CenterMessage(screen, gameovertext1)
screen.blit(gameovertext1,(text_x,280))
gameovertext2 = font.render("Press R to Restart", True,(255,255,255))
text_x = CenterMessage(screen, gameovertext2)
screen.blit(gameovertext2,(text_x,320))
return
def PlayMusic(soundfile):
pygame.mixer.music.load(soundfile)
pygame.mixer.music.play(-1)
def main():
pygame.init()
backspeed = 100
laser = PrepareSound("shoot.wav")
explosion = PrepareSound("invaderkilled.wav")
spaceship_low = (0,0)
screenwidth,screenheight=(480,640)
spaceship_high = (screenwidth,screenheight)
destroyed = PrepareSound("explosion.wav")
shield = ShieldMeter(200,10,250,75)
spaceship_pos = (240,540)
screen = pygame.display.set_mode((screenwidth,screenheight),DOUBLEBUF,32)
pygame.display.set_caption("Pygame Invaders")
pygame.key.set_repeat(1,1)
StarField = SpaceBackground(screenheight,"stars.jpg")
shipimages = ('spaceship2.png', 'spaceship3.png')
SpaceShip = SpaceCraft(shipimages,spaceship_pos,spaceship_low,spaceship_high,laser)
clock = pygame.time.Clock()
framerate = 60
PlayMusic("spaceinvaders.ogg")
firelist = []
alienimage = ('alien1.png','alien2.png','alien3.png','alien4.png','alien5.png')
numofaliens=8
AlienShips = []
alienfirelist = []
laserdownlimit = screenheight - 40
score = ScoreBoard(0,0,"impact",32)
GameOver = False
imageindex = 0
flashcount = 0
while True:
time = clock.tick(framerate)/1000.0
if not AlienShips:
AlienShips = [Alien(alienimage[randint(0,len(alienimage)-1)],[randint(20,screenwidth-80),randint(20,screenheight-140)],randint(100,150),randint(100,150)) for i in range(0,numofaliens)]
StarField.Scroll(backspeed,time)
shipspeed_x = 0
shipspeed_y = 0
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
exit()
if event.type == KEYDOWN:
key = pygame.key.get_pressed()
if key[K_q] :
pygame.quit()
exit()
if key[K_r] and GameOver:
GameOver = False
shield.SetValue(250)
score.SetValue(0)
if key[K_LEFT]:
shipspeed_x= -300
if key[K_RIGHT]:
shipspeed_x = 300
if key[K_UP]:
shipspeed_y= -300
if key[K_DOWN]:
shipspeed_y= 300
if key[K_SPACE] and not GameOver:
if firelist:
if firelist[-1].DistanceTravelled() >= 150 :
firelist.append(SpaceShip.Fire())
else:
firelist.append(SpaceShip.Fire())
if event.type == USEREVENT+1:
if flashcount < 10:
flashcount +=1
if imageindex == 1:
imageindex = 0
else:
imageindex = 1
else:
imageindex = 0
flashcount = 0
pygame.time.set_timer(USEREVENT+1,0)
SpaceShip.Move(shipspeed_x,shipspeed_y,time)
StarField.Show(screen)
SpaceShip.Show(screen,imageindex)
score.Show(screen)
shield.Show(screen)
for AlienShip in AlienShips:
AlienShip.Show(screen)
AlienShip.Move(time)
if randint(0,10)==9:
if alienfirelist:
if alienfirelist[-1].DistanceTravelled()>=100:
alienfirelist.append(AlienShip.Fire())
else:
alienfirelist.append(AlienShip.Fire())
for theshot in alienfirelist:
theshot.Move(time)
theshot.Show(screen)
if theshot.GoneBelow(laserdownlimit):
alienfirelist.remove(theshot)
else:
if SpaceShip.rect.collidepoint(theshot.GetXY()) and not GameOver:
destroyed.play()
shield.Decrease(25)
pygame.time.set_timer(USEREVENT+1,25)
if theshot in alienfirelist:
alienfirelist.remove(theshot)
for theshot in firelist:
theshot.Move(time)
theshot.Show(screen)
if theshot.GoneAbove(0):
firelist.remove(theshot)
else:
for AlienShip in AlienShips:
if AlienShip.rect.collidepoint(theshot.GetXY()):
score.Change(10)
explosion.play()
if score.GetValue()%100 == 0:
shield.Increase(25)
if theshot in firelist:
firelist.remove(theshot)
AlienShips.remove(AlienShip)
if shield.GetValue() ==0:
GameOverShow(screen)
GameOver = True
pygame.display.update()
if __name__=='__main__':
main()
| none | 1 | 2.743208 | 3 |
|
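Every movement in the game above is scaled by the frame time returned from clock.tick(framerate)/1000.0, which keeps speeds in pixels per second regardless of frame rate. A tiny standalone illustration of that pattern with made-up numbers (no pygame needed):

framerate = 60
frame_ms = 1000.0 / framerate        # roughly what clock.tick() would return each frame
time = frame_ms / 1000.0             # seconds elapsed per frame
speed_x = 300                        # pixels per second, as used for the ship
distance_per_frame = speed_x * time
print(round(distance_per_frame, 2))  # ~5 px per frame, i.e. ~300 px over one second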
examples/03-sign-key/sign-key.py | winterwolf32/JWT- | 0 | 6632672 | from myjwt.modify_jwt import change_payload
from myjwt.modify_jwt import signature
from myjwt.utils import jwt_to_json
from myjwt.variables import INVALID_SIGNATURE
from myjwt.variables import VALID_SIGNATURE
jwt = "<KEY>"
key = "pentesterlab"
# "header" = {"typ": "JWT", "alg": "HS256"}
# "payload" = {"username": null}
# "signature" = "Tr0VvdP6rVBGBGuI_luxGCOaz6BbhC6IxRTlKOW8UjM"
jwtJson = jwt_to_json(jwt)
jwtJson = change_payload(jwtJson, {"username": "admin"})
# "header" = {"typ": "JWT", "alg": "HS256"}
# "payload" = {"username": "admin"}
# "signature" = "Tr0VvdP6rVBGBGuI_luxGCOaz6BbhC6IxRTlKOW8UjM"
new_jwt = signature(jwtJson, key)
print(jwt)
# verify your jwt
print(
VALID_SIGNATURE
if new_jwt.split(".")[2] == jwt.split(".")[2]
else INVALID_SIGNATURE,
)
| from myjwt.modify_jwt import change_payload
from myjwt.modify_jwt import signature
from myjwt.utils import jwt_to_json
from myjwt.variables import INVALID_SIGNATURE
from myjwt.variables import VALID_SIGNATURE
jwt = "<KEY>"
key = "pentesterlab"
# "header" = {"typ": "JWT", "alg": "HS256"}
# "payload" = {"username": null}
# "signature" = "Tr0VvdP6rVBGBGuI_luxGCOaz6BbhC6IxRTlKOW8UjM"
jwtJson = jwt_to_json(jwt)
jwtJson = change_payload(jwtJson, {"username": "admin"})
# "header" = {"typ": "JWT", "alg": "HS256"}
# "payload" = {"username": "admin"}
# "signature" = "Tr0VvdP6rVBGBGuI_luxGCOaz6BbhC6IxRTlKOW8UjM"
new_jwt = signature(jwtJson, key)
print(jwt)
# verify your jwt
print(
VALID_SIGNATURE
if new_jwt.split(".")[2] == jwt.split(".")[2]
else INVALID_SIGNATURE,
)
| en | 0.509381 | # "header" = {"typ": "JWT", "alg": "HS256"} # "payload" = {"username": null} # "signature" = "Tr0VvdP6rVBGBGuI_luxGCOaz6BbhC6IxRTlKOW8UjM" # "header" = {"typ": "JWT", "alg": "HS256"} # "payload" = {"username": "admin"} # "signature" = "Tr0VvdP6rVBGBGuI_luxGCOaz6BbhC6IxRTlKOW8UjM" # verify your jwt | 2.966941 | 3 |
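The example above re-signs a modified payload and compares signatures. As a hedged, standard-library-only sketch of what an HS256 signature over "header.payload" looks like (this illustrates the idea behind signature() and is not the myjwt implementation itself):

import base64, hashlib, hmac, json

def b64url(data: bytes) -> str:
    # JWT uses URL-safe base64 without padding
    return base64.urlsafe_b64encode(data).rstrip(b"=").decode()

header = b64url(json.dumps({"typ": "JWT", "alg": "HS256"}, separators=(",", ":")).encode())
payload = b64url(json.dumps({"username": "admin"}, separators=(",", ":")).encode())
signing_input = "{}.{}".format(header, payload).encode()
sig = b64url(hmac.new(b"pentesterlab", signing_input, hashlib.sha256).digest())
print("{}.{}.{}".format(header, payload, sig))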
django_functest/tests/views.py | django-functest/django-functest | 71 | 6632673 | from __future__ import absolute_import, print_function, unicode_literals
import uuid
from django import forms
from django.http import HttpResponseRedirect
from django.shortcuts import render
from django.utils.html import mark_safe
from .models import Thing
try:
from django.urls import reverse
except ImportError:
from django.core.urlresolvers import reverse
def test_misc(request):
return render(request, "django_functest/tests/test_misc.html",
{'name': request.session.get('name', None)})
def redirect_to_misc(request):
return HttpResponseRedirect(reverse('django_functest.test_misc'))
def set_sess_foo_to_bar(request):
request.session['foo'] = 'bar'
return render(request, "django_functest/tests/test_misc.html")
class AddSpacersMixin(object):
def __init__(self, add_spacers=False, **kwargs):
super(AddSpacersMixin, self).__init__(**kwargs)
self.add_spacers = add_spacers
def as_p(self):
retval = super(AddSpacersMixin, self).as_p()
if self.add_spacers:
# Hack to help test interacting with elements
# that aren't in view.
retval = mark_safe(retval.replace('</p>', '</p>' + ('<br>' * 100)))
return retval
class ThingForm(AddSpacersMixin, forms.ModelForm):
category = forms.ChoiceField(choices=Thing.CATEGORY_CHOICES,
widget=forms.RadioSelect)
class Meta:
model = Thing
fields = ['name', 'big', 'clever', 'element_type', 'category', 'count', 'description']
class ThingFormWithSelectForCategory(AddSpacersMixin, forms.ModelForm):
class Meta:
model = Thing
fields = ThingForm._meta.fields
# Have separate forms so that we test different form enctype
class ThingFormWithUpload(AddSpacersMixin, forms.ModelForm):
class Meta:
model = Thing
fields = ['name', 'notes_file']
def edit_thing(request, thing_id, with_upload=False):
thing = Thing.objects.get(id=int(thing_id))
add_spacers = 'add_spacers' in request.GET
add_js_delay = int(request.GET.get('add_js_delay', '0'))
if with_upload:
form_class = ThingFormWithUpload
redirect_url = reverse('edit_thing_with_upload', kwargs={'thing_id': thing_id})
else:
select_for_category = 'select_for_category' in request.GET
form_class = ThingFormWithSelectForCategory if select_for_category else ThingForm
redirect_url = reverse('edit_thing', kwargs={'thing_id': thing_id})
if request.method == "POST":
if 'clear' in request.POST:
thing = Thing(id=thing.id, category=Thing.CATEGORY_MAGMA)
thing.save()
return HttpResponseRedirect(reverse('thing_cleared', kwargs={'thing_id': thing_id}))
else:
thing_form = form_class(data=request.POST,
files=request.FILES,
instance=thing,
add_spacers=add_spacers)
if thing_form.is_valid():
thing_form.save()
return HttpResponseRedirect(redirect_url)
else:
thing_form = form_class(instance=thing,
add_spacers=add_spacers)
return render(request, "django_functest/tests/edit_thing.html",
{'thing_form': thing_form,
'thing': thing,
'add_js_delay': add_js_delay,
'upload': with_upload,
})
def edit_thing_with_upload(request, thing_id):
return edit_thing(request, thing_id, with_upload=True)
def list_things(request):
things = Thing.objects.all()
if 'select_thing' in request.GET:
id_list = map(int, request.GET.getlist('select_thing'))
selected_things = things.filter(id__in=id_list)
else:
selected_things = []
return render(request, "django_functest/tests/list_things.html",
{'things': things,
'selected_things': selected_things,
})
def thing_cleared(request, thing_id):
thing = Thing.objects.get(id=int(thing_id))
return render(request, "django_functest/tests/thing_cleared.html",
{'thing': thing})
def new_browser_session_test(request):
if 'UID' in request.session:
uid = request.session['UID']
message = "Welcome back"
else:
uid = uuid.uuid1()
request.session['UID'] = str(uid)
message = "Hello new user"
return render(request, "django_functest/tests/new_browser_session_test.html",
{'uid': uid,
'message': message,
})
| from __future__ import absolute_import, print_function, unicode_literals
import uuid
from django import forms
from django.http import HttpResponseRedirect
from django.shortcuts import render
from django.utils.html import mark_safe
from .models import Thing
try:
from django.urls import reverse
except ImportError:
from django.core.urlresolvers import reverse
def test_misc(request):
return render(request, "django_functest/tests/test_misc.html",
{'name': request.session.get('name', None)})
def redirect_to_misc(request):
return HttpResponseRedirect(reverse('django_functest.test_misc'))
def set_sess_foo_to_bar(request):
request.session['foo'] = 'bar'
return render(request, "django_functest/tests/test_misc.html")
class AddSpacersMixin(object):
def __init__(self, add_spacers=False, **kwargs):
super(AddSpacersMixin, self).__init__(**kwargs)
self.add_spacers = add_spacers
def as_p(self):
retval = super(AddSpacersMixin, self).as_p()
if self.add_spacers:
# Hack to help test interacting with elements
# that aren't in view.
retval = mark_safe(retval.replace('</p>', '</p>' + ('<br>' * 100)))
return retval
class ThingForm(AddSpacersMixin, forms.ModelForm):
category = forms.ChoiceField(choices=Thing.CATEGORY_CHOICES,
widget=forms.RadioSelect)
class Meta:
model = Thing
fields = ['name', 'big', 'clever', 'element_type', 'category', 'count', 'description']
class ThingFormWithSelectForCategory(AddSpacersMixin, forms.ModelForm):
class Meta:
model = Thing
fields = ThingForm._meta.fields
# Have separate forms so that we test different form enctype
class ThingFormWithUpload(AddSpacersMixin, forms.ModelForm):
class Meta:
model = Thing
fields = ['name', 'notes_file']
def edit_thing(request, thing_id, with_upload=False):
thing = Thing.objects.get(id=int(thing_id))
add_spacers = 'add_spacers' in request.GET
add_js_delay = int(request.GET.get('add_js_delay', '0'))
if with_upload:
form_class = ThingFormWithUpload
redirect_url = reverse('edit_thing_with_upload', kwargs={'thing_id': thing_id})
else:
select_for_category = 'select_for_category' in request.GET
form_class = ThingFormWithSelectForCategory if select_for_category else ThingForm
redirect_url = reverse('edit_thing', kwargs={'thing_id': thing_id})
if request.method == "POST":
if 'clear' in request.POST:
thing = Thing(id=thing.id, category=Thing.CATEGORY_MAGMA)
thing.save()
return HttpResponseRedirect(reverse('thing_cleared', kwargs={'thing_id': thing_id}))
else:
thing_form = form_class(data=request.POST,
files=request.FILES,
instance=thing,
add_spacers=add_spacers)
if thing_form.is_valid():
thing_form.save()
return HttpResponseRedirect(redirect_url)
else:
thing_form = form_class(instance=thing,
add_spacers=add_spacers)
return render(request, "django_functest/tests/edit_thing.html",
{'thing_form': thing_form,
'thing': thing,
'add_js_delay': add_js_delay,
'upload': with_upload,
})
def edit_thing_with_upload(request, thing_id):
return edit_thing(request, thing_id, with_upload=True)
def list_things(request):
things = Thing.objects.all()
if 'select_thing' in request.GET:
id_list = map(int, request.GET.getlist('select_thing'))
selected_things = things.filter(id__in=id_list)
else:
selected_things = []
return render(request, "django_functest/tests/list_things.html",
{'things': things,
'selected_things': selected_things,
})
def thing_cleared(request, thing_id):
thing = Thing.objects.get(id=int(thing_id))
return render(request, "django_functest/tests/thing_cleared.html",
{'thing': thing})
def new_browser_session_test(request):
if 'UID' in request.session:
uid = request.session['UID']
message = "Welcome back"
else:
uid = uuid.uuid1()
request.session['UID'] = str(uid)
message = "Hello new user"
return render(request, "django_functest/tests/new_browser_session_test.html",
{'uid': uid,
'message': message,
})
| en | 0.920762 | # Hack to help test interacting with elements # that aren't in view. # Have separate forms so that we test different form enctype | 2.173245 | 2 |
Semester 6/DWM/page_rank.py | atharva8300/Engineering-Practical-Experiments | 7 | 6632674 | <filename>Semester 6/DWM/page_rank.py
import string
LETTERS = string.ascii_uppercase
graph = [
[0, 0, 1, 1, 1, 0, 0],
[0, 1, 0, 1, 1, 0, 1],
[0, 1, 1, 0, 0, 1, 0],
[1, 0, 0, 1, 1, 1, 0],
[1, 0, 1, 0, 0, 1, 0],
[1, 1, 0, 0, 1, 0, 1],
[1, 1, 1, 0, 1, 0, 0],
]
class Node:
def __init__(self, name):
self.name = name
self.inbound = []
self.outbound = []
def add_inbound(self, node):
self.inbound.append(node)
def add_outbound(self, node):
self.outbound.append(node)
def __repr__(self):
return f"Node {self.name}: Inbound: {self.inbound} ; Outbound: {self.outbound}"
def page_rank(nodes, limit=20, d=0.85):
    num_nodes = len(nodes)
    ranks = {}
for node in nodes:
ranks[node.name] = 1
outbounds = {}
for node in nodes:
outbounds[node.name] = len(node.outbound)
last_iteration_ranks = ranks.copy()
i = 0
while True:
print(f"======= Iteration {i + 1} =======")
for j, node in enumerate(nodes):
ranks[node.name] = round((1 - d) / num_nodes + d * sum(
[ranks[ib] / outbounds[ib] for ib in node.inbound]
), 5)
ranks = dict(
sorted(ranks.items(), key=lambda item: item[1], reverse=True))
if ranks == last_iteration_ranks:
print("Page ranks converged.")
print(ranks)
break
else:
last_iteration_ranks = ranks.copy()
print(ranks)
i += 1
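# Update rule applied above for every node p (d = damping factor, N = number of nodes):
#   PR(p) = (1 - d) / N + d * sum(PR(q) / out_degree(q) for each node q with an edge to p)
# The loop stops once the ranks (rounded to 5 decimals) no longer change between iterations.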
def main():
num_nodes = len(graph)
names = list(LETTERS[:num_nodes])
nodes = [Node(name) for name in names]
for ri, row in enumerate(graph):
for ci, col in enumerate(row):
if col == 1:
nodes[ci].add_inbound(names[ri])
nodes[ri].add_outbound(names[ci])
print("======= Nodes =======")
for node in nodes:
print(node)
page_rank(nodes)
if __name__ == "__main__":
num_nodes = len(graph)
main()
| <filename>Semester 6/DWM/page_rank.py
import string
LETTERS = string.ascii_uppercase
graph = [
[0, 0, 1, 1, 1, 0, 0],
[0, 1, 0, 1, 1, 0, 1],
[0, 1, 1, 0, 0, 1, 0],
[1, 0, 0, 1, 1, 1, 0],
[1, 0, 1, 0, 0, 1, 0],
[1, 1, 0, 0, 1, 0, 1],
[1, 1, 1, 0, 1, 0, 0],
]
class Node:
def __init__(self, name):
self.name = name
self.inbound = []
self.outbound = []
def add_inbound(self, node):
self.inbound.append(node)
def add_outbound(self, node):
self.outbound.append(node)
def __repr__(self):
return f"Node {self.name}: Inbound: {self.inbound} ; Outbound: {self.outbound}"
def page_rank(nodes, limit=20, d=0.85):
    num_nodes = len(nodes)
    ranks = {}
for node in nodes:
ranks[node.name] = 1
outbounds = {}
for node in nodes:
outbounds[node.name] = len(node.outbound)
last_iteration_ranks = ranks.copy()
i = 0
while True:
print(f"======= Iteration {i + 1} =======")
for j, node in enumerate(nodes):
ranks[node.name] = round((1 - d) / num_nodes + d * sum(
[ranks[ib] / outbounds[ib] for ib in node.inbound]
), 5)
ranks = dict(
sorted(ranks.items(), key=lambda item: item[1], reverse=True))
if ranks == last_iteration_ranks:
print("Page ranks converged.")
print(ranks)
break
else:
last_iteration_ranks = ranks.copy()
print(ranks)
i += 1
def main():
num_nodes = len(graph)
names = list(LETTERS[:num_nodes])
nodes = [Node(name) for name in names]
for ri, row in enumerate(graph):
for ci, col in enumerate(row):
if col == 1:
nodes[ci].add_inbound(names[ri])
nodes[ri].add_outbound(names[ci])
print("======= Nodes =======")
for node in nodes:
print(node)
page_rank(nodes)
if __name__ == "__main__":
num_nodes = len(graph)
main()
| none | 1 | 3.157407 | 3 |
|
rofl/config/__init__.py | Bobobert/RoLas | 2 | 6632675 | from .config import createConfig, createAgent, createPolicy, getTrainFun, getEnvMaker, createNetwork
from .experimentScrips import setUpExperiment, loadExperiment
| from .config import createConfig, createAgent, createPolicy, getTrainFun, getEnvMaker, createNetwork
from .experimentScrips import setUpExperiment, loadExperiment
| none | 1 | 1.011598 | 1 |
|
fuelrat_hexchat.py | Guntereno/FuelRatHelper | 0 | 6632676 | import datetime
import hexchat
import json
import os
import sys
from tkinter import Tk
__module_name__ = "fuelrat_helper_hexchat"
__module_version__ = "1.0"
__module_description__ = "Fuel Rat Helper"
# The script folder isn't necessarily in the search path when running through HexChat
path = os.path.join(hexchat.get_info("configdir"), "addons")
if path not in sys.path:
sys.path.append(path)
import rat_lib
_clipping_enabled = True
def copy_to_clipboard(line):
global _clipping_enabled
if _clipping_enabled:
r = Tk()
r.withdraw()
r.clipboard_clear()
r.clipboard_append(line)
r.update()
r.destroy()
def handle_privmsg(word, word_eol, userdata):
try:
sender = word[0]
recipient = word[2]
message = word_eol[3]
if (recipient == "#fuelrats") and ("MechaSqueak[BOT]" in sender):
# Dump json to the logs
event_json = json.dumps({"sender": sender, "recipient": recipient, "message": message})
rat_lib.append_to_log(event_json)
# Parse a case data from the message
case_data = rat_lib.parse_ratsignal(message)
# If found, dump the data and copy system to clipboard
if case_data:
copy_to_clipboard(case_data["system"])
except Exception as e:
        error_str = "EXCEPTION: " + str(e)
hexchat.prnt(error_str)
rat_lib.append_to_log(error_str)
return hexchat.EAT_NONE
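# Note on the indexing above: for a hooked PRIVMSG server event HexChat passes the raw IRC
# tokens, roughly word = [':sender!user@host', 'PRIVMSG', '#channel', ':message', ...] and
# word_eol[i] = everything from word[i] to the end of the line, hence sender = word[0],
# recipient = word[2] and message = word_eol[3].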
def parse_bool(word):
uword = word.upper()
if(uword == "TRUE"):
val = True
elif(uword == "FALSE"):
val = False
else:
raise Exception()
return val
def set_logging_enabled(val):
val = rat_lib.set_logging_enabled(val)
if(val):
hexchat.prnt("Logging to '" + rat_lib.log_path())
else:
hexchat.prnt("Logging disabled")
def handle_fr_log(word, word_eol, userdata):
try:
if len(word) < 2:
raise Exception()
val = parse_bool(word[1])
set_logging_enabled(val)
except:
print("Usage: /fr_log <'true'/'false'> (Current: '" + str(rat_lib.get_logging_enabled()) + "')")
def handle_fr_clip(word, word_eol, userdata):
try:
global _clipping_enabled
if len(word) < 2:
raise Exception()
_clipping_enabled = parse_bool(word[1])
hexchat.prnt("Clipping " + ("enabled" if _clipping_enabled else "disabled"))
except:
print("Usage: /fr_clip <'true'/'false'> (Current: '" + str(_clipping_enabled) + "')")
hexchat.hook_server("PRIVMSG", handle_privmsg)
hexchat.hook_command("FR_LOG", handle_fr_log, help=" /fr_log <'true'/'false'>: Enable or disable FuelRat helper logging.")
hexchat.hook_command("FR_CLIP", handle_fr_clip, help=" /fr_clip <'true'/'false'>: Enable or disable copying system name to clipboard.")
#set_logging_enabled(True)
| import datetime
import hexchat
import json
import os
import sys
from tkinter import Tk
__module_name__ = "fuelrat_helper_hexchat"
__module_version__ = "1.0"
__module_description__ = "Fuel Rat Helper"
# The script folder isn't necessarily in the search path when running through HexChat
path = os.path.join(hexchat.get_info("configdir"), "addons")
if path not in sys.path:
sys.path.append(path)
import rat_lib
_clipping_enabled = True
def copy_to_clipboard(line):
global _clipping_enabled
if _clipping_enabled:
r = Tk()
r.withdraw()
r.clipboard_clear()
r.clipboard_append(line)
r.update()
r.destroy()
def handle_privmsg(word, word_eol, userdata):
try:
sender = word[0]
recipient = word[2]
message = word_eol[3]
if (recipient == "#fuelrats") and ("MechaSqueak[BOT]" in sender):
# Dump json to the logs
event_json = json.dumps({"sender": sender, "recipient": recipient, "message": message})
rat_lib.append_to_log(event_json)
# Parse a case data from the message
case_data = rat_lib.parse_ratsignal(message)
# If found, dump the data and copy system to clipboard
if case_data:
copy_to_clipboard(case_data["system"])
except Exception as e:
        error_str = "EXCEPTION: " + str(e)
hexchat.prnt(error_str)
rat_lib.append_to_log(error_str)
return hexchat.EAT_NONE
def parse_bool(word):
uword = word.upper()
if(uword == "TRUE"):
val = True
elif(uword == "FALSE"):
val = False
else:
raise Exception()
return val
def set_logging_enabled(val):
val = rat_lib.set_logging_enabled(val)
if(val):
hexchat.prnt("Logging to '" + rat_lib.log_path())
else:
hexchat.prnt("Logging disabled")
def handle_fr_log(word, word_eol, userdata):
try:
if len(word) < 2:
raise Exception()
val = parse_bool(word[1])
set_logging_enabled(val)
except:
print("Usage: /fr_log <'true'/'false'> (Current: '" + str(rat_lib.get_logging_enabled()) + "')")
def handle_fr_clip(word, word_eol, userdata):
try:
global _clipping_enabled
if len(word) < 2:
raise Exception()
_clipping_enabled = parse_bool(word[1])
hexchat.prnt("Clipping " + ("enabled" if _clipping_enabled else "disabled"))
except:
print("Usage: /fr_clip <'true'/'false'> (Current: '" + str(_clipping_enabled) + "')")
hexchat.hook_server("PRIVMSG", handle_privmsg)
hexchat.hook_command("FR_LOG", handle_fr_log, help=" /fr_log <'true'/'false'>: Enable or disable FuelRat helper logging.")
hexchat.hook_command("FR_CLIP", handle_fr_clip, help=" /fr_clip <'true'/'false'>: Enable or disable copying system name to clipboard.")
#set_logging_enabled(True)
| en | 0.741862 | # The script folder isn't necessarily in the search path when running through HexChat # Dump json to the logs # Parse a case data from the message # If found, dump the data and copy system to clipboard #set_logging_enabled(True) | 2.172642 | 2 |
pandas_redshift/core.py | thcborges/pandas_redshift | 0 | 6632677 | #!/usr/bin/env python3
from io import StringIO
import pandas as pd
import traceback
import psycopg2
import boto3
import sys
import os
import re
S3_ACCEPTED_KWARGS = [
'ACL', 'Body', 'CacheControl ', 'ContentDisposition', 'ContentEncoding', 'ContentLanguage',
'ContentLength', 'ContentMD5', 'ContentType', 'Expires', 'GrantFullControl', 'GrantRead',
'GrantReadACP', 'GrantWriteACP', 'Metadata', 'ServerSideEncryption', 'StorageClass',
'WebsiteRedirectLocation', 'SSECustomerAlgorithm', 'SSECustomerKey', 'SSECustomerKeyMD5',
'SSEKMSKeyId', 'RequestPayer', 'Tagging'
] # Available parameters for service: https://boto3.readthedocs.io/en/latest/reference/services/s3.html#S3.Client.put_object
def connect_to_redshift(dbname, host, user, port = 5439, **kwargs):
global connect, cursor
connect = psycopg2.connect(dbname = dbname,
host = host,
port = port,
user = user,
**kwargs)
cursor = connect.cursor()
def connect_to_s3(aws_access_key_id, aws_secret_access_key, bucket, subdirectory=None, aws_iam_role=None, **kwargs):
global s3, s3_bucket_var, s3_subdirectory_var, aws_1, aws_2, aws_token, aws_role
s3 = boto3.resource('s3',
aws_access_key_id = aws_access_key_id,
aws_secret_access_key = aws_secret_access_key,
**kwargs)
s3_bucket_var = bucket
if subdirectory is None:
s3_subdirectory_var = ''
else:
s3_subdirectory_var = subdirectory + '/'
aws_1 = aws_access_key_id
aws_2 = aws_secret_access_key
aws_role = aws_iam_role
if kwargs.get('aws_session_token'):
aws_token = kwargs.get('aws_session_token')
else:
aws_token = ''
def redshift_to_pandas(sql_query):
# pass a sql query and return a pandas dataframe
cursor.execute(sql_query)
columns_list = [desc[0] for desc in cursor.description]
data = pd.DataFrame(cursor.fetchall(), columns = columns_list)
return data
def validate_column_names(data_frame):
"""Validate the column names to ensure no reserved words are used.
Arguments:
dataframe pd.data_frame -- data to validate
"""
rrwords = open(os.path.join(os.path.dirname(__file__),
'redshift_reserve_words.txt'), 'r').readlines()
rrwords = [r.strip().lower() for r in rrwords]
data_frame.columns = [x.lower() for x in data_frame.columns]
for col in data_frame.columns:
try:
assert col not in rrwords
except:
raise ValueError(
'DataFrame column name {0} is a reserve word in redshift'
.format(col))
# check for spaces in the column names
    there_are_spaces = sum([re.search(r'\s', x) is not None for x in data_frame.columns]) > 0
# delimit them if there are
if there_are_spaces:
col_names_dict = {x:'"{0}"'.format(x) for x in data_frame.columns}
data_frame.rename(columns = col_names_dict, inplace = True)
return data_frame
def df_to_s3(data_frame, csv_name, index, save_local, delimiter, **kwargs):
"""Write a dataframe to S3
Arguments:
dataframe pd.data_frame -- data to upload
csv_name str -- name of the file to upload
save_local bool -- save a local copy
delimiter str -- delimiter for csv file
"""
extra_kwargs = {k: v for k, v in kwargs.items() if k in S3_ACCEPTED_KWARGS and v is not None}
# create local backup
if save_local == True:
data_frame.to_csv(csv_name, index=index, sep=delimiter)
print('saved file {0} in {1}'.format(csv_name, os.getcwd()))
#
csv_buffer = StringIO()
data_frame.to_csv(csv_buffer, index=index, sep=delimiter)
s3.Bucket(s3_bucket_var).put_object(
Key=s3_subdirectory_var + csv_name, Body=csv_buffer.getvalue(),
**extra_kwargs)
print('saved file {0} in bucket {1}'.format(
csv_name, s3_subdirectory_var + csv_name))
def create_redshift_table(data_frame,
redshift_table_name,
column_data_types=None,
index=False,
append=False,
diststyle = 'even',
distkey = '',
sort_interleaved = False,
sortkey = ''):
"""Create an empty RedShift Table
"""
if index == True:
columns = list(data_frame.columns)
if data_frame.index.name:
columns.insert(0, data_frame.index.name)
else:
columns.insert(0, "index")
else:
columns = list(data_frame.columns)
if column_data_types is None:
column_data_types = ['varchar(256)'] * len(columns)
columns_and_data_type = ', '.join(
['{0} {1}'.format(x, y) for x, y in zip(columns, column_data_types)])
create_table_query = 'create table {0} ({1})'.format(
redshift_table_name, columns_and_data_type)
if len(distkey) == 0:
# Without a distkey, we can set a diststyle
if diststyle not in ['even', 'all']:
raise ValueError("diststyle must be either 'even' or 'all'")
else:
create_table_query += ' diststyle {0}'.format(diststyle)
else:
# otherwise, override diststyle with distkey
create_table_query += ' distkey({0})'.format(distkey)
if len(sortkey) > 0:
if sort_interleaved:
create_table_query += ' interleaved'
create_table_query += ' sortkey({0})'.format(sortkey)
print(create_table_query)
print('CREATING A TABLE IN REDSHIFT')
cursor.execute('drop table if exists {0}'.format(redshift_table_name))
cursor.execute(create_table_query)
connect.commit()
def s3_to_redshift(redshift_table_name, delimiter=',', quotechar='"',
dateformat='auto', timeformat='auto', region='', parameters=''):
bucket_name = 's3://{0}/{1}.csv'.format(
s3_bucket_var, s3_subdirectory_var + redshift_table_name)
if aws_1 and aws_2:
authorization = """
access_key_id '{0}'
secret_access_key '{1}'
""".format(aws_1, aws_2)
elif aws_role:
authorization = """
iam_role '{0}'
""".format(aws_role)
else:
authorization = ""
s3_to_sql = """
copy {0}
from '{1}'
delimiter '{2}'
ignoreheader 1
csv quote as '{3}'
dateformat '{4}'
timeformat '{5}'
{6}
{7}
""".format(redshift_table_name, bucket_name, delimiter, quotechar, dateformat,
timeformat, authorization, parameters)
if region:
s3_to_sql = s3_to_sql + "region '{0}'".format(region)
if aws_token != '':
s3_to_sql = s3_to_sql + "\n\tsession_token '{0}'".format(aws_token)
s3_to_sql = s3_to_sql + ';'
print(s3_to_sql)
# send the file
print('FILLING THE TABLE IN REDSHIFT')
try:
cursor.execute(s3_to_sql)
connect.commit()
except Exception as e:
print(e)
traceback.print_exc(file=sys.stdout)
connect.rollback()
raise
def pandas_to_redshift(data_frame,
redshift_table_name,
column_data_types=None,
index=False,
save_local=False,
delimiter=',',
quotechar='"',
dateformat='auto',
timeformat='auto',
region='',
append=False,
diststyle='even',
distkey='',
sort_interleaved=False,
sortkey='',
parameters='',
**kwargs):
# Validate column names.
data_frame = validate_column_names(data_frame)
# Send data to S3
csv_name = redshift_table_name + '.csv'
s3_kwargs = {k: v for k, v in kwargs.items() if k in S3_ACCEPTED_KWARGS and v is not None}
df_to_s3(data_frame, csv_name, index, save_local, delimiter, **s3_kwargs)
# CREATE AN EMPTY TABLE IN REDSHIFT
if append is False:
create_redshift_table(data_frame, redshift_table_name,
column_data_types, index, append,
diststyle, distkey, sort_interleaved, sortkey)
# CREATE THE COPY STATEMENT TO SEND FROM S3 TO THE TABLE IN REDSHIFT
s3_to_redshift(redshift_table_name, delimiter, quotechar, dateformat, timeformat, region, parameters)
def exec_commit(sql_query):
cursor.execute(sql_query)
connect.commit()
def close_up_shop():
global connect, cursor, s3, s3_bucket_var, s3_subdirectory_var, aws_1, aws_2, aws_token
cursor.close()
connect.commit()
connect.close()
try:
del connect, cursor
except:
pass
try:
del s3, s3_bucket_var, s3_subdirectory_var, aws_1, aws_2, aws_token
except:
pass
#-------------------------------------------------------------------------------
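# Illustrative end-to-end usage sketch. The credentials, bucket and table names below are
# placeholders, not values this module ships with; adapt them before running.
def _example_roundtrip():
    connect_to_redshift(dbname='analytics', host='examplecluster.redshift.amazonaws.com',
                        user='analyst', password='...')
    connect_to_s3(aws_access_key_id='AKIA...', aws_secret_access_key='...',
                  bucket='my-staging-bucket', subdirectory='redshift-loads')
    df = pd.DataFrame({'city': ['a', 'b'], 'visits': [1, 2]})
    pandas_to_redshift(df, 'public.demo_table')   # CSV -> S3, then CREATE TABLE + COPY
    result = redshift_to_pandas('select * from public.demo_table')
    close_up_shop()
    return result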
| #!/usr/bin/env python3
from io import StringIO
import pandas as pd
import traceback
import psycopg2
import boto3
import sys
import os
import re
S3_ACCEPTED_KWARGS = [
'ACL', 'Body', 'CacheControl ', 'ContentDisposition', 'ContentEncoding', 'ContentLanguage',
'ContentLength', 'ContentMD5', 'ContentType', 'Expires', 'GrantFullControl', 'GrantRead',
'GrantReadACP', 'GrantWriteACP', 'Metadata', 'ServerSideEncryption', 'StorageClass',
'WebsiteRedirectLocation', 'SSECustomerAlgorithm', 'SSECustomerKey', 'SSECustomerKeyMD5',
'SSEKMSKeyId', 'RequestPayer', 'Tagging'
] # Available parameters for service: https://boto3.readthedocs.io/en/latest/reference/services/s3.html#S3.Client.put_object
def connect_to_redshift(dbname, host, user, port = 5439, **kwargs):
global connect, cursor
connect = psycopg2.connect(dbname = dbname,
host = host,
port = port,
user = user,
**kwargs)
cursor = connect.cursor()
def connect_to_s3(aws_access_key_id, aws_secret_access_key, bucket, subdirectory=None, aws_iam_role=None, **kwargs):
global s3, s3_bucket_var, s3_subdirectory_var, aws_1, aws_2, aws_token, aws_role
s3 = boto3.resource('s3',
aws_access_key_id = aws_access_key_id,
aws_secret_access_key = aws_secret_access_key,
**kwargs)
s3_bucket_var = bucket
if subdirectory is None:
s3_subdirectory_var = ''
else:
s3_subdirectory_var = subdirectory + '/'
aws_1 = aws_access_key_id
aws_2 = aws_secret_access_key
aws_role = aws_iam_role
if kwargs.get('aws_session_token'):
aws_token = kwargs.get('aws_session_token')
else:
aws_token = ''
def redshift_to_pandas(sql_query):
# pass a sql query and return a pandas dataframe
cursor.execute(sql_query)
columns_list = [desc[0] for desc in cursor.description]
data = pd.DataFrame(cursor.fetchall(), columns = columns_list)
return data
def validate_column_names(data_frame):
"""Validate the column names to ensure no reserved words are used.
Arguments:
dataframe pd.data_frame -- data to validate
"""
rrwords = open(os.path.join(os.path.dirname(__file__),
'redshift_reserve_words.txt'), 'r').readlines()
rrwords = [r.strip().lower() for r in rrwords]
data_frame.columns = [x.lower() for x in data_frame.columns]
for col in data_frame.columns:
try:
assert col not in rrwords
except:
raise ValueError(
'DataFrame column name {0} is a reserve word in redshift'
.format(col))
# check for spaces in the column names
    there_are_spaces = sum([re.search(r'\s', x) is not None for x in data_frame.columns]) > 0
# delimit them if there are
if there_are_spaces:
col_names_dict = {x:'"{0}"'.format(x) for x in data_frame.columns}
data_frame.rename(columns = col_names_dict, inplace = True)
return data_frame
def df_to_s3(data_frame, csv_name, index, save_local, delimiter, **kwargs):
"""Write a dataframe to S3
Arguments:
dataframe pd.data_frame -- data to upload
csv_name str -- name of the file to upload
save_local bool -- save a local copy
delimiter str -- delimiter for csv file
"""
extra_kwargs = {k: v for k, v in kwargs.items() if k in S3_ACCEPTED_KWARGS and v is not None}
# create local backup
if save_local == True:
data_frame.to_csv(csv_name, index=index, sep=delimiter)
print('saved file {0} in {1}'.format(csv_name, os.getcwd()))
#
csv_buffer = StringIO()
data_frame.to_csv(csv_buffer, index=index, sep=delimiter)
s3.Bucket(s3_bucket_var).put_object(
Key=s3_subdirectory_var + csv_name, Body=csv_buffer.getvalue(),
**extra_kwargs)
print('saved file {0} in bucket {1}'.format(
csv_name, s3_subdirectory_var + csv_name))
def create_redshift_table(data_frame,
redshift_table_name,
column_data_types=None,
index=False,
append=False,
diststyle = 'even',
distkey = '',
sort_interleaved = False,
sortkey = ''):
"""Create an empty RedShift Table
"""
if index == True:
columns = list(data_frame.columns)
if data_frame.index.name:
columns.insert(0, data_frame.index.name)
else:
columns.insert(0, "index")
else:
columns = list(data_frame.columns)
if column_data_types is None:
column_data_types = ['varchar(256)'] * len(columns)
columns_and_data_type = ', '.join(
['{0} {1}'.format(x, y) for x, y in zip(columns, column_data_types)])
create_table_query = 'create table {0} ({1})'.format(
redshift_table_name, columns_and_data_type)
if len(distkey) == 0:
# Without a distkey, we can set a diststyle
if diststyle not in ['even', 'all']:
raise ValueError("diststyle must be either 'even' or 'all'")
else:
create_table_query += ' diststyle {0}'.format(diststyle)
else:
# otherwise, override diststyle with distkey
create_table_query += ' distkey({0})'.format(distkey)
if len(sortkey) > 0:
if sort_interleaved:
create_table_query += ' interleaved'
create_table_query += ' sortkey({0})'.format(sortkey)
print(create_table_query)
print('CREATING A TABLE IN REDSHIFT')
cursor.execute('drop table if exists {0}'.format(redshift_table_name))
cursor.execute(create_table_query)
connect.commit()
def s3_to_redshift(redshift_table_name, delimiter=',', quotechar='"',
dateformat='auto', timeformat='auto', region='', parameters=''):
bucket_name = 's3://{0}/{1}.csv'.format(
s3_bucket_var, s3_subdirectory_var + redshift_table_name)
if aws_1 and aws_2:
authorization = """
access_key_id '{0}'
secret_access_key '{1}'
""".format(aws_1, aws_2)
elif aws_role:
authorization = """
iam_role '{0}'
""".format(aws_role)
else:
authorization = ""
s3_to_sql = """
copy {0}
from '{1}'
delimiter '{2}'
ignoreheader 1
csv quote as '{3}'
dateformat '{4}'
timeformat '{5}'
{6}
{7}
""".format(redshift_table_name, bucket_name, delimiter, quotechar, dateformat,
timeformat, authorization, parameters)
if region:
s3_to_sql = s3_to_sql + "region '{0}'".format(region)
if aws_token != '':
s3_to_sql = s3_to_sql + "\n\tsession_token '{0}'".format(aws_token)
s3_to_sql = s3_to_sql + ';'
print(s3_to_sql)
# send the file
print('FILLING THE TABLE IN REDSHIFT')
try:
cursor.execute(s3_to_sql)
connect.commit()
except Exception as e:
print(e)
traceback.print_exc(file=sys.stdout)
connect.rollback()
raise
def pandas_to_redshift(data_frame,
redshift_table_name,
column_data_types=None,
index=False,
save_local=False,
delimiter=',',
quotechar='"',
dateformat='auto',
timeformat='auto',
region='',
append=False,
diststyle='even',
distkey='',
sort_interleaved=False,
sortkey='',
parameters='',
**kwargs):
# Validate column names.
data_frame = validate_column_names(data_frame)
# Send data to S3
csv_name = redshift_table_name + '.csv'
s3_kwargs = {k: v for k, v in kwargs.items() if k in S3_ACCEPTED_KWARGS and v is not None}
df_to_s3(data_frame, csv_name, index, save_local, delimiter, **s3_kwargs)
# CREATE AN EMPTY TABLE IN REDSHIFT
if append is False:
create_redshift_table(data_frame, redshift_table_name,
column_data_types, index, append,
diststyle, distkey, sort_interleaved, sortkey)
# CREATE THE COPY STATEMENT TO SEND FROM S3 TO THE TABLE IN REDSHIFT
s3_to_redshift(redshift_table_name, delimiter, quotechar, dateformat, timeformat, region, parameters)
def exec_commit(sql_query):
cursor.execute(sql_query)
connect.commit()
def close_up_shop():
global connect, cursor, s3, s3_bucket_var, s3_subdirectory_var, aws_1, aws_2, aws_token
cursor.close()
connect.commit()
connect.close()
try:
del connect, cursor
except:
pass
try:
del s3, s3_bucket_var, s3_subdirectory_var, aws_1, aws_2, aws_token
except:
pass
#-------------------------------------------------------------------------------
| en | 0.425581 | #!/usr/bin/env python3 # Available parameters for service: https://boto3.readthedocs.io/en/latest/reference/services/s3.html#S3.Client.put_object # pass a sql query and return a pandas dataframe Validate the column names to ensure no reserved words are used. Arguments: dataframe pd.data_frame -- data to validate # check for spaces in the column names # delimit them if there are Write a dataframe to S3 Arguments: dataframe pd.data_frame -- data to upload csv_name str -- name of the file to upload save_local bool -- save a local copy delimiter str -- delimiter for csv file # create local backup # Create an empty RedShift Table # Without a distkey, we can set a diststyle # otherwise, override diststyle with distkey access_key_id '{0}' secret_access_key '{1}' iam_role '{0}' copy {0} from '{1}' delimiter '{2}' ignoreheader 1 csv quote as '{3}' dateformat '{4}' timeformat '{5}' {6} {7} # send the file # Validate column names. # Send data to S3 # CREATE AN EMPTY TABLE IN REDSHIFT # CREATE THE COPY STATEMENT TO SEND FROM S3 TO THE TABLE IN REDSHIFT #------------------------------------------------------------------------------- | 2.146632 | 2 |
src/gmmtest.py | ClementLancien/machineLearning | 0 | 6632678 | <gh_stars>0
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import animation
from JSAnimation import IPython_display
np.random.seed(13)
data = np.random.random(100)
plt.hist(data, bins=15, density=True, color='black', alpha=0.5)
plt.title('Histogram of $U(0,1)$ samples')
plt.show()
def normal(x, mu, sigma):
"""Normal distribution PDF."""
return np.exp(-0.5 * ((x - mu) / sigma)**2) / (np.sqrt(2 * np.pi) * sigma)
def _fit_gmm(data, num_components, num_iters=100):
"""Fit a single GMM with EM with one random initialization."""
# Random initialization.
mu = np.random.choice(data, num_components, replace=False)
sigma = (np.random.random(num_components) * 0.15) + 0.1
prob_population = np.ones(num_components) / num_components
# Keep track of results after each iteration.
results = []
results.append((mu.copy(), sigma.copy(), prob_population.copy(), float('-inf')))
last_log_likelihood = None
for i in range(num_iters):
# E-step.
probs = [normal(x, mu, sigma) for x in data]
probs = np.array([p / p.sum() for p in probs])
# M-step.
for k in range(num_components):
k_probs = probs[:, k]
mu[k] = (k_probs * data).sum() / k_probs.sum()
sigma[k] = np.sqrt((k_probs * (data - mu[k])**2).sum() / k_probs.sum())
prob_population[k] = k_probs.sum() / len(data)
# Bookkeeping.
        log_likelihood = np.log(np.prod([(normal(data[n], mu, sigma) * probs[n, :]).sum()
for n in range(len(data))]))
results.append((mu.copy(), sigma.copy(), prob_population.copy(), log_likelihood))
if last_log_likelihood is not None and log_likelihood <= (last_log_likelihood + 0.01):
break
last_log_likelihood = log_likelihood
return results
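# EM updates used by _fit_gmm (n indexes samples, k indexes components):
#   E-step: responsibilities r_nk are the normalised component densities N(x_n | mu_k, sigma_k);
#           note the mixing weights prob_population are not applied here, so components are
#           weighted uniformly when computing responsibilities.
#   M-step: mu_k = sum_n r_nk x_n / sum_n r_nk,
#           sigma_k = sqrt(sum_n r_nk (x_n - mu_k)^2 / sum_n r_nk),
#           prob_population_k = sum_n r_nk / N.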
def fit_gmm(data, num_components, num_iters=10, num_random_inits=10):
"""Find the maximum likelihood GMM over several random initializations."""
best_results = None
best_results_iters = None
best_ll = float('-inf')
# Try several random initializations and keep the best.
for attempt in range(num_random_inits):
results_iters = _fit_gmm(data, num_components, num_iters=num_iters)
final_log_likelihood = results_iters[-1][3]
if final_log_likelihood > best_ll:
best_results = results_iters[-1]
best_results_iters = results_iters
best_ll = final_log_likelihood
return best_results, best_results_iters
colors = 'bgrcmy'
def gmm_fit_and_animate(data, num_components, interval=200):
_, best_results_iters = fit_gmm(data, num_components, num_iters=200, num_random_inits=10)
# Remove initial random guess (before doing a single iteration).
best_results_iters = best_results_iters[1:]
fig = plt.figure(figsize=(12, 8))
ax = plt.axes(xlim=(0, 1), ylim=(0, 2))
line, = ax.plot([], [], label='GMM Fit', color='black', alpha=0.7, linewidth=3)
    ax.hist(data, density=True, bins=15, color='lightgray', alpha=0.2, label='Real Data')
ax.legend()
ax.set_title('{0} Components'.format(num_components))
def animate(i):
mu, sigma, prob_population, _ = best_results_iters[i]
xs = np.linspace(0, 1, 1000)
ys = [(normal(x, mu, sigma) * prob_population).sum() for x in xs]
line.set_data(xs, ys)
for k in range(num_components):
ys = [normal(x, mu[k], sigma[k]) * prob_population[k] for x in xs]
ax.plot(xs, ys, alpha=0.2, color=colors[k % len(colors)])
# Things like to crash if I try to do too many frames, I guess, so limit
# the number of frames.
num_iters = len(best_results_iters)
frames = np.arange(0, num_iters, max(1, num_iters // 20), dtype=int)
return animation.FuncAnimation(fig, animate, frames=frames, interval=interval)
gmm_fit_and_animate(data, 3)
| import matplotlib.pyplot as plt
import numpy as np
from matplotlib import animation
from JSAnimation import IPython_display
np.random.seed(13)
data = np.random.random(100)
plt.hist(data, bins=15, density=True, color='black', alpha=0.5)
plt.title('Histogram of $U(0,1)$ samples')
plt.show()
def normal(x, mu, sigma):
"""Normal distribution PDF."""
return np.exp(-0.5 * ((x - mu) / sigma)**2) / (np.sqrt(2 * np.pi) * sigma)
def _fit_gmm(data, num_components, num_iters=100):
"""Fit a single GMM with EM with one random initialization."""
# Random initialization.
mu = np.random.choice(data, num_components, replace=False)
sigma = (np.random.random(num_components) * 0.15) + 0.1
prob_population = np.ones(num_components) / num_components
# Keep track of results after each iteration.
results = []
results.append((mu.copy(), sigma.copy(), prob_population.copy(), float('-inf')))
last_log_likelihood = None
for i in range(num_iters):
# E-step.
probs = [normal(x, mu, sigma) for x in data]
probs = np.array([p / p.sum() for p in probs])
# M-step.
for k in range(num_components):
k_probs = probs[:, k]
mu[k] = (k_probs * data).sum() / k_probs.sum()
sigma[k] = np.sqrt((k_probs * (data - mu[k])**2).sum() / k_probs.sum())
prob_population[k] = k_probs.sum() / len(data)
# Bookkeeping.
        log_likelihood = np.log(np.prod([(normal(data[n], mu, sigma) * probs[n, :]).sum()
for n in range(len(data))]))
results.append((mu.copy(), sigma.copy(), prob_population.copy(), log_likelihood))
if last_log_likelihood is not None and log_likelihood <= (last_log_likelihood + 0.01):
break
last_log_likelihood = log_likelihood
return results
def fit_gmm(data, num_components, num_iters=10, num_random_inits=10):
"""Find the maximum likelihood GMM over several random initializations."""
best_results = None
best_results_iters = None
best_ll = float('-inf')
# Try several random initializations and keep the best.
for attempt in range(num_random_inits):
results_iters = _fit_gmm(data, num_components, num_iters=num_iters)
final_log_likelihood = results_iters[-1][3]
if final_log_likelihood > best_ll:
best_results = results_iters[-1]
best_results_iters = results_iters
best_ll = final_log_likelihood
return best_results, best_results_iters
colors = 'bgrcmy'
def gmm_fit_and_animate(data, num_components, interval=200):
_, best_results_iters = fit_gmm(data, num_components, num_iters=200, num_random_inits=10)
# Remove initial random guess (before doing a single iteration).
best_results_iters = best_results_iters[1:]
fig = plt.figure(figsize=(12, 8))
ax = plt.axes(xlim=(0, 1), ylim=(0, 2))
line, = ax.plot([], [], label='GMM Fit', color='black', alpha=0.7, linewidth=3)
    ax.hist(data, density=True, bins=15, color='lightgray', alpha=0.2, label='Real Data')
ax.legend()
ax.set_title('{0} Components'.format(num_components))
def animate(i):
mu, sigma, prob_population, _ = best_results_iters[i]
xs = np.linspace(0, 1, 1000)
ys = [(normal(x, mu, sigma) * prob_population).sum() for x in xs]
line.set_data(xs, ys)
for k in range(num_components):
ys = [normal(x, mu[k], sigma[k]) * prob_population[k] for x in xs]
ax.plot(xs, ys, alpha=0.2, color=colors[k % len(colors)])
# Things like to crash if I try to do too many frames, I guess, so limit
# the number of frames.
num_iters = len(best_results_iters)
frames = np.arange(0, num_iters, max(1, num_iters // 20), dtype=int)
return animation.FuncAnimation(fig, animate, frames=frames, interval=interval)
gmm_fit_and_animate(data, 3) | en | 0.889965 | Normal distribution PDF. Fit a single GMM with EM with one random initialization. # Random initialization. # Keep track of results after each iteration. # E-step. # M-step. # Bookkeeping. Find the maximum likelihood GMM over several random initializations. # Try several random initializations and keep the best. # Remove initial random guess (before doing a single iteration). # Things like to crash if I try to do too many frames, I guess, so limit # the number of frames. | 3.133467 | 3 |
speedtest-charts.py | frdmn/google-speedtest-chart | 53 | 6632679 | <reponame>frdmn/google-speedtest-chart
#!/usr/bin/env python3
import datetime
import pygsheets
import speedtest
import argparse
# Set options
parser = argparse.ArgumentParser(
description='Simple Python script to push speedtest results \
(using speedtest-cli) to a Google Docs spreadsheet'
)
parser.add_argument(
"-w, --workbookname", action="store", default="Speedtest", type=str,
dest="workbookname",
    help='Sets the workbook name, default is "Speedtest"'
)
parser.add_argument(
"-b, --bymonth", action="store_true", default=False,
dest="bymonth",
    help='Creates a new sheet for each month named MMM YYYY (ex: Jun 2018)'
)
cliarg = parser.parse_args()
# Set constants
DATE = datetime.datetime.now().strftime("%m/%d/%Y %H:%M:%S")
header = [['A1', 'B1', 'C1', 'D1'], ['Date', 'Download', 'Upload', 'Ping']]
if cliarg.bymonth:
sheetname = datetime.datetime.now().strftime("%b %Y")
# set variable scope
download = ''
upload = ''
ping = ''
def get_credentials():
"""Function to check for valid OAuth access tokens."""
gc = pygsheets.authorize(outh_file="credentials.json")
return gc
def submit_into_spreadsheet(download, upload, ping):
"""Function to submit speedtest result."""
gc = get_credentials()
try:
speedtest = gc.open(cliarg.workbookname)
except pygsheets.SpreadsheetNotFound:
speedtest = gc.create(cliarg.workbookname)
if cliarg.bymonth:
try:
sheet = speedtest.worksheet('title', sheetname)
except pygsheets.WorksheetNotFound:
sheet = speedtest.add_worksheet(sheetname)
else:
sheet = speedtest.sheet1
headnew = str(sheet.cell('A1').value)
headcur = str(header[1][0])
if headnew != headcur:
# create header row
for index in range(len(header[0])):
head = sheet.cell(header[0][index])
head.value = header[1][index]
head.update()
data = [DATE, download, upload, ping]
sheet.append_table(values=data)
def getresults():
"""Function to generate speedtest result."""
spdtest = speedtest.Speedtest()
spdtest.get_best_server()
download = round(spdtest.download() / 1000 / 1000, 2)
upload = round(spdtest.upload() / 1000 / 1000, 2)
ping = round(spdtest.results.ping)
return(download, upload, ping)
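# speedtest-cli reports transfer rates in bits per second; the two divisions by 1000 above
# convert them to megabits per second before rounding.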
def main():
# Check for proper credentials
print("Checking OAuth validity...")
try:
get_credentials()
except pygsheets.AuthenticationError:
print("Authentication Failed")
raise
# Run speedtest and store output
print("Starting speed test...")
download, upload, ping = getresults()
print(
"Starting speed finished (Download: ", download,
", Upload: ", upload,
", Ping: ", ping, ")")
# Write to spreadsheet
print("Writing to spreadsheet...")
submit_into_spreadsheet(download, upload, ping)
    print("Successfully written to spreadsheet!")
if __name__ == "__main__":
main()
| #!/usr/bin/env python3
import datetime
import pygsheets
import speedtest
import argparse
# Set options
parser = argparse.ArgumentParser(
description='Simple Python script to push speedtest results \
(using speedtest-cli) to a Google Docs spreadsheet'
)
parser.add_argument(
"-w, --workbookname", action="store", default="Speedtest", type=str,
dest="workbookname",
    help='Sets the workbook name, default is "Speedtest"'
)
parser.add_argument(
"-b, --bymonth", action="store_true", default=False,
dest="bymonth",
    help='Creates a new sheet for each month named MMM YYYY (ex: Jun 2018)'
)
cliarg = parser.parse_args()
# Set constants
DATE = datetime.datetime.now().strftime("%m/%d/%Y %H:%M:%S")
header = [['A1', 'B1', 'C1', 'D1'], ['Date', 'Download', 'Upload', 'Ping']]
if cliarg.bymonth:
sheetname = datetime.datetime.now().strftime("%b %Y")
# set variable scope
download = ''
upload = ''
ping = ''
def get_credentials():
"""Function to check for valid OAuth access tokens."""
gc = pygsheets.authorize(outh_file="credentials.json")
return gc
def submit_into_spreadsheet(download, upload, ping):
"""Function to submit speedtest result."""
gc = get_credentials()
try:
speedtest = gc.open(cliarg.workbookname)
except pygsheets.SpreadsheetNotFound:
speedtest = gc.create(cliarg.workbookname)
if cliarg.bymonth:
try:
sheet = speedtest.worksheet('title', sheetname)
except pygsheets.WorksheetNotFound:
sheet = speedtest.add_worksheet(sheetname)
else:
sheet = speedtest.sheet1
headnew = str(sheet.cell('A1').value)
headcur = str(header[1][0])
if headnew != headcur:
# create header row
for index in range(len(header[0])):
head = sheet.cell(header[0][index])
head.value = header[1][index]
head.update()
data = [DATE, download, upload, ping]
sheet.append_table(values=data)
def getresults():
"""Function to generate speedtest result."""
spdtest = speedtest.Speedtest()
spdtest.get_best_server()
download = round(spdtest.download() / 1000 / 1000, 2)
upload = round(spdtest.upload() / 1000 / 1000, 2)
ping = round(spdtest.results.ping)
return(download, upload, ping)
def main():
# Check for proper credentials
print("Checking OAuth validity...")
try:
get_credentials()
except pygsheets.AuthenticationError:
print("Authentication Failed")
raise
# Run speedtest and store output
print("Starting speed test...")
download, upload, ping = getresults()
print(
"Starting speed finished (Download: ", download,
", Upload: ", upload,
", Ping: ", ping, ")")
# Write to spreadsheet
print("Writing to spreadsheet...")
submit_into_spreadsheet(download, upload, ping)
    print("Successfully written to spreadsheet!")
if __name__ == "__main__":
main() | en | 0.512417 | #!/usr/bin/env python3 # Set options # Set constants # set variable scope Function to check for valid OAuth access tokens. Function to submit speedtest result. # create header row Function to generate speedtest result. # Check for proper credentials # Run speedtest and store output # Write to spreadsheet | 2.932428 | 3 |
tools/mo/openvino/tools/mo/front/kaldi/extractors/backproptruncation_ext.py | pazamelin/openvino | 1 | 6632680 | <gh_stars>1-10
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from openvino.tools.mo.front.caffe.extractors.utils import embed_input
from openvino.tools.mo.front.extractor import FrontExtractorOp
from openvino.tools.mo.front.kaldi.loader.utils import read_binary_float_token, read_binary_integer32_token, collect_until_token
from openvino.tools.mo.ops.scale_shift import ScaleShiftOp
class BackPropTrancationFrontExtractor(FrontExtractorOp):
op = 'backproptruncationcomponent'
enabled = True
@classmethod
def extract(cls, node):
pb = node.parameters
collect_until_token(pb, b'<Dim>')
dim = read_binary_integer32_token(pb)
collect_until_token(pb, b'<Scale>')
scale = read_binary_float_token(pb)
# TODO add real batch here
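        # The component is expressed as a ScaleShift op whose 'weights' input is the <Scale>
        # value repeated <Dim> times, i.e. a constant elementwise scaling of the input.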
attrs = {}
embed_input(attrs, 1, 'weights', np.full([dim], scale))
ScaleShiftOp.update_node_stat(node, attrs)
return cls.enabled
| # Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from openvino.tools.mo.front.caffe.extractors.utils import embed_input
from openvino.tools.mo.front.extractor import FrontExtractorOp
from openvino.tools.mo.front.kaldi.loader.utils import read_binary_float_token, read_binary_integer32_token, collect_until_token
from openvino.tools.mo.ops.scale_shift import ScaleShiftOp
class BackPropTrancationFrontExtractor(FrontExtractorOp):
op = 'backproptruncationcomponent'
enabled = True
@classmethod
def extract(cls, node):
pb = node.parameters
collect_until_token(pb, b'<Dim>')
dim = read_binary_integer32_token(pb)
collect_until_token(pb, b'<Scale>')
scale = read_binary_float_token(pb)
# TODO add real batch here
attrs = {}
embed_input(attrs, 1, 'weights', np.full([dim], scale))
ScaleShiftOp.update_node_stat(node, attrs)
return cls.enabled | en | 0.257653 | # Copyright (C) 2018-2021 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # TODO add real batch here | 1.964389 | 2 |
Solutions/problem20.py | WalrusCow/euler | 0 | 6632681 | <reponame>WalrusCow/euler<filename>Solutions/problem20.py<gh_stars>0
# Project Euler Problem 20
# Created on: 2012-06-18
# Created by: <NAME>
from math import factorial
print(sum(map(int, str(factorial(100)))))
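# i.e. compute 100!, write it out in decimal and sum its digits.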
| # Project Euler Problem 20
# Created on: 2012-06-18
# Created by: <NAME>
from math import factorial
print(sum(map(int, str(factorial(100))))) | en | 0.659634 | # Project Euler Problem 20 # Created on: 2012-06-18 # Created by: <NAME> | 3.152137 | 3 |
icinga2api_py/api.py | TimL20/icinga2api_py | 0 | 6632682 | <filename>icinga2api_py/api.py
# -*- coding: utf-8 -*-
"""
Small client for easy access to the Icinga2 API using the requests library
(https://github.com/psf/requests).
This client is really dumb and does not know much about how the Icinga2 API works.
What it does is to set up a requests.Session (which it extends), build URL and body, construct request and a response
at the end. By default the constructed request is a :class:`icinga2api_py.models.APIRequest`, and the constructed
response a :class:`icinga2api_py.models.APIResponse`. It's possible to override these defaults in a subclass.
"""
import requests
from typing import Union
from .models import APIRequest, APIResponse
# Default request class
DEFAULT_REQUEST_CLASS = APIRequest
# Default response class
DEFAULT_RESPONSE_CLASS = APIResponse
class API(requests.Session):
"""API session for easily sending requests and getting responses.
	Objects of this class are used for constructing a request, which will then use its :meth:`prepare_request` and
:meth:`create_response` methods for sending the request and constructing the response.
The preparation method is inherited from the requests.Session superclass.
"""
#: Accepted HTTP methods (in a class attribute to enable easy overwriting).
#: Everything else is a URL or body part for the :class:`RequestBuilder`
HTTP_METHODS = {"GET", "POST", "PUT", "DELETE"}
def __init__(self, url: str, **sessionparams):
"""Construct the API session with an URL and "optional" session parameters.
:param url: URL, e.g. "https://icingahost:5665/v1/", see :meth:`prepare_base_url` for what is expected here
:param **sessionparams: Keyword arguments are set as session attribute. Every attribute of a requests.Session
is allowed, these include: headers (default headers), auth, proxies, params (default
parameters), verify, cert (client certificate path), trust_env and more.
"""
super().__init__()
self.base_url = self.prepare_base_url(url)
# Set default Accept header to JSON, as the Icinga2 API uses that
self.headers["Accept"] = "application/json"
# Set session parameters like verify, proxies, auth, ...
for key, value in sessionparams.items():
setattr(self, key, value)
@staticmethod
def prepare_base_url(url: str) -> str:
"""Prepare the base_url for usage.
This static method adds scheme, trailing '/', API version suffix and Icinga port defaults if not specified.
This method is called by :meth:`__init__`.
:param url: The URL to prepare for client usage
:raises ValueError: if the url parameter is False in a boolean context
:return: The prepared base URL
"""
if not url:
raise ValueError(f"Unable to prepare URL {url}")
# Prefix https if not specified
if "://" not in url:
url = f"https://{url}"
# Append '/' to URL if not already there
if url[-1:] != "/":
url = f"{url}/"
# Suffix API version
if url[-3:-2] != "v":
url = f"{url}v1/"
# Set port if not specified
scheme, _, host, path = url.split('/', 3)
if ":" not in host:
url = f"{scheme}//{host}:5665/{path}"
return url
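	# Example of the normalisation above (not executed): prepare_base_url("icingahost")
	# returns "https://icingahost:5665/v1/" -- scheme, trailing slash, API version suffix
	# and port are all filled in when missing.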
@property
def request_class(self):
"""The class used as a request.
This property is expected to return a callable, that acts like :class:`icinga2api_py.models.APIRequest`.
It's usually only used internally, overwriting it in subclasses enables to easily change client behavior.
"""
return DEFAULT_REQUEST_CLASS
def create_response(self, response):
"""Create a custom response from a requests.Response."""
return DEFAULT_RESPONSE_CLASS(response)
@classmethod
def from_pieces(cls, host, port=5665, url_prefix='/v1', **sessionparams) -> "API":
"""Simplified creation of an API object."""
url = f"https://{host}:{port}{url_prefix}/"
return cls(url, **sessionparams)
@classmethod
def clone(cls, obj: "API") -> "API":
"""Clone the given client.
The returned object is something like a shallow copy of the given obj, but only attributes usually used with
	this class are copied. That this method will return a shallow copy comes with the possibly unwanted effect
that e.g. updating headers for the clone also updates the headers of the original object. Attribute assignments
will have no effect on clone objects of course.
"""
sessionparams = {}
for attr in obj.__attrs__:
sessionparams[attr] = getattr(obj, attr, None)
api = cls(obj.base_url, **sessionparams)
return api
def __copy__(self):
"""Get a shallow copy."""
return self.clone(self)
def __getattr__(self, item) -> "RequestBuilder":
"""Return a RequestBuilder object with the given first item."""
return self.s(item)
def s(self, item) -> "RequestBuilder":
"""Return a RequestBuilder object with the given first item."""
return self.RequestBuilder(self).s(item)
def __truediv__(self, item) -> "RequestBuilder":
"""Return a RequestBuilder object with the given first item."""
return self.RequestBuilder(self).s(item)
class RequestBuilder:
"""Class to build a request and it's dictionary (JSON) body."""
def __init__(self, api: "API"):
"""The RequestBuilder needs an API client object to init, mainly to pass it on to the request class."""
self.api_client = api
self._lastattr = None # last attribute -> call to put in body, or not to add it to the URL
self._builder_list = [] # URL Builder, joined with "/" at the and
self._body = {} # Request body as dictionary
def _rotate_attr(self, new=None) -> "API.RequestBuilder":
"""Helper method, as this functionality is needed twice.
The "last used attr" is rotated. If nothing was done with the last used attr yet, it's added to the URL
builder list. A new last used attr is set if given.
"""
if self._lastattr is not None:
self._builder_list.append(self._lastattr)
self._lastattr = None
if new is not None:
self._lastattr = new
return self
def __getattr__(self, item) -> Union[APIRequest, "API.RequestBuilder"]:
"""Add item to URL path OR prepare item for put in body OR construct a request."""
return self.s(item)
def __truediv__(self, item) -> Union[APIRequest, "API.RequestBuilder"]:
"""Add item to URL path OR prepare item for put in body OR construct a request."""
return self.s(item)
def s(self, item) -> Union[APIRequest, "API.RequestBuilder"]:
"""Add item to URL path OR prepare item for put in body OR construct a request."""
if item.upper() not in self.api_client.HTTP_METHODS:
return self._rotate_attr(item)
			# item was an accepted HTTP method -> construct a request
self._rotate_attr()
# Construct URL with base url from api client + the "/"-joined builder list
url = self.api_client.base_url + "/".join(self._builder_list)
return self.api_client.request_class(self.api_client, item.upper(), url, json=self._body)
def __call__(self, *args, **kwargs) -> "API.RequestBuilder":
"""Call this object to put the last item into the body.
The last item is put into the JSON-encoded body as a key, with the arguments this is called with as its
value(s).
Passing one argument causes this argument to be the value. When passing multiple arguments, the value is a
list of these arguments. When no arguments are passed, the body value for this key is deleted.
Things are a bit different, when the body already has a value with such a key.
If a single arg gets passed and a single arg already is the value, the value gets a list of these two.
If a single arg gets passed and the value has already multiple args, the new arg gets appended to the value.
If multiple args are passed and there is a value, the value gets a list of all (args and value list items).
"""
key = self._lastattr
# Reset last used attr
self._lastattr = None
single_item_exists = key in self._body and not isinstance(self._body[key], list)
item_list_exists = key in self._body and isinstance(self._body[key], list)
if len(args) == 1:
# Exactly one arg given
value = args[0]
if single_item_exists:
# Single item + single item => list
self._body[key] = [self._body[key], value]
elif item_list_exists:
# Add single item to multiple existing items
self._body[key] += (value, )
else:
# Set single item
self._body[key] = value
elif len(args) > 1:
# More than one arg given
value = list(args)
if single_item_exists:
# Add multiple items to single item
self._body[key] = [self._body[key]] + value
elif item_list_exists:
# Add multiple items to multiple items
self._body[key] += value
else:
# Set multiple items
self._body[key] = value
else:
# No arg given -> delete
del self._body[key]
return self
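# Illustrative request construction only -- the host, credentials and filter below are
# placeholders, and nothing is sent here; how the returned APIRequest is executed is
# defined in models.APIRequest.
def _example_request_building():
    client = API("icinga.example.com", auth=("user", "pass"), verify=False)
    # Attribute ("." or "/") access appends URL segments, a call stores the preceding
    # segment as a JSON body key, and a trailing HTTP verb attribute builds the request.
    request = client.objects.hosts.filter('match("web*", host.name)').attrs("name", "state").get
    return request  # APIRequest for GET .../v1/objects/hosts with the filter/attrs body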
| <filename>icinga2api_py/api.py
# -*- coding: utf-8 -*-
"""
Small client for easy access to the Icinga2 API using the requests library
(https://github.com/psf/requests).
This client is really dumb and does not know much about how the Icinga2 API works.
What it does is to set up a requests.Session (which it extends), build URL and body, construct request and a response
at the end. By default the constructed request is a :class:`icinga2api_py.models.APIRequest`, and the constructed
response a :class:`icinga2api_py.models.APIResponse`. It's possible to override these defaults in a subclass.
"""
import requests
from typing import Union
from .models import APIRequest, APIResponse
# Default request class
DEFAULT_REQUEST_CLASS = APIRequest
# Default response class
DEFAULT_RESPONSE_CLASS = APIResponse
class API(requests.Session):
"""API session for easily sending requests and getting responses.
	Objects of this class are used for constructing a request, which will then use its :meth:`prepare_request` and
:meth:`create_response` methods for sending the request and constructing the response.
The preparation method is inherited from the requests.Session superclass.
"""
#: Accepted HTTP methods (in a class attribute to enable easy overwriting).
#: Everything else is a URL or body part for the :class:`RequestBuilder`
HTTP_METHODS = {"GET", "POST", "PUT", "DELETE"}
def __init__(self, url: str, **sessionparams):
"""Construct the API session with an URL and "optional" session parameters.
:param url: URL, e.g. "https://icingahost:5665/v1/", see :meth:`prepare_base_url` for what is expected here
:param **sessionparams: Keyword arguments are set as session attribute. Every attribute of a requests.Session
is allowed, these include: headers (default headers), auth, proxies, params (default
parameters), verify, cert (client certificate path), trust_env and more.
"""
super().__init__()
self.base_url = self.prepare_base_url(url)
# Set default Accept header to JSON, as the Icinga2 API uses that
self.headers["Accept"] = "application/json"
# Set session parameters like verify, proxies, auth, ...
for key, value in sessionparams.items():
setattr(self, key, value)
@staticmethod
def prepare_base_url(url: str) -> str:
"""Prepare the base_url for usage.
This static method adds scheme, trailing '/', API version suffix and Icinga port defaults if not specified.
This method is called by :meth:`__init__`.
:param url: The URL to prepare for client usage
:raises ValueError: if the url parameter is False in a boolean context
:return: The prepared base URL
"""
if not url:
raise ValueError(f"Unable to prepare URL {url}")
# Prefix https if not specified
if "://" not in url:
url = f"https://{url}"
# Append '/' to URL if not already there
if url[-1:] != "/":
url = f"{url}/"
# Suffix API version
if url[-3:-2] != "v":
url = f"{url}v1/"
# Set port if not specified
scheme, _, host, path = url.split('/', 3)
if ":" not in host:
url = f"{scheme}//{host}:5665/{path}"
return url
@property
def request_class(self):
"""The class used as a request.
This property is expected to return a callable, that acts like :class:`icinga2api_py.models.APIRequest`.
It's usually only used internally, overwriting it in subclasses enables to easily change client behavior.
"""
return DEFAULT_REQUEST_CLASS
def create_response(self, response):
"""Create a custom response from a requests.Response."""
return DEFAULT_RESPONSE_CLASS(response)
@classmethod
def from_pieces(cls, host, port=5665, url_prefix='/v1', **sessionparams) -> "API":
"""Simplified creation of an API object."""
url = f"https://{host}:{port}{url_prefix}/"
return cls(url, **sessionparams)
@classmethod
def clone(cls, obj: "API") -> "API":
"""Clone the given client.
The returned object is something like a shallow copy of the given obj, but only attributes usually used with
	this class are copied. That this method will return a shallow copy comes with the possibly unwanted effect
that e.g. updating headers for the clone also updates the headers of the original object. Attribute assignments
will have no effect on clone objects of course.
"""
sessionparams = {}
for attr in obj.__attrs__:
sessionparams[attr] = getattr(obj, attr, None)
api = cls(obj.base_url, **sessionparams)
return api
def __copy__(self):
"""Get a shallow copy."""
return self.clone(self)
def __getattr__(self, item) -> "RequestBuilder":
"""Return a RequestBuilder object with the given first item."""
return self.s(item)
def s(self, item) -> "RequestBuilder":
"""Return a RequestBuilder object with the given first item."""
return self.RequestBuilder(self).s(item)
def __truediv__(self, item) -> "RequestBuilder":
"""Return a RequestBuilder object with the given first item."""
return self.RequestBuilder(self).s(item)
class RequestBuilder:
"""Class to build a request and it's dictionary (JSON) body."""
def __init__(self, api: "API"):
"""The RequestBuilder needs an API client object to init, mainly to pass it on to the request class."""
self.api_client = api
self._lastattr = None # last attribute -> call to put in body, or not to add it to the URL
			self._builder_list = []  # URL Builder, joined with "/" at the end
self._body = {} # Request body as dictionary
def _rotate_attr(self, new=None) -> "API.RequestBuilder":
"""Helper method, as this functionality is needed twice.
The "last used attr" is rotated. If nothing was done with the last used attr yet, it's added to the URL
builder list. A new last used attr is set if given.
"""
if self._lastattr is not None:
self._builder_list.append(self._lastattr)
self._lastattr = None
if new is not None:
self._lastattr = new
return self
def __getattr__(self, item) -> Union[APIRequest, "API.RequestBuilder"]:
"""Add item to URL path OR prepare item for put in body OR construct a request."""
return self.s(item)
def __truediv__(self, item) -> Union[APIRequest, "API.RequestBuilder"]:
"""Add item to URL path OR prepare item for put in body OR construct a request."""
return self.s(item)
def s(self, item) -> Union[APIRequest, "API.RequestBuilder"]:
"""Add item to URL path OR prepare item for put in body OR construct a request."""
if item.upper() not in self.api_client.HTTP_METHODS:
return self._rotate_attr(item)
			# item was an accepted HTTP method -> construct a request
self._rotate_attr()
# Construct URL with base url from api client + the "/"-joined builder list
url = self.api_client.base_url + "/".join(self._builder_list)
return self.api_client.request_class(self.api_client, item.upper(), url, json=self._body)
def __call__(self, *args, **kwargs) -> "API.RequestBuilder":
"""Call this object to put the last item into the body.
The last item is put into the JSON-encoded body as a key, with the arguments this is called with as its
value(s).
Passing one argument causes this argument to be the value. When passing multiple arguments, the value is a
list of these arguments. When no arguments are passed, the body value for this key is deleted.
Things are a bit different, when the body already has a value with such a key.
If a single arg gets passed and a single arg already is the value, the value gets a list of these two.
If a single arg gets passed and the value has already multiple args, the new arg gets appended to the value.
If multiple args are passed and there is a value, the value gets a list of all (args and value list items).
"""
key = self._lastattr
# Reset last used attr
self._lastattr = None
single_item_exists = key in self._body and not isinstance(self._body[key], list)
item_list_exists = key in self._body and isinstance(self._body[key], list)
if len(args) == 1:
# Exactly one arg given
value = args[0]
if single_item_exists:
# Single item + single item => list
self._body[key] = [self._body[key], value]
elif item_list_exists:
# Add single item to multiple existing items
self._body[key] += (value, )
else:
# Set single item
self._body[key] = value
elif len(args) > 1:
# More than one arg given
value = list(args)
if single_item_exists:
# Add multiple items to single item
self._body[key] = [self._body[key]] + value
elif item_list_exists:
# Add multiple items to multiple items
self._body[key] += value
else:
# Set multiple items
self._body[key] = value
else:
# No arg given -> delete
del self._body[key]
return self
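	# Minimal usage sketch of the fluent interface defined above (illustrative only: the host,
	# credentials and Icinga object names are made up, and how the resulting APIRequest is sent
	# is defined by the request/response classes from the models module, not in this file):
	#   client = API("icinga.example.com", auth=("user", "pass"), verify=False)
	#   req = client.objects.hosts.filter('host.name=="web01"').attrs("state", "name").s("GET")
	#   # -> GET https://icinga.example.com:5665/v1/objects/hosts
	#   #    with JSON body {"filter": 'host.name=="web01"', "attrs": ["state", "name"]}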
| en | 0.821379 | # -*- coding: utf-8 -*- Small client for easy access to the Icinga2 API using the requests library (https://github.com/psf/requests). This client is really dump and has not much ideas about how the Icinga2 API works. What it does is to set up a requests.Session (which it extends), build URL and body, construct request and a response at the end. By default the constructed request is a :class:`icinga2api_py.models.APIRequest`, and the constructed response a :class:`icinga2api_py.models.APIResponse`. It's possible to override these defaults in a subclass. # Default request class # Default response class API session for easily sending requests and getting responses. Objects of this class are used for constructing a request, that than will use it's :meth:`prepare_request` and :meth:`create_response` methods for sending the request and constructing the response. The preparation method is inherited from the requests.Session superclass. #: Accepted HTTP methods (in a class attribute to enable easy overwriting). #: Everything else is a URL or body part for the :class:`RequestBuilder` Construct the API session with an URL and "optional" session parameters. :param url: URL, e.g. "https://icingahost:5665/v1/", see :meth:`prepare_base_url` for what is expected here :param **sessionparams: Keyword arguments are set as session attribute. Every attribute of a requests.Session is allowed, these include: headers (default headers), auth, proxies, params (default parameters), verify, cert (client certificate path), trust_env and more. # Set default Accept header to JSON, as the Icinga2 API uses that # Set session parameters like verify, proxies, auth, ... Prepare the base_url for usage. This static method adds scheme, trailing '/', API version suffix and Icinga port defaults if not specified. This method is called by :meth:`__init__`. :param url: The URL to prepare for client usage :raises ValueError: if the url parameter is False in a boolean context :return: The prepared base URL # Prefix https if not specified # Append '/' to URL if not already there # Suffix API version # Set port if not specified The class used as a request. This property is expected to return a callable, that acts like :class:`icinga2api_py.models.APIRequest`. It's usually only used internally, overwriting it in subclasses enables to easily change client behavior. Create a custom response from a requests.Response. Simplified creation of an API object. Clone the given client. The returned object is something like a shallow copy of the given obj, but only attributes usually used with this class are copied. That this method will return a shallow copy comes with the possibly unwanted effected that e.g. updating headers for the clone also updates the headers of the original object. Attribute assignments will have no effect on clone objects of course. Get a shallow copy. Return a RequestBuilder object with the given first item. Return a RequestBuilder object with the given first item. Return a RequestBuilder object with the given first item. Class to build a request and it's dictionary (JSON) body. The RequestBuilder needs an API client object to init, mainly to pass it on to the request class. # last attribute -> call to put in body, or not to add it to the URL # URL Builder, joined with "/" at the and # Request body as dictionary Helper method, as this functionality is needed twice. The "last used attr" is rotated. If nothing was done with the last used attr yet, it's added to the URL builder list. 
A new last used attr is set if given. Add item to URL path OR prepare item for put in body OR construct a request. Add item to URL path OR prepare item for put in body OR construct a request. Add item to URL path OR prepare item for put in body OR construct a request. # item was a accepted HTTP method -> construct a request # Construct URL with base url from api client + the "/"-joined builder list Call this object to put the last item into the body. The last item is put into the JSON-encoded body as a key, with the arguments this is called with as its value(s). Passing one argument causes this argument to be the value. When passing multiple arguments, the value is a list of these arguments. When no arguments are passed, the body value for this key is deleted. Things are a bit different, when the body already has a value with such a key. If a single arg gets passed and a single arg already is the value, the value gets a list of these two. If a single arg gets passed and the value has already multiple args, the new arg gets appended to the value. If multiple args are passed and there is a value, the value gets a list of all (args and value list items). # Reset last used attr # Exactly one arg given # Single item + single item => list # Add single item to multiple existing items # Set single item # More than one arg given # Add multiple items to single item # Add multiple items to multiple items # Set multiple items # No arg given -> delete | 3.266523 | 3 |
simplemooc/courses/views.py | KelsonMaciel/simplemooc- | 0 | 6632683 | from django.shortcuts import render, get_object_or_404, redirect
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from .models import Course, Enrollment, Announcement, Lesson, Material
from .forms import ContactCourse, CommentForm
from .decorators import enrollment_required
def index(request):
courses = Course.objects.all()
template_name = 'courses/index.html'
context = {
'courses': courses
}
return render(request, template_name, context)
# def details(request, pk):
# course = get_object_or_404(Course, pk=pk)
# context = {
# 'course': course
# }
# template_name = 'courses/details.html'
# return render(request, template_name, context)
def details(request, slug):
course = get_object_or_404(Course, slug=slug)
context = {}
if request.method == 'POST':
form = ContactCourse(request.POST)
if form.is_valid():
context['is_valid'] = True
form.send_mail(course)
form = ContactCourse()
else:
form = ContactCourse()
context['form'] = form
context['course'] = course
template_name = 'courses/details.html'
return render(request, template_name, context)
@login_required
def enrollment(request, slug):
course = get_object_or_404(Course, slug=slug)
enrollment, created = Enrollment.objects.get_or_create(
user=request.user, course=course
)
if created:
# enrollment.active()
messages.success(request, 'Você foi inscrito no curso com sucesso')
else:
messages.info(request, 'Você já está inscrito no curso')
return redirect('accounts:dashboard')
@login_required
def undo_enrollment(request, slug):
course = get_object_or_404(Course, slug=slug)
enrollment = get_object_or_404(
Enrollment, user=request.user, course=course
)
if request.method == 'POST':
enrollment.delete()
messages.success(request, 'Sua inscrição foi cancelada com sucesso')
return redirect('accounts:dashboard')
template = 'courses/undo_enrollment.html'
context = {
'enrollment': enrollment,
'course': course,
}
return render(request, template, context)
@login_required
@enrollment_required
def announcements(request, slug):
course = request.course
template = 'courses/announcements.html'
context = {
'course': course,
'announcements': course.announcements.all()
}
return render(request, template, context)
@login_required
@enrollment_required
def show_announcement(request, slug, pk):
course = request.course
announcement = get_object_or_404(course.announcements.all(), pk=pk)
form = CommentForm(request.POST or None)
if form.is_valid():
comment = form.save(commit=False)
comment.user = request.user
comment.announcement = announcement
comment.save()
form = CommentForm()
messages.success(request, 'Seu comentário foi enviado com sucesso')
template = 'courses/show_announcement.html'
context = {
'course': course,
'announcement': announcement,
'form': form,
}
return render(request, template, context)
@login_required
@enrollment_required
def lessons(request, slug):
course = request.course
template = 'courses/lessons.html'
lessons = course.release_lessons()
if request.user.is_staff:
lessons = course.lessons.all()
context = {
'course': course,
'lessons': lessons
}
return render(request, template, context)
@login_required
@enrollment_required
def lesson(request, slug, pk):
course = request.course
lesson = get_object_or_404(Lesson, pk=pk, course=course)
if not request.user.is_staff and not lesson.is_available():
messages.error(request, 'Esta aula não está disponível')
return redirect('courses:lessons', slug=course.slug)
template = 'courses/lesson.html'
context = {
'course': course,
'lesson': lesson
}
return render(request, template, context)
@login_required
@enrollment_required
def material(request, slug, pk):
course = request.course
material = get_object_or_404(Material, pk=pk, lesson__course=course)
lesson = material.lesson
if not request.user.is_staff and not lesson.is_available():
messages.error(request, 'Este material não está disponível')
return redirect('courses:lesson', slug=course.slug, pk=lesson.pk)
if not material.is_embedded():
return redirect(material.file.url)
template = 'courses/material.html'
context = {
'course': course,
'lesson': lesson,
'material': material,
}
return render(request, template, context)
| from django.shortcuts import render, get_object_or_404, redirect
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from .models import Course, Enrollment, Announcement, Lesson, Material
from .forms import ContactCourse, CommentForm
from .decorators import enrollment_required
def index(request):
courses = Course.objects.all()
template_name = 'courses/index.html'
context = {
'courses': courses
}
return render(request, template_name, context)
# def details(request, pk):
# course = get_object_or_404(Course, pk=pk)
# context = {
# 'course': course
# }
# template_name = 'courses/details.html'
# return render(request, template_name, context)
def details(request, slug):
course = get_object_or_404(Course, slug=slug)
context = {}
if request.method == 'POST':
form = ContactCourse(request.POST)
if form.is_valid():
context['is_valid'] = True
form.send_mail(course)
form = ContactCourse()
else:
form = ContactCourse()
context['form'] = form
context['course'] = course
template_name = 'courses/details.html'
return render(request, template_name, context)
@login_required
def enrollment(request, slug):
course = get_object_or_404(Course, slug=slug)
enrollment, created = Enrollment.objects.get_or_create(
user=request.user, course=course
)
if created:
# enrollment.active()
messages.success(request, 'Você foi inscrito no curso com sucesso')
else:
messages.info(request, 'Você já está inscrito no curso')
return redirect('accounts:dashboard')
@login_required
def undo_enrollment(request, slug):
course = get_object_or_404(Course, slug=slug)
enrollment = get_object_or_404(
Enrollment, user=request.user, course=course
)
if request.method == 'POST':
enrollment.delete()
messages.success(request, 'Sua inscrição foi cancelada com sucesso')
return redirect('accounts:dashboard')
template = 'courses/undo_enrollment.html'
context = {
'enrollment': enrollment,
'course': course,
}
return render(request, template, context)
@login_required
@enrollment_required
def announcements(request, slug):
course = request.course
template = 'courses/announcements.html'
context = {
'course': course,
'announcements': course.announcements.all()
}
return render(request, template, context)
@login_required
@enrollment_required
def show_announcement(request, slug, pk):
course = request.course
announcement = get_object_or_404(course.announcements.all(), pk=pk)
form = CommentForm(request.POST or None)
if form.is_valid():
comment = form.save(commit=False)
comment.user = request.user
comment.announcement = announcement
comment.save()
form = CommentForm()
messages.success(request, 'Seu comentário foi enviado com sucesso')
template = 'courses/show_announcement.html'
context = {
'course': course,
'announcement': announcement,
'form': form,
}
return render(request, template, context)
@login_required
@enrollment_required
def lessons(request, slug):
course = request.course
template = 'courses/lessons.html'
lessons = course.release_lessons()
if request.user.is_staff:
lessons = course.lessons.all()
context = {
'course': course,
'lessons': lessons
}
return render(request, template, context)
@login_required
@enrollment_required
def lesson(request, slug, pk):
course = request.course
lesson = get_object_or_404(Lesson, pk=pk, course=course)
if not request.user.is_staff and not lesson.is_available():
messages.error(request, 'Esta aula não está disponível')
return redirect('courses:lessons', slug=course.slug)
template = 'courses/lesson.html'
context = {
'course': course,
'lesson': lesson
}
return render(request, template, context)
@login_required
@enrollment_required
def material(request, slug, pk):
course = request.course
material = get_object_or_404(Material, pk=pk, lesson__course=course)
lesson = material.lesson
if not request.user.is_staff and not lesson.is_available():
messages.error(request, 'Este material não está disponível')
return redirect('courses:lesson', slug=course.slug, pk=lesson.pk)
if not material.is_embedded():
return redirect(material.file.url)
template = 'courses/material.html'
context = {
'course': course,
'lesson': lesson,
'material': material,
}
return render(request, template, context)
| en | 0.465006 | # def details(request, pk): # course = get_object_or_404(Course, pk=pk) # context = { # 'course': course # } # template_name = 'courses/details.html' # return render(request, template_name, context) # enrollment.active() | 1.967266 | 2 |
libtrellis/3rdparty/pybind11/pybind11/__main__.py | antmicro/prjtrellis | 2 | 6632684 | # pylint: disable=missing-function-docstring
import argparse
import sys
import sysconfig
from .commands import get_cmake_dir, get_include
def print_includes() -> None:
dirs = [
sysconfig.get_path("include"),
sysconfig.get_path("platinclude"),
get_include(),
]
# Make unique but preserve order
unique_dirs = []
for d in dirs:
if d and d not in unique_dirs:
unique_dirs.append(d)
print(" ".join("-I" + d for d in unique_dirs))
def main() -> None:
parser = argparse.ArgumentParser()
parser.add_argument(
"--includes",
action="store_true",
help="Include flags for both pybind11 and Python headers.",
)
parser.add_argument(
"--cmakedir",
action="store_true",
help="Print the CMake module directory, ideal for setting -Dpybind11_ROOT in CMake.",
)
args = parser.parse_args()
if not sys.argv[1:]:
parser.print_help()
if args.includes:
print_includes()
if args.cmakedir:
print(get_cmake_dir())
if __name__ == "__main__":
main()
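# Example invocations when this module is run as "python -m pybind11" (the printed paths depend
# on the local Python and pybind11 installation, so the outputs described are only indicative):
#   python -m pybind11 --includes   # prints "-I<python include dir> -I<pybind11 include dir>"
#   python -m pybind11 --cmakedir   # prints the CMake module directory for -Dpybind11_ROOT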
| # pylint: disable=missing-function-docstring
import argparse
import sys
import sysconfig
from .commands import get_cmake_dir, get_include
def print_includes() -> None:
dirs = [
sysconfig.get_path("include"),
sysconfig.get_path("platinclude"),
get_include(),
]
# Make unique but preserve order
unique_dirs = []
for d in dirs:
if d and d not in unique_dirs:
unique_dirs.append(d)
print(" ".join("-I" + d for d in unique_dirs))
def main() -> None:
parser = argparse.ArgumentParser()
parser.add_argument(
"--includes",
action="store_true",
help="Include flags for both pybind11 and Python headers.",
)
parser.add_argument(
"--cmakedir",
action="store_true",
help="Print the CMake module directory, ideal for setting -Dpybind11_ROOT in CMake.",
)
args = parser.parse_args()
if not sys.argv[1:]:
parser.print_help()
if args.includes:
print_includes()
if args.cmakedir:
print(get_cmake_dir())
if __name__ == "__main__":
main()
| en | 0.689051 | # pylint: disable=missing-function-docstring # Make unique but preserve order | 2.341991 | 2 |
previous_work/interface_functions.py | orangewaxcap/cellcounter | 0 | 6632685 | """
This file contains various functions used in the main
counting notebook.
"""
import matplotlib.pyplot as plt
import numpy as np
from skimage import exposure, feature, filters, io, morphology
from ipywidgets import widgets, interactive, fixed, interact_manual
from math import sqrt
from collections import Counter
import xml.etree.ElementTree as ET
from xml.dom import minidom
from IPython.display import display
"""
OLD FUNCTIONS BELOW
"""
DUMMY_FILE = 'Drd2_Adult_S_conf_25X_CPU1_cryo_NAV.jpg'
def load_and_get_name_of_image(name=DUMMY_FILE,
channel=1,
cmap='jet',
show_image=True
):
"""
Loads an image, strips out the desired channel,
and optionally displays the image.
Returns image and its filename for later
use.
"""
rawpic = io.imread(name)
img = rawpic[:,:, channel]
if show_image:
io.imshow(img, cmap=cmap)
return img, name
def adjust_image(image,
lower_thresh=2, upper_thresh=98,
filter_size=0,
cmap='jet'
):
"""
Applies contrast stretching to an image, then
uses a white top-hat filter to remove small patches
of brightness that would cause false positives later.
Input: image; values for min and max percentile
brightnesses to keep; size below which bright patches
will be removed; colourmap.
Output: image, hopefully with most of the background stripped
out. If it's worked well, it'll look like bright blobs on a
dark background.
"""
p2, p98 = np.percentile(image, (lower_thresh, upper_thresh))
img_rescale = exposure.rescale_intensity(image, in_range=(p2, p98))
selem = morphology.disk(filter_size)
wht_tophat = morphology.white_tophat(img_rescale,selem=selem)
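    # rescale_intensity stretches everything between the chosen percentiles to the full
    # intensity range; the white top-hat (structuring element: disk(filter_size)) picks out
    # bright specks smaller than that disk so they can be subtracted in the preview below.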
io.imshow(img_rescale - wht_tophat, cmap=cmap)
return img_rescale
def detect_blobs(original_image,
processed_image,
max_sigma=30,
threshold=0.1
):
"""
Detects bright blobs in an image using the scikit-image
    difference of Gaussian (blob_dog) technique, then marks them on the
image.
Input: original and processed images; max_sigma to determine
upper limit for blob size; threshold to determine how bright
something needs to be before it's identified as a blob.
Output: displays image with red rings around detected blobs;
returns array of blob markers (y,x,radius).
"""
blobs_dog = feature.blob_dog(processed_image,
max_sigma=max_sigma,
threshold=threshold
)
blobs_dog[:, 2] = blobs_dog[:, 2] * sqrt(2) #radius calcs
fig,axes = plt.subplots(ncols=3, figsize=(16,12))
ax_im_pairs = list(zip(axes,
(original_image,
processed_image,
original_image),
(False,True,True)
))
for ax,im,draw in ax_im_pairs:
ax.imshow(im)
if draw == True:
for blob in blobs_dog:
y,x,r = blob
c = plt.Circle((x, y), r,
color='r',
linewidth=2,
fill=False
)
ax.add_patch(c)
print("{} blobs detected.".format(len(blobs_dog)))
return blobs_dog
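# A hedged end-to-end sketch of how the helpers above fit together. The thresholds below are
# arbitrary example values (not tuned recommendations) and the default DUMMY_FILE is reused.
def example_blob_pipeline(name=DUMMY_FILE):
    """Load an image, enhance it, detect blobs and export the markers for ImageJ."""
    img, fname = load_and_get_name_of_image(name=name, channel=1, show_image=False)
    processed = adjust_image(img, lower_thresh=2, upper_thresh=98, filter_size=5)
    blobs = detect_blobs(img, processed, max_sigma=30, threshold=0.1)
    save_for_imagej(blobs[:, :2], fname)  # save_for_imagej expects (y, x) pairs
    return blobs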
def save_for_imagej(coords, name):
"""
Dumps out the blob coordinates as an XML file suitable for
use in the ImageJ cell counter plugin. If the file name isn't
a perfect match with the one you pass to this function, the
cell counter plugin will throw an error and not work. It's a
really fragile safety check and ought to be bypassable, but
the plugin hasn't been updated for ten years soo......
Inputs: coordinates array from the detect_blobs function; file
name stem from load_and_get_name_of_image function.
Output: saves an XML file for use in ImageJ. All markers will
be saved as marker type 1 for simplicity.
"""
def prettify(elem):
"""
Return a pretty-printed XML string for the element.
From https://gist.github.com/jefftriplett/3980637
"""
rough_string = ET.tostring(elem, 'utf-8')
reparsed = minidom.parseString(rough_string)
return reparsed.toprettyxml(indent=" ", encoding="UTF-8").decode('UTF8')
marker_types = list(range(1,9))
marks_to_store = {1: coords}
root = ET.Element("CellCounter_Marker_File")
imgprops = ET.SubElement(root, "Image_Properties")
imgfname = ET.SubElement(imgprops, "Image_Filename")
imgfname.text = name
markerdata = ET.SubElement(root, "Marker_Data")
curtype = ET.SubElement(markerdata, "Current_Type")
curtype.text = '0'
for i in marker_types:
marks_container = ET.SubElement(markerdata, "Marker_Type")
mtype = ET.SubElement(marks_container, "Type")
mtype.text = str(i)
if i in marks_to_store.keys():
for y,x in marks_to_store[i]:
mark = ET.SubElement(mtype, "Marker")
markx = ET.SubElement(mark, "MarkerX")
markx.text = str(int(x))
marky = ET.SubElement(mark, "MarkerY")
marky.text = str(int(y))
markz = ET.SubElement(mark, "MarkerZ")
markz.text = str(int(1))
filestem = name.split('.')[0]
with open('{}.xml'.format(filestem), 'w') as f:
f.write(prettify(root))
print("File saved: {}.xml".format(filestem)) | """
This file contains various functions used in the main
counting notebook.
"""
import matplotlib.pyplot as plt
import numpy as np
from skimage import exposure, feature, filters, io, morphology
from ipywidgets import widgets, interactive, fixed, interact_manual
from math import sqrt
from collections import Counter
import xml.etree.ElementTree as ET
from xml.dom import minidom
from IPython.display import display
"""
OLD FUNCTIONS BELOW
"""
DUMMY_FILE = 'Drd2_Adult_S_conf_25X_CPU1_cryo_NAV.jpg'
def load_and_get_name_of_image(name=DUMMY_FILE,
channel=1,
cmap='jet',
show_image=True
):
"""
Loads an image, strips out the desired channel,
and optionally displays the image.
Returns image and its filename for later
use.
"""
rawpic = io.imread(name)
img = rawpic[:,:, channel]
if show_image:
io.imshow(img, cmap=cmap)
return img, name
def adjust_image(image,
lower_thresh=2, upper_thresh=98,
filter_size=0,
cmap='jet'
):
"""
Applies contrast stretching to an image, then
uses a white top-hat filter to remove small patches
of brightness that would cause false positives later.
Input: image; values for min and max percentile
brightnesses to keep; size below which bright patches
will be removed; colourmap.
Output: image, hopefully with most of the background stripped
out. If it's worked well, it'll look like bright blobs on a
dark background.
"""
p2, p98 = np.percentile(image, (lower_thresh, upper_thresh))
img_rescale = exposure.rescale_intensity(image, in_range=(p2, p98))
selem = morphology.disk(filter_size)
wht_tophat = morphology.white_tophat(img_rescale,selem=selem)
io.imshow(img_rescale - wht_tophat, cmap=cmap)
return img_rescale
def detect_blobs(original_image,
processed_image,
max_sigma=30,
threshold=0.1
):
"""
Detects bright blobs in an image using the scikit-image
    difference of Gaussian (blob_dog) technique, then marks them on the
image.
Input: original and processed images; max_sigma to determine
upper limit for blob size; threshold to determine how bright
something needs to be before it's identified as a blob.
Output: displays image with red rings around detected blobs;
returns array of blob markers (y,x,radius).
"""
blobs_dog = feature.blob_dog(processed_image,
max_sigma=max_sigma,
threshold=threshold
)
blobs_dog[:, 2] = blobs_dog[:, 2] * sqrt(2) #radius calcs
fig,axes = plt.subplots(ncols=3, figsize=(16,12))
ax_im_pairs = list(zip(axes,
(original_image,
processed_image,
original_image),
(False,True,True)
))
for ax,im,draw in ax_im_pairs:
ax.imshow(im)
if draw == True:
for blob in blobs_dog:
y,x,r = blob
c = plt.Circle((x, y), r,
color='r',
linewidth=2,
fill=False
)
ax.add_patch(c)
print("{} blobs detected.".format(len(blobs_dog)))
return blobs_dog
def save_for_imagej(coords, name):
"""
Dumps out the blob coordinates as an XML file suitable for
use in the ImageJ cell counter plugin. If the file name isn't
a perfect match with the one you pass to this function, the
cell counter plugin will throw an error and not work. It's a
really fragile safety check and ought to be bypassable, but
the plugin hasn't been updated for ten years soo......
Inputs: coordinates array from the detect_blobs function; file
name stem from load_and_get_name_of_image function.
Output: saves an XML file for use in ImageJ. All markers will
be saved as marker type 1 for simplicity.
"""
def prettify(elem):
"""
Return a pretty-printed XML string for the element.
From https://gist.github.com/jefftriplett/3980637
"""
rough_string = ET.tostring(elem, 'utf-8')
reparsed = minidom.parseString(rough_string)
return reparsed.toprettyxml(indent=" ", encoding="UTF-8").decode('UTF8')
marker_types = list(range(1,9))
marks_to_store = {1: coords}
root = ET.Element("CellCounter_Marker_File")
imgprops = ET.SubElement(root, "Image_Properties")
imgfname = ET.SubElement(imgprops, "Image_Filename")
imgfname.text = name
markerdata = ET.SubElement(root, "Marker_Data")
curtype = ET.SubElement(markerdata, "Current_Type")
curtype.text = '0'
for i in marker_types:
marks_container = ET.SubElement(markerdata, "Marker_Type")
mtype = ET.SubElement(marks_container, "Type")
mtype.text = str(i)
if i in marks_to_store.keys():
for y,x in marks_to_store[i]:
mark = ET.SubElement(mtype, "Marker")
markx = ET.SubElement(mark, "MarkerX")
markx.text = str(int(x))
marky = ET.SubElement(mark, "MarkerY")
marky.text = str(int(y))
markz = ET.SubElement(mark, "MarkerZ")
markz.text = str(int(1))
filestem = name.split('.')[0]
with open('{}.xml'.format(filestem), 'w') as f:
f.write(prettify(root))
print("File saved: {}.xml".format(filestem)) | en | 0.846635 | This file contains various functions used in the main counting notebook. OLD FUNCTIONS BELOW Loads an image, strips out the desired channel, and optionally displays the image. Returns image and its filename for later use. Applies contrast stretching to an image, then uses a white top-hat filter to remove small patches of brightness that would cause false positives later. Input: image; values for min and max percentile brightnesses to keep; size below which bright patches will be removed; colourmap. Output: image, hopefully with most of the background stripped out. If it's worked well, it'll look like bright blobs on a dark background. Detects bright blobs in an image using the scikit-image determinant of gaussian technique, then marks them on the image. Input: original and processed images; max_sigma to determine upper limit for blob size; threshold to determine how bright something needs to be before it's identified as a blob. Output: displays image with red rings around detected blobs; returns array of blob markers (y,x,radius). #radius calcs Dumps out the blob coordinates as an XML file suitable for use in the ImageJ cell counter plugin. If the file name isn't a perfect match with the one you pass to this function, the cell counter plugin will throw an error and not work. It's a really fragile safety check and ought to be bypassable, but the plugin hasn't been updated for ten years soo...... Inputs: coordinates array from the detect_blobs function; file name stem from load_and_get_name_of_image function. Output: saves an XML file for use in ImageJ. All markers will be saved as marker type 1 for simplicity. Return a pretty-printed XML string for the element. From https://gist.github.com/jefftriplett/3980637 | 2.654614 | 3 |
scripts/data_processing/bckgrd_subtraction/bckgrd_subtract_batch_spline.py | fang-ren/segmentation_CoFeZr | 0 | 6632686 | # -*- coding: utf-8 -*-
"""
Created on Wed Aug 03 16:22:25 2016
@author: fangren
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import splev, splrep
import os
import csv
def file_index(index):
if len(str(index)) == 1:
return '000' + str(index)
elif len(str(index)) == 2:
return '00' + str(index)
elif len(str(index)) == 3:
return '0' + str(index)
elif len(str(index)) == 4:
return str(index)
def read_1D(filename):
data = np.genfromtxt(filename, delimiter=',', skip_header = 0)
Qlist = data[:,0][8:937]
IntAve = data[:,1][3:][8:937]
return Qlist, IntAve
def select_bckgrd(background_indices):
background_x = Qlist[background_indices]
background_y = IntAve[background_indices]
return background_x, background_y
def save_results(background_x, background_y, Qlist, IntAve, index, base_filename):
indices = range(1, 930)
plt.figure(1)
plt.subplot(311)
plt.plot(indices, IntAve)
plt.plot(background_indices, background_y, 'o')
tck = splrep(background_x,background_y)
background = splev(Qlist, tck)
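    # splrep fits a cubic smoothing B-spline through the hand-picked background points and
    # splev evaluates that spline over the full Q range, giving the background estimate that
    # is subtracted from the averaged 1D intensity below.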
plt.subplot(312)
plt.plot(Qlist, IntAve)
plt.plot(Qlist, background)
plt.plot(background_x, background_y, 'o')
plt.subplot(313)
plt.plot(Qlist, (IntAve-background))
plt.plot(Qlist, [0]*929, 'r--')
plt.savefig(save_path + base_filename + file_index(index) + 'bckgrd_subtract.png' )
plt.close('all')
rows = zip(Qlist, (IntAve-background))
with open(save_path + base_filename + file_index(index) + 'bckgrd_subtracted.csv', 'a') as csvoutput:
writer = csv.writer(csvoutput, delimiter = ',', lineterminator = '\n')
for row in rows:
writer.writerow(row)
csvoutput.close()
background_indices = [5,42, 92, 142, 180, 570, 696, 730, 784, 802, 841, 863, 882, 895, 903, 925]
folder_path = 'C:\\Research_FangRen\\Data\\July2016\\CoZrFe_ternary\\1D\\Sample16\\'
base_filename = 'Sample16_2thin_24x24_t30_'
save_path = folder_path + 'background_subtracted\\'
if not os.path.exists(save_path):
os.makedirs(save_path)
index = 1
basefile_path = folder_path + base_filename
while (index <= 441):
    print('processing', basefile_path + file_index(index) + '_1D.csv')
filename = basefile_path + file_index(index) + '_1D.csv'
Qlist, IntAve = read_1D(filename)
background_x, background_y = select_bckgrd(background_indices)
save_results(background_x, background_y, Qlist, IntAve, index, base_filename)
index += 1
| # -*- coding: utf-8 -*-
"""
Created on Wed Aug 03 16:22:25 2016
@author: fangren
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import splev, splrep
import os
import csv
def file_index(index):
if len(str(index)) == 1:
return '000' + str(index)
elif len(str(index)) == 2:
return '00' + str(index)
elif len(str(index)) == 3:
return '0' + str(index)
elif len(str(index)) == 4:
return str(index)
def read_1D(filename):
data = np.genfromtxt(filename, delimiter=',', skip_header = 0)
Qlist = data[:,0][8:937]
IntAve = data[:,1][3:][8:937]
return Qlist, IntAve
def select_bckgrd(background_indices):
background_x = Qlist[background_indices]
background_y = IntAve[background_indices]
return background_x, background_y
def save_results(background_x, background_y, Qlist, IntAve, index, base_filename):
indices = range(1, 930)
plt.figure(1)
plt.subplot(311)
plt.plot(indices, IntAve)
plt.plot(background_indices, background_y, 'o')
tck = splrep(background_x,background_y)
background = splev(Qlist, tck)
plt.subplot(312)
plt.plot(Qlist, IntAve)
plt.plot(Qlist, background)
plt.plot(background_x, background_y, 'o')
plt.subplot(313)
plt.plot(Qlist, (IntAve-background))
plt.plot(Qlist, [0]*929, 'r--')
plt.savefig(save_path + base_filename + file_index(index) + 'bckgrd_subtract.png' )
plt.close('all')
rows = zip(Qlist, (IntAve-background))
with open(save_path + base_filename + file_index(index) + 'bckgrd_subtracted.csv', 'a') as csvoutput:
writer = csv.writer(csvoutput, delimiter = ',', lineterminator = '\n')
for row in rows:
writer.writerow(row)
csvoutput.close()
background_indices = [5,42, 92, 142, 180, 570, 696, 730, 784, 802, 841, 863, 882, 895, 903, 925]
folder_path = 'C:\\Research_FangRen\\Data\\July2016\\CoZrFe_ternary\\1D\\Sample16\\'
base_filename = 'Sample16_2thin_24x24_t30_'
save_path = folder_path + 'background_subtracted\\'
if not os.path.exists(save_path):
os.makedirs(save_path)
index = 1
basefile_path = folder_path + base_filename
while (index <= 441):
    print('processing', basefile_path + file_index(index) + '_1D.csv')
filename = basefile_path + file_index(index) + '_1D.csv'
Qlist, IntAve = read_1D(filename)
background_x, background_y = select_bckgrd(background_indices)
save_results(background_x, background_y, Qlist, IntAve, index, base_filename)
index += 1
| en | 0.733766 | # -*- coding: utf-8 -*- Created on Wed Aug 03 16:22:25 2016 @author: fangren | 2.630889 | 3 |
pysmt/plugins.py | div72/py2many | 345 | 6632687 | <gh_stars>100-1000
import io
import os
import random
import sys
import time
from tempfile import NamedTemporaryFile
from typing import Callable, Dict, List, Tuple, Union
try:
from argparse_dataclass import dataclass as ap_dataclass
from argparse_dataclass import ArgumentParser
except:
ArgumentParser = "ArgumentParser"
ap_dataclass = "ap_dataclass"
class SmtTranspilerPlugins:
def visit_open(self, node, vargs):
# TODO
return None
def visit_named_temp_file(self, node, vargs):
# TODO
return None
def visit_textio_read(self, node, vargs):
# TODO
return None
def visit_textio_write(self, node, vargs):
# TODO
return None
def visit_ap_dataclass(self, cls):
# Do whatever transformation the decorator does to cls here
return cls
def visit_range(self, node, vargs: List[str]) -> str:
if len(node.args) == 1:
return f"(0..{vargs[0]} - 1)"
elif len(node.args) == 2:
return f"({vargs[0]}..{vargs[1]} - 1)"
elif len(node.args) == 3:
return f"countup({vargs[0]}, {vargs[1]} - 1, {vargs[2]})"
raise Exception(
"encountered range() call with unknown parameters: range({})".format(vargs)
)
def visit_print(self, node, vargs: List[str]) -> str:
args = []
for n in vargs:
args.append(n)
args.append('" "')
args = ", ".join(args[:-1])
return f"echo {args}"
# small one liners are inlined here as lambdas
SMALL_DISPATCH_MAP = {
"str": lambda n, vargs: f"$({vargs[0]})",
"bool": lambda n, vargs: f"bool({vargs[0]})",
"floor": lambda n, vargs: f"int(floor({vargs[0]}))",
}
SMALL_USINGS_MAP: Dict[str, str] = {}
DISPATCH_MAP = {
"range": SmtTranspilerPlugins.visit_range,
"xrange": SmtTranspilerPlugins.visit_range,
"print": SmtTranspilerPlugins.visit_print,
}
MODULE_DISPATCH_TABLE: Dict[str, str] = {}
DECORATOR_DISPATCH_TABLE = {ap_dataclass: SmtTranspilerPlugins.visit_ap_dataclass}
CLASS_DISPATCH_TABLE: Dict[type, Callable] = {}
ATTR_DISPATCH_TABLE: Dict[type, Callable] = {}
FuncType = Union[Callable, str]
FUNC_DISPATCH_TABLE: Dict[FuncType, Tuple[Callable, bool]] = {
open: (SmtTranspilerPlugins.visit_open, True),
NamedTemporaryFile: (SmtTranspilerPlugins.visit_named_temp_file, True),
io.TextIOWrapper.read: (SmtTranspilerPlugins.visit_textio_read, True),
    io.TextIOWrapper.write: (SmtTranspilerPlugins.visit_textio_write, True),
os.unlink: (lambda self, node, vargs: f"std::fs::remove_file({vargs[0]})", True),
sys.exit: (lambda self, node, vargs: f"quit({vargs[0]})", True),
}
FUNC_USINGS_MAP = {
time.time: "pylib",
random.seed: "pylib",
random.random: "pylib",
}
| import io
import os
import random
import sys
import time
from tempfile import NamedTemporaryFile
from typing import Callable, Dict, List, Tuple, Union
try:
from argparse_dataclass import dataclass as ap_dataclass
from argparse_dataclass import ArgumentParser
except:
ArgumentParser = "ArgumentParser"
ap_dataclass = "ap_dataclass"
class SmtTranspilerPlugins:
def visit_open(self, node, vargs):
# TODO
return None
def visit_named_temp_file(self, node, vargs):
# TODO
return None
def visit_textio_read(self, node, vargs):
# TODO
return None
def visit_textio_write(self, node, vargs):
# TODO
return None
def visit_ap_dataclass(self, cls):
# Do whatever transformation the decorator does to cls here
return cls
def visit_range(self, node, vargs: List[str]) -> str:
if len(node.args) == 1:
return f"(0..{vargs[0]} - 1)"
elif len(node.args) == 2:
return f"({vargs[0]}..{vargs[1]} - 1)"
elif len(node.args) == 3:
return f"countup({vargs[0]}, {vargs[1]} - 1, {vargs[2]})"
raise Exception(
"encountered range() call with unknown parameters: range({})".format(vargs)
)
def visit_print(self, node, vargs: List[str]) -> str:
args = []
for n in vargs:
args.append(n)
args.append('" "')
args = ", ".join(args[:-1])
return f"echo {args}"
# small one liners are inlined here as lambdas
SMALL_DISPATCH_MAP = {
"str": lambda n, vargs: f"$({vargs[0]})",
"bool": lambda n, vargs: f"bool({vargs[0]})",
"floor": lambda n, vargs: f"int(floor({vargs[0]}))",
}
SMALL_USINGS_MAP: Dict[str, str] = {}
DISPATCH_MAP = {
"range": SmtTranspilerPlugins.visit_range,
"xrange": SmtTranspilerPlugins.visit_range,
"print": SmtTranspilerPlugins.visit_print,
}
MODULE_DISPATCH_TABLE: Dict[str, str] = {}
DECORATOR_DISPATCH_TABLE = {ap_dataclass: SmtTranspilerPlugins.visit_ap_dataclass}
CLASS_DISPATCH_TABLE: Dict[type, Callable] = {}
ATTR_DISPATCH_TABLE: Dict[type, Callable] = {}
FuncType = Union[Callable, str]
FUNC_DISPATCH_TABLE: Dict[FuncType, Tuple[Callable, bool]] = {
open: (SmtTranspilerPlugins.visit_open, True),
NamedTemporaryFile: (SmtTranspilerPlugins.visit_named_temp_file, True),
io.TextIOWrapper.read: (SmtTranspilerPlugins.visit_textio_read, True),
    io.TextIOWrapper.write: (SmtTranspilerPlugins.visit_textio_write, True),
os.unlink: (lambda self, node, vargs: f"std::fs::remove_file({vargs[0]})", True),
sys.exit: (lambda self, node, vargs: f"quit({vargs[0]})", True),
}
FUNC_USINGS_MAP = {
time.time: "pylib",
random.seed: "pylib",
random.random: "pylib",
} | en | 0.872029 | # TODO # TODO # TODO # TODO # Do whatever transformation the decorator does to cls here # small one liners are inlined here as lambdas | 2.694723 | 3 |
api_study/apps/trade/models.py | shidashui/django_restful_api_study | 2 | 6632688 | import datetime
from django.contrib.auth import get_user_model
from django.db import models
# Create your models here.
from goods.models import Goods
User = get_user_model()
class ShoppingCart(models.Model):
"""
    Shopping cart
"""
user = models.ForeignKey(User, on_delete=models.CASCADE, verbose_name="用户")
goods = models.ForeignKey(Goods, on_delete=models.CASCADE, verbose_name="商品")
nums = models.IntegerField("购买数量", default=0)
add_time = models.DateTimeField(verbose_name="添加时间", default=datetime.datetime.now)
class Meta:
verbose_name = "购物车"
verbose_name_plural = verbose_name
unique_together = ("user", "goods")
def __str__(self):
return "%s(%d)".format(self.goods.name, self.nums)
class OrderInfo(models.Model):
"""
    Order information
"""
ORDER_STATUS = (
("TRADE_SUCCESS", "成功"),
("TRADE_CLOSED", "超时关闭"),
("WAIT_BUYER_PAY", "交易创建"),
("TRADE_FINISHED", "交易结束"),
("paying", "待支付"),
)
PAY_TYPE = (
("alipay", "支付宝"),
("wechat", "微信"),
)
user = models.ForeignKey(User, on_delete=models.CASCADE, verbose_name="用户")
    # The order number must be unique
order_sn = models.CharField("订单编号", max_length=30, null=True, blank=True, unique=True)
    # Needed for WeChat Pay
nonce_str = models.CharField("随机加密串", max_length=50, null=True, blank=True)
    # Alipay transaction number
trade_no = models.CharField("交易号", max_length=100, unique=True, null=True, blank=True)
    # Payment status
pay_status = models.CharField("订单状态", choices=ORDER_STATUS, default="paying", max_length=30)
    # Order payment type
pay_type = models.CharField("支付类型", choices=PAY_TYPE, default="alipay", max_length=10)
post_script = models.CharField("订单留言", max_length=200)
order_mount = models.FloatField("订单金额", default=0.0)
pay_time = models.DateTimeField("支付时间", null=True, blank=True)
    # User information
address = models.CharField("收货地址", max_length=100, default="")
signer_name = models.CharField("签收人", max_length=20, default="")
signer_mobile = models.CharField("联系电话", max_length=11)
add_time = models.DateTimeField("添加时间", default=datetime.datetime.now)
class Meta:
verbose_name = "订单信息"
verbose_name_plural = verbose_name
def __str__(self):
return str(self.order_sn)
class OrderGoods(models.Model):
"""
    Details of the goods within an order
"""
    # One order corresponds to multiple goods
order = models.ForeignKey(OrderInfo, on_delete=models.CASCADE, verbose_name="订单信息", related_name="goods")
    # The two foreign keys form an association table
goods = models.ForeignKey(Goods, on_delete=models.CASCADE, verbose_name="商品")
goods_num = models.IntegerField("商品数量", default=0)
add_time = models.DateTimeField("添加时间", default=datetime.datetime.now)
class Meta:
verbose_name = "订单商品"
verbose_name_plural = verbose_name
def __str__(self):
return str(self.order.order_sn) | import datetime
from django.contrib.auth import get_user_model
from django.db import models
# Create your models here.
from goods.models import Goods
User = get_user_model()
class ShoppingCart(models.Model):
"""
购物车
"""
user = models.ForeignKey(User, on_delete=models.CASCADE, verbose_name="用户")
goods = models.ForeignKey(Goods, on_delete=models.CASCADE, verbose_name="商品")
nums = models.IntegerField("购买数量", default=0)
add_time = models.DateTimeField(verbose_name="添加时间", default=datetime.datetime.now)
class Meta:
verbose_name = "购物车"
verbose_name_plural = verbose_name
unique_together = ("user", "goods")
def __str__(self):
return "%s(%d)".format(self.goods.name, self.nums)
class OrderInfo(models.Model):
"""
订单信息
"""
ORDER_STATUS = (
("TRADE_SUCCESS", "成功"),
("TRADE_CLOSED", "超时关闭"),
("WAIT_BUYER_PAY", "交易创建"),
("TRADE_FINISHED", "交易结束"),
("paying", "待支付"),
)
PAY_TYPE = (
("alipay", "支付宝"),
("wechat", "微信"),
)
user = models.ForeignKey(User, on_delete=models.CASCADE, verbose_name="用户")
#订单号唯一
order_sn = models.CharField("订单编号", max_length=30, null=True, blank=True, unique=True)
#微信支付会用到
nonce_str = models.CharField("随机加密串", max_length=50, null=True, blank=True)
#支付宝交易号
trade_no = models.CharField("交易号", max_length=100, unique=True, null=True, blank=True)
#支付状态
pay_status = models.CharField("订单状态", choices=ORDER_STATUS, default="paying", max_length=30)
#订单支付类型
pay_type = models.CharField("支付类型", choices=PAY_TYPE, default="alipay", max_length=10)
post_script = models.CharField("订单留言", max_length=200)
order_mount = models.FloatField("订单金额", default=0.0)
pay_time = models.DateTimeField("支付时间", null=True, blank=True)
#用户信息
address = models.CharField("收货地址", max_length=100, default="")
signer_name = models.CharField("签收人", max_length=20, default="")
signer_mobile = models.CharField("联系电话", max_length=11)
add_time = models.DateTimeField("添加时间", default=datetime.datetime.now)
class Meta:
verbose_name = "订单信息"
verbose_name_plural = verbose_name
def __str__(self):
return str(self.order_sn)
class OrderGoods(models.Model):
"""
订单内的商品详情
"""
#一个订单对应多个商品
order = models.ForeignKey(OrderInfo, on_delete=models.CASCADE, verbose_name="订单信息", related_name="goods")
#两个外键形成一张关联表
goods = models.ForeignKey(Goods, on_delete=models.CASCADE, verbose_name="商品")
goods_num = models.IntegerField("商品数量", default=0)
add_time = models.DateTimeField("添加时间", default=datetime.datetime.now)
class Meta:
verbose_name = "订单商品"
verbose_name_plural = verbose_name
def __str__(self):
return str(self.order.order_sn) | zh | 0.96184 | # Create your models here. 购物车 订单信息 #订单号唯一 #微信支付会用到 #支付宝交易号 #支付状态 #订单支付类型 #用户信息 订单内的商品详情 #一个订单对应多个商品 #两个外键形成一张关联表 | 2.399927 | 2 |
tests/test_data/test_datasets/test_trackingnet_dataset.py | benxiao/mmtracking | 1 | 6632689 | # Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import tempfile
import mmcv
import numpy as np
from mmtrack.datasets import DATASETS as DATASETS
PREFIX = osp.join(osp.dirname(__file__), '../../data')
LASOT_ANN_PATH = f'{PREFIX}/demo_sot_data/lasot'
def test_format_results():
dataset_class = DATASETS.get('TrackingNetTestDataset')
dataset = dataset_class(
ann_file=osp.join(LASOT_ANN_PATH, 'lasot_test_dummy.json'),
pipeline=[])
results = []
for video_name in ['airplane-1', 'airplane-2']:
results.extend(
mmcv.list_from_file(
osp.join(LASOT_ANN_PATH, video_name, 'track_results.txt')))
track_results = []
for result in results:
x1, y1, x2, y2 = result.split(',')
track_results.append(
np.array([float(x1),
float(y1),
float(x2),
float(y2), 0.]))
track_results = dict(track_results=track_results)
tmp_dir = tempfile.TemporaryDirectory()
dataset.format_results(track_results, resfile_path=tmp_dir.name)
tmp_dir.cleanup()
| # Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import tempfile
import mmcv
import numpy as np
from mmtrack.datasets import DATASETS as DATASETS
PREFIX = osp.join(osp.dirname(__file__), '../../data')
LASOT_ANN_PATH = f'{PREFIX}/demo_sot_data/lasot'
def test_format_results():
dataset_class = DATASETS.get('TrackingNetTestDataset')
dataset = dataset_class(
ann_file=osp.join(LASOT_ANN_PATH, 'lasot_test_dummy.json'),
pipeline=[])
results = []
for video_name in ['airplane-1', 'airplane-2']:
results.extend(
mmcv.list_from_file(
osp.join(LASOT_ANN_PATH, video_name, 'track_results.txt')))
track_results = []
for result in results:
x1, y1, x2, y2 = result.split(',')
track_results.append(
np.array([float(x1),
float(y1),
float(x2),
float(y2), 0.]))
track_results = dict(track_results=track_results)
tmp_dir = tempfile.TemporaryDirectory()
dataset.format_results(track_results, resfile_path=tmp_dir.name)
tmp_dir.cleanup()
| en | 0.828799 | # Copyright (c) OpenMMLab. All rights reserved. | 2.362843 | 2 |
thrift/lib/py/util/trollius.py | fakeNetflix/facebook-repo-fbthrift | 15 | 6632690 | <gh_stars>10-100
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import trollius as asyncio
from trollius import (From, Return, )
from thrift.server.TTrolliusServer import ThriftClientProtocolFactory
from thrift.util.Decorators import protocol_manager
@asyncio.coroutine
def async_protocol_manager(coro):
_, protocol = yield From(coro)
raise Return(protocol_manager(protocol))
def create_client(client_klass,
host=None,
port=None,
loop=None,
timeouts=None,
client_type=None):
"""
create a Trollius thrift client and return a context manager for it
This is a coroutine
:param client_klass: thrift Client class
:param host: hostname/ip, None = loopback
:param port: port number
:param loop: Trollius event loop
:returns: a Context manager which provides the thrift client
"""
if not loop:
loop = asyncio.get_event_loop()
coro = loop.create_connection(
ThriftClientProtocolFactory(
client_klass,
loop=loop,
timeouts=timeouts,
client_type=client_type,
),
host=host,
port=port,
)
return async_protocol_manager(coro)
def call_as_future(callable, loop, *args, **kwargs):
"""This is a copy of thrift.util.asyncio. So, let's consider unifying them.
call_as_future(callable, *args, **kwargs) -> trollius.Task
Like trollius.ensure_future() but takes any callable and converts
it to a coroutine function first.
"""
if not asyncio.iscoroutinefunction(callable):
callable = asyncio.coroutine(callable)
return asyncio.ensure_future(callable(*args, **kwargs), loop=loop)
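# Hedged usage sketch (the service name and port are made up; method-call semantics on the
# generated client come from the thrift runtime and are not defined in this file):
#   @asyncio.coroutine
#   def query(loop):
#       manager = yield From(create_client(MyService.Client, host='127.0.0.1',
#                                          port=9090, loop=loop))
#       with manager as client:
#           result = yield From(client.ping())
#           raise Return(result)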
| from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import trollius as asyncio
from trollius import (From, Return, )
from thrift.server.TTrolliusServer import ThriftClientProtocolFactory
from thrift.util.Decorators import protocol_manager
@asyncio.coroutine
def async_protocol_manager(coro):
_, protocol = yield From(coro)
raise Return(protocol_manager(protocol))
def create_client(client_klass,
host=None,
port=None,
loop=None,
timeouts=None,
client_type=None):
"""
create a Trollius thrift client and return a context manager for it
This is a coroutine
:param client_klass: thrift Client class
:param host: hostname/ip, None = loopback
:param port: port number
:param loop: Trollius event loop
:returns: a Context manager which provides the thrift client
"""
if not loop:
loop = asyncio.get_event_loop()
coro = loop.create_connection(
ThriftClientProtocolFactory(
client_klass,
loop=loop,
timeouts=timeouts,
client_type=client_type,
),
host=host,
port=port,
)
return async_protocol_manager(coro)
def call_as_future(callable, loop, *args, **kwargs):
"""This is a copy of thrift.util.asyncio. So, let's consider unifying them.
call_as_future(callable, *args, **kwargs) -> trollius.Task
Like trollius.ensure_future() but takes any callable and converts
it to a coroutine function first.
"""
if not asyncio.iscoroutinefunction(callable):
callable = asyncio.coroutine(callable)
return asyncio.ensure_future(callable(*args, **kwargs), loop=loop) | en | 0.75274 | create a Trollius thrift client and return a context manager for it This is a coroutine :param client_klass: thrift Client class :param host: hostname/ip, None = loopback :param port: port number :param loop: Trollius event loop :returns: a Context manager which provides the thrift client This is a copy of thrift.util.asyncio. So, let's consider unifying them. call_as_future(callable, *args, **kwargs) -> trollius.Task Like trollius.ensure_future() but takes any callable and converts it to a coroutine function first. | 2.51644 | 3 |
src/pyrad_proc/pyrad/EGG-INFO/scripts/main_process_data_birds.py | jfigui/pyrad | 41 | 6632691 | #!/home/daniel/anaconda3/bin/python
# -*- coding: utf-8 -*-
"""
================================================
Pyrad: The MeteoSwiss Radar Processing framework
================================================
Welcome to Pyrad!
This program processes bird data
"""
# Author: fvj
# License: BSD 3 clause
import datetime
import argparse
import atexit
import os
import glob
from warnings import warn
from pyrad.flow.flow_control import main as pyrad_main
from pyrad.io import get_fieldname_pyart
from pyrad.io import read_profile_ts
from pyrad.graph import get_field_name, _plot_time_range
from pyart.config import get_metadata
print(__doc__)
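# Example invocation (the configuration file name below is a placeholder for a real pyrad
# processing config; times follow the YYYYMMDDhhmmss format expected by the arguments):
#   python main_process_data_birds.py birds_proc.txt 20180601000000 20180601235959 \
#       --cfgpath /home/user/pyrad/config/processing/ --hres 200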
def main():
"""
"""
# parse the arguments
parser = argparse.ArgumentParser(
description='Entry to Pyrad processing framework')
# positional arguments
parser.add_argument(
'proc_cfgfile', type=str, help='name of main configuration file')
parser.add_argument(
'starttime', type=str,
help=('starting time of the data to be processed. ' +
'Format ''YYYYMMDDhhmmss'''))
parser.add_argument(
'endtime', type=str,
help='end time of the data to be processed. Format ''YYYYMMDDhhmmss''')
# keyword arguments
parser.add_argument(
'--cfgpath', type=str,
default=os.path.expanduser('~')+'/pyrad/config/processing/',
help='configuration file path')
parser.add_argument(
'--storepath', type=str,
default='/store/msrad/radar/pyrad_products/rad4alp_birds_PHA/',
help='Base data storing path')
parser.add_argument(
'--hres', type=int, default=200, help='Height resolution [m]')
args = parser.parse_args()
print("====== PYRAD data processing started: %s" %
datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S"))
atexit.register(_print_end_msg,
"====== PYRAD data processing finished: ")
print('config path: '+args.cfgpath)
print('config file: '+args.proc_cfgfile)
print('start time: '+args.starttime)
print('end time: '+args.endtime)
proc_starttime = datetime.datetime.strptime(
args.starttime, '%Y%m%d%H%M%S')
proc_endtime = datetime.datetime.strptime(
args.endtime, '%Y%m%d%H%M%S')
cfgfile_proc = args.cfgpath+args.proc_cfgfile
pyrad_main(cfgfile_proc, starttime=proc_starttime, endtime=proc_endtime)
# Plot time-height
file_base = args.storepath
hres = args.hres
datatype_list = [
'dBZc', 'eta_h', 'bird_density', 'WIND_SPEED', 'WIND_DIRECTION',
'wind_vel_h_u', 'wind_vel_h_v', 'wind_vel_v']
startdate = proc_starttime.replace(hour=0, minute=0, second=0, microsecond=0)
enddate = proc_endtime.replace(hour=0, minute=0, second=0, microsecond=0)
ndays = int((enddate-startdate).days)+1
for datatype in datatype_list:
flist = []
for i in range(ndays):
time_dir = (
proc_starttime+datetime.timedelta(days=i)).strftime('%Y-%m-%d')
filepath = (
file_base+time_dir+'/VAD/PROFILE_WIND/' +
'*_wind_profile_VAD_WIND_hres'+str(hres)+'.csv')
labels = [
'u_wind', 'std_u_wind', 'np_u_wind',
'v_wind', 'std_v_wind', 'np_v_wind',
'w_wind', 'std_w_wind', 'np_w_wind',
'mag_h_wind', 'dir_h_wind']
label_nr = 0
if datatype == 'dBZc':
filepath = (
file_base+time_dir+'/velFilter/PROFILE_dBZc/' +
'*_rhi_profile_*_dBZc_hres'+str(hres)+'.csv')
labels = [
'50.0-percentile', '25.0-percentile', '75.0-percentile']
# dBZ mean data
# filepath = (
# file_base+time_dir+'/velFilter/PROFILE_dBZc_mean/' +
# '*_rhi_profile_*_dBZc_hres'+str(hres)+'.csv')
# labels = [
# 'Mean', 'Min', 'Max']
# dBZ linear mean data
# filepath = (
# file_base+time_dir+'/velFilter/PROFILE_dBZc_linear_mean/' +
# '*_rhi_profile_*_dBZc_hres'+str(hres)+'.csv')
# labels = [
# 'Mean', 'Min', 'Max']
# dBZ before filtering with fitted velocity
# filepath = (
# file_base+time_dir+'/echoFilter/PROFILE_dBZc/' +
# '*_rhi_profile_*_dBZc_hres'+str(hres)+'.csv')
# labels = [
# '50.0-percentile', '25.0-percentile', '75.0-percentile']
#
# dBZ before filtering with fitted velocity. Linear mean
# filepath = (
# file_base+time_dir+'/echoFilter/PROFILE_dBZc_linear_mean/' +
# '*_rhi_profile_*_dBZc_hres'+str(hres)+'.csv')
# labels = [
# 'Mean', 'Min', 'Max']
elif datatype == 'eta_h':
filepath = (
file_base+time_dir+'/vol_refl/PROFILE/' +
'*_rhi_profile_*_eta_h_hres'+str(hres)+'.csv')
labels = [
'50.0-percentile', '25.0-percentile', '75.0-percentile']
# mean data
# filepath = (
# file_base+time_dir+'/vol_refl/PROFILE_mean/' +
# '*_rhi_profile_*_eta_h_hres'+str(hres)+'.csv')
# labels = [
# 'Mean', 'Min', 'Max']
elif datatype == 'bird_density':
filepath = (
file_base+time_dir+'/bird_density/PROFILE/' +
'*_rhi_profile_*_bird_density_hres'+str(hres)+'.csv')
labels = [
'50.0-percentile', '25.0-percentile', '75.0-percentile']
# mean data
# filepath = (
# file_base+time_dir+'/bird_density/PROFILE_mean/' +
# '*_rhi_profile_*_bird_density_hres'+str(hres)+'.csv')
# labels = [
# 'Mean', 'Min', 'Max']
elif datatype == 'WIND_SPEED':
label_nr = 9
elif datatype == 'WIND_DIRECTION':
label_nr = 10
elif datatype == 'wind_vel_h_u':
label_nr = 0
elif datatype == 'wind_vel_h_v':
label_nr = 3
elif datatype == 'wind_vel_v':
label_nr = 6
flist_aux = glob.glob(filepath)
if not flist_aux:
warn('No profile files found in '+filepath)
continue
flist.extend(flist_aux)
if not flist:
warn('No profile files found')
continue
flist.sort()
field_name = get_fieldname_pyart(datatype)
field_dict = get_metadata(field_name)
titl = 'bird retrieval '+args.starttime+'\n'+get_field_name(
field_dict, field_name)
tbin_edges, hbin_edges, np_ma, data_ma, t_start = read_profile_ts(
flist, labels, hres=hres, label_nr=label_nr)
basepath_out = os.path.dirname(flist[0])
fname = (
basepath_out+'/'+args.starttime+'_TIME_HEIGHT_' +
datatype+'_hres'+str(hres)+'.png')
vmin = vmax = None
_plot_time_range(
tbin_edges, hbin_edges/1000., data_ma, field_name, [fname],
titl=titl, figsize=[10, 8], vmin=vmin, vmax=vmax, dpi=72)
print("----- plot to '%s'" % fname)
# Plot number of points
field_dict = get_metadata('number_of_samples')
titl = 'bird retrieval '+args.starttime+'\n'+get_field_name(
field_dict, 'number_of_samples')
fname = (
basepath_out+'/'+args.starttime+'_TIME_HEIGHT_' +
datatype+'nsamples_hres'+str(hres)+'.png')
vmin = vmax = None
_plot_time_range(
tbin_edges, hbin_edges/1000., np_ma, 'number_of_samples', [fname],
titl=titl, figsize=[10, 8], vmin=vmin, vmax=vmax, dpi=72)
print("----- plot to '%s'" % fname)
def _print_end_msg(text):
"""
prints end message
Parameters
----------
text : str
the text to be printed
Returns
-------
Nothing
"""
print(text + datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S"))
# ---------------------------------------------------------
# Start main:
# ---------------------------------------------------------
if __name__ == "__main__":
main()
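# Example invocation (added for illustration; the script and config file names are placeholders):
#   python process_data_birds.py birds_proc.txt 20180501000000 20180502000000 --hres 200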
| en | 0.300276 | #!/home/daniel/anaconda3/bin/python # -*- coding: utf-8 -*- ================================================ Pyrad: The MeteoSwiss Radar Processing framework ================================================ Welcome to Pyrad! This program processes bird data # Author: fvj # License: BSD 3 clause # parse the arguments # positional arguments )) parser.add_argument( 'endtime', type=str, help='end time of the data to be processed. Format ''YYYYMMDDhhmmss # keyword arguments # Plot time-height # dBZ mean data # filepath = ( # file_base+time_dir+'/velFilter/PROFILE_dBZc_mean/' + # '*_rhi_profile_*_dBZc_hres'+str(hres)+'.csv') # labels = [ # 'Mean', 'Min', 'Max'] # dBZ linear mean data # filepath = ( # file_base+time_dir+'/velFilter/PROFILE_dBZc_linear_mean/' + # '*_rhi_profile_*_dBZc_hres'+str(hres)+'.csv') # labels = [ # 'Mean', 'Min', 'Max'] # dBZ before filtering with fitted velocity # filepath = ( # file_base+time_dir+'/echoFilter/PROFILE_dBZc/' + # '*_rhi_profile_*_dBZc_hres'+str(hres)+'.csv') # labels = [ # '50.0-percentile', '25.0-percentile', '75.0-percentile'] # # dBZ before filtering with fitted velocity. Linear mean # filepath = ( # file_base+time_dir+'/echoFilter/PROFILE_dBZc_linear_mean/' + # '*_rhi_profile_*_dBZc_hres'+str(hres)+'.csv') # labels = [ # 'Mean', 'Min', 'Max'] # mean data # filepath = ( # file_base+time_dir+'/vol_refl/PROFILE_mean/' + # '*_rhi_profile_*_eta_h_hres'+str(hres)+'.csv') # labels = [ # 'Mean', 'Min', 'Max'] # mean data # filepath = ( # file_base+time_dir+'/bird_density/PROFILE_mean/' + # '*_rhi_profile_*_bird_density_hres'+str(hres)+'.csv') # labels = [ # 'Mean', 'Min', 'Max'] # Plot number of points prints end message Parameters ---------- text : str the text to be printed Returns ------- Nothing # --------------------------------------------------------- # Start main: # --------------------------------------------------------- | 2.43814 | 2 |
aiosnow/models/_schema/schema.py | michaeldcanady/aiosnow | 38 | 6632692 | <reponame>michaeldcanady/aiosnow<gh_stars>10-100
from typing import Any, Dict, Iterable, Tuple, Union
import marshmallow
from aiosnow.exceptions import (
DeserializationError,
IncompatiblePayloadField,
SchemaError,
SerializationError,
UnexpectedPayloadType,
UnknownPayloadField,
)
from .fields.base import BaseField
from .fields.mapped import MappedField
from .nested import Nested
class ModelSchemaMeta(marshmallow.schema.SchemaMeta):
def __new__(mcs, name: str, bases: tuple, attrs: dict) -> Any:
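        """Collect the schema's declared fields: ``BaseField`` attributes are registered
        under their attribute name, nested ``ModelSchema`` classes are wrapped in
        ``Nested`` fields, and at most one field may be marked ``is_primary`` (stored
        as ``_primary_key``)."""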
fields = attrs["fields"] = {}
nested_fields = attrs["nested_fields"] = {}
pks = []
for k, v in attrs.items():
if isinstance(v, BaseField):
if v.is_primary:
pks.append(k)
fields[k] = v
fields[k].name = k
elif isinstance(v, ModelSchemaMeta):
fields[k] = Nested(k, v, allow_none=True, required=False)
nested_fields.update({k: fields[k]})
else:
continue
if len(pks) == 1:
attrs["_primary_key"] = pks[0]
elif len(pks) == 0:
attrs["_primary_key"] = None
elif len(pks) > 1:
raise SchemaError(
f"Multiple primary keys (is_primary) supplied "
f"in {name}. Maximum allowed is 1."
)
cls = super().__new__(mcs, name, bases, {**attrs, **fields})
for k, v in fields.items():
setattr(cls, k, v)
return cls
class ModelSchema(marshmallow.Schema, metaclass=ModelSchemaMeta):
fields: Dict
nested_fields: Dict
@marshmallow.pre_load
def _load_response(self, data: Union[list, dict], **_: Any) -> Union[list, dict]:
"""Load response content
@TODO - move into load()
Args:
data: Dictionary of fields to deserialize
Returns:
dict(field_name=field_value, ...)
"""
if isinstance(data, list):
return [dict(self.__load_response(r or {})) for r in data]
elif isinstance(data, dict):
return dict(self.__load_response(data or {}))
else:
raise TypeError(
f"Response content must be {list} or {dict}, got: {type(data)}"
)
def __load_response(self, content: dict) -> Iterable[Tuple[str, str]]:
"""Yields deserialized response content items
Args:
content: Response content to deserialize
Yields: <name>, <value>
"""
for key, value in content.items():
field = self._declared_fields.get(key, None)
if isinstance(field, BaseField):
if isinstance(value, dict) and {"value", "display_value"} <= set(
value.keys()
):
if isinstance(field, MappedField):
value = value["value"] or None, value["display_value"] or None
else:
value = value[field.pluck.value] or None
elif isinstance(field, Nested):
pass
else: # Unknown field
continue
yield key, value
def __dump_payload(self, payload: dict) -> Iterable[Tuple[str, str]]:
"""Yields serialized payload
Args:
payload: Payload to serialize
Yields: <name>, <value>
"""
for key, value in payload.items():
if isinstance(key, BaseField):
key = key.name
elif isinstance(key, str):
pass
else:
raise IncompatiblePayloadField(
f"Incompatible field in payload: {type(key)}"
)
field = getattr(self, key, None)
if not field:
raise UnknownPayloadField(f"Unknown field in payload {key}")
yield key, value
def load_content(self, *args: Any, **kwargs: Any) -> dict:
try:
return self._do_load(*args, partial=True, postprocess=True, **kwargs)
except marshmallow.exceptions.ValidationError as e:
raise DeserializationError(e)
def loads(self, *args: Any, **kwargs: Any) -> dict:
try:
return super().loads(*args, **kwargs)
except marshmallow.exceptions.ValidationError as e:
raise DeserializationError(e)
def dumps(self, obj: dict, *args: Any, many: bool = None, **kwargs: Any) -> str:
"""Dump payload
Args:
obj: The object to serialize
many: Whether to serialize `obj` as a collection. If `None`, the value for `self.many` is used.
Returns:
JSON string
"""
if not isinstance(obj, dict):
raise UnexpectedPayloadType(
f"Invalid payload: {type(obj)}, expected: {dict}"
)
data = dict(self.__dump_payload(obj))
try:
return super().dumps(data)
except marshmallow.exceptions.ValidationError as e:
raise SerializationError(e)
| en | 0.547511 | Load response content @TODO - move into load() Args: data: Dictionary of fields to deserialize Returns: dict(field_name=field_value, ...) Yields deserialized response content items Args: content: Response content to deserialize Yields: <name>, <value> # Unknown field Yields serialized payload Args: payload: Payload to serialize Yields: <name>, <value> Dump payload Args: obj: The object to serialize many: Whether to serialize `obj` as a collection. If `None`, the value for `self.many` is used. Returns: JSON string | 2.143478 | 2
ravenclaw/wrangling/fill_with_regression.py | idin/ravenclaw | 1 | 6632693 | <gh_stars>1-10
from copy import deepcopy
from numpy import where, minimum, maximum
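# fill_with_regression fills missing values in the selected columns of ``data`` by fitting
# ``regressor`` on ``based_on_col`` (per group when ``group_by`` is given) and predicting the
# missing entries, optionally clamping the predictions to observed or globally given bounds.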
def fill_with_regression(
data, regressor, based_on_col, group_by = None,
cols=None, except_cols=[], inplace=False,
limit_min = False,
limit_max = False,
global_min = None,
global_max = None
):
if inplace:
new_data = data
else:
new_data = data.copy()
if cols is None:
cols = new_data.columns
if group_by is None:
for col in cols:
if col not in except_cols and col != based_on_col and col and new_data[col].isnull().values.any():
available_data = data[data[col].isnull()==False]
this_regressor = deepcopy(regressor)
try:
this_regressor.train(X=available_data[[based_on_col]], y=available_data[col], echo=False)
pred = this_regressor.predict(data=data[[based_on_col]], echo=False)
if limit_min:
if global_min is not None:
the_min = global_min
else:
the_min = available_data[col].min()
pred = maximum(pred, the_min)
if limit_max:
if global_max is not None:
the_max = global_max
else:
the_max = available_data[col].max()
pred = minimum(pred, the_max)
new_data[col] = where(data[col].isnull(), pred, data[col])
except:
pass
else:
return data.groupby(group_by).apply(
lambda x: fill_with_regression(
data=x, based_on_col=based_on_col, cols=cols, except_cols=except_cols,
inplace=False, regressor=regressor, limit_min=limit_min, limit_max=limit_max,
global_min=global_min, global_max=global_max
)
).reset_index(drop=True)
	return new_data
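# --- Added usage sketch (not part of the original module) -----------------------------
# The ``regressor`` argument is only assumed to expose ``train(X=..., y=..., echo=...)``
# and ``predict(data=..., echo=...)``, as inferred from the calls above; the
# ``SklearnRegressor`` adapter below is a hypothetical stand-in built on scikit-learn.
if __name__ == '__main__':
    import pandas as pd
    from sklearn.linear_model import LinearRegression

    class SklearnRegressor:
        def __init__(self):
            self._model = LinearRegression()

        def train(self, X, y, echo=False):
            self._model.fit(X, y)

        def predict(self, data, echo=False):
            return self._model.predict(data)

    df = pd.DataFrame({
        'height': [150.0, 160.0, 170.0, 180.0],
        'weight': [50.0, None, 70.0, None],
    })
    # missing weights are predicted from height and written in place of the NaNs
    print(fill_with_regression(df, regressor=SklearnRegressor(), based_on_col='height'))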
from numpy import where, minimum, maximum
def fill_with_regression(
data, regressor, based_on_col, group_by = None,
cols=None, except_cols=[], inplace=False,
limit_min = False,
limit_max = False,
global_min = None,
global_max = None
):
if inplace:
new_data = data
else:
new_data = data.copy()
if cols is None:
cols = new_data.columns
if group_by is None:
for col in cols:
if col not in except_cols and col != based_on_col and col and new_data[col].isnull().values.any():
available_data = data[data[col].isnull()==False]
this_regressor = deepcopy(regressor)
try:
this_regressor.train(X=available_data[[based_on_col]], y=available_data[col], echo=False)
pred = this_regressor.predict(data=data[[based_on_col]], echo=False)
if limit_min:
if global_min is not None:
the_min = global_min
else:
the_min = available_data[col].min()
pred = maximum(pred, the_min)
if limit_max:
if global_max is not None:
the_max = global_max
else:
the_max = available_data[col].max()
pred = minimum(pred, the_max)
new_data[col] = where(data[col].isnull(), pred, data[col])
except:
pass
else:
return data.groupby(group_by).apply(
lambda x: fill_with_regression(
data=x, based_on_col=based_on_col, cols=cols, except_cols=except_cols,
inplace=False, regressor=regressor, limit_min=limit_min, limit_max=limit_max,
global_min=global_min, global_max=global_max
)
).reset_index(drop=True)
return new_data | none | 1 | 2.85342 | 3 |
|
res/TensorFlowPythonModels/examples/minimum-maximum/__init__.py | juitem/ONE | 255 | 6632694 | <reponame>juitem/ONE
import tensorflow as tf
in_ = tf.compat.v1.placeholder(dtype=tf.float32, shape=(1, 16, 160, 160), name="Hole")
upper_ = tf.compat.v1.constant(6.)
lower_ = tf.compat.v1.constant(0.)
min_ = tf.compat.v1.minimum(in_, upper_)
max_ = tf.compat.v1.maximum(min_, lower_)
'''
python ../../compiler/tf2tfliteV2/tf2tfliteV2.py --v1 \
-i minimum-maximum.pbtxt \
-o minimum-maximum.tflite \
-I Hole -O Maximum
'''
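# Added check (not part of the original example): the Minimum/Maximum pair clamps the input
# to [0, 6], i.e. it behaves like ReLU6. The snippet assumes the TF1 graph-mode setup this
# file already relies on (the placeholder above does not work under eager execution).
if __name__ == '__main__':
    import numpy as np
    with tf.compat.v1.Session() as sess:
        x = np.random.randn(1, 16, 160, 160).astype(np.float32) * 10
        y = sess.run(max_, feed_dict={in_: x})
        print('clamped to [0, 6]:', y.min(), y.max())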
| en | 0.215826 | python ../../compiler/tf2tfliteV2/tf2tfliteV2.py --v1 \ -i minimum-maximum.pbtxt \ -o minimum-maximum.tflite \ -I Hole -O Maximum | 2.571375 | 3
octicons16px/skip.py | andrewp-as-is/octicons16px.py | 1 | 6632695 |
OCTICON_SKIP = """
<svg class="octicon octicon-skip" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 16 16" width="16" height="16"><path fill-rule="evenodd" d="M1.5 8a6.5 6.5 0 1113 0 6.5 6.5 0 01-13 0zM8 0a8 8 0 100 16A8 8 0 008 0zm3.28 5.78a.75.75 0 00-1.06-1.06l-5.5 5.5a.75.75 0 101.06 1.06l5.5-5.5z"></path></svg>
"""
| en | 0.174351 | <svg class="octicon octicon-skip" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 16 16" width="16" height="16"><path fill-rule="evenodd" d="M1.5 8a6.5 6.5 0 1113 0 6.5 6.5 0 01-13 0zM8 0a8 8 0 100 16A8 8 0 008 0zm3.28 5.78a.75.75 0 00-1.06-1.06l-5.5 5.5a.75.75 0 101.06 1.06l5.5-5.5z"></path></svg> | 1.219285 | 1 |
else/else14.py | mifomen/python | 0 | 6632696 | <filename>else/else14.py
a=int(input())
b=int(input())
c=int(input())
if a<=b<=c:
print(a,c)
elif a<=c<=b:
print(a,b)
elif b<=a<=c:
    print(b,c)
elif b<=c<=a:
    print(b,a)
elif c<=a<=b:  #2,3,1
    print(c,b)
else:
    print(c,a)
# prints the smallest and largest of the three numbers
| ca | 0.346387 | #2,3,1 #rpoblem | 3.884371 | 4 |
Anaconda-files/Program_19c.py | DrStephenLynch/dynamical-systems-with-applications-using-python | 1 | 6632697 | <reponame>DrStephenLynch/dynamical-systems-with-applications-using-python<filename>Anaconda-files/Program_19c.py<gh_stars>1-10
# Program 19c: Synchronization between two Lorenz systems.
# See Figure 19.7(b).
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import odeint
# Constants
sigma = 16
b = 4
r = 45.92
tmax = 100
t = np.arange(0.0, tmax, 0.1)
def two_lorenz_odes(X, t):
x1, x2, x3, y2, y3 = X
dx1 = sigma * (x2 - x1)
dx2 = -x1 * x3 + r*x1 - x2
dx3 = x1 * x2 - b*x3
dy2 = -x1 * y3 + r*x1 - y2
dy3 = x1 * y2 - b*y3
return (dx1, dx2, dx3, dy2, dy3)
y0 = [15, 20, 30, 10, 20]
X = odeint(two_lorenz_odes, y0, t, rtol=1e-6)
x1, x2, x3, y2, y3 = X.T # unpack columns
plt.figure(1)
plt.plot(x3, y3)
plt.xlabel(r'$y_3$', fontsize=15)
plt.ylabel(r'$x_3$', fontsize=15)
plt.show()
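# Added illustration (not in the original program): both systems are driven by the same
# x1 signal, so the response variables (y2, y3) lock onto (x2, x3); plotting the absolute
# differences shows the synchronization error shrinking over time.
plt.figure(2)
plt.plot(t, np.abs(x2 - y2), label='|x2 - y2|')
plt.plot(t, np.abs(x3 - y3), label='|x3 - y3|')
plt.yscale('log')
plt.xlabel('t', fontsize=15)
plt.ylabel('synchronization error', fontsize=15)
plt.legend()
plt.show()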
| en | 0.697408 | # Program 19c: Synchronization between two Lorenz systems. # See Figure 19.7(b). # Constants # unpack columns | 3.561941 | 4
search/search/request_handler.py | ATintern/microservices | 1 | 6632698 | # Python
import asyncio
# module
from search.grpc_client import gene_search, chromosome_search, chromosome_region
from search.query_parser import makeQueryParser
class RequestHandler:
def __init__(self, parser, gene_address, chromosome_address, region_address):
self.parser = parser
self.gene_address = gene_address
self.chromosome_address = chromosome_address
self.region_address = region_address
async def process(self, query):
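        """Parse ``query``, search the gene and chromosome indexes over gRPC, and
        return a dict with the matching ``genes`` and chromosome ``regions``."""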
        # parse the query and search the indexes
search_results = {'genes': [], 'regions': []}
for results, start, end in self.parser.scanString(query):
if 'genes' in results:
for name, in results['genes']:
genes = await gene_search(name, self.gene_address)
search_results['genes'].extend(genes)
if 'regions' in results:
for name, start, stop in results['regions']:
chromosomes = await chromosome_search(name, self.chromosome_address)
chromosome_regions = await asyncio.gather(*[
chromosome_region(chromosome, start, stop, self.region_address)
for chromosome in chromosomes
])
for regions in chromosome_regions:
search_results['regions'].extend(regions)
return search_results
| en | 0.244327 | # Python # module # parser the query and search the indexes | 2.719724 | 3 |
packages/core/minos-microservice-aggregate/minos/aggregate/snapshots/repositories/database/factories.py | minos-framework/minos-python | 247 | 6632699 | from abc import (
ABC,
abstractmethod,
)
from collections.abc import (
Iterable,
)
from datetime import (
datetime,
)
from typing import (
Any,
Optional,
)
from uuid import (
UUID,
)
from minos.common import (
DatabaseOperation,
DatabaseOperationFactory,
)
from ....queries import (
_Condition,
_Ordering,
)
class SnapshotDatabaseOperationFactory(DatabaseOperationFactory, ABC):
"""Snapshot Database Operation Factory class."""
@abstractmethod
def build_create(self) -> DatabaseOperation:
"""Build the database operation to create the snapshot table.
:return: A ``DatabaseOperation`` instance.
"""
@abstractmethod
def build_delete(self, transaction_uuids: Iterable[UUID]) -> DatabaseOperation:
"""Build the database operation to delete rows by transaction identifiers.
:param transaction_uuids: The transaction identifiers.
:return: A ``DatabaseOperation`` instance.
"""
@abstractmethod
def build_submit(
self,
uuid: UUID,
name: str,
version: int,
schema: bytes,
data: dict[str, Any],
created_at: datetime,
updated_at: datetime,
transaction_uuid: UUID,
) -> DatabaseOperation:
"""Build the insert database operation.
:param uuid: The identifier of the entity.
:param name: The name of the entity.
:param version: The version of the entity.
:param schema: The schema of the entity.
:param data: The data of the entity.
:param created_at: The creation datetime.
:param updated_at: The last update datetime.
:param transaction_uuid: The transaction identifier.
:return: A ``DatabaseOperation`` instance.
"""
@abstractmethod
def build_query(
self,
name: str,
condition: _Condition,
ordering: Optional[_Ordering],
limit: Optional[int],
transaction_uuids: tuple[UUID, ...],
exclude_deleted: bool,
) -> DatabaseOperation:
"""Build the query database operation.
:param name: Class name of the ``RootEntity``.
:param condition: The condition that must be satisfied by the ``RootEntity`` instances.
:param ordering: Optional argument to return the instance with specific ordering strategy. The default behaviour
is to retrieve them without any order pattern.
:param limit: Optional argument to return only a subset of instances. The default behaviour is to return all the
instances that meet the given condition.
        :param transaction_uuids: The transactions within which the operation is performed. If no value is provided,
            then the transaction is extracted from the context var. If no transaction is in scope, then the query is
            performed against the global snapshot.
        :param exclude_deleted: If ``True``, deleted ``RootEntity`` entries are filtered out; otherwise they are
            included.
:return: A ``DatabaseOperation`` instance.
"""
@abstractmethod
def build_submit_offset(self, value: int) -> DatabaseOperation:
"""Build the database operation to store the offset.
:param value: The value to be stored as the new offset.
:return: A ``DatabaseOperation`` instance.
"""
@abstractmethod
def build_query_offset(self) -> DatabaseOperation:
"""Build the database operation to get the current offset.
:return: A ``DatabaseOperation`` instance.
"""
| en | 0.772485 | Snapshot Database Operation Factory class. Build the database operation to create the snapshot table. :return: A ``DatabaseOperation`` instance. Build the database operation to delete rows by transaction identifiers. :param transaction_uuids: The transaction identifiers. :return: A ``DatabaseOperation`` instance. Build the insert database operation. :param uuid: The identifier of the entity. :param name: The name of the entity. :param version: The version of the entity. :param schema: The schema of the entity. :param data: The data of the entity. :param created_at: The creation datetime. :param updated_at: The last update datetime. :param transaction_uuid: The transaction identifier. :return: A ``DatabaseOperation`` instance. Build the query database operation. :param name: Class name of the ``RootEntity``. :param condition: The condition that must be satisfied by the ``RootEntity`` instances. :param ordering: Optional argument to return the instance with specific ordering strategy. The default behaviour is to retrieve them without any order pattern. :param limit: Optional argument to return only a subset of instances. The default behaviour is to return all the instances that meet the given condition. :param transaction_uuids: The transaction within the operation is performed. If not any value is provided, then the transaction is extracted from the context var. If not any transaction is being scoped then the query is performed to the global snapshot. :param exclude_deleted: If ``True``, deleted ``RootEntity`` entries are included, otherwise deleted ``RootEntity`` entries are filtered. :return: A ``DatabaseOperation`` instance. Build the database operation to store the offset. :param value: The value to be stored as the new offset. :return: A ``DatabaseOperation`` instance. Build the database operation to get the current offset. :return: A ``DatabaseOperation`` instance. | 2.552762 | 3 |
udify/modules/xlmr_pretrained.py | TeMU-BSC/udify-transformers | 1 | 6632700 | """
An extension of AllenNLP's BERT pretrained helper classes. Supports modification to BERT dropout, and applies
a sliding window approach for long sentences.
"""
from typing import Dict, List, Callable, Tuple, Any
import logging
import collections
from overrides import overrides
import torch
import torch.nn.functional as F
from transformers import AutoTokenizer, AutoConfig, AutoModel
from transformers import XLMRobertaModel, XLMRobertaTokenizer
from allennlp.common.util import pad_sequence_to_length
from allennlp.modules.token_embedders import TokenEmbedder
from allennlp.data.vocabulary import Vocabulary
from allennlp.data.tokenizers.token import Token
from allennlp.data.token_indexers.token_indexer import TokenIndexer
from allennlp.nn import util
from udify.modules.scalar_mix import ScalarMixWithDropout
logger = logging.getLogger(__name__)
# TODO(joelgrus): Figure out how to generate token_type_ids out of this token indexer.
# This is the default list of tokens that should not be lowercased.
_NEVER_LOWERCASE = []
class WordpieceIndexer(TokenIndexer[int]):
"""
A token indexer that does the wordpiece-tokenization (e.g. for BERT embeddings).
    If you are using one of the pretrained XLM-R models, you'll want to use the ``PretrainedXlmrIndexer``
    subclass rather than this base class.
Parameters
----------
vocab : ``Dict[str, int]``
The mapping {wordpiece -> id}. Note this is not an AllenNLP ``Vocabulary``.
wordpiece_tokenizer : ``Callable[[str], List[str]]``
A function that does the actual tokenization.
namespace : str, optional (default: "wordpiece")
The namespace in the AllenNLP ``Vocabulary`` into which the wordpieces
will be loaded.
use_starting_offsets : bool, optional (default: False)
By default, the "offsets" created by the token indexer correspond to the
last wordpiece in each word. If ``use_starting_offsets`` is specified,
they will instead correspond to the first wordpiece in each word.
    max_pieces : int, optional (default: 512)
The BERT embedder uses positional embeddings and so has a corresponding
maximum length for its input ids. Any inputs longer than this will
either be truncated (default), or be split apart and batched using a
sliding window.
do_lowercase : ``bool``, optional (default=``False``)
Should we lowercase the provided tokens before getting the indices?
You would need to do this if you are using an -uncased BERT model
but your DatasetReader is not lowercasing tokens (which might be the
case if you're also using other embeddings based on cased tokens).
never_lowercase: ``List[str]``, optional
Tokens that should never be lowercased. Default is
[].
start_tokens : ``List[str]``, optional (default=``None``)
These are prepended to the tokens provided to ``tokens_to_indices``.
end_tokens : ``List[str]``, optional (default=``None``)
These are appended to the tokens provided to ``tokens_to_indices``.
    separator_token : ``str``, optional (default=``</s>``)
This token indicates the segments in the sequence.
truncate_long_sequences : ``bool``, optional (default=``True``)
By default, long sequences will be truncated to the maximum sequence
length. Otherwise, they will be split apart and batched using a
sliding window.
"""
def __init__(self,
vocab: Dict[str, int],
tokenizer: XLMRobertaTokenizer,
wordpiece_tokenizer: Callable[[str], List[str]],
namespace: str = "wordpiece",
use_starting_offsets: bool = False,
max_pieces: int = 512,
do_lowercase: bool = False,
never_lowercase: List[str] = None,
start_tokens: List[str] = None,
end_tokens: List[str] = None,
separator_token: str = "</s>",
truncate_long_sequences: bool = True,
token_min_padding_length: int = 0) -> None:
super().__init__(token_min_padding_length)
self.vocab = vocab
# The BERT code itself does a two-step tokenization:
# sentence -> [words], and then word -> [wordpieces]
# In AllenNLP, the first step is implemented as the ``BertSimpleWordSplitter``,
# and this token indexer handles the second.
self.tokenizer = tokenizer
self.wordpiece_tokenizer = wordpiece_tokenizer
self._namespace = namespace
self._added_to_vocabulary = False
self.max_pieces = max_pieces
self.use_starting_offsets = use_starting_offsets
self._do_lowercase = do_lowercase
self._truncate_long_sequences = truncate_long_sequences
if never_lowercase is None:
# Use the defaults
self._never_lowercase = set(_NEVER_LOWERCASE)
else:
self._never_lowercase = set(never_lowercase)
# Convert the start_tokens and end_tokens to wordpiece_ids
self._start_piece_ids = [self.tokenizer._convert_token_to_id(wordpiece)
for token in (start_tokens or [])
for wordpiece in wordpiece_tokenizer(token)]
self._end_piece_ids = [self.tokenizer._convert_token_to_id(wordpiece)
for token in (end_tokens or [])
for wordpiece in wordpiece_tokenizer(token)]
# Convert the separator_token to wordpiece_ids
self._separator_ids = [self.tokenizer._convert_token_to_id(wordpiece)
for wordpiece in wordpiece_tokenizer(separator_token)]
@overrides
def count_vocab_items(self, token: Token, counter: Dict[str, Dict[str, int]]):
# If we only use pretrained models, we don't need to do anything here.
pass
def _add_encoding_to_vocabulary(self, vocabulary: Vocabulary) -> None:
# pylint: disable=protected-access
for word, idx in self.vocab.items():
vocabulary._token_to_index[self._namespace][word] = idx
vocabulary._index_to_token[self._namespace][idx] = word
@overrides
def tokens_to_indices(self,
tokens: List[Token],
vocabulary: Vocabulary,
index_name: str) -> Dict[str, List[List[int]]]:
if not self._added_to_vocabulary:
self._add_encoding_to_vocabulary(vocabulary)
self._added_to_vocabulary = True
# This lowercases tokens if necessary
text = (token.text.lower()
if self._do_lowercase and token.text not in self._never_lowercase
else token.text
for token in tokens)
# Obtain a nested sequence of wordpieces, each represented by a list of wordpiece ids
token_wordpiece_ids = [
[self.tokenizer._convert_token_to_id(wordpiece) for wordpiece in self.wordpiece_tokenizer(token)]
for token in text
]
# offsets[i] will give us the index into wordpiece_ids
# for the wordpiece "corresponding to" the i-th input token.
offsets = []
# If we're using initial offsets, we want to start at offset = len(text_tokens)
# so that the first offset is the index of the first wordpiece of tokens[0].
# Otherwise, we want to start at len(text_tokens) - 1, so that the "previous"
# offset is the last wordpiece of "tokens[-1]".
offset = len(self._start_piece_ids) if self.use_starting_offsets else len(self._start_piece_ids) - 1
for token in token_wordpiece_ids:
# For initial offsets, the current value of ``offset`` is the start of
# the current wordpiece, so add it to ``offsets`` and then increment it.
if self.use_starting_offsets:
offsets.append(offset)
offset += len(token)
# For final offsets, the current value of ``offset`` is the end of
# the previous wordpiece, so increment it and then add it to ``offsets``.
else:
offset += len(token)
offsets.append(offset)
# Flattened list of wordpieces. In the end, the output of the model (e.g., BERT) should
# have a sequence length equal to the length of this list. However, it will first be split into
# chunks of length `self.max_pieces` so that they can be fit through the model. After packing
# and passing through the model, it should be unpacked to represent the wordpieces in this list.
flat_wordpiece_ids = [wordpiece for token in token_wordpiece_ids for wordpiece in token]
# The code below will (possibly) pack the wordpiece sequence into multiple sub-sequences by using a sliding
# window `window_length` that overlaps with previous windows according to the `stride`. Suppose we have
# the following sentence: "I went to the store to buy some milk". Then a sliding window of length 4 and
# stride of length 2 will split them up into:
# "[I went to the] [to the store to] [store to buy some] [buy some milk [PAD]]".
# This is to ensure that the model has context of as much of the sentence as possible to get accurate
# embeddings. Finally, the sequences will be padded with any start/end piece ids, e.g.,
# "[CLS] I went to the [SEP] [CLS] to the store to [SEP] ...".
# The embedder should then be able to split this token sequence by the window length,
# pass them through the model, and recombine them.
# Specify the stride to be half of `self.max_pieces`, minus any additional start/end wordpieces
window_length = self.max_pieces - len(self._start_piece_ids) - len(self._end_piece_ids)
stride = window_length // 2
if len(flat_wordpiece_ids) <= window_length:
# If all the wordpieces fit, then we don't need to do anything special
wordpiece_windows = [self._start_piece_ids + flat_wordpiece_ids + self._end_piece_ids]
elif self._truncate_long_sequences:
logger.warning("Too many wordpieces, truncating sequence. If you would like a rolling window, set"
"`truncate_long_sequences` to False"
f"{[token.text for token in tokens]}")
wordpiece_windows = [self._start_piece_ids + flat_wordpiece_ids[:window_length] + self._end_piece_ids]
else:
# Create a sliding window of wordpieces of length `max_pieces` that advances by `stride` steps and
# add start/end wordpieces to each window
# TODO: this currently does not respect word boundaries, so words may be cut in half between windows
# However, this would increase complexity, as sequences would need to be padded/unpadded in the middle
wordpiece_windows = [self._start_piece_ids + flat_wordpiece_ids[i:i+window_length] + self._end_piece_ids
for i in range(0, len(flat_wordpiece_ids), stride)]
# Check for overlap in the last window. Throw it away if it is redundant.
last_window = wordpiece_windows[-1][1:]
penultimate_window = wordpiece_windows[-2]
if last_window == penultimate_window[-len(last_window):]:
wordpiece_windows = wordpiece_windows[:-1]
# Flatten the wordpiece windows
wordpiece_ids = [wordpiece for sequence in wordpiece_windows for wordpiece in sequence]
# Constructing `token_type_ids` by `self._separator`
token_type_ids = _get_token_type_ids(wordpiece_ids, self._separator_ids)
# Our mask should correspond to the original tokens,
# because calling util.get_text_field_mask on the
# "wordpiece_id" tokens will produce the wrong shape.
# However, because of the max_pieces constraint, we may
# have truncated the wordpieces; accordingly, we want the mask
# to correspond to the remaining tokens after truncation, which
# is captured by the offsets.
mask = [1 for _ in offsets]
return {
index_name: wordpiece_ids,
f"{index_name}-offsets": offsets,
f"{index_name}-type-ids": token_type_ids,
"mask": mask
}
@overrides
def get_padding_token(self) -> int:
return 0
@overrides
def get_padding_lengths(self, token: int) -> Dict[str, int]: # pylint: disable=unused-argument
return {}
@overrides
def as_padded_tensor(
self,
tokens: Dict[str, List[int]],
desired_num_tokens: Dict[str, int],
padding_lengths: Dict[str, int],
) -> Dict[str, torch.Tensor]:
return {
key: torch.LongTensor(pad_sequence_to_length(val, desired_num_tokens[key]))
for key, val in tokens.items()
}
@overrides
def get_keys(self, index_name: str) -> List[str]:
"""
We need to override this because the indexer generates multiple keys.
"""
# pylint: disable=no-self-use
return [index_name, f"{index_name}-offsets", f"{index_name}-type-ids", "mask"]
@TokenIndexer.register("udify-xlmr-pretrained")
class PretrainedXlmrIndexer(WordpieceIndexer):
# pylint: disable=line-too-long
"""
A ``TokenIndexer`` corresponding to a pretrained BERT model.
Parameters
----------
pretrained_model: ``str``
Either the name of the pretrained model to use (e.g. 'xlm-roberta-base'),
or the path to the .txt file with its vocabulary.
If the name is a key in the list of pretrained models at
https://github.com/huggingface/pytorch-pretrained-BERT/blob/master/pytorch_pretrained_bert/tokenization.py#L33
the corresponding path will be used; otherwise it will be interpreted as a path or URL.
use_starting_offsets: bool, optional (default: False)
By default, the "offsets" created by the token indexer correspond to the
last wordpiece in each word. If ``use_starting_offsets`` is specified,
they will instead correspond to the first wordpiece in each word.
do_lowercase: ``bool``, optional (default = True)
Whether to lowercase the tokens before converting to wordpiece ids.
never_lowercase: ``List[str]``, optional
Tokens that should never be lowercased. Default is
[].
    max_pieces: int, optional (default: 512)
The BERT embedder uses positional embeddings and so has a corresponding
maximum length for its input ids. Any inputs longer than this will
either be truncated (default), or be split apart and batched using a
sliding window.
    truncate_long_sequences : ``bool``, optional (default=``False``)
        If ``True``, long sequences will be truncated to the maximum sequence
        length; by default they are split apart and batched using a
        sliding window.
"""
def __init__(self,
pretrained_model: str,
use_starting_offsets: bool = False,
do_lowercase: bool = False,
never_lowercase: List[str] = None,
max_pieces: int = 512,
truncate_long_sequences: bool = False) -> None:
if pretrained_model.endswith("-cased") and do_lowercase:
logger.warning("Your BERT model appears to be cased, "
"but your indexer is lowercasing tokens.")
elif pretrained_model.endswith("-uncased") and not do_lowercase:
logger.warning("Your BERT model appears to be uncased, "
"but your indexer is not lowercasing tokens.")
bert_tokenizer = AutoTokenizer.from_pretrained(pretrained_model)
bert_vocab = {bert_tokenizer.convert_ids_to_tokens(i): i for i in range(250001)}
bert_vocab[bert_tokenizer.convert_ids_to_tokens(250004)] = 250002
bert_vocab = collections.OrderedDict(bert_vocab)
super().__init__(vocab=bert_vocab,
tokenizer=bert_tokenizer,
wordpiece_tokenizer=bert_tokenizer._tokenize,
namespace="bert",
use_starting_offsets=use_starting_offsets,
max_pieces=max_pieces,
do_lowercase=do_lowercase,
never_lowercase=never_lowercase,
start_tokens=["<s>"],
end_tokens=["</s>"],
separator_token="</s>",
truncate_long_sequences=truncate_long_sequences)
def _get_token_type_ids(wordpiece_ids: List[int],
separator_ids: List[int]) -> List[int]:
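    """Assign a segment id to every wordpiece, incrementing the id each time the
    separator sequence is encountered; e.g. with a one-piece separator ``S``,
    ``[a, b, S, c, d, S]`` maps to ``[0, 0, 0, 1, 1, 1]``."""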
num_wordpieces = len(wordpiece_ids)
token_type_ids: List[int] = []
type_id = 0
cursor = 0
while cursor < num_wordpieces:
# check length
if num_wordpieces - cursor < len(separator_ids):
token_type_ids.extend(type_id
for _ in range(num_wordpieces - cursor))
cursor += num_wordpieces - cursor
# check content
# when it is a separator
elif all(wordpiece_ids[cursor + index] == separator_id
for index, separator_id in enumerate(separator_ids)):
token_type_ids.extend(type_id for _ in separator_ids)
type_id += 1
cursor += len(separator_ids)
# when it is not
else:
cursor += 1
token_type_ids.append(type_id)
return token_type_ids
class XlmrEmbedder(TokenEmbedder):
"""
A ``TokenEmbedder`` that produces BERT embeddings for your tokens.
Should be paired with a ``BertIndexer``, which produces wordpiece ids.
    Most likely you want to use ``UdifyPretrainedXlmrEmbedder``
    for one of the named pretrained models, not this base class.
Parameters
----------
    bert_model: ``XLMRobertaModel``
        The XLM-R model being wrapped.
top_layer_only: ``bool``, optional (default = ``False``)
If ``True``, then only return the top layer instead of apply the scalar mix.
    max_pieces : int, optional (default: 512)
The BERT embedder uses positional embeddings and so has a corresponding
maximum length for its input ids. Assuming the inputs are windowed
and padded appropriately by this length, the embedder will split them into a
large batch, feed them into BERT, and recombine the output as if it was a
longer sequence.
start_tokens : int, optional (default: 1)
The number of starting special tokens input to BERT (usually 1, i.e., [CLS])
end_tokens : int, optional (default: 1)
The number of ending tokens input to BERT (usually 1, i.e., [SEP])
combine_layers : str, optional (default: "mix")
Options: "mix", "last", "all"
"""
def __init__(self,
bert_model: XLMRobertaModel,
max_pieces: int = 512,
start_tokens: int = 1,
end_tokens: int = 1,
layer_dropout: float = 0.0,
combine_layers: str = "mix") -> None:
super().__init__()
self.bert_model = bert_model
self.output_dim = bert_model.config.hidden_size
self.max_pieces = max_pieces
self.start_tokens = start_tokens
self.end_tokens = end_tokens
self.combine_layers = combine_layers
if self.combine_layers == "mix":
self._scalar_mix = ScalarMixWithDropout(bert_model.config.num_hidden_layers,
do_layer_norm=False,
dropout=layer_dropout)
else:
self._scalar_mix = None
def get_output_dim(self) -> int:
return self.output_dim
def forward(self,
input_ids: torch.LongTensor,
offsets: torch.LongTensor = None,
token_type_ids: torch.LongTensor = None) -> torch.Tensor:
"""
Parameters
----------
input_ids : ``torch.LongTensor``
The (batch_size, ..., max_sequence_length) tensor of wordpiece ids.
offsets : ``torch.LongTensor``, optional
The BERT embeddings are one per wordpiece. However it's possible/likely
you might want one per original token. In that case, ``offsets``
represents the indices of the desired wordpiece for each original token.
Depending on how your token indexer is configured, this could be the
position of the last wordpiece for each token, or it could be the position
of the first wordpiece for each token.
For example, if you had the sentence "Definitely not", and if the corresponding
wordpieces were ["Def", "##in", "##ite", "##ly", "not"], then the input_ids
would be 5 wordpiece ids, and the "last wordpiece" offsets would be [3, 4].
If offsets are provided, the returned tensor will contain only the wordpiece
embeddings at those positions, and (in particular) will contain one embedding
per token. If offsets are not provided, the entire tensor of wordpiece embeddings
will be returned.
token_type_ids : ``torch.LongTensor``, optional
If an input consists of two sentences (as in the BERT paper),
tokens from the first sentence should have type 0 and tokens from
the second sentence should have type 1. If you don't provide this
(the default BertIndexer doesn't) then it's assumed to be all 0s.
"""
# pylint: disable=arguments-differ
batch_size, full_seq_len = input_ids.size(0), input_ids.size(-1)
initial_dims = list(input_ids.shape[:-1])
# The embedder may receive an input tensor that has a sequence length longer than can
# be fit. In that case, we should expect the wordpiece indexer to create padded windows
# of length `self.max_pieces` for us, and have them concatenated into one long sequence.
# E.g., "[CLS] I went to the [SEP] [CLS] to the store to [SEP] ..."
# We can then split the sequence into sub-sequences of that length, and concatenate them
# along the batch dimension so we effectively have one huge batch of partial sentences.
# This can then be fed into BERT without any sentence length issues. Keep in mind
# that the memory consumption can dramatically increase for large batches with extremely
# long sentences.
needs_split = full_seq_len > self.max_pieces
last_window_size = 0
if needs_split:
# Split the flattened list by the window size, `max_pieces`
split_input_ids = list(input_ids.split(self.max_pieces, dim=-1))
# We want all sequences to be the same length, so pad the last sequence
last_window_size = split_input_ids[-1].size(-1)
padding_amount = self.max_pieces - last_window_size
split_input_ids[-1] = F.pad(split_input_ids[-1], pad=[0, padding_amount], value=0)
# Now combine the sequences along the batch dimension
input_ids = torch.cat(split_input_ids, dim=0)
# if token_type_ids is None:
# token_type_ids = torch.zeros_like(input_ids)
input_mask = (input_ids != 0).long()
# input_ids may have extra dimensions, so we reshape down to 2-d
# before calling the BERT model and then reshape back at the end.
# MAYBE CHANGE THIS XLMRobertaForTokenClassification
all_encoder_layers = self.bert_model(input_ids=util.combine_initial_dims(input_ids),
# token_type_ids=util.combine_initial_dims(token_type_ids),
attention_mask=util.combine_initial_dims(input_mask))
all_encoder_layers = torch.stack(all_encoder_layers[2][1:]) # dump initial embeddings
if needs_split:
# First, unpack the output embeddings into one long sequence again
unpacked_embeddings = torch.split(all_encoder_layers, batch_size, dim=1)
unpacked_embeddings = torch.cat(unpacked_embeddings, dim=2)
# Next, select indices of the sequence such that it will result in embeddings representing the original
# sentence. To capture maximal context, the indices will be the middle part of each embedded window
# sub-sequence (plus any leftover start and final edge windows), e.g.,
# 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
# "[CLS] I went to the very fine [SEP] [CLS] the very fine store to eat [SEP]"
# with max_pieces = 8 should produce max context indices [2, 3, 4, 10, 11, 12] with additional start
# and final windows with indices [0, 1] and [14, 15] respectively.
# Find the stride as half the max pieces, ignoring the special start and end tokens
# Calculate an offset to extract the centermost embeddings of each window
stride = (self.max_pieces - self.start_tokens - self.end_tokens) // 2
stride_offset = stride // 2 + self.start_tokens
first_window = list(range(stride_offset))
max_context_windows = [i for i in range(full_seq_len)
if stride_offset - 1 < i % self.max_pieces < stride_offset + stride]
final_window_start = full_seq_len - (full_seq_len % self.max_pieces) + stride_offset + stride
final_window = list(range(final_window_start, full_seq_len))
select_indices = first_window + max_context_windows + final_window
initial_dims.append(len(select_indices))
recombined_embeddings = unpacked_embeddings[:, :, select_indices]
else:
recombined_embeddings = all_encoder_layers
# Recombine the outputs of all layers
# (layers, batch_size * d1 * ... * dn, sequence_length, embedding_dim)
# recombined = torch.cat(combined, dim=2)
input_mask = (recombined_embeddings != 0).long()
# At this point, mix is (batch_size * d1 * ... * dn, sequence_length, embedding_dim)
if offsets is None:
# Resize to (batch_size, d1, ..., dn, sequence_length, embedding_dim)
dims = initial_dims if needs_split else input_ids.size()
layers = util.uncombine_initial_dims(recombined_embeddings, dims)
else:
# offsets is (batch_size, d1, ..., dn, orig_sequence_length)
offsets2d = util.combine_initial_dims(offsets)
# now offsets is (batch_size * d1 * ... * dn, orig_sequence_length)
range_vector = util.get_range_vector(offsets2d.size(0),
device=util.get_device_of(recombined_embeddings)).unsqueeze(1)
# selected embeddings is also (batch_size * d1 * ... * dn, orig_sequence_length)
selected_embeddings = recombined_embeddings[:, range_vector, offsets2d]
# selected_embeddings = recombined_embeddings[range_vector, offsets2d]
layers = util.uncombine_initial_dims(selected_embeddings, offsets.size())
if self._scalar_mix is not None:
return self._scalar_mix(layers, input_mask)
elif self.combine_layers == "last":
return layers[-1]
else:
return layers
@TokenEmbedder.register("udify-xlmr-pretrained")
class UdifyPretrainedXlmrEmbedder(XlmrEmbedder):
def __init__(self, pretrained_model: str,
requires_grad: bool = False,
dropout: float = 0.1,
layer_dropout: float = 0.1,
combine_layers: str = "mix") -> None:
config = AutoConfig.from_pretrained(pretrained_model)
config.output_hidden_states = True
model = AutoModel.from_config(config)
for param in model.parameters():
param.requires_grad = requires_grad
super().__init__(bert_model=model,
layer_dropout=layer_dropout,
combine_layers=combine_layers)
self.model = model
self.dropout = dropout
self.set_dropout(dropout)
def set_dropout(self, dropout):
"""
Applies dropout to all BERT layers
"""
self.dropout = dropout
self.model.embeddings.dropout.p = dropout
for layer in self.model.encoder.layer:
layer.attention.self.dropout.p = dropout
layer.attention.output.dropout.p = dropout
layer.output.dropout.p = dropout
@TokenEmbedder.register("udify-xlmr-predictor")
class UdifyPredictionXlmrEmbedder(XlmrEmbedder):
"""To be used for inference only, pretrained model is unneeded"""
def __init__(self, bert_config: str,
requires_grad: bool = False,
dropout: float = 0.1,
layer_dropout: float = 0.1,
combine_layers: str = "mix") -> None:
config = AutoConfig.from_pretrained('xlm-roberta-base')
config.output_hidden_states = True
model = AutoModel.from_config(config)
for param in model.parameters():
param.requires_grad = requires_grad
super().__init__(bert_model=model,
layer_dropout=layer_dropout,
combine_layers=combine_layers)
self.model = model
self.dropout = dropout
self.set_dropout(dropout)
def set_dropout(self, dropout):
"""
Applies dropout to all BERT layers
"""
self.dropout = dropout
self.model.embeddings.dropout.p = dropout
for layer in self.model.encoder.layer:
layer.attention.self.dropout.p = dropout
layer.attention.output.dropout.p = dropout
layer.output.dropout.p = dropout
| """
An extension of AllenNLP's BERT pretrained helper classes. Supports modification to BERT dropout, and applies
a sliding window approach for long sentences.
"""
from typing import Dict, List, Callable, Tuple, Any
import logging
import collections
from overrides import overrides
import torch
import torch.nn.functional as F
from transformers import AutoTokenizer, AutoConfig, AutoModel
from transformers import XLMRobertaModel, XLMRobertaTokenizer
from allennlp.common.util import pad_sequence_to_length
from allennlp.modules.token_embedders import TokenEmbedder
from allennlp.data.vocabulary import Vocabulary
from allennlp.data.tokenizers.token import Token
from allennlp.data.token_indexers.token_indexer import TokenIndexer
from allennlp.nn import util
from udify.modules.scalar_mix import ScalarMixWithDropout
logger = logging.getLogger(__name__)
# TODO(joelgrus): Figure out how to generate token_type_ids out of this token indexer.
# This is the default list of tokens that should not be lowercased.
_NEVER_LOWERCASE = []
class WordpieceIndexer(TokenIndexer[int]):
"""
A token indexer that does the wordpiece-tokenization (e.g. for BERT embeddings).
If you are using one of the pretrained BERT models, you'll want to use the ``PretrainedBertIndexer``
subclass rather than this base class.
Parameters
----------
vocab : ``Dict[str, int]``
The mapping {wordpiece -> id}. Note this is not an AllenNLP ``Vocabulary``.
wordpiece_tokenizer : ``Callable[[str], List[str]]``
A function that does the actual tokenization.
namespace : str, optional (default: "wordpiece")
The namespace in the AllenNLP ``Vocabulary`` into which the wordpieces
will be loaded.
use_starting_offsets : bool, optional (default: False)
By default, the "offsets" created by the token indexer correspond to the
last wordpiece in each word. If ``use_starting_offsets`` is specified,
they will instead correspond to the first wordpiece in each word.
max_pieces : int, optional (default: 512)
The BERT embedder uses positional embeddings and so has a corresponding
maximum length for its input ids. Any inputs longer than this will
either be truncated (default), or be split apart and batched using a
sliding window.
do_lowercase : ``bool``, optional (default=``False``)
Should we lowercase the provided tokens before getting the indices?
You would need to do this if you are using an -uncased BERT model
but your DatasetReader is not lowercasing tokens (which might be the
case if you're also using other embeddings based on cased tokens).
never_lowercase: ``List[str]``, optional
Tokens that should never be lowercased. Default is
[].
start_tokens : ``List[str]``, optional (default=``None``)
These are prepended to the tokens provided to ``tokens_to_indices``.
end_tokens : ``List[str]``, optional (default=``None``)
These are appended to the tokens provided to ``tokens_to_indices``.
separator_token : ``str``, optional (default=``</s>``)
This token indicates the segments in the sequence.
truncate_long_sequences : ``bool``, optional (default=``True``)
By default, long sequences will be truncated to the maximum sequence
length. Otherwise, they will be split apart and batched using a
sliding window.
"""
def __init__(self,
vocab: Dict[str, int],
tokenizer: XLMRobertaTokenizer,
wordpiece_tokenizer: Callable[[str], List[str]],
namespace: str = "wordpiece",
use_starting_offsets: bool = False,
max_pieces: int = 512,
do_lowercase: bool = False,
never_lowercase: List[str] = None,
start_tokens: List[str] = None,
end_tokens: List[str] = None,
separator_token: str = "</s>",
truncate_long_sequences: bool = True,
token_min_padding_length: int = 0) -> None:
super().__init__(token_min_padding_length)
self.vocab = vocab
# The BERT code itself does a two-step tokenization:
# sentence -> [words], and then word -> [wordpieces]
# In AllenNLP, the first step is implemented as the ``BertSimpleWordSplitter``,
# and this token indexer handles the second.
self.tokenizer = tokenizer
self.wordpiece_tokenizer = wordpiece_tokenizer
self._namespace = namespace
self._added_to_vocabulary = False
self.max_pieces = max_pieces
self.use_starting_offsets = use_starting_offsets
self._do_lowercase = do_lowercase
self._truncate_long_sequences = truncate_long_sequences
if never_lowercase is None:
# Use the defaults
self._never_lowercase = set(_NEVER_LOWERCASE)
else:
self._never_lowercase = set(never_lowercase)
# Convert the start_tokens and end_tokens to wordpiece_ids
self._start_piece_ids = [self.tokenizer._convert_token_to_id(wordpiece)
for token in (start_tokens or [])
for wordpiece in wordpiece_tokenizer(token)]
self._end_piece_ids = [self.tokenizer._convert_token_to_id(wordpiece)
for token in (end_tokens or [])
for wordpiece in wordpiece_tokenizer(token)]
# Convert the separator_token to wordpiece_ids
self._separator_ids = [self.tokenizer._convert_token_to_id(wordpiece)
for wordpiece in wordpiece_tokenizer(separator_token)]
@overrides
def count_vocab_items(self, token: Token, counter: Dict[str, Dict[str, int]]):
# If we only use pretrained models, we don't need to do anything here.
pass
def _add_encoding_to_vocabulary(self, vocabulary: Vocabulary) -> None:
# pylint: disable=protected-access
for word, idx in self.vocab.items():
vocabulary._token_to_index[self._namespace][word] = idx
vocabulary._index_to_token[self._namespace][idx] = word
@overrides
def tokens_to_indices(self,
tokens: List[Token],
vocabulary: Vocabulary,
index_name: str) -> Dict[str, List[List[int]]]:
if not self._added_to_vocabulary:
self._add_encoding_to_vocabulary(vocabulary)
self._added_to_vocabulary = True
# This lowercases tokens if necessary
text = (token.text.lower()
if self._do_lowercase and token.text not in self._never_lowercase
else token.text
for token in tokens)
# Obtain a nested sequence of wordpieces, each represented by a list of wordpiece ids
token_wordpiece_ids = [
[self.tokenizer._convert_token_to_id(wordpiece) for wordpiece in self.wordpiece_tokenizer(token)]
for token in text
]
# offsets[i] will give us the index into wordpiece_ids
# for the wordpiece "corresponding to" the i-th input token.
offsets = []
# If we're using initial offsets, we want to start at offset = len(text_tokens)
# so that the first offset is the index of the first wordpiece of tokens[0].
# Otherwise, we want to start at len(text_tokens) - 1, so that the "previous"
# offset is the last wordpiece of "tokens[-1]".
offset = len(self._start_piece_ids) if self.use_starting_offsets else len(self._start_piece_ids) - 1
for token in token_wordpiece_ids:
# For initial offsets, the current value of ``offset`` is the start of
# the current wordpiece, so add it to ``offsets`` and then increment it.
if self.use_starting_offsets:
offsets.append(offset)
offset += len(token)
# For final offsets, the current value of ``offset`` is the end of
# the previous wordpiece, so increment it and then add it to ``offsets``.
else:
offset += len(token)
offsets.append(offset)
# Flattened list of wordpieces. In the end, the output of the model (e.g., BERT) should
# have a sequence length equal to the length of this list. However, it will first be split into
# chunks of length `self.max_pieces` so that they can be fit through the model. After packing
# and passing through the model, it should be unpacked to represent the wordpieces in this list.
flat_wordpiece_ids = [wordpiece for token in token_wordpiece_ids for wordpiece in token]
# The code below will (possibly) pack the wordpiece sequence into multiple sub-sequences by using a sliding
# window `window_length` that overlaps with previous windows according to the `stride`. Suppose we have
# the following sentence: "I went to the store to buy some milk". Then a sliding window of length 4 and
# stride of length 2 will split them up into:
# "[I went to the] [to the store to] [store to buy some] [buy some milk [PAD]]".
# This is to ensure that the model has context of as much of the sentence as possible to get accurate
# embeddings. Finally, the sequences will be padded with any start/end piece ids, e.g.,
# "[CLS] I went to the [SEP] [CLS] to the store to [SEP] ...".
# The embedder should then be able to split this token sequence by the window length,
# pass them through the model, and recombine them.
# Specify the stride to be half of `self.max_pieces`, minus any additional start/end wordpieces
window_length = self.max_pieces - len(self._start_piece_ids) - len(self._end_piece_ids)
stride = window_length // 2
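# Rough numbers, as an added illustration: assuming the start and end markers each
# map to a single piece, max_pieces=512 gives window_length=510 and stride=255, so
# a 1200-wordpiece input (with truncate_long_sequences=False) is cut into
# len(range(0, 1200, 255)) == 5 overlapping windows before the redundancy check below.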
if len(flat_wordpiece_ids) <= window_length:
# If all the wordpieces fit, then we don't need to do anything special
wordpiece_windows = [self._start_piece_ids + flat_wordpiece_ids + self._end_piece_ids]
elif self._truncate_long_sequences:
logger.warning("Too many wordpieces, truncating sequence. If you would like a rolling window, set"
"`truncate_long_sequences` to False"
f"{[token.text for token in tokens]}")
wordpiece_windows = [self._start_piece_ids + flat_wordpiece_ids[:window_length] + self._end_piece_ids]
else:
# Create a sliding window of wordpieces of length `max_pieces` that advances by `stride` steps and
# add start/end wordpieces to each window
# TODO: this currently does not respect word boundaries, so words may be cut in half between windows
# However, this would increase complexity, as sequences would need to be padded/unpadded in the middle
wordpiece_windows = [self._start_piece_ids + flat_wordpiece_ids[i:i+window_length] + self._end_piece_ids
for i in range(0, len(flat_wordpiece_ids), stride)]
# Check for overlap in the last window. Throw it away if it is redundant.
last_window = wordpiece_windows[-1][1:]
penultimate_window = wordpiece_windows[-2]
if last_window == penultimate_window[-len(last_window):]:
wordpiece_windows = wordpiece_windows[:-1]
# Flatten the wordpiece windows
wordpiece_ids = [wordpiece for sequence in wordpiece_windows for wordpiece in sequence]
# Constructing `token_type_ids` by `self._separator`
token_type_ids = _get_token_type_ids(wordpiece_ids, self._separator_ids)
# Our mask should correspond to the original tokens,
# because calling util.get_text_field_mask on the
# "wordpiece_id" tokens will produce the wrong shape.
# However, because of the max_pieces constraint, we may
# have truncated the wordpieces; accordingly, we want the mask
# to correspond to the remaining tokens after truncation, which
# is captured by the offsets.
mask = [1 for _ in offsets]
return {
index_name: wordpiece_ids,
f"{index_name}-offsets": offsets,
f"{index_name}-type-ids": token_type_ids,
"mask": mask
}
@overrides
def get_padding_token(self) -> int:
return 0
@overrides
def get_padding_lengths(self, token: int) -> Dict[str, int]: # pylint: disable=unused-argument
return {}
@overrides
def as_padded_tensor(
self,
tokens: Dict[str, List[int]],
desired_num_tokens: Dict[str, int],
padding_lengths: Dict[str, int],
) -> Dict[str, torch.Tensor]:
return {
key: torch.LongTensor(pad_sequence_to_length(val, desired_num_tokens[key]))
for key, val in tokens.items()
}
@overrides
def get_keys(self, index_name: str) -> List[str]:
"""
We need to override this because the indexer generates multiple keys.
"""
# pylint: disable=no-self-use
return [index_name, f"{index_name}-offsets", f"{index_name}-type-ids", "mask"]
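# Added note (illustrative): for an index name such as "bert", tokens_to_indices
# above emits "bert", "bert-offsets", "bert-type-ids" and "mask", and get_keys()
# reports exactly those keys so AllenNLP can pad and batch them together.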
@TokenIndexer.register("udify-xlmr-pretrained")
class PretrainedXlmrIndexer(WordpieceIndexer):
# pylint: disable=line-too-long
"""
A ``TokenIndexer`` corresponding to a pretrained BERT model.
Parameters
----------
pretrained_model: ``str``
Either the name of the pretrained model to use (e.g. 'xlm-roberta-base'),
or the path to the .txt file with its vocabulary.
If the name is a key in the list of pretrained models at
https://github.com/huggingface/pytorch-pretrained-BERT/blob/master/pytorch_pretrained_bert/tokenization.py#L33
the corresponding path will be used; otherwise it will be interpreted as a path or URL.
use_starting_offsets: bool, optional (default: False)
By default, the "offsets" created by the token indexer correspond to the
last wordpiece in each word. If ``use_starting_offsets`` is specified,
they will instead correspond to the first wordpiece in each word.
do_lowercase: ``bool``, optional (default = False)
Whether to lowercase the tokens before converting to wordpiece ids.
never_lowercase: ``List[str]``, optional
Tokens that should never be lowercased. Default is
[].
max_pieces: int, optional (default: 512)
The BERT embedder uses positional embeddings and so has a corresponding
maximum length for its input ids. Any inputs longer than this will
either be truncated (default), or be split apart and batched using a
sliding window.
truncate_long_sequences : ``bool``, optional (default=``False``)
If ``True``, long sequences will be truncated to the maximum sequence
length. Otherwise (the default for this class), they will be split apart
and batched using a sliding window.
"""
def __init__(self,
pretrained_model: str,
use_starting_offsets: bool = False,
do_lowercase: bool = False,
never_lowercase: List[str] = None,
max_pieces: int = 512,
truncate_long_sequences: bool = False) -> None:
if pretrained_model.endswith("-cased") and do_lowercase:
logger.warning("Your BERT model appears to be cased, "
"but your indexer is lowercasing tokens.")
elif pretrained_model.endswith("-uncased") and not do_lowercase:
logger.warning("Your BERT model appears to be uncased, "
"but your indexer is not lowercasing tokens.")
bert_tokenizer = AutoTokenizer.from_pretrained(pretrained_model)
bert_vocab = {bert_tokenizer.convert_ids_to_tokens(i): i for i in range(250001)}
bert_vocab[bert_tokenizer.convert_ids_to_tokens(250004)] = 250002
bert_vocab = collections.OrderedDict(bert_vocab)
super().__init__(vocab=bert_vocab,
tokenizer=bert_tokenizer,
wordpiece_tokenizer=bert_tokenizer._tokenize,
namespace="bert",
use_starting_offsets=use_starting_offsets,
max_pieces=max_pieces,
do_lowercase=do_lowercase,
never_lowercase=never_lowercase,
start_tokens=["<s>"],
end_tokens=["</s>"],
separator_token="</s>",
truncate_long_sequences=truncate_long_sequences)
def _get_token_type_ids(wordpiece_ids: List[int],
separator_ids: List[int]) -> List[int]:
num_wordpieces = len(wordpiece_ids)
token_type_ids: List[int] = []
type_id = 0
cursor = 0
while cursor < num_wordpieces:
# check length
if num_wordpieces - cursor < len(separator_ids):
token_type_ids.extend(type_id
for _ in range(num_wordpieces - cursor))
cursor += num_wordpieces - cursor
# check content
# when it is a separator
elif all(wordpiece_ids[cursor + index] == separator_id
for index, separator_id in enumerate(separator_ids)):
token_type_ids.extend(type_id for _ in separator_ids)
type_id += 1
cursor += len(separator_ids)
# when it is not
else:
cursor += 1
token_type_ids.append(type_id)
return token_type_ids
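# Worked example (added, assuming a single-id separator): with wordpiece_ids
# [5, 6, 2, 7, 8, 2] and separator_ids [2], the loop above yields
# [0, 0, 0, 1, 1, 1]; each separator keeps the current segment's type id and the
# id is then bumped for the following segment.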
class XlmrEmbedder(TokenEmbedder):
"""
A ``TokenEmbedder`` that produces BERT embeddings for your tokens.
Should be paired with a ``BertIndexer``, which produces wordpiece ids.
Most likely you probably want to use ``PretrainedBertEmbedder``
for one of the named pretrained models, not this base class.
Parameters
----------
bert_model: ``BertModel``
The BERT model being wrapped.
layer_dropout: ``float``, optional (default = ``0.0``)
Dropout applied to the scalar mix of layers when ``combine_layers`` is ``"mix"``.
max_pieces : int, optional (default: 512)
The BERT embedder uses positional embeddings and so has a corresponding
maximum length for its input ids. Assuming the inputs are windowed
and padded appropriately by this length, the embedder will split them into a
large batch, feed them into BERT, and recombine the output as if it was a
longer sequence.
start_tokens : int, optional (default: 1)
The number of starting special tokens input to BERT (usually 1, i.e., [CLS])
end_tokens : int, optional (default: 1)
The number of ending tokens input to BERT (usually 1, i.e., [SEP])
combine_layers : str, optional (default: "mix")
Options: "mix", "last", "all"
"""
def __init__(self,
bert_model: XLMRobertaModel,
max_pieces: int = 512,
start_tokens: int = 1,
end_tokens: int = 1,
layer_dropout: float = 0.0,
combine_layers: str = "mix") -> None:
super().__init__()
self.bert_model = bert_model
self.output_dim = bert_model.config.hidden_size
self.max_pieces = max_pieces
self.start_tokens = start_tokens
self.end_tokens = end_tokens
self.combine_layers = combine_layers
if self.combine_layers == "mix":
self._scalar_mix = ScalarMixWithDropout(bert_model.config.num_hidden_layers,
do_layer_norm=False,
dropout=layer_dropout)
else:
self._scalar_mix = None
def get_output_dim(self) -> int:
return self.output_dim
def forward(self,
input_ids: torch.LongTensor,
offsets: torch.LongTensor = None,
token_type_ids: torch.LongTensor = None) -> torch.Tensor:
"""
Parameters
----------
input_ids : ``torch.LongTensor``
The (batch_size, ..., max_sequence_length) tensor of wordpiece ids.
offsets : ``torch.LongTensor``, optional
The BERT embeddings are one per wordpiece. However it's possible/likely
you might want one per original token. In that case, ``offsets``
represents the indices of the desired wordpiece for each original token.
Depending on how your token indexer is configured, this could be the
position of the last wordpiece for each token, or it could be the position
of the first wordpiece for each token.
For example, if you had the sentence "Definitely not", and if the corresponding
wordpieces were ["Def", "##in", "##ite", "##ly", "not"], then the input_ids
would be 5 wordpiece ids, and the "last wordpiece" offsets would be [3, 4].
If offsets are provided, the returned tensor will contain only the wordpiece
embeddings at those positions, and (in particular) will contain one embedding
per token. If offsets are not provided, the entire tensor of wordpiece embeddings
will be returned.
token_type_ids : ``torch.LongTensor``, optional
If an input consists of two sentences (as in the BERT paper),
tokens from the first sentence should have type 0 and tokens from
the second sentence should have type 1. If you don't provide this
(the default BertIndexer doesn't) then it's assumed to be all 0s.
"""
# pylint: disable=arguments-differ
batch_size, full_seq_len = input_ids.size(0), input_ids.size(-1)
initial_dims = list(input_ids.shape[:-1])
# The embedder may receive an input tensor that has a sequence length longer than can
# be fit. In that case, we should expect the wordpiece indexer to create padded windows
# of length `self.max_pieces` for us, and have them concatenated into one long sequence.
# E.g., "[CLS] I went to the [SEP] [CLS] to the store to [SEP] ..."
# We can then split the sequence into sub-sequences of that length, and concatenate them
# along the batch dimension so we effectively have one huge batch of partial sentences.
# This can then be fed into BERT without any sentence length issues. Keep in mind
# that the memory consumption can dramatically increase for large batches with extremely
# long sentences.
needs_split = full_seq_len > self.max_pieces
last_window_size = 0
if needs_split:
# Split the flattened list by the window size, `max_pieces`
split_input_ids = list(input_ids.split(self.max_pieces, dim=-1))
# We want all sequences to be the same length, so pad the last sequence
last_window_size = split_input_ids[-1].size(-1)
padding_amount = self.max_pieces - last_window_size
split_input_ids[-1] = F.pad(split_input_ids[-1], pad=[0, padding_amount], value=0)
# Now combine the sequences along the batch dimension
input_ids = torch.cat(split_input_ids, dim=0)
# if token_type_ids is None:
# token_type_ids = torch.zeros_like(input_ids)
input_mask = (input_ids != 0).long()
# input_ids may have extra dimensions, so we reshape down to 2-d
# before calling the BERT model and then reshape back at the end.
# MAYBE CHANGE THIS XLMRobertaForTokenClassification
all_encoder_layers = self.bert_model(input_ids=util.combine_initial_dims(input_ids),
# token_type_ids=util.combine_initial_dims(token_type_ids),
attention_mask=util.combine_initial_dims(input_mask))
all_encoder_layers = torch.stack(all_encoder_layers[2][1:]) # dump initial embeddings
if needs_split:
# First, unpack the output embeddings into one long sequence again
unpacked_embeddings = torch.split(all_encoder_layers, batch_size, dim=1)
unpacked_embeddings = torch.cat(unpacked_embeddings, dim=2)
# Next, select indices of the sequence such that it will result in embeddings representing the original
# sentence. To capture maximal context, the indices will be the middle part of each embedded window
# sub-sequence (plus any leftover start and final edge windows), e.g.,
# 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
# "[CLS] I went to the very fine [SEP] [CLS] the very fine store to eat [SEP]"
# with max_pieces = 8 should produce max context indices [2, 3, 4, 10, 11, 12] with additional start
# and final windows with indices [0, 1] and [14, 15] respectively.
# Find the stride as half the max pieces, ignoring the special start and end tokens
# Calculate an offset to extract the centermost embeddings of each window
stride = (self.max_pieces - self.start_tokens - self.end_tokens) // 2
stride_offset = stride // 2 + self.start_tokens
first_window = list(range(stride_offset))
max_context_windows = [i for i in range(full_seq_len)
if stride_offset - 1 < i % self.max_pieces < stride_offset + stride]
final_window_start = full_seq_len - (full_seq_len % self.max_pieces) + stride_offset + stride
final_window = list(range(final_window_start, full_seq_len))
select_indices = first_window + max_context_windows + final_window
initial_dims.append(len(select_indices))
recombined_embeddings = unpacked_embeddings[:, :, select_indices]
else:
recombined_embeddings = all_encoder_layers
# Recombine the outputs of all layers
# (layers, batch_size * d1 * ... * dn, sequence_length, embedding_dim)
# recombined = torch.cat(combined, dim=2)
input_mask = (recombined_embeddings != 0).long()
# At this point, mix is (batch_size * d1 * ... * dn, sequence_length, embedding_dim)
if offsets is None:
# Resize to (batch_size, d1, ..., dn, sequence_length, embedding_dim)
dims = initial_dims if needs_split else input_ids.size()
layers = util.uncombine_initial_dims(recombined_embeddings, dims)
else:
# offsets is (batch_size, d1, ..., dn, orig_sequence_length)
offsets2d = util.combine_initial_dims(offsets)
# now offsets is (batch_size * d1 * ... * dn, orig_sequence_length)
range_vector = util.get_range_vector(offsets2d.size(0),
device=util.get_device_of(recombined_embeddings)).unsqueeze(1)
# selected embeddings is also (batch_size * d1 * ... * dn, orig_sequence_length)
selected_embeddings = recombined_embeddings[:, range_vector, offsets2d]
# selected_embeddings = recombined_embeddings[range_vector, offsets2d]
layers = util.uncombine_initial_dims(selected_embeddings, offsets.size())
if self._scalar_mix is not None:
return self._scalar_mix(layers, input_mask)
elif self.combine_layers == "last":
return layers[-1]
else:
return layers
@TokenEmbedder.register("udify-xlmr-pretrained")
class UdifyPretrainedXlmrEmbedder(XlmrEmbedder):
def __init__(self, pretrained_model: str,
requires_grad: bool = False,
dropout: float = 0.1,
layer_dropout: float = 0.1,
combine_layers: str = "mix") -> None:
config = AutoConfig.from_pretrained(pretrained_model)
config.output_hidden_states = True
model = AutoModel.from_config(config)
for param in model.parameters():
param.requires_grad = requires_grad
super().__init__(bert_model=model,
layer_dropout=layer_dropout,
combine_layers=combine_layers)
self.model = model
self.dropout = dropout
self.set_dropout(dropout)
def set_dropout(self, dropout):
"""
Applies dropout to all BERT layers
"""
self.dropout = dropout
self.model.embeddings.dropout.p = dropout
for layer in self.model.encoder.layer:
layer.attention.self.dropout.p = dropout
layer.attention.output.dropout.p = dropout
layer.output.dropout.p = dropout
@TokenEmbedder.register("udify-xlmr-predictor")
class UdifyPredictionXlmrEmbedder(XlmrEmbedder):
"""To be used for inference only, pretrained model is unneeded"""
def __init__(self, bert_config: str,
requires_grad: bool = False,
dropout: float = 0.1,
layer_dropout: float = 0.1,
combine_layers: str = "mix") -> None:
config = AutoConfig.from_pretrained('xlm-roberta-base')
config.output_hidden_states = True
model = AutoModel.from_config(config)
for param in model.parameters():
param.requires_grad = requires_grad
super().__init__(bert_model=model,
layer_dropout=layer_dropout,
combine_layers=combine_layers)
self.model = model
self.dropout = dropout
self.set_dropout(dropout)
def set_dropout(self, dropout):
"""
Applies dropout to all BERT layers
"""
self.dropout = dropout
self.model.embeddings.dropout.p = dropout
for layer in self.model.encoder.layer:
layer.attention.self.dropout.p = dropout
layer.attention.output.dropout.p = dropout
layer.output.dropout.p = dropout
| en | 0.806316 | An extension of AllenNLP's BERT pretrained helper classes. Supports modification to BERT dropout, and applies a sliding window approach for long sentences. # TODO(joelgrus): Figure out how to generate token_type_ids out of this token indexer. # This is the default list of tokens that should not be lowercased. A token indexer that does the wordpiece-tokenization (e.g. for BERT embeddings). If you are using one of the pretrained BERT models, you'll want to use the ``PretrainedBertIndexer`` subclass rather than this base class. Parameters ---------- vocab : ``Dict[str, int]`` The mapping {wordpiece -> id}. Note this is not an AllenNLP ``Vocabulary``. wordpiece_tokenizer : ``Callable[[str], List[str]]`` A function that does the actual tokenization. namespace : str, optional (default: "wordpiece") The namespace in the AllenNLP ``Vocabulary`` into which the wordpieces will be loaded. use_starting_offsets : bool, optional (default: False) By default, the "offsets" created by the token indexer correspond to the last wordpiece in each word. If ``use_starting_offsets`` is specified, they will instead correspond to the first wordpiece in each word. max_pieces : int, optional (default: 514) The BERT embedder uses positional embeddings and so has a corresponding maximum length for its input ids. Any inputs longer than this will either be truncated (default), or be split apart and batched using a sliding window. do_lowercase : ``bool``, optional (default=``False``) Should we lowercase the provided tokens before getting the indices? You would need to do this if you are using an -uncased BERT model but your DatasetReader is not lowercasing tokens (which might be the case if you're also using other embeddings based on cased tokens). never_lowercase: ``List[str]``, optional Tokens that should never be lowercased. Default is []. start_tokens : ``List[str]``, optional (default=``None``) These are prepended to the tokens provided to ``tokens_to_indices``. end_tokens : ``List[str]``, optional (default=``None``) These are appended to the tokens provided to ``tokens_to_indices``. separator_token : ``str``, optional (default=``[SEP]``) This token indicates the segments in the sequence. truncate_long_sequences : ``bool``, optional (default=``True``) By default, long sequences will be truncated to the maximum sequence length. Otherwise, they will be split apart and batched using a sliding window. # The BERT code itself does a two-step tokenization: # sentence -> [words], and then word -> [wordpieces] # In AllenNLP, the first step is implemented as the ``BertSimpleWordSplitter``, # and this token indexer handles the second. # Use the defaults # Convert the start_tokens and end_tokens to wordpiece_ids # Convert the separator_token to wordpiece_ids # If we only use pretrained models, we don't need to do anything here. # pylint: disable=protected-access # This lowercases tokens if necessary # Obtain a nested sequence of wordpieces, each represented by a list of wordpiece ids # offsets[i] will give us the index into wordpiece_ids # for the wordpiece "corresponding to" the i-th input token. # If we're using initial offsets, we want to start at offset = len(text_tokens) # so that the first offset is the index of the first wordpiece of tokens[0]. # Otherwise, we want to start at len(text_tokens) - 1, so that the "previous" # offset is the last wordpiece of "tokens[-1]". 
# For initial offsets, the current value of ``offset`` is the start of # the current wordpiece, so add it to ``offsets`` and then increment it. # For final offsets, the current value of ``offset`` is the end of # the previous wordpiece, so increment it and then add it to ``offsets``. # Flattened list of wordpieces. In the end, the output of the model (e.g., BERT) should # have a sequence length equal to the length of this list. However, it will first be split into # chunks of length `self.max_pieces` so that they can be fit through the model. After packing # and passing through the model, it should be unpacked to represent the wordpieces in this list. # The code below will (possibly) pack the wordpiece sequence into multiple sub-sequences by using a sliding # window `window_length` that overlaps with previous windows according to the `stride`. Suppose we have # the following sentence: "I went to the store to buy some milk". Then a sliding window of length 4 and # stride of length 2 will split them up into: # "[I went to the] [to the store to] [store to buy some] [buy some milk [PAD]]". # This is to ensure that the model has context of as much of the sentence as possible to get accurate # embeddings. Finally, the sequences will be padded with any start/end piece ids, e.g., # "[CLS] I went to the [SEP] [CLS] to the store to [SEP] ...". # The embedder should then be able to split this token sequence by the window length, # pass them through the model, and recombine them. # Specify the stride to be half of `self.max_pieces`, minus any additional start/end wordpieces # If all the wordpieces fit, then we don't need to do anything special # Create a sliding window of wordpieces of length `max_pieces` that advances by `stride` steps and # add start/end wordpieces to each window # TODO: this currently does not respect word boundaries, so words may be cut in half between windows # However, this would increase complexity, as sequences would need to be padded/unpadded in the middle # Check for overlap in the last window. Throw it away if it is redundant. # Flatten the wordpiece windows # Constructing `token_type_ids` by `self._separator` # Our mask should correspond to the original tokens, # because calling util.get_text_field_mask on the # "wordpiece_id" tokens will produce the wrong shape. # However, because of the max_pieces constraint, we may # have truncated the wordpieces; accordingly, we want the mask # to correspond to the remaining tokens after truncation, which # is captured by the offsets. # pylint: disable=unused-argument We need to override this because the indexer generates multiple keys. # pylint: disable=no-self-use # pylint: disable=line-too-long A ``TokenIndexer`` corresponding to a pretrained BERT model. Parameters ---------- pretrained_model: ``str`` Either the name of the pretrained model to use (e.g. 'xlm-roberta-base'), or the path to the .txt file with its vocabulary. If the name is a key in the list of pretrained models at https://github.com/huggingface/pytorch-pretrained-BERT/blob/master/pytorch_pretrained_bert/tokenization.py#L33 the corresponding path will be used; otherwise it will be interpreted as a path or URL. use_starting_offsets: bool, optional (default: False) By default, the "offsets" created by the token indexer correspond to the last wordpiece in each word. If ``use_starting_offsets`` is specified, they will instead correspond to the first wordpiece in each word. 
do_lowercase: ``bool``, optional (default = True) Whether to lowercase the tokens before converting to wordpiece ids. never_lowercase: ``List[str]``, optional Tokens that should never be lowercased. Default is []. max_pieces: int, optional (default: 514) The BERT embedder uses positional embeddings and so has a corresponding maximum length for its input ids. Any inputs longer than this will either be truncated (default), or be split apart and batched using a sliding window. truncate_long_sequences : ``bool``, optional (default=``True``) By default, long sequences will be truncated to the maximum sequence length. Otherwise, they will be split apart and batched using a sliding window. # check length # check content # when it is a separator # when it is not A ``TokenEmbedder`` that produces BERT embeddings for your tokens. Should be paired with a ``BertIndexer``, which produces wordpiece ids. Most likely you probably want to use ``PretrainedBertEmbedder`` for one of the named pretrained models, not this base class. Parameters ---------- bert_model: ``BertModel`` The BERT model being wrapped. top_layer_only: ``bool``, optional (default = ``False``) If ``True``, then only return the top layer instead of apply the scalar mix. max_pieces : int, optional (default: 514) The BERT embedder uses positional embeddings and so has a corresponding maximum length for its input ids. Assuming the inputs are windowed and padded appropriately by this length, the embedder will split them into a large batch, feed them into BERT, and recombine the output as if it was a longer sequence. start_tokens : int, optional (default: 1) The number of starting special tokens input to BERT (usually 1, i.e., [CLS]) end_tokens : int, optional (default: 1) The number of ending tokens input to BERT (usually 1, i.e., [SEP]) combine_layers : str, optional (default: "mix") Options: "mix", "last", "all" Parameters ---------- input_ids : ``torch.LongTensor`` The (batch_size, ..., max_sequence_length) tensor of wordpiece ids. offsets : ``torch.LongTensor``, optional The BERT embeddings are one per wordpiece. However it's possible/likely you might want one per original token. In that case, ``offsets`` represents the indices of the desired wordpiece for each original token. Depending on how your token indexer is configured, this could be the position of the last wordpiece for each token, or it could be the position of the first wordpiece for each token. For example, if you had the sentence "Definitely not", and if the corresponding wordpieces were ["Def", "##in", "##ite", "##ly", "not"], then the input_ids would be 5 wordpiece ids, and the "last wordpiece" offsets would be [3, 4]. If offsets are provided, the returned tensor will contain only the wordpiece embeddings at those positions, and (in particular) will contain one embedding per token. If offsets are not provided, the entire tensor of wordpiece embeddings will be returned. token_type_ids : ``torch.LongTensor``, optional If an input consists of two sentences (as in the BERT paper), tokens from the first sentence should have type 0 and tokens from the second sentence should have type 1. If you don't provide this (the default BertIndexer doesn't) then it's assumed to be all 0s. # pylint: disable=arguments-differ # The embedder may receive an input tensor that has a sequence length longer than can # be fit. In that case, we should expect the wordpiece indexer to create padded windows # of length `self.max_pieces` for us, and have them concatenated into one long sequence. 
# E.g., "[CLS] I went to the [SEP] [CLS] to the store to [SEP] ..." # We can then split the sequence into sub-sequences of that length, and concatenate them # along the batch dimension so we effectively have one huge batch of partial sentences. # This can then be fed into BERT without any sentence length issues. Keep in mind # that the memory consumption can dramatically increase for large batches with extremely # long sentences. # Split the flattened list by the window size, `max_pieces` # We want all sequences to be the same length, so pad the last sequence # Now combine the sequences along the batch dimension # if token_type_ids is None: # token_type_ids = torch.zeros_like(input_ids) # input_ids may have extra dimensions, so we reshape down to 2-d # before calling the BERT model and then reshape back at the end. # MAYBE CHANGE THIS XLMRobertaForTokenClassification # token_type_ids=util.combine_initial_dims(token_type_ids), # dump initial embeddings # First, unpack the output embeddings into one long sequence again # Next, select indices of the sequence such that it will result in embeddings representing the original # sentence. To capture maximal context, the indices will be the middle part of each embedded window # sub-sequence (plus any leftover start and final edge windows), e.g., # 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 # "[CLS] I went to the very fine [SEP] [CLS] the very fine store to eat [SEP]" # with max_pieces = 8 should produce max context indices [2, 3, 4, 10, 11, 12] with additional start # and final windows with indices [0, 1] and [14, 15] respectively. # Find the stride as half the max pieces, ignoring the special start and end tokens # Calculate an offset to extract the centermost embeddings of each window # Recombine the outputs of all layers # (layers, batch_size * d1 * ... * dn, sequence_length, embedding_dim) # recombined = torch.cat(combined, dim=2) # At this point, mix is (batch_size * d1 * ... * dn, sequence_length, embedding_dim) # Resize to (batch_size, d1, ..., dn, sequence_length, embedding_dim) # offsets is (batch_size, d1, ..., dn, orig_sequence_length) # now offsets is (batch_size * d1 * ... * dn, orig_sequence_length) # selected embeddings is also (batch_size * d1 * ... * dn, orig_sequence_length) # selected_embeddings = recombined_embeddings[range_vector, offsets2d] Applies dropout to all BERT layers To be used for inference only, pretrained model is unneeded Applies dropout to all BERT layers | 2.448824 | 2 |
LinearRegression/ScriptEval_linearRegression.py | ChristofSchwarz/qs-python-samples | 7 | 6632701 | import logging
import logging.config
import grpc
from SSEData_linearRegression import ArgType, \
evaluate, \
get_arg_types, \
get_return_type, \
FunctionType, \
get_arguments
class ScriptEval:
"""
Class for SSE plugin ScriptEval functionality.
"""
def EvaluateScript(self, header, request, func_type):
"""
Evaluates script provided in the header, given the
arguments provided in the sequence of RowData objects, the request.
:param header:
:param request: an iterable sequence of RowData.
:param func_type: function type.
:return: an iterable sequence of RowData.
"""
# Retrieve data types from header
arg_types = get_arg_types(header)
ret_type = get_return_type(header)
logging.info('EvaluateScript: {} ({} {}) {}'
.format(header.script, arg_types, ret_type, func_type))
aggr = (func_type == FunctionType.Aggregation)
# Check if parameters are provided
if header.params:
# Verify argument type
if arg_types == ArgType.String:
# Create an empty list if tensor function
if aggr:
all_rows = []
# Iterate over bundled rows
for request_rows in request:
# Iterate over rows
for row in request_rows.rows:
# Retrieve numerical data from duals
params = get_arguments(arg_types, row.duals)
if aggr:
# Append value to list, for later aggregation
all_rows.append(params)
else:
# Evaluate script row wise
yield evaluate(header.script, ret_type, params=params)
# Evaluate script based on data from all rows
if aggr:
params = [list(param) for param in zip(*all_rows)]
yield evaluate(header.script, ret_type, params=params)
else:
# This plugin does not support other argument types than string.
raise grpc.RpcError(grpc.StatusCode.UNIMPLEMENTED,
'Argument type: {} not supported in this plugin.'.format(arg_types))
else:
# This plugin does not support script evaluation without parameters
raise grpc.RpcError(grpc.StatusCode.UNIMPLEMENTED,
'Script evaluation with no parameters is not supported in this plugin.')
| import logging
import logging.config
import grpc
from SSEData_linearRegression import ArgType, \
evaluate, \
get_arg_types, \
get_return_type, \
FunctionType, \
get_arguments
class ScriptEval:
"""
Class for SSE plugin ScriptEval functionality.
"""
def EvaluateScript(self, header, request, func_type):
"""
Evaluates script provided in the header, given the
arguments provided in the sequence of RowData objects, the request.
:param header:
:param request: an iterable sequence of RowData.
:param func_type: function type.
:return: an iterable sequence of RowData.
"""
# Retrieve data types from header
arg_types = get_arg_types(header)
ret_type = get_return_type(header)
logging.info('EvaluateScript: {} ({} {}) {}'
.format(header.script, arg_types, ret_type, func_type))
aggr = (func_type == FunctionType.Aggregation)
# Check if parameters are provided
if header.params:
# Verify argument type
if arg_types == ArgType.String:
# Create an empty list if tensor function
if aggr:
all_rows = []
# Iterate over bundled rows
for request_rows in request:
# Iterate over rows
for row in request_rows.rows:
# Retrieve numerical data from duals
params = get_arguments(arg_types, row.duals)
if aggr:
# Append value to list, for later aggregation
all_rows.append(params)
else:
# Evaluate script row wise
yield evaluate(header.script, ret_type, params=params)
# Evaluate script based on data from all rows
if aggr:
params = [list(param) for param in zip(*all_rows)]
yield evaluate(header.script, ret_type, params=params)
else:
# This plugin does not support other argument types than string.
raise grpc.RpcError(grpc.StatusCode.UNIMPLEMENTED,
'Argument type: {} not supported in this plugin.'.format(arg_types))
else:
# This plugin does not support script evaluation without parameters
raise grpc.RpcError(grpc.StatusCode.UNIMPLEMENTED,
'Script evaluation with no parameters is not supported in this plugin.')
| en | 0.628568 | Class for SSE plugin ScriptEval functionality. Evaluates script provided in the header, given the arguments provided in the sequence of RowData objects, the request. :param header: :param request: an iterable sequence of RowData. :param func_type: function type. :return: an iterable sequence of RowData. # Retrieve data types from header # Check if parameters are provided # Verify argument type # Create an empty list if tensor function # Iterate over bundled rows # Iterate over rows # Retrieve numerical data from duals # Append value to list, for later aggregation # Evaluate script row wise # Evaluate script based on data from all rows # This plugin does not support other argument types than string. # This plugin does not support script evaluation without parameters | 2.290521 | 2 |
src/main.py | Others/cos_memspec | 1 | 6632702 | #!/usr/local/bin/python3
from z3 import set_option
from shrinker import check_fragmentation
from vms import \
configure_setup_with_io_manager, \
configure_setup_with_io_and_chaining, \
configure_setup_with_io_and_sharing, \
setup_config_1, setup_config_2, setup_config_3, setup_config_4, setup_config_5
def kb(x):
return x * 1024
def mb(x):
return x * (1024 ** 2)
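# Quick sanity check of the helpers above (added comment): kb(10) == 10240 and
# mb(1) == 1048576, which is the value added to 0x08000000 for the flash bound
# in the commented-out gen_config sketch below.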
# def gen_config(size, complex_overlap_constraint):
# hw_config = HardwareConfig(region_count=3,
# subregion_count=8,
# complex_overlap_constraint=complex_overlap_constraint)
#
# server = Component("server", hw_config)
# client = Component("client", hw_config)
#
# flash = Partition("flash", 0x08000000, 0x08000000 + mb(1))
# sram = Partition("sram", 0x20000000, 0x20000000 + size)
#
# server_code = PartitionArena("server/code", flash, kb(10), [server], [])
# client_code = PartitionArena("client/code", flash, kb(11), [client], [])
#
# server_main = PartitionArena("server/main", sram, kb(50), [server], [server])
# client_main = PartitionArena("client/main", sram, kb(30), [client], [client])
#
# shared = PartitionArena("server+client/shared", sram, kb(25), [server, client],
# [server, client])
#
# components = [server, client]
# arenas = [server_code, client_code, server_main, client_main, shared]
#
# return (components, arenas)
def get_setup(i):
if i == 1:
return setup_config_1
elif i == 2:
return setup_config_2
elif i == 3:
return setup_config_3
elif i == 4:
return setup_config_4
elif i == 5:
return setup_config_5
def get_finalizer(i):
if i == 1:
return configure_setup_with_io_manager
elif i == 2:
return configure_setup_with_io_and_chaining
elif i == 3:
return configure_setup_with_io_and_sharing
if __name__ == '__main__':
set_option(max_args=10000000, max_lines=1000000, max_depth=10000000, max_visited=1000000)
print("Enter setup selection (1 - 5):")
setup = int(input())  # input() returns a string; cast so it matches the int checks in get_setup
print("Enter shared memory selection (1 - 3):")
finalizer = int(input())  # same cast for the int comparisons in get_finalizer
setup_function = get_setup(setup)
finalizer_function = get_finalizer(finalizer)
def config_generator(ram_size, complex_overlap_constraint):
return finalizer_function(setup_function(ram_size, complex_overlap_constraint))
check_fragmentation(config_generator)
| #!/usr/local/bin/python3
from z3 import set_option
from shrinker import check_fragmentation
from vms import \
configure_setup_with_io_manager, \
configure_setup_with_io_and_chaining, \
configure_setup_with_io_and_sharing, \
setup_config_1, setup_config_2, setup_config_3, setup_config_4, setup_config_5
def kb(x):
return x * 1024
def mb(x):
return x * (1024 ** 2)
# def gen_config(size, complex_overlap_constraint):
# hw_config = HardwareConfig(region_count=3,
# subregion_count=8,
# complex_overlap_constraint=complex_overlap_constraint)
#
# server = Component("server", hw_config)
# client = Component("client", hw_config)
#
# flash = Partition("flash", 0x08000000, 0x08000000 + mb(1))
# sram = Partition("sram", 0x20000000, 0x20000000 + size)
#
# server_code = PartitionArena("server/code", flash, kb(10), [server], [])
# client_code = PartitionArena("client/code", flash, kb(11), [client], [])
#
# server_main = PartitionArena("server/main", sram, kb(50), [server], [server])
# client_main = PartitionArena("client/main", sram, kb(30), [client], [client])
#
# shared = PartitionArena("server+client/shared", sram, kb(25), [server, client],
# [server, client])
#
# components = [server, client]
# arenas = [server_code, client_code, server_main, client_main, shared]
#
# return (components, arenas)
def get_setup(i):
if i == 1:
return setup_config_1
elif i == 2:
return setup_config_2
elif i == 3:
return setup_config_3
elif i == 4:
return setup_config_4
elif i == 5:
return setup_config_5
def get_finalizer(i):
if i == 1:
return configure_setup_with_io_manager
elif i == 2:
return configure_setup_with_io_and_chaining
elif i == 3:
return configure_setup_with_io_and_sharing
if __name__ == '__main__':
set_option(max_args=10000000, max_lines=1000000, max_depth=10000000, max_visited=1000000)
print("Enter setup selection (1 - 5):")
setup = int(input())  # input() returns a string; cast so it matches the int checks in get_setup
print("Enter shared memory selection (1 - 3):")
finalizer = int(input())  # same cast for the int comparisons in get_finalizer
setup_function = get_setup(setup)
finalizer_function = get_finalizer(finalizer)
def config_generator(ram_size, complex_overlap_constraint):
return finalizer_function(setup_function(ram_size, complex_overlap_constraint))
check_fragmentation(config_generator)
| en | 0.529271 | #!/usr/local/bin/python3 # def gen_config(size, complex_overlap_constraint): # hw_config = HardwareConfig(region_count=3, # subregion_count=8, # complex_overlap_constraint=complex_overlap_constraint) # # server = Component("server", hw_config) # client = Component("client", hw_config) # # flash = Partition("flash", 0x08000000, 0x08000000 + mb(1)) # sram = Partition("sram", 0x20000000, 0x20000000 + size) # # server_code = PartitionArena("server/code", flash, kb(10), [server], []) # client_code = PartitionArena("client/code", flash, kb(11), [client], []) # # server_main = PartitionArena("server/main", sram, kb(50), [server], [server]) # client_main = PartitionArena("client/main", sram, kb(30), [client], [client]) # # shared = PartitionArena("server+client/shared", sram, kb(25), [server, client], # [server, client]) # # components = [server, client] # arenas = [server_code, client_code, server_main, client_main, shared] # # return (components, arenas) | 2.026543 | 2 |
profiles_api/tests.py | Algernonagon/rest-api | 0 | 6632703 | from django.test import TestCase
from django.contrib import auth
from django.contrib.auth import get_user_model
User = get_user_model()
class AuthTestCase(TestCase):
def setUp(self):
self.u = User.objects.create_user('<EMAIL>', '<EMAIL>', '<PASSWORD>')
self.u.is_staff = True
self.u.is_superuser = True
self.u.is_active = True
self.u.save()
def testLogin(self):
self.client.login(username='<EMAIL>', password='<PASSWORD>')
| from django.test import TestCase
from django.contrib import auth
from django.contrib.auth import get_user_model
User = get_user_model()
class AuthTestCase(TestCase):
def setUp(self):
self.u = User.objects.create_user('<EMAIL>', '<EMAIL>', '<PASSWORD>')
self.u.is_staff = True
self.u.is_superuser = True
self.u.is_active = True
self.u.save()
def testLogin(self):
self.client.login(username='<EMAIL>', password='<PASSWORD>')
| none | 1 | 2.409289 | 2 |
|
lib/view.py | goznalo-git/rate.sx | 903 | 6632704 | from mng import MongoReader
from view_ansi import print_table
import sys
try:
reload(sys)
sys.setdefaultencoding("utf-8")
except NameError:
pass # Python 3 already defaults to utf-8
def show(config):
"main function"
default_config = {
'number_of_ticks': 12,
'number_of_coins': 10,
'currency': 'USD',
}
int_parameters = [
'number_of_ticks',
'number_of_coins',
]
alias = {
'n': 'number_of_coins',
}
for k,v in config.items():
k = alias.get(k,k)
default_config[k] = v
if k in int_parameters:
try:
default_config[k] = int(v)
except:
pass
#default_config.update(config)
config = default_config
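# Added example of the merging above: a query like {'n': '5'} is folded into the
# defaults as number_of_coins=5, while a non-numeric value such as {'n': 'abc'}
# is left as the raw string because the bare except swallows the failed int().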
mongo_reader = MongoReader(config)
data = mongo_reader.load_from_mongo()
market_cap_direction, vol_24h_direction, btc_dominance_direction = 0, 0, 0
marktcap_spark = "."*48
try:
output = print_table(
config['currency'],
data,
(market_cap_direction, vol_24h_direction, btc_dominance_direction),
marktcap_spark,
config)
except ValueError as e:
output = "ERROR: %s" % e
return output
| from mng import MongoReader
from view_ansi import print_table
import sys
try:
reload(sys)
sys.setdefaultencoding("utf-8")
except NameError:
pass # Python 3 already defaults to utf-8
def show(config):
"main function"
default_config = {
'number_of_ticks': 12,
'number_of_coins': 10,
'currency': 'USD',
}
int_parameters = [
'number_of_ticks',
'number_of_coins',
]
alias = {
'n': 'number_of_coins',
}
for k,v in config.items():
k = alias.get(k,k)
default_config[k] = v
if k in int_parameters:
try:
default_config[k] = int(v)
except:
pass
#default_config.update(config)
config = default_config
mongo_reader = MongoReader(config)
data = mongo_reader.load_from_mongo()
market_cap_direction, vol_24h_direction, btc_dominance_direction = 0, 0, 0
marktcap_spark = "."*48
try:
output = print_table(
config['currency'],
data,
(market_cap_direction, vol_24h_direction, btc_dominance_direction),
marktcap_spark,
config)
except ValueError as e:
output = "ERROR: %s" % e
return output
| en | 0.111993 | # Python 3 already defaults to utf-8 #default_config.update(config) | 2.587838 | 3 |
server/problems.py | ChuckMN/Apple-iOS-MDM-Server | 549 | 6632705 | problems = []
| problems = [] | none | 1 | 1.052036 | 1 |
|
pydantic/color.py | FuegoFro/pydantic | 10 | 6632706 | """
Color definitions are used as per CSS3 specification:
http://www.w3.org/TR/css3-color/#svg-color
A few colors have multiple names referring to the sames colors, eg. `grey` and `gray` or `aqua` and `cyan`.
In these cases the LAST color when sorted alphabetically takes preferences,
eg. Color((0, 255, 255)).as_named() == 'cyan' because "cyan" comes after "aqua".
"""
import math
import re
from colorsys import hls_to_rgb, rgb_to_hls
from typing import TYPE_CHECKING, Any, Dict, Optional, Tuple, Union, cast
from .errors import ColorError
from .utils import Representation, almost_equal_floats
if TYPE_CHECKING:
from .typing import CallableGenerator, ReprArgs
ColorTuple = Union[Tuple[int, int, int], Tuple[int, int, int, float]]
ColorType = Union[ColorTuple, str]
HslColorTuple = Union[Tuple[float, float, float], Tuple[float, float, float, float]]
class RGBA:
"""
Internal use only as a representation of a color.
"""
__slots__ = 'r', 'g', 'b', 'alpha', '_tuple'
def __init__(self, r: float, g: float, b: float, alpha: Optional[float]):
self.r = r
self.g = g
self.b = b
self.alpha = alpha
self._tuple: Tuple[float, float, float, Optional[float]] = (r, g, b, alpha)
def __getitem__(self, item: Any) -> Any:
return self._tuple[item]
# these are not compiled here to avoid import slowdown, they'll be compiled the first time they're used, then cached
r_hex_short = r'\s*(?:#|0x)?([0-9a-f])([0-9a-f])([0-9a-f])([0-9a-f])?\s*'
r_hex_long = r'\s*(?:#|0x)?([0-9a-f]{2})([0-9a-f]{2})([0-9a-f]{2})([0-9a-f]{2})?\s*'
_r_255 = r'(\d{1,3}(?:\.\d+)?)'
_r_comma = r'\s*,\s*'
r_rgb = fr'\s*rgb\(\s*{_r_255}{_r_comma}{_r_255}{_r_comma}{_r_255}\)\s*'
_r_alpha = r'(\d(?:\.\d+)?|\.\d+|\d{1,2}%)'
r_rgba = fr'\s*rgba\(\s*{_r_255}{_r_comma}{_r_255}{_r_comma}{_r_255}{_r_comma}{_r_alpha}\s*\)\s*'
_r_h = r'(-?\d+(?:\.\d+)?|-?\.\d+)(deg|rad|turn)?'
_r_sl = r'(\d{1,3}(?:\.\d+)?)%'
r_hsl = fr'\s*hsl\(\s*{_r_h}{_r_comma}{_r_sl}{_r_comma}{_r_sl}\s*\)\s*'
r_hsla = fr'\s*hsl\(\s*{_r_h}{_r_comma}{_r_sl}{_r_comma}{_r_sl}{_r_comma}{_r_alpha}\s*\)\s*'
# colors where the two hex characters are the same, if all colors match this the short version of hex colors can be used
repeat_colors = {int(c * 2, 16) for c in '0123456789abcdef'}
rads = 2 * math.pi
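# Added note: repeat_colors is {0x00, 0x11, ..., 0xff}, so a colour such as
# (255, 0, 0) can be emitted by as_hex() in the short '#f00' form, while
# (255, 10, 0) has to stay as the long '#ff0a00'.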
class Color(Representation):
__slots__ = '_original', '_rgba'
def __init__(self, value: ColorType) -> None:
self._rgba: RGBA
self._original: ColorType
if isinstance(value, (tuple, list)):
self._rgba = parse_tuple(value)
elif isinstance(value, str):
self._rgba = parse_str(value)
elif isinstance(value, Color):
self._rgba = value._rgba
value = value._original
else:
raise ColorError(reason='value must be a tuple, list or string')
# if we've got here value must be a valid color
self._original = value
@classmethod
def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
field_schema.update(type='string', format='color')
def original(self) -> ColorType:
"""
Original value passed to Color
"""
return self._original
def as_named(self, *, fallback: bool = False) -> str:
if self._rgba.alpha is None:
rgb = cast(Tuple[int, int, int], self.as_rgb_tuple())
try:
return COLORS_BY_VALUE[rgb]
except KeyError as e:
if fallback:
return self.as_hex()
else:
raise ValueError('no named color found, use fallback=True, as_hex() or as_rgb()') from e
else:
return self.as_hex()
def as_hex(self) -> str:
"""
Hex string representing the color can be 3, 4, 6 or 8 characters depending on whether the string
a "short" representation of the color is possible and whether there's an alpha channel.
"""
values = [float_to_255(c) for c in self._rgba[:3]]
if self._rgba.alpha is not None:
values.append(float_to_255(self._rgba.alpha))
as_hex = ''.join(f'{v:02x}' for v in values)
if all(c in repeat_colors for c in values):
as_hex = ''.join(as_hex[c] for c in range(0, len(as_hex), 2))
return '#' + as_hex
def as_rgb(self) -> str:
"""
Color as an rgb(<r>, <g>, <b>) or rgba(<r>, <g>, <b>, <a>) string.
"""
if self._rgba.alpha is None:
return f'rgb({float_to_255(self._rgba.r)}, {float_to_255(self._rgba.g)}, {float_to_255(self._rgba.b)})'
else:
return (
f'rgba({float_to_255(self._rgba.r)}, {float_to_255(self._rgba.g)}, {float_to_255(self._rgba.b)}, '
f'{round(self._alpha_float(), 2)})'
)
def as_rgb_tuple(self, *, alpha: Optional[bool] = None) -> ColorTuple:
"""
Color as an RGB or RGBA tuple; red, green and blue are in the range 0 to 255, alpha if included is
in the range 0 to 1.
:param alpha: whether to include the alpha channel, options are
None - (default) include alpha only if it's set (e.g. not None)
True - always include alpha,
False - always omit alpha,
"""
r, g, b = [float_to_255(c) for c in self._rgba[:3]]
if alpha is None:
if self._rgba.alpha is None:
return r, g, b
else:
return r, g, b, self._alpha_float()
elif alpha:
return r, g, b, self._alpha_float()
else:
# alpha is False
return r, g, b
def as_hsl(self) -> str:
"""
Color as an hsl(<h>, <s>, <l>) or hsl(<h>, <s>, <l>, <a>) string.
"""
if self._rgba.alpha is None:
h, s, li = self.as_hsl_tuple(alpha=False) # type: ignore
return f'hsl({h * 360:0.0f}, {s * 100:0.0f}%, {li * 100:0.0f}%)'
else:
h, s, li, a = self.as_hsl_tuple(alpha=True) # type: ignore
return f'hsl({h * 360:0.0f}, {s * 100:0.0f}%, {li * 100:0.0f}%, {round(a, 2)})'
def as_hsl_tuple(self, *, alpha: Optional[bool] = None) -> HslColorTuple:
"""
Color as an HSL or HSLA tuple, e.g. hue, saturation, lightness and optionally alpha; all elements are in
the range 0 to 1.
NOTE: this is HSL as used in HTML and most other places, not HLS as used in python's colorsys.
:param alpha: whether to include the alpha channel, options are
None - (default) include alpha only if it's set (e.g. not None)
True - always include alpha,
False - always omit alpha,
"""
h, l, s = rgb_to_hls(self._rgba.r, self._rgba.g, self._rgba.b)
if alpha is None:
if self._rgba.alpha is None:
return h, s, l
else:
return h, s, l, self._alpha_float()
if alpha:
return h, s, l, self._alpha_float()
else:
# alpha is False
return h, s, l
def _alpha_float(self) -> float:
return 1 if self._rgba.alpha is None else self._rgba.alpha
@classmethod
def __get_validators__(cls) -> 'CallableGenerator':
yield cls
def __str__(self) -> str:
return self.as_named(fallback=True)
def __repr_args__(self) -> 'ReprArgs':
return [(None, self.as_named(fallback=True))] + [('rgb', self.as_rgb_tuple())] # type: ignore
def parse_tuple(value: Tuple[Any, ...]) -> RGBA:
"""
Parse a tuple or list as a color.
"""
if len(value) == 3:
r, g, b = [parse_color_value(v) for v in value]
return RGBA(r, g, b, None)
elif len(value) == 4:
r, g, b = [parse_color_value(v) for v in value[:3]]
return RGBA(r, g, b, parse_float_alpha(value[3]))
else:
raise ColorError(reason='tuples must have length 3 or 4')
def parse_str(value: str) -> RGBA:
"""
Parse a string to an RGBA tuple, trying the following formats (in this order):
* named color, see COLORS_BY_NAME below
* hex short eg. `<prefix>fff` (prefix can be `#`, `0x` or nothing)
* hex long eg. `<prefix>ffffff` (prefix can be `#`, `0x` or nothing)
* `rgb(<r>, <g>, <b>) `
* `rgba(<r>, <g>, <b>, <a>)`
"""
value_lower = value.lower()
try:
r, g, b = COLORS_BY_NAME[value_lower]
except KeyError:
pass
else:
return ints_to_rgba(r, g, b, None)
m = re.fullmatch(r_hex_short, value_lower)
if m:
*rgb, a = m.groups()
r, g, b = [int(v * 2, 16) for v in rgb]
if a:
alpha: Optional[float] = int(a * 2, 16) / 255
else:
alpha = None
return ints_to_rgba(r, g, b, alpha)
m = re.fullmatch(r_hex_long, value_lower)
if m:
*rgb, a = m.groups()
r, g, b = [int(v, 16) for v in rgb]
if a:
alpha = int(a, 16) / 255
else:
alpha = None
return ints_to_rgba(r, g, b, alpha)
m = re.fullmatch(r_rgb, value_lower)
if m:
return ints_to_rgba(*m.groups(), None) # type: ignore
m = re.fullmatch(r_rgba, value_lower)
if m:
return ints_to_rgba(*m.groups()) # type: ignore
m = re.fullmatch(r_hsl, value_lower)
if m:
h, h_units, s, l_ = m.groups()
return parse_hsl(h, h_units, s, l_)
m = re.fullmatch(r_hsla, value_lower)
if m:
h, h_units, s, l_, a = m.groups()
return parse_hsl(h, h_units, s, l_, parse_float_alpha(a))
raise ColorError(reason='string not recognised as a valid color')
def ints_to_rgba(r: Union[int, str], g: Union[int, str], b: Union[int, str], alpha: Optional[float]) -> RGBA:
return RGBA(parse_color_value(r), parse_color_value(g), parse_color_value(b), parse_float_alpha(alpha))
def parse_color_value(value: Union[int, str], max_val: int = 255) -> float:
"""
Parse a value checking it's a valid int in the range 0 to max_val and divide by max_val to give a number
in the range 0 to 1
"""
try:
color = float(value)
except ValueError:
raise ColorError(reason='color values must be a valid number')
if 0 <= color <= max_val:
return color / max_val
else:
raise ColorError(reason=f'color values must be in the range 0 to {max_val}')
def parse_float_alpha(value: Union[None, str, float, int]) -> Optional[float]:
"""
Parse a value checking it's a valid float in the range 0 to 1
"""
if value is None:
return None
try:
if isinstance(value, str) and value.endswith('%'):
alpha = float(value[:-1]) / 100
else:
alpha = float(value)
except ValueError:
raise ColorError(reason='alpha values must be a valid float')
if almost_equal_floats(alpha, 1):
return None
elif 0 <= alpha <= 1:
return alpha
else:
raise ColorError(reason='alpha values must be in the range 0 to 1')
def parse_hsl(h: str, h_units: str, s: str, l: str, alpha: Optional[float] = None) -> RGBA:
"""
Parse raw hue, saturation, lightness and alpha values and convert to RGBA.
"""
s_value, l_value = parse_color_value(s, 100), parse_color_value(l, 100)
h_value = float(h)
if h_units in {None, 'deg'}:
h_value = h_value % 360 / 360
elif h_units == 'rad':
h_value = h_value % rads / rads
else:
# turns
h_value = h_value % 1
r, g, b = hls_to_rgb(h_value, l_value, s_value)
return RGBA(r, g, b, alpha)
def float_to_255(c: float) -> int:
return int(round(c * 255))
COLORS_BY_NAME = {
'aliceblue': (240, 248, 255),
'antiquewhite': (250, 235, 215),
'aqua': (0, 255, 255),
'aquamarine': (127, 255, 212),
'azure': (240, 255, 255),
'beige': (245, 245, 220),
'bisque': (255, 228, 196),
'black': (0, 0, 0),
'blanchedalmond': (255, 235, 205),
'blue': (0, 0, 255),
'blueviolet': (138, 43, 226),
'brown': (165, 42, 42),
'burlywood': (222, 184, 135),
'cadetblue': (95, 158, 160),
'chartreuse': (127, 255, 0),
'chocolate': (210, 105, 30),
'coral': (255, 127, 80),
'cornflowerblue': (100, 149, 237),
'cornsilk': (255, 248, 220),
'crimson': (220, 20, 60),
'cyan': (0, 255, 255),
'darkblue': (0, 0, 139),
'darkcyan': (0, 139, 139),
'darkgoldenrod': (184, 134, 11),
'darkgray': (169, 169, 169),
'darkgreen': (0, 100, 0),
'darkgrey': (169, 169, 169),
'darkkhaki': (189, 183, 107),
'darkmagenta': (139, 0, 139),
'darkolivegreen': (85, 107, 47),
'darkorange': (255, 140, 0),
'darkorchid': (153, 50, 204),
'darkred': (139, 0, 0),
'darksalmon': (233, 150, 122),
'darkseagreen': (143, 188, 143),
'darkslateblue': (72, 61, 139),
'darkslategray': (47, 79, 79),
'darkslategrey': (47, 79, 79),
'darkturquoise': (0, 206, 209),
'darkviolet': (148, 0, 211),
'deeppink': (255, 20, 147),
'deepskyblue': (0, 191, 255),
'dimgray': (105, 105, 105),
'dimgrey': (105, 105, 105),
'dodgerblue': (30, 144, 255),
'firebrick': (178, 34, 34),
'floralwhite': (255, 250, 240),
'forestgreen': (34, 139, 34),
'fuchsia': (255, 0, 255),
'gainsboro': (220, 220, 220),
'ghostwhite': (248, 248, 255),
'gold': (255, 215, 0),
'goldenrod': (218, 165, 32),
'gray': (128, 128, 128),
'green': (0, 128, 0),
'greenyellow': (173, 255, 47),
'grey': (128, 128, 128),
'honeydew': (240, 255, 240),
'hotpink': (255, 105, 180),
'indianred': (205, 92, 92),
'indigo': (75, 0, 130),
'ivory': (255, 255, 240),
'khaki': (240, 230, 140),
'lavender': (230, 230, 250),
'lavenderblush': (255, 240, 245),
'lawngreen': (124, 252, 0),
'lemonchiffon': (255, 250, 205),
'lightblue': (173, 216, 230),
'lightcoral': (240, 128, 128),
'lightcyan': (224, 255, 255),
'lightgoldenrodyellow': (250, 250, 210),
'lightgray': (211, 211, 211),
'lightgreen': (144, 238, 144),
'lightgrey': (211, 211, 211),
'lightpink': (255, 182, 193),
'lightsalmon': (255, 160, 122),
'lightseagreen': (32, 178, 170),
'lightskyblue': (135, 206, 250),
'lightslategray': (119, 136, 153),
'lightslategrey': (119, 136, 153),
'lightsteelblue': (176, 196, 222),
'lightyellow': (255, 255, 224),
'lime': (0, 255, 0),
'limegreen': (50, 205, 50),
'linen': (250, 240, 230),
'magenta': (255, 0, 255),
'maroon': (128, 0, 0),
'mediumaquamarine': (102, 205, 170),
'mediumblue': (0, 0, 205),
'mediumorchid': (186, 85, 211),
'mediumpurple': (147, 112, 219),
'mediumseagreen': (60, 179, 113),
'mediumslateblue': (123, 104, 238),
'mediumspringgreen': (0, 250, 154),
'mediumturquoise': (72, 209, 204),
'mediumvioletred': (199, 21, 133),
'midnightblue': (25, 25, 112),
'mintcream': (245, 255, 250),
'mistyrose': (255, 228, 225),
'moccasin': (255, 228, 181),
'navajowhite': (255, 222, 173),
'navy': (0, 0, 128),
'oldlace': (253, 245, 230),
'olive': (128, 128, 0),
'olivedrab': (107, 142, 35),
'orange': (255, 165, 0),
'orangered': (255, 69, 0),
'orchid': (218, 112, 214),
'palegoldenrod': (238, 232, 170),
'palegreen': (152, 251, 152),
'paleturquoise': (175, 238, 238),
'palevioletred': (219, 112, 147),
'papayawhip': (255, 239, 213),
'peachpuff': (255, 218, 185),
'peru': (205, 133, 63),
'pink': (255, 192, 203),
'plum': (221, 160, 221),
'powderblue': (176, 224, 230),
'purple': (128, 0, 128),
'red': (255, 0, 0),
'rosybrown': (188, 143, 143),
'royalblue': (65, 105, 225),
'saddlebrown': (139, 69, 19),
'salmon': (250, 128, 114),
'sandybrown': (244, 164, 96),
'seagreen': (46, 139, 87),
'seashell': (255, 245, 238),
'sienna': (160, 82, 45),
'silver': (192, 192, 192),
'skyblue': (135, 206, 235),
'slateblue': (106, 90, 205),
'slategray': (112, 128, 144),
'slategrey': (112, 128, 144),
'snow': (255, 250, 250),
'springgreen': (0, 255, 127),
'steelblue': (70, 130, 180),
'tan': (210, 180, 140),
'teal': (0, 128, 128),
'thistle': (216, 191, 216),
'tomato': (255, 99, 71),
'turquoise': (64, 224, 208),
'violet': (238, 130, 238),
'wheat': (245, 222, 179),
'white': (255, 255, 255),
'whitesmoke': (245, 245, 245),
'yellow': (255, 255, 0),
'yellowgreen': (154, 205, 50),
}
COLORS_BY_VALUE = {v: k for k, v in COLORS_BY_NAME.items()}
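# --- Editor's usage sketch (not part of the original pydantic module) ---
# Exercises only names defined above; the expected outputs follow the module
# docstring's "last alphabetical name wins" rule and the short-form logic in as_hex().
if __name__ == '__main__':
    c = Color('#09c')                       # short hex -> (0, 153, 204)
    print(c.as_rgb_tuple())                 # (0, 153, 204)
    print(c.as_hex())                       # '#09c' (every channel repeats its hex digit)
    print(c.as_rgb())                       # 'rgb(0, 153, 204)'
    print(Color((0, 255, 255)).as_named())  # 'cyan', not 'aqua'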
| """
Color definitions are used as per CSS3 specification:
http://www.w3.org/TR/css3-color/#svg-color
A few colors have multiple names referring to the same colors, eg. `grey` and `gray` or `aqua` and `cyan`.
In these cases the LAST color when sorted alphabetically takes precedence,
eg. Color((0, 255, 255)).as_named() == 'cyan' because "cyan" comes after "aqua".
"""
import math
import re
from colorsys import hls_to_rgb, rgb_to_hls
from typing import TYPE_CHECKING, Any, Dict, Optional, Tuple, Union, cast
from .errors import ColorError
from .utils import Representation, almost_equal_floats
if TYPE_CHECKING:
from .typing import CallableGenerator, ReprArgs
ColorTuple = Union[Tuple[int, int, int], Tuple[int, int, int, float]]
ColorType = Union[ColorTuple, str]
HslColorTuple = Union[Tuple[float, float, float], Tuple[float, float, float, float]]
class RGBA:
"""
Internal use only as a representation of a color.
"""
__slots__ = 'r', 'g', 'b', 'alpha', '_tuple'
def __init__(self, r: float, g: float, b: float, alpha: Optional[float]):
self.r = r
self.g = g
self.b = b
self.alpha = alpha
self._tuple: Tuple[float, float, float, Optional[float]] = (r, g, b, alpha)
def __getitem__(self, item: Any) -> Any:
return self._tuple[item]
# these are not compiled here to avoid import slowdown, they'll be compiled the first time they're used, then cached
r_hex_short = r'\s*(?:#|0x)?([0-9a-f])([0-9a-f])([0-9a-f])([0-9a-f])?\s*'
r_hex_long = r'\s*(?:#|0x)?([0-9a-f]{2})([0-9a-f]{2})([0-9a-f]{2})([0-9a-f]{2})?\s*'
_r_255 = r'(\d{1,3}(?:\.\d+)?)'
_r_comma = r'\s*,\s*'
r_rgb = fr'\s*rgb\(\s*{_r_255}{_r_comma}{_r_255}{_r_comma}{_r_255}\)\s*'
_r_alpha = r'(\d(?:\.\d+)?|\.\d+|\d{1,2}%)'
r_rgba = fr'\s*rgba\(\s*{_r_255}{_r_comma}{_r_255}{_r_comma}{_r_255}{_r_comma}{_r_alpha}\s*\)\s*'
_r_h = r'(-?\d+(?:\.\d+)?|-?\.\d+)(deg|rad|turn)?'
_r_sl = r'(\d{1,3}(?:\.\d+)?)%'
r_hsl = fr'\s*hsl\(\s*{_r_h}{_r_comma}{_r_sl}{_r_comma}{_r_sl}\s*\)\s*'
r_hsla = fr'\s*hsl\(\s*{_r_h}{_r_comma}{_r_sl}{_r_comma}{_r_sl}{_r_comma}{_r_alpha}\s*\)\s*'
# colors where the two hex characters are the same, if all colors match this the short version of hex colors can be used
repeat_colors = {int(c * 2, 16) for c in '0123456789abcdef'}
rads = 2 * math.pi
class Color(Representation):
__slots__ = '_original', '_rgba'
def __init__(self, value: ColorType) -> None:
self._rgba: RGBA
self._original: ColorType
if isinstance(value, (tuple, list)):
self._rgba = parse_tuple(value)
elif isinstance(value, str):
self._rgba = parse_str(value)
elif isinstance(value, Color):
self._rgba = value._rgba
value = value._original
else:
raise ColorError(reason='value must be a tuple, list or string')
# if we've got here value must be a valid color
self._original = value
@classmethod
def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
field_schema.update(type='string', format='color')
def original(self) -> ColorType:
"""
Original value passed to Color
"""
return self._original
def as_named(self, *, fallback: bool = False) -> str:
if self._rgba.alpha is None:
rgb = cast(Tuple[int, int, int], self.as_rgb_tuple())
try:
return COLORS_BY_VALUE[rgb]
except KeyError as e:
if fallback:
return self.as_hex()
else:
raise ValueError('no named color found, use fallback=True, as_hex() or as_rgb()') from e
else:
return self.as_hex()
def as_hex(self) -> str:
"""
Hex string representing the color can be 3, 4, 6 or 8 characters depending on whether the string
a "short" representation of the color is possible and whether there's an alpha channel.
"""
values = [float_to_255(c) for c in self._rgba[:3]]
if self._rgba.alpha is not None:
values.append(float_to_255(self._rgba.alpha))
as_hex = ''.join(f'{v:02x}' for v in values)
if all(c in repeat_colors for c in values):
as_hex = ''.join(as_hex[c] for c in range(0, len(as_hex), 2))
return '#' + as_hex
def as_rgb(self) -> str:
"""
Color as an rgb(<r>, <g>, <b>) or rgba(<r>, <g>, <b>, <a>) string.
"""
if self._rgba.alpha is None:
return f'rgb({float_to_255(self._rgba.r)}, {float_to_255(self._rgba.g)}, {float_to_255(self._rgba.b)})'
else:
return (
f'rgba({float_to_255(self._rgba.r)}, {float_to_255(self._rgba.g)}, {float_to_255(self._rgba.b)}, '
f'{round(self._alpha_float(), 2)})'
)
def as_rgb_tuple(self, *, alpha: Optional[bool] = None) -> ColorTuple:
"""
Color as an RGB or RGBA tuple; red, green and blue are in the range 0 to 255, alpha if included is
in the range 0 to 1.
:param alpha: whether to include the alpha channel, options are
None - (default) include alpha only if it's set (e.g. not None)
True - always include alpha,
False - always omit alpha,
"""
r, g, b = [float_to_255(c) for c in self._rgba[:3]]
if alpha is None:
if self._rgba.alpha is None:
return r, g, b
else:
return r, g, b, self._alpha_float()
elif alpha:
return r, g, b, self._alpha_float()
else:
# alpha is False
return r, g, b
def as_hsl(self) -> str:
"""
Color as an hsl(<h>, <s>, <l>) or hsl(<h>, <s>, <l>, <a>) string.
"""
if self._rgba.alpha is None:
h, s, li = self.as_hsl_tuple(alpha=False) # type: ignore
return f'hsl({h * 360:0.0f}, {s * 100:0.0f}%, {li * 100:0.0f}%)'
else:
h, s, li, a = self.as_hsl_tuple(alpha=True) # type: ignore
return f'hsl({h * 360:0.0f}, {s * 100:0.0f}%, {li * 100:0.0f}%, {round(a, 2)})'
def as_hsl_tuple(self, *, alpha: Optional[bool] = None) -> HslColorTuple:
"""
Color as an HSL or HSLA tuple, e.g. hue, saturation, lightness and optionally alpha; all elements are in
the range 0 to 1.
NOTE: this is HSL as used in HTML and most other places, not HLS as used in python's colorsys.
:param alpha: whether to include the alpha channel, options are
None - (default) include alpha only if it's set (e.g. not None)
True - always include alpha,
False - always omit alpha,
"""
h, l, s = rgb_to_hls(self._rgba.r, self._rgba.g, self._rgba.b)
if alpha is None:
if self._rgba.alpha is None:
return h, s, l
else:
return h, s, l, self._alpha_float()
if alpha:
return h, s, l, self._alpha_float()
else:
# alpha is False
return h, s, l
def _alpha_float(self) -> float:
return 1 if self._rgba.alpha is None else self._rgba.alpha
@classmethod
def __get_validators__(cls) -> 'CallableGenerator':
yield cls
def __str__(self) -> str:
return self.as_named(fallback=True)
def __repr_args__(self) -> 'ReprArgs':
return [(None, self.as_named(fallback=True))] + [('rgb', self.as_rgb_tuple())] # type: ignore
def parse_tuple(value: Tuple[Any, ...]) -> RGBA:
"""
Parse a tuple or list as a color.
"""
if len(value) == 3:
r, g, b = [parse_color_value(v) for v in value]
return RGBA(r, g, b, None)
elif len(value) == 4:
r, g, b = [parse_color_value(v) for v in value[:3]]
return RGBA(r, g, b, parse_float_alpha(value[3]))
else:
raise ColorError(reason='tuples must have length 3 or 4')
def parse_str(value: str) -> RGBA:
"""
Parse a string to an RGBA tuple, trying the following formats (in this order):
* named color, see COLORS_BY_NAME below
* hex short eg. `<prefix>fff` (prefix can be `#`, `0x` or nothing)
* hex long eg. `<prefix>ffffff` (prefix can be `#`, `0x` or nothing)
* `rgb(<r>, <g>, <b>) `
* `rgba(<r>, <g>, <b>, <a>)`
"""
value_lower = value.lower()
try:
r, g, b = COLORS_BY_NAME[value_lower]
except KeyError:
pass
else:
return ints_to_rgba(r, g, b, None)
m = re.fullmatch(r_hex_short, value_lower)
if m:
*rgb, a = m.groups()
r, g, b = [int(v * 2, 16) for v in rgb]
if a:
alpha: Optional[float] = int(a * 2, 16) / 255
else:
alpha = None
return ints_to_rgba(r, g, b, alpha)
m = re.fullmatch(r_hex_long, value_lower)
if m:
*rgb, a = m.groups()
r, g, b = [int(v, 16) for v in rgb]
if a:
alpha = int(a, 16) / 255
else:
alpha = None
return ints_to_rgba(r, g, b, alpha)
m = re.fullmatch(r_rgb, value_lower)
if m:
return ints_to_rgba(*m.groups(), None) # type: ignore
m = re.fullmatch(r_rgba, value_lower)
if m:
return ints_to_rgba(*m.groups()) # type: ignore
m = re.fullmatch(r_hsl, value_lower)
if m:
h, h_units, s, l_ = m.groups()
return parse_hsl(h, h_units, s, l_)
m = re.fullmatch(r_hsla, value_lower)
if m:
h, h_units, s, l_, a = m.groups()
return parse_hsl(h, h_units, s, l_, parse_float_alpha(a))
raise ColorError(reason='string not recognised as a valid color')
def ints_to_rgba(r: Union[int, str], g: Union[int, str], b: Union[int, str], alpha: Optional[float]) -> RGBA:
return RGBA(parse_color_value(r), parse_color_value(g), parse_color_value(b), parse_float_alpha(alpha))
def parse_color_value(value: Union[int, str], max_val: int = 255) -> float:
"""
Parse a value checking it's a valid int in the range 0 to max_val and divide by max_val to give a number
in the range 0 to 1
"""
try:
color = float(value)
except ValueError:
raise ColorError(reason='color values must be a valid number')
if 0 <= color <= max_val:
return color / max_val
else:
raise ColorError(reason=f'color values must be in the range 0 to {max_val}')
def parse_float_alpha(value: Union[None, str, float, int]) -> Optional[float]:
"""
Parse a value checking it's a valid float in the range 0 to 1
"""
if value is None:
return None
try:
if isinstance(value, str) and value.endswith('%'):
alpha = float(value[:-1]) / 100
else:
alpha = float(value)
except ValueError:
raise ColorError(reason='alpha values must be a valid float')
if almost_equal_floats(alpha, 1):
return None
elif 0 <= alpha <= 1:
return alpha
else:
raise ColorError(reason='alpha values must be in the range 0 to 1')
def parse_hsl(h: str, h_units: str, s: str, l: str, alpha: Optional[float] = None) -> RGBA:
"""
Parse raw hue, saturation, lightness and alpha values and convert to RGBA.
"""
s_value, l_value = parse_color_value(s, 100), parse_color_value(l, 100)
h_value = float(h)
if h_units in {None, 'deg'}:
h_value = h_value % 360 / 360
elif h_units == 'rad':
h_value = h_value % rads / rads
else:
# turns
h_value = h_value % 1
r, g, b = hls_to_rgb(h_value, l_value, s_value)
return RGBA(r, g, b, alpha)
def float_to_255(c: float) -> int:
return int(round(c * 255))
COLORS_BY_NAME = {
'aliceblue': (240, 248, 255),
'antiquewhite': (250, 235, 215),
'aqua': (0, 255, 255),
'aquamarine': (127, 255, 212),
'azure': (240, 255, 255),
'beige': (245, 245, 220),
'bisque': (255, 228, 196),
'black': (0, 0, 0),
'blanchedalmond': (255, 235, 205),
'blue': (0, 0, 255),
'blueviolet': (138, 43, 226),
'brown': (165, 42, 42),
'burlywood': (222, 184, 135),
'cadetblue': (95, 158, 160),
'chartreuse': (127, 255, 0),
'chocolate': (210, 105, 30),
'coral': (255, 127, 80),
'cornflowerblue': (100, 149, 237),
'cornsilk': (255, 248, 220),
'crimson': (220, 20, 60),
'cyan': (0, 255, 255),
'darkblue': (0, 0, 139),
'darkcyan': (0, 139, 139),
'darkgoldenrod': (184, 134, 11),
'darkgray': (169, 169, 169),
'darkgreen': (0, 100, 0),
'darkgrey': (169, 169, 169),
'darkkhaki': (189, 183, 107),
'darkmagenta': (139, 0, 139),
'darkolivegreen': (85, 107, 47),
'darkorange': (255, 140, 0),
'darkorchid': (153, 50, 204),
'darkred': (139, 0, 0),
'darksalmon': (233, 150, 122),
'darkseagreen': (143, 188, 143),
'darkslateblue': (72, 61, 139),
'darkslategray': (47, 79, 79),
'darkslategrey': (47, 79, 79),
'darkturquoise': (0, 206, 209),
'darkviolet': (148, 0, 211),
'deeppink': (255, 20, 147),
'deepskyblue': (0, 191, 255),
'dimgray': (105, 105, 105),
'dimgrey': (105, 105, 105),
'dodgerblue': (30, 144, 255),
'firebrick': (178, 34, 34),
'floralwhite': (255, 250, 240),
'forestgreen': (34, 139, 34),
'fuchsia': (255, 0, 255),
'gainsboro': (220, 220, 220),
'ghostwhite': (248, 248, 255),
'gold': (255, 215, 0),
'goldenrod': (218, 165, 32),
'gray': (128, 128, 128),
'green': (0, 128, 0),
'greenyellow': (173, 255, 47),
'grey': (128, 128, 128),
'honeydew': (240, 255, 240),
'hotpink': (255, 105, 180),
'indianred': (205, 92, 92),
'indigo': (75, 0, 130),
'ivory': (255, 255, 240),
'khaki': (240, 230, 140),
'lavender': (230, 230, 250),
'lavenderblush': (255, 240, 245),
'lawngreen': (124, 252, 0),
'lemonchiffon': (255, 250, 205),
'lightblue': (173, 216, 230),
'lightcoral': (240, 128, 128),
'lightcyan': (224, 255, 255),
'lightgoldenrodyellow': (250, 250, 210),
'lightgray': (211, 211, 211),
'lightgreen': (144, 238, 144),
'lightgrey': (211, 211, 211),
'lightpink': (255, 182, 193),
'lightsalmon': (255, 160, 122),
'lightseagreen': (32, 178, 170),
'lightskyblue': (135, 206, 250),
'lightslategray': (119, 136, 153),
'lightslategrey': (119, 136, 153),
'lightsteelblue': (176, 196, 222),
'lightyellow': (255, 255, 224),
'lime': (0, 255, 0),
'limegreen': (50, 205, 50),
'linen': (250, 240, 230),
'magenta': (255, 0, 255),
'maroon': (128, 0, 0),
'mediumaquamarine': (102, 205, 170),
'mediumblue': (0, 0, 205),
'mediumorchid': (186, 85, 211),
'mediumpurple': (147, 112, 219),
'mediumseagreen': (60, 179, 113),
'mediumslateblue': (123, 104, 238),
'mediumspringgreen': (0, 250, 154),
'mediumturquoise': (72, 209, 204),
'mediumvioletred': (199, 21, 133),
'midnightblue': (25, 25, 112),
'mintcream': (245, 255, 250),
'mistyrose': (255, 228, 225),
'moccasin': (255, 228, 181),
'navajowhite': (255, 222, 173),
'navy': (0, 0, 128),
'oldlace': (253, 245, 230),
'olive': (128, 128, 0),
'olivedrab': (107, 142, 35),
'orange': (255, 165, 0),
'orangered': (255, 69, 0),
'orchid': (218, 112, 214),
'palegoldenrod': (238, 232, 170),
'palegreen': (152, 251, 152),
'paleturquoise': (175, 238, 238),
'palevioletred': (219, 112, 147),
'papayawhip': (255, 239, 213),
'peachpuff': (255, 218, 185),
'peru': (205, 133, 63),
'pink': (255, 192, 203),
'plum': (221, 160, 221),
'powderblue': (176, 224, 230),
'purple': (128, 0, 128),
'red': (255, 0, 0),
'rosybrown': (188, 143, 143),
'royalblue': (65, 105, 225),
'saddlebrown': (139, 69, 19),
'salmon': (250, 128, 114),
'sandybrown': (244, 164, 96),
'seagreen': (46, 139, 87),
'seashell': (255, 245, 238),
'sienna': (160, 82, 45),
'silver': (192, 192, 192),
'skyblue': (135, 206, 235),
'slateblue': (106, 90, 205),
'slategray': (112, 128, 144),
'slategrey': (112, 128, 144),
'snow': (255, 250, 250),
'springgreen': (0, 255, 127),
'steelblue': (70, 130, 180),
'tan': (210, 180, 140),
'teal': (0, 128, 128),
'thistle': (216, 191, 216),
'tomato': (255, 99, 71),
'turquoise': (64, 224, 208),
'violet': (238, 130, 238),
'wheat': (245, 222, 179),
'white': (255, 255, 255),
'whitesmoke': (245, 245, 245),
'yellow': (255, 255, 0),
'yellowgreen': (154, 205, 50),
}
COLORS_BY_VALUE = {v: k for k, v in COLORS_BY_NAME.items()}
| en | 0.762648 | Color definitions are used as per CSS3 specification: http://www.w3.org/TR/css3-color/#svg-color A few colors have multiple names referring to the sames colors, eg. `grey` and `gray` or `aqua` and `cyan`. In these cases the LAST color when sorted alphabetically takes preferences, eg. Color((0, 255, 255)).as_named() == 'cyan' because "cyan" comes after "aqua". Internal use only as a representation of a color. # these are not compiled here to avoid import slowdown, they'll be compiled the first time they're used, then cached #|0x)?([0-9a-f])([0-9a-f])([0-9a-f])([0-9a-f])?\s*' #|0x)?([0-9a-f]{2})([0-9a-f]{2})([0-9a-f]{2})([0-9a-f]{2})?\s*' # colors where the two hex characters are the same, if all colors match this the short version of hex colors can be used # if we've got here value must be a valid color Original value passed to Color Hex string representing the color can be 3, 4, 6 or 8 characters depending on whether the string a "short" representation of the color is possible and whether there's an alpha channel. Color as an rgb(<r>, <g>, <b>) or rgba(<r>, <g>, <b>, <a>) string. Color as an RGB or RGBA tuple; red, green and blue are in the range 0 to 255, alpha if included is in the range 0 to 1. :param alpha: whether to include the alpha channel, options are None - (default) include alpha only if it's set (e.g. not None) True - always include alpha, False - always omit alpha, # alpha is False Color as an hsl(<h>, <s>, <l>) or hsl(<h>, <s>, <l>, <a>) string. # type: ignore # type: ignore Color as an HSL or HSLA tuple, e.g. hue, saturation, lightness and optionally alpha; all elements are in the range 0 to 1. NOTE: this is HSL as used in HTML and most other places, not HLS as used in python's colorsys. :param alpha: whether to include the alpha channel, options are None - (default) include alpha only if it's set (e.g. not None) True - always include alpha, False - always omit alpha, # alpha is False # type: ignore Parse a tuple or list as a color. Parse a string to an RGBA tuple, trying the following formats (in this order): * named color, see COLORS_BY_NAME below * hex short eg. `<prefix>fff` (prefix can be `#`, `0x` or nothing) * hex long eg. `<prefix>ffffff` (prefix can be `#`, `0x` or nothing) * `rgb(<r>, <g>, <b>) ` * `rgba(<r>, <g>, <b>, <a>)` # type: ignore # type: ignore Parse a value checking it's a valid int in the range 0 to max_val and divide by max_val to give a number in the range 0 to 1 Parse a value checking it's a valid float in the range 0 to 1 Parse raw hue, saturation, lightness and alpha values and convert to RGBA. # turns | 3.547344 | 4 |
angr-management/angrmanagement/logic/threads.py | Ruide/angr-dev | 0 | 6632707 | <filename>angr-management/angrmanagement/logic/threads.py
import thread
import threading
from PySide.QtCore import QEvent, QCoreApplication
from . import GlobalInfo
class ExecuteCodeEvent(QEvent):
def __init__(self, callable, args=None):
super(ExecuteCodeEvent, self).__init__(QEvent.User)
self.callable = callable
self.args = args
self.event = threading.Event()
self.result = None
self.exception = None
def execute(self):
if self.args is None:
self.callable()
else:
self.callable(*self.args)
class GUIObjProxy(object):
"""
Derived from http://code.activestate.com/recipes/496741-object-proxying/
"""
__slots__ = ["_obj", "__weakref__"]
def __init__(self, obj):
object.__setattr__(self, "_obj", obj)
#
# proxying (special cases)
#
def __getattribute__(self, name):
result = gui_thread_schedule(lambda: getattr(object.__getattribute__(self, "_obj"), name))
if result is None:
return result
if type(result) in [int, float, str, bool]:
return result
return GUIObjProxy(result)
def __delattr__(self, name):
gui_thread_schedule(lambda: delattr(object.__getattribute__(self, "_obj"), name))
def __setattr__(self, name, value):
gui_thread_schedule(lambda: setattr(object.__getattribute__(self, "_obj"), name, value))
def __nonzero__(self):
return gui_thread_schedule(lambda: bool(object.__getattribute__(self, "_obj")))
def __str__(self):
return gui_thread_schedule(lambda: str(object.__getattribute__(self, "_obj")))
def __repr__(self):
return gui_thread_schedule(lambda: repr(object.__getattribute__(self, "_obj")))
#
# factories
#
_special_names = [
'__abs__', '__add__', '__and__', '__call__', '__cmp__', '__coerce__',
'__contains__', '__delitem__', '__delslice__', '__div__', '__divmod__',
'__eq__', '__float__', '__floordiv__', '__ge__', '__getitem__',
'__getslice__', '__gt__', '__hash__', '__hex__', '__iadd__', '__iand__',
'__idiv__', '__idivmod__', '__ifloordiv__', '__ilshift__', '__imod__',
'__imul__', '__int__', '__invert__', '__ior__', '__ipow__', '__irshift__',
'__isub__', '__iter__', '__itruediv__', '__ixor__', '__le__', '__len__',
'__long__', '__lshift__', '__lt__', '__mod__', '__mul__', '__ne__',
'__neg__', '__oct__', '__or__', '__pos__', '__pow__', '__radd__',
'__rand__', '__rdiv__', '__rdivmod__', '__reduce__', '__reduce_ex__',
        '__repr__', '__reversed__', '__rfloordiv__', '__rlshift__', '__rmod__',
'__rmul__', '__ror__', '__rpow__', '__rrshift__', '__rshift__', '__rsub__',
'__rtruediv__', '__rxor__', '__setitem__', '__setslice__', '__sub__',
'__truediv__', '__xor__', 'next',
]
@classmethod
def _create_class_proxy(cls, theclass):
"""
Creates a proxy for the given class.
"""
def make_method(name):
def method(self, *args, **kw):
return gui_thread_schedule(lambda: getattr(object.__getattribute__(self, "_obj"), name)(*args, **kw))
return method
namespace = {}
for name in cls._special_names:
if hasattr(theclass, name):
namespace[name] = make_method(name)
return type("%s(%s)" % (cls.__name__, theclass.__name__), (cls,), namespace)
def __new__(cls, obj, *args, **kwargs):
"""
        creates a proxy instance referencing `obj`. (obj, *args, **kwargs) are
passed to this class' __init__, so deriving classes can define an
__init__ method of their own.
note: _class_proxy_cache is unique per deriving class (each deriving
class must hold its own cache)
"""
try:
cache = cls.__dict__["_class_proxy_cache"]
except KeyError:
cls._class_proxy_cache = cache = {}
try:
theclass = cache[obj.__class__]
except KeyError:
cache[obj.__class__] = theclass = cls._create_class_proxy(obj.__class__)
ins = object.__new__(theclass)
theclass.__init__(ins, obj, *args, **kwargs)
return ins
def is_gui_thread():
return thread.get_ident() == GlobalInfo.gui_thread
def gui_thread_schedule(callable, args=None):
if is_gui_thread():
if args is None:
return callable()
else:
return callable(*args)
event = ExecuteCodeEvent(callable, args)
QCoreApplication.postEvent(GlobalInfo.main_window, event)
event.event.wait() # TODO: unsafe. to be fixed later.
if event.exception is not None:
raise event.exception[0], event.exception[1], event.exception[2]
return event.result
def gui_thread_schedule_async(callable, args=None):
if is_gui_thread():
if args is None:
callable()
else:
callable(*args)
event = ExecuteCodeEvent(callable, args)
QCoreApplication.postEvent(GlobalInfo.main_window, event)
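# --- Editor's sketch (not part of the original module) ---
# Shows how worker code is expected to hop onto the GUI thread. Assumes
# GlobalInfo.main_window has been set and a Qt event loop is running; `refresh`
# is a hypothetical zero-argument callable owned by the GUI side.
def _example_worker(refresh):
    # Blocks until the GUI thread has run refresh() and returns its result.
    result = gui_thread_schedule(refresh)
    # Fire-and-forget variant: posts the event and returns immediately.
    gui_thread_schedule_async(refresh)
    return result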
| <filename>angr-management/angrmanagement/logic/threads.py
import thread
import threading
from PySide.QtCore import QEvent, QCoreApplication
from . import GlobalInfo
class ExecuteCodeEvent(QEvent):
def __init__(self, callable, args=None):
super(ExecuteCodeEvent, self).__init__(QEvent.User)
self.callable = callable
self.args = args
self.event = threading.Event()
self.result = None
self.exception = None
def execute(self):
if self.args is None:
self.callable()
else:
self.callable(*self.args)
class GUIObjProxy(object):
"""
Derived from http://code.activestate.com/recipes/496741-object-proxying/
"""
__slots__ = ["_obj", "__weakref__"]
def __init__(self, obj):
object.__setattr__(self, "_obj", obj)
#
# proxying (special cases)
#
def __getattribute__(self, name):
result = gui_thread_schedule(lambda: getattr(object.__getattribute__(self, "_obj"), name))
if result is None:
return result
if type(result) in [int, float, str, bool]:
return result
return GUIObjProxy(result)
def __delattr__(self, name):
gui_thread_schedule(lambda: delattr(object.__getattribute__(self, "_obj"), name))
def __setattr__(self, name, value):
gui_thread_schedule(lambda: setattr(object.__getattribute__(self, "_obj"), name, value))
def __nonzero__(self):
return gui_thread_schedule(lambda: bool(object.__getattribute__(self, "_obj")))
def __str__(self):
return gui_thread_schedule(lambda: str(object.__getattribute__(self, "_obj")))
def __repr__(self):
return gui_thread_schedule(lambda: repr(object.__getattribute__(self, "_obj")))
#
# factories
#
_special_names = [
'__abs__', '__add__', '__and__', '__call__', '__cmp__', '__coerce__',
'__contains__', '__delitem__', '__delslice__', '__div__', '__divmod__',
'__eq__', '__float__', '__floordiv__', '__ge__', '__getitem__',
'__getslice__', '__gt__', '__hash__', '__hex__', '__iadd__', '__iand__',
'__idiv__', '__idivmod__', '__ifloordiv__', '__ilshift__', '__imod__',
'__imul__', '__int__', '__invert__', '__ior__', '__ipow__', '__irshift__',
'__isub__', '__iter__', '__itruediv__', '__ixor__', '__le__', '__len__',
'__long__', '__lshift__', '__lt__', '__mod__', '__mul__', '__ne__',
'__neg__', '__oct__', '__or__', '__pos__', '__pow__', '__radd__',
'__rand__', '__rdiv__', '__rdivmod__', '__reduce__', '__reduce_ex__',
        '__repr__', '__reversed__', '__rfloordiv__', '__rlshift__', '__rmod__',
'__rmul__', '__ror__', '__rpow__', '__rrshift__', '__rshift__', '__rsub__',
'__rtruediv__', '__rxor__', '__setitem__', '__setslice__', '__sub__',
'__truediv__', '__xor__', 'next',
]
@classmethod
def _create_class_proxy(cls, theclass):
"""
Creates a proxy for the given class.
"""
def make_method(name):
def method(self, *args, **kw):
return gui_thread_schedule(lambda: getattr(object.__getattribute__(self, "_obj"), name)(*args, **kw))
return method
namespace = {}
for name in cls._special_names:
if hasattr(theclass, name):
namespace[name] = make_method(name)
return type("%s(%s)" % (cls.__name__, theclass.__name__), (cls,), namespace)
def __new__(cls, obj, *args, **kwargs):
"""
        creates a proxy instance referencing `obj`. (obj, *args, **kwargs) are
passed to this class' __init__, so deriving classes can define an
__init__ method of their own.
note: _class_proxy_cache is unique per deriving class (each deriving
class must hold its own cache)
"""
try:
cache = cls.__dict__["_class_proxy_cache"]
except KeyError:
cls._class_proxy_cache = cache = {}
try:
theclass = cache[obj.__class__]
except KeyError:
cache[obj.__class__] = theclass = cls._create_class_proxy(obj.__class__)
ins = object.__new__(theclass)
theclass.__init__(ins, obj, *args, **kwargs)
return ins
def is_gui_thread():
return thread.get_ident() == GlobalInfo.gui_thread
def gui_thread_schedule(callable, args=None):
if is_gui_thread():
if args is None:
return callable()
else:
return callable(*args)
event = ExecuteCodeEvent(callable, args)
QCoreApplication.postEvent(GlobalInfo.main_window, event)
event.event.wait() # TODO: unsafe. to be fixed later.
if event.exception is not None:
raise event.exception[0], event.exception[1], event.exception[2]
return event.result
def gui_thread_schedule_async(callable, args=None):
if is_gui_thread():
if args is None:
callable()
else:
callable(*args)
event = ExecuteCodeEvent(callable, args)
QCoreApplication.postEvent(GlobalInfo.main_window, event)
| en | 0.770337 | Derived from http://code.activestate.com/recipes/496741-object-proxying/ # # proxying (special cases) # # # factories # Creates a proxy for the given class. creates an proxy instance referencing `obj`. (obj, *args, **kwargs) are passed to this class' __init__, so deriving classes can define an __init__ method of their own. note: _class_proxy_cache is unique per deriving class (each deriving class must hold its own cache) # TODO: unsafe. to be fixed later. | 2.263583 | 2 |
faculty/clients/user.py | jkeelan/faculty | 0 | 6632708 | <filename>faculty/clients/user.py
# Copyright 2018-2019 Faculty Science Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import namedtuple
from enum import Enum
from marshmallow import fields, post_load
from marshmallow_enum import EnumField
from faculty.clients.base import BaseSchema, BaseClient
class GlobalRole(Enum):
BASIC_USER = "global-basic-user"
FULL_USER = "global-full-user"
ADMIN = "global-admin"
User = namedtuple(
"User",
[
"id",
"username",
"full_name",
"email",
"created_at",
"enabled",
"global_roles",
"is_system",
],
)
class UserSchema(BaseSchema):
id = fields.UUID(data_key="userId", required=True)
username = fields.Str(required=True)
full_name = fields.Str(data_key="fullName", missing=None)
email = fields.Str(required=True)
created_at = fields.DateTime(data_key="createdAt", required=True)
enabled = fields.Boolean(required=True)
global_roles = fields.List(
EnumField(GlobalRole, by_value=True),
data_key="globalRoles",
missing=None,
)
is_system = fields.Boolean(data_key="isSystem", required=True)
@post_load
def make_user(self, data):
return User(**data)
class UserClient(BaseClient):
SERVICE_NAME = "flock"
def get_user(self, user_id):
endpoint = "/user/{}".format(user_id)
response = self._get(endpoint, UserSchema())
return response
def get_all_users(self, is_system=None, enabled=None):
params = {}
if is_system is not None:
params["isSystem"] = "true" if is_system else "false"
if enabled is not None:
params["isDisabled"] = "false" if enabled else "true"
endpoint = "/users"
response = self._get(endpoint, UserSchema(many=True), params=params)
return response
def set_global_roles(self, user_id, global_roles):
endpoint = "/user/{}/roles".format(user_id)
response = self._put(
endpoint, UserSchema(), json={"roles": global_roles}
)
return response
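# --- Editor's usage sketch (not part of the original module) ---
# Deserialising a raw API payload by hand, assuming BaseSchema behaves like a plain
# marshmallow Schema. The keys follow the data_key values declared in UserSchema;
# the UUID and e-mail below are made up for illustration.
# UserSchema().load({
#     "userId": "00000000-0000-0000-0000-000000000000",
#     "username": "jane",
#     "email": "jane@example.com",
#     "createdAt": "2019-01-01T00:00:00+00:00",
#     "enabled": True,
#     "isSystem": False,
#     "globalRoles": ["global-basic-user"],
# })  # -> User(id=UUID(...), username='jane', full_name=None, global_roles=[GlobalRole.BASIC_USER], ...)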
| <filename>faculty/clients/user.py
# Copyright 2018-2019 Faculty Science Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import namedtuple
from enum import Enum
from marshmallow import fields, post_load
from marshmallow_enum import EnumField
from faculty.clients.base import BaseSchema, BaseClient
class GlobalRole(Enum):
BASIC_USER = "global-basic-user"
FULL_USER = "global-full-user"
ADMIN = "global-admin"
User = namedtuple(
"User",
[
"id",
"username",
"full_name",
"email",
"created_at",
"enabled",
"global_roles",
"is_system",
],
)
class UserSchema(BaseSchema):
id = fields.UUID(data_key="userId", required=True)
username = fields.Str(required=True)
full_name = fields.Str(data_key="fullName", missing=None)
email = fields.Str(required=True)
created_at = fields.DateTime(data_key="createdAt", required=True)
enabled = fields.Boolean(required=True)
global_roles = fields.List(
EnumField(GlobalRole, by_value=True),
data_key="globalRoles",
missing=None,
)
is_system = fields.Boolean(data_key="isSystem", required=True)
@post_load
def make_user(self, data):
return User(**data)
class UserClient(BaseClient):
SERVICE_NAME = "flock"
def get_user(self, user_id):
endpoint = "/user/{}".format(user_id)
response = self._get(endpoint, UserSchema())
return response
def get_all_users(self, is_system=None, enabled=None):
params = {}
if is_system is not None:
params["isSystem"] = "true" if is_system else "false"
if enabled is not None:
params["isDisabled"] = "false" if enabled else "true"
endpoint = "/users"
response = self._get(endpoint, UserSchema(many=True), params=params)
return response
def set_global_roles(self, user_id, global_roles):
endpoint = "/user/{}/roles".format(user_id)
response = self._put(
endpoint, UserSchema(), json={"roles": global_roles}
)
return response
| en | 0.847013 | # Copyright 2018-2019 Faculty Science Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. | 2.226831 | 2 |
backend/backend/urls.py | indian-innovation-company/copr8 | 0 | 6632709 | <filename>backend/backend/urls.py<gh_stars>0
from contrib.views import char_count
from django.contrib import admin
from django.urls import include, path, re_path
from django.views.generic import TemplateView
from rest_framework import routers
from contrib.views import ResourcesViewSet
router = routers.DefaultRouter()
router.register(r'resources', ResourcesViewSet)
urlpatterns = [
path("admin/", admin.site.urls),
path("char_count", char_count, name="char_count"),
path('api/v1/', include(router.urls)),
re_path(".*", TemplateView.as_view(template_name="index.html")),
]
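# --- Editor's note (not part of the original module) ---
# With rest_framework's DefaultRouter, the registration above additionally exposes a
# browsable API root at /api/v1/ plus /api/v1/resources/ (list/create) and
# /api/v1/resources/<pk>/ (detail) endpoints. The catch-all re_path must remain the
# last pattern so it does not shadow the admin/API routes, since Django resolves URL
# patterns in order.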
| <filename>backend/backend/urls.py<gh_stars>0
from contrib.views import char_count
from django.contrib import admin
from django.urls import include, path, re_path
from django.views.generic import TemplateView
from rest_framework import routers
from contrib.views import ResourcesViewSet
router = routers.DefaultRouter()
router.register(r'resources', ResourcesViewSet)
urlpatterns = [
path("admin/", admin.site.urls),
path("char_count", char_count, name="char_count"),
path('api/v1/', include(router.urls)),
re_path(".*", TemplateView.as_view(template_name="index.html")),
]
| none | 1 | 1.790193 | 2 |
|
src/rush/limiters/base.py | algobot76/rush | 41 | 6632710 | """Interface definition for limiters."""
import attr
from .. import quota
from .. import result
from .. import stores
@attr.s
class BaseLimiter:
"""Base object defining the interface for limiters."""
store: stores.BaseStore = attr.ib(
validator=attr.validators.instance_of(stores.BaseStore)
)
def rate_limit(
self, key: str, quantity: int, rate: quota.Quota
) -> result.RateLimitResult:
"""Apply the rate-limit to a quantity of requests."""
raise NotImplementedError()
def reset(self, key: str, rate: quota.Quota) -> result.RateLimitResult:
"""Reset the rate-limit for a given key."""
raise NotImplementedError()
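# --- Editor's sketch (not part of the original module) ---
# Rough shape of a concrete limiter built on this interface. The bodies stay stubs
# because the exact Quota/RateLimitResult attributes are not defined in this file
# and are not guessed here; a real implementation would read and update counters in
# self.store keyed by `key` and build a result.RateLimitResult from the outcome.
class ExampleLimiter(BaseLimiter):
    """Illustrative subclass wiring only."""
    def rate_limit(
        self, key: str, quantity: int, rate: quota.Quota
    ) -> result.RateLimitResult:
        raise NotImplementedError("illustrative stub")
    def reset(self, key: str, rate: quota.Quota) -> result.RateLimitResult:
        raise NotImplementedError("illustrative stub")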
| """Interface definition for limiters."""
import attr
from .. import quota
from .. import result
from .. import stores
@attr.s
class BaseLimiter:
"""Base object defining the interface for limiters."""
store: stores.BaseStore = attr.ib(
validator=attr.validators.instance_of(stores.BaseStore)
)
def rate_limit(
self, key: str, quantity: int, rate: quota.Quota
) -> result.RateLimitResult:
"""Apply the rate-limit to a quantity of requests."""
raise NotImplementedError()
def reset(self, key: str, rate: quota.Quota) -> result.RateLimitResult:
"""Reset the rate-limit for a given key."""
raise NotImplementedError()
| en | 0.770071 | Interface definition for limiters. Base object defining the interface for limiters. Apply the rate-limit to a quantity of requests. Reset the rate-limit for a given key. | 2.542717 | 3 |
learn/05learn-n19.py | keydepth/facedetect | 0 | 6632711 | <filename>learn/05learn-n19.py
from keras.layers import Activation, Conv2D, Dense, Flatten, MaxPooling2D
from keras.models import Sequential, load_model
from keras.utils.np_utils import to_categorical
# Build lists of images and their ground-truth labels
import random
from keras.utils.np_utils import to_categorical
import os
import cv2
import numpy as np
import matplotlib.pyplot as plt
img_file_name_list=os.listdir("./face_scratch_image/")
print(len(img_file_name_list))
for i in range(len(img_file_name_list)):
n=os.path.join("./face_scratch_image",img_file_name_list[i])
img = cv2.imread(n)
if img is None:
img_file_name_list.pop(i)
continue
if isinstance(img,type(None)) == True:
img_file_name_list.pop(i)
continue
height, width, channels = img.shape[:3]
if height!=64 or width!=64:
print(img_file_name_list[i])
img_file_name_list.pop(i)
continue
print(len(img_file_name_list))
X_train=[]
y_train=[]
for j in range(len(img_file_name_list)):  # use every image (the original "-1" skipped the last one)
n=os.path.join("./face_scratch_image/",img_file_name_list[j])
img = cv2.imread(n)
b,g,r = cv2.split(img)
img = cv2.merge([r,g,b])
X_train.append(img)
n=img_file_name_list[j]
y_train=np.append(y_train,int(n[0:2])).reshape(j+1,1)
X_train=np.array(X_train)
img_file_name_list=os.listdir("./test_image/")
print(len(img_file_name_list))
for i in range(len(img_file_name_list)):
n=os.path.join("./test_image",img_file_name_list[i])
img = cv2.imread(n)
if isinstance(img,type(None)) == True:
img_file_name_list.pop(i)
continue
height, width, channels = img.shape[:3]
if height!=64 or width!=64:
print(img_file_name_list[i])
img_file_name_list.pop(i)
continue
print(len(img_file_name_list))
X_test=[]
y_test=[]
for j in range(0,len(img_file_name_list)):
n=os.path.join("./test_image",img_file_name_list[j])
img = cv2.imread(n)
b,g,r = cv2.split(img)
img = cv2.merge([r,g,b])
X_test.append(img)
n=img_file_name_list[j]
y_test=np.append(y_test,int(n[0:2])).reshape(j+1,1)
X_test=np.array(X_test)
#print(X_test[0])
#print(y_test[0])
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
#plt.imshow(X_train[0])
#plt.show()
#print(y_train[0])
# Model definition
model = Sequential()
model.add(Conv2D(input_shape=(64, 64, 3), filters=32,kernel_size=(2, 2), strides=(1, 1), padding="same"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(filters=32, kernel_size=(2, 2), strides=(1, 1), padding="same"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(filters=32, kernel_size=(2, 2), strides=(1, 1), padding="same"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(256))
model.add(Activation("sigmoid"))
model.add(Dense(128))
model.add(Activation('sigmoid'))
model.add(Dense(19))
model.add(Activation('softmax'))
# Compile the model
model.compile(optimizer='sgd', loss='categorical_crossentropy',metrics=['accuracy'])
# Training
# model.fit(X_train, y_train, batch_size=32, epochs=100)
# Keep the training history for plotting
history = model.fit(X_train, y_train, batch_size=32, epochs=25, verbose=1, validation_data=(X_test, y_test))
# Evaluate and print the generalization (validation) accuracy
score = model.evaluate(X_test, y_test, batch_size=32, verbose=0)
print('validation loss:{0[0]}\nvalidation accuracy:{0[1]}'.format(score))
# Plot acc and val_acc
plt.plot(history.history["acc"], label="acc", ls="-", marker="o")
plt.plot(history.history["val_acc"], label="val_acc", ls="-", marker="x")
plt.ylabel("accuracy")
plt.xlabel("epoch")
plt.legend(loc="best")
plt.show()
# Save the model
model.save("my_model-n19-epoch25.h5")
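# --- Editor's usage sketch (not part of the original script) ---
# Loading the saved model later for inference. "some_face.jpg" is a hypothetical
# 64x64 face crop; mapping the class index back to a person is up to the caller.
# model = load_model("my_model-n19-epoch25.h5")
# img = cv2.cvtColor(cv2.imread("some_face.jpg"), cv2.COLOR_BGR2RGB)
# pred = model.predict(np.array([img]))
# print(np.argmax(pred[0]))  # predicted class index (0-18)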
| <filename>learn/05learn-n19.py
from keras.layers import Activation, Conv2D, Dense, Flatten, MaxPooling2D
from keras.models import Sequential, load_model
from keras.utils.np_utils import to_categorical
# Build lists of images and their ground-truth labels
import random
from keras.utils.np_utils import to_categorical
import os
import cv2
import numpy as np
import matplotlib.pyplot as plt
img_file_name_list=os.listdir("./face_scratch_image/")
print(len(img_file_name_list))
for i in range(len(img_file_name_list)):
n=os.path.join("./face_scratch_image",img_file_name_list[i])
img = cv2.imread(n)
if img is None:
img_file_name_list.pop(i)
continue
if isinstance(img,type(None)) == True:
img_file_name_list.pop(i)
continue
height, width, channels = img.shape[:3]
if height!=64 or width!=64:
print(img_file_name_list[i])
img_file_name_list.pop(i)
continue
print(len(img_file_name_list))
X_train=[]
y_train=[]
for j in range(len(img_file_name_list)):  # use every image (the original "-1" skipped the last one)
n=os.path.join("./face_scratch_image/",img_file_name_list[j])
img = cv2.imread(n)
b,g,r = cv2.split(img)
img = cv2.merge([r,g,b])
X_train.append(img)
n=img_file_name_list[j]
y_train=np.append(y_train,int(n[0:2])).reshape(j+1,1)
X_train=np.array(X_train)
img_file_name_list=os.listdir("./test_image/")
print(len(img_file_name_list))
for i in range(len(img_file_name_list)):
n=os.path.join("./test_image",img_file_name_list[i])
img = cv2.imread(n)
if isinstance(img,type(None)) == True:
img_file_name_list.pop(i)
continue
height, width, channels = img.shape[:3]
if height!=64 or width!=64:
print(img_file_name_list[i])
img_file_name_list.pop(i)
continue
print(len(img_file_name_list))
X_test=[]
y_test=[]
for j in range(0,len(img_file_name_list)):
n=os.path.join("./test_image",img_file_name_list[j])
img = cv2.imread(n)
b,g,r = cv2.split(img)
img = cv2.merge([r,g,b])
X_test.append(img)
n=img_file_name_list[j]
y_test=np.append(y_test,int(n[0:2])).reshape(j+1,1)
X_test=np.array(X_test)
#print(X_test[0])
#print(y_test[0])
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
#plt.imshow(X_train[0])
#plt.show()
#print(y_train[0])
# Model definition
model = Sequential()
model.add(Conv2D(input_shape=(64, 64, 3), filters=32,kernel_size=(2, 2), strides=(1, 1), padding="same"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(filters=32, kernel_size=(2, 2), strides=(1, 1), padding="same"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(filters=32, kernel_size=(2, 2), strides=(1, 1), padding="same"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(256))
model.add(Activation("sigmoid"))
model.add(Dense(128))
model.add(Activation('sigmoid'))
model.add(Dense(19))
model.add(Activation('softmax'))
# Compile the model
model.compile(optimizer='sgd', loss='categorical_crossentropy',metrics=['accuracy'])
# Training
# model.fit(X_train, y_train, batch_size=32, epochs=100)
# Keep the training history for plotting
history = model.fit(X_train, y_train, batch_size=32, epochs=25, verbose=1, validation_data=(X_test, y_test))
# Evaluate and print the generalization (validation) accuracy
score = model.evaluate(X_test, y_test, batch_size=32, verbose=0)
print('validation loss:{0[0]}\nvalidation accuracy:{0[1]}'.format(score))
# Plot acc and val_acc
plt.plot(history.history["acc"], label="acc", ls="-", marker="o")
plt.plot(history.history["val_acc"], label="val_acc", ls="-", marker="x")
plt.ylabel("accuracy")
plt.xlabel("epoch")
plt.legend(loc="best")
plt.show()
# Save the model
model.save("my_model-n19-epoch25.h5")
| ja | 0.970171 | # 画像と正解ラベルをリストにする #print(X_test[0]) #print(y_test[0]) #plt.imshow(X_train[0]) #plt.show() #print(y_train[0]) # モデルの定義 # コンパイル # 学習 # model.fit(X_train, y_train, batch_size=32, epochs=100) #グラフ用 # 汎化制度の評価・表示 #acc, val_accのプロット #モデルを保存 | 3.005438 | 3 |
scripts/modules/conditions.py | vkostyanetsky/Organizer | 0 | 6632712 | <filename>scripts/modules/conditions.py
import re
import datetime
def is_task_started(task, date):
result = True
    regexp = r'.*(, начиная с| с) ([0-9]{1,2}\.[0-9]{1,2}\.[0-9]{4})$'
    groups = re.match(regexp, task['condition'])
    if groups is not None:
start_date = datetime.datetime.strptime(groups[2], '%d.%m.%Y')
result = date >= start_date
return result | <filename>scripts/modules/conditions.py
import re
import datetime
def is_task_started(task, date):
result = True
    regexp = r'.*(, начиная с| с) ([0-9]{1,2}\.[0-9]{1,2}\.[0-9]{4})$'
    groups = re.match(regexp, task['condition'])
    if groups is not None:
start_date = datetime.datetime.strptime(groups[2], '%d.%m.%Y')
result = date >= start_date
return result | none | 1 | 2.832792 | 3 |
|
plugins/panorama/panorama/chart_factory.py | mohnjahoney/website_source | 13 | 6632713 | <reponame>mohnjahoney/website_source
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from functools import partial
from pandas import Series, DataFrame
# !! Import required to instantiate charts, do not remove
from nvd3 import *
import numpy
# A dict used for chart configuration.
# DEFAULT settings can be overwritten and/or completed by chart specific settings.
# In this case, the chart class is used as key.
DEFAULT_CONF = {
"DEFAULT": {"name": None, "display_container": False, "height": 300, "width": 700},
"stackedAreaChart": {"use_interactive_guideline": True, "x_axis_format": ""},
"multiBarChart": {"x_axis_format": ""},
"lineChart": {"x_is_date": True, "x_axis_format": "%m-%Y"},
}
CLASS_ALLOWED = (
"discreteBarChart",
"pieChart",
"multiBarChart",
"stackedAreaChart",
"lineChart",
)
class ChartFactory(object):
def __init__(self):
# TODO extra series conf ?
self.extra_series = {"tooltip": {"y_start": "", "y_end": " posts"}}
self.chart_conf = DEFAULT_CONF
def render(self, data, renderer):
""" Create a chart by using the renderer, add data, render and return it.
:param data: the chart input data. Can be a Series or a DataFrame.
:param renderer: the renderer to use to create the chart.
:return: the produced chart.
"""
chart = renderer()
if isinstance(data, Series):
self._add_series(series=data, chart=chart)
elif isinstance(data, DataFrame):
for column in data.columns.values:
series = data[column]
self._add_series(series=series, chart=chart)
chart.buildcontent()
return chart
def _add_series(self, series, chart):
# Converting values for Python 3.x compatibility, because numpy numbers are not supported by the JSON encoder
# TODO fix it in a better way
y = _list_convert(l=series.tolist())
x = _list_convert(l=series.index.get_values())
chart.add_serie(name=series.name, y=y, x=x, extra=self.extra_series)
def get_renderer(self, class_name):
""" Return a method responsible to create a chart corresponding to the given class_name.
Raises an exception if the class is not allowed.
:param class_name: the class of the chart to create.
:return: the method permitting to create the chart.
"""
if class_name not in CLASS_ALLOWED:
raise ValueError("Class [%s] not allowed for a renderer" % class_name)
return partial(self._create_chart, class_name=class_name)
def _create_chart(self, class_name, name):
""" Initialize a chart, with defaults values and its name.
:param class_name: the class of the chart to create.
:param name: its name.
:return: the chart.
"""
chart = eval(class_name)
# Initializing with default values
conf = self.chart_conf["DEFAULT"].copy()
if class_name in self.chart_conf:
# Overriding with specific chart values if defined
conf.update(self.chart_conf[class_name])
# Setting the chart name
conf["name"] = name
# Passing the dictionary as keywords
return chart(**conf)
def _list_convert(l):
""" Convert list containing potential numpy objects in order to transform them in Python standard types.
Other objects remain the same.
This method is a workaround to the behavior of the JSON encoder that does not handle numpy numbers.
:param l: the list to convert
:return: a new list with numpy objects converted and other objects remaining the same
"""
return list(map(_numpy_convert, l))
def _numpy_convert(obj):
""" Convert numpy numbers to standard numbers
:param obj: the object to convert
:return: the new object or the same object if it does not need to be converted
"""
if isinstance(obj, numpy.int_):
return int(obj)
elif isinstance(obj, numpy.float_):
return float(obj)
elif isinstance(obj, numpy.datetime64):
epoch_delta = obj - numpy.datetime64("1970-01-01T00:00:00Z")
return epoch_delta / numpy.timedelta64(1, "ms")
# TODO correct this hack from https://github.com/bokeh/bokeh/blob/master/bokeh/protocol.py
return obj
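
# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# Assumes python-nvd3 and an older pandas release (one that still provides
# Index.get_values()) are installed. The chart name must be a member of CLASS_ALLOWED,
# and the renderer needs the chart name bound before ChartFactory.render calls it with
# no arguments.
def _example_render_series():
    from functools import partial as _partial
    from pandas import Series as _Series

    factory = ChartFactory()
    posts = _Series([3, 5, 2], index=["jan", "feb", "mar"], name="posts")
    renderer = _partial(factory.get_renderer("discreteBarChart"), name="Posts per month")
    return factory.render(data=posts, renderer=renderer)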
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from functools import partial
from pandas import Series, DataFrame
# !! Import required to instantiate charts, do not remove
from nvd3 import *
import numpy
# A dict used for chart configuration.
# DEFAULT settings can be overwritten and/or completed by chart specific settings.
# In this case, the chart class is used as key.
DEFAULT_CONF = {
"DEFAULT": {"name": None, "display_container": False, "height": 300, "width": 700},
"stackedAreaChart": {"use_interactive_guideline": True, "x_axis_format": ""},
"multiBarChart": {"x_axis_format": ""},
"lineChart": {"x_is_date": True, "x_axis_format": "%m-%Y"},
}
CLASS_ALLOWED = (
"discreteBarChart",
"pieChart",
"multiBarChart",
"stackedAreaChart",
"lineChart",
)
class ChartFactory(object):
def __init__(self):
# TODO extra series conf ?
self.extra_series = {"tooltip": {"y_start": "", "y_end": " posts"}}
self.chart_conf = DEFAULT_CONF
def render(self, data, renderer):
""" Create a chart by using the renderer, add data, render and return it.
:param data: the chart input data. Can be a Series or a DataFrame.
:param renderer: the renderer to use to create the chart.
:return: the produced chart.
"""
chart = renderer()
if isinstance(data, Series):
self._add_series(series=data, chart=chart)
elif isinstance(data, DataFrame):
for column in data.columns.values:
series = data[column]
self._add_series(series=series, chart=chart)
chart.buildcontent()
return chart
def _add_series(self, series, chart):
# Converting values for Python 3.x compatibility, because numpy numbers are not supported by the JSON encoder
# TODO fix it in a better way
y = _list_convert(l=series.tolist())
x = _list_convert(l=series.index.get_values())
chart.add_serie(name=series.name, y=y, x=x, extra=self.extra_series)
def get_renderer(self, class_name):
""" Return a method responsible to create a chart corresponding to the given class_name.
Raises an exception if the class is not allowed.
:param class_name: the class of the chart to create.
        :return: the method that creates the chart.
"""
if class_name not in CLASS_ALLOWED:
raise ValueError("Class [%s] not allowed for a renderer" % class_name)
return partial(self._create_chart, class_name=class_name)
def _create_chart(self, class_name, name):
""" Initialize a chart, with defaults values and its name.
:param class_name: the class of the chart to create.
:param name: its name.
:return: the chart.
"""
chart = eval(class_name)
# Initializing with default values
conf = self.chart_conf["DEFAULT"].copy()
if class_name in self.chart_conf:
# Overriding with specific chart values if defined
conf.update(self.chart_conf[class_name])
# Setting the chart name
conf["name"] = name
# Passing the dictionary as keywords
return chart(**conf)
def _list_convert(l):
""" Convert list containing potential numpy objects in order to transform them in Python standard types.
Other objects remain the same.
This method is a workaround to the behavior of the JSON encoder that does not handle numpy numbers.
:param l: the list to convert
:return: a new list with numpy objects converted and other objects remaining the same
"""
return list(map(_numpy_convert, l))
def _numpy_convert(obj):
""" Convert numpy numbers to standard numbers
:param obj: the object to convert
:return: the new object or the same object if it does not need to be converted
"""
if isinstance(obj, numpy.int_):
return int(obj)
elif isinstance(obj, numpy.float_):
return float(obj)
elif isinstance(obj, numpy.datetime64):
epoch_delta = obj - numpy.datetime64("1970-01-01T00:00:00Z")
return epoch_delta / numpy.timedelta64(1, "ms")
# TODO correct this hack from https://github.com/bokeh/bokeh/blob/master/bokeh/protocol.py
return obj | en | 0.774642 | # -*- coding: utf-8 -*- # !! Import required to instantiate charts, do not remove # A dict used for chart configuration. # DEFAULT settings can be overwritten and/or completed by chart specific settings. # In this case, the chart class is used as key. # TODO extra series conf ? Create a chart by using the renderer, add data, render and return it. :param data: the chart input data. Can be a Series or a DataFrame. :param renderer: the renderer to use to create the chart. :return: the produced chart. # Converting values for Python 3.x compatibility, because numpy numbers are not supported by the JSON encoder # TODO fix it in a better way Return a method responsible to create a chart corresponding to the given class_name. Raises an exception if the class is not allowed. :param class_name: the class of the chart to create. :return: the method permitting to create the chart. Initialize a chart, with defaults values and its name. :param class_name: the class of the chart to create. :param name: its name. :return: the chart. # Initializing with default values # Overriding with specific chart values if defined # Setting the chart name # Passing the dictionary as keywords Convert list containing potential numpy objects in order to transform them in Python standard types. Other objects remain the same. This method is a workaround to the behavior of the JSON encoder that does not handle numpy numbers. :param l: the list to convert :return: a new list with numpy objects converted and other objects remaining the same Convert numpy numbers to standard numbers :param obj: the object to convert :return: the new object or the same object if it does not need to be converted # TODO correct this hack from https://github.com/bokeh/bokeh/blob/master/bokeh/protocol.py | 3.014992 | 3 |
utils/unpacker.py | woshimaliang/xcbuildkit | 165 | 6632714 | #!/usr/local/bin/python3
import msgpack
import service
import sys
import fcntl
import os
import time
import datetime
global log_file
global log_path
if len(sys.argv) > 1:
log_path = sys.argv[1]
else:
log_path = "/tmp/xcbuild.diags"
log_file = None
def unpack(unpacker, data=[]):
state = unpacker.tell()
log("AT", state)
try:
obj = unpacker.unpack()
log(obj)
except msgpack.exceptions.OutOfData as inst:
# Handle when the file is done
log("Done")
return False
except msgpack.exceptions.StackError as inst:
log("StackError", state)
log("Err", inst)
except Exception as inst:
log("Except-state", state)
log("At", data[state:state+400])
log("Err", inst)
try:
unpacker.skip()
except:
pass
return True
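
# Illustrative only (not part of the original script): the same streaming pattern can be
# reproduced with a plain msgpack.Unpacker -- feed() buffers raw bytes and unpack()
# returns one object per call until it raises OutOfData. The project's
# service.get_unpacker() wrapper is assumed to return a compatible unpacker.
def _example_stream_unpack():
    packed = msgpack.packb({"op": 1}) + msgpack.packb(["a", "b"])
    unp = msgpack.Unpacker(raw=False)
    unp.feed(packed)
    objs = []
    while True:
        try:
            objs.append(unp.unpack())
        except msgpack.exceptions.OutOfData:
            break
    return objs  # -> [{'op': 1}, ['a', 'b']]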
def log(*args, **kwargs):
global log_file
global log_path
now = datetime.datetime.now()
if log_file == None:
log_file = open(log_path,"w", 512)
log_file.write(str(now) + " INFO: "+" ".join(map(str,args))+"\n", **kwargs)
def dump_protocol(path, sanitize=False):
global log_path
log_path = path + ".diags"
data_file = open(path, 'rb')
data = data_file.read()
log("Data:", data)
unpacker = service.get_unpacker()
unpacker.feed(data)
if sanitize:
data = service.sanitize(data)
while True:
if not unpack(unpacker, data):
return
def loop():
global log_file
log("Start")
unpacker = service.get_unpacker()
buff = None
last_byte = None
byte = None
i = 0
while True:
last_byte = byte
byte = sys.stdin.buffer.read(1)
log_file.flush()
if not byte:
i = i + 1
if i > 10:
log("Waiting")
time.sleep(2.0)
else:
i = 0
if not buff:
buff = byte
        elif byte:
buff += byte
if (not byte or byte == b'') and buff:
unpacker.feed(buff)
while unpack(unpacker, buff):
log("Pack")
buff = None
byte = None
unpacker = service.get_unpacker()
orig_fl = fcntl.fcntl(sys.stdin, fcntl.F_GETFL)
fcntl.fcntl(sys.stdin, fcntl.F_SETFL, orig_fl | os.O_NONBLOCK)
loop()
| #!/usr/local/bin/python3
import msgpack
import service
import sys
import fcntl
import os
import time
import datetime
global log_file
global log_path
if len(sys.argv) > 1:
log_path = sys.argv[1]
else:
log_path = "/tmp/xcbuild.diags"
log_file = None
def unpack(unpacker, data=[]):
state = unpacker.tell()
log("AT", state)
try:
obj = unpacker.unpack()
log(obj)
except msgpack.exceptions.OutOfData as inst:
# Handle when the file is done
log("Done")
return False
except msgpack.exceptions.StackError as inst:
log("StackError", state)
log("Err", inst)
except Exception as inst:
log("Except-state", state)
log("At", data[state:state+400])
log("Err", inst)
try:
unpacker.skip()
except:
pass
return True
def log(*args, **kwargs):
global log_file
global log_path
now = datetime.datetime.now()
if log_file == None:
log_file = open(log_path,"w", 512)
log_file.write(str(now) + " INFO: "+" ".join(map(str,args))+"\n", **kwargs)
def dump_protocol(path, sanitize=False):
global log_path
log_path = path + ".diags"
data_file = open(path, 'rb')
data = data_file.read()
log("Data:", data)
unpacker = service.get_unpacker()
unpacker.feed(data)
if sanitize:
data = service.sanitize(data)
while True:
if not unpack(unpacker, data):
return
def loop():
global log_file
log("Start")
unpacker = service.get_unpacker()
buff = None
last_byte = None
byte = None
i = 0
while True:
last_byte = byte
byte = sys.stdin.buffer.read(1)
log_file.flush()
if not byte:
i = i + 1
if i > 10:
log("Waiting")
time.sleep(2.0)
else:
i = 0
if not buff:
buff = byte
        elif byte:
buff += byte
if (not byte or byte == b'') and buff:
unpacker.feed(buff)
while unpack(unpacker, buff):
log("Pack")
buff = None
byte = None
unpacker = service.get_unpacker()
orig_fl = fcntl.fcntl(sys.stdin, fcntl.F_GETFL)
fcntl.fcntl(sys.stdin, fcntl.F_SETFL, orig_fl | os.O_NONBLOCK)
loop()
| en | 0.771662 | #!/usr/local/bin/python3 # Handle when the file is done | 2.225312 | 2 |
eventmapper/__init__.py | Moguri/panda3d-eventmapper | 0 | 6632715 | <reponame>Moguri/panda3d-eventmapper
import pprint
from direct.showbase.DirectObject import DirectObject
from direct.directnotify.DirectNotifyGlobal import directNotify
from direct.showbase.MessengerGlobal import messenger
import panda3d.core as p3d
class EventMapper(DirectObject):
notify = directNotify.newCategory("EventMapper")
event_map_item_prefix = "event-map-item-"
def __init__(self, manage_gamepads=True):
super().__init__()
self.gamepad_label_map = {
'rshoulder': 'RB',
'rtrigger': 'RT',
'lshoulder': 'LB',
'ltrigger': 'LT',
}
self.gamepad_label_map_dualshock = {
'rshoulder': 'R1',
'rtrigger': 'R2',
'lshoulder': 'L1',
'ltrigger': 'L2',
'x': '□',
'y': '△',
'a': '×',
'b': '○',
}
        # Setup gamepads
self.gamepad_deadzone = 0.25
self._gamepads = []
self._prev_gamepad_events = []
if manage_gamepads:
for dev in base.devices.get_devices(p3d.InputDevice.DeviceClass.gamepad):
self._device_connected(dev)
base.task_mgr.add(self._update, 'eventmapper update')
# Setup input map
self.input_map = {}
self.reload_config()
def _update(self, task):
newevents = []
for gpidx, gamepad in enumerate(self._gamepads):
for axis in p3d.InputDevice.Axis:
state = gamepad.findAxis(axis)
if state and abs(state.value) > self.gamepad_deadzone:
axisname = str(axis).lower().replace('.', '_')
event = f'gamepad{gpidx}-{axisname}'
messenger.send(event, [state.value])
if state.value > 0:
event = f'{event}-pos'
else:
event = f'{event}-neg'
newevents.append(event)
if event not in self._prev_gamepad_events:
messenger.send(event)
else:
messenger.send(f'{event}-repeat')
for prevevent in self._prev_gamepad_events:
if prevevent not in newevents:
messenger.send(f'{prevevent}-up')
self._prev_gamepad_events = newevents
return task.cont
def _device_connected(self, device):
add_device = (
device.device_class == p3d.InputDevice.DeviceClass.gamepad and
device not in self._gamepads
)
if add_device:
self.notify.info('Detected {}'.format(device))
gpidx = len(self._gamepads)
self._gamepads.append(device)
base.attach_input_device(device, 'gamepad'+str(gpidx))
def _device_disconnected(self, device):
if device in self._gamepads:
self.notify.info('Disconnected {}'.format(device))
self._gamepads.remove(device)
base.detach_input_device(device)
def clear_aliases(self):
self.input_map = {}
self.ignoreAll()
def add_alias(self, input_event, output_event):
if input_event not in self.input_map:
self.input_map[input_event] = []
self.input_map[input_event].append(output_event)
def reload_config(self):
cvmgr = p3d.ConfigVariableManager.get_global_ptr()
# Remove previous mappings
self.clear_aliases()
# Build mappings from ConfigVariables
for cvar in cvmgr.variables:
if cvar.name.startswith(self.event_map_item_prefix):
cvar = p3d.ConfigVariableString(cvar.name, '')
outevent = cvar.name.replace(self.event_map_item_prefix, '')
for i in range(cvar.get_num_words()):
inevent = cvar.get_word(i)
if inevent == outevent:
# Prevent circular reference
self.notify.warning(
"skipping circular reference mapping {} to {}".format(
inevent, outevent
)
)
continue
self.add_alias(inevent, outevent)
self.notify.info("Loaded Event Map\n{}".format(pprint.pformat(self.input_map)))
# Listen for events
for trigger, events in self.input_map.items():
self.accept(trigger, self.send, [events, ''])
self.accept(trigger + '-up', self.send, [events, '-up'])
self.accept(trigger + '-repeat', self.send, [events, '-repeat'])
def send(self, events, suffix, *args):
for i in events:
self.notify.debug("throwing {}".format(i+suffix))
messenger.send(i + suffix, list(args))
def get_inputs_for_event(self, event):
return [key for key, value in self.input_map.items() if event in value]
def _get_mapped_gamepad_label(self, gamepad_device, inp):
if not inp.startswith('gamepad') or gamepad_device is None:
return ''
# remove gamepadN prefix
label = '-'.join(inp.split('-')[1:])
prefer_ds_labels = 'playstation' in gamepad_device.name.lower()
if label.startswith('action'):
label = label.replace('action_', '')
if prefer_ds_labels:
label = self.gamepad_label_map_dualshock.get(label, label.upper())
else:
label = self.gamepad_label_map.get(label, label.upper())
return label
return ''
def get_labels_for_event(self, event, default=None):
inputs = self.get_inputs_for_event(event)
keymap = base.win.get_keyboard_map() if 'base' in globals() else None
gpidx = [
int(i.split('-')[0].replace('gamepad', ''))
for i in inputs
if i.startswith('gamepad')
]
if gpidx and gpidx[0] < len(self._gamepads):
gamepad_device = self._gamepads[gpidx[0]]
elif self._gamepads:
gamepad_device = self._gamepads[0]
else:
gamepad_device = None
        self.notify.debug('Using gamepad device: {}'.format(gamepad_device))
if default is not None:
inputs.append(default)
inputs = filter(
lambda x: x.startswith('gamepad') == bool(gamepad_device is not None),
inputs
)
retval = []
for inp in inputs:
inp = inp.replace('raw-', '')
retval.append(next(filter(None, [
self._get_mapped_gamepad_label(gamepad_device, inp),
keymap.get_mapped_button_label(inp) if keymap is not None else '',
inp
])))
return retval
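
# Illustrative sketch (not part of the original module). EventMapper reads PRC variables
# named "event-map-item-<output>" whose words are the input events to remap, e.g.
#   event-map-item-jump  raw-space  gamepad0-face_a
# The example below assumes Panda3D is installed; manage_gamepads=False avoids needing
# a running ShowBase just to build the alias table.
def _example_setup():
    from panda3d.core import load_prc_file_data
    load_prc_file_data('', 'event-map-item-jump raw-space gamepad0-face_a')
    mapper = EventMapper(manage_gamepads=False)
    mapper.accept('jump', lambda: print('jump pressed'))
    return mapper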
| import pprint
from direct.showbase.DirectObject import DirectObject
from direct.directnotify.DirectNotifyGlobal import directNotify
from direct.showbase.MessengerGlobal import messenger
import panda3d.core as p3d
class EventMapper(DirectObject):
notify = directNotify.newCategory("EventMapper")
event_map_item_prefix = "event-map-item-"
def __init__(self, manage_gamepads=True):
super().__init__()
self.gamepad_label_map = {
'rshoulder': 'RB',
'rtrigger': 'RT',
'lshoulder': 'LB',
'ltrigger': 'LT',
}
self.gamepad_label_map_dualshock = {
'rshoulder': 'R1',
'rtrigger': 'R2',
'lshoulder': 'L1',
'ltrigger': 'L2',
'x': '□',
'y': '△',
'a': '×',
'b': '○',
}
        # Setup gamepads
self.gamepad_deadzone = 0.25
self._gamepads = []
self._prev_gamepad_events = []
if manage_gamepads:
for dev in base.devices.get_devices(p3d.InputDevice.DeviceClass.gamepad):
self._device_connected(dev)
base.task_mgr.add(self._update, 'eventmapper update')
# Setup input map
self.input_map = {}
self.reload_config()
def _update(self, task):
newevents = []
for gpidx, gamepad in enumerate(self._gamepads):
for axis in p3d.InputDevice.Axis:
state = gamepad.findAxis(axis)
if state and abs(state.value) > self.gamepad_deadzone:
axisname = str(axis).lower().replace('.', '_')
event = f'gamepad{gpidx}-{axisname}'
messenger.send(event, [state.value])
if state.value > 0:
event = f'{event}-pos'
else:
event = f'{event}-neg'
newevents.append(event)
if event not in self._prev_gamepad_events:
messenger.send(event)
else:
messenger.send(f'{event}-repeat')
for prevevent in self._prev_gamepad_events:
if prevevent not in newevents:
messenger.send(f'{prevevent}-up')
self._prev_gamepad_events = newevents
return task.cont
def _device_connected(self, device):
add_device = (
device.device_class == p3d.InputDevice.DeviceClass.gamepad and
device not in self._gamepads
)
if add_device:
self.notify.info('Detected {}'.format(device))
gpidx = len(self._gamepads)
self._gamepads.append(device)
base.attach_input_device(device, 'gamepad'+str(gpidx))
def _device_disconnected(self, device):
if device in self._gamepads:
self.notify.info('Disconnected {}'.format(device))
self._gamepads.remove(device)
base.detach_input_device(device)
def clear_aliases(self):
self.input_map = {}
self.ignoreAll()
def add_alias(self, input_event, output_event):
if input_event not in self.input_map:
self.input_map[input_event] = []
self.input_map[input_event].append(output_event)
def reload_config(self):
cvmgr = p3d.ConfigVariableManager.get_global_ptr()
# Remove previous mappings
self.clear_aliases()
# Build mappings from ConfigVariables
for cvar in cvmgr.variables:
if cvar.name.startswith(self.event_map_item_prefix):
cvar = p3d.ConfigVariableString(cvar.name, '')
outevent = cvar.name.replace(self.event_map_item_prefix, '')
for i in range(cvar.get_num_words()):
inevent = cvar.get_word(i)
if inevent == outevent:
# Prevent circular reference
self.notify.warning(
"skipping circular reference mapping {} to {}".format(
inevent, outevent
)
)
continue
self.add_alias(inevent, outevent)
self.notify.info("Loaded Event Map\n{}".format(pprint.pformat(self.input_map)))
# Listen for events
for trigger, events in self.input_map.items():
self.accept(trigger, self.send, [events, ''])
self.accept(trigger + '-up', self.send, [events, '-up'])
self.accept(trigger + '-repeat', self.send, [events, '-repeat'])
def send(self, events, suffix, *args):
for i in events:
self.notify.debug("throwing {}".format(i+suffix))
messenger.send(i + suffix, list(args))
def get_inputs_for_event(self, event):
return [key for key, value in self.input_map.items() if event in value]
def _get_mapped_gamepad_label(self, gamepad_device, inp):
if not inp.startswith('gamepad') or gamepad_device is None:
return ''
# remove gamepadN prefix
label = '-'.join(inp.split('-')[1:])
prefer_ds_labels = 'playstation' in gamepad_device.name.lower()
if label.startswith('action'):
label = label.replace('action_', '')
if prefer_ds_labels:
label = self.gamepad_label_map_dualshock.get(label, label.upper())
else:
label = self.gamepad_label_map.get(label, label.upper())
return label
return ''
def get_labels_for_event(self, event, default=None):
inputs = self.get_inputs_for_event(event)
keymap = base.win.get_keyboard_map() if 'base' in globals() else None
gpidx = [
int(i.split('-')[0].replace('gamepad', ''))
for i in inputs
if i.startswith('gamepad')
]
if gpidx and gpidx[0] < len(self._gamepads):
gamepad_device = self._gamepads[gpidx[0]]
elif self._gamepads:
gamepad_device = self._gamepads[0]
else:
gamepad_device = None
        self.notify.debug('Using gamepad device: {}'.format(gamepad_device))
if default is not None:
inputs.append(default)
inputs = filter(
lambda x: x.startswith('gamepad') == bool(gamepad_device is not None),
inputs
)
retval = []
for inp in inputs:
inp = inp.replace('raw-', '')
retval.append(next(filter(None, [
self._get_mapped_gamepad_label(gamepad_device, inp),
keymap.get_mapped_button_label(inp) if keymap is not None else '',
inp
])))
return retval | en | 0.415349 | # Setup gamepadds # Setup input map # Remove previous mappings # Build mappings from ConfigVariables # Prevent circular reference # Listen for events # remove gamepadN prefix | 2.034299 | 2 |
game_data.py | jamestk97/BongBongAI | 0 | 6632716 | # The sprite data and level design were adopted from <Bong Bong> (1989) written by
# <NAME> (co-founder of Daum Corporation: https://en.wikipedia.org/wiki/Daum_(web_portal))
# while in Computer Science Department at Yonsei University, Korea.
# The program and the data had been open-sourced in a Korean online game community.
# Python scripts were written from scratch, but the data below was excerpted from the 1989 game.
#
# <<NAME>> was a remake of <PONPOKO (ポンポコ)> (1982) from Sigma Enterprise, Japan.
# More about the original game can be found here: https://en.wikipedia.org/wiki/Ponpoko
data_path = 'data/' # base image path
time_limit = 100 # each stage should be completed within 100 seconds
time_bonus = 100 # upon completion of each stage, (remaining seconds)x(time_bonus) is time bonus
life_limit = 3 # max number of trials
img_world = ['01.png', # background
'02.png', '03.png', '04.png', '05.png', # left / right / rectangle / island platform
'06.png', # ladder
'07.png', # needle
'08.png'] # bonus
img_tanuki = [['tl.png', ['tlj1.png', 'tlj2.png']], # left / left jump motions
['tr.png', ['trj1.png', 'trj2.png']], # right / right jump motions
'to.png', # up motion
['tx1.png', 'tx2.png', 'tx3.png', 'tx4.png', 'tx5.png'], # dead / fall animation
['al.png', 'ar.png'], # left / right small bonus
['bl.png', 'br.png']] # left / right large bonus
img_enemy1 = [['cl1.png', 'cl2.png'], # left motion
['cr1.png', 'cr2.png']] # right motion
img_enemy2 = ['el.png', 'er.png'] # left / right
img_fruit = ['f0.png', 'f1.png', 'f2.png', 'f3.png', 'f4.png',
'f5.png', 'f6.png', 'f7.png', 'f8.png', 'f9.png'] # fruit images of all stages
enemy_speeds = [[4, 0, 0, 3, 10],
[5, 0, 5, 0, 11],
[4, 0, 0, 6, 11],
[4, 0, 3, 4, 12],
[5, 0, 0, 6, 11],
[6, 0, 5, 0, 12],
[5, 0, 4, 5, 13],
[6, 0, 7, 0, 12],
[6, 7, 0, 5, 11],
[5, 0, 6, 8, 11]] # enemy2 information (location / speed) followed by enemy1 information (speed)
# Stage Information: Each stage is (rows x cols) = (12 x 20) cells
# . empty
# 2 platform / left half-circle
# 3 platform / right half-circle
# 4 platform / rectangle
# 5 platform / island
# 6 ladder
# 7 needle
# # target (100 points)
# a bonus (500 points)
# b bonus (1000 points)
# c enemy1 (always appear on the right)
stages = [
['....................',
'a.7#..#........7.#..',
'4643..26..246444444.',
'.6.....6....6.......',
'.6...#.6....6.a7..#.',
'443.2463.2444444464.',
'......6..........6..',
'#..b.76#.....7#..6..',
'43.26443.5.24446444.',
'....6..........6....',
'..#.6....7.#7..6....',
'44444444444444444444'], # stage 1
['....................',
'..7#...#7.7#..#.....',
'46443..246443.2643..',
'.6.......6.....6....',
'.6..a....6...b76..#.',
'443.263.243..6443.2.',
'.....6.......6......',
'.#...6.c#..#76....#.',
'4463.2443..24463.24.',
'..6...........6.....',
'..6...#.7..7#.6.....',
'44444444444444444444'], # stage 2
['....................',
'...#.....#7......7#.',
'..2446..24463..2644.',
'.....6.....6....6...',
'#...a6..b#.67...6#..',
'463.243.246443.243..',
'.6........6.........',
'.67.#....b6..#7.7#..',
'444643...243.246444.',
'...6...........6....',
'...6...7#7#7..76....',
'44444444444444444444'], # stage 3
['....................',
'.7#.....7#......#...',
'643...26443....2464.',
'6......6.........6..',
'6c.7#..6#...b#...6#.',
'446443.23.26443.244.',
'..6........6........',
'.#67......#67...#...',
'4444643...2443.2464.',
'....6............6..',
'....6#.7..7#.....6..',
'44444444444444444444'], # stage 4
['....................',
'#....#7...#...#.....',
'463.2443.23..243.26.',
'.6................6.',
'c6...#.....#7....#6.',
'43.263.....2463.244.',
'....6........6......',
'#...6b......a6..7#..',
'463.23......23.6444.',
'.6.............6....',
'.6....#7.7#7...6....',
'44444444444444444444'], # stage 5
['....................',
'..7#.......#7c...7..',
'4643.......2463.246.',
'.6...........6....6.',
'#6.b.7#.....76...#6.',
'43.2643....243.2644.',
'....6...........6...',
'#..76..#..#7....6.#.',
'446443.5..243.643.2.',
'..6...........6.....',
'..6#.a7.#.7...6.#...',
'44444444444444444444'], # stage 6
['....................',
'.#7.....#7...#.7..#.',
'.2463...2463.263.64.',
'...6......6...6..6..',
'..76a...#76..#67.6..',
'46443...2443.244644.',
'.6..............6...',
'#67...#.a.#.....6#..',
'44463.5.5.23..26444.',
'...6...........6....',
'.#.6..#7b.7#.7.6....',
'44444444444444444444'], # stage 7
['....................',
'...#......#.......#.',
'.263..263.23..263.2.',
'..6....6.......6....',
'.c67#..6#......6#...',
'46443..246....24464.',
'.6.......6.......6..',
'#6a...#..6..#..a76..',
'443.263.243.5..6444.',
'.....6.........6....',
'.#7..6#7.#7b...6....',
'44444444444444444444'], # stage 8 (NOTE: changed (5,15) from 6 to 4)
['....................',
'..7#...#..c.b....7#.',
'4643..23.23.23.6444.',
'.6.............6....',
'#6....7#..#7...6..#.',
'443.2643..246..5.64.',
'.....6......6....6..',
'.a#..6.....#6...76..',
'.263.23....243.643..',
'..6............6....',
'..6.7.#.7.#.7..6....',
'44444444444444444444'], # stage 9
['....................',
'a...7#..#.7..#..#7..',
'3.2643..2643.5..246.',
'...6.....6........6.',
'#..6.c...67....#.76.',
'3.23.26..2463..2644.',
'......6....6....6...',
'..7#.a6....6...#6...',
'4643.23..263...2464.',
'.6........6......6..',
'.6.#.7.#..6.7.#..6..',
'44444444444444444444'] # stage 10
]
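
# Illustrative helper (not part of the original data): each stage above is a list of 12
# strings of 20 characters, one character per cell, so cells can be indexed directly and
# the '#' targets of a stage counted with plain string operations.
def _example_stage_queries(stage_index=0):
    stage = stages[stage_index]
    assert len(stage) == 12 and all(len(row) == 20 for row in stage)
    bottom_left = stage[11][0]                      # '4' = rectangle platform
    targets = sum(row.count('#') for row in stage)  # number of 100-point targets
    return bottom_left, targets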
| # The sprite data and level design were adopted from <Bong Bong> (1989) written by
# <NAME> (co-founder of Daum Corporation: https://en.wikipedia.org/wiki/Daum_(web_portal))
# while in Computer Science Department at Yonsei University, Korea.
# The program and the data had been open-sourced in a Korean online game community.
# Python scripts were written from scratch, but the data below was excerpted from the 1989 game.
#
# <<NAME>> was a remake of <PONPOKO (ポンポコ)> (1982) from Sigma Enterprise, Japan.
# More about the original game can be found here: https://en.wikipedia.org/wiki/Ponpoko
data_path = 'data/' # base image path
time_limit = 100 # each stage should be completed within 100 seconds
time_bonus = 100 # upon completion of each stage, (remaining seconds)x(time_bonus) is time bonus
life_limit = 3 # max number of trials
img_world = ['01.png', # background
'02.png', '03.png', '04.png', '05.png', # left / right / rectangle / island platform
'06.png', # ladder
'07.png', # needle
'08.png'] # bonus
img_tanuki = [['tl.png', ['tlj1.png', 'tlj2.png']], # left / left jump motions
['tr.png', ['trj1.png', 'trj2.png']], # right / right jump motions
'to.png', # up motion
['tx1.png', 'tx2.png', 'tx3.png', 'tx4.png', 'tx5.png'], # dead / fall animation
['al.png', 'ar.png'], # left / right small bonus
['bl.png', 'br.png']] # left / right large bonus
img_enemy1 = [['cl1.png', 'cl2.png'], # left motion
['cr1.png', 'cr2.png']] # right motion
img_enemy2 = ['el.png', 'er.png'] # left / right
img_fruit = ['f0.png', 'f1.png', 'f2.png', 'f3.png', 'f4.png',
'f5.png', 'f6.png', 'f7.png', 'f8.png', 'f9.png'] # fruit images of all stages
enemy_speeds = [[4, 0, 0, 3, 10],
[5, 0, 5, 0, 11],
[4, 0, 0, 6, 11],
[4, 0, 3, 4, 12],
[5, 0, 0, 6, 11],
[6, 0, 5, 0, 12],
[5, 0, 4, 5, 13],
[6, 0, 7, 0, 12],
[6, 7, 0, 5, 11],
[5, 0, 6, 8, 11]] # enemy2 information (location / speed) followed by enemy1 information (speed)
# Stage Information: Each stage is (rows x cols) = (12 x 20) cells
# . empty
# 2 platform / left half-circle
# 3 platform / right half-circle
# 4 platform / rectangle
# 5 platform / island
# 6 ladder
# 7 needle
# # target (100 points)
# a bonus (500 points)
# b bonus (1000 points)
# c enemy1 (always appear on the right)
stages = [
['....................',
'a.7#..#........7.#..',
'4643..26..246444444.',
'.6.....6....6.......',
'.6...#.6....6.a7..#.',
'443.2463.2444444464.',
'......6..........6..',
'#..b.76#.....7#..6..',
'43.26443.5.24446444.',
'....6..........6....',
'..#.6....7.#7..6....',
'44444444444444444444'], # stage 1
['....................',
'..7#...#7.7#..#.....',
'46443..246443.2643..',
'.6.......6.....6....',
'.6..a....6...b76..#.',
'443.263.243..6443.2.',
'.....6.......6......',
'.#...6.c#..#76....#.',
'4463.2443..24463.24.',
'..6...........6.....',
'..6...#.7..7#.6.....',
'44444444444444444444'], # stage 2
['....................',
'...#.....#7......7#.',
'..2446..24463..2644.',
'.....6.....6....6...',
'#...a6..b#.67...6#..',
'463.243.246443.243..',
'.6........6.........',
'.67.#....b6..#7.7#..',
'444643...243.246444.',
'...6...........6....',
'...6...7#7#7..76....',
'44444444444444444444'], # stage 3
['....................',
'.7#.....7#......#...',
'643...26443....2464.',
'6......6.........6..',
'6c.7#..6#...b#...6#.',
'446443.23.26443.244.',
'..6........6........',
'.#67......#67...#...',
'4444643...2443.2464.',
'....6............6..',
'....6#.7..7#.....6..',
'44444444444444444444'], # stage 4
['....................',
'#....#7...#...#.....',
'463.2443.23..243.26.',
'.6................6.',
'c6...#.....#7....#6.',
'43.263.....2463.244.',
'....6........6......',
'#...6b......a6..7#..',
'463.23......23.6444.',
'.6.............6....',
'.6....#7.7#7...6....',
'44444444444444444444'], # stage 5
['....................',
'..7#.......#7c...7..',
'4643.......2463.246.',
'.6...........6....6.',
'#6.b.7#.....76...#6.',
'43.2643....243.2644.',
'....6...........6...',
'#..76..#..#7....6.#.',
'446443.5..243.643.2.',
'..6...........6.....',
'..6#.a7.#.7...6.#...',
'44444444444444444444'], # stage 6
['....................',
'.#7.....#7...#.7..#.',
'.2463...2463.263.64.',
'...6......6...6..6..',
'..76a...#76..#67.6..',
'46443...2443.244644.',
'.6..............6...',
'#67...#.a.#.....6#..',
'44463.5.5.23..26444.',
'...6...........6....',
'.#.6..#7b.7#.7.6....',
'44444444444444444444'], # stage 7
['....................',
'...#......#.......#.',
'.263..263.23..263.2.',
'..6....6.......6....',
'.c67#..6#......6#...',
'46443..246....24464.',
'.6.......6.......6..',
'#6a...#..6..#..a76..',
'443.263.243.5..6444.',
'.....6.........6....',
'.#7..6#7.#7b...6....',
'44444444444444444444'], # stage 8 (NOTE: changed (5,15) from 6 to 4)
['....................',
'..7#...#..c.b....7#.',
'4643..23.23.23.6444.',
'.6.............6....',
'#6....7#..#7...6..#.',
'443.2643..246..5.64.',
'.....6......6....6..',
'.a#..6.....#6...76..',
'.263.23....243.643..',
'..6............6....',
'..6.7.#.7.#.7..6....',
'44444444444444444444'], # stage 9
['....................',
'a...7#..#.7..#..#7..',
'3.2643..2643.5..246.',
'...6.....6........6.',
'#..6.c...67....#.76.',
'3.23.26..2463..2644.',
'......6....6....6...',
'..7#.a6....6...#6...',
'4643.23..263...2464.',
'.6........6......6..',
'.6.#.7.#..6.7.#..6..',
'44444444444444444444'] # stage 10
]
| en | 0.705758 | # The sprite data and level design were adopted from <Bong Bong> (1989) written by # <NAME> (co-founder of Daum Corporation: https://en.wikipedia.org/wiki/Daum_(web_portal)) # while in Computer Science Department at Yonsei University, Korea. # The program and the data had been open-sourced in a Korean online game community. # Python scripts were written from scratch, but the data below was excerpted from the 1989 game. # # <<NAME>> was a remake of <PONPOKO (ポンポコ)> (1982) from Sigma Enterprise, Japan. # More about the original game can be found here: https://en.wikipedia.org/wiki/Ponpoko # base image path # each stage should be completed within 100 seconds # upon completion of each stage, (remaining seconds)x(time_bonus) is time bonus # max number of trials # background # left / right / rectangle / island platform # ladder # needle # bonus # left / left jump motions # right / right jump motions # up motion # dead / fall animation # left / right small bonus # left / right large bonus # left motion # right motion # left / right # fruit images of all stages # enemy2 information (location / speed) followed by enemy1 information (speed) # Stage Information: Each stage is (rows x cols) = (12 x 20) cells # . empty # 2 platform / left half-circle # 3 platform / right half-circle # 4 platform / rectangle # 5 platform / island # 6 ladder # 7 needle # # target (100 points) # a bonus (500 points) # b bonus (1000 points) # c enemy1 (always appear on the right) #..#........7.#..', #.6....6.a7..#.', #.....7#..6..', #.6....7.#7..6....', # stage 1 #...#7.7#..#.....', #.', #...6.c#..#76....#.', #.7..7#.6.....', # stage 2 #.....#7......7#.', #.67...6#..', #....b6..#7.7#..', #7#7..76....', # stage 3 #.....7#......#...', #..6#...b#...6#.', #67......#67...#...', #.7..7#.....6..', # stage 4 #7...#...#.....', #.....#7....#6.', #..', #7.7#7...6....', # stage 5 #.......#7c...7..', #.....76...#6.', #..#7....6.#.', #.a7.#.7...6.#...', # stage 6 #7.....#7...#.7..#.', #76..#67.6..', #.a.#.....6#..', #.6..#7b.7#.7.6....', # stage 7 #......#.......#.', #..6#......6#...', #..6..#..a76..', #7..6#7.#7b...6....', # stage 8 (NOTE: changed (5,15) from 6 to 4) #...#..c.b....7#.', #..#7...6..#.', #..6.....#6...76..', #.7.#.7..6....', # stage 9 #..#.7..#..#7..', #.76.', #.a6....6...#6...', #.7.#..6.7.#..6..', # stage 10 | 2.003449 | 2 |
main.py | Mort1J1/Python-spill | 0 | 6632717 | from cmath import pi
from math import sqrt
from turtle import Screen
import pygame
import sys
import random
from pyparsing import Or
from scipy import rand
from soupsieve import match
from sqlalchemy import case, false
pygame.init()
SCREEN_WIDTH = 1400
SCREEN_HEIGHT = 800
COLOR_BLACK = (0, 0, 0)
COLOR_WHITE = (255, 255, 255)
#Create surface
surface = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))
#Create title
pygame.display.set_caption("(^_ ^)")
#sounds
infectedsound = pygame.mixer.Sound('BallVeggBounce.wav')
# number of viruses and murlocs for the "score"
AntM = 0
AntV = 0
font = pygame.font.Font('freesansbold.ttf', 32)
fontPosx, fontPosy = 20, 20
def show_score(x, y):
scoreV = font.render("Virus: " + str(AntV), True, (255, 255, 255))
pos_x = SCREEN_WIDTH/2 + random.randrange(-200, 200)
pos_y = SCREEN_HEIGHT/2 + random.randrange(-200, 200)
running = True
murloclist = []
viruslist = []
def move_ball():
global pos_x
global pos_y
pos_x += 1
pos_y += 1
class Murloc():
def __init__(self):
self.x = SCREEN_WIDTH/2 + random.randrange(-200, 200)
self.y = SCREEN_HEIGHT/2 + random.randrange(-200, 200)
self.infected = False
self.size = random.randrange(35, 45)
self.color = (255, 0, random.randrange(0, 100))
self.spx = random.randrange(-5, 5, 2)
self.spy = random.randrange(-5, 5, 2)
self.infectedTime = 0
class Virus():
def __init__(self):
self.x = SCREEN_WIDTH/2 + random.randrange(-200, 200)
self.y = SCREEN_HEIGHT/2 + random.randrange(-200, 200)
self.spx = random.randrange(-7, 7, 2)
self.spy = random.randrange(-7, 7, 2)
self.size = random.randrange(4, 6)
self.infectiousR = random.randrange(0, 95)
self.color = (175, self.infectiousR+87, self.infectiousR+100)
self.copy = random.randrange(1, 3)
def create_murloc():
return Murloc()
# pygame.draw.circle(surface, m.color, (m.x, m.y), m.size)
def create_virus():
return Virus()
def move(m: Murloc):
Alive = True
i = 1000
while i > 0:
move_murlocs()
i -= 10
pygame.display.update()
surface.fill(COLOR_BLACK)
def clone_virus(m: Murloc):
for i in range (1, random.randrange(0, 6)):
v = create_virus()
v.x = m.x
v.y = m.y
viruslist.append(v)
def move_murlocs():
for m in murloclist:
if m.x + m.size >= SCREEN_WIDTH or m.x - m.size <= 0:
m.spx *= -1
if m.y + m.size >= SCREEN_HEIGHT or m.y - m.size <= 0:
m.spy *= -1
if m.infected == True and m.infectedTime+5000<pygame.time.get_ticks():
clone_virus(m)
murloclist.remove(m)
pygame.draw.circle(surface, m.color, (m.x, m.y), m.size)
m.x += m.spx
m.y += m.spy
def move_virus():
for m in viruslist:
if m.x + m.size >= SCREEN_WIDTH or m.x - m.size <= 0:
m.spx *= -1
if m.y + m.size >= SCREEN_HEIGHT or m.y - m.size <= 0:
m.spy *= -1
m.x += m.spx
m.y += m.spy
pygame.draw.circle(surface, m.color, (m.x, m.y), m.size)
# if m.x + m.size >= SCREEN_WIDTH:
# Alive = False
# elif m.x - m.size <= SCREEN_WIDTH:
# Alive = False
# elif m.y + m.size >= SCREEN_HEIGHT:
# Alive = False
# elif m.y - m.size <= SCREEN_HEIGHT:
# Alive = False
def infections(murloclist, viruslist):
for m in murloclist:
for v in viruslist:
if m.infected == False and (sqrt(((v.x - m.x)**2)+((v.y - m.y)**2)) <= (m.size + v.size)):
m.infected = True
infectedsound.play()
m.infectedTime = pygame.time.get_ticks()
print(m.infectedTime)
m.color = (39,134,39)
viruslist.remove(v)
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
sys.exit()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_m:
m = create_murloc()
murloclist.append(m)
if event.key == pygame.K_c:
surface.fill(COLOR_BLACK)
if event.key == pygame.K_v:
v = create_virus()
viruslist.append(v)
if event.key == pygame.K_d:
swch = 4
move_murlocs()
move_virus()
infections(murloclist, viruslist)
pygame.display.update()
surface.fill(COLOR_BLACK) | from cmath import pi
from math import sqrt
from turtle import Screen
import pygame
import sys
import random
from pyparsing import Or
from scipy import rand
from soupsieve import match
from sqlalchemy import case, false
pygame.init()
SCREEN_WIDTH = 1400
SCREEN_HEIGHT = 800
COLOR_BLACK = (0, 0, 0)
COLOR_WHITE = (255, 255, 255)
#Create surface
surface = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))
#Create title
pygame.display.set_caption("(^_ ^)")
#sounds
infectedsound = pygame.mixer.Sound('BallVeggBounce.wav')
# number of viruses and murlocs for the "score"
AntM = 0
AntV = 0
font = pygame.font.Font('freesansbold.ttf', 32)
fontPosx, fontPosy = 20, 20
def show_score(x, y):
scoreV = font.render("Virus: " + str(AntV), True, (255, 255, 255))
pos_x = SCREEN_WIDTH/2 + random.randrange(-200, 200)
pos_y = SCREEN_HEIGHT/2 + random.randrange(-200, 200)
running = True
murloclist = []
viruslist = []
def move_ball():
global pos_x
global pos_y
pos_x += 1
pos_y += 1
class Murloc():
def __init__(self):
self.x = SCREEN_WIDTH/2 + random.randrange(-200, 200)
self.y = SCREEN_HEIGHT/2 + random.randrange(-200, 200)
self.infected = False
self.size = random.randrange(35, 45)
self.color = (255, 0, random.randrange(0, 100))
self.spx = random.randrange(-5, 5, 2)
self.spy = random.randrange(-5, 5, 2)
self.infectedTime = 0
class Virus():
def __init__(self):
self.x = SCREEN_WIDTH/2 + random.randrange(-200, 200)
self.y = SCREEN_HEIGHT/2 + random.randrange(-200, 200)
self.spx = random.randrange(-7, 7, 2)
self.spy = random.randrange(-7, 7, 2)
self.size = random.randrange(4, 6)
self.infectiousR = random.randrange(0, 95)
self.color = (175, self.infectiousR+87, self.infectiousR+100)
self.copy = random.randrange(1, 3)
def create_murloc():
return Murloc()
# pygame.draw.circle(surface, m.color, (m.x, m.y), m.size)
def create_virus():
return Virus()
def move(m: Murloc):
Alive = True
i = 1000
while i > 0:
move_murlocs()
i -= 10
pygame.display.update()
surface.fill(COLOR_BLACK)
def clone_virus(m: Murloc):
for i in range (1, random.randrange(0, 6)):
v = create_virus()
v.x = m.x
v.y = m.y
viruslist.append(v)
def move_murlocs():
for m in murloclist:
if m.x + m.size >= SCREEN_WIDTH or m.x - m.size <= 0:
m.spx *= -1
if m.y + m.size >= SCREEN_HEIGHT or m.y - m.size <= 0:
m.spy *= -1
if m.infected == True and m.infectedTime+5000<pygame.time.get_ticks():
clone_virus(m)
murloclist.remove(m)
pygame.draw.circle(surface, m.color, (m.x, m.y), m.size)
m.x += m.spx
m.y += m.spy
def move_virus():
for m in viruslist:
if m.x + m.size >= SCREEN_WIDTH or m.x - m.size <= 0:
m.spx *= -1
if m.y + m.size >= SCREEN_HEIGHT or m.y - m.size <= 0:
m.spy *= -1
m.x += m.spx
m.y += m.spy
pygame.draw.circle(surface, m.color, (m.x, m.y), m.size)
# if m.x + m.size >= SCREEN_WIDTH:
# Alive = False
# elif m.x - m.size <= SCREEN_WIDTH:
# Alive = False
# elif m.y + m.size >= SCREEN_HEIGHT:
# Alive = False
# elif m.y - m.size <= SCREEN_HEIGHT:
# Alive = False
def infections(murloclist, viruslist):
for m in murloclist:
for v in viruslist:
if m.infected == False and (sqrt(((v.x - m.x)**2)+((v.y - m.y)**2)) <= (m.size + v.size)):
m.infected = True
infectedsound.play()
m.infectedTime = pygame.time.get_ticks()
print(m.infectedTime)
m.color = (39,134,39)
viruslist.remove(v)
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
sys.exit()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_m:
m = create_murloc()
murloclist.append(m)
if event.key == pygame.K_c:
surface.fill(COLOR_BLACK)
if event.key == pygame.K_v:
v = create_virus()
viruslist.append(v)
if event.key == pygame.K_d:
swch = 4
move_murlocs()
move_virus()
infections(murloclist, viruslist)
pygame.display.update()
surface.fill(COLOR_BLACK) | en | 0.242122 | #Create surface #Create title #sounds #antall virus og murlocs for "score" # pygame.draw.circle(surface, m.color, (m.x, m.y), m.size) # if m.x + m.size >= SCREEN_WIDTH: # Alive = False # elif m.x - m.size <= SCREEN_WIDTH: # Alive = False # elif m.y + m.size >= SCREEN_HEIGHT: # Alive = False # elif m.y - m.size <= SCREEN_HEIGHT: # Alive = False | 2.783146 | 3 |
scripts/bump-config.py | Thaodan/sailfishos-sony-tama-main | 0 | 6632718 | #!/usr/bin/env python3
import sys, glob
import lxml.etree as ET
from datetime import datetime
base = sys.argv[1]
for T in ['config', 'hal-version']:
for f in glob.glob(base + ('/*%s*/_service' % T)):
tree = ET.parse(f)
root = tree.getroot()
for c in root:
if c.tag is ET.Comment and c.text.find('Bump Config:') >= 0:
root.remove(c)
elif c.attrib['name'] == 'webhook':
root.remove(c)
comment = ET.Comment(' Bump Config: ' + datetime.now().isoformat(sep=' ', timespec='seconds') + ' ')
root.append(comment)
tree.write(f)
| #!/usr/bin/env python3
import sys, glob
import lxml.etree as ET
from datetime import datetime
base = sys.argv[1]
for T in ['config', 'hal-version']:
for f in glob.glob(base + ('/*%s*/_service' % T)):
tree = ET.parse(f)
root = tree.getroot()
for c in root:
if c.tag is ET.Comment and c.text.find('Bump Config:') >= 0:
root.remove(c)
elif c.attrib['name'] == 'webhook':
root.remove(c)
comment = ET.Comment(' Bump Config: ' + datetime.now().isoformat(sep=' ', timespec='seconds') + ' ')
root.append(comment)
tree.write(f)
| fr | 0.221828 | #!/usr/bin/env python3 | 2.297341 | 2 |
script/ssdnet/mycaffe.py | Hiroki-kt/image-recognition-train | 0 | 6632719 | <filename>script/ssdnet/mycaffe.py
import numpy as np
import re
import sys
import chainer.links.caffe.caffe_function as cf
from ssdnet.ssd import _Normalize
def _rename(name):
m = re.match(r'^conv(\d+)_([123])$', name)
if m:
i, j = list(map(int, m.groups()))
if i >= 6:
i += 2
return 'conv{:d}_{:d}'.format(i, j)
m = re.match(r'^fc([67])$', name)
if m:
return 'conv{:d}'.format(int(m.group(1)))
if name == r'conv4_3_norm':
return 'norm4'
m = re.match(r'^conv4_3_norm_mbox_(loc|conf)$', name)
if m:
return '{:s}/0'.format(m.group(1))
m = re.match(r'^fc7_mbox_(loc|conf)$', name)
if m:
return ('{:s}/1'.format(m.group(1)))
m = re.match(r'^conv(\d+)_2_mbox_(loc|conf)$', name)
if m:
i, type_ = int(m.group(1)), m.group(2)
if i >= 6:
return '{:s}/{:d}'.format(type_, i - 4)
return name
class _CaffeFunction(cf.CaffeFunction):
def __init__(self, model_path, verbose=False):
self.verbose = verbose
if self.verbose:
print('loading weights from {:s} ... '.format(model_path))
super(_CaffeFunction, self).__init__(model_path)
def add_link(self, name, link):
new_name = _rename(name)
if self.verbose:
print('{:s} -> {:s}'.format(name, new_name))
super(_CaffeFunction, self).add_link(new_name, link)
@cf._layer('Normalize', None)
def _setup_normarize(self, layer):
blobs = layer.blobs
func = _Normalize(cf._get_num(blobs[0]))
func.scale.data[:] = np.array(blobs[0].data)
self.add_link(layer.name, func)
@cf._layer('AnnotatedData', None)
@cf._layer('Flatten', None)
@cf._layer('MultiBoxLoss', None)
@cf._layer('Permute', None)
@cf._layer('PriorBox', None)
def _skip_layer(self, _):
pass
def load_caffe(model_path, verbose=False):
return _CaffeFunction(model_path, verbose)
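
# Usage sketch (illustrative; the .caffemodel path is hypothetical). load_caffe is assumed
# to return a Chainer chain whose links are registered under the renamed keys produced by
# _rename(), e.g. 'fc6' becomes 'conv6' and 'conv4_3_norm' becomes 'norm4'.
def _example_load():
    net = load_caffe('VGG_VOC0712_SSD_300x300.caffemodel', verbose=True)
    return net.norm4  # the renamed Normalize layer, if present in the model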
| <filename>script/ssdnet/mycaffe.py
import numpy as np
import re
import sys
import chainer.links.caffe.caffe_function as cf
from ssdnet.ssd import _Normalize
def _rename(name):
m = re.match(r'^conv(\d+)_([123])$', name)
if m:
i, j = list(map(int, m.groups()))
if i >= 6:
i += 2
return 'conv{:d}_{:d}'.format(i, j)
m = re.match(r'^fc([67])$', name)
if m:
return 'conv{:d}'.format(int(m.group(1)))
if name == r'conv4_3_norm':
return 'norm4'
m = re.match(r'^conv4_3_norm_mbox_(loc|conf)$', name)
if m:
return '{:s}/0'.format(m.group(1))
m = re.match(r'^fc7_mbox_(loc|conf)$', name)
if m:
return ('{:s}/1'.format(m.group(1)))
m = re.match(r'^conv(\d+)_2_mbox_(loc|conf)$', name)
if m:
i, type_ = int(m.group(1)), m.group(2)
if i >= 6:
return '{:s}/{:d}'.format(type_, i - 4)
return name
class _CaffeFunction(cf.CaffeFunction):
def __init__(self, model_path, verbose=False):
self.verbose = verbose
if self.verbose:
print('loading weights from {:s} ... '.format(model_path))
super(_CaffeFunction, self).__init__(model_path)
def add_link(self, name, link):
new_name = _rename(name)
if self.verbose:
print('{:s} -> {:s}'.format(name, new_name))
super(_CaffeFunction, self).add_link(new_name, link)
@cf._layer('Normalize', None)
def _setup_normarize(self, layer):
blobs = layer.blobs
func = _Normalize(cf._get_num(blobs[0]))
func.scale.data[:] = np.array(blobs[0].data)
self.add_link(layer.name, func)
@cf._layer('AnnotatedData', None)
@cf._layer('Flatten', None)
@cf._layer('MultiBoxLoss', None)
@cf._layer('Permute', None)
@cf._layer('PriorBox', None)
def _skip_layer(self, _):
pass
def load_caffe(model_path, verbose=False):
return _CaffeFunction(model_path, verbose)
| none | 1 | 2.407453 | 2 |
|
bamboo_api/__init__.py | sdlm/dev_idwell_bot | 2 | 6632720 | <reponame>sdlm/dev_idwell_bot
__author__ = 'lionel.cuevas'
from .api import BambooAPIClient
| __author__ = 'lionel.cuevas'
from .api import BambooAPIClient | none | 1 | 1.112552 | 1 |
|
src/pytorch.py | ManiacMaxo/Thesis | 0 | 6632721 | import torch.nn as nn
import torch.nn.functional as F
class DanQ(nn.Sequential):
def __init__(self, outputs: int = 1):
super(DanQ, self).__init__()
self.conv = nn.Conv1d(in_channels=4, out_channels=320, kernel_size=26)
self.pooling = nn.MaxPool1d(kernel_size=13)
self.dropout1 = nn.Dropout(0.2)
self.bidirectional_lstm = nn.LSTM(
input_size=320,
hidden_size=16,
num_layers=2,
batch_first=True,
bidirectional=True,
)
self.dropout2 = nn.Dropout(0.5)
self.flatten = nn.Flatten()
        self.linear = nn.Linear(75 * 2 * 16, 925)  # 75 pooled positions x (2 directions x hidden_size=16)
self.out = nn.Linear(925, outputs)
def forward(self, x):
x = F.relu(self.conv(x))
x = self.pooling(x)
x = self.dropout1(x)
        x = x.permute(0, 2, 1)  # (batch, channels, length) -> (batch, length, channels) for batch_first LSTM
        x, _ = self.bidirectional_lstm(x)  # LSTM returns (output, (h_n, c_n)); keep only the output
x = self.dropout2(x)
        x = self.flatten(x)
x = F.relu(self.linear(x))
x = F.sigmoid(self.out(x))
return x
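
# Minimal smoke test (illustrative, not part of the original file). Assumes one-hot
# encoded DNA input of length 1000, i.e. a (batch, 4, 1000) tensor; the conv (kernel 26)
# and max-pooling (13) settings above reduce the 1000 positions to 75 before the
# recurrent layer.
def _example_forward():
    import torch
    model = DanQ(outputs=1)
    dummy = torch.zeros(8, 4, 1000)
    out = model(dummy)
    return out.shape  # expected: torch.Size([8, 1])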
| import torch.nn as nn
import torch.nn.functional as F
class DanQ(nn.Sequential):
def __init__(self, outputs: int = 1):
super(DanQ, self).__init__()
self.conv = nn.Conv1d(in_channels=4, out_channels=320, kernel_size=26)
self.pooling = nn.MaxPool1d(kernel_size=13)
self.dropout1 = nn.Dropout(0.2)
self.bidirectional_lstm = nn.LSTM(
input_size=320,
hidden_size=16,
num_layers=2,
batch_first=True,
bidirectional=True,
)
self.dropout2 = nn.Dropout(0.5)
self.flatten = nn.Flatten()
        self.linear = nn.Linear(75 * 2 * 16, 925)  # 75 pooled positions x (2 directions x hidden_size=16)
self.out = nn.Linear(925, outputs)
def forward(self, x):
x = F.relu(self.conv(x))
x = self.pooling(x)
x = self.dropout1(x)
        x = x.permute(0, 2, 1)  # (batch, channels, length) -> (batch, length, channels) for batch_first LSTM
        x, _ = self.bidirectional_lstm(x)  # LSTM returns (output, (h_n, c_n)); keep only the output
x = self.dropout2(x)
        x = self.flatten(x)
x = F.relu(self.linear(x))
x = F.sigmoid(self.out(x))
return x
| none | 1 | 2.910286 | 3 |
|
bundle/kinesis_webrtc_manager/share/kinesis_webrtc_manager/package.py | gitobic/deepracer-simapp | 1 | 6632722 | # template generated by /usr/local/lib/python3.6/dist-packages/colcon_python_shell/shell/python_shell.py
# This script extends the environment for this package.
import pathlib
# assumes colcon_current_prefix has been injected into globals by caller
assert colcon_current_prefix
def prepend_unique_path(envvar, subdirectory):
global colcon_current_prefix
import os
paths = os.environ.get(envvar, '').split(os.pathsep)
# If subdirectory is relative, it is relative to prefix
new_path = str(pathlib.Path(colcon_current_prefix, subdirectory).resolve())
new_paths = [new_path, *(p for p in paths if p != new_path)]
os.environ[envvar] = os.pathsep.join(new_paths)
# source python hooks
for exe, args in [('share/kinesis_webrtc_manager/hook/cmake_prefix_path.py', []), ('share/kinesis_webrtc_manager/hook/ld_library_path_lib.py', []), ('share/kinesis_webrtc_manager/hook/ros_package_path.py', []), ('share/kinesis_webrtc_manager/hook/pkg_config_path.py', []), ('share/kinesis_webrtc_manager/hook/pkg_config_path_multiarch.py', [])]:
exec(pathlib.Path(colcon_current_prefix, exe).read_bytes())
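
# Illustrative only: with colcon_current_prefix injected by the caller (the package's
# install prefix), the helper above would put "<prefix>/lib" at the front of
# LD_LIBRARY_PATH without duplicating an entry that is already present.
def _example_prepend():
    prepend_unique_path('LD_LIBRARY_PATH', 'lib')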
| # template generated by /usr/local/lib/python3.6/dist-packages/colcon_python_shell/shell/python_shell.py
# This script extends the environment for this package.
import pathlib
# assumes colcon_current_prefix has been injected into globals by caller
assert colcon_current_prefix
def prepend_unique_path(envvar, subdirectory):
global colcon_current_prefix
import os
paths = os.environ.get(envvar, '').split(os.pathsep)
# If subdirectory is relative, it is relative to prefix
new_path = str(pathlib.Path(colcon_current_prefix, subdirectory).resolve())
new_paths = [new_path, *(p for p in paths if p != new_path)]
os.environ[envvar] = os.pathsep.join(new_paths)
# source python hooks
for exe, args in [('share/kinesis_webrtc_manager/hook/cmake_prefix_path.py', []), ('share/kinesis_webrtc_manager/hook/ld_library_path_lib.py', []), ('share/kinesis_webrtc_manager/hook/ros_package_path.py', []), ('share/kinesis_webrtc_manager/hook/pkg_config_path.py', []), ('share/kinesis_webrtc_manager/hook/pkg_config_path_multiarch.py', [])]:
exec(pathlib.Path(colcon_current_prefix, exe).read_bytes())
| en | 0.806064 | # template generated by /usr/local/lib/python3.6/dist-packages/colcon_python_shell/shell/python_shell.py # This script extends the environment for this package. # assumes colcon_current_prefix has been injected into globals by caller # If subdirectory is relative, it is relative to prefix # source python hooks | 1.977099 | 2 |
dl4nlp_pos_tagging/models/modules/token_embedders/embedding_sum.py | michaeljneely/model-uncertainty-pos-tagging | 1 | 6632723 | <reponame>michaeljneely/model-uncertainty-pos-tagging<gh_stars>1-10
import torch
from overrides import overrides
from typing import List
from allennlp.modules.token_embedders.token_embedder import TokenEmbedder
@TokenEmbedder.register("summed-embedding")
class SummedEmbedding(TokenEmbedder):
"""
This class is meant for summing multiple embeddings together.
Registered as a `TokenEmbedder` with name "summed-embedding".
"""
def __init__(self, token_embedders: List[TokenEmbedder]) -> None:
super().__init__()
self.token_embedders = token_embedders
same_dims = all(self.token_embedders[0].get_output_dim() == x.get_output_dim() for x in self.token_embedders)
if not same_dims:
raise ValueError("All token embedders must have the same outputs dimensionality.")
for idx, embedder in enumerate(token_embedders):
name = "embed_%s" % idx
self.add_module(name, embedder)
@overrides
def get_output_dim(self) -> int:
return self.token_embedders[0].get_output_dim()
@overrides
def forward(self, tokens: torch.Tensor) -> torch.Tensor:
outputs = None
for embedder in self.token_embedders:
embedding = embedder(tokens)
            if outputs is None:
outputs = embedding
else:
outputs += embedding
return outputs
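
# Illustrative sketch (not part of the original module): summing two basic AllenNLP
# Embedding layers whose output dimensions match, as required by __init__ above.
# Assumes an AllenNLP 1.x-style Embedding constructor.
def _example_sum():
    from allennlp.modules.token_embedders import Embedding
    emb_a = Embedding(embedding_dim=8, num_embeddings=10)
    emb_b = Embedding(embedding_dim=8, num_embeddings=10)
    summed = SummedEmbedding([emb_a, emb_b])
    tokens = torch.tensor([[1, 2, 3]])
    return summed(tokens).shape  # -> torch.Size([1, 3, 8])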
| import torch
from overrides import overrides
from typing import List
from allennlp.modules.token_embedders.token_embedder import TokenEmbedder
@TokenEmbedder.register("summed-embedding")
class SummedEmbedding(TokenEmbedder):
"""
This class is meant for summing multiple embeddings together.
Registered as a `TokenEmbedder` with name "summed-embedding".
"""
def __init__(self, token_embedders: List[TokenEmbedder]) -> None:
super().__init__()
self.token_embedders = token_embedders
same_dims = all(self.token_embedders[0].get_output_dim() == x.get_output_dim() for x in self.token_embedders)
if not same_dims:
raise ValueError("All token embedders must have the same outputs dimensionality.")
for idx, embedder in enumerate(token_embedders):
name = "embed_%s" % idx
self.add_module(name, embedder)
@overrides
def get_output_dim(self) -> int:
return self.token_embedders[0].get_output_dim()
@overrides
def forward(self, tokens: torch.Tensor) -> torch.Tensor:
outputs = None
for embedder in self.token_embedders:
embedding = embedder(tokens)
            if outputs is None:
outputs = embedding
else:
outputs += embedding
return outputs | en | 0.884312 | This class is meant for summing multiple embeddings together. Registered as a `TokenEmbedder` with name "summed-embedding". | 2.635972 | 3 |
deeptools/_version.py | samhuairen/deepTools | 0 | 6632724 |
# This file is originally generated from Git information by running 'setup.py
# version'. Distribution tarballs contain a pre-generated copy of this file.
__version__ = '3.3.2'
|
# This file is originally generated from Git information by running 'setup.py
# version'. Distribution tarballs contain a pre-generated copy of this file.
__version__ = '3.3.2'
| en | 0.868437 | # This file is originally generated from Git information by running 'setup.py # version'. Distribution tarballs contain a pre-generated copy of this file. | 0.849952 | 1 |
optimizers/BAT.py | VicZH/Evolutionary-Algorithm-Racing | 0 | 6632725 | import numpy
import random
from solution import solution
class BAT(solution):
def __init__(self, objf, sol_shift, lb, ub, dim, PopSize, EvlNum):
# Loudness (constant or decreasing)
self.A = 0.5
# Pulse rate (constant or decreasing)
self.r = 0.5
self.dim = dim
self.popnum = PopSize
self.maxiers = int(EvlNum / PopSize)
self.optimizer = "BAT"
self.objfname = objf.__name__
self.objf = objf
self.sol_shift = sol_shift
# convert lb, ub to array
self.lb = numpy.array([lb for _ in range(dim)])
self.ub = numpy.array([ub for _ in range(dim)])
self.best = float("inf")
self.Qmin = 0 # Frequency minimum
self.Qmax = 2 # Frequency maximum
# Initializing arrays
self.Q = numpy.zeros(self.popnum) # Frequency
self.v = numpy.zeros((self.popnum, self.dim)) # Velocities
self.S = numpy.zeros((self.popnum, self.dim)) # new solutions
# initialize population
self.solutions = []
for p in range(PopSize):
sol = []
for d in range(dim):
d_val = random.uniform(self.lb[d], self.ub[d])
sol.append(d_val)
self.solutions.append(sol)
self.solutions = numpy.array(self.solutions)
self.population_fitness = []
# calculate fitness for all the population
for i in range(PopSize):
fitness = objf(self.solutions[i, :]-self.sol_shift)
self.population_fitness += [fitness]
if fitness < self.best:
self.best = fitness
self.bestIndividual = self.solutions[i, :]
def update(self, iter_id):
if iter_id < self.maxiers:
# Loop over all bats(solutions)
for i in range(0, self.popnum):
self.Q[i] = self.Qmin + (self.Qmin - self.Qmax) * random.random()
self.v[i, :] = self.v[i, :] + (self.solutions[i, :] - self.bestIndividual) * self.Q[i]
self.S[i, :] = self.solutions[i, :] + self.v[i, :]
# Check boundaries
for j in range(self.dim):
self.solutions[i, j] = numpy.clip(self.solutions[i, j], self.lb[j], self.ub[j])
# Pulse rate
if random.random() > self.r:
self.S[i, :] = self.bestIndividual + 0.001 * numpy.random.randn(self.dim)
# Evaluate new solutions
Fnew = self.objf(self.S[i, :]-self.sol_shift)
# Update if the solution improves
if (Fnew <= self.population_fitness[i]) and (random.random() < self.A):
self.solutions[i, :] = numpy.copy(self.S[i, :])
self.population_fitness[i] = Fnew
# Update the current best solution
if Fnew <= self.best:
self.bestIndividual = numpy.copy(self.S[i, :])
self.best = Fnew
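
# Usage sketch (illustrative): minimise a simple sphere function with a zero shift.
# Assumes the project-specific `solution` base class imported above is importable; lb/ub
# may be passed as scalars because __init__ broadcasts them to arrays.
def _example_run():
    sphere = lambda x: float(numpy.sum(x ** 2))
    shift = numpy.zeros(5)
    bat = BAT(sphere, shift, lb=-10, ub=10, dim=5, PopSize=20, EvlNum=2000)
    for it in range(bat.maxiers):
        bat.update(it)
    return bat.best, bat.bestIndividual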
| import numpy
import random
from solution import solution
class BAT(solution):
def __init__(self, objf, sol_shift, lb, ub, dim, PopSize, EvlNum):
# Loudness (constant or decreasing)
self.A = 0.5
# Pulse rate (constant or decreasing)
self.r = 0.5
self.dim = dim
self.popnum = PopSize
self.maxiers = int(EvlNum / PopSize)
self.optimizer = "BAT"
self.objfname = objf.__name__
self.objf = objf
self.sol_shift = sol_shift
# convert lb, ub to array
self.lb = numpy.array([lb for _ in range(dim)])
self.ub = numpy.array([ub for _ in range(dim)])
self.best = float("inf")
self.Qmin = 0 # Frequency minimum
self.Qmax = 2 # Frequency maximum
# Initializing arrays
self.Q = numpy.zeros(self.popnum) # Frequency
self.v = numpy.zeros((self.popnum, self.dim)) # Velocities
self.S = numpy.zeros((self.popnum, self.dim)) # new solutions
# initialize population
self.solutions = []
for p in range(PopSize):
sol = []
for d in range(dim):
d_val = random.uniform(self.lb[d], self.ub[d])
sol.append(d_val)
self.solutions.append(sol)
self.solutions = numpy.array(self.solutions)
self.population_fitness = []
# calculate fitness for all the population
for i in range(PopSize):
fitness = objf(self.solutions[i, :]-self.sol_shift)
self.population_fitness += [fitness]
if fitness < self.best:
self.best = fitness
self.bestIndividual = self.solutions[i, :]
def update(self, iter_id):
if iter_id < self.maxiers:
# Loop over all bats(solutions)
for i in range(0, self.popnum):
self.Q[i] = self.Qmin + (self.Qmin - self.Qmax) * random.random()
self.v[i, :] = self.v[i, :] + (self.solutions[i, :] - self.bestIndividual) * self.Q[i]
self.S[i, :] = self.solutions[i, :] + self.v[i, :]
# Check boundaries
for j in range(self.dim):
self.solutions[i, j] = numpy.clip(self.solutions[i, j], self.lb[j], self.ub[j])
# Pulse rate
if random.random() > self.r:
self.S[i, :] = self.bestIndividual + 0.001 * numpy.random.randn(self.dim)
# Evaluate new solutions
Fnew = self.objf(self.S[i, :]-self.sol_shift)
# Update if the solution improves
if (Fnew <= self.population_fitness[i]) and (random.random() < self.A):
self.solutions[i, :] = numpy.copy(self.S[i, :])
self.population_fitness[i] = Fnew
# Update the current best solution
if Fnew <= self.best:
self.bestIndividual = numpy.copy(self.S[i, :])
self.best = Fnew
| en | 0.650629 | # Loudness (constant or decreasing) # Pulse rate (constant or decreasing) # convert lb, ub to array # Frequency minimum # Frequency maximum # Initializing arrays # Frequency # Velocities # new solutions # initialize population # calculate fitness for all the population # Loop over all bats(solutions) # Check boundaries # Pulse rate # Evaluate new solutions # Update if the solution improves # Update the current best solution | 2.663596 | 3 |
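A minimal usage sketch for the BAT class above (an illustration only, not part of the record: it presumes the class and its `solution` base are importable, and the sphere objective plus every parameter value below are assumptions made for demonstration):
# Hedged usage sketch for the BAT optimizer defined above.
import numpy

def sphere(x):
    # Illustrative benchmark objective: sum of squares.
    return float(numpy.sum(x ** 2))

shift = numpy.zeros(10)                  # no shift of the optimum (assumed)
bat = BAT(objf=sphere, sol_shift=shift,
          lb=-10, ub=10, dim=10,
          PopSize=30, EvlNum=3000)       # maxiers = 3000 / 30 = 100
for it in range(bat.maxiers):
    bat.update(it)                       # one generation per call
print(bat.best, bat.bestIndividual)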
ipyprogressbar/ipyprogressbar.py | WillahScott/ipyprogressbar | 3 | 6632726 | <reponame>WillahScott/ipyprogressbar
# -*- coding: utf-8 -*-
"""Python-asynchronous progressbar widgets for use in Jupyter/IPython in conjunction with `ipywidgets<https://ipywidgets.readthedocs.io>`_
.. module:: ipyprogressbar.asyncprogressbar
:platform: Unix, Windows
:synopsis: Progressbar that executes asynchronously of Python
.. |widget| replace:: `ipywidgets.widget<https://ipywidgets.readthedocs.io/en/latest/examples/Widget%20Basics.html>`_
"""
import random
import time as tm
import threading
import ipywidgets as widgets
from IPython.display import display
class AsyncProgressBar(object):
"""Progressbar that executes asynchronously in a thread, while other python code executes
Parameters
----------
time : int
Number of seconds the progressbar will take to complete
description : str, optional
Label to be shown next to the progressbar
close_on_finish : bool, optional
Automatically close the progressbar upon completion
"""
def __init__(self, time, description='', identifier=None, close_on_finish=False):
self._id = identifier if identifier is not None else 'pb_' + str(random.random()).replace('0.', '')
self.total_time = time
self.close_on_finish = close_on_finish
self.__widget = widgets.IntProgress(value=0, min=0, max=100)
self.__widget.description = description
self.__last_value = None
self.__last_description = description
@property
def widget(self):
"""Returns the widget object to be displayed or included in a dashboard
Returns
-------
`ipywidgets.widget<>`_
Widget
"""
return self.__widget
def display(self):
display(self.widget)
def __fill_bar(self, time):
while self.__widget.value < 100:
tm.sleep(time / 20)
self.__widget.value += 5
# Close if on_finish option was specified
if self.close_on_finish:
self.__widget.close()
def run(self, time=None):
"""Triggers the progressbar to start updating (like an animation)
Parameters
----------
time : int, optional
Number of seconds the progressbar will take to complete
If given overrides the time specified on instantiation
"""
_time = time if time is not None else self.total_time
thread = threading.Thread(target=self.__fill_bar, args=(_time,))
thread.start()
def reset(self, percent=0):
"""Reset the progressbar to the specified percent (defaults to zero)
Parameters
----------
percent : int [0,100], optional
Int between 0 (empty) and 100 (full) to reset the progressbar to
"""
self.__widget.value = percent
def close(self, on_finish=True):
"""Close the widget
Parameters
----------
on_finish : bool, optional
Should the widget close when it reaches 100% (default); if not,
the widget will close immediately (even if it's still running)
"""
if on_finish:
self.close_on_finish = True
else:
self.__last_value = self.__widget.value
self.__widget.close()
def reopen(self):
"""Reopen the widget after closing, with the previous last value"""
self.__widget = widgets.IntProgress(value=0, min=0, max=100)
self.__widget.value = self.__last_value if self.__last_value else 0
self.__widget.description = self.__last_description
| # -*- coding: utf-8 -*-
"""Python-asynchronous progressbar widgets for use in Jupyter/IPython in conjunction with `ipywidgets<https://ipywidgets.readthedocs.io>`_
.. module:: ipyprogressbar.asyncprogressbar
:platform: Unix, Windows
:synopsis: Progressbar that executes asynchronous of python
.. |widget| replace:: `ipywidgets.widget<https://ipywidgets.readthedocs.io/en/latest/examples/Widget%20Basics.html>`_
"""
import random
import time as tm
import threading
import ipywidgets as widgets
from IPython.display import display
class AsyncProgressBar(object):
"""Progressbar that executes asynchronously in a thread, while other python code executes
Parameters
----------
time : int
Number of seconds the progressbar will take to complete
description : str, optional
Label to be shown next to the progressbar
close_on_finish : bool, optional
Automatically close the progressbar upon completion
"""
def __init__(self, time, description='', identifier=None, close_on_finish=False):
self._id = identifier if identifier is not None else 'pb_' + str(random.random()).replace('0.', '')
self.total_time = time
self.close_on_finish = close_on_finish
self.__widget = widgets.IntProgress(value=0, min=0, max=100)
self.__widget.description = description
self.__last_value = None
self.__last_description = description
@property
def widget(self):
"""Returns the widget object to be displayed or included in a dashboard
Returns
-------
`ipywidgets.widget<>`_
Widget
"""
return self.__widget
def display(self):
display(self.widget)
def __fill_bar(self, time):
while self.__widget.value < 100:
tm.sleep(time / 20)
self.__widget.value += 5
# Close if on_finish option was specified
if self.close_on_finish:
self.__widget.close()
def run(self, time=None):
"""Triggers the progressbar to start updating (like an animation)
Parameters
----------
time : int, optional
Number of seconds the progressbar will take to complete
If given overrides the time specified on instantiation
"""
_time = time if time is not None else self.total_time
thread = threading.Thread(target=self.__fill_bar, args=(_time,))
thread.start()
def reset(self, percent=0):
"""Reset the progressbar to the specified percent (defaults to zero)
Parameters
----------
percent : int [0,100], optional
Int between 0 (empty) and 100 (full) to reset the progressbar to
"""
self.__widget.value = percent
def close(self, on_finish=True):
"""Close the widget
Parameters
----------
on_finish : bool, optional
Should the widget close when it reaches 100% (default); if not,
the widget will close immediately (even if it's still running)
"""
if on_finish:
self.close_on_finish = True
else:
self.__last_value = self.__widget.value
self.__widget.close()
def reopen(self):
"""Reopen the widget after closing, with the previous last value"""
self.__widget = widgets.IntProgress(value=0, min=0, max=100)
self.__widget.value = self.__last_value if self.__last_value else 0
self.__widget.description = self.__last_description | en | 0.546435 | # -*- coding: utf-8 -*- Python-asynchronous progressbar widgets for use in Jupyter/IPython in conjunction with `ipywidgets<https://ipywidgets.readthedocs.io>`_ .. module:: ipyprogressbar.asyncprogressbar :platform: Unix, Windows :synopsis: Progressbar that executes asynchronous of python .. |widget| replace:: `ipywidgets.widget<https://ipywidgets.readthedocs.io/en/latest/examples/Widget%20Basics.html>`_ Progressbar that executes asynchronously in a thread, while other python code executes Parameters ---------- time : int Number of seconds the progressbar will take to complete description : str, optional Label to be shown next to the progressbar close_on_finish : bool, optional Automatically close the progressbar upon completion Returns the widget object to be displayed or included in a dashboard Returns ------- `ipywidgets.widget<>`_ Widget # Close if on_finish option was specified Triggers the progressbar to start updating (like an animation) Parameters ---------- time : int, optional Number of seconds the progressbar will take to complete If given overrides the time specified on instantiation Reset the progressbar to the specified percent (defaults to zero) Parameters ---------- percent : int [0,100], optional Int between 0 (empty) and 100 (full) to reset the progressbar to Close the widget Parameters ---------- on_finish : bool, optional Should the widget close when it reaches 100% (default), if not the widget will close automatically (even if it's still running) Reopen the widget after closing, with the previous last value | 3.439018 | 3 |
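A minimal usage sketch for the AsyncProgressBar above (assumes a Jupyter notebook context; the 10-second duration and label are illustrative assumptions):
# Hedged usage sketch for AsyncProgressBar, intended for a notebook cell.
import time

bar = AsyncProgressBar(time=10, description='Loading', close_on_finish=True)
bar.display()            # renders the underlying ipywidgets.IntProgress
bar.run()                # fills the bar from a background thread
time.sleep(2)            # other Python work keeps running meanwhile
bar.reset(percent=50)    # the bar can be jumped to any percentage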
flask_s3_viewer/routers.py | blairdrummond/flask-s3-viewer | 7 | 6632727 | <reponame>blairdrummond/flask-s3-viewer<gh_stars>1-10
from .blueprints.view import blueprint as FlaskS3ViewerViewRouter
| from .blueprints.view import blueprint as FlaskS3ViewerViewRouter | none | 1 | 1.188688 | 1 |
|
test/implicit-cache/basic.py | moroten/scons | 1 | 6632728 | <gh_stars>1-10
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Verify basic interactions of the --implicit-cache-* options.
This test used to set TargetSignatures('build') because we were
relying on the old behavior in which non-essential changes in .h files
propagated to cause a rebuilt executable. We now just rely on
the default Decider('content') behavior and only check for the
rebuild of the object file itself when necessary.
"""
import os.path
import TestSCons
_exe = TestSCons._exe
_obj = TestSCons._obj
prog = 'prog' + _exe
subdir_prog = os.path.join('subdir', 'prog' + _exe)
variant_prog = os.path.join('variant', 'prog' + _exe)
variant_prog_obj = os.path.join('variant', 'prog' + _obj)
args = prog + ' ' + subdir_prog + ' ' + variant_prog
test = TestSCons.TestSCons()
test.subdir('include', 'subdir', ['subdir', 'include'], 'inc2')
test.write('SConstruct', """
env = Environment(CPPPATH = Split('inc2 include'))
obj = env.Object(target='prog', source='subdir/prog.c')
env.Program(target='prog', source=obj)
SConscript('subdir/SConscript', "env")
VariantDir('variant', 'subdir', 0)
include = Dir('include')
env = Environment(CPPPATH=['inc2', include])
SConscript('variant/SConscript', "env")
def copy(target, source, env):
open(str(target[0]), 'wt').write(open(str(source[0]), 'rt').read())
nodep = env.Command('nodeps.c', 'nodeps.in', action=copy)
env.Program('nodeps', 'nodeps.c')
env.Object(['one', 'two'], ['one.c'])
""")
test.write(['subdir', 'SConscript'],
"""
Import("env")
env.Program(target='prog', source='prog.c')
""")
test.write('nodeps.in', r"""
int
main(int argc, char *argv[])
{
argv[argc++] = "--";
return 0;
}
""")
test.write(['include', 'foo.h'], r"""
#define FOO_STRING "include/foo.h 1\n"
#include <bar.h>
""")
test.write(['include', 'bar.h'], r"""
#define BAR_STRING "include/bar.h 1\n"
""")
test.write(['include', 'baz.h'], r"""
#define BAZ_STRING "include/baz.h 1\n"
""")
test.write(['subdir', 'prog.c'], r"""
#include <foo.h>
#include <stdio.h>
int
main(int argc, char *argv[])
{
argv[argc++] = "--";
printf("subdir/prog.c\n");
printf(FOO_STRING);
printf(BAR_STRING);
return 0;
}
""")
test.write(['subdir', 'include', 'foo.h'], r"""
#define FOO_STRING "subdir/include/foo.h 1\n"
#include "bar.h"
""")
test.write(['subdir', 'include', 'bar.h'], r"""
#define BAR_STRING "subdir/include/bar.h 1\n"
""")
test.write('one.c' , r"""
#include <foo.h>
void one(void) { }
""")
test.run(arguments = "--implicit-cache " + args)
test.run(program = test.workpath(prog),
stdout = "subdir/prog.c\ninclude/foo.h 1\ninclude/bar.h 1\n")
test.run(program = test.workpath(subdir_prog),
stdout = "subdir/prog.c\nsubdir/include/foo.h 1\nsubdir/include/bar.h 1\n")
test.run(program = test.workpath(variant_prog),
stdout = "subdir/prog.c\ninclude/foo.h 1\ninclude/bar.h 1\n")
test.up_to_date(arguments = args)
# Make sure implicit dependencies work right when one is modifed:
test.write(['include', 'foo.h'], r"""
#define FOO_STRING "include/foo.h 2\n"
#include "bar.h"
""")
test.run(arguments = "--implicit-cache " + args)
test.run(program = test.workpath(prog),
stdout = "subdir/prog.c\ninclude/foo.h 2\ninclude/bar.h 1\n")
test.run(program = test.workpath(subdir_prog),
stdout = "subdir/prog.c\nsubdir/include/foo.h 1\nsubdir/include/bar.h 1\n")
test.run(program = test.workpath(variant_prog),
stdout = "subdir/prog.c\ninclude/foo.h 2\ninclude/bar.h 1\n")
test.up_to_date(arguments = args)
# Make sure that changing the order of includes causes rebuilds and
# doesn't produce redundant rebuilds:
test.write(['include', 'foo.h'], r"""
#define FOO_STRING "include/foo.h 2\n"
#include "bar.h"
#include "baz.h"
""")
test.run(arguments = "--implicit-cache " + args)
test.run(program = test.workpath(prog),
stdout = "subdir/prog.c\ninclude/foo.h 2\ninclude/bar.h 1\n")
test.run(program = test.workpath(subdir_prog),
stdout = "subdir/prog.c\nsubdir/include/foo.h 1\nsubdir/include/bar.h 1\n")
test.run(program = test.workpath(variant_prog),
stdout = "subdir/prog.c\ninclude/foo.h 2\ninclude/bar.h 1\n")
test.up_to_date(arguments = args)
test.write(['include', 'foo.h'], r"""
#define FOO_STRING "include/foo.h 2\n"
#include "baz.h"
#include "bar.h"
""")
test.run(arguments = "--implicit-cache " + args)
test.run(program = test.workpath(prog),
stdout = "subdir/prog.c\ninclude/foo.h 2\ninclude/bar.h 1\n")
test.run(program = test.workpath(subdir_prog),
stdout = "subdir/prog.c\nsubdir/include/foo.h 1\nsubdir/include/bar.h 1\n")
test.run(program = test.workpath(variant_prog),
stdout = "subdir/prog.c\ninclude/foo.h 2\ninclude/bar.h 1\n")
test.up_to_date(arguments = args)
# Add inc2/foo.h that should shadow include/foo.h, but
# because of implicit dependency caching, scons doesn't
# detect this:
test.write(['inc2', 'foo.h'], r"""
#define FOO_STRING "inc2/foo.h 1\n"
#include <bar.h>
""")
test.run(arguments = "--implicit-cache " + args)
test.run(program = test.workpath(prog),
stdout = "subdir/prog.c\ninclude/foo.h 2\ninclude/bar.h 1\n")
test.run(program = test.workpath(subdir_prog),
stdout = "subdir/prog.c\nsubdir/include/foo.h 1\nsubdir/include/bar.h 1\n")
test.run(program = test.workpath(variant_prog),
stdout = "subdir/prog.c\ninclude/foo.h 2\ninclude/bar.h 1\n")
# Now modifying include/foo.h should make scons aware of inc2/foo.h
test.write(['include', 'foo.h'], r"""
#define FOO_STRING "include/foo.h 3\n"
#include "bar.h"
""")
test.run(arguments = "--implicit-cache " + args)
test.run(program = test.workpath(prog),
stdout = "subdir/prog.c\ninc2/foo.h 1\ninclude/bar.h 1\n")
test.run(program = test.workpath(subdir_prog),
stdout = "subdir/prog.c\nsubdir/include/foo.h 1\nsubdir/include/bar.h 1\n")
test.run(program = test.workpath(variant_prog),
stdout = "subdir/prog.c\ninclude/foo.h 3\ninclude/bar.h 1\n")
# test a file with no dependencies where the source file is generated:
test.run(arguments = "--implicit-cache nodeps%s"%_exe)
test.write('nodeps.in', r"""
#include <foo.h>
int
main(int argc, char *argv[])
{
argv[argc++] = "--";
return 0;
}
""")
test.run(arguments = "--implicit-cache one%s"%_obj)
# Test forcing of implicit caching:
test.write(['include', 'foo.h'], r"""
#define FOO_STRING "include/foo.h 3\n"
#include "bar.h"
""")
# Cache the dependencies of prog_obj: foo.h and its included bar.h.
test.run(arguments = "--implicit-cache " + args)
# Now add baz.h to the implicit dependencies in foo.h.
test.write(['include', 'foo.h'], r"""
#define FOO_STRING "include/foo.h 3\n"
#include "baz.h"
#include "bar.h"
""")
# Rebuild variant_prog_obj because the already-cached foo.h changed,
# but use --implicit-deps-unchanged to avoid noticing the addition
# of baz.h to the implicit dependencies.
test.not_up_to_date(options = "--implicit-deps-unchanged",
arguments = variant_prog_obj)
test.write(['include', 'baz.h'], r"""
#define BAZ_STRING "include/baz.h 2\n"
""")
# variant_prog_obj is still up to date, because it doesn't know about
# baz.h and therefore the change we just made to it.
test.up_to_date(options = "--implicit-deps-unchanged",
arguments = variant_prog_obj)
# Now rebuild it normally.
test.not_up_to_date(arguments = variant_prog_obj)
# And rebuild its executable, just so everything's normal.
test.run(arguments = variant_prog)
# Test forcing rescanning:
test.write(['include', 'foo.h'], r"""
#define FOO_STRING "include/foo.h 3\n"
#include "bar.h"
""")
test.run(arguments = "--implicit-cache " + args)
test.write(['include', 'foo.h'], r"""
#define FOO_STRING "include/foo.h 3\n"
#include "baz.h"
#include "bar.h"
""")
test.not_up_to_date(options = "--implicit-deps-unchanged",
arguments = variant_prog_obj)
test.write(['include', 'baz.h'], r"""
#define BAZ_STRING "include/baz.h 2\n"
""")
test.up_to_date(options = "--implicit-deps-unchanged",
arguments = variant_prog_obj)
test.not_up_to_date(options = "--implicit-deps-changed",
arguments = variant_prog_obj)
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| #!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Verify basic interactions of the --implicit-cache-* options.
This test used to set TargetSignatures('build') because we were
relying on the old behavior in which non-essential changes in .h files
propagated to cause a rebuilt executable. We now just rely on
the default Decider('content') behavior and only check for the
rebuild of the object file itself when necessary.
"""
import os.path
import TestSCons
_exe = TestSCons._exe
_obj = TestSCons._obj
prog = 'prog' + _exe
subdir_prog = os.path.join('subdir', 'prog' + _exe)
variant_prog = os.path.join('variant', 'prog' + _exe)
variant_prog_obj = os.path.join('variant', 'prog' + _obj)
args = prog + ' ' + subdir_prog + ' ' + variant_prog
test = TestSCons.TestSCons()
test.subdir('include', 'subdir', ['subdir', 'include'], 'inc2')
test.write('SConstruct', """
env = Environment(CPPPATH = Split('inc2 include'))
obj = env.Object(target='prog', source='subdir/prog.c')
env.Program(target='prog', source=obj)
SConscript('subdir/SConscript', "env")
VariantDir('variant', 'subdir', 0)
include = Dir('include')
env = Environment(CPPPATH=['inc2', include])
SConscript('variant/SConscript', "env")
def copy(target, source, env):
open(str(target[0]), 'wt').write(open(str(source[0]), 'rt').read())
nodep = env.Command('nodeps.c', 'nodeps.in', action=copy)
env.Program('nodeps', 'nodeps.c')
env.Object(['one', 'two'], ['one.c'])
""")
test.write(['subdir', 'SConscript'],
"""
Import("env")
env.Program(target='prog', source='prog.c')
""")
test.write('nodeps.in', r"""
int
main(int argc, char *argv[])
{
argv[argc++] = "--";
return 0;
}
""")
test.write(['include', 'foo.h'], r"""
#define FOO_STRING "include/foo.h 1\n"
#include <bar.h>
""")
test.write(['include', 'bar.h'], r"""
#define BAR_STRING "include/bar.h 1\n"
""")
test.write(['include', 'baz.h'], r"""
#define BAZ_STRING "include/baz.h 1\n"
""")
test.write(['subdir', 'prog.c'], r"""
#include <foo.h>
#include <stdio.h>
int
main(int argc, char *argv[])
{
argv[argc++] = "--";
printf("subdir/prog.c\n");
printf(FOO_STRING);
printf(BAR_STRING);
return 0;
}
""")
test.write(['subdir', 'include', 'foo.h'], r"""
#define FOO_STRING "subdir/include/foo.h 1\n"
#include "bar.h"
""")
test.write(['subdir', 'include', 'bar.h'], r"""
#define BAR_STRING "subdir/include/bar.h 1\n"
""")
test.write('one.c' , r"""
#include <foo.h>
void one(void) { }
""")
test.run(arguments = "--implicit-cache " + args)
test.run(program = test.workpath(prog),
stdout = "subdir/prog.c\ninclude/foo.h 1\ninclude/bar.h 1\n")
test.run(program = test.workpath(subdir_prog),
stdout = "subdir/prog.c\nsubdir/include/foo.h 1\nsubdir/include/bar.h 1\n")
test.run(program = test.workpath(variant_prog),
stdout = "subdir/prog.c\ninclude/foo.h 1\ninclude/bar.h 1\n")
test.up_to_date(arguments = args)
# Make sure implicit dependencies work right when one is modifed:
test.write(['include', 'foo.h'], r"""
#define FOO_STRING "include/foo.h 2\n"
#include "bar.h"
""")
test.run(arguments = "--implicit-cache " + args)
test.run(program = test.workpath(prog),
stdout = "subdir/prog.c\ninclude/foo.h 2\ninclude/bar.h 1\n")
test.run(program = test.workpath(subdir_prog),
stdout = "subdir/prog.c\nsubdir/include/foo.h 1\nsubdir/include/bar.h 1\n")
test.run(program = test.workpath(variant_prog),
stdout = "subdir/prog.c\ninclude/foo.h 2\ninclude/bar.h 1\n")
test.up_to_date(arguments = args)
# Make sure that changing the order of includes causes rebuilds and
# doesn't produce redundant rebuilds:
test.write(['include', 'foo.h'], r"""
#define FOO_STRING "include/foo.h 2\n"
#include "bar.h"
#include "baz.h"
""")
test.run(arguments = "--implicit-cache " + args)
test.run(program = test.workpath(prog),
stdout = "subdir/prog.c\ninclude/foo.h 2\ninclude/bar.h 1\n")
test.run(program = test.workpath(subdir_prog),
stdout = "subdir/prog.c\nsubdir/include/foo.h 1\nsubdir/include/bar.h 1\n")
test.run(program = test.workpath(variant_prog),
stdout = "subdir/prog.c\ninclude/foo.h 2\ninclude/bar.h 1\n")
test.up_to_date(arguments = args)
test.write(['include', 'foo.h'], r"""
#define FOO_STRING "include/foo.h 2\n"
#include "baz.h"
#include "bar.h"
""")
test.run(arguments = "--implicit-cache " + args)
test.run(program = test.workpath(prog),
stdout = "subdir/prog.c\ninclude/foo.h 2\ninclude/bar.h 1\n")
test.run(program = test.workpath(subdir_prog),
stdout = "subdir/prog.c\nsubdir/include/foo.h 1\nsubdir/include/bar.h 1\n")
test.run(program = test.workpath(variant_prog),
stdout = "subdir/prog.c\ninclude/foo.h 2\ninclude/bar.h 1\n")
test.up_to_date(arguments = args)
# Add inc2/foo.h that should shadow include/foo.h, but
# because of implicit dependency caching, scons doesn't
# detect this:
test.write(['inc2', 'foo.h'], r"""
#define FOO_STRING "inc2/foo.h 1\n"
#include <bar.h>
""")
test.run(arguments = "--implicit-cache " + args)
test.run(program = test.workpath(prog),
stdout = "subdir/prog.c\ninclude/foo.h 2\ninclude/bar.h 1\n")
test.run(program = test.workpath(subdir_prog),
stdout = "subdir/prog.c\nsubdir/include/foo.h 1\nsubdir/include/bar.h 1\n")
test.run(program = test.workpath(variant_prog),
stdout = "subdir/prog.c\ninclude/foo.h 2\ninclude/bar.h 1\n")
# Now modifying include/foo.h should make scons aware of inc2/foo.h
test.write(['include', 'foo.h'], r"""
#define FOO_STRING "include/foo.h 3\n"
#include "bar.h"
""")
test.run(arguments = "--implicit-cache " + args)
test.run(program = test.workpath(prog),
stdout = "subdir/prog.c\ninc2/foo.h 1\ninclude/bar.h 1\n")
test.run(program = test.workpath(subdir_prog),
stdout = "subdir/prog.c\nsubdir/include/foo.h 1\nsubdir/include/bar.h 1\n")
test.run(program = test.workpath(variant_prog),
stdout = "subdir/prog.c\ninclude/foo.h 3\ninclude/bar.h 1\n")
# test a file with no dependencies where the source file is generated:
test.run(arguments = "--implicit-cache nodeps%s"%_exe)
test.write('nodeps.in', r"""
#include <foo.h>
int
main(int argc, char *argv[])
{
argv[argc++] = "--";
return 0;
}
""")
test.run(arguments = "--implicit-cache one%s"%_obj)
# Test forcing of implicit caching:
test.write(['include', 'foo.h'], r"""
#define FOO_STRING "include/foo.h 3\n"
#include "bar.h"
""")
# Cache the dependencies of prog_obj: foo.h and its included bar.h.
test.run(arguments = "--implicit-cache " + args)
# Now add baz.h to the implicit dependencies in foo.h.
test.write(['include', 'foo.h'], r"""
#define FOO_STRING "include/foo.h 3\n"
#include "baz.h"
#include "bar.h"
""")
# Rebuild variant_prog_obj because the already-cached foo.h changed,
# but use --implicit-deps-unchanged to avoid noticing the addition
# of baz.h to the implicit dependencies.
test.not_up_to_date(options = "--implicit-deps-unchanged",
arguments = variant_prog_obj)
test.write(['include', 'baz.h'], r"""
#define BAZ_STRING "include/baz.h 2\n"
""")
# variant_prog_obj is still up to date, because it doesn't know about
# baz.h and therefore the change we just made to it.
test.up_to_date(options = "--implicit-deps-unchanged",
arguments = variant_prog_obj)
# Now rebuild it normally.
test.not_up_to_date(arguments = variant_prog_obj)
# And rebuild its executable, just so everything's normal.
test.run(arguments = variant_prog)
# Test forcing rescanning:
test.write(['include', 'foo.h'], r"""
#define FOO_STRING "include/foo.h 3\n"
#include "bar.h"
""")
test.run(arguments = "--implicit-cache " + args)
test.write(['include', 'foo.h'], r"""
#define FOO_STRING "include/foo.h 3\n"
#include "baz.h"
#include "bar.h"
""")
test.not_up_to_date(options = "--implicit-deps-unchanged",
arguments = variant_prog_obj)
test.write(['include', 'baz.h'], r"""
#define BAZ_STRING "include/baz.h 2\n"
""")
test.up_to_date(options = "--implicit-deps-unchanged",
arguments = variant_prog_obj)
test.not_up_to_date(options = "--implicit-deps-changed",
arguments = variant_prog_obj)
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4: | en | 0.659193 | #!/usr/bin/env python # # __COPYRIGHT__ # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # Verify basic interactions of the --implicit-cache-* options. This test used to set TargetSignatures('build') because we were relying on the old behavior of non-essential changes in .h files propagate to cause a rebuilt executable. We now just rely on the default Decider('content') behavior and only check for the rebuild of the object file itself when necessary. env = Environment(CPPPATH = Split('inc2 include')) obj = env.Object(target='prog', source='subdir/prog.c') env.Program(target='prog', source=obj) SConscript('subdir/SConscript', "env") VariantDir('variant', 'subdir', 0) include = Dir('include') env = Environment(CPPPATH=['inc2', include]) SConscript('variant/SConscript', "env") def copy(target, source, env): open(str(target[0]), 'wt').write(open(str(source[0]), 'rt').read()) nodep = env.Command('nodeps.c', 'nodeps.in', action=copy) env.Program('nodeps', 'nodeps.c') env.Object(['one', 'two'], ['one.c']) Import("env") env.Program(target='prog', source='prog.c') int main(int argc, char *argv[]) { argv[argc++] = "--"; return 0; } #define FOO_STRING "include/foo.h 1\n" #include <bar.h> #define BAR_STRING "include/bar.h 1\n" #define BAZ_STRING "include/baz.h 1\n" #include <foo.h> #include <stdio.h> int main(int argc, char *argv[]) { argv[argc++] = "--"; printf("subdir/prog.c\n"); printf(FOO_STRING); printf(BAR_STRING); return 0; } #define FOO_STRING "subdir/include/foo.h 1\n" #include "bar.h" #define BAR_STRING "subdir/include/bar.h 1\n" #include <foo.h> void one(void) { } # Make sure implicit dependencies work right when one is modifed: #define FOO_STRING "include/foo.h 2\n" #include "bar.h" # Make sure that changing the order of includes causes rebuilds and # doesn't produce redundant rebuilds: #define FOO_STRING "include/foo.h 2\n" #include "bar.h" #include "baz.h" #define FOO_STRING "include/foo.h 2\n" #include "baz.h" #include "bar.h" # Add inc2/foo.h that should shadow include/foo.h, but # because of implicit dependency caching, scons doesn't # detect this: #define FOO_STRING "inc2/foo.h 1\n" #include <bar.h> # Now modifying include/foo.h should make scons aware of inc2/foo.h #define FOO_STRING "include/foo.h 3\n" #include "bar.h" # test a file with no dependencies where the source file is generated: #include <foo.h> int main(int argc, char *argv[]) { argv[argc++] = "--"; return 0; } # Test forcing of implicit caching: #define FOO_STRING 
"include/foo.h 3\n" #include "bar.h" # Cache the dependencies of prog_obj: foo.h and its included bar.h. # Now add baz.h to the implicit dependencies in foo.h. #define FOO_STRING "include/foo.h 3\n" #include "baz.h" #include "bar.h" # Rebuild variant_prog_obj because the already-cached foo.h changed, # but use --implicit-deps-unchanged to avoid noticing the addition # of baz.h to the implicit dependencies. #define BAZ_STRING "include/baz.h 2\n" # variant_prog_obj is still up to date, because it doesn't know about # baz.h and therefore the change we just made to it. # Now rebuild it normally. # And rebuild its executable, just so everything's normal. # Test forcing rescanning: #define FOO_STRING "include/foo.h 3\n" #include "bar.h" #define FOO_STRING "include/foo.h 3\n" #include "baz.h" #include "bar.h" #define BAZ_STRING "include/baz.h 2\n" # Local Variables: # tab-width:4 # indent-tabs-mode:nil # End: # vim: set expandtab tabstop=4 shiftwidth=4: | 1.657625 | 2 |
tests/test_config.py | eirki/kamera | 0 | 6632729 | #! /usr/bin/env python3
# coding: utf-8
import numpy as np
import pytest
from kamera import config
from tests.mock_dropbox import MockDropbox
def test_settings_default_tz(settings):
assert settings.default_tz == "US/Eastern"
def test_settings_recognition_tolerance(settings):
assert settings.recognition_tolerance == 0.4
def test_settings_tag_swaps(settings):
assert settings.tag_swaps == {"Paris/10e arrondissement": "Holiday/France"}
def test_settings_folder_names(settings):
assert settings.folder_names == {
1: "January",
2: "February",
3: "March",
4: "April",
5: "May",
6: "June",
7: "July",
8: "August",
9: "September",
10: "October",
11: "November",
12: "December",
}
def test_settings_locations(settings):
assert len(settings.locations) == 1
location = settings.locations[0]
assert location.name == "Paris"
assert location.lat == 48.8566
assert location.lng == 2.3522
assert len(location.spots) == 2
spot1, spot2 = location.spots
assert spot1.name == "Place de la Concorde"
assert spot1.lat == 48.8662
assert spot1.lng == 2.3242
assert spot2.name == "10e arrondissement"
assert spot2.lat == 48.8698
assert spot2.lng == 2.3523
def test_settings_recognition(settings):
assert len(settings.recognition_data) == 2
assert len(settings.recognition_data["Biden"]) == 1
assert isinstance(settings.recognition_data["Biden"][0], np.ndarray)
assert len(settings.recognition_data["Obama"]) == 2
assert isinstance(settings.recognition_data["Obama"][0], np.ndarray)
assert isinstance(settings.recognition_data["Obama"][1], np.ndarray)
@pytest.fixture()
def settings():
dbx = MockDropbox()
loaded_settings = config.Settings(dbx)
return loaded_settings
| #! /usr/bin/env python3
# coding: utf-8
import numpy as np
import pytest
from kamera import config
from tests.mock_dropbox import MockDropbox
def test_settings_default_tz(settings):
assert settings.default_tz == "US/Eastern"
def test_settings_recognition_tolerance(settings):
assert settings.recognition_tolerance == 0.4
def test_settings_tag_swaps(settings):
assert settings.tag_swaps == {"Paris/10e arrondissement": "Holiday/France"}
def test_settings_folder_names(settings):
assert settings.folder_names == {
1: "January",
2: "February",
3: "March",
4: "April",
5: "May",
6: "June",
7: "July",
8: "August",
9: "September",
10: "October",
11: "November",
12: "December",
}
def test_settings_locations(settings):
assert len(settings.locations) == 1
location = settings.locations[0]
assert location.name == "Paris"
assert location.lat == 48.8566
assert location.lng == 2.3522
assert len(location.spots) == 2
spot1, spot2 = location.spots
assert spot1.name == "Place de la Concorde"
assert spot1.lat == 48.8662
assert spot1.lng == 2.3242
assert spot2.name == "10e arrondissement"
assert spot2.lat == 48.8698
assert spot2.lng == 2.3523
def test_settings_recognition(settings):
assert len(settings.recognition_data) == 2
assert len(settings.recognition_data["Biden"]) == 1
assert isinstance(settings.recognition_data["Biden"][0], np.ndarray)
assert len(settings.recognition_data["Obama"]) == 2
assert isinstance(settings.recognition_data["Obama"][0], np.ndarray)
assert isinstance(settings.recognition_data["Obama"][1], np.ndarray)
@pytest.fixture()
def settings():
dbx = MockDropbox()
loaded_settings = config.Settings(dbx)
return loaded_settings
| en | 0.272496 | #! /usr/bin/env python3 # coding: utf-8 | 2.247773 | 2 |
pysecs/secs.py | greglucas/pysecs | 2 | 6632730 | import numpy as np
class SECS:
"""Spherical Elementary Current System (SECS).
The algorithm is implemented directly in spherical coordinates
from the equations of the 1999 Amm & Viljanen paper [1]_.
Parameters
----------
sec_df_loc : ndarray (nsec x 3 [lat, lon, r])
The latitude, longitude, and radius of the divergence free (df) SEC locations.
sec_cf_loc : ndarray (nsec x 3 [lat, lon, r])
The latitude, longitude, and radius of the curl free (cf) SEC locations.
References
----------
.. [1] Amm, O., and Viljanen, A. "Ionospheric disturbance magnetic field continuation
from the ground to the ionosphere using spherical elementary current systems."
Earth, Planets and Space 51.6 (1999): 431-440. doi:10.1186/BF03352247
"""
def __init__(self, sec_df_loc=None, sec_cf_loc=None):
if sec_df_loc is None and sec_cf_loc is None:
raise ValueError("Must initialize the object with SEC locations")
self.sec_df_loc = sec_df_loc
self.sec_cf_loc = sec_cf_loc
if self.sec_df_loc is not None:
self.sec_df_loc = np.asarray(sec_df_loc)
if self.sec_df_loc.shape[-1] != 3:
raise ValueError("SEC DF locations must have 3 columns (lat, lon, r)")
if self.sec_df_loc.ndim == 1:
# Add an empty dimension if only one SEC location is passed in
self.sec_df_loc = self.sec_df_loc[np.newaxis, ...]
if self.sec_cf_loc is not None:
self.sec_cf_loc = np.asarray(sec_cf_loc)
if self.sec_cf_loc.shape[-1] != 3:
raise ValueError("SEC CF locations must have 3 columns (lat, lon, r)")
if self.sec_cf_loc.ndim == 1:
# Add an empty dimension if only one SEC location is passed in
self.sec_cf_loc = self.sec_cf_loc[np.newaxis, ...]
# Storage of the scaling factors
self.sec_amps = None
self.sec_amps_var = None
@property
def has_df(self):
"""Whether this system has any divergence free currents."""
return self.sec_df_loc is not None
@property
def has_cf(self):
"""Whether this system has any curl free currents."""
return self.sec_cf_loc is not None
@property
def nsec(self):
"""The number of elementary currents in this system."""
nsec = 0
if self.has_df:
nsec += len(self.sec_df_loc)
if self.has_cf:
nsec += len(self.sec_cf_loc)
return nsec
def fit(self, obs_loc, obs_B, obs_std=None, epsilon=0.05):
"""Fits the SECS to the given observations.
Given a number of observation locations and measurements,
this function fits the SEC system to them. It uses singular
value decomposition (SVD) to fit the SEC amplitudes with the
`epsilon` parameter used to regularize the solution.
Parameters
----------
obs_loc : ndarray (nobs x 3 [lat, lon, r])
Contains latitude, longitude, and radius of the observation locations
(place where the measurements are made)
obs_B: ndarray (ntimes x nobs x 3 [Bx, By, Bz])
An array containing the measured/observed B-fields.
obs_std : ndarray (ntimes x nobs x 3 [varX, varY, varZ]), optional
Standard error of vector components at each observation location.
This can be used to weight different observations more/less heavily.
An infinite value eliminates the observation from the fit.
Default: ones(nobs x 3) equal weights
epsilon : float
Value used to regularize/smooth the SECS amplitudes. Multiplied by the
largest singular value obtained from SVD.
Default: 0.05
"""
if obs_loc.shape[-1] != 3:
raise ValueError("Observation locations must have 3 columns (lat, lon, r)")
if obs_B.ndim == 2:
# Just a single snapshot given, so expand the dimensionality
obs_B = obs_B[np.newaxis, ...]
# Assume unit standard error of all measurements
if obs_std is None:
obs_std = np.ones(obs_B.shape)
ntimes = len(obs_B)
# Calculate the transfer functions
T_obs = self._calc_T(obs_loc)
# Store the fit sec_amps in the object
self.sec_amps = np.empty((ntimes, self.nsec))
self.sec_amps_var = np.empty((ntimes, self.nsec))
# Calculate the singular value decomposition (SVD)
# NOTE: T_obs has shape (nobs, 3, nsec), we reshape it
# to (nobs*3, nsec); obs_std has shape (ntimes, nobs, 3),
# we reshape it to (ntimes, nobs*3), then loop over ntimes
# to solve using (potentially) time-dependent observation
# standard errors to weight the observations
for i in range(ntimes):
# Only (re-)calculate SVD when necessary
if i == 0 or not np.all(obs_std[i] == obs_std[i-1]):
# Weight T_obs with obs_std
svd_in = (T_obs.reshape(-1, self.nsec) /
obs_std[i].ravel()[:, np.newaxis])
# Find singular value decompostion
U, S, Vh = np.linalg.svd(svd_in, full_matrices=False)
# Eliminate singular values less than epsilon by setting their
# reciprocal to zero (setting S to infinity first avoids
# divide-by-zero warnings)
S[S < epsilon * S.max()] = np.inf
W = 1./S
# Update VWU if obs_std changed
VWU = Vh.T @ (np.diag(W) @ U.T)
# Solve for SEC amplitudes and error variances
# shape: ntimes x nsec
self.sec_amps[i, :] = (VWU @ (obs_B[i] / obs_std[i]).reshape(-1).T).T
# Maybe we want the variance of the predictions sometime later...?
# shape: ntimes x nsec
valid = np.isfinite(obs_std[i].reshape(-1))
self.sec_amps_var[i, :] = np.sum(
(VWU[:,valid] * obs_std[i].reshape(-1)[valid])**2,
axis=1)
return self
def fit_unit_currents(self):
"""Sets all SECs to a unit current amplitude."""
self.sec_amps = np.ones((1, self.nsec))
return self
def predict(self, pred_loc, J=False):
"""Calculate the predicted magnetic field or currents.
After a set of observations has been fit to this system we can
predict the magnetic fields or currents at any other location. This
function uses those fit amplitudes to predict at the requested locations.
Parameters
----------
pred_loc: ndarray (npred x 3 [lat, lon, r])
An array containing the locations where the predictions are desired.
J: boolean
Whether to predict currents (J=True) or magnetic fields (J=False)
Default: False (magnetic field prediction)
Returns
-------
ndarray (ntimes x npred x 3 [Bx, By, Bz] or [Jx, Jy, Jz])
The predicted values calculated from the current amplitudes that were
fit to this system.
"""
if pred_loc.shape[-1] != 3:
raise ValueError("Prediction locations must have 3 columns (lat, lon, r)")
if self.sec_amps is None:
raise ValueError("There are no currents associated with the SECs," +
"you need to call .fit() first to fit to some observations.")
# T_pred shape=(npred x 3 x nsec)
# sec_amps shape=(nsec x ntimes)
if J:
# Predicting currents
T_pred = self._calc_J(pred_loc)
else:
# Predicting magnetic fields
T_pred = self._calc_T(pred_loc)
# NOTE: dot product is slow on multi-dimensional arrays (i.e. > 2 dimensions)
# Therefore this is implemented as tensordot, and the arguments are
# arranged to eliminate needs of transposing things later.
# The dot product is done over the SEC locations, so the final output
# is of shape: (ntimes x npred x 3)
return np.squeeze(np.tensordot(self.sec_amps, T_pred, (1, 2)))
def predict_B(self, pred_loc):
"""Calculate the predicted magnetic fields.
After a set of observations has been fit to this system we can
predict the magnetic fields or currents at any other location. This
function uses those fit amplitudes to predict at the requested locations.
Parameters
----------
pred_loc: ndarray (npred x 3 [lat, lon, r])
An array containing the locations where the predictions are desired.
Returns
-------
ndarray (ntimes x npred x 3 [Bx, By, Bz])
The predicted values calculated from the current amplitudes that were
fit to this system.
"""
return self.predict(pred_loc)
def predict_J(self, pred_loc):
"""Calculate the predicted currents.
After a set of observations has been fit to this system we can
predict the magnetic fields or currents at any other location. This
function uses those fit amplitudes to predict at the requested locations.
Parameters
----------
pred_loc: ndarray (npred x 3 [lat, lon, r])
An array containing the locations where the predictions are desired.
Returns
-------
ndarray (ntimes x npred x 3 [Jx, Jy, Jz])
The predicted values calculated from the current amplitudes that were
fit to this system.
"""
return self.predict(pred_loc, J=True)
def _calc_T(self, obs_loc):
"""Calculates the T transfer matrix.
The magnetic field transfer matrix to go from SEC locations to observation
locations. It assumes unit current amplitudes that will then be
scaled with the proper amplitudes later.
"""
if self.has_df:
T = T_df(obs_loc=obs_loc, sec_loc=self.sec_df_loc)
if self.has_cf:
T1 = T_cf(obs_loc=obs_loc, sec_loc=self.sec_cf_loc)
# df is already present in T
if self.has_df:
T = np.concatenate([T, T1], axis=2)
else:
T = T1
return T
def _calc_J(self, obs_loc):
"""Calculates the J transfer matrix.
The current transfer matrix to go from SEC locations to observation
locations. It assumes unit current amplitudes that will then be
scaled with the proper amplitudes later.
"""
if self.has_df:
J = J_df(obs_loc=obs_loc, sec_loc=self.sec_df_loc)
if self.has_cf:
J1 = J_cf(obs_loc=obs_loc, sec_loc=self.sec_cf_loc)
# df is already present in J
if self.has_df:
J = np.concatenate([J, J1], axis=2)
else:
J = J1
return J
def T_df(obs_loc, sec_loc):
"""Calculates the divergence free magnetic field transfer function.
The transfer function goes from SEC location to observation location
and assumes unit current SECs at the given locations.
Parameters
----------
obs_loc : ndarray (nobs, 3 [lat, lon, r])
The locations of the observation points.
sec_loc : ndarray (nsec, 3 [lat, lon, r])
The locations of the SEC points.
Returns
-------
ndarray (nobs, 3, nsec)
The T transfer matrix.
"""
nobs = len(obs_loc)
nsec = len(sec_loc)
obs_r = obs_loc[:, 2][:, np.newaxis]
sec_r = sec_loc[:, 2][np.newaxis, :]
theta = calc_angular_distance(obs_loc[:, :2], sec_loc[:, :2])
alpha = calc_bearing(obs_loc[:, :2], sec_loc[:, :2])
# magnetic permeability
mu0 = 4*np.pi*1e-7
# simplify calculations by storing this ratio
x = obs_r/sec_r
sin_theta = np.sin(theta)
cos_theta = np.cos(theta)
factor = 1./np.sqrt(1 - 2*x*cos_theta + x**2)
# Amm & Viljanen: Equation 9
Br = mu0/(4*np.pi*obs_r) * (factor - 1)
# Amm & Viljanen: Equation 10 (transformed to try and eliminate trig operations and
# divide by zeros)
Btheta = -mu0/(4*np.pi*obs_r) * (factor*(x - cos_theta) + cos_theta)
# If sin(theta) == 0: Btheta = 0
# There is a possible 0/0 in the expansion when sec_loc == obs_loc
Btheta = np.divide(Btheta, sin_theta, out=np.zeros_like(sin_theta),
where=sin_theta != 0)
# When observation points radii are outside of the sec locations
under_locs = sec_r < obs_r
# NOTE: If any SECs are below observations the math will be done on all points.
# This could be updated to only work on the locations where this condition
# occurs, but would make the code messier, with minimal performance gain
# except for very large matrices.
if np.any(under_locs):
# Flipped from previous case
x = sec_r/obs_r
# Amm & Viljanen: Equation A.7
Br2 = mu0*x/(4*np.pi*obs_r) * (1./np.sqrt(1 - 2*x*cos_theta + x**2) - 1)
# Amm & Viljanen: Equation A.8
Btheta2 = - mu0 / (4*np.pi*obs_r) * ((obs_r-sec_r*cos_theta) /
np.sqrt(obs_r**2 -
2*obs_r*sec_r*cos_theta +
sec_r**2) - 1)
Btheta2 = np.divide(Btheta2, sin_theta, out=np.zeros_like(sin_theta),
where=sin_theta != 0)
# Update only the locations where secs are under observations
Btheta[under_locs] = Btheta2[under_locs]
Br[under_locs] = Br2[under_locs]
# Transform back to Bx, By, Bz at each local point
T = np.empty((nobs, 3, nsec))
# alpha == angle (from cartesian x-axis (By), going towards y-axis (Bx))
T[:, 0, :] = -Btheta*np.sin(alpha)
T[:, 1, :] = -Btheta*np.cos(alpha)
T[:, 2, :] = -Br
return T
def T_cf(obs_loc, sec_loc):
"""Calculates the curl free magnetic field transfer function.
The transfer function goes from SEC location to observation location
and assumes unit current SECs at the given locations.
Parameters
----------
obs_loc : ndarray (nobs, 3 [lat, lon, r])
The locations of the observation points.
sec_loc : ndarray (nsec, 3 [lat, lon, r])
The locations of the SEC points.
Returns
-------
ndarray (nobs, 3, nsec)
The T transfer matrix.
"""
raise NotImplementedError("Curl Free Magnetic Field Transfers are not implemented yet.")
def J_df(obs_loc, sec_loc):
"""Calculates the divergence free current density transfer function.
The transfer function goes from SEC location to observation location
and assumes unit current SECs at the given locations.
Parameters
----------
obs_loc : ndarray (nobs, 3 [lat, lon, r])
The locations of the observation points.
sec_loc : ndarray (nsec, 3 [lat, lon, r])
The locations of the SEC points.
Returns
-------
ndarray (nobs, 3, nsec)
The J transfer matrix.
"""
nobs = len(obs_loc)
nsec = len(sec_loc)
obs_r = obs_loc[:, 2][:, np.newaxis]
sec_r = sec_loc[:, 2][np.newaxis, :]
# Input to the distance calculations is degrees, output is in radians
theta = calc_angular_distance(obs_loc[:, :2], sec_loc[:, :2])
alpha = calc_bearing(obs_loc[:, :2], sec_loc[:, :2])
# Amm & Viljanen: Equation 6
tan_theta2 = np.tan(theta/2.)
J_phi = 1./(4*np.pi*sec_r)
J_phi = np.divide(J_phi, tan_theta2, out=np.ones_like(tan_theta2)*np.inf,
where=tan_theta2 != 0.)
# Only valid on the SEC shell
J_phi[sec_r != obs_r] = 0.
# Transform back to Bx, By, Bz at each local point
J = np.empty((nobs, 3, nsec))
# alpha == angle (from cartesian x-axis (By), going towards y-axis (Bx))
J[:, 0, :] = -J_phi*np.cos(alpha)
J[:, 1, :] = J_phi*np.sin(alpha)
J[:, 2, :] = 0.
return J
def J_cf(obs_loc, sec_loc):
"""Calculates the curl free magnetic field transfer function.
The transfer function goes from SEC location to observation location
and assumes unit current SECs at the given locations.
Parameters
----------
obs_loc : ndarray (nobs, 3 [lat, lon, r])
The locations of the observation points.
sec_loc : ndarray (nsec, 3 [lat, lon, r])
The locations of the SEC points.
Returns
-------
ndarray (nobs, 3, nsec)
The J transfer matrix.
"""
nobs = len(obs_loc)
nsec = len(sec_loc)
obs_r = obs_loc[:, 2][:, np.newaxis]
sec_r = sec_loc[:, 2][np.newaxis, :]
theta = calc_angular_distance(obs_loc[:, :2], sec_loc[:, :2])
alpha = calc_bearing(obs_loc[:, :2], sec_loc[:, :2])
# Amm & Viljanen: Equation 7
tan_theta2 = np.tan(theta/2.)
J_theta = 1./(4*np.pi*sec_r)
J_theta = np.divide(J_theta, tan_theta2, out=np.ones_like(tan_theta2)*np.inf,
where=tan_theta2 != 0)
# Uniformly directed FACs around the globe, except the pole
# Integrated over the globe, this will lead to zero
J_r = -np.ones(J_theta.shape)/(4*np.pi*sec_r**2)
J_r[theta == 0.] = 1.
# Only valid on the SEC shell
J_theta[sec_r != obs_r] = 0.
J_r[sec_r != obs_r] = 0.
# Transform back to Bx, By, Bz at each local point
J = np.empty((nobs, 3, nsec))
# alpha == angle (from cartesian x-axis (By), going towards y-axis (Bx))
J[:, 0, :] = -J_theta*np.sin(alpha)
J[:, 1, :] = -J_theta*np.cos(alpha)
J[:, 2, :] = -J_r
return J
def calc_angular_distance(latlon1, latlon2):
"""Calculate the angular distance between a set of points.
This function calculates the angular distance in radians
between any number of latitude and longitude points.
Parameters
----------
latlon1 : ndarray (n x 2 [lat, lon])
An array of n (latitude, longitude) points.
latlon2 : ndarray (m x 2 [lat, lon])
An array of m (latitude, longitude) points.
Returns
-------
ndarray (n x m)
The array of distances between the input arrays.
"""
lat1 = np.deg2rad(latlon1[:, 0])[:, np.newaxis]
lon1 = np.deg2rad(latlon1[:, 1])[:, np.newaxis]
lat2 = np.deg2rad(latlon2[:, 0])[np.newaxis, :]
lon2 = np.deg2rad(latlon2[:, 1])[np.newaxis, :]
dlon = lon2 - lon1
# theta == angular distance between two points
theta = np.arccos(np.sin(lat1)*np.sin(lat2) +
np.cos(lat1)*np.cos(lat2)*np.cos(dlon))
return theta
def calc_bearing(latlon1, latlon2):
"""Calculate the bearing (direction) between a set of points.
This function calculates the bearing in radians
between any number of latitude and longitude points.
It is the direction from point 1 to point 2 going from the
cartesian x-axis towards the cartesian y-axis.
Parameters
----------
latlon1 : ndarray (n x 2 [lat, lon])
An array of n (latitude, longitude) points.
latlon2 : ndarray (m x 2 [lat, lon])
An array of m (latitude, longitude) points.
Returns
-------
ndarray (n x m)
The array of bearings between the input arrays.
"""
lat1 = np.deg2rad(latlon1[:, 0])[:, np.newaxis]
lon1 = np.deg2rad(latlon1[:, 1])[:, np.newaxis]
lat2 = np.deg2rad(latlon2[:, 0])[np.newaxis, :]
lon2 = np.deg2rad(latlon2[:, 1])[np.newaxis, :]
dlon = lon2 - lon1
# alpha == bearing, going from point1 to point2
# angle (from cartesian x-axis (By), going towards y-axis (Bx))
# Used to rotate the SEC coordinate frame into the observation coordinate
# frame.
# SEC coordinates are: theta (colatitude (+ away from North Pole)),
# phi (longitude, + east), r (+ out)
# Obs coordinates are: X (+ north), Y (+ east), Z (+ down)
alpha = np.pi/2 - np.arctan2(np.sin(dlon)*np.cos(lat2),
np.cos(lat1)*np.sin(lat2) -
np.sin(lat1)*np.cos(lat2)*np.cos(dlon))
return alpha
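A minimal end-to-end sketch of the SECS class above: fit divergence-free SEC amplitudes to a couple of synthetic ground observations, then predict B at a third point. The grid, radii, field values and epsilon are all illustrative assumptions, not taken from the package:
# Hedged usage sketch for the SECS class defined above.
R_earth = 6.371e6
R_iono = R_earth + 110e3                      # assumed SEC shell altitude

# Divergence-free SEC poles on a small lat/lon grid at ionospheric height
lats, lons = np.meshgrid(np.arange(50, 61, 2), np.arange(0, 21, 5))
sec_locs = np.column_stack([lats.ravel(), lons.ravel(),
                            np.full(lats.size, R_iono)])

# Ground observation locations and synthetic B measurements (Bx, By, Bz)
obs_locs = np.array([[55.0, 10.0, R_earth],
                     [57.0, 12.0, R_earth]])
obs_B = np.array([[10.0, -5.0, 20.0],
                  [12.0, -4.0, 18.0]])

secs = SECS(sec_df_loc=sec_locs)
secs.fit(obs_locs, obs_B, epsilon=0.05)
pred = secs.predict_B(np.array([[56.0, 11.0, R_earth]]))  # -> (Bx, By, Bz)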
| import numpy as np
class SECS:
"""Spherical Elementary Current System (SECS).
The algorithm is implemented directly in spherical coordinates
from the equations of the 1999 Amm & Viljanen paper [1]_.
Parameters
----------
sec_df_loc : ndarray (nsec x 3 [lat, lon, r])
The latitude, longitude, and radius of the divergence free (df) SEC locations.
sec_cf_loc : ndarray (nsec x 3 [lat, lon, r])
The latitude, longitude, and radius of the curl free (cf) SEC locations.
References
----------
.. [1] Amm, O., and Viljanen, A. "Ionospheric disturbance magnetic field continuation
from the ground to the ionosphere using spherical elementary current systems."
Earth, Planets and Space 51.6 (1999): 431-440. doi:10.1186/BF03352247
"""
def __init__(self, sec_df_loc=None, sec_cf_loc=None):
if sec_df_loc is None and sec_cf_loc is None:
raise ValueError("Must initialize the object with SEC locations")
self.sec_df_loc = sec_df_loc
self.sec_cf_loc = sec_cf_loc
if self.sec_df_loc is not None:
self.sec_df_loc = np.asarray(sec_df_loc)
if self.sec_df_loc.shape[-1] != 3:
raise ValueError("SEC DF locations must have 3 columns (lat, lon, r)")
if self.sec_df_loc.ndim == 1:
# Add an empty dimension if only one SEC location is passed in
self.sec_df_loc = self.sec_df_loc[np.newaxis, ...]
if self.sec_cf_loc is not None:
self.sec_cf_loc = np.asarray(sec_cf_loc)
if self.sec_cf_loc.shape[-1] != 3:
raise ValueError("SEC CF locations must have 3 columns (lat, lon, r)")
if self.sec_cf_loc.ndim == 1:
# Add an empty dimension if only one SEC location is passed in
self.sec_cf_loc = self.sec_cf_loc[np.newaxis, ...]
# Storage of the scaling factors
self.sec_amps = None
self.sec_amps_var = None
@property
def has_df(self):
"""Whether this system has any divergence free currents."""
return self.sec_df_loc is not None
@property
def has_cf(self):
"""Whether this system has any curl free currents."""
return self.sec_cf_loc is not None
@property
def nsec(self):
"""The number of elementary currents in this system."""
nsec = 0
if self.has_df:
nsec += len(self.sec_df_loc)
if self.has_cf:
nsec += len(self.sec_cf_loc)
return nsec
def fit(self, obs_loc, obs_B, obs_std=None, epsilon=0.05):
"""Fits the SECS to the given observations.
Given a number of observation locations and measurements,
this function fits the SEC system to them. It uses singular
value decomposition (SVD) to fit the SEC amplitudes with the
`epsilon` parameter used to regularize the solution.
Parameters
----------
obs_loc : ndarray (nobs x 3 [lat, lon, r])
Contains latitude, longitude, and radius of the observation locations
(place where the measurements are made)
obs_B: ndarray (ntimes x nobs x 3 [Bx, By, Bz])
An array containing the measured/observed B-fields.
obs_std : ndarray (ntimes x nobs x 3 [varX, varY, varZ]), optional
Standard error of vector components at each observation location.
This can be used to weight different observations more/less heavily.
An infinite value eliminates the observation from the fit.
Default: ones(nobs x 3) equal weights
epsilon : float
Value used to regularize/smooth the SECS amplitudes. Multiplied by the
largest singular value obtained from SVD.
Default: 0.05
"""
if obs_loc.shape[-1] != 3:
raise ValueError("Observation locations must have 3 columns (lat, lon, r)")
if obs_B.ndim == 2:
# Just a single snapshot given, so expand the dimensionality
obs_B = obs_B[np.newaxis, ...]
# Assume unit standard error of all measurements
if obs_std is None:
obs_std = np.ones(obs_B.shape)
ntimes = len(obs_B)
# Calculate the transfer functions
T_obs = self._calc_T(obs_loc)
# Store the fit sec_amps in the object
self.sec_amps = np.empty((ntimes, self.nsec))
self.sec_amps_var = np.empty((ntimes, self.nsec))
# Calculate the singular value decomposition (SVD)
# NOTE: T_obs has shape (nobs, 3, nsec), we reshape it
# to (nobs*3, nsec); obs_std has shape (ntimes, nobs, 3),
# we reshape it to (ntimes, nobs*3), then loop over ntimes
# to solve using (potentially) time-dependent observation
# standard errors to weight the observations
for i in range(ntimes):
# Only (re-)calculate SVD when necessary
if i == 0 or not np.all(obs_std[i] == obs_std[i-1]):
# Weight T_obs with obs_std
svd_in = (T_obs.reshape(-1, self.nsec) /
obs_std[i].ravel()[:, np.newaxis])
# Find singular value decompostion
U, S, Vh = np.linalg.svd(svd_in, full_matrices=False)
# Eliminate singular values less than epsilon by setting their
# reciprocal to zero (setting S to infinity first avoids
# divide-by-zero warnings)
S[S < epsilon * S.max()] = np.inf
W = 1./S
# Update VWU if obs_std changed
VWU = Vh.T @ (np.diag(W) @ U.T)
# Solve for SEC amplitudes and error variances
# shape: ntimes x nsec
self.sec_amps[i, :] = (VWU @ (obs_B[i] / obs_std[i]).reshape(-1).T).T
# Maybe we want the variance of the predictions sometime later...?
# shape: ntimes x nsec
valid = np.isfinite(obs_std[i].reshape(-1))
self.sec_amps_var[i, :] = np.sum(
(VWU[:,valid] * obs_std[i].reshape(-1)[valid])**2,
axis=1)
return self
def fit_unit_currents(self):
"""Sets all SECs to a unit current amplitude."""
self.sec_amps = np.ones((1, self.nsec))
return self
def predict(self, pred_loc, J=False):
"""Calculate the predicted magnetic field or currents.
After a set of observations has been fit to this system we can
predict the magnetic fields or currents at any other location. This
function uses those fit amplitudes to predict at the requested locations.
Parameters
----------
pred_loc: ndarray (npred x 3 [lat, lon, r])
An array containing the locations where the predictions are desired.
J: boolean
Whether to predict currents (J=True) or magnetic fields (J=False)
Default: False (magnetic field prediction)
Returns
-------
ndarray (ntimes x npred x 3 [lat, lon, r])
The predicted values calculated from the current amplitudes that were
fit to this system.
"""
if pred_loc.shape[-1] != 3:
raise ValueError("Prediction locations must have 3 columns (lat, lon, r)")
if self.sec_amps is None:
raise ValueError("There are no currents associated with the SECs," +
"you need to call .fit() first to fit to some observations.")
# T_pred shape=(npred x 3 x nsec)
# sec_amps shape=(nsec x ntimes)
if J:
# Predicting currents
T_pred = self._calc_J(pred_loc)
else:
# Predicting magnetic fields
T_pred = self._calc_T(pred_loc)
# NOTE: dot product is slow on multi-dimensional arrays (i.e. > 2 dimensions)
# Therefore this is implemented as tensordot, and the arguments are
# arranged to eliminate needs of transposing things later.
# The dot product is done over the SEC locations, so the final output
# is of shape: (ntimes x npred x 3)
return np.squeeze(np.tensordot(self.sec_amps, T_pred, (1, 2)))
def predict_B(self, pred_loc):
"""Calculate the predicted magnetic fields.
After a set of observations has been fit to this system we can
predict the magnetic fields or currents at any other location. This
function uses those fit amplitudes to predict at the requested locations.
Parameters
----------
pred_loc: ndarray (npred x 3 [lat, lon, r])
An array containing the locations where the predictions are desired.
Returns
-------
ndarray (ntimes x npred x 3 [lat, lon, r])
The predicted values calculated from the current amplitudes that were
fit to this system.
"""
return self.predict(pred_loc)
def predict_J(self, pred_loc):
"""Calculate the predicted currents.
After a set of observations has been fit to this system we can
predict the magnetic fields or currents at any other location. This
function uses those fit amplitudes to predict at the requested locations.
Parameters
----------
pred_loc: ndarray (npred x 3 [lat, lon, r])
An array containing the locations where the predictions are desired.
Returns
-------
ndarray (ntimes x npred x 3 [lat, lon, r])
The predicted values calculated from the current amplitudes that were
fit to this system.
"""
return self.predict(pred_loc, J=True)
def _calc_T(self, obs_loc):
"""Calculates the T transfer matrix.
The magnetic field transfer matrix to go from SEC locations to observation
locations. It assumes unit current amplitudes that will then be
scaled with the proper amplitudes later.
"""
if self.has_df:
T = T_df(obs_loc=obs_loc, sec_loc=self.sec_df_loc)
if self.has_cf:
T1 = T_cf(obs_loc=obs_loc, sec_loc=self.sec_cf_loc)
# df is already present in T
if self.has_df:
T = np.concatenate([T, T1], axis=2)
else:
T = T1
return T
def _calc_J(self, obs_loc):
"""Calculates the J transfer matrix.
The current transfer matrix to go from SEC locations to observation
locations. It assumes unit current amplitudes that will then be
scaled with the proper amplitudes later.
"""
if self.has_df:
J = J_df(obs_loc=obs_loc, sec_loc=self.sec_df_loc)
if self.has_cf:
J1 = J_cf(obs_loc=obs_loc, sec_loc=self.sec_cf_loc)
            # df is already present in J
if self.has_df:
J = np.concatenate([J, J1], axis=2)
else:
J = J1
return J
def T_df(obs_loc, sec_loc):
"""Calculates the divergence free magnetic field transfer function.
The transfer function goes from SEC location to observation location
and assumes unit current SECs at the given locations.
Parameters
----------
obs_loc : ndarray (nobs, 3 [lat, lon, r])
The locations of the observation points.
sec_loc : ndarray (nsec, 3 [lat, lon, r])
The locations of the SEC points.
Returns
-------
ndarray (nobs, 3, nsec)
The T transfer matrix.
"""
nobs = len(obs_loc)
nsec = len(sec_loc)
obs_r = obs_loc[:, 2][:, np.newaxis]
sec_r = sec_loc[:, 2][np.newaxis, :]
theta = calc_angular_distance(obs_loc[:, :2], sec_loc[:, :2])
alpha = calc_bearing(obs_loc[:, :2], sec_loc[:, :2])
# magnetic permeability
mu0 = 4*np.pi*1e-7
# simplify calculations by storing this ratio
x = obs_r/sec_r
sin_theta = np.sin(theta)
cos_theta = np.cos(theta)
factor = 1./np.sqrt(1 - 2*x*cos_theta + x**2)
# Amm & Viljanen: Equation 9
Br = mu0/(4*np.pi*obs_r) * (factor - 1)
# Amm & Viljanen: Equation 10 (transformed to try and eliminate trig operations and
# divide by zeros)
Btheta = -mu0/(4*np.pi*obs_r) * (factor*(x - cos_theta) + cos_theta)
# If sin(theta) == 0: Btheta = 0
# There is a possible 0/0 in the expansion when sec_loc == obs_loc
Btheta = np.divide(Btheta, sin_theta, out=np.zeros_like(sin_theta),
where=sin_theta != 0)
# When observation points radii are outside of the sec locations
under_locs = sec_r < obs_r
# NOTE: If any SECs are below observations the math will be done on all points.
# This could be updated to only work on the locations where this condition
# occurs, but would make the code messier, with minimal performance gain
# except for very large matrices.
if np.any(under_locs):
# Flipped from previous case
x = sec_r/obs_r
# Amm & Viljanen: Equation A.7
Br2 = mu0*x/(4*np.pi*obs_r) * (1./np.sqrt(1 - 2*x*cos_theta + x**2) - 1)
# Amm & Viljanen: Equation A.8
Btheta2 = - mu0 / (4*np.pi*obs_r) * ((obs_r-sec_r*cos_theta) /
np.sqrt(obs_r**2 -
2*obs_r*sec_r*cos_theta +
sec_r**2) - 1)
Btheta2 = np.divide(Btheta2, sin_theta, out=np.zeros_like(sin_theta),
where=sin_theta != 0)
# Update only the locations where secs are under observations
Btheta[under_locs] = Btheta2[under_locs]
Br[under_locs] = Br2[under_locs]
# Transform back to Bx, By, Bz at each local point
T = np.empty((nobs, 3, nsec))
# alpha == angle (from cartesian x-axis (By), going towards y-axis (Bx))
T[:, 0, :] = -Btheta*np.sin(alpha)
T[:, 1, :] = -Btheta*np.cos(alpha)
T[:, 2, :] = -Br
return T
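# Hedged usage sketch (added for illustration; not part of the original module).
# The coordinates below are made-up values chosen only to show the expected
# (nobs, 3, nsec) output shape of T_df:
#
#   R_E = 6378e3                                  # assumed Earth radius [m]
#   obs = np.array([[60.0, 20.0, R_E]])           # one ground observation point
#   secs = np.array([[65.0, 20.0, R_E + 110e3],   # two SEC poles ~110 km up
#                    [55.0, 25.0, R_E + 110e3]])
#   T = T_df(obs_loc=obs, sec_loc=secs)           # -> T.shape == (1, 3, 2)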
def T_cf(obs_loc, sec_loc):
"""Calculates the curl free magnetic field transfer function.
The transfer function goes from SEC location to observation location
and assumes unit current SECs at the given locations.
Parameters
----------
obs_loc : ndarray (nobs, 3 [lat, lon, r])
The locations of the observation points.
sec_loc : ndarray (nsec, 3 [lat, lon, r])
The locations of the SEC points.
Returns
-------
ndarray (nobs, 3, nsec)
The T transfer matrix.
"""
raise NotImplementedError("Curl Free Magnetic Field Transfers are not implemented yet.")
def J_df(obs_loc, sec_loc):
"""Calculates the divergence free current density transfer function.
The transfer function goes from SEC location to observation location
and assumes unit current SECs at the given locations.
Parameters
----------
obs_loc : ndarray (nobs, 3 [lat, lon, r])
The locations of the observation points.
sec_loc : ndarray (nsec, 3 [lat, lon, r])
The locations of the SEC points.
Returns
-------
ndarray (nobs, 3, nsec)
The J transfer matrix.
"""
nobs = len(obs_loc)
nsec = len(sec_loc)
obs_r = obs_loc[:, 2][:, np.newaxis]
sec_r = sec_loc[:, 2][np.newaxis, :]
# Input to the distance calculations is degrees, output is in radians
theta = calc_angular_distance(obs_loc[:, :2], sec_loc[:, :2])
alpha = calc_bearing(obs_loc[:, :2], sec_loc[:, :2])
# Amm & Viljanen: Equation 6
tan_theta2 = np.tan(theta/2.)
J_phi = 1./(4*np.pi*sec_r)
J_phi = np.divide(J_phi, tan_theta2, out=np.ones_like(tan_theta2)*np.inf,
where=tan_theta2 != 0.)
# Only valid on the SEC shell
J_phi[sec_r != obs_r] = 0.
# Transform back to Bx, By, Bz at each local point
J = np.empty((nobs, 3, nsec))
# alpha == angle (from cartesian x-axis (By), going towards y-axis (Bx))
J[:, 0, :] = -J_phi*np.cos(alpha)
J[:, 1, :] = J_phi*np.sin(alpha)
J[:, 2, :] = 0.
return J
def J_cf(obs_loc, sec_loc):
"""Calculates the curl free magnetic field transfer function.
The transfer function goes from SEC location to observation location
and assumes unit current SECs at the given locations.
Parameters
----------
obs_loc : ndarray (nobs, 3 [lat, lon, r])
The locations of the observation points.
sec_loc : ndarray (nsec, 3 [lat, lon, r])
The locations of the SEC points.
Returns
-------
ndarray (nobs, 3, nsec)
The J transfer matrix.
"""
nobs = len(obs_loc)
nsec = len(sec_loc)
obs_r = obs_loc[:, 2][:, np.newaxis]
sec_r = sec_loc[:, 2][np.newaxis, :]
theta = calc_angular_distance(obs_loc[:, :2], sec_loc[:, :2])
alpha = calc_bearing(obs_loc[:, :2], sec_loc[:, :2])
# Amm & Viljanen: Equation 7
tan_theta2 = np.tan(theta/2.)
J_theta = 1./(4*np.pi*sec_r)
J_theta = np.divide(J_theta, tan_theta2, out=np.ones_like(tan_theta2)*np.inf,
where=tan_theta2 != 0)
# Uniformly directed FACs around the globe, except the pole
# Integrated over the globe, this will lead to zero
J_r = -np.ones(J_theta.shape)/(4*np.pi*sec_r**2)
J_r[theta == 0.] = 1.
# Only valid on the SEC shell
J_theta[sec_r != obs_r] = 0.
J_r[sec_r != obs_r] = 0.
# Transform back to Bx, By, Bz at each local point
J = np.empty((nobs, 3, nsec))
# alpha == angle (from cartesian x-axis (By), going towards y-axis (Bx))
J[:, 0, :] = -J_theta*np.sin(alpha)
J[:, 1, :] = -J_theta*np.cos(alpha)
J[:, 2, :] = -J_r
return J
def calc_angular_distance(latlon1, latlon2):
"""Calculate the angular distance between a set of points.
This function calculates the angular distance in radians
between any number of latitude and longitude points.
Parameters
----------
latlon1 : ndarray (n x 2 [lat, lon])
An array of n (latitude, longitude) points.
latlon2 : ndarray (m x 2 [lat, lon])
An array of m (latitude, longitude) points.
Returns
-------
ndarray (n x m)
The array of distances between the input arrays.
"""
lat1 = np.deg2rad(latlon1[:, 0])[:, np.newaxis]
lon1 = np.deg2rad(latlon1[:, 1])[:, np.newaxis]
lat2 = np.deg2rad(latlon2[:, 0])[np.newaxis, :]
lon2 = np.deg2rad(latlon2[:, 1])[np.newaxis, :]
dlon = lon2 - lon1
# theta == angular distance between two points
theta = np.arccos(np.sin(lat1)*np.sin(lat2) +
np.cos(lat1)*np.cos(lat2)*np.cos(dlon))
return theta
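# Quick sanity check (illustrative only, not part of the original source): two
# points on the equator separated by 90 degrees of longitude are pi/2 apart:
#
#   calc_angular_distance(np.array([[0., 0.]]), np.array([[0., 90.]]))
#   # -> array([[1.5707963...]])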
def calc_bearing(latlon1, latlon2):
"""Calculate the bearing (direction) between a set of points.
This function calculates the bearing in radians
between any number of latitude and longitude points.
It is the direction from point 1 to point 2 going from the
cartesian x-axis towards the cartesian y-axis.
Parameters
----------
latlon1 : ndarray (n x 2 [lat, lon])
An array of n (latitude, longitude) points.
latlon2 : ndarray (m x 2 [lat, lon])
An array of m (latitude, longitude) points.
Returns
-------
ndarray (n x m)
The array of bearings between the input arrays.
"""
lat1 = np.deg2rad(latlon1[:, 0])[:, np.newaxis]
lon1 = np.deg2rad(latlon1[:, 1])[:, np.newaxis]
lat2 = np.deg2rad(latlon2[:, 0])[np.newaxis, :]
lon2 = np.deg2rad(latlon2[:, 1])[np.newaxis, :]
dlon = lon2 - lon1
# alpha == bearing, going from point1 to point2
# angle (from cartesian x-axis (By), going towards y-axis (Bx))
# Used to rotate the SEC coordinate frame into the observation coordinate
# frame.
# SEC coordinates are: theta (colatitude (+ away from North Pole)),
# phi (longitude, + east), r (+ out)
# Obs coordinates are: X (+ north), Y (+ east), Z (+ down)
alpha = np.pi/2 - np.arctan2(np.sin(dlon)*np.cos(lat2),
np.cos(lat1)*np.sin(lat2) -
np.sin(lat1)*np.cos(lat2)*np.cos(dlon))
return alpha
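# ---------------------------------------------------------------------------
# Hedged end-to-end sketch, added for illustration only. It assumes the class
# defined above is named ``SECS`` and accepts ``sec_df_loc`` as described in
# its docstring -- adjust the names if the actual API differs.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    R_E = 6378e3                                     # assumed Earth radius [m]
    # A small grid of divergence-free SEC poles ~110 km above the ground
    lats, lons = np.meshgrid(np.arange(50., 71., 5.), np.arange(0., 31., 10.))
    sec_locs = np.column_stack([lats.ravel(), lons.ravel(),
                                np.full(lats.size, R_E + 110e3)])
    # Three made-up ground stations and one time step of fake measurements [T]
    obs_locs = np.array([[60., 10., R_E], [62., 15., R_E], [58., 20., R_E]])
    obs_B = 1e-9 * np.random.randn(1, 3, 3)
    system = SECS(sec_df_loc=sec_locs)               # class name assumed, see note
    system.fit(obs_locs, obs_B, epsilon=0.05)
    print(system.predict_B(np.array([[61., 12., R_E]])))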
| en | 0.797658 | Spherical Elementary Current System (SECS). The algorithm is implemented directly in spherical coordinates from the equations of the 1999 Amm & Viljanen paper [1]_. Parameters ---------- sec_df_loc : ndarray (nsec x 3 [lat, lon, r]) The latitude, longiutde, and radius of the divergence free (df) SEC locations. sec_cf_loc : ndarray (nsec x 3 [lat, lon, r]) The latitude, longiutde, and radius of the curl free (cf) SEC locations. References ---------- .. [1] <NAME>., and <NAME>. "Ionospheric disturbance magnetic field continuation from the ground to the ionosphere using spherical elementary current systems." Earth, Planets and Space 51.6 (1999): 431-440. doi:10.1186/BF03352247 # Add an empty dimension if only one SEC location is passed in # Add an empty dimension if only one SEC location is passed in # Storage of the scaling factors Whether this system has any divergence free currents. Whether this system has any curl free currents. The number of elementary currents in this system. Fits the SECS to the given observations. Given a number of observation locations and measurements, this function fits the SEC system to them. It uses singular value decomposition (SVD) to fit the SEC amplitudes with the `epsilon` parameter used to regularize the solution. Parameters ---------- obs_locs : ndarray (nobs x 3 [lat, lon, r]) Contains latitude, longitude, and radius of the observation locations (place where the measurements are made) obs_B: ndarray (ntimes x nobs x 3 [Bx, By, Bz]) An array containing the measured/observed B-fields. obs_std : ndarray (ntimes x nobs x 3 [varX, varY, varZ]), optional Standard error of vector components at each observation location. This can be used to weight different observations more/less heavily. An infinite value eliminates the observation from the fit. Default: ones(nobs x 3) equal weights epsilon : float Value used to regularize/smooth the SECS amplitudes. Multiplied by the largest singular value obtained from SVD. Default: 0.05 # Just a single snapshot given, so expand the dimensionality # Assume unit standard error of all measurements # Calculate the transfer functions # Store the fit sec_amps in the object # Calculate the singular value decomposition (SVD) # NOTE: T_obs has shape (nobs, 3, nsec), we reshape it # to (nobs*3, nsec); obs_std has shape (ntimes, nobs, 3), # we reshape it to (ntimes, nobs*3), then loop over ntimes # to solve using (potentially) time-dependent observation # standard errors to weight the observations # Only (re-)calculate SVD when necessary # Weight T_obs with obs_std # Find singular value decompostion # Eliminate singular values less than epsilon by setting their # reciprocal to zero (setting S to infinity firsts avoids # divide-by-zero warings) # Update VWU if obs_std changed # Solve for SEC amplitudes and error variances # shape: ntimes x nsec # Maybe we want the variance of the predictions sometime later...? # shape: ntimes x nsec Sets all SECs to a unit current amplitude. Calculate the predicted magnetic field or currents. After a set of observations has been fit to this system we can predict the magnetic fields or currents at any other location. This function uses those fit amplitudes to predict at the requested locations. Parameters ---------- pred_loc: ndarray (npred x 3 [lat, lon, r]) An array containing the locations where the predictions are desired. 
J: boolean Whether to predict currents (J=True) or magnetic fields (J=False) Default: False (magnetic field prediction) Returns ------- ndarray (ntimes x npred x 3 [lat, lon, r]) The predicted values calculated from the current amplitudes that were fit to this system. # T_pred shape=(npred x 3 x nsec) # sec_amps shape=(nsec x ntimes) # Predicting currents # Predicting magnetic fields # NOTE: dot product is slow on multi-dimensional arrays (i.e. > 2 dimensions) # Therefore this is implemented as tensordot, and the arguments are # arranged to eliminate needs of transposing things later. # The dot product is done over the SEC locations, so the final output # is of shape: (ntimes x npred x 3) Calculate the predicted magnetic fields. After a set of observations has been fit to this system we can predict the magnetic fields or currents at any other location. This function uses those fit amplitudes to predict at the requested locations. Parameters ---------- pred_loc: ndarray (npred x 3 [lat, lon, r]) An array containing the locations where the predictions are desired. Returns ------- ndarray (ntimes x npred x 3 [lat, lon, r]) The predicted values calculated from the current amplitudes that were fit to this system. Calculate the predicted currents. After a set of observations has been fit to this system we can predict the magnetic fields or currents at any other location. This function uses those fit amplitudes to predict at the requested locations. Parameters ---------- pred_loc: ndarray (npred x 3 [lat, lon, r]) An array containing the locations where the predictions are desired. Returns ------- ndarray (ntimes x npred x 3 [lat, lon, r]) The predicted values calculated from the current amplitudes that were fit to this system. Calculates the T transfer matrix. The magnetic field transfer matrix to go from SEC locations to observation locations. It assumes unit current amplitudes that will then be scaled with the proper amplitudes later. # df is already present in T Calculates the J transfer matrix. The current transfer matrix to go from SEC locations to observation locations. It assumes unit current amplitudes that will then be scaled with the proper amplitudes later. # df is already present in T Calculates the divergence free magnetic field transfer function. The transfer function goes from SEC location to observation location and assumes unit current SECs at the given locations. Parameters ---------- obs_loc : ndarray (nobs, 3 [lat, lon, r]) The locations of the observation points. sec_loc : ndarray (nsec, 3 [lat, lon, r]) The locations of the SEC points. Returns ------- ndarray (nobs, 3, nsec) The T transfer matrix. # magnetic permeability # simplify calculations by storing this ratio # Amm & Viljanen: Equation 9 # Amm & Viljanen: Equation 10 (transformed to try and eliminate trig operations and # divide by zeros) # If sin(theta) == 0: Btheta = 0 # There is a possible 0/0 in the expansion when sec_loc == obs_loc # When observation points radii are outside of the sec locations # NOTE: If any SECs are below observations the math will be done on all points. # This could be updated to only work on the locations where this condition # occurs, but would make the code messier, with minimal performance gain # except for very large matrices. 
# Flipped from previous case # Amm & Viljanen: Equation A.7 # Amm & Viljanen: Equation A.8 # Update only the locations where secs are under observations # Transform back to Bx, By, Bz at each local point # alpha == angle (from cartesian x-axis (By), going towards y-axis (Bx)) Calculates the curl free magnetic field transfer function. The transfer function goes from SEC location to observation location and assumes unit current SECs at the given locations. Parameters ---------- obs_loc : ndarray (nobs, 3 [lat, lon, r]) The locations of the observation points. sec_loc : ndarray (nsec, 3 [lat, lon, r]) The locations of the SEC points. Returns ------- ndarray (nobs, 3, nsec) The T transfer matrix. Calculates the divergence free current density transfer function. The transfer function goes from SEC location to observation location and assumes unit current SECs at the given locations. Parameters ---------- obs_loc : ndarray (nobs, 3 [lat, lon, r]) The locations of the observation points. sec_loc : ndarray (nsec, 3 [lat, lon, r]) The locations of the SEC points. Returns ------- ndarray (nobs, 3, nsec) The J transfer matrix. # Input to the distance calculations is degrees, output is in radians # Amm & Viljanen: Equation 6 # Only valid on the SEC shell # Transform back to Bx, By, Bz at each local point # alpha == angle (from cartesian x-axis (By), going towards y-axis (Bx)) Calculates the curl free magnetic field transfer function. The transfer function goes from SEC location to observation location and assumes unit current SECs at the given locations. Parameters ---------- obs_loc : ndarray (nobs, 3 [lat, lon, r]) The locations of the observation points. sec_loc : ndarray (nsec, 3 [lat, lon, r]) The locations of the SEC points. Returns ------- ndarray (nobs, 3, nsec) The J transfer matrix. # Amm & Viljanen: Equation 7 # Uniformly directed FACs around the globe, except the pole # Integrated over the globe, this will lead to zero # Only valid on the SEC shell # Transform back to Bx, By, Bz at each local point # alpha == angle (from cartesian x-axis (By), going towards y-axis (Bx)) Calculate the angular distance between a set of points. This function calculates the angular distance in radians between any number of latitude and longitude points. Parameters ---------- latlon1 : ndarray (n x 2 [lat, lon]) An array of n (latitude, longitude) points. latlon2 : ndarray (m x 2 [lat, lon]) An array of m (latitude, longitude) points. Returns ------- ndarray (n x m) The array of distances between the input arrays. # theta == angular distance between two points Calculate the bearing (direction) between a set of points. This function calculates the bearing in radians between any number of latitude and longitude points. It is the direction from point 1 to point 2 going from the cartesian x-axis towards the cartesian y-axis. Parameters ---------- latlon1 : ndarray (n x 2 [lat, lon]) An array of n (latitude, longitude) points. latlon2 : ndarray (m x 2 [lat, lon]) An array of m (latitude, longitude) points. Returns ------- ndarray (n x m) The array of bearings between the input arrays. # alpha == bearing, going from point1 to point2 # angle (from cartesian x-axis (By), going towards y-axis (Bx)) # Used to rotate the SEC coordinate frame into the observation coordinate # frame. # SEC coordinates are: theta (colatitude (+ away from North Pole)), # phi (longitude, + east), r (+ out) # Obs coordinates are: X (+ north), Y (+ east), Z (+ down) | 3.132842 | 3 |
scripts/pysurfer_plot_500parcellation_surface_values.py | SarahMorgan/BrainsForPublication | 0 | 6632731 | #!/usr/bin/env python
#=============================================================================
# Created by <NAME>
# September 2014
# Contact: <EMAIL>
#=============================================================================
#=============================================================================
# IMPORTS
#=============================================================================
import os
import sys
import argparse
import numpy as np
import pandas as pd
import nibabel as nib
from surfer import Brain
import seaborn as sns
import itertools as it
import matplotlib.pylab as plt
import matplotlib.image as mpimg
import matplotlib.gridspec as gridspec
import matplotlib.colors as mcolors
import matplotlib.cm as cm
#=============================================================================
# FUNCTIONS
#=============================================================================
def setup_argparser():
'''
Code to read in arguments from the command line
    Also allows you to change some settings
'''
# Build a basic parser.
help_text = ('Plot a single value for each region in the NSPN 500 parcellation of the fsaverage surface')
sign_off = 'Author: <NAME> <<EMAIL>>'
parser = argparse.ArgumentParser(description=help_text, epilog=sign_off)
# Now add the arguments
parser.add_argument(dest='roi_file',
type=str,
metavar='roi_file',
help='roi file containing list of measure values - one for each region - csv format')
parser.add_argument('--fsaverageid',
type=str,
metavar='fsaverage_id',
help='FSaverage subject id',
default='fsaverageSubP')
parser.add_argument('-sd', '--subjects_dir',
type=str,
metavar='subjects_dir',
help='freesurfer subjects dir',
default=os.environ["SUBJECTS_DIR"])
parser.add_argument('-c', '--cmap',
type=str,
metavar='cmap',
help='colormap',
default='RdBu_r')
parser.add_argument('-c2', '--cmap2',
type=str,
metavar='cmap2',
help='colormap for the second overlay',
default='autumn')
parser.add_argument('-cf', '--color_file',
type=str,
metavar='color_file',
help='file containing list of custom colors',
default=None)
parser.add_argument('--center',
action='store_true',
help='center the color bar around 0')
parser.add_argument('-t', '--thresh',
type=float,
metavar='thresh',
help='mask values below this value',
default=-98)
parser.add_argument('-t2', '--thresh2',
type=float,
metavar='thresh2',
help='mask values below this value for the second color',
default=None)
parser.add_argument('-l', '--lower',
type=float,
metavar='lowerthr',
help='lower limit for colorbar',
default=None)
parser.add_argument('-u', '--upper',
type=float,
metavar='upperthr',
help='upper limit for colorbar',
default=None)
parser.add_argument('-s', '--surface',
type=str,
metavar='surface',
help='surface - one of "pial", "inflated" or "both"',
default='both')
parser.add_argument('-cst', '--cortex_style',
type=str,
metavar='cortex_style',
help='cortex style - one of "classic", "bone", "high_contrast" or "low_contrast"',
default='classic')
arguments = parser.parse_args()
return arguments, parser
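# Example invocation (hypothetical file names, added for illustration only):
#
#   python pysurfer_plot_500parcellation_surface_values.py my_measure.csv \
#       --fsaverageid fsaverageSubP -c RdBu_r -l 0 -u 2 -s pial -cst classic
#
# where my_measure.csv holds one value per line, one line per region, in the
# same order as the (post-41) names in 500.names.txt.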
#------------------------------------------------------------------------------
def calc_range(roi_data, l, u, thresh, center):
# Figure out the min and max for the color bar
if l == None:
l = roi_data[roi_data>thresh].min()
l = np.floor(l*20)/20.0
if u == None:
u = roi_data[roi_data>thresh].max()
u = np.ceil(u*20)/20.0
if center:
# Make sure the colorbar is centered
if l**2 < u **2:
l = u*-1
else:
u = l*-1
return l, u
#------------------------------------------------------------------------------
def plot_surface(vtx_data, subject_id, hemi, surface, subjects_dir, output_dir, prefix, l, u, cmap, thresh, thresh2=None, cmap2='autumn', cortex_style='classic'):
"""
This function needs more documentation, but for now
it is sufficient to know this one important fact:
For the variable "cmap":
If you pass a word that defines a matplotlib
        colormap (e.g. jet, RdBu_r, etc.) then the code
will use that for the color scheme.
If you pass a **list** of colors then you'll
just loop through those colors instead.
"""
if cortex_style.count('_') == 2:
cortex_style_list = cortex_style.split('_')
cortex_name = cortex_style_list[0]
cortex_min = np.float(cortex_style_list[1])
cortex_max = np.float(cortex_style_list[2])
cortex_style = ( cortex_name, cortex_min, cortex_max, False )
# Open up a brain in pysurfer
brain = Brain(subject_id, hemi, surface,
subjects_dir = subjects_dir,
background="white",
size=(800, 665),
cortex=cortex_style)
# Create an empty brain if the values are all below threshold
if np.max(vtx_data) < thresh:
# Add your data to the brain
brain.add_data(vtx_data*0,
l,
u,
thresh = thresh,
colormap=cmap,
alpha=0.0)
# If you only have one threshold
# then add the data!
elif not thresh2:
# Add your data to the brain
brain.add_data(vtx_data,
l,
u,
thresh = thresh,
colormap=cmap,
alpha=.8)
else:
# Plot the data twice for the two
# different settings
vtx_data1 = np.copy(vtx_data)
vtx_data1[vtx_data1>thresh2] = 0
brain.add_data(vtx_data1,
l,
u,
thresh = thresh,
colormap = cmap,
alpha = .8)
brain.add_data(vtx_data,
l,
u,
thresh = thresh2,
colormap = cmap2,
alpha = .8)
# Save the images for medial and lateral
# putting a color bar on all of them
brain.save_imageset(prefix = os.path.join(output_dir, prefix),
views = views_list,
colorbar = range(len(views_list)) )
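# The two accepted forms of ``cmap`` described in the docstring above
# (arbitrary example values, added for clarity):
#
#   cmap = 'RdBu_r'                            # any matplotlib colormap name
#   cmap = ['#67001f', '#f7f7f7', '#053061']   # or an explicit list of colors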
#-----------------------------------------------------------------------------
def combine_pngs(measure, surface, output_dir, cortex_style):
'''
Find four images and combine them into one nice picture
'''
figsize = (5,4)
fig = plt.figure(figsize = figsize, facecolor='white')
grid = gridspec.GridSpec(2, 2)
grid.update(left=0, right=1, top=1, bottom = 0.08, wspace=0, hspace=0)
f_list = [ '_'.join([os.path.join(output_dir, measure), 'lh', surface, cortex_style, 'lateral.png']),
'_'.join([os.path.join(output_dir, measure), 'rh', surface, cortex_style, 'lateral.png']),
'_'.join([os.path.join(output_dir, measure), 'lh', surface, cortex_style, 'medial.png']),
'_'.join([os.path.join(output_dir, measure), 'rh', surface, cortex_style, 'medial.png']) ]
# Plot each figure in turn
for g_loc, f in zip(grid, f_list):
ax = plt.Subplot(fig, g_loc)
fig.add_subplot(ax)
img = mpimg.imread(f)
# Crop the figures appropriately
# NOTE: this can change depending on which system you've made the
# images on originally - it's a bug that needs to be sorted out!
if 'lateral' in f:
img_cropped = img[75:589,55:(-50),:]
else:
img_cropped = img[45:600,25:(-25),:]
ax.imshow(img_cropped, interpolation='none')
ax.set_axis_off()
# Add the bottom of one of the images as the color bar
# at the bottom of the combo figure
grid_cbar = gridspec.GridSpec(1,1)
grid_cbar.update(left=0, right=1, top=0.08, bottom=0, wspace=0, hspace=0)
ax = plt.Subplot(fig, grid_cbar[0])
fig.add_subplot(ax)
img = mpimg.imread(f)
img_cbar = img[600:,:]
ax.imshow(img_cbar, interpolation='none')
ax.set_axis_off()
# Save the figure
filename = os.path.join(output_dir, '{}_{}_{}_combined.png'.format(measure, surface, cortex_style))
fig.savefig(filename, bbox_inches=0, dpi=300)
def add_four_hor_brains(grid, f_list, fig):
'''
Take the four pysurfer views (left lateral, left medial,
right medial and right lateral) and arrange them in a row
according to the grid positions given by grid
grid : the gridspec list of grid placements
f_list : list of four file pysurfer image files
big_fig : the figure to which you're adding the images
# THIS WAS UPDATED TO INCLUDE PLOTTING IN A GRID
# Should probably change the function name!
'''
for g_loc, f in zip(grid, f_list):
img = mpimg.imread(f)
# Crop the figures appropriately
# NOTE: this can change depending on which system you've made the
# images on originally - it's a bug that needs to be sorted out!
if 'lateral' in f:
img_cropped = img[115:564, 105:(-100),:]
else:
img_cropped = img[90:560, 60:(-55),:]
# Add an axis to the figure
ax_brain = plt.Subplot(fig, g_loc)
fig.add_subplot(ax_brain)
# Show the brain on this axis
ax_brain.imshow(img_cropped, interpolation='none')
ax_brain.set_axis_off()
return fig
def add_colorbar(grid, big_fig, cmap_name, y_min=0, y_max=1, cbar_min=0, cbar_max=1, vert=False, label=None, show_ticks=True, pad=0):
# Set the seaborn context and style
sns.set(style="white")
sns.set_context("poster", font_scale=3)
'''
Add a colorbar to the big_fig in the location defined by grid
grid : grid spec location to add colormap
big_fig : figure to which colorbar will be added
cmap_name : name of the colormap
    y_min : the minimum value to plot this colorbar between
    y_max : the maximum value to plot this colorbar between
    cbar_min : minimum value for the colormap (default 0)
    cbar_max : maximum value for the colormap (default 1)
    vert : whether the colorbar should be vertical (default False)
    label : the label for the colorbar (default: None)
    show_ticks : whether to put the tick values on the colorbar (default: True)
pad : how much to shift the colorbar label by (default: 0)
'''
import matplotlib as mpl
from matplotlib.colors import LinearSegmentedColormap
# Add an axis to the big_fig
ax_cbar = plt.Subplot(big_fig, grid)
big_fig.add_subplot(ax_cbar)
# Normalise the colorbar so you have the correct upper and
# lower limits and define the three ticks you want to show
norm = mpl.colors.Normalize(vmin=cbar_min, vmax=cbar_max)
if show_ticks:
ticks = [y_min, np.average([y_min, y_max]), y_max]
else:
ticks=[]
# Figure out the orientation
if vert:
orientation='vertical'
rotation=270
else:
orientation='horizontal'
rotation=0
# Add in your colorbar:
cb = mpl.colorbar.ColorbarBase(ax_cbar,
cmap=cmap_name,
norm=norm,
orientation=orientation,
ticks=ticks,
boundaries=np.linspace(y_min, y_max, 300))
if label:
cb.set_label(label, rotation=rotation, labelpad=pad)
return big_fig
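# Minimal sketch of using add_colorbar on its own (illustrative only; kept as
# a comment so it is not executed as part of this script):
#
#   fig = plt.figure(figsize=(8, 1))
#   gs = gridspec.GridSpec(1, 1)
#   fig = add_colorbar(gs[0], fig, cmap_name='RdBu_r',
#                      y_min=-2, y_max=2, cbar_min=-2, cbar_max=2,
#                      label='measure')
#   fig.savefig('colorbar_only.png', dpi=150)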
def brains_in_a_row(measure, surface, output_dir, cortex_style, l, u, cmap):
# Set up the figure
fig, ax = plt.subplots(figsize=(20,6), facecolor='white')
# Set up the grid
grid = gridspec.GridSpec(1,4)
grid.update(left=0.01, right=0.99, top=1.05, bottom=0.2, wspace=0, hspace=0)
# Set up the file list
f_list = [ '_'.join([os.path.join(output_dir, measure), 'lh', surface, cortex_style, 'lateral.png']),
'_'.join([os.path.join(output_dir, measure), 'lh', surface, cortex_style, 'medial.png']),
'_'.join([os.path.join(output_dir, measure), 'rh', surface, cortex_style, 'medial.png']),
'_'.join([os.path.join(output_dir, measure), 'rh', surface, cortex_style, 'lateral.png']) ]
# Add the brains
fig = add_four_hor_brains(grid, f_list, fig)
# Set up the colorbar grid
cb_grid = gridspec.GridSpec(1,1)
cb_grid.update(left=0.2,
right=0.8,
bottom=0.2,
top=0.25,
wspace=0,
hspace=0)
fig = add_colorbar(cb_grid[0], fig,
cmap_name=cmap,
cbar_min=l,
cbar_max=u,
y_min=l,
y_max=u,
label='')
# Turn off the axis
ax.set_axis_off()
# Save the figure
filename = os.path.join(output_dir, '{}_{}_{}_FourHorBrains.png'.format(measure, surface, cortex_style))
fig.savefig(filename, dpi=300)
# Close the figure
plt.close('all')
#=============================================================================
# SET SOME VARIABLES
#=============================================================================
# Read in the arguments from argparse
arguments, parser = setup_argparser()
subject_id = arguments.fsaverageid
subjects_dir = arguments.subjects_dir
roi_data_file = arguments.roi_file
l = arguments.lower
u = arguments.upper
cmap = arguments.cmap
cmap2 = arguments.cmap2
color_file = arguments.color_file
center = arguments.center
surface = arguments.surface
thresh = arguments.thresh
thresh2 = arguments.thresh2
cortex_style = arguments.cortex_style
# Define the output directory
output_dir = os.path.join(os.path.dirname(roi_data_file), 'PNGS')
if not os.path.isdir(output_dir):
os.mkdir(output_dir)
# Define the name of the measure you're plotting
measure = os.path.basename(roi_data_file)
measure = os.path.splitext(measure)[0]
# Define the aparc names
# Read in aparc names file
aparc_names_file = os.path.join(subjects_dir,
subject_id, "parcellation",
"500.names.txt")
# Read in the names from the aparc names file
# dropping the first 41
aparc_names = [line.strip() for line in open(aparc_names_file)]
aparc_names = aparc_names[41::]
# Figure out which surfaces you're going to use
if surface == 'both':
surface_list = [ "inflated", "pial" ]
elif surface == 'inflated':
surface_list = [ "inflated" ]
elif surface == 'pial':
surface_list = [ "pial" ]
else:
print "Do not recognise surface. Check {}".format(surface)
parser.print_help()
sys.exit()
hemi_list = [ "lh", "rh" ]
views_list = [ 'medial', 'lateral' ]
# Check that the inputs exist:
if not os.path.isfile(roi_data_file):
print "Roi data file doesn't exist"
sys.exit()
if not os.path.isdir(os.path.join(subjects_dir, subject_id, "surf")):
print "Fsaverage directory doesn't exist"
print "Check subjects_dir and subject_id"
sys.exit()
#=============================================================================
# READ IN THE MEASURE DATA
#=============================================================================
# Read in the data
df = pd.read_csv(roi_data_file, index_col=False, header=None)
#-------
# Make custom colorbar
if color_file:
cmap = [line.strip() for line in open(color_file)]
l = 1
u = len(cmap)
# If you've passed rgb values you need to convert
# these to tuples
if len(cmap[0].split()) == 3:
cmap = [ (np.float(x.split()[0]),
np.float(x.split()[1]),
np.float(x.split()[2])) for x in cmap ]
else:
# Set l and u so that they're the same for both hemispheres
l, u = calc_range(df[0], l, u, thresh, center)
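# For reference, a custom color file (-cf / --color_file) is read one color per
# line; hypothetical examples of the two formats handled above:
#
#   #d73027              a matplotlib-style color string, or
#   0.84 0.19 0.15       three space-separated r g b values in [0, 1],
#                        which the block above converts to tuples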
# Now rearrange the data frame and match it up with
# the aparc names
df = df.T
df.columns = aparc_names
# Now make your pictures
for hemi, surface in it.product(hemi_list, surface_list):
prefix = '_'.join([measure, hemi, surface, cortex_style])
# Read in aparc annot file which will be inside
# the label folder of your fsaverage subject folder
aparc_file = os.path.join(subjects_dir,
subject_id, "label",
hemi + ".500.aparc.annot")
# Use nibabel to merge together the aparc_names and the aparc_file
labels, ctab, names = nib.freesurfer.read_annot(aparc_file)
# Create an empty roi_data array
roi_data = np.ones(len(names))*(thresh-1.0)
# Loop through the names and if they are in the data frame
# for this hemisphere then add that value to the roi_data array
for i, name in enumerate(names):
roi_name = '{}_{}'.format(hemi, name)
if roi_name in df.columns:
roi_data[i] = df[roi_name]
# Make a vector containing the data point at each vertex.
vtx_data = roi_data[labels]
# Write out the vtx_data
#nib.freesurfer.write_annot(f_name, vtx_data, ctab, names)
# Show this data on a brain
plot_surface(vtx_data, subject_id, hemi,
surface, subjects_dir,
output_dir, prefix,
l, u, cmap,
thresh,
cmap2=cmap2,
thresh2=thresh2,
cortex_style=cortex_style)
#=============================================================================
# COMBINE THE IMAGES
#=============================================================================
for surface in surface_list:
combine_pngs(measure, surface, output_dir, cortex_style)
brains_in_a_row(measure, surface, output_dir, cortex_style, l, u, cmap)
# You're done :)
# Happy International Women's Day 2017
# <3 Kx
| #!/usr/bin/env python
#=============================================================================
# Created by <NAME>
# September 2014
# Contact: <EMAIL>
#=============================================================================
#=============================================================================
# IMPORTS
#=============================================================================
import os
import sys
import argparse
import numpy as np
import pandas as pd
import nibabel as nib
from surfer import Brain
import seaborn as sns
import itertools as it
import matplotlib.pylab as plt
import matplotlib.image as mpimg
import matplotlib.gridspec as gridspec
import matplotlib.colors as mcolors
import matplotlib.cm as cm
#=============================================================================
# FUNCTIONS
#=============================================================================
def setup_argparser():
'''
Code to read in arguments from the command line
    Also allows you to change some settings
'''
# Build a basic parser.
help_text = ('Plot a single value for each region in the NSPN 500 parcellation of the fsaverage surface')
sign_off = 'Author: <NAME> <<EMAIL>>'
parser = argparse.ArgumentParser(description=help_text, epilog=sign_off)
# Now add the arguments
parser.add_argument(dest='roi_file',
type=str,
metavar='roi_file',
help='roi file containing list of measure values - one for each region - csv format')
parser.add_argument('--fsaverageid',
type=str,
metavar='fsaverage_id',
help='FSaverage subject id',
default='fsaverageSubP')
parser.add_argument('-sd', '--subjects_dir',
type=str,
metavar='subjects_dir',
help='freesurfer subjects dir',
default=os.environ["SUBJECTS_DIR"])
parser.add_argument('-c', '--cmap',
type=str,
metavar='cmap',
help='colormap',
default='RdBu_r')
parser.add_argument('-c2', '--cmap2',
type=str,
metavar='cmap2',
help='colormap for the second overlay',
default='autumn')
parser.add_argument('-cf', '--color_file',
type=str,
metavar='color_file',
help='file containing list of custom colors',
default=None)
parser.add_argument('--center',
action='store_true',
help='center the color bar around 0')
parser.add_argument('-t', '--thresh',
type=float,
metavar='thresh',
help='mask values below this value',
default=-98)
parser.add_argument('-t2', '--thresh2',
type=float,
metavar='thresh2',
help='mask values below this value for the second color',
default=None)
parser.add_argument('-l', '--lower',
type=float,
metavar='lowerthr',
help='lower limit for colorbar',
default=None)
parser.add_argument('-u', '--upper',
type=float,
metavar='upperthr',
help='upper limit for colorbar',
default=None)
parser.add_argument('-s', '--surface',
type=str,
metavar='surface',
help='surface - one of "pial", "inflated" or "both"',
default='both')
parser.add_argument('-cst', '--cortex_style',
type=str,
metavar='cortex_style',
help='cortex style - one of "classic", "bone", "high_contrast" or "low_contrast"',
default='classic')
arguments = parser.parse_args()
return arguments, parser
#------------------------------------------------------------------------------
def calc_range(roi_data, l, u, thresh, center):
# Figure out the min and max for the color bar
if l == None:
l = roi_data[roi_data>thresh].min()
l = np.floor(l*20)/20.0
if u == None:
u = roi_data[roi_data>thresh].max()
u = np.ceil(u*20)/20.0
if center:
# Make sure the colorbar is centered
if l**2 < u **2:
l = u*-1
else:
u = l*-1
return l, u
#------------------------------------------------------------------------------
def plot_surface(vtx_data, subject_id, hemi, surface, subjects_dir, output_dir, prefix, l, u, cmap, thresh, thresh2=None, cmap2='autumn', cortex_style='classic'):
"""
This function needs more documentation, but for now
it is sufficient to know this one important fact:
For the variable "cmap":
If you pass a word that defines a matplotlib
        colormap (e.g. jet, RdBu_r, etc.) then the code
will use that for the color scheme.
If you pass a **list** of colors then you'll
just loop through those colors instead.
"""
if cortex_style.count('_') == 2:
cortex_style_list = cortex_style.split('_')
cortex_name = cortex_style_list[0]
cortex_min = np.float(cortex_style_list[1])
cortex_max = np.float(cortex_style_list[2])
cortex_style = ( cortex_name, cortex_min, cortex_max, False )
# Open up a brain in pysurfer
brain = Brain(subject_id, hemi, surface,
subjects_dir = subjects_dir,
background="white",
size=(800, 665),
cortex=cortex_style)
# Create an empty brain if the values are all below threshold
if np.max(vtx_data) < thresh:
# Add your data to the brain
brain.add_data(vtx_data*0,
l,
u,
thresh = thresh,
colormap=cmap,
alpha=0.0)
# If you only have one threshold
# then add the data!
elif not thresh2:
# Add your data to the brain
brain.add_data(vtx_data,
l,
u,
thresh = thresh,
colormap=cmap,
alpha=.8)
else:
# Plot the data twice for the two
# different settings
vtx_data1 = np.copy(vtx_data)
vtx_data1[vtx_data1>thresh2] = 0
brain.add_data(vtx_data1,
l,
u,
thresh = thresh,
colormap = cmap,
alpha = .8)
brain.add_data(vtx_data,
l,
u,
thresh = thresh2,
colormap = cmap2,
alpha = .8)
# Save the images for medial and lateral
# putting a color bar on all of them
brain.save_imageset(prefix = os.path.join(output_dir, prefix),
views = views_list,
colorbar = range(len(views_list)) )
#-----------------------------------------------------------------------------
def combine_pngs(measure, surface, output_dir, cortex_style):
'''
Find four images and combine them into one nice picture
'''
figsize = (5,4)
fig = plt.figure(figsize = figsize, facecolor='white')
grid = gridspec.GridSpec(2, 2)
grid.update(left=0, right=1, top=1, bottom = 0.08, wspace=0, hspace=0)
f_list = [ '_'.join([os.path.join(output_dir, measure), 'lh', surface, cortex_style, 'lateral.png']),
'_'.join([os.path.join(output_dir, measure), 'rh', surface, cortex_style, 'lateral.png']),
'_'.join([os.path.join(output_dir, measure), 'lh', surface, cortex_style, 'medial.png']),
'_'.join([os.path.join(output_dir, measure), 'rh', surface, cortex_style, 'medial.png']) ]
# Plot each figure in turn
for g_loc, f in zip(grid, f_list):
ax = plt.Subplot(fig, g_loc)
fig.add_subplot(ax)
img = mpimg.imread(f)
# Crop the figures appropriately
# NOTE: this can change depending on which system you've made the
# images on originally - it's a bug that needs to be sorted out!
if 'lateral' in f:
img_cropped = img[75:589,55:(-50),:]
else:
img_cropped = img[45:600,25:(-25),:]
ax.imshow(img_cropped, interpolation='none')
ax.set_axis_off()
# Add the bottom of one of the images as the color bar
# at the bottom of the combo figure
grid_cbar = gridspec.GridSpec(1,1)
grid_cbar.update(left=0, right=1, top=0.08, bottom=0, wspace=0, hspace=0)
ax = plt.Subplot(fig, grid_cbar[0])
fig.add_subplot(ax)
img = mpimg.imread(f)
img_cbar = img[600:,:]
ax.imshow(img_cbar, interpolation='none')
ax.set_axis_off()
# Save the figure
filename = os.path.join(output_dir, '{}_{}_{}_combined.png'.format(measure, surface, cortex_style))
fig.savefig(filename, bbox_inches=0, dpi=300)
def add_four_hor_brains(grid, f_list, fig):
'''
Take the four pysurfer views (left lateral, left medial,
right medial and right lateral) and arrange them in a row
according to the grid positions given by grid
grid : the gridspec list of grid placements
f_list : list of four file pysurfer image files
big_fig : the figure to which you're adding the images
# THIS WAS UPDATED TO INCLUDE PLOTTING IN A GRID
# Should probably change the function name!
'''
for g_loc, f in zip(grid, f_list):
img = mpimg.imread(f)
# Crop the figures appropriately
# NOTE: this can change depending on which system you've made the
# images on originally - it's a bug that needs to be sorted out!
if 'lateral' in f:
img_cropped = img[115:564, 105:(-100),:]
else:
img_cropped = img[90:560, 60:(-55),:]
# Add an axis to the figure
ax_brain = plt.Subplot(fig, g_loc)
fig.add_subplot(ax_brain)
# Show the brain on this axis
ax_brain.imshow(img_cropped, interpolation='none')
ax_brain.set_axis_off()
return fig
def add_colorbar(grid, big_fig, cmap_name, y_min=0, y_max=1, cbar_min=0, cbar_max=1, vert=False, label=None, show_ticks=True, pad=0):
# Set the seaborn context and style
sns.set(style="white")
sns.set_context("poster", font_scale=3)
'''
Add a colorbar to the big_fig in the location defined by grid
grid : grid spec location to add colormap
big_fig : figure to which colorbar will be added
cmap_name : name of the colormap
    y_min : the minimum value to plot this colorbar between
    y_max : the maximum value to plot this colorbar between
    cbar_min : minimum value for the colormap (default 0)
    cbar_max : maximum value for the colormap (default 1)
    vert : whether the colorbar should be vertical (default False)
    label : the label for the colorbar (default: None)
    show_ticks : whether to put the tick values on the colorbar (default: True)
pad : how much to shift the colorbar label by (default: 0)
'''
import matplotlib as mpl
from matplotlib.colors import LinearSegmentedColormap
# Add an axis to the big_fig
ax_cbar = plt.Subplot(big_fig, grid)
big_fig.add_subplot(ax_cbar)
# Normalise the colorbar so you have the correct upper and
# lower limits and define the three ticks you want to show
norm = mpl.colors.Normalize(vmin=cbar_min, vmax=cbar_max)
if show_ticks:
ticks = [y_min, np.average([y_min, y_max]), y_max]
else:
ticks=[]
# Figure out the orientation
if vert:
orientation='vertical'
rotation=270
else:
orientation='horizontal'
rotation=0
# Add in your colorbar:
cb = mpl.colorbar.ColorbarBase(ax_cbar,
cmap=cmap_name,
norm=norm,
orientation=orientation,
ticks=ticks,
boundaries=np.linspace(y_min, y_max, 300))
if label:
cb.set_label(label, rotation=rotation, labelpad=pad)
return big_fig
def brains_in_a_row(measure, surface, output_dir, cortex_style, l, u, cmap):
# Set up the figure
fig, ax = plt.subplots(figsize=(20,6), facecolor='white')
# Set up the grid
grid = gridspec.GridSpec(1,4)
grid.update(left=0.01, right=0.99, top=1.05, bottom=0.2, wspace=0, hspace=0)
# Set up the file list
f_list = [ '_'.join([os.path.join(output_dir, measure), 'lh', surface, cortex_style, 'lateral.png']),
'_'.join([os.path.join(output_dir, measure), 'lh', surface, cortex_style, 'medial.png']),
'_'.join([os.path.join(output_dir, measure), 'rh', surface, cortex_style, 'medial.png']),
'_'.join([os.path.join(output_dir, measure), 'rh', surface, cortex_style, 'lateral.png']) ]
# Add the brains
fig = add_four_hor_brains(grid, f_list, fig)
# Set up the colorbar grid
cb_grid = gridspec.GridSpec(1,1)
cb_grid.update(left=0.2,
right=0.8,
bottom=0.2,
top=0.25,
wspace=0,
hspace=0)
fig = add_colorbar(cb_grid[0], fig,
cmap_name=cmap,
cbar_min=l,
cbar_max=u,
y_min=l,
y_max=u,
label='')
# Turn off the axis
ax.set_axis_off()
# Save the figure
filename = os.path.join(output_dir, '{}_{}_{}_FourHorBrains.png'.format(measure, surface, cortex_style))
fig.savefig(filename, dpi=300)
# Close the figure
plt.close('all')
#=============================================================================
# SET SOME VARIABLES
#=============================================================================
# Read in the arguments from argparse
arguments, parser = setup_argparser()
subject_id = arguments.fsaverageid
subjects_dir = arguments.subjects_dir
roi_data_file = arguments.roi_file
l = arguments.lower
u = arguments.upper
cmap = arguments.cmap
cmap2 = arguments.cmap2
color_file = arguments.color_file
center = arguments.center
surface = arguments.surface
thresh = arguments.thresh
thresh2 = arguments.thresh2
cortex_style = arguments.cortex_style
# Define the output directory
output_dir = os.path.join(os.path.dirname(roi_data_file), 'PNGS')
if not os.path.isdir(output_dir):
os.mkdir(output_dir)
# Define the name of the measure you're plotting
measure = os.path.basename(roi_data_file)
measure = os.path.splitext(measure)[0]
# Define the aparc names
# Read in aparc names file
aparc_names_file = os.path.join(subjects_dir,
subject_id, "parcellation",
"500.names.txt")
# Read in the names from the aparc names file
# dropping the first 41
aparc_names = [line.strip() for line in open(aparc_names_file)]
aparc_names = aparc_names[41::]
# Figure out which surfaces you're going to use
if surface == 'both':
surface_list = [ "inflated", "pial" ]
elif surface == 'inflated':
surface_list = [ "inflated" ]
elif surface == 'pial':
surface_list = [ "pial" ]
else:
print "Do not recognise surface. Check {}".format(surface)
parser.print_help()
sys.exit()
hemi_list = [ "lh", "rh" ]
views_list = [ 'medial', 'lateral' ]
# Check that the inputs exist:
if not os.path.isfile(roi_data_file):
print "Roi data file doesn't exist"
sys.exit()
if not os.path.isdir(os.path.join(subjects_dir, subject_id, "surf")):
print "Fsaverage directory doesn't exist"
print "Check subjects_dir and subject_id"
sys.exit()
#=============================================================================
# READ IN THE MEASURE DATA
#=============================================================================
# Read in the data
df = pd.read_csv(roi_data_file, index_col=False, header=None)
#-------
# Make custom colorbar
if color_file:
cmap = [line.strip() for line in open(color_file)]
l = 1
u = len(cmap)
# If you've passed rgb values you need to convert
# these to tuples
if len(cmap[0].split()) == 3:
cmap = [ (np.float(x.split()[0]),
np.float(x.split()[1]),
np.float(x.split()[2])) for x in cmap ]
else:
# Set l and u so that they're the same for both hemispheres
l, u = calc_range(df[0], l, u, thresh, center)
# Now rearrange the data frame and match it up with
# the aparc names
df = df.T
df.columns = aparc_names
# Now make your pictures
for hemi, surface in it.product(hemi_list, surface_list):
prefix = '_'.join([measure, hemi, surface, cortex_style])
# Read in aparc annot file which will be inside
# the label folder of your fsaverage subject folder
aparc_file = os.path.join(subjects_dir,
subject_id, "label",
hemi + ".500.aparc.annot")
# Use nibabel to merge together the aparc_names and the aparc_file
labels, ctab, names = nib.freesurfer.read_annot(aparc_file)
# Create an empty roi_data array
roi_data = np.ones(len(names))*(thresh-1.0)
# Loop through the names and if they are in the data frame
# for this hemisphere then add that value to the roi_data array
for i, name in enumerate(names):
roi_name = '{}_{}'.format(hemi, name)
if roi_name in df.columns:
roi_data[i] = df[roi_name]
# Make a vector containing the data point at each vertex.
vtx_data = roi_data[labels]
# Write out the vtx_data
#nib.freesurfer.write_annot(f_name, vtx_data, ctab, names)
# Show this data on a brain
plot_surface(vtx_data, subject_id, hemi,
surface, subjects_dir,
output_dir, prefix,
l, u, cmap,
thresh,
cmap2=cmap2,
thresh2=thresh2,
cortex_style=cortex_style)
#=============================================================================
# COMBINE THE IMAGES
#=============================================================================
for surface in surface_list:
combine_pngs(measure, surface, output_dir, cortex_style)
brains_in_a_row(measure, surface, output_dir, cortex_style, l, u, cmap)
# You're done :)
# Happy International Women's Day 2017
# <3 Kx
| en | 0.675663 | #!/usr/bin/env python #============================================================================= # Created by <NAME> # September 2014 # Contact: <EMAIL> #============================================================================= #============================================================================= # IMPORTS #============================================================================= #============================================================================= # FUNCTIONS #============================================================================= Code to read in arguments from the command line Aso allows you to change some settings # Build a basic parser. # Now add the arguments #------------------------------------------------------------------------------ # Figure out the min and max for the color bar # Make sure the colorbar is centered #------------------------------------------------------------------------------ This function needs more documentation, but for now it is sufficient to know this one important fact: For the variable "cmap": If you pass a word that defines a matplotlib colormap (eg: jet, Rd_Bu etc) then the code will use that for the color scheme. If you pass a **list** of colors then you'll just loop through those colors instead. # Open up a brain in pysurfer # Create an empty brain if the values are all below threshold # Add your data to the brain # If you only have one threshold # then add the data! # Add your data to the brain # Plot the data twice for the two # different settings # Save the images for medial and lateral # putting a color bar on all of them #----------------------------------------------------------------------------- Find four images and combine them into one nice picture # Plot each figure in turn # Crop the figures appropriately # NOTE: this can change depending on which system you've made the # images on originally - it's a bug that needs to be sorted out! # Add the bottom of one of the images as the color bar # at the bottom of the combo figure # Save the figure Take the four pysurfer views (left lateral, left medial, right medial and right lateral) and arrange them in a row according to the grid positions given by grid grid : the gridspec list of grid placements f_list : list of four file pysurfer image files big_fig : the figure to which you're adding the images # THIS WAS UPDATED TO INCLUDE PLOTTING IN A GRID # Should probably change the function name! # Crop the figures appropriately # NOTE: this can change depending on which system you've made the # images on originally - it's a bug that needs to be sorted out! 
# Add an axis to the figure # Show the brain on this axis # Set the seaborn context and style Add a colorbar to the big_fig in the location defined by grid grid : grid spec location to add colormap big_fig : figure to which colorbar will be added cmap_name : name of the colormap x_min : the minimum value to plot this colorbar between x_max : the maximum value to plot this colorbar between cbar_min : minimum value for the colormap (default 0) cbar_max : maximum value for the colormap (default 1) vert : whether the colorbar should be vertical (default False) label : the label for the colorbar (default: None) ticks : whether to put the tick values on the colorbar (default: True) pad : how much to shift the colorbar label by (default: 0) # Add an axis to the big_fig # Normalise the colorbar so you have the correct upper and # lower limits and define the three ticks you want to show # Figure out the orientation # Add in your colorbar: # Set up the figure # Set up the grid # Set up the file list # Add the brains # Set up the colorbar grid # Turn off the axis # Save the figure # Close the figure #============================================================================= # SET SOME VARIABLES #============================================================================= # Read in the arguments from argparse # Define the output directory # Define the name of the measure you're plotting # Define the aparc names # Read in aparc names file # Read in the names from the aparc names file # dropping the first 41 # Figure out which surfaces you're going to use # Check that the inputs exist: #============================================================================= # READ IN THE MEASURE DATA #============================================================================= # Read in the data #------- # Make custom colorbar # If you've passed rgb values you need to convert # these to tuples # Set l and u so that they're the same for both hemispheres # Now rearrange the data frame and match it up with # the aparc names # Now make your pictures # Read in aparc annot file which will be inside # the label folder of your fsaverage subject folder # Use nibabel to merge together the aparc_names and the aparc_file # Create an empty roi_data array # Loop through the names and if they are in the data frame # for this hemisphere then add that value to the roi_data array # Make a vector containing the data point at each vertex. # Write out the vtx_data #nib.freesurfer.write_annot(f_name, vtx_data, ctab, names) # Show this data on a brain #============================================================================= # COMBINE THE IMAGES #============================================================================= # You're done :) # Happy International Women's Day 2017 # <3 Kx | 1.785265 | 2 |
brainsprite/__init__.py | kchapelier/brainsprite | 13 | 6632732 | """Brainsprite python API."""
from .brainsprite import viewer_substitute
__all__ = ['viewer_substitute']
| """Brainsprite python API."""
from .brainsprite import viewer_substitute
__all__ = ['viewer_substitute']
| en | 0.250533 | Brainsprite python API. | 1.207383 | 1 |
corehq/apps/hqwebapp/tests/test_csrf_middleware.py | kkrampa/commcare-hq | 1 | 6632733 | from __future__ import absolute_import
from __future__ import unicode_literals
from bs4 import BeautifulSoup
from django.urls import reverse
from django.test import TestCase, Client
from corehq.apps.domain.models import Domain
from corehq.apps.users.models import WebUser
class TestCSRF(TestCase):
@classmethod
def setUpClass(cls):
super(TestCSRF, cls).setUpClass()
cls.domain = Domain(name="delhi", is_active=True)
cls.domain.save()
cls.username = 'bombme'
cls.password = '*******'
cls.user = WebUser.create(cls.domain.name, cls.username, cls.password, is_admin=True)
cls.user.eula.signed = True
@classmethod
def tearDownClass(cls):
cls.user.delete()
cls.domain.delete()
super(TestCSRF, cls).tearDownClass()
def test_csrf_ON(self):
csrf_sent, csrf_missing = self._form_post_with_and_without_csrf()
self.assertEqual(csrf_sent, 200)
self.assertEqual(csrf_missing, 403)
def _form_post_with_and_without_csrf(self):
client = Client(enforce_csrf_checks=True)
login_page = client.get(reverse('login'))
csrf_token = BeautifulSoup(login_page.content).find('input', {'id': 'csrfTokenContainer'}).get('value')
client.login(username=self.username, password=self.password)
form_data = {
'recipients': '+9199902334',
'message': 'sms',
'send_sms_button': ''
}
# There is no particular reason in using 'send_to_recipients' as the CSRF test view,
# all views unless decorated with csrf_exempt should be CSRF protected by default via Django's middleware
csrf_missing = client.post(reverse('send_to_recipients', args=[self.domain.name]), form_data).status_code
form_data['csrfmiddlewaretoken'] = csrf_token
csrf_sent = client.post(
reverse('send_to_recipients', args=[self.domain.name]), form_data, follow=True
).status_code
return csrf_sent, csrf_missing
| from __future__ import absolute_import
from __future__ import unicode_literals
from bs4 import BeautifulSoup
from django.urls import reverse
from django.test import TestCase, Client
from corehq.apps.domain.models import Domain
from corehq.apps.users.models import WebUser
class TestCSRF(TestCase):
@classmethod
def setUpClass(cls):
super(TestCSRF, cls).setUpClass()
cls.domain = Domain(name="delhi", is_active=True)
cls.domain.save()
cls.username = 'bombme'
cls.password = '*******'
cls.user = WebUser.create(cls.domain.name, cls.username, cls.password, is_admin=True)
cls.user.eula.signed = True
@classmethod
def tearDownClass(cls):
cls.user.delete()
cls.domain.delete()
super(TestCSRF, cls).tearDownClass()
def test_csrf_ON(self):
csrf_sent, csrf_missing = self._form_post_with_and_without_csrf()
self.assertEqual(csrf_sent, 200)
self.assertEqual(csrf_missing, 403)
def _form_post_with_and_without_csrf(self):
client = Client(enforce_csrf_checks=True)
login_page = client.get(reverse('login'))
csrf_token = BeautifulSoup(login_page.content).find('input', {'id': 'csrfTokenContainer'}).get('value')
client.login(username=self.username, password=self.password)
form_data = {
'recipients': '+9199902334',
'message': 'sms',
'send_sms_button': ''
}
# There is no particular reason in using 'send_to_recipients' as the CSRF test view,
# all views unless decorated with csrf_exempt should be CSRF protected by default via Django's middleware
csrf_missing = client.post(reverse('send_to_recipients', args=[self.domain.name]), form_data).status_code
form_data['csrfmiddlewaretoken'] = csrf_token
csrf_sent = client.post(
reverse('send_to_recipients', args=[self.domain.name]), form_data, follow=True
).status_code
return csrf_sent, csrf_missing
| en | 0.895531 | # There is no particular reason in using 'send_to_recipients' as the CSRF test view, # all views unless decorated with csrf_exempt should be CSRF protected by default via Django's middleware | 2.225385 | 2 |
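
The test above shows the pattern: a POST without a token should return 403, a POST carrying csrfmiddlewaretoken should go through. A smaller hedged sketch of the same check, assuming a project whose 'login' page renders {% csrf_token %} so the csrftoken cookie gets set, and a hypothetical url_name to exercise:

from django.test import Client
from django.urls import reverse

def post_with_and_without_csrf(url_name, data):
    client = Client(enforce_csrf_checks=True)
    client.get(reverse('login'))          # any render of {% csrf_token %} sets the cookie
    token = client.cookies['csrftoken'].value
    missing = client.post(reverse(url_name), data).status_code                        # expect 403
    sent = client.post(reverse(url_name), dict(data, csrfmiddlewaretoken=token)).status_code
    return sent, missing
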
setup.py | ProfBIT-develop/django-modeltranslation | 1 | 6632734 | #!/usr/bin/env python
from distutils.core import setup
# Dynamically calculate the version based on modeltranslation.VERSION.
version = __import__('modeltranslation').get_version()
setup(
name='django-modeltranslation',
version=version,
description='Translates Django models using a registration approach.',
long_description=(
'The modeltranslation application can be used to translate dynamic '
'content of existing models to an arbitrary number of languages '
'without having to change the original model classes. It uses a '
'registration approach (comparable to Django\'s admin app) to be able '
'to add translations to existing or new projects and is fully '
'integrated into the Django admin backend.'),
author='<NAME>',
author_email='<EMAIL>',
maintainer='<NAME>',
maintainer_email='<EMAIL>',
url='https://github.com/deschler/django-modeltranslation',
packages=['modeltranslation', 'modeltranslation.management',
'modeltranslation.management.commands'],
package_data={'modeltranslation': ['static/modeltranslation/css/*.css',
'static/modeltranslation/js/*.js']},
requires=['Django(>=1.11)'],
download_url='https://github.com/deschler/django-modeltranslation/archive/%s.tar.gz' % version,
classifiers=[
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Operating System :: OS Independent',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Framework :: Django',
'License :: OSI Approved :: BSD License'],
license='New BSD')
| #!/usr/bin/env python
from distutils.core import setup
# Dynamically calculate the version based on modeltranslation.VERSION.
version = __import__('modeltranslation').get_version()
setup(
name='django-modeltranslation',
version=version,
description='Translates Django models using a registration approach.',
long_description=(
'The modeltranslation application can be used to translate dynamic '
'content of existing models to an arbitrary number of languages '
'without having to change the original model classes. It uses a '
'registration approach (comparable to Django\'s admin app) to be able '
'to add translations to existing or new projects and is fully '
'integrated into the Django admin backend.'),
author='<NAME>',
author_email='<EMAIL>',
maintainer='<NAME>',
maintainer_email='<EMAIL>',
url='https://github.com/deschler/django-modeltranslation',
packages=['modeltranslation', 'modeltranslation.management',
'modeltranslation.management.commands'],
package_data={'modeltranslation': ['static/modeltranslation/css/*.css',
'static/modeltranslation/js/*.js']},
requires=['Django(>=1.11)'],
download_url='https://github.com/deschler/django-modeltranslation/archive/%s.tar.gz' % version,
classifiers=[
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Operating System :: OS Independent',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Framework :: Django',
'License :: OSI Approved :: BSD License'],
license='New BSD')
| en | 0.63457 | #!/usr/bin/env python # Dynamically calculate the version based on modeltranslation.VERSION. | 1.996652 | 2 |
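
The long_description above refers to a "registration approach"; a sketch of what that registration looks like for a made-up NewsItem model with a title field (register/TranslationOptions are modeltranslation's public entry points, everything else here is hypothetical):

# translation.py of a hypothetical app
from modeltranslation.translator import register, TranslationOptions
from .models import NewsItem  # assumed model with a `title` field

@register(NewsItem)
class NewsItemTranslationOptions(TranslationOptions):
    # modeltranslation adds title_<lang> columns for each language in settings.LANGUAGES
    fields = ('title',)
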
mqtt_gate/main.py | icservis/majordomo | 0 | 6632735 | <gh_stars>0
import os
import binascii
import yaml
import paho.mqtt.client as mqtt
import re
from lib.garage import GarageDoor
print ("Welcome to GarageBerryPi!")
# Update the mqtt state topic
def update_state(value, topic):
print ("State change triggered: %s -> %s" % (topic, value))
client.publish(topic, value, retain=True)
# The callback for when the client receives a CONNACK response from the server.
def on_connect(client, userdata, flags, rc):
print ("Connected with result code: %s" % mqtt.connack_string(rc))
print ("Listening for server status on %s" % server_status_topic)
client.subscribe(server_status_topic)
for config in CONFIG['doors']:
availability_topic = config['availability_topic']
client.publish(availability_topic, "online", retain=False)
command_topic = config['command_topic']
print ("Listening for commands on %s" % command_topic)
client.subscribe(command_topic)
# Execute the specified command for a door
def execute_command(door, command):
try:
doorName = door.name
except:
doorName = door.id
print ("Executing command %s for door %s" % (command, doorName))
if command == "STEP" and door.state == 'closed':
door.step()
elif command == "OPEN" and door.state == 'closed':
door.open()
elif command == "CLOSE" and door.state == 'open':
door.close()
elif command == "STOP":
door.stop()
else:
print ("Invalid command: %s" % command)
with open(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'config.yaml'), 'r') as ymlfile:
CONFIG = yaml.load(ymlfile,Loader=yaml.FullLoader)
### SETUP MQTT ###
server_status_topic = CONFIG['mqtt']['server_status_topic']
user = CONFIG['mqtt']['user']
password = CONFIG['mqtt']['password']
host = CONFIG['mqtt']['host']
port = int(CONFIG['mqtt']['port'])
discovery = bool(CONFIG['mqtt'].get('discovery'))
if 'discovery_prefix' not in CONFIG['mqtt']:
discovery_prefix = 'homeassistant'
else:
discovery_prefix = CONFIG['mqtt']['discovery_prefix']
client = mqtt.Client(client_id="MQTTGarageDoor_" + binascii.hexlify(os.urandom(32)).decode(), clean_session=True, userdata=None, protocol=4)
client.on_connect = on_connect
client.username_pw_set(user, password=password)
client.connect(host, port, 60)
### SETUP END ###
### MAIN LOOP ###
if __name__ == "__main__":
# Create door objects and create callback functions
for doorCfg in CONFIG['doors']:
        # If no name is set, default it to the id
if not doorCfg['name']:
doorCfg['name'] = doorCfg['id']
# Sanitize id value for mqtt
        doorCfg['id'] = re.sub(r'\W+', '', re.sub(r'\s', ' ', doorCfg['id']))
if discovery is True:
base_topic = discovery_prefix + "/cover/" + doorCfg['id']
config_topic = base_topic + "/config"
doorCfg['command_topic'] = base_topic + "/set"
doorCfg['state_topic'] = base_topic + "/state"
command_topic = doorCfg['command_topic']
state_topic = doorCfg['state_topic']
door = GarageDoor(doorCfg)
# Callback per door that passes a reference to the door
def on_message(client, userdata, msg, door=door):
message = str(msg.payload.decode("utf-8"))
print ("Receiving message %s" % message)
execute_command(door, message)
# Callback per door that passes the doors state topic
def on_state_change(value, topic=state_topic):
update_state(value, topic)
client.message_callback_add(command_topic, on_message)
# Callback on status from server
def on_server_status_message(client, userdata, msg, door=door):
message = str(msg.payload.decode("utf-8"))
print ("Receiving status %s" % message)
for config in CONFIG['doors']:
availability_topic = config['availability_topic']
client.publish(availability_topic, "online", retain=False)
client.publish(state_topic, door.state, retain=True)
client.message_callback_add(server_status_topic, on_server_status_message)
# You can add additional listeners here and they will all be executed when the door state changes
door.onStateChange.addHandler(on_state_change)
def on_buttonPress():
print ("Button pressed")
door.onButtonPress.addHandler(on_buttonPress)
# Publish initial door state
client.publish(state_topic, door.state, retain=True)
# If discovery is enabled publish configuration
if discovery is True:
client.publish(config_topic,'{"name": "' + doorCfg['name'] + '", "command_topic": "' + command_topic + '", "state_topic": "' + state_topic + '"}', retain=True)
# Main loop
client.loop_forever()
| import os
import binascii
import yaml
import paho.mqtt.client as mqtt
import re
from lib.garage import GarageDoor
print ("Welcome to GarageBerryPi!")
# Update the mqtt state topic
def update_state(value, topic):
print ("State change triggered: %s -> %s" % (topic, value))
client.publish(topic, value, retain=True)
# The callback for when the client receives a CONNACK response from the server.
def on_connect(client, userdata, flags, rc):
print ("Connected with result code: %s" % mqtt.connack_string(rc))
print ("Listening for server status on %s" % server_status_topic)
client.subscribe(server_status_topic)
for config in CONFIG['doors']:
availability_topic = config['availability_topic']
client.publish(availability_topic, "online", retain=False)
command_topic = config['command_topic']
print ("Listening for commands on %s" % command_topic)
client.subscribe(command_topic)
# Execute the specified command for a door
def execute_command(door, command):
try:
doorName = door.name
except:
doorName = door.id
print ("Executing command %s for door %s" % (command, doorName))
if command == "STEP" and door.state == 'closed':
door.step()
elif command == "OPEN" and door.state == 'closed':
door.open()
elif command == "CLOSE" and door.state == 'open':
door.close()
elif command == "STOP":
door.stop()
else:
print ("Invalid command: %s" % command)
with open(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'config.yaml'), 'r') as ymlfile:
CONFIG = yaml.load(ymlfile,Loader=yaml.FullLoader)
### SETUP MQTT ###
server_status_topic = CONFIG['mqtt']['server_status_topic']
user = CONFIG['mqtt']['user']
password = CONFIG['mqtt']['password']
host = CONFIG['mqtt']['host']
port = int(CONFIG['mqtt']['port'])
discovery = bool(CONFIG['mqtt'].get('discovery'))
if 'discovery_prefix' not in CONFIG['mqtt']:
discovery_prefix = 'homeassistant'
else:
discovery_prefix = CONFIG['mqtt']['discovery_prefix']
client = mqtt.Client(client_id="MQTTGarageDoor_" + binascii.hexlify(os.urandom(32)).decode(), clean_session=True, userdata=None, protocol=4)
client.on_connect = on_connect
client.username_pw_set(user, password=password)
client.connect(host, port, 60)
### SETUP END ###
### MAIN LOOP ###
if __name__ == "__main__":
# Create door objects and create callback functions
for doorCfg in CONFIG['doors']:
        # If no name is set, default it to the id
if not doorCfg['name']:
doorCfg['name'] = doorCfg['id']
# Sanitize id value for mqtt
        doorCfg['id'] = re.sub(r'\W+', '', re.sub(r'\s', ' ', doorCfg['id']))
if discovery is True:
base_topic = discovery_prefix + "/cover/" + doorCfg['id']
config_topic = base_topic + "/config"
doorCfg['command_topic'] = base_topic + "/set"
doorCfg['state_topic'] = base_topic + "/state"
command_topic = doorCfg['command_topic']
state_topic = doorCfg['state_topic']
door = GarageDoor(doorCfg)
# Callback per door that passes a reference to the door
def on_message(client, userdata, msg, door=door):
message = str(msg.payload.decode("utf-8"))
print ("Receiving message %s" % message)
execute_command(door, message)
# Callback per door that passes the doors state topic
def on_state_change(value, topic=state_topic):
update_state(value, topic)
client.message_callback_add(command_topic, on_message)
# Callback on status from server
def on_server_status_message(client, userdata, msg, door=door):
message = str(msg.payload.decode("utf-8"))
print ("Receiving status %s" % message)
for config in CONFIG['doors']:
availability_topic = config['availability_topic']
client.publish(availability_topic, "online", retain=False)
client.publish(state_topic, door.state, retain=True)
client.message_callback_add(server_status_topic, on_server_status_message)
# You can add additional listeners here and they will all be executed when the door state changes
door.onStateChange.addHandler(on_state_change)
def on_buttonPress():
print ("Button pressed")
door.onButtonPress.addHandler(on_buttonPress)
# Publish initial door state
client.publish(state_topic, door.state, retain=True)
# If discovery is enabled publish configuration
if discovery is True:
client.publish(config_topic,'{"name": "' + doorCfg['name'] + '", "command_topic": "' + command_topic + '", "state_topic": "' + state_topic + '"}', retain=True)
# Main loop
client.loop_forever() | en | 0.701123 | # Update the mqtt state topic # The callback for when the client receives a CONNACK response from the server. # Execute the specified command for a door ### SETUP MQTT ### ### SETUP END ### ### MAIN LOOP ### # Create door objects and create callback functions # If no name it set, then set to id # Sanitize id value for mqtt # Callback per door that passes a reference to the door # Callback per door that passes the doors state topic # Callback on status from server # You can add additional listeners here and they will all be executed when the door state changes # Publish initial door state # If discovery is enabled publish configuration # Main loop | 2.616758 | 3 |
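
The script above only documents its configuration implicitly through the keys it reads; a hedged reconstruction of what yaml.load(config.yaml) is expected to return, with placeholder values (GarageDoor(doorCfg) may additionally require hardware keys such as GPIO pin numbers that are not visible here):

EXAMPLE_CONFIG = {
    'mqtt': {
        'host': 'broker.local',
        'port': 1883,
        'user': 'mqtt-user',
        'password': 'secret',
        'server_status_topic': 'home/server/status',
        'discovery': True,
        'discovery_prefix': 'homeassistant',   # optional; defaults to 'homeassistant'
    },
    'doors': [
        {
            'id': 'garage_left',
            'name': 'Left garage door',
            'availability_topic': 'garage/left/availability',
            # command_topic/state_topic are only read when discovery is False;
            # with discovery on they are derived from the id
            'command_topic': 'garage/left/set',
            'state_topic': 'garage/left/state',
        },
    ],
}
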
apps/test.py | xroynard/ms_deepvoxscene | 13 | 6632736 | <filename>apps/test.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: <NAME>
"""
from __future__ import print_function, division
import os
import sys
import glob
import argparse
import numpy as np
# Pytorch
import torch
from torch.utils.data import DataLoader
# cudnn optim
import torch.backends.cudnn as cudnn
cudnn.benchmark = True
from sklearn.metrics import confusion_matrix
# to import modules
sys.path.insert(0, os.path.abspath('..'))
from input import PointCloudDataset
#import models
from utils.tester import Tester
from utils.parameters import Parameters
if __name__ == '__main__':
###############################################################################
#%% Parses Command Line Arguments
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("-c", "--config",
dest='CONFIG_FILE',
default=os.path.join(os.path.curdir, os.path.pardir, "config", "test_config.yaml"),
help="config file",
)
parser.add_argument("-l", "--log_dir",
dest='LOG_DIR',
help="log directory of trained model (should contain config.yaml and subdir models/)",
)
parser.print_help()
args = parser.parse_args()
##############################################################################
#%% Read config file
# Read config file (loads config file of trained model)
params = Parameters(os.path.join(args.LOG_DIR, "config.yaml"))
params.update_parameters(args.CONFIG_FILE)
# Save parameters
params.write_parameters()
###############################################################################
#%% Load model
print()
# Init model
model = torch.load( os.path.join(params.getp("MODEL_DIR"), "best_train_model_checkpoint_fold_00_sample_000.tar" ) )
# Puts the model on device (GPU)
model = model.cuda(device=params.getp("DEVICE_ID"))
###############################################################################
#%% Read dataset repository
#
filenames = sorted( glob.glob( os.path.join(params.getp("DATASET_DIR"),"test","*.ply") ) )
print()
print("###############################################################################")
print("{} FOLDS in repo {}:".format(len(filenames), os.path.join(params.getp("DATASET_DIR"),"test") ))
print("Test Files:")
for indf,f in enumerate(filenames):
print("\t - File {:02d} -> {}".format(indf,f))
###############################################################################
#%% Load the testing dataset
for f in filenames:
dirname,fname = os.path.split(f)
name,ext = os.path.splitext(fname)
dset = PointCloudDataset(f,
scales=params.getp("SCALES"),
grid_size=params.getp("GRID_SIZE"),
voxel_size=params.getp("VOXEL_SIZE"),
nb_classes=params.getp("NB_CLASSES"),
testing=True,
use_no_labels=params.getp("USE_NO_LABELS"),
use_class0=(params.getp("DATASET")=="s3dis"),
use_color=params.getp("USE_COLOR"),
use_reflectance=params.getp("USE_REFLECTANCE"),
)
dset_loader = DataLoader(dset,
batch_size=params.getp("BATCH_SIZE"),
shuffle=False,
num_workers=params.getp("NUM_WORKERS"),
)#, pin_memory=True) #?
        # Dataset sizes
print("Dataset sizes :", len(dset))
print("Dataloaders sizes:", len(dset_loader))
###############################################################################
#%% Test the model
print()
print("###############################################################################")
print("#------------------------------ TEST THE MODEL -------------------------------#")
print("###############################################################################")
# Tester
tester = Tester(params,
dset_loader,
model,
)
# Test the model
true_class, pred_class, pred_proba_class = tester.test_model()
###############################################################################
#%% Save cloud with predicted classes
# Saves cloud with predicted class for each point
RESULT_CLOUD_FILE = os.path.join(params.getp("CLOUD_DIR"), "classified_cloud_" + name + "_" + params.getp("MODEL_NAME") + ".ply")
dset.write_pred_cloud(pred_class, RESULT_CLOUD_FILE)
# Saves cloud with predicted class for each point and "probability" (output of softmax layer) of of belonging to in each class
RESULT_CLOUD_FILE = os.path.join(params.getp("CLOUD_DIR"), "classified_cloud_with_proba_" + name + "_" + params.getp("MODEL_NAME") + ".ply")
dset.write_pred_proba_cloud(pred_proba_class, RESULT_CLOUD_FILE)
###############################################################################
###############################################################################
###############################################################################
#%% Compute some stats
if true_class.max() >= 0:
# Compute Confusion Matrix
C = confusion_matrix(true_class, pred_class, labels=np.arange(params.getp("NB_CLASSES")))
print("Confusion Matrix:")
for row in C:
for col in row:
print("{:8d}".format(col),end='')
print("")
###############################################################################
TP = np.diag(C) # True Positives
FP = np.sum(C, axis=0) - TP # False Positives
FN = np.sum(C, axis=1) - TP # False Negatives
TN = np.sum(C) * np.ones(C.shape[0]) - TP - FP - FN # True Negatives
###############################################################################
print("\tOverall Accuracy: {:6.2f}%".format(100 * np.sum(np.diag(C))/np.sum(C)))
###############################################################################
print("Precision:")
S1 = C / np.sum(C,axis=0,dtype=np.float64)
for row in S1:
for col in row:
print("{:6.2f}% ".format(100 * col),end='')
print("")
print("\tMean Precision: {:6.2f}%".format(100 * np.mean(np.diag(S1))))
###############################################################################
print("Recall:")
S2 = (C.transpose() / np.sum(C,axis=1,dtype=np.float64)).transpose()
for row in S2:
for col in row:
print("{:6.2f}% ".format(100 * col),end='')
print("")
print("\tMean Recall: {:6.2f}%".format(100 * np.mean(np.diag(S2))))
###############################################################################
print("F1:")
S3 = 2 * S1 * S2 / (S1 + S2 + 1e-8)
for row in S3:
for col in row:
print("{:6.2f}% ".format(100 * col),end='')
print("")
print("\tMean F1: {:6.2f}%".format(100 * np.mean(np.diag(S3))))
###############################################################################
print()
###############################################################################
print("Precision:")
P = (TP) / (TP+FP)
for p in P:
print("{:6.2f}% ".format(100 * p),end='')
print()
print("\tMean F1: {:6.2f}%".format(100 * np.mean(P)))
###############################################################################
print("Recall:")
R = (TP) / (TP+FN)
for r in R:
print("{:6.2f}% ".format(100 * r),end='')
print()
print("\tMean F1: {:6.2f}%".format(100 * np.mean(R)))
###############################################################################
print("F1:")
F1 = (2*TP) / (2*TP+FP+FN)
for f1 in F1:
print("{:6.2f}% ".format(100 * f1),end='')
print()
print("\tMean F1: {:6.2f}%".format(100 * np.mean(F1)))
###############################################################################
IoU = (TP)/(TP+FP+FN)
print("IoU:")
for iou in IoU:
print("{:6.2f}% ".format(100 * iou),end='')
print()
print("\tMean IoU: {:6.2f}%".format(100 * np.mean(IoU))) | <filename>apps/test.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: <NAME>
"""
from __future__ import print_function, division
import os
import sys
import glob
import argparse
import numpy as np
# Pytorch
import torch
from torch.utils.data import DataLoader
# cudnn optim
import torch.backends.cudnn as cudnn
cudnn.benchmark = True
from sklearn.metrics import confusion_matrix
# to import modules
sys.path.insert(0, os.path.abspath('..'))
from input import PointCloudDataset
#import models
from utils.tester import Tester
from utils.parameters import Parameters
if __name__ == '__main__':
###############################################################################
#%% Parses Command Line Arguments
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("-c", "--config",
dest='CONFIG_FILE',
default=os.path.join(os.path.curdir, os.path.pardir, "config", "test_config.yaml"),
help="config file",
)
parser.add_argument("-l", "--log_dir",
dest='LOG_DIR',
help="log directory of trained model (should contain config.yaml and subdir models/)",
)
parser.print_help()
args = parser.parse_args()
##############################################################################
#%% Read config file
# Read config file (loads config file of trained model)
params = Parameters(os.path.join(args.LOG_DIR, "config.yaml"))
params.update_parameters(args.CONFIG_FILE)
# Save parameters
params.write_parameters()
###############################################################################
#%% Load model
print()
# Init model
model = torch.load( os.path.join(params.getp("MODEL_DIR"), "best_train_model_checkpoint_fold_00_sample_000.tar" ) )
# Puts the model on device (GPU)
model = model.cuda(device=params.getp("DEVICE_ID"))
###############################################################################
#%% Read dataset repository
#
filenames = sorted( glob.glob( os.path.join(params.getp("DATASET_DIR"),"test","*.ply") ) )
print()
print("###############################################################################")
print("{} FOLDS in repo {}:".format(len(filenames), os.path.join(params.getp("DATASET_DIR"),"test") ))
print("Test Files:")
for indf,f in enumerate(filenames):
print("\t - File {:02d} -> {}".format(indf,f))
###############################################################################
#%% Load the testing dataset
for f in filenames:
dirname,fname = os.path.split(f)
name,ext = os.path.splitext(fname)
dset = PointCloudDataset(f,
scales=params.getp("SCALES"),
grid_size=params.getp("GRID_SIZE"),
voxel_size=params.getp("VOXEL_SIZE"),
nb_classes=params.getp("NB_CLASSES"),
testing=True,
use_no_labels=params.getp("USE_NO_LABELS"),
use_class0=(params.getp("DATASET")=="s3dis"),
use_color=params.getp("USE_COLOR"),
use_reflectance=params.getp("USE_REFLECTANCE"),
)
dset_loader = DataLoader(dset,
batch_size=params.getp("BATCH_SIZE"),
shuffle=False,
num_workers=params.getp("NUM_WORKERS"),
)#, pin_memory=True) #?
        # Dataset sizes
print("Dataset sizes :", len(dset))
print("Dataloaders sizes:", len(dset_loader))
###############################################################################
#%% Test the model
print()
print("###############################################################################")
print("#------------------------------ TEST THE MODEL -------------------------------#")
print("###############################################################################")
# Tester
tester = Tester(params,
dset_loader,
model,
)
# Test the model
true_class, pred_class, pred_proba_class = tester.test_model()
###############################################################################
#%% Save cloud with predicted classes
# Saves cloud with predicted class for each point
RESULT_CLOUD_FILE = os.path.join(params.getp("CLOUD_DIR"), "classified_cloud_" + name + "_" + params.getp("MODEL_NAME") + ".ply")
dset.write_pred_cloud(pred_class, RESULT_CLOUD_FILE)
# Saves cloud with predicted class for each point and "probability" (output of softmax layer) of of belonging to in each class
RESULT_CLOUD_FILE = os.path.join(params.getp("CLOUD_DIR"), "classified_cloud_with_proba_" + name + "_" + params.getp("MODEL_NAME") + ".ply")
dset.write_pred_proba_cloud(pred_proba_class, RESULT_CLOUD_FILE)
###############################################################################
###############################################################################
###############################################################################
#%% Compute some stats
if true_class.max() >= 0:
# Compute Confusion Matrix
C = confusion_matrix(true_class, pred_class, labels=np.arange(params.getp("NB_CLASSES")))
print("Confusion Matrix:")
for row in C:
for col in row:
print("{:8d}".format(col),end='')
print("")
###############################################################################
TP = np.diag(C) # True Positives
FP = np.sum(C, axis=0) - TP # False Positives
FN = np.sum(C, axis=1) - TP # False Negatives
TN = np.sum(C) * np.ones(C.shape[0]) - TP - FP - FN # True Negatives
###############################################################################
print("\tOverall Accuracy: {:6.2f}%".format(100 * np.sum(np.diag(C))/np.sum(C)))
###############################################################################
print("Precision:")
S1 = C / np.sum(C,axis=0,dtype=np.float64)
for row in S1:
for col in row:
print("{:6.2f}% ".format(100 * col),end='')
print("")
print("\tMean Precision: {:6.2f}%".format(100 * np.mean(np.diag(S1))))
###############################################################################
print("Recall:")
S2 = (C.transpose() / np.sum(C,axis=1,dtype=np.float64)).transpose()
for row in S2:
for col in row:
print("{:6.2f}% ".format(100 * col),end='')
print("")
print("\tMean Recall: {:6.2f}%".format(100 * np.mean(np.diag(S2))))
###############################################################################
print("F1:")
S3 = 2 * S1 * S2 / (S1 + S2 + 1e-8)
for row in S3:
for col in row:
print("{:6.2f}% ".format(100 * col),end='')
print("")
print("\tMean F1: {:6.2f}%".format(100 * np.mean(np.diag(S3))))
###############################################################################
print()
###############################################################################
print("Precision:")
P = (TP) / (TP+FP)
for p in P:
print("{:6.2f}% ".format(100 * p),end='')
print()
print("\tMean F1: {:6.2f}%".format(100 * np.mean(P)))
###############################################################################
print("Recall:")
R = (TP) / (TP+FN)
for r in R:
print("{:6.2f}% ".format(100 * r),end='')
print()
print("\tMean F1: {:6.2f}%".format(100 * np.mean(R)))
###############################################################################
print("F1:")
F1 = (2*TP) / (2*TP+FP+FN)
for f1 in F1:
print("{:6.2f}% ".format(100 * f1),end='')
print()
print("\tMean F1: {:6.2f}%".format(100 * np.mean(F1)))
###############################################################################
IoU = (TP)/(TP+FP+FN)
print("IoU:")
for iou in IoU:
print("{:6.2f}% ".format(100 * iou),end='')
print()
print("\tMean IoU: {:6.2f}%".format(100 * np.mean(IoU))) | de | 0.734961 | #!/usr/bin/env python3 # -*- coding: utf-8 -*- @author: <NAME> # Pytorch # cudnn optim # to import modules #import models ############################################################################### #%% Parses Command Line Arguments ############################################################################## #%% Read config file # Read config file (loads config file of trained model) # Save parameters ############################################################################### #%% Load model # Init model # Puts the model on device (GPU) ############################################################################### #%% Read dataset repository # ##############################################################################") ############################################################################### #%% Load the testing dataset #, pin_memory=True) #? # Taille des dataset ############################################################################### #%% Test the model ##############################################################################") #") ##############################################################################") # Tester # Test the model ############################################################################### #%% Save cloud with predicted classes # Saves cloud with predicted class for each point # Saves cloud with predicted class for each point and "probability" (output of softmax layer) of of belonging to in each class ############################################################################### ############################################################################### ############################################################################### #%% Compute some stats # Compute Confusion Matrix ############################################################################### # True Positives # False Positives # False Negatives # True Negatives ############################################################################### ############################################################################### ############################################################################### ############################################################################### ############################################################################### ############################################################################### ############################################################################### ############################################################################### ############################################################################### | 2.018967 | 2 |
Python/minesweeper.py | ruikunl/LeetCode | 5 | 6632737 | <gh_stars>1-10
# Time: O(m * n)
# Space: O(m + n)
# Let's play the minesweeper game (Wikipedia, online game)!
#
# You are given a 2D char matrix representing the game board. 'M' represents an unrevealed mine,
# 'E' represents an unrevealed empty square, 'B' represents a revealed blank square that has no adjacent
# (above, below, left, right, and all 4 diagonals) mines, digit ('1' to '8') represents
# how many mines are adjacent to this revealed square, and finally 'X' represents a revealed mine.
#
# Now given the next click position (row and column indices) among all the unrevealed squares ('M' or 'E'),
# return the board after revealing this position according to the following rules:
#
# If a mine ('M') is revealed, then the game is over - change it to 'X'.
# If an empty square ('E') with no adjacent mines is revealed, then change it to revealed blank ('B')
# and all of its adjacent unrevealed squares should be revealed recursively.
# If an empty square ('E') with at least one adjacent mine is revealed, then change it to a digit ('1' to '8')
# representing the number of adjacent mines.
# Return the board when no more squares will be revealed.
#
# Example 1:
# Input:
# [['E', 'E', 'E', 'E', 'E'],
# ['E', 'E', 'M', 'E', 'E'],
# ['E', 'E', 'E', 'E', 'E'],
# ['E', 'E', 'E', 'E', 'E']]
# Click : [3,0]
# Output:
# [['B', '1', 'E', '1', 'B'],
# ['B', '1', 'M', '1', 'B'],
# ['B', '1', '1', '1', 'B'],
# ['B', 'B', 'B', 'B', 'B']]
#
# Example 2:
# Input:
# [['B', '1', 'E', '1', 'B'],
# ['B', '1', 'M', '1', 'B'],
# ['B', '1', '1', '1', 'B'],
# ['B', 'B', 'B', 'B', 'B']]
#
# Click : [1,2]
# Output:
# [['B', '1', 'E', '1', 'B'],
# ['B', '1', 'X', '1', 'B'],
# ['B', '1', '1', '1', 'B'],
# ['B', 'B', 'B', 'B', 'B']]
#
# Note:
# The range of the input matrix's height and width is [1,50].
# The click position will only be an unrevealed square ('M' or 'E'),
# which also means the input board contains at least one clickable square.
# The input board won't be a stage when game is over (some mines have been revealed).
# For simplicity, not mentioned rules should be ignored in this problem.
# For example, you don't need to reveal all the unrevealed mines when the game is over,
# consider any cases that you will win the game or flag any squares.
import collections


class Solution(object):
def updateBoard(self, board, click):
"""
:type board: List[List[str]]
:type click: List[int]
:rtype: List[List[str]]
"""
q = collections.deque([click])
while q:
row, col = q.popleft()
if board[row][col] == 'M':
board[row][col] = 'X'
else:
count = 0
for i in xrange(-1, 2):
for j in xrange(-1, 2):
if i == 0 and j == 0:
continue
r, c = row + i, col + j
if not (0 <= r < len(board)) or not (0 <= c < len(board[r])):
continue
if board[r][c] == 'M' or board[r][c] == 'X':
count += 1
if count:
board[row][col] = chr(count + ord('0'))
else:
board[row][col] = 'B'
for i in xrange(-1, 2):
for j in xrange(-1, 2):
if i == 0 and j == 0:
continue
r, c = row + i, col + j
if not (0 <= r < len(board)) or not (0 <= c < len(board[r])):
continue
if board[r][c] == 'E':
q.append((r, c))
board[r][c] = ' '
return board
# Time: O(m * n)
# Space: O(m * n)
class Solution2(object):
def updateBoard(self, board, click):
"""
:type board: List[List[str]]
:type click: List[int]
:rtype: List[List[str]]
"""
row, col = click[0], click[1]
if board[row][col] == 'M':
board[row][col] = 'X'
else:
count = 0
for i in xrange(-1, 2):
for j in xrange(-1, 2):
if i == 0 and j == 0:
continue
r, c = row + i, col + j
if not (0 <= r < len(board)) or not (0 <= c < len(board[r])):
continue
if board[r][c] == 'M' or board[r][c] == 'X':
count += 1
if count:
board[row][col] = chr(count + ord('0'))
else:
board[row][col] = 'B'
for i in xrange(-1, 2):
for j in xrange(-1, 2):
if i == 0 and j == 0:
continue
r, c = row + i, col + j
if not (0 <= r < len(board)) or not (0 <= c < len(board[r])):
continue
if board[r][c] == 'E':
self.updateBoard(board, (r, c))
return board
| # Time: O(m * n)
# Space: O(m + n)
# Let's play the minesweeper game (Wikipedia, online game)!
#
# You are given a 2D char matrix representing the game board. 'M' represents an unrevealed mine,
# 'E' represents an unrevealed empty square, 'B' represents a revealed blank square that has no adjacent
# (above, below, left, right, and all 4 diagonals) mines, digit ('1' to '8') represents
# how many mines are adjacent to this revealed square, and finally 'X' represents a revealed mine.
#
# Now given the next click position (row and column indices) among all the unrevealed squares ('M' or 'E'),
# return the board after revealing this position according to the following rules:
#
# If a mine ('M') is revealed, then the game is over - change it to 'X'.
# If an empty square ('E') with no adjacent mines is revealed, then change it to revealed blank ('B')
# and all of its adjacent unrevealed squares should be revealed recursively.
# If an empty square ('E') with at least one adjacent mine is revealed, then change it to a digit ('1' to '8')
# representing the number of adjacent mines.
# Return the board when no more squares will be revealed.
#
# Example 1:
# Input:
# [['E', 'E', 'E', 'E', 'E'],
# ['E', 'E', 'M', 'E', 'E'],
# ['E', 'E', 'E', 'E', 'E'],
# ['E', 'E', 'E', 'E', 'E']]
# Click : [3,0]
# Output:
# [['B', '1', 'E', '1', 'B'],
# ['B', '1', 'M', '1', 'B'],
# ['B', '1', '1', '1', 'B'],
# ['B', 'B', 'B', 'B', 'B']]
#
# Example 2:
# Input:
# [['B', '1', 'E', '1', 'B'],
# ['B', '1', 'M', '1', 'B'],
# ['B', '1', '1', '1', 'B'],
# ['B', 'B', 'B', 'B', 'B']]
#
# Click : [1,2]
# Output:
# [['B', '1', 'E', '1', 'B'],
# ['B', '1', 'X', '1', 'B'],
# ['B', '1', '1', '1', 'B'],
# ['B', 'B', 'B', 'B', 'B']]
#
# Note:
# The range of the input matrix's height and width is [1,50].
# The click position will only be an unrevealed square ('M' or 'E'),
# which also means the input board contains at least one clickable square.
# The input board won't be a stage when game is over (some mines have been revealed).
# For simplicity, not mentioned rules should be ignored in this problem.
# For example, you don't need to reveal all the unrevealed mines when the game is over,
# consider any cases that you will win the game or flag any squares.
import collections


class Solution(object):
def updateBoard(self, board, click):
"""
:type board: List[List[str]]
:type click: List[int]
:rtype: List[List[str]]
"""
q = collections.deque([click])
while q:
row, col = q.popleft()
if board[row][col] == 'M':
board[row][col] = 'X'
else:
count = 0
for i in xrange(-1, 2):
for j in xrange(-1, 2):
if i == 0 and j == 0:
continue
r, c = row + i, col + j
if not (0 <= r < len(board)) or not (0 <= c < len(board[r])):
continue
if board[r][c] == 'M' or board[r][c] == 'X':
count += 1
if count:
board[row][col] = chr(count + ord('0'))
else:
board[row][col] = 'B'
for i in xrange(-1, 2):
for j in xrange(-1, 2):
if i == 0 and j == 0:
continue
r, c = row + i, col + j
if not (0 <= r < len(board)) or not (0 <= c < len(board[r])):
continue
if board[r][c] == 'E':
q.append((r, c))
board[r][c] = ' '
return board
# Time: O(m * n)
# Space: O(m * n)
class Solution2(object):
def updateBoard(self, board, click):
"""
:type board: List[List[str]]
:type click: List[int]
:rtype: List[List[str]]
"""
row, col = click[0], click[1]
if board[row][col] == 'M':
board[row][col] = 'X'
else:
count = 0
for i in xrange(-1, 2):
for j in xrange(-1, 2):
if i == 0 and j == 0:
continue
r, c = row + i, col + j
if not (0 <= r < len(board)) or not (0 <= c < len(board[r])):
continue
if board[r][c] == 'M' or board[r][c] == 'X':
count += 1
if count:
board[row][col] = chr(count + ord('0'))
else:
board[row][col] = 'B'
for i in xrange(-1, 2):
for j in xrange(-1, 2):
if i == 0 and j == 0:
continue
r, c = row + i, col + j
if not (0 <= r < len(board)) or not (0 <= c < len(board[r])):
continue
if board[r][c] == 'E':
self.updateBoard(board, (r, c))
return board | en | 0.80327 | # Time: O(m * n) # Space: O(m + n) # Let's play the minesweeper game (Wikipedia, online game)! # # You are given a 2D char matrix representing the game board. 'M' represents an unrevealed mine, # 'E' represents an unrevealed empty square, 'B' represents a revealed blank square that has no adjacent # (above, below, left, right, and all 4 diagonals) mines, digit ('1' to '8') represents # how many mines are adjacent to this revealed square, and finally 'X' represents a revealed mine. # # Now given the next click position (row and column indices) among all the unrevealed squares ('M' or 'E'), # return the board after revealing this position according to the following rules: # # If a mine ('M') is revealed, then the game is over - change it to 'X'. # If an empty square ('E') with no adjacent mines is revealed, then change it to revealed blank ('B') # and all of its adjacent unrevealed squares should be revealed recursively. # If an empty square ('E') with at least one adjacent mine is revealed, then change it to a digit ('1' to '8') # representing the number of adjacent mines. # Return the board when no more squares will be revealed. # # Example 1: # Input: # [['E', 'E', 'E', 'E', 'E'], # ['E', 'E', 'M', 'E', 'E'], # ['E', 'E', 'E', 'E', 'E'], # ['E', 'E', 'E', 'E', 'E']] # Click : [3,0] # Output: # [['B', '1', 'E', '1', 'B'], # ['B', '1', 'M', '1', 'B'], # ['B', '1', '1', '1', 'B'], # ['B', 'B', 'B', 'B', 'B']] # # Example 2: # Input: # [['B', '1', 'E', '1', 'B'], # ['B', '1', 'M', '1', 'B'], # ['B', '1', '1', '1', 'B'], # ['B', 'B', 'B', 'B', 'B']] # # Click : [1,2] # Output: # [['B', '1', 'E', '1', 'B'], # ['B', '1', 'X', '1', 'B'], # ['B', '1', '1', '1', 'B'], # ['B', 'B', 'B', 'B', 'B']] # # Note: # The range of the input matrix's height and width is [1,50]. # The click position will only be an unrevealed square ('M' or 'E'), # which also means the input board contains at least one clickable square. # The input board won't be a stage when game is over (some mines have been revealed). # For simplicity, not mentioned rules should be ignored in this problem. # For example, you don't need to reveal all the unrevealed mines when the game is over, # consider any cases that you will win the game or flag any squares. :type board: List[List[str]] :type click: List[int] :rtype: List[List[str]] # Time: O(m * n) # Space: O(m * n) :type board: List[List[str]] :type click: List[int] :rtype: List[List[str]] | 3.769517 | 4 |
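
A quick sanity check of the BFS solution against Example 1 from the problem statement above (board and click copied from the comments; note the file targets Python 2, e.g. xrange, so run it there or adapt accordingly):

board = [['E', 'E', 'E', 'E', 'E'],
         ['E', 'E', 'M', 'E', 'E'],
         ['E', 'E', 'E', 'E', 'E'],
         ['E', 'E', 'E', 'E', 'E']]

result = Solution().updateBoard(board, [3, 0])
for row in result:
    print(row)
# expected, per Example 1:
# ['B', '1', 'E', '1', 'B']
# ['B', '1', 'M', '1', 'B']
# ['B', '1', '1', '1', 'B']
# ['B', 'B', 'B', 'B', 'B']
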
djackal/erra.py | jrog612/djackal | 0 | 6632738 | from enum import Enum
class Erra(Enum):
def __str__(self):
return self.name
@property
def message(self):
return self.value
@property
def code(self):
return self.name
def get_message(self, context=None):
message = self.message
if context and message:
message = message.format(**context)
return message
def response_data(self, context=None):
message = self.get_message(context=context)
return {'code': self.code, 'message': message}
| from enum import Enum
class Erra(Enum):
def __str__(self):
return self.name
@property
def message(self):
return self.value
@property
def code(self):
return self.name
def get_message(self, context=None):
message = self.message
if context and message:
message = message.format(**context)
return message
def response_data(self, context=None):
message = self.get_message(context=context)
return {'code': self.code, 'message': message}
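
Erra carries no members itself; it is meant to be subclassed into concrete error catalogues. A hypothetical one (member names and messages invented) showing how response_data renders a member:

class UserErra(Erra):
    NOT_FOUND = 'user {user_id} does not exist'
    DUPLICATED = 'user already exists'

UserErra.NOT_FOUND.response_data({'user_id': 7})
# -> {'code': 'NOT_FOUND', 'message': 'user 7 does not exist'}
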
| none | 1 | 2.845569 | 3 |
|
tests/test_coverage_sorteddict.py | monkeywithacupcake/python-sortedcontainers | 1 | 6632739 | <reponame>monkeywithacupcake/python-sortedcontainers
# -*- coding: utf-8 -*-
import random, string
from .context import sortedcontainers
from sortedcontainers import SortedDict
import pytest
from sys import hexversion
if hexversion < 0x03000000:
range = xrange
def negate(value):
return -value
def modulo(value):
return value % 10
def get_keysview(dic):
if hexversion < 0x03000000:
return dic.viewkeys()
else:
return dic.keys()
def get_itemsview(dic):
if hexversion < 0x03000000:
return dic.viewitems()
else:
return dic.items()
def test_init():
temp = SortedDict()
assert temp.key is None
temp._check()
def test_init_key():
temp = SortedDict(negate)
assert temp.key == negate
temp._check()
def test_init_args():
temp = SortedDict([('a', 1), ('b', 2)])
assert len(temp) == 2
assert temp['a'] == 1
assert temp['b'] == 2
temp._check()
def test_init_kwargs():
temp = SortedDict(a=1, b=2)
assert len(temp) == 2
assert temp['a'] == 1
assert temp['b'] == 2
temp._check()
def test_clear():
mapping = [(val, pos) for pos, val in enumerate(string.ascii_lowercase)]
temp = SortedDict(mapping)
assert len(temp) == 26
assert list(temp.items()) == mapping
temp.clear()
assert len(temp) == 0
def test_contains():
mapping = [(val, pos) for pos, val in enumerate(string.ascii_lowercase)]
temp = SortedDict(mapping)
assert all((val in temp) for val in string.ascii_lowercase)
def test_delitem():
mapping = [(val, pos) for pos, val in enumerate(string.ascii_lowercase)]
temp = SortedDict(mapping)
del temp['a']
temp._check()
def test_getitem():
mapping = [(val, pos) for pos, val in enumerate(string.ascii_lowercase)]
temp = SortedDict(mapping)
assert all((temp[val] == pos) for pos, val in enumerate(string.ascii_lowercase))
def test_eq():
mapping = [(val, pos) for pos, val in enumerate(string.ascii_lowercase)]
temp1 = SortedDict(mapping)
temp2 = SortedDict(mapping)
assert temp1 == temp2
assert not (temp1 != temp2)
temp2['a'] = 100
assert temp1 != temp2
assert not (temp1 == temp2)
del temp2['a']
assert temp1 != temp2
assert not (temp1 == temp2)
temp2['zz'] = 0
assert temp1 != temp2
assert not (temp1 == temp2)
def test_iter():
mapping = [(val, pos) for pos, val in enumerate(string.ascii_lowercase)]
temp = SortedDict(mapping)
assert all(lhs == rhs for lhs, rhs in zip(temp, string.ascii_lowercase))
def test_iter_key():
temp = SortedDict(negate, ((val, val) for val in range(100)))
temp._reset(7)
assert all(lhs == rhs for lhs, rhs in zip(temp, reversed(range(100))))
def test_reversed():
mapping = [(val, pos) for pos, val in enumerate(string.ascii_lowercase)]
temp = SortedDict(mapping)
assert all(lhs == rhs for lhs, rhs in
zip(reversed(temp), reversed(string.ascii_lowercase)))
def test_reversed_key():
temp = SortedDict(modulo, ((val, val) for val in range(100)))
temp._reset(7)
values = sorted(range(100), key=modulo)
assert all(lhs == rhs for lhs, rhs in zip(reversed(temp), reversed(values)))
def test_islice():
mapping = [(val, pos) for pos, val in enumerate(string.ascii_lowercase)]
temp = SortedDict(mapping)
temp._reset(7)
for start in range(30):
for stop in range(30):
assert list(temp.islice(start, stop)) == list(string.ascii_lowercase[start:stop])
def test_irange():
mapping = [(val, pos) for pos, val in enumerate(string.ascii_lowercase)]
temp = SortedDict(mapping)
temp._reset(7)
for start in range(26):
for stop in range(start + 1, 26):
result = list(string.ascii_lowercase[start:stop])
assert list(temp.irange(result[0], result[-1])) == result
def test_irange_key():
temp = SortedDict(modulo, ((val, val) for val in range(100)))
temp._reset(7)
values = sorted(range(100), key=modulo)
for start in range(10):
for stop in range(start, 10):
result = list(temp.irange_key(start, stop))
assert result == values[(start * 10):((stop + 1) * 10)]
def test_len():
mapping = [(val, pos) for pos, val in enumerate(string.ascii_lowercase)]
temp = SortedDict(mapping)
assert len(temp) == 26
def test_setitem():
temp = SortedDict()
for pos, key in enumerate(string.ascii_lowercase):
temp[key] = pos
temp._check()
assert len(temp) == 26
for pos, key in enumerate(string.ascii_lowercase):
temp[key] = pos
temp._check()
assert len(temp) == 26
def test_copy():
mapping = [(val, pos) for pos, val in enumerate(string.ascii_lowercase)]
temp = SortedDict(mapping)
dup = temp.copy()
assert len(temp) == 26
assert len(dup) == 26
dup.clear()
assert len(temp) == 26
assert len(dup) == 0
def test_copy_copy():
import copy
mapping = [(val, pos) for pos, val in enumerate(string.ascii_lowercase)]
temp = SortedDict(mapping)
dup = copy.copy(temp)
assert len(temp) == 26
assert len(dup) == 26
dup.clear()
assert len(temp) == 26
assert len(dup) == 0
def test_fromkeys():
mapping = [(val, pos) for pos, val in enumerate(string.ascii_lowercase)]
temp = SortedDict.fromkeys(mapping, 1)
assert all(temp[key] == 1 for key in temp)
def test_get():
mapping = [(val, pos) for pos, val in enumerate(string.ascii_lowercase)]
temp = SortedDict(mapping)
assert temp.get('a') == 0
assert temp.get('A', -1) == -1
def test_has_key():
if hexversion > 0x03000000:
return
mapping = [(val, pos) for pos, val in enumerate(string.ascii_lowercase)]
temp = SortedDict(mapping)
assert temp.has_key('a')
def test_items():
mapping = [(val, pos) for pos, val in enumerate(string.ascii_lowercase)]
temp = SortedDict(mapping)
assert list(temp.items()) == mapping
def test_keys():
mapping = [(val, pos) for pos, val in enumerate(string.ascii_lowercase)]
temp = SortedDict(mapping)
assert list(temp.keys()) == [key for key, pos in mapping]
def test_values():
mapping = [(val, pos) for pos, val in enumerate(string.ascii_lowercase)]
temp = SortedDict(mapping)
assert list(temp.values()) == [pos for key, pos in mapping]
def test_iterkeys():
temp = SortedDict()
with pytest.raises(AttributeError):
temp.iterkeys
def test_notgiven():
assert repr(SortedDict._SortedDict__not_given) == '<not-given>'
def test_pop():
mapping = [(val, pos) for pos, val in enumerate(string.ascii_lowercase)]
temp = SortedDict(mapping)
assert temp.pop('a') == 0
assert temp.pop('a', -1) == -1
def test_pop2():
mapping = [(val, pos) for pos, val in enumerate(string.ascii_lowercase)]
temp = SortedDict(mapping)
with pytest.raises(KeyError):
temp.pop('A')
def test_popitem():
mapping = [(val, pos) for pos, val in enumerate(string.ascii_lowercase)]
temp = SortedDict(mapping)
assert temp.popitem() == ('z', 25)
def test_popitem2():
temp = SortedDict()
with pytest.raises(KeyError):
temp.popitem()
def test_popitem3():
mapping = [(val, pos) for pos, val in enumerate(string.ascii_lowercase)]
temp = SortedDict(mapping)
assert temp.popitem(index=0) == ('a', 0)
def test_peekitem():
mapping = [(val, pos) for pos, val in enumerate(string.ascii_lowercase)]
temp = SortedDict(mapping)
assert temp.peekitem() == ('z', 25)
assert temp.peekitem(0) == ('a', 0)
assert temp.peekitem(index=4) == ('e', 4)
def test_peekitem2():
mapping = [(val, pos) for pos, val in enumerate(string.ascii_lowercase)]
temp = SortedDict(mapping)
with pytest.raises(IndexError):
temp.peekitem(index=100)
def test_setdefault():
mapping = [(val, pos) for pos, val in enumerate(string.ascii_lowercase)]
temp = SortedDict(mapping)
assert temp.setdefault('a', -1) == 0
assert temp['a'] == 0
assert temp.setdefault('A', -1) == -1
def test_update():
mapping = [(val, pos) for pos, val in enumerate(string.ascii_lowercase)]
temp = SortedDict()
temp.update()
temp.update(mapping)
temp.update(dict(mapping))
temp.update(mapping[5:7])
assert list(temp.items()) == mapping
def test_update2():
mapping = [(val, pos) for pos, val in enumerate(string.ascii_lowercase)]
temp = SortedDict()
temp.update(**dict(mapping))
assert list(temp.items()) == mapping
def test_repr():
temp = SortedDict({'alice': 3, 'bob': 1, 'carol': 2, 'dave': 4})
assert repr(temp) == "SortedDict({'alice': 3, 'bob': 1, 'carol': 2, 'dave': 4})"
class Identity(object):
def __call__(self, value):
return value
def __repr__(self):
return 'identity'
def test_repr_recursion():
temp = SortedDict(Identity(), {'alice': 3, 'bob': 1, 'carol': 2, 'dave': 4})
temp['bob'] = temp
assert repr(temp) == "SortedDict(identity, {'alice': 3, 'bob': ..., 'carol': 2, 'dave': 4})"
def test_repr_subclass():
class CustomSortedDict(SortedDict):
pass
temp = CustomSortedDict({'alice': 3, 'bob': 1, 'carol': 2, 'dave': 4})
assert repr(temp) == "CustomSortedDict({'alice': 3, 'bob': 1, 'carol': 2, 'dave': 4})"
def test_index():
mapping = [(val, pos) for pos, val in enumerate(string.ascii_lowercase)]
temp = SortedDict(mapping)
assert temp.index('a') == 0
assert temp.index('f', 3, -3) == 5
def test_iloc():
mapping = [(val, pos) for pos, val in enumerate(string.ascii_lowercase)]
temp = SortedDict(mapping)
assert len(temp.iloc) == 26
assert temp.iloc[0] == 'a'
assert temp.iloc[-1] == 'z'
assert temp.iloc[-3:] == ['x', 'y', 'z']
del temp.iloc[0]
assert temp.iloc[0] == 'b'
del temp.iloc[-3:]
assert temp.iloc[-1] == 'w'
def test_index_key():
temp = SortedDict(negate, ((val, val) for val in range(100)))
temp._reset(7)
assert all(temp.index(val) == (99 - val) for val in range(100))
def test_bisect():
mapping = [(val, pos) for pos, val in enumerate(string.ascii_lowercase)]
temp = SortedDict(mapping)
assert temp.bisect_left('a') == 0
assert temp.bisect_right('f') == 6
assert temp.bisect('f') == 6
def test_bisect_key():
temp = SortedDict(modulo, ((val, val) for val in range(100)))
temp._reset(7)
assert all(temp.bisect(val) == ((val % 10) + 1) * 10 for val in range(100))
assert all(temp.bisect_right(val) == ((val % 10) + 1) * 10 for val in range(100))
assert all(temp.bisect_left(val) == (val % 10) * 10 for val in range(100))
def test_bisect_key2():
temp = SortedDict(modulo, ((val, val) for val in range(100)))
temp._reset(7)
assert all(temp.bisect_key(val) == ((val % 10) + 1) * 10 for val in range(10))
assert all(temp.bisect_key_right(val) == ((val % 10) + 1) * 10 for val in range(10))
assert all(temp.bisect_key_left(val) == (val % 10) * 10 for val in range(10))
def test_keysview():
mapping = [(val, pos) for pos, val in enumerate(string.ascii_lowercase)]
temp = SortedDict(mapping[:13])
keys = temp.keys()
assert len(keys) == 13
assert 'a' in keys
assert list(keys) == [val for val, pos in mapping[:13]]
assert keys[0] == 'a'
assert list(reversed(keys)) == list(reversed(string.ascii_lowercase[:13]))
assert keys.index('f') == 5
assert keys.count('m') == 1
assert keys.count('0') == 0
assert keys.isdisjoint(['1', '2', '3'])
temp.update(mapping[13:])
assert len(keys) == 26
assert 'z' in keys
assert list(keys) == [val for val, pos in mapping]
that = dict(mapping)
that_keys = get_keysview(that)
assert keys == that_keys
assert not (keys != that_keys)
assert not (keys < that_keys)
assert not (keys > that_keys)
assert keys <= that_keys
assert keys >= that_keys
assert list(keys & that_keys) == [val for val, pos in mapping]
assert list(keys | that_keys) == [val for val, pos in mapping]
assert list(keys - that_keys) == []
assert list(keys ^ that_keys) == []
keys = SortedDict(mapping[:2]).keys()
assert repr(keys) == "SortedKeysView(SortedDict({'a': 0, 'b': 1}))"
def test_valuesview():
mapping = [(val, pos) for pos, val in enumerate(string.ascii_lowercase)]
temp = SortedDict(mapping[:13])
values = temp.values()
assert len(values) == 13
assert 0 in values
assert list(values) == [pos for val, pos in mapping[:13]]
assert values[0] == 0
assert values[-3:] == [10, 11, 12]
assert list(reversed(values)) == list(reversed(range(13)))
assert values.index(5) == 5
assert values.count(10) == 1
temp.update(mapping[13:])
assert len(values) == 26
assert 25 in values
assert list(values) == [pos for val, pos in mapping]
values = SortedDict(mapping[:2]).values()
assert repr(values) == "SortedValuesView(SortedDict({'a': 0, 'b': 1}))"
def test_values_view_index():
mapping = [(val, pos) for pos, val in enumerate(string.ascii_lowercase)]
temp = SortedDict(mapping[:13])
values = temp.values()
with pytest.raises(ValueError):
values.index(100)
def test_itemsview():
mapping = [(val, pos) for pos, val in enumerate(string.ascii_lowercase)]
temp = SortedDict(mapping[:13])
items = temp.items()
assert len(items) == 13
assert ('a', 0) in items
assert list(items) == mapping[:13]
assert items[0] == ('a', 0)
assert items[-3:] == [('k', 10), ('l', 11), ('m', 12)]
assert list(reversed(items)) == list(reversed(mapping[:13]))
assert items.index(('f', 5)) == 5
assert items.count(('m', 12)) == 1
assert items.isdisjoint([('0', 26), ('1', 27)])
assert not items.isdisjoint([('a', 0), ('b', 1)])
temp.update(mapping[13:])
assert len(items) == 26
assert ('z', 25) in items
assert list(items) == mapping
that = dict(mapping)
that_items = get_itemsview(that)
assert items == that_items
assert not (items != that_items)
assert not (items < that_items)
assert not (items > that_items)
assert items <= that_items
assert items >= that_items
assert list(items & that_items) == mapping
assert list(items | that_items) == mapping
assert list(items - that_items) == []
assert list(items ^ that_items) == []
items = SortedDict(mapping[:2]).items()
assert repr(items) == "SortedItemsView(SortedDict({'a': 0, 'b': 1}))"
def test_items_view_index():
mapping = [(val, pos) for pos, val in enumerate(string.ascii_lowercase)]
temp = SortedDict(mapping[:13])
items = temp.items()
with pytest.raises(ValueError):
items.index(('f', 100))
def test_pickle():
import pickle
alpha = SortedDict(negate, zip(range(10000), range(10000)))
alpha._reset(500)
beta = pickle.loads(pickle.dumps(alpha))
assert alpha == beta
assert alpha._key == beta._key
| # -*- coding: utf-8 -*-
import random, string
from .context import sortedcontainers
from sortedcontainers import SortedDict
import pytest
from sys import hexversion
if hexversion < 0x03000000:
range = xrange
def negate(value):
return -value
def modulo(value):
return value % 10
def get_keysview(dic):
if hexversion < 0x03000000:
return dic.viewkeys()
else:
return dic.keys()
def get_itemsview(dic):
if hexversion < 0x03000000:
return dic.viewitems()
else:
return dic.items()
def test_init():
temp = SortedDict()
assert temp.key is None
temp._check()
def test_init_key():
temp = SortedDict(negate)
assert temp.key == negate
temp._check()
def test_init_args():
temp = SortedDict([('a', 1), ('b', 2)])
assert len(temp) == 2
assert temp['a'] == 1
assert temp['b'] == 2
temp._check()
def test_init_kwargs():
temp = SortedDict(a=1, b=2)
assert len(temp) == 2
assert temp['a'] == 1
assert temp['b'] == 2
temp._check()
def test_clear():
mapping = [(val, pos) for pos, val in enumerate(string.ascii_lowercase)]
temp = SortedDict(mapping)
assert len(temp) == 26
assert list(temp.items()) == mapping
temp.clear()
assert len(temp) == 0
def test_contains():
mapping = [(val, pos) for pos, val in enumerate(string.ascii_lowercase)]
temp = SortedDict(mapping)
assert all((val in temp) for val in string.ascii_lowercase)
def test_delitem():
mapping = [(val, pos) for pos, val in enumerate(string.ascii_lowercase)]
temp = SortedDict(mapping)
del temp['a']
temp._check()
def test_getitem():
mapping = [(val, pos) for pos, val in enumerate(string.ascii_lowercase)]
temp = SortedDict(mapping)
assert all((temp[val] == pos) for pos, val in enumerate(string.ascii_lowercase))
def test_eq():
mapping = [(val, pos) for pos, val in enumerate(string.ascii_lowercase)]
temp1 = SortedDict(mapping)
temp2 = SortedDict(mapping)
assert temp1 == temp2
assert not (temp1 != temp2)
temp2['a'] = 100
assert temp1 != temp2
assert not (temp1 == temp2)
del temp2['a']
assert temp1 != temp2
assert not (temp1 == temp2)
temp2['zz'] = 0
assert temp1 != temp2
assert not (temp1 == temp2)
def test_iter():
mapping = [(val, pos) for pos, val in enumerate(string.ascii_lowercase)]
temp = SortedDict(mapping)
assert all(lhs == rhs for lhs, rhs in zip(temp, string.ascii_lowercase))
def test_iter_key():
temp = SortedDict(negate, ((val, val) for val in range(100)))
temp._reset(7)
assert all(lhs == rhs for lhs, rhs in zip(temp, reversed(range(100))))
def test_reversed():
mapping = [(val, pos) for pos, val in enumerate(string.ascii_lowercase)]
temp = SortedDict(mapping)
assert all(lhs == rhs for lhs, rhs in
zip(reversed(temp), reversed(string.ascii_lowercase)))
def test_reversed_key():
temp = SortedDict(modulo, ((val, val) for val in range(100)))
temp._reset(7)
values = sorted(range(100), key=modulo)
assert all(lhs == rhs for lhs, rhs in zip(reversed(temp), reversed(values)))
def test_islice():
mapping = [(val, pos) for pos, val in enumerate(string.ascii_lowercase)]
temp = SortedDict(mapping)
temp._reset(7)
for start in range(30):
for stop in range(30):
assert list(temp.islice(start, stop)) == list(string.ascii_lowercase[start:stop])
def test_irange():
mapping = [(val, pos) for pos, val in enumerate(string.ascii_lowercase)]
temp = SortedDict(mapping)
temp._reset(7)
for start in range(26):
for stop in range(start + 1, 26):
result = list(string.ascii_lowercase[start:stop])
assert list(temp.irange(result[0], result[-1])) == result
def test_irange_key():
temp = SortedDict(modulo, ((val, val) for val in range(100)))
temp._reset(7)
values = sorted(range(100), key=modulo)
for start in range(10):
for stop in range(start, 10):
result = list(temp.irange_key(start, stop))
assert result == values[(start * 10):((stop + 1) * 10)]
def test_len():
mapping = [(val, pos) for pos, val in enumerate(string.ascii_lowercase)]
temp = SortedDict(mapping)
assert len(temp) == 26
def test_setitem():
temp = SortedDict()
for pos, key in enumerate(string.ascii_lowercase):
temp[key] = pos
temp._check()
assert len(temp) == 26
for pos, key in enumerate(string.ascii_lowercase):
temp[key] = pos
temp._check()
assert len(temp) == 26
def test_copy():
mapping = [(val, pos) for pos, val in enumerate(string.ascii_lowercase)]
temp = SortedDict(mapping)
dup = temp.copy()
assert len(temp) == 26
assert len(dup) == 26
dup.clear()
assert len(temp) == 26
assert len(dup) == 0
def test_copy_copy():
import copy
mapping = [(val, pos) for pos, val in enumerate(string.ascii_lowercase)]
temp = SortedDict(mapping)
dup = copy.copy(temp)
assert len(temp) == 26
assert len(dup) == 26
dup.clear()
assert len(temp) == 26
assert len(dup) == 0
def test_fromkeys():
mapping = [(val, pos) for pos, val in enumerate(string.ascii_lowercase)]
temp = SortedDict.fromkeys(mapping, 1)
assert all(temp[key] == 1 for key in temp)
def test_get():
mapping = [(val, pos) for pos, val in enumerate(string.ascii_lowercase)]
temp = SortedDict(mapping)
assert temp.get('a') == 0
assert temp.get('A', -1) == -1
def test_has_key():
if hexversion > 0x03000000:
return
mapping = [(val, pos) for pos, val in enumerate(string.ascii_lowercase)]
temp = SortedDict(mapping)
assert temp.has_key('a')
def test_items():
mapping = [(val, pos) for pos, val in enumerate(string.ascii_lowercase)]
temp = SortedDict(mapping)
assert list(temp.items()) == mapping
def test_keys():
mapping = [(val, pos) for pos, val in enumerate(string.ascii_lowercase)]
temp = SortedDict(mapping)
assert list(temp.keys()) == [key for key, pos in mapping]
def test_values():
mapping = [(val, pos) for pos, val in enumerate(string.ascii_lowercase)]
temp = SortedDict(mapping)
assert list(temp.values()) == [pos for key, pos in mapping]
def test_iterkeys():
temp = SortedDict()
with pytest.raises(AttributeError):
temp.iterkeys
def test_notgiven():
assert repr(SortedDict._SortedDict__not_given) == '<not-given>'
def test_pop():
mapping = [(val, pos) for pos, val in enumerate(string.ascii_lowercase)]
temp = SortedDict(mapping)
assert temp.pop('a') == 0
assert temp.pop('a', -1) == -1
def test_pop2():
mapping = [(val, pos) for pos, val in enumerate(string.ascii_lowercase)]
temp = SortedDict(mapping)
with pytest.raises(KeyError):
temp.pop('A')
def test_popitem():
mapping = [(val, pos) for pos, val in enumerate(string.ascii_lowercase)]
temp = SortedDict(mapping)
assert temp.popitem() == ('z', 25)
def test_popitem2():
temp = SortedDict()
with pytest.raises(KeyError):
temp.popitem()
def test_popitem3():
mapping = [(val, pos) for pos, val in enumerate(string.ascii_lowercase)]
temp = SortedDict(mapping)
assert temp.popitem(index=0) == ('a', 0)
def test_peekitem():
mapping = [(val, pos) for pos, val in enumerate(string.ascii_lowercase)]
temp = SortedDict(mapping)
assert temp.peekitem() == ('z', 25)
assert temp.peekitem(0) == ('a', 0)
assert temp.peekitem(index=4) == ('e', 4)
def test_peekitem2():
mapping = [(val, pos) for pos, val in enumerate(string.ascii_lowercase)]
temp = SortedDict(mapping)
with pytest.raises(IndexError):
temp.peekitem(index=100)
def test_setdefault():
mapping = [(val, pos) for pos, val in enumerate(string.ascii_lowercase)]
temp = SortedDict(mapping)
assert temp.setdefault('a', -1) == 0
assert temp['a'] == 0
assert temp.setdefault('A', -1) == -1
def test_update():
mapping = [(val, pos) for pos, val in enumerate(string.ascii_lowercase)]
temp = SortedDict()
temp.update()
temp.update(mapping)
temp.update(dict(mapping))
temp.update(mapping[5:7])
assert list(temp.items()) == mapping
def test_update2():
mapping = [(val, pos) for pos, val in enumerate(string.ascii_lowercase)]
temp = SortedDict()
temp.update(**dict(mapping))
assert list(temp.items()) == mapping
def test_repr():
temp = SortedDict({'alice': 3, 'bob': 1, 'carol': 2, 'dave': 4})
assert repr(temp) == "SortedDict({'alice': 3, 'bob': 1, 'carol': 2, 'dave': 4})"
class Identity(object):
def __call__(self, value):
return value
def __repr__(self):
return 'identity'
def test_repr_recursion():
temp = SortedDict(Identity(), {'alice': 3, 'bob': 1, 'carol': 2, 'dave': 4})
temp['bob'] = temp
assert repr(temp) == "SortedDict(identity, {'alice': 3, 'bob': ..., 'carol': 2, 'dave': 4})"
def test_repr_subclass():
class CustomSortedDict(SortedDict):
pass
temp = CustomSortedDict({'alice': 3, 'bob': 1, 'carol': 2, 'dave': 4})
assert repr(temp) == "CustomSortedDict({'alice': 3, 'bob': 1, 'carol': 2, 'dave': 4})"
def test_index():
mapping = [(val, pos) for pos, val in enumerate(string.ascii_lowercase)]
temp = SortedDict(mapping)
assert temp.index('a') == 0
assert temp.index('f', 3, -3) == 5
def test_iloc():
mapping = [(val, pos) for pos, val in enumerate(string.ascii_lowercase)]
temp = SortedDict(mapping)
assert len(temp.iloc) == 26
assert temp.iloc[0] == 'a'
assert temp.iloc[-1] == 'z'
assert temp.iloc[-3:] == ['x', 'y', 'z']
del temp.iloc[0]
assert temp.iloc[0] == 'b'
del temp.iloc[-3:]
assert temp.iloc[-1] == 'w'
def test_index_key():
temp = SortedDict(negate, ((val, val) for val in range(100)))
temp._reset(7)
assert all(temp.index(val) == (99 - val) for val in range(100))
def test_bisect():
mapping = [(val, pos) for pos, val in enumerate(string.ascii_lowercase)]
temp = SortedDict(mapping)
assert temp.bisect_left('a') == 0
assert temp.bisect_right('f') == 6
assert temp.bisect('f') == 6
def test_bisect_key():
temp = SortedDict(modulo, ((val, val) for val in range(100)))
temp._reset(7)
assert all(temp.bisect(val) == ((val % 10) + 1) * 10 for val in range(100))
assert all(temp.bisect_right(val) == ((val % 10) + 1) * 10 for val in range(100))
assert all(temp.bisect_left(val) == (val % 10) * 10 for val in range(100))
def test_bisect_key2():
temp = SortedDict(modulo, ((val, val) for val in range(100)))
temp._reset(7)
assert all(temp.bisect_key(val) == ((val % 10) + 1) * 10 for val in range(10))
assert all(temp.bisect_key_right(val) == ((val % 10) + 1) * 10 for val in range(10))
assert all(temp.bisect_key_left(val) == (val % 10) * 10 for val in range(10))
def test_keysview():
mapping = [(val, pos) for pos, val in enumerate(string.ascii_lowercase)]
temp = SortedDict(mapping[:13])
keys = temp.keys()
assert len(keys) == 13
assert 'a' in keys
assert list(keys) == [val for val, pos in mapping[:13]]
assert keys[0] == 'a'
assert list(reversed(keys)) == list(reversed(string.ascii_lowercase[:13]))
assert keys.index('f') == 5
assert keys.count('m') == 1
assert keys.count('0') == 0
assert keys.isdisjoint(['1', '2', '3'])
temp.update(mapping[13:])
assert len(keys) == 26
assert 'z' in keys
assert list(keys) == [val for val, pos in mapping]
that = dict(mapping)
that_keys = get_keysview(that)
assert keys == that_keys
assert not (keys != that_keys)
assert not (keys < that_keys)
assert not (keys > that_keys)
assert keys <= that_keys
assert keys >= that_keys
assert list(keys & that_keys) == [val for val, pos in mapping]
assert list(keys | that_keys) == [val for val, pos in mapping]
assert list(keys - that_keys) == []
assert list(keys ^ that_keys) == []
keys = SortedDict(mapping[:2]).keys()
assert repr(keys) == "SortedKeysView(SortedDict({'a': 0, 'b': 1}))"
def test_valuesview():
mapping = [(val, pos) for pos, val in enumerate(string.ascii_lowercase)]
temp = SortedDict(mapping[:13])
values = temp.values()
assert len(values) == 13
assert 0 in values
assert list(values) == [pos for val, pos in mapping[:13]]
assert values[0] == 0
assert values[-3:] == [10, 11, 12]
assert list(reversed(values)) == list(reversed(range(13)))
assert values.index(5) == 5
assert values.count(10) == 1
temp.update(mapping[13:])
assert len(values) == 26
assert 25 in values
assert list(values) == [pos for val, pos in mapping]
values = SortedDict(mapping[:2]).values()
assert repr(values) == "SortedValuesView(SortedDict({'a': 0, 'b': 1}))"
def test_values_view_index():
mapping = [(val, pos) for pos, val in enumerate(string.ascii_lowercase)]
temp = SortedDict(mapping[:13])
values = temp.values()
with pytest.raises(ValueError):
values.index(100)
def test_itemsview():
mapping = [(val, pos) for pos, val in enumerate(string.ascii_lowercase)]
temp = SortedDict(mapping[:13])
items = temp.items()
assert len(items) == 13
assert ('a', 0) in items
assert list(items) == mapping[:13]
assert items[0] == ('a', 0)
assert items[-3:] == [('k', 10), ('l', 11), ('m', 12)]
assert list(reversed(items)) == list(reversed(mapping[:13]))
assert items.index(('f', 5)) == 5
assert items.count(('m', 12)) == 1
assert items.isdisjoint([('0', 26), ('1', 27)])
assert not items.isdisjoint([('a', 0), ('b', 1)])
temp.update(mapping[13:])
assert len(items) == 26
assert ('z', 25) in items
assert list(items) == mapping
that = dict(mapping)
that_items = get_itemsview(that)
assert items == that_items
assert not (items != that_items)
assert not (items < that_items)
assert not (items > that_items)
assert items <= that_items
assert items >= that_items
assert list(items & that_items) == mapping
assert list(items | that_items) == mapping
assert list(items - that_items) == []
assert list(items ^ that_items) == []
items = SortedDict(mapping[:2]).items()
assert repr(items) == "SortedItemsView(SortedDict({'a': 0, 'b': 1}))"
def test_items_view_index():
mapping = [(val, pos) for pos, val in enumerate(string.ascii_lowercase)]
temp = SortedDict(mapping[:13])
items = temp.items()
with pytest.raises(ValueError):
items.index(('f', 100))
def test_pickle():
import pickle
alpha = SortedDict(negate, zip(range(10000), range(10000)))
alpha._reset(500)
beta = pickle.loads(pickle.dumps(alpha))
assert alpha == beta
assert alpha._key == beta._key | en | 0.769321 | # -*- coding: utf-8 -*- | 2.423865 | 2 |
move.py | martinogden/chessval | 0 | 6632740 | <reponame>martinogden/chessval<filename>move.py
class flags(object):
DPUSH = 0x01
KCASTLE = 0x02
QCASTLE = 0x04
CAPTURE = 0x08
EP = 0x10
def new(frm, to, cpiece=-1, flags=0x00, cr=0X0F, promotion=None):
return (frm, to, cpiece, flags, cr, promotion)
| class flags(object):
DPUSH = 0x01
KCASTLE = 0x02
QCASTLE = 0x04
CAPTURE = 0x08
EP = 0x10
def new(frm, to, cpiece=-1, flags=0x00, cr=0X0F, promotion=None):
return (frm, to, cpiece, flags, cr, promotion) | none | 1 | 2.300516 | 2 |
|
modeling/model_utils/unet_parts.py | UESTC-Liuxin/SkmtSeg | 2 | 6632741 | """ Parts of the U-Net model """
import torch
import torch.nn as nn
import torch.nn.functional as F
from utils.utils import init_weights
class DoubleConv(nn.Module):
"""(convolution => [BN] => ReLU) * 2"""
    def __init__(self, in_channels, out_channels, BatchNorm, mid_channels=None):
super().__init__()
if not mid_channels:
mid_channels = out_channels
self.double_conv = nn.Sequential(
nn.Conv2d(in_channels, mid_channels, kernel_size=3, padding=1),
BatchNorm(mid_channels),
nn.ReLU(inplace=True),
nn.Conv2d(mid_channels, out_channels, kernel_size=3, padding=1),
BatchNorm(out_channels),
nn.ReLU(inplace=True)
)
def forward(self, x):
return self.double_conv(x)
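# Minimal shape sketch for DoubleConv (illustrative only; nn.BatchNorm2d is an
# assumed choice for the BatchNorm argument): two 3x3 convs with padding=1 keep
# the spatial size and only change the channel count.
def _double_conv_shape_demo():
    block = DoubleConv(in_channels=3, out_channels=16, BatchNorm=nn.BatchNorm2d)
    x = torch.randn(1, 3, 64, 64)
    y = block(x)
    assert y.shape == (1, 16, 64, 64)
    return y.shape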
class Up(nn.Module):
"""Upscaling then double conv"""
def __init__(self, in_channels, out_channels,BatchNorm):
super().__init__()
self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
self.conv = DoubleConv(in_channels, out_channels, BatchNorm, in_channels // 2)
def forward(self, x1, x2):
x1 = self.up(x1)
# input is CHW
diffY = x2.size()[2] - x1.size()[2]
diffX = x2.size()[3] - x1.size()[3]
x1 = F.pad(x1, [diffX // 2, diffX - diffX // 2,
diffY // 2, diffY - diffY // 2])
x = torch.cat([x2, x1], dim=1)
return self.conv(x)
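# Minimal shape sketch for Up (illustrative only; the sizes are assumed example
# values chosen to exercise the padding branch): x1 is upsampled by 2, padded so
# odd sizes still align with the skip tensor x2, then concatenated on the
# channel axis before DoubleConv.
def _up_shape_demo():
    up = Up(in_channels=32, out_channels=16, BatchNorm=nn.BatchNorm2d)
    x1 = torch.randn(1, 16, 17, 17)  # decoder feature with an odd spatial size
    x2 = torch.randn(1, 16, 35, 35)  # skip connection from the encoder
    out = up(x1, x2)
    assert out.shape == (1, 16, 35, 35)
    return out.shape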
class OutConv(nn.Module):
def __init__(self, in_channels, out_channels):
super(OutConv, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=1)
def forward(self, x):
return self.conv(x)
class unetConv2(nn.Module):
def __init__(self, in_size, out_size, is_batchnorm, n=2, ks=3, stride=1, padding=1):
super(unetConv2, self).__init__()
self.n = n
self.ks = ks
self.stride = stride
self.padding = padding
s = stride
p = padding
if is_batchnorm:
for i in range(1, n + 1):
conv = nn.Sequential(nn.Conv2d(in_size, out_size, ks, s, p),
nn.BatchNorm2d(out_size),
nn.ReLU(inplace=True), )
setattr(self, 'conv%d' % i, conv)
in_size = out_size
else:
for i in range(1, n + 1):
conv = nn.Sequential(nn.Conv2d(in_size, out_size, ks, s, p),
nn.ReLU(inplace=True), )
setattr(self, 'conv%d' % i, conv)
in_size = out_size
# initialise the blocks
for m in self.children():
init_weights(m, init_type='kaiming')
def forward(self, inputs):
x = inputs
for i in range(1, self.n + 1):
conv = getattr(self, 'conv%d' % i)
x = conv(x)
return x
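# Minimal sketch for unetConv2 (illustrative only; the sizes are assumptions):
# the n conv blocks are registered via setattr, so they are reachable as the
# attributes 'conv1', 'conv2', ... and still count as submodules.
def _unet_conv2_demo():
    block = unetConv2(in_size=3, out_size=8, is_batchnorm=True, n=2)
    assert hasattr(block, 'conv1') and hasattr(block, 'conv2')
    y = block(torch.randn(1, 3, 32, 32))
    assert y.shape == (1, 8, 32, 32)
    return y.shape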
class unetUp(nn.Module):
def __init__(self, in_size, out_size, is_deconv, n_concat=2):
super(unetUp, self).__init__()
self.conv = unetConv2(in_size + (n_concat - 2) * out_size, out_size, False)
if is_deconv:
self.up = nn.ConvTranspose2d(in_size, out_size, kernel_size=2, stride=2, padding=0)
else:
self.up = nn.Sequential(
nn.UpsamplingBilinear2d(scale_factor=2),
nn.Conv2d(in_size, out_size, 1))
# initialise the blocks
for m in self.children():
if m.__class__.__name__.find('unetConv2') != -1: continue
init_weights(m, init_type='kaiming')
def forward(self, high_feature, *low_feature):
outputs0 = self.up(high_feature)
for feature in low_feature:
outputs0 = torch.cat([outputs0, feature], 1)
return self.conv(outputs0) | """ Parts of the U-Net model """
import torch
import torch.nn as nn
import torch.nn.functional as F
from utils.utils import init_weights
class DoubleConv(nn.Module):
"""(convolution => [BN] => ReLU) * 2"""
    def __init__(self, in_channels, out_channels, BatchNorm, mid_channels=None):
super().__init__()
if not mid_channels:
mid_channels = out_channels
self.double_conv = nn.Sequential(
nn.Conv2d(in_channels, mid_channels, kernel_size=3, padding=1),
BatchNorm(mid_channels),
nn.ReLU(inplace=True),
nn.Conv2d(mid_channels, out_channels, kernel_size=3, padding=1),
BatchNorm(out_channels),
nn.ReLU(inplace=True)
)
def forward(self, x):
return self.double_conv(x)
class Up(nn.Module):
"""Upscaling then double conv"""
def __init__(self, in_channels, out_channels,BatchNorm):
super().__init__()
self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
self.conv = DoubleConv(in_channels, out_channels, BatchNorm, in_channels // 2)
def forward(self, x1, x2):
x1 = self.up(x1)
# input is CHW
diffY = x2.size()[2] - x1.size()[2]
diffX = x2.size()[3] - x1.size()[3]
x1 = F.pad(x1, [diffX // 2, diffX - diffX // 2,
diffY // 2, diffY - diffY // 2])
x = torch.cat([x2, x1], dim=1)
return self.conv(x)
class OutConv(nn.Module):
def __init__(self, in_channels, out_channels):
super(OutConv, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=1)
def forward(self, x):
return self.conv(x)
class unetConv2(nn.Module):
def __init__(self, in_size, out_size, is_batchnorm, n=2, ks=3, stride=1, padding=1):
super(unetConv2, self).__init__()
self.n = n
self.ks = ks
self.stride = stride
self.padding = padding
s = stride
p = padding
if is_batchnorm:
for i in range(1, n + 1):
conv = nn.Sequential(nn.Conv2d(in_size, out_size, ks, s, p),
nn.BatchNorm2d(out_size),
nn.ReLU(inplace=True), )
setattr(self, 'conv%d' % i, conv)
in_size = out_size
else:
for i in range(1, n + 1):
conv = nn.Sequential(nn.Conv2d(in_size, out_size, ks, s, p),
nn.ReLU(inplace=True), )
setattr(self, 'conv%d' % i, conv)
in_size = out_size
# initialise the blocks
for m in self.children():
init_weights(m, init_type='kaiming')
def forward(self, inputs):
x = inputs
for i in range(1, self.n + 1):
conv = getattr(self, 'conv%d' % i)
x = conv(x)
return x
class unetUp(nn.Module):
def __init__(self, in_size, out_size, is_deconv, n_concat=2):
super(unetUp, self).__init__()
self.conv = unetConv2(in_size + (n_concat - 2) * out_size, out_size, False)
if is_deconv:
self.up = nn.ConvTranspose2d(in_size, out_size, kernel_size=2, stride=2, padding=0)
else:
self.up = nn.Sequential(
nn.UpsamplingBilinear2d(scale_factor=2),
nn.Conv2d(in_size, out_size, 1))
# initialise the blocks
for m in self.children():
if m.__class__.__name__.find('unetConv2') != -1: continue
init_weights(m, init_type='kaiming')
def forward(self, high_feature, *low_feature):
outputs0 = self.up(high_feature)
for feature in low_feature:
outputs0 = torch.cat([outputs0, feature], 1)
return self.conv(outputs0) | en | 0.584623 | Parts of the U-Net model (convolution => [BN] => ReLU) * 2 Upscaling then double conv # input is CHW # initialise the blocks # initialise the blocks | 2.786952 | 3 |
scripts/previousScripts-2015-12-25/approxMC_boolector_bvurem.py | mistryrakesh/SMTApproxMC | 0 | 6632742 | #!/home/rakeshmistry/bin/Python-3.4.3/bin/python3
# @author: <NAME> - 'inspire'
# @date: 2015-06-14
import sys
import re
import os
import math
import random
import functools
import collections
import numpy
################################################################################
# Functions to generate SMT2 expressions
def extractExpr(m, n, var):
return "((_ extract " + str(m) + " " + str(n) + ") " + str(var) + ")"
def xorExpr(var1, var2):
return "(xor " + str(var1) + " " + str(var2) + ")"
def zeroExtendExpr(bitWidth, varName):
return "((_ zero_extend " + str(bitWidth) + ") " + varName + ")"
def bvmulExpr(var1, var2):
return "(bvmul " + str(var1) + " " + str(var2) + ")"
def bvaddExpr(var1, var2):
return "(bvadd " + str(var1) + " " + str(var2) + ")"
def eqExpr(var1, var2):
return "(= " + str(var1) + " " + str(var2) + ")"
def constExpr(num, bitWidth):
return "(_ bv" + str(num) + " " + str(bitWidth) + ")"
def andExpr(var1, var2):
return "(and " + str(var1) + " " + str(var2) + ")"
def bvuremExpr(var1, var2):
return "(bvurem " + str(var1) + " " + str(var2) + ")"
################################################################################
# Function: populatePrimesMap
# @param: primesMap - file containing primes.
# Each line is of the form -- k prime
# where for every number 'k' the value of 'prime' is smallest prime > 2^k
#
# returns map of prime numbers for 2^k (1 <= k <= 100)
def populatePrimesMap(primesFile):
primesMap = {}
for line in primesFile:
strList = line.split()
k = int(strList[0])
primesMap[k] = int(strList[1])
return primesMap
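# Minimal parsing example (illustrative only; the two entries are made-up stand-ins
# for the real primes file): a file of the documented "k prime" form maps each k
# to the smallest prime above 2^k, e.g. 5 for k=2 and 11 for k=3.
def _populate_primes_map_example():
    import io
    fake_primes_file = io.StringIO("2 5\n3 11\n")
    assert populatePrimesMap(fake_primes_file) == {2: 5, 3: 11}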
# Function: populateEpsilonMap
# @param: probFile - file containing epsilon (tolerance) values.
# Each line is of the form -- k:epsilon
# where 'epsilon' is the tolerance used when the maximum bitwidth is 'k'
#
# returns map of epsilon values keyed by bitwidth
def populateEpsilonMap(probFile):
epsilonMap = {}
for line in probFile:
strList = line.rstrip().split(":")
k = int(strList[0])
epsilonMap[k] = float(strList[1])
return epsilonMap
# Function: computeNewBitwidth
def computeNewBitwidth(k, slices, varMap):
totalBitwidth = 0
for key in varMap.keys():
totalBitwidth += math.ceil(float(varMap[key]) / k)
newBitwidth = k + int(math.ceil(math.log(slices * totalBitwidth, 2))) + 1 # +1 since 's' can be upto 'prime-1'
return newBitwidth
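# Worked example (illustrative only, assumed widths): for an 8-bit and a 4-bit
# variable with k=4 and slices=2, totalBitwidth is ceil(8/4) + ceil(4/4) = 3,
# so newBitwidth = 4 + ceil(log2(2*3)) + 1 = 8.
def _compute_new_bitwidth_example():
    assert computeNewBitwidth(4, 2, {'x': 8, 'y': 4}) == 8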
# Function: generateEquationConstraint
# @param: varmap - a map of variables with key as variable name and value being
# its width
# @param: maxBitwidth - maximum bitwidth
# @param: slices - number of slices for each variable to create
#
# Generates an equation of the form:
# a1x1 + a2x2 + ... = s*prime + r
def generateEquationConstraint(varMap, primesMap, maxBitwidth, slices):
generateEquationConstraint.counter += 1
k = int(math.ceil(float(maxBitwidth) / slices))
twoPowerK = 2 ** k
prime = primesMap[k]
newBitwidth = computeNewBitwidth(k, slices, varMap)
primeCoeff = "temp_prime_coeff_" + str(generateEquationConstraint.counter)
primeCoeffDecl = "(declare-fun " + primeCoeff + " () (_ BitVec " + str(newBitwidth - (k + 1)) + "))\n"
bvmulList = []
for key in varMap.keys():
originalKey = key
if varMap[key] != maxBitwidth:
key = zeroExtendExpr(maxBitwidth - varMap[key], key)
assert maxBitwidth >= slices
# find slice widths of variable
keyDivWidth = int(maxBitwidth / slices)
bitRemaining = maxBitwidth % slices
# list containing width of each variable slice
keyDivWidthList = [keyDivWidth] * slices
for i in range(bitRemaining):
keyDivWidthList[i] += 1
coeff = []
for i in range(slices):
coeff.append(random.randint(0, twoPowerK - 1))
keyDivs = []
msbPos = maxBitwidth - 1
remSlices = 0
for i in range(slices):
lsbPos = msbPos - keyDivWidthList[i] + 1
if lsbPos < varMap[originalKey]:
keyDivs.append(extractExpr(msbPos, lsbPos, key))
remSlices += 1
msbPos = msbPos - keyDivWidthList[i]
zxtndKeyDivs = []
for i in range(remSlices):
zxtndKeyDivs.append(zeroExtendExpr(newBitwidth - keyDivWidthList[i], keyDivs[i]))
bvmulStrs = []
for i in range(remSlices):
bvmulList.append(bvmulExpr(constExpr(coeff[i], newBitwidth), zxtndKeyDivs[i]))
lhsStr = functools.reduce(lambda x, y: bvaddExpr(x, y), bvmulList)
lhsStr = bvuremExpr(lhsStr, constExpr(prime, newBitwidth))
r = random.randint(0, prime - 1)
rhsStr = constExpr(r, newBitwidth)
constraint = eqExpr(lhsStr, rhsStr)
return constraint, primeCoeffDecl, prime
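# Minimal sketch of the constraint shape (illustrative only; the primes map entry
# {2: 5} stands in for the real primes file, 5 being the smallest prime > 2^2).
# For one 4-bit variable split into 2 slices (k=2) the constraint looks like
#   (= (bvurem (bvadd (bvmul a1 hi) (bvmul a2 lo)) prime) r)
# with random coefficients a1, a2 and residue r.
def _generate_equation_constraint_example():
    # the counter attribute is normally initialised in main()
    generateEquationConstraint.counter = getattr(generateEquationConstraint, 'counter', 0)
    constraint, coeff_decl, prime = generateEquationConstraint({'x': 4}, {2: 5}, 4, 2)
    assert prime == 5
    assert constraint.startswith("(= (bvurem (bvadd (bvmul")
    return constraint, coeff_decl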
# Function: parseSmt2File
# @param: smt2File - input SMT2 file
# @return: varmap - a map containing as key the names of the variables and value as their bitwidth
# @return: smtFilePrefix - string containing the initial part of smt2File (until start of 'assert' in 'smt2File')
#
# Creates variable map and also copies the initial part of SMT2 file (until start of 'assert' in 'smt2File')
# This would later be appended with our constraints to create the new SMT2 file
def parseSmt2FileVariables(smt2File):
# create regex to specific lines
compiledVarPattern = re.compile("[ \t]*\(declare-fun")
compiledAssertPattern = re.compile("assert")
# read variable info in map
varMap = {}
assertLine = ""
scriptName = os.path.basename(__file__)
smtFilePrefix = "; [" + scriptName + "] Autogenerated from source file: " + smt2File.name + "\n"
for line in smt2File:
if compiledAssertPattern.search(line):
assertLine = line
break;
smtFilePrefix += line
if compiledVarPattern.search(line):
wordList = line.split()
varName = wordList[1]
varWidthStr = wordList[-1].rstrip(")")
if varWidthStr.isdigit():
varWidth = int(varWidthStr)
varMap[varName] = varWidth
return varMap, smtFilePrefix, assertLine
# Function: parseSmt2File
# @param: smt2File - input SMT2 file
# @param: newConstraints - string which is a SMT2 constraint
# @return: smtFileSuffix - string containing our constraints followed by rest of input file
#
# returns a string after our adding our constraints to the rest of the input file
def parseSmt2FileSuffix(smt2File, newConstraints):
compiledCheckSatPattern = re.compile("check-sat")
smtFileSuffix = ""
for line in smt2File:
if compiledCheckSatPattern.search(line):
smtFileSuffix += "(assert"
smtFileSuffix += " " + newConstraints + ")\n"
smtFileSuffix += line
break
smtFileSuffix += line
# write everything after '(check-sat)'
for line in smt2File:
smtFileSuffix += line
return smtFileSuffix
# Function: generateSMT2FileFromConstraints
def generateSMT2FileFromConstraints(smt2prefix, coeffDeclList, lastAssertLine, constraintList, smt2suffix, tempFileName):
outputSMT2File = open(tempFileName, "w")
outputSMT2File.write(smt2prefix)
for decl in coeffDeclList:
outputSMT2File.write(decl)
outputSMT2File.write("(assert")
strConstraints = functools.reduce(lambda x, y: andExpr(x, y), constraintList)
outputSMT2File.write(strConstraints)
outputSMT2File.write(")\n")
outputSMT2File.write(lastAssertLine)
outputSMT2File.write(smt2suffix)
outputSMT2File.close()
# Function: generateSMT1FromSMT2File
def generateSMT1FromSMT2File(smt2FileName, smt1FileName):
cmd = "boolector -ds1 -o " + smt1FileName + " " + smt2FileName
return os.system(cmd)
# Function: countSolutions
def countSolutions(smtResultsFileName):
smtResultsFile = open(smtResultsFileName, "r")
count = 0
for line in smtResultsFile:
if line == "sat\n":
count += 1
return count
def getCommonPrimesAndMedian(runResults, logFile):
commonPrimes = runResults[0][1]
for i in range(1, len(runResults)):
commonPrimes = commonPrimes & runResults[i][1]
    logFile.write("commonPrimes: " + str(list(commonPrimes.elements())) + "\n")
valList = []
for i in range(len(runResults)):
# subResult = runResults[i][1].subtract(commonPrimes)
subResult = runResults[i][1] - commonPrimes
if subResult == None:
valList.append(runResults[i][0])
else:
prod = 1
for key in subResult:
prod = prod * (key ** subResult[key])
valList.append(prod * runResults[i][0])
logFile.write("valList: " + str(valList) + "\n")
return (list(commonPrimes.elements()), numpy.median(valList))
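# Worked example (illustrative only, made-up run results and an in-memory log):
# the shared factor 3 is pulled out as the common prime, the leftover per-run
# products scale each solution count, valList becomes [15, 35, 4], median 15.
def _get_common_primes_and_median_example():
    import io
    runs = [(5, collections.Counter({3: 2})),
            (7, collections.Counter({3: 1, 5: 1})),
            (4, collections.Counter({3: 1}))]
    common, med = getCommonPrimesAndMedian(runs, io.StringIO())
    assert common == [3] and med == 15.0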
# Function: main
def main(argv):
generateEquationConstraint.counter = 0;
# check for correct number of arguments
scriptName = os.path.basename(__file__)
if len(argv) < 6:
sys.stderr.write("Error: Invalid arguments.\n")
sys.stderr.write(" [Usage]: " + scriptName + " <input_SMT2_file> <primes_file> <num_iterations> <log_file> <output_file>\n")
sys.exit(1)
# open files
inputSMTFile = open(argv[1], "r")
primesFile = open(argv[2], "r")
numIterations = int(argv[3])
logFile = open(argv[4], "w", 1)
finalOutputFile = open(argv[5], "w", 1)
# probMapFile = open(argv[3], "r")
primesMap = populatePrimesMap(primesFile)
# epsilonMap = populateEpsilonMap(probMapFile)
(varMap, smt2prefix, lastAssertLine) = parseSmt2FileVariables(inputSMTFile)
smt2suffix = parseSmt2FileSuffix(inputSMTFile, "true")
maxBitwidth = max(varMap.values())
# print("maxBitwidth: " + str(maxBitwidth))
# find pivot solutions
tempDir = os.getcwd() + "/temp_amc"
smtSolver = os.path.dirname(os.path.realpath(__file__)) + "/../boolector-mc/boolector/boolector"
if not os.path.exists(tempDir):
os.makedirs(tempDir)
timeout = 2400
minPivot = 1
epsilon = 0.8 # epsilonMap[maxBitwidth]
maxPivot = int(2*math.ceil(4.94*(1+1/epsilon)*(1+1/epsilon)))
# print("maxPivot: " + str(maxPivot))
scriptStartTime = os.times()
logFile.write("Script start time: " + str(scriptStartTime) + "\n")
logFile.write("maxBitwidth: " + str(maxBitwidth) + "\n")
logFile.write("Epsilon: " + str(epsilon) + "\n")
logFile.write("maxPivot: " + str(maxPivot) + "\n")
iterationRunResults = []
timedOutRuns = set()
for i in range(numIterations):
tempSMT2FileName = tempDir + "/temp_" + str(i) + ".smt2"
tempOutputFile = tempDir + "/solverResults_" + str(i) + ".txt"
tempErrorFile = tempDir + "/solverErrors_" + str(i) + ".txt"
tempSMT1FileName = tempDir + "/temp_" + str(i) + ".smt1"
slices = 2
logFile.write("\n\n################################################################################\n")
logFile.write("Iteration: " + str(i) + "\n")
constraintList = []
coeffDeclList = []
primeList = []
(constraint, coeffDecl, prime) = generateEquationConstraint(varMap, primesMap, maxBitwidth, slices)
constraintList.append(constraint)
coeffDeclList.append(coeffDecl)
primeList.append(prime)
innerLoopRun = 0
while True:
logFile.write("\n----\n")
logFile.write("innerLoopRun: " + str(innerLoopRun) + "\n")
innerLoopRun += 1
generateSMT2FileFromConstraints(smt2prefix, coeffDeclList, lastAssertLine, constraintList, smt2suffix, tempSMT2FileName)
conversionResult = generateSMT1FromSMT2File(tempSMT2FileName, tempSMT1FileName)
if conversionResult != 0:
sys.stderr.write("Error while converting from SMT2 File to SMT1 file. Aborting ...\n")
logFile.write("Error while converting from SMT2 File to SMT1 file. Aborting ...\n")
logFile.close()
exit(1)
cmd = "doalarm -t profile " + str(timeout) + " " + smtSolver + " -i -m --maxsolutions=" + str(maxPivot) + " " + tempSMT1FileName + " >" + tempOutputFile + " 2>>" + tempErrorFile;
logFile.write("cmd: " + cmd + "\n")
startTime = os.times()
os.system(cmd)
endTime = os.times()
logFile.write("startTime: " + str(startTime) + "\n")
logFile.write("endTime: " + str(endTime) + "\n")
logFile.write("cmd time: " + str(endTime.elapsed - startTime.elapsed) + "\n")
hasTimedOut = False
if (endTime.elapsed - startTime.elapsed) > (timeout - 10):
hasTimedOut = True
numSolutions = countSolutions(tempOutputFile)
logFile.write("numConstraints: " + str(len(constraintList)) + ", slices: " + str(slices) + ", numSolutions: " + str(numSolutions) + ", hasTimedOut: " + str(hasTimedOut) + "\n")
if numSolutions >= maxPivot:
(constraint, coeffDecl, prime) = generateEquationConstraint(varMap, primesMap, maxBitwidth, slices)
constraintList.append(constraint)
coeffDeclList.append(coeffDecl)
primeList.append(prime)
elif numSolutions >= minPivot and not hasTimedOut:
break;
elif numSolutions >= 0:
constraintList.pop()
coeffDeclList.pop()
primeList.pop()
if (slices >= maxBitwidth):
if hasTimedOut:
timedOutRuns.add(i)
# logFile.write("hasTimedOut after adding last constraint: " + str(hasTimedOut) + "\n")
break
slices = (slices * 2) if (slices * 2) < maxBitwidth else maxBitwidth;
(constraint, coeffDecl, prime) = generateEquationConstraint(varMap, primesMap, maxBitwidth, slices)
constraintList.append(constraint)
coeffDeclList.append(coeffDecl)
primeList.append(prime)
logFile.flush()
# raw_input("Press Enter to continue...")
iterationRunResults.append((numSolutions, collections.Counter(primeList)))
scriptEndTime = os.times()
logFile.write("Script end time: " + str(scriptEndTime) + "\n")
logFile.write("Total script time: " + str(scriptEndTime.children_user + scriptEndTime.children_system - scriptStartTime.children_user - scriptStartTime.children_system) + "\n")
logFile.write("iterationRunResults: " + str(iterationRunResults) + "\n")
(commonPrimes, med) = getCommonPrimesAndMedian(iterationRunResults, logFile)
logFile.write("commonPrimes: " + str(commonPrimes) + ", median: " + str(med) + ", ")
finalOutputFile.write(str(maxBitwidth) + ";")
for primes in commonPrimes:
finalOutputFile.write(str(primes) + ",")
finalOutputFile.write(";" + str(med) + ";")
finalOutputFile.write(str(scriptEndTime.children_user + scriptEndTime.children_system - scriptStartTime.children_user - scriptStartTime.children_system) + ";")
finalOutputFile.write(str(len(timedOutRuns)) + ";")
logFile.write("Timedout in runs: " + str(timedOutRuns) + ";")
finalOutputFile.write("Timedout in runs: " + str(timedOutRuns))
finalOutputFile.close()
logFile.close()
if __name__ == "__main__":
main(sys.argv)
| #!/home/rakeshmistry/bin/Python-3.4.3/bin/python3
# @author: <NAME> - 'inspire'
# @date: 2015-06-14
import sys
import re
import os
import math
import random
import functools
import collections
import numpy
################################################################################
# Functions to generate SMT2 expressions
def extractExpr(m, n, var):
return "((_ extract " + str(m) + " " + str(n) + ") " + str(var) + ")"
def xorExpr(var1, var2):
return "(xor " + str(var1) + " " + str(var2) + ")"
def zeroExtendExpr(bitWidth, varName):
return "((_ zero_extend " + str(bitWidth) + ") " + varName + ")"
def bvmulExpr(var1, var2):
return "(bvmul " + str(var1) + " " + str(var2) + ")"
def bvaddExpr(var1, var2):
return "(bvadd " + str(var1) + " " + str(var2) + ")"
def eqExpr(var1, var2):
return "(= " + str(var1) + " " + str(var2) + ")"
def constExpr(num, bitWidth):
return "(_ bv" + str(num) + " " + str(bitWidth) + ")"
def andExpr(var1, var2):
return "(and " + str(var1) + " " + str(var2) + ")"
def bvuremExpr(var1, var2):
return "(bvurem " + str(var1) + " " + str(var2) + ")"
################################################################################
# Function: populatePrimesMap
# @param: primesMap - file containing primes.
# Each line is of the form -- k prime
# where for every number 'k' the value of 'prime' is smallest prime > 2^k
#
# returns map of prime numbers for 2^k (1 <= k <= 100)
def populatePrimesMap(primesFile):
primesMap = {}
for line in primesFile:
strList = line.split()
k = int(strList[0])
primesMap[k] = int(strList[1])
return primesMap
# Function: populateEpsilonMap
# @param: probFile - file containing epsilon (tolerance) values.
# Each line is of the form -- k:epsilon
# where 'epsilon' is the tolerance used when the maximum bitwidth is 'k'
#
# returns map of epsilon values keyed by bitwidth
def populateEpsilonMap(probFile):
epsilonMap = {}
for line in probFile:
strList = line.rstrip().split(":")
k = int(strList[0])
epsilonMap[k] = float(strList[1])
return epsilonMap
# Function: computeNewBitwidth
def computeNewBitwidth(k, slices, varMap):
totalBitwidth = 0
for key in varMap.keys():
totalBitwidth += math.ceil(float(varMap[key]) / k)
newBitwidth = k + int(math.ceil(math.log(slices * totalBitwidth, 2))) + 1 # +1 since 's' can be upto 'prime-1'
return newBitwidth
# Function: generateEquationConstraint
# @param: varmap - a map of variables with key as variable name and value being
# its width
# @param: maxBitwidth - maximum bitwidth
# @param: slices - number of slices for each variable to create
#
# Generates an equation of the form:
# a1x1 + a2x2 + ... = s*prime + r
def generateEquationConstraint(varMap, primesMap, maxBitwidth, slices):
generateEquationConstraint.counter += 1
k = int(math.ceil(float(maxBitwidth) / slices))
twoPowerK = 2 ** k
prime = primesMap[k]
newBitwidth = computeNewBitwidth(k, slices, varMap)
primeCoeff = "temp_prime_coeff_" + str(generateEquationConstraint.counter)
primeCoeffDecl = "(declare-fun " + primeCoeff + " () (_ BitVec " + str(newBitwidth - (k + 1)) + "))\n"
bvmulList = []
for key in varMap.keys():
originalKey = key
if varMap[key] != maxBitwidth:
key = zeroExtendExpr(maxBitwidth - varMap[key], key)
assert maxBitwidth >= slices
# find slice widths of variable
keyDivWidth = int(maxBitwidth / slices)
bitRemaining = maxBitwidth % slices
# list containing width of each variable slice
keyDivWidthList = [keyDivWidth] * slices
for i in range(bitRemaining):
keyDivWidthList[i] += 1
coeff = []
for i in range(slices):
coeff.append(random.randint(0, twoPowerK - 1))
keyDivs = []
msbPos = maxBitwidth - 1
remSlices = 0
for i in range(slices):
lsbPos = msbPos - keyDivWidthList[i] + 1
if lsbPos < varMap[originalKey]:
keyDivs.append(extractExpr(msbPos, lsbPos, key))
remSlices += 1
msbPos = msbPos - keyDivWidthList[i]
zxtndKeyDivs = []
for i in range(remSlices):
zxtndKeyDivs.append(zeroExtendExpr(newBitwidth - keyDivWidthList[i], keyDivs[i]))
bvmulStrs = []
for i in range(remSlices):
bvmulList.append(bvmulExpr(constExpr(coeff[i], newBitwidth), zxtndKeyDivs[i]))
lhsStr = functools.reduce(lambda x, y: bvaddExpr(x, y), bvmulList)
lhsStr = bvuremExpr(lhsStr, constExpr(prime, newBitwidth))
r = random.randint(0, prime - 1)
rhsStr = constExpr(r, newBitwidth)
constraint = eqExpr(lhsStr, rhsStr)
return constraint, primeCoeffDecl, prime
# Function: parseSmt2File
# @param: smt2File - input SMT2 file
# @return: varmap - a map containing as key the names of the variables and value as their bitwidth
# @return: smtFilePrefix - string containing the initial part of smt2File (until start of 'assert' in 'smt2File')
#
# Creates variable map and also copies the initial part of SMT2 file (until start of 'assert' in 'smt2File')
# This would later be appended with our constraints to create the new SMT2 file
def parseSmt2FileVariables(smt2File):
# create regex to specific lines
compiledVarPattern = re.compile("[ \t]*\(declare-fun")
compiledAssertPattern = re.compile("assert")
# read variable info in map
varMap = {}
assertLine = ""
scriptName = os.path.basename(__file__)
smtFilePrefix = "; [" + scriptName + "] Autogenerated from source file: " + smt2File.name + "\n"
for line in smt2File:
if compiledAssertPattern.search(line):
assertLine = line
break;
smtFilePrefix += line
if compiledVarPattern.search(line):
wordList = line.split()
varName = wordList[1]
varWidthStr = wordList[-1].rstrip(")")
if varWidthStr.isdigit():
varWidth = int(varWidthStr)
varMap[varName] = varWidth
return varMap, smtFilePrefix, assertLine
# Function: parseSmt2File
# @param: smt2File - input SMT2 file
# @param: newConstraints - string which is a SMT2 constraint
# @return: smtFileSuffix - string containing our constraints followed by rest of input file
#
# returns a string after our adding our constraints to the rest of the input file
def parseSmt2FileSuffix(smt2File, newConstraints):
compiledCheckSatPattern = re.compile("check-sat")
smtFileSuffix = ""
for line in smt2File:
if compiledCheckSatPattern.search(line):
smtFileSuffix += "(assert"
smtFileSuffix += " " + newConstraints + ")\n"
smtFileSuffix += line
break
smtFileSuffix += line
# write everything after '(check-sat)'
for line in smt2File:
smtFileSuffix += line
return smtFileSuffix
# Function: generateSMT2FileFromConstraints
def generateSMT2FileFromConstraints(smt2prefix, coeffDeclList, lastAssertLine, constraintList, smt2suffix, tempFileName):
outputSMT2File = open(tempFileName, "w")
outputSMT2File.write(smt2prefix)
for decl in coeffDeclList:
outputSMT2File.write(decl)
outputSMT2File.write("(assert")
strConstraints = functools.reduce(lambda x, y: andExpr(x, y), constraintList)
outputSMT2File.write(strConstraints)
outputSMT2File.write(")\n")
outputSMT2File.write(lastAssertLine)
outputSMT2File.write(smt2suffix)
outputSMT2File.close()
# Function: generateSMT1FromSMT2File
def generateSMT1FromSMT2File(smt2FileName, smt1FileName):
cmd = "boolector -ds1 -o " + smt1FileName + " " + smt2FileName
return os.system(cmd)
# Function: countSolutions
def countSolutions(smtResultsFileName):
smtResultsFile = open(smtResultsFileName, "r")
count = 0
for line in smtResultsFile:
if line == "sat\n":
count += 1
return count
def getCommonPrimesAndMedian(runResults, logFile):
commonPrimes = runResults[0][1]
for i in range(1, len(runResults)):
commonPrimes = commonPrimes & runResults[i][1]
    logFile.write("commonPrimes: " + str(list(commonPrimes.elements())) + "\n")
valList = []
for i in range(len(runResults)):
# subResult = runResults[i][1].subtract(commonPrimes)
subResult = runResults[i][1] - commonPrimes
if subResult == None:
valList.append(runResults[i][0])
else:
prod = 1
for key in subResult:
prod = prod * (key ** subResult[key])
valList.append(prod * runResults[i][0])
logFile.write("valList: " + str(valList) + "\n")
return (list(commonPrimes.elements()), numpy.median(valList))
# Function: main
def main(argv):
generateEquationConstraint.counter = 0;
# check for correct number of arguments
scriptName = os.path.basename(__file__)
if len(argv) < 6:
sys.stderr.write("Error: Invalid arguments.\n")
sys.stderr.write(" [Usage]: " + scriptName + " <input_SMT2_file> <primes_file> <num_iterations> <log_file> <output_file>\n")
sys.exit(1)
# open files
inputSMTFile = open(argv[1], "r")
primesFile = open(argv[2], "r")
numIterations = int(argv[3])
logFile = open(argv[4], "w", 1)
finalOutputFile = open(argv[5], "w", 1)
# probMapFile = open(argv[3], "r")
primesMap = populatePrimesMap(primesFile)
# epsilonMap = populateEpsilonMap(probMapFile)
(varMap, smt2prefix, lastAssertLine) = parseSmt2FileVariables(inputSMTFile)
smt2suffix = parseSmt2FileSuffix(inputSMTFile, "true")
maxBitwidth = max(varMap.values())
# print("maxBitwidth: " + str(maxBitwidth))
# find pivot solutions
tempDir = os.getcwd() + "/temp_amc"
smtSolver = os.path.dirname(os.path.realpath(__file__)) + "/../boolector-mc/boolector/boolector"
if not os.path.exists(tempDir):
os.makedirs(tempDir)
timeout = 2400
minPivot = 1
epsilon = 0.8 # epsilonMap[maxBitwidth]
maxPivot = int(2*math.ceil(4.94*(1+1/epsilon)*(1+1/epsilon)))
# print("maxPivot: " + str(maxPivot))
scriptStartTime = os.times()
logFile.write("Script start time: " + str(scriptStartTime) + "\n")
logFile.write("maxBitwidth: " + str(maxBitwidth) + "\n")
logFile.write("Epsilon: " + str(epsilon) + "\n")
logFile.write("maxPivot: " + str(maxPivot) + "\n")
iterationRunResults = []
timedOutRuns = set()
for i in range(numIterations):
tempSMT2FileName = tempDir + "/temp_" + str(i) + ".smt2"
tempOutputFile = tempDir + "/solverResults_" + str(i) + ".txt"
tempErrorFile = tempDir + "/solverErrors_" + str(i) + ".txt"
tempSMT1FileName = tempDir + "/temp_" + str(i) + ".smt1"
slices = 2
logFile.write("\n\n################################################################################\n")
logFile.write("Iteration: " + str(i) + "\n")
constraintList = []
coeffDeclList = []
primeList = []
(constraint, coeffDecl, prime) = generateEquationConstraint(varMap, primesMap, maxBitwidth, slices)
constraintList.append(constraint)
coeffDeclList.append(coeffDecl)
primeList.append(prime)
innerLoopRun = 0
while True:
logFile.write("\n----\n")
logFile.write("innerLoopRun: " + str(innerLoopRun) + "\n")
innerLoopRun += 1
generateSMT2FileFromConstraints(smt2prefix, coeffDeclList, lastAssertLine, constraintList, smt2suffix, tempSMT2FileName)
conversionResult = generateSMT1FromSMT2File(tempSMT2FileName, tempSMT1FileName)
if conversionResult != 0:
sys.stderr.write("Error while converting from SMT2 File to SMT1 file. Aborting ...\n")
logFile.write("Error while converting from SMT2 File to SMT1 file. Aborting ...\n")
logFile.close()
exit(1)
cmd = "doalarm -t profile " + str(timeout) + " " + smtSolver + " -i -m --maxsolutions=" + str(maxPivot) + " " + tempSMT1FileName + " >" + tempOutputFile + " 2>>" + tempErrorFile;
logFile.write("cmd: " + cmd + "\n")
startTime = os.times()
os.system(cmd)
endTime = os.times()
logFile.write("startTime: " + str(startTime) + "\n")
logFile.write("endTime: " + str(endTime) + "\n")
logFile.write("cmd time: " + str(endTime.elapsed - startTime.elapsed) + "\n")
hasTimedOut = False
if (endTime.elapsed - startTime.elapsed) > (timeout - 10):
hasTimedOut = True
numSolutions = countSolutions(tempOutputFile)
logFile.write("numConstraints: " + str(len(constraintList)) + ", slices: " + str(slices) + ", numSolutions: " + str(numSolutions) + ", hasTimedOut: " + str(hasTimedOut) + "\n")
if numSolutions >= maxPivot:
(constraint, coeffDecl, prime) = generateEquationConstraint(varMap, primesMap, maxBitwidth, slices)
constraintList.append(constraint)
coeffDeclList.append(coeffDecl)
primeList.append(prime)
elif numSolutions >= minPivot and not hasTimedOut:
break;
elif numSolutions >= 0:
constraintList.pop()
coeffDeclList.pop()
primeList.pop()
if (slices >= maxBitwidth):
if hasTimedOut:
timedOutRuns.add(i)
# logFile.write("hasTimedOut after adding last constraint: " + str(hasTimedOut) + "\n")
break
slices = (slices * 2) if (slices * 2) < maxBitwidth else maxBitwidth;
(constraint, coeffDecl, prime) = generateEquationConstraint(varMap, primesMap, maxBitwidth, slices)
constraintList.append(constraint)
coeffDeclList.append(coeffDecl)
primeList.append(prime)
logFile.flush()
# raw_input("Press Enter to continue...")
iterationRunResults.append((numSolutions, collections.Counter(primeList)))
scriptEndTime = os.times()
logFile.write("Script end time: " + str(scriptEndTime) + "\n")
logFile.write("Total script time: " + str(scriptEndTime.children_user + scriptEndTime.children_system - scriptStartTime.children_user - scriptStartTime.children_system) + "\n")
logFile.write("iterationRunResults: " + str(iterationRunResults) + "\n")
(commonPrimes, med) = getCommonPrimesAndMedian(iterationRunResults, logFile)
logFile.write("commonPrimes: " + str(commonPrimes) + ", median: " + str(med) + ", ")
finalOutputFile.write(str(maxBitwidth) + ";")
for primes in commonPrimes:
finalOutputFile.write(str(primes) + ",")
finalOutputFile.write(";" + str(med) + ";")
finalOutputFile.write(str(scriptEndTime.children_user + scriptEndTime.children_system - scriptStartTime.children_user - scriptStartTime.children_system) + ";")
finalOutputFile.write(str(len(timedOutRuns)) + ";")
logFile.write("Timedout in runs: " + str(timedOutRuns) + ";")
finalOutputFile.write("Timedout in runs: " + str(timedOutRuns))
finalOutputFile.close()
logFile.close()
if __name__ == "__main__":
main(sys.argv)
| en | 0.560302 | #!/home/rakeshmistry/bin/Python-3.4.3/bin/python3 # @author: <NAME> - 'inspire' # @date: 2015-06-14 ################################################################################ # Functions to generate SMT2 expressions ################################################################################ # Function: populatePrimesMap # @param: primesMap - file containing primes. # Each line is of the form -- k prime # where for every number 'k' the value of 'prime' is smallest prime > 2^k # # returns map of prime numbers for 2^k (1 <= k <= 100) # Function: populatePrimesMap # @param: primesMap - file containing primes. # Each line is of the form -- k prime # where for every number 'k' the value of 'prime' is smallest prime > 2^k # # returns map of prime numbers for 2^k (1 <= k <= 100) # Function: computeNewBitwidth # +1 since 's' can be upto 'prime-1' # Function: generateEquationConstraint # @param: varmap - a map of variables with key as variable name and value being # its width # @param: maxBitwidth - maximum bitwidth # @param: slices - number of slices for each variable to create # # Generates an equation of the form: # a1x1 + a2x2 + ... = s*prime + r # find slice widths of variable # list containing width of each variable slice # Function: parseSmt2File # @param: smt2File - input SMT2 file # @return: varmap - a map containing as key the names of the variables and value as their bitwidth # @return: smtFilePrefix - string containing the initial part of smt2File (until start of 'assert' in 'smt2File') # # Creates variable map and also copies the initial part of SMT2 file (until start of 'assert' in 'smt2File') # This would later be appended with our constraints to create the new SMT2 file # create regex to specific lines # read variable info in map # Function: parseSmt2File # @param: smt2File - input SMT2 file # @param: newConstraints - string which is a SMT2 constraint # @return: smtFileSuffix - string containing our constraints followed by rest of input file # # returns a string after our adding our constraints to the rest of the input file # write everything after '(check-sat)' # Function: generateSMT2FileFromConstraints # Function: generateSMT1FromSMT2File # Funtion: countSolutions # subResult = runResults[i][1].subtract(commonPrimes) # Function: main # check for correct number of arguments # open files # probMapFile = open(argv[3], "r") # epsilonMap = populateEpsilonMap(probMapFile) # print("maxBitwidth: " + str(maxBitwidth)) # find pivot solutions # epsilonMap[maxBitwidth] # print("maxPivot: " + str(maxPivot)) ################################################################################\n") # logFile.write("hasTimedOut after adding last constraint: " + str(hasTimedOut) + "\n") # raw_input("Press Enter to continue...") | 2.175645 | 2 |
message_creator/multi_list_pb2.py | jameshp/deviceadminserver | 0 | 6632743 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: multi_list.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='multi_list.proto',
package='Proto.Config',
syntax='proto3',
serialized_pb=b'\n\x10multi_list.proto\x12\x0cProto.Config\"\x91\x01\n\x16multilist_entry_object\x12\x0c\n\x04name\x18\x01 \x01(\t\x12<\n\x04\x64\x61ta\x18\x02 \x03(\x0b\x32..Proto.Config.multilist_entry_object.DataEntry\x1a+\n\tDataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"T\n\x10multilist_object\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x32\n\x04\x64\x61ta\x18\x02 \x03(\x0b\x32$.Proto.Config.multilist_entry_objectB5\n net.ktc.miles.model.proto.configB\x0fMultiListObjectH\x01\x62\x06proto3'
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_MULTILIST_ENTRY_OBJECT_DATAENTRY = _descriptor.Descriptor(
name='DataEntry',
full_name='Proto.Config.multilist_entry_object.DataEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='Proto.Config.multilist_entry_object.DataEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='value', full_name='Proto.Config.multilist_entry_object.DataEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), b'8\001'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=137,
serialized_end=180,
)
_MULTILIST_ENTRY_OBJECT = _descriptor.Descriptor(
name='multilist_entry_object',
full_name='Proto.Config.multilist_entry_object',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='Proto.Config.multilist_entry_object.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='data', full_name='Proto.Config.multilist_entry_object.data', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_MULTILIST_ENTRY_OBJECT_DATAENTRY, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=35,
serialized_end=180,
)
_MULTILIST_OBJECT = _descriptor.Descriptor(
name='multilist_object',
full_name='Proto.Config.multilist_object',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='Proto.Config.multilist_object.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='data', full_name='Proto.Config.multilist_object.data', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=182,
serialized_end=266,
)
_MULTILIST_ENTRY_OBJECT_DATAENTRY.containing_type = _MULTILIST_ENTRY_OBJECT
_MULTILIST_ENTRY_OBJECT.fields_by_name['data'].message_type = _MULTILIST_ENTRY_OBJECT_DATAENTRY
_MULTILIST_OBJECT.fields_by_name['data'].message_type = _MULTILIST_ENTRY_OBJECT
DESCRIPTOR.message_types_by_name['multilist_entry_object'] = _MULTILIST_ENTRY_OBJECT
DESCRIPTOR.message_types_by_name['multilist_object'] = _MULTILIST_OBJECT
multilist_entry_object = _reflection.GeneratedProtocolMessageType('multilist_entry_object', (_message.Message,), dict(
DataEntry = _reflection.GeneratedProtocolMessageType('DataEntry', (_message.Message,), dict(
DESCRIPTOR = _MULTILIST_ENTRY_OBJECT_DATAENTRY,
__module__ = 'multi_list_pb2'
# @@protoc_insertion_point(class_scope:Proto.Config.multilist_entry_object.DataEntry)
))
,
DESCRIPTOR = _MULTILIST_ENTRY_OBJECT,
__module__ = 'multi_list_pb2'
# @@protoc_insertion_point(class_scope:Proto.Config.multilist_entry_object)
))
_sym_db.RegisterMessage(multilist_entry_object)
_sym_db.RegisterMessage(multilist_entry_object.DataEntry)
multilist_object = _reflection.GeneratedProtocolMessageType('multilist_object', (_message.Message,), dict(
DESCRIPTOR = _MULTILIST_OBJECT,
__module__ = 'multi_list_pb2'
# @@protoc_insertion_point(class_scope:Proto.Config.multilist_object)
))
_sym_db.RegisterMessage(multilist_object)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), b'\n net.ktc.miles.model.proto.configB\017MultiListObjectH\001')
_MULTILIST_ENTRY_OBJECT_DATAENTRY.has_options = True
_MULTILIST_ENTRY_OBJECT_DATAENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), b'8\001')
# @@protoc_insertion_point(module_scope)
| en | 0.348111 | # Generated by the protocol buffer compiler. DO NOT EDIT! # source: multi_list.proto # @@protoc_insertion_point(imports) # @@protoc_insertion_point(class_scope:Proto.Config.multilist_entry_object.DataEntry) # @@protoc_insertion_point(class_scope:Proto.Config.multilist_entry_object) # @@protoc_insertion_point(class_scope:Proto.Config.multilist_object) # @@protoc_insertion_point(module_scope) | 1.220371 | 1 |
tests/test_notebooks.py | francescodonato/GPflux | 100 | 6632744 | #
# Copyright (c) 2021 The GPflux Contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import glob
import os
import sys
import time
import traceback
import jupytext
import nbformat
import pytest
from nbconvert.preprocessors import ExecutePreprocessor
from nbconvert.preprocessors.execute import CellExecutionError
# To blacklist a notebook, add its full base name (including .ipynb extension,
# but without any directory component). If there are several notebooks in
# different directories with the same base name, they will all get blacklisted
# (change the blacklisting check to something else in that case, if need be!)
BLACKLISTED_NOTEBOOKS = [
"conditional_deep_gp.py",
"deep_nonstationary_gp_samples.py",
]
def _nbpath():
this_dir = os.path.dirname(__file__)
return os.path.join(this_dir, "../docs/notebooks/")
def test_notebook_dir_exists():
assert os.path.isdir(_nbpath())
def get_notebooks():
"""
Returns all notebooks in `_nbpath` that are not blacklisted.
"""
def notebook_blacklisted(nb):
blacklisted_notebooks_basename = map(os.path.basename, BLACKLISTED_NOTEBOOKS)
return os.path.basename(nb) in blacklisted_notebooks_basename
# recursively traverse the notebook directory in search for ipython notebooks
all_notebooks = glob.iglob(os.path.join(_nbpath(), "**", "*.py"), recursive=True)
notebooks_to_test = [nb for nb in all_notebooks if not notebook_blacklisted(nb)]
return notebooks_to_test
def _preproc():
pythonkernel = "python" + str(sys.version_info[0])
return ExecutePreprocessor(timeout=300, kernel_name=pythonkernel, interrupt_on_timeout=True)
def _exec_notebook(notebook_filename):
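    # The notebooks are stored as jupytext .py scripts; read each one into an
    # in-memory nbformat notebook before executing its cells.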
with open(notebook_filename) as notebook_file:
nb = jupytext.read(notebook_file, as_version=nbformat.current_nbformat)
try:
meta_data = {"path": os.path.dirname(notebook_filename)}
_preproc().preprocess(nb, {"metadata": meta_data})
except CellExecutionError as cell_error:
traceback.print_exc(file=sys.stdout)
msg = "Error executing the notebook {0}. See above for error.\nCell error: {1}"
pytest.fail(msg.format(notebook_filename, str(cell_error)))
@pytest.mark.notebooks
@pytest.mark.parametrize("notebook_file", get_notebooks())
def test_notebook(notebook_file):
_exec_notebook(notebook_file)
def test_has_notebooks():
assert len(get_notebooks()) >= 2, "there are probably some notebooks that were not discovered"
| en | 0.860274 | # # Copyright (c) 2021 The GPflux Contributors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # To blacklist a notebook, add its full base name (including .ipynb extension, # but without any directory component). If there are several notebooks in # different directories with the same base name, they will all get blacklisted # (change the blacklisting check to something else in that case, if need be!) Returns all notebooks in `_nbpath` that are not blacklisted. # recursively traverse the notebook directory in search for ipython notebooks | 1.996073 | 2 |
supabase_py/lib/supabase_storage_client.py | olirice/supabase-py | 0 | 6632745 | <filename>supabase_py/lib/supabase_storage_client.py
from supabase_py.lib.storage.storage_bucket_api import StorageBucketAPI
class SupabaseStorageClient(StorageBucketAPI):
"""
Manage the storage bucket and files
Examples
--------
>>> url = storage_file.create_signed_url("poll3o/test2.txt", 80) # signed url
>>> loop.run_until_complete(storage_file.download("poll3o/test2.txt")) #upload or download
>>> loop.run_until_complete(storage_file.upload("poll3o/test2.txt","path_file_upload"))
>>> list_buckets = storage.list_buckets()
>>> list_files = storage_file.list("pollo")
"""
def __init__(self, url, headers):
super().__init__(url, headers)
# def StorageFileApi(self, id_, replace=False):
# return StorageFileApi(self.url, self.headers, id_, replace)
def StorageBucketAPI(self):
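        # Convenience accessor: returns a bucket-level API bound to the same URL and headers.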
return StorageBucketAPI(self.url, self.headers)
| en | 0.583502 | Manage the storage bucket and files Examples -------- >>> url = storage_file.create_signed_url("poll3o/test2.txt", 80) # signed url >>> loop.run_until_complete(storage_file.download("poll3o/test2.txt")) #upload or download >>> loop.run_until_complete(storage_file.upload("poll3o/test2.txt","path_file_upload")) >>> list_buckets = storage.list_buckets() >>> list_files = storage_file.list("pollo") # def StorageFileApi(self, id_, replace=False): # return StorageFileApi(self.url, self.headers, id_, replace) | 2.731857 | 3 |
node_modules/nuclide/pkg/nuclide-debugger-native-rpc/scripts/thread_manager.py | kevingatera/kgatewebapp | 1 | 6632746 | <reponame>kevingatera/kgatewebapp
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree.
from find_lldb import get_lldb
from remote_objects import ValueListRemoteObject
CALL_STACK_OBJECT_GROUP = 'thread_stack'
MAX_STOP_REASON_DESCRIPTION_LENGTH = 1024
class ThreadManager(object):
"""Manages all the threads in target process.
The client expects one `Debugger.threadCreated` message for each thread.
"""
def __init__(self, debugger_store):
"""Initialize a ThreadManager for a given connection.
"""
self._debugger_store = debugger_store
self._previousStopThreadId = None
self._threadSwitchMessage = None
def update_thread_switch_message(self, process):
stopThreadId = process.GetSelectedThread().GetThreadID()
if self._previousStopThreadId is not None and self._previousStopThreadId != stopThreadId:
self._threadSwitchMessage = "Active thread switched from thread {0} to thread {1}" \
.format(self._previousStopThreadId, stopThreadId)
else:
self._threadSwitchMessage = None
self._previousStopThreadId = stopThreadId
def send_threads_updated(self, process):
"""Update threads status for input process."""
threads_array = []
lldb = get_lldb()
stopThreadId = process.GetSelectedThread().GetThreadID()
for thread in process.threads:
description_stream = lldb.SBStream()
thread.GetDescription(description_stream)
frame = thread.GetSelectedFrame()
location = self._debugger_store.location_serializer \
.get_frame_location(frame)
threads_array.append({
'id': thread.GetThreadID(),
'name': thread.GetName(),
'address': self._get_frame_name(frame),
'location': location,
'hasSource': self._debugger_store.location_serializer.has_source(frame),
'stopReason': self.get_thread_stop_description(thread),
'description': description_stream.GetData(),
})
params = {
'owningProcessId': process.id,
'stopThreadId': stopThreadId,
'threads': threads_array,
}
self._debugger_store.chrome_channel.send_notification('Debugger.threadsUpdated', params)
def get_thread_stack(self, thread):
"""Fetch serialized callstack for input thread."""
result = []
for frame in thread.frames:
# SBFrame.GetVariables(arguments, locals, statics, in_scope_only)
variables = frame.GetVariables(True, True, False, True)
local_variables = self._debugger_store.remote_object_manager.add_object(
ValueListRemoteObject(
variables,
self._debugger_store.remote_object_manager.
get_add_object_func(CALL_STACK_OBJECT_GROUP)),
CALL_STACK_OBJECT_GROUP)
scopeChainObject = local_variables.serialized_value
scopeChainObject.update({'description': 'Locals'})
result.append({
'callFrameId': "%d.%d" % (frame.thread.idx, frame.idx),
'functionName': self._get_frame_name(frame),
'location': self._debugger_store.location_serializer.get_frame_location(frame),
'hasSource': self._debugger_store.location_serializer.has_source(frame),
'scopeChain': [{
'object': scopeChainObject,
'type': 'local',
}],
})
return result
def get_thread_stop_description(self, thread):
return thread.GetStopDescription(MAX_STOP_REASON_DESCRIPTION_LENGTH)
def _get_frame_name(self, frame):
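        # Format the frame as "<symbol name> +<hex offset>", where the offset is
        # the PC's distance from the start address of the enclosing symbol.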
target = frame.GetThread().GetProcess().GetTarget()
offset = frame.GetPCAddress().GetLoadAddress(target) \
- frame.GetSymbol().GetStartAddress().GetLoadAddress(target)
return "%s +%x" % (frame.name, offset)
def get_thread_switch_message(self):
return self._threadSwitchMessage
def release(self):
self._debugger_store.remote_object_manager.release_object_group(CALL_STACK_OBJECT_GROUP)
| en | 0.841837 | # Copyright (c) 2015-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the license found in the LICENSE file in # the root directory of this source tree. Manages all the threads in target process. The client expects one `Debugger.threadCreated` message for each thread. Initialize a ThreadManager for a given connection. Update threads status for input process. Fetch serialized callstack for input thread. # SBFrame.GetVariables(arguments, locals, statics, in_scope_only) | 2.23464 | 2 |
examples/swarmalator/plot_swarmalator.py | wordsworthgroup/libode | 11 | 6632747 | from os import listdir
from os.path import join
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
plt.style.use('dark_background')
#plt.rc('font', family='monospace')
frgb = lambda theta: 0.45*(1 + np.cos(theta))
def rgb(theta):
r = frgb(theta)
g = frgb(theta - 2*np.pi/3)
b = frgb(theta + 2*np.pi/3)
return(r,g,b,1)
#load results
t = np.fromfile(join('out', 'swarmalator_t'))
fns = [join('out',fn) for fn in listdir('out') if fn[-1] != 't']
fns = sorted(fns, key=lambda fn: int(fn.split('_')[-1]))
out = [np.fromfile(fn).astype('float16') for fn in fns]
nag = len(out)//3
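# The slicing below assumes the sorted output files form three equal blocks:
# x for every agent, then y, then theta.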
x, y, theta = out[:nag], out[nag:nag*2], out[nag*2:]
x, y, theta = np.vstack(x), np.vstack(y), np.vstack(theta)
#x, y, theta = x, y, theta
nout = x.shape[1]
fig, ax = plt.subplots(1,1)
ax.set_xlim(x.min()*1.05, x.max()*1.05)
ax.set_ylim(y.min()*1.05, y.max()*1.05)
ax.set_aspect('equal')
ax.grid(False)
ax.set_xlabel("$x$")
ax.set_ylabel("$y$")
ax.set_title('Oscillators That Sync and Swarm')
L = [ax.plot(x[i,0], y[i,0], '.', color=rgb(theta[i,0]), markersize=10)[0] for i in range(nag)]
def init():
return(L)
def animate(i):
for j,l in enumerate(L):
l.set_xdata(x[j,i])
l.set_ydata(y[j,i])
l.set_color(rgb(theta[j,i]))
return(L)
ani = animation.FuncAnimation(fig, animate, init_func=init, frames=nout, interval=0.01, blit=True)
plt.show()
| en | 0.178467 | #plt.rc('font', family='monospace') #load results #x, y, theta = x, y, theta | 2.396326 | 2 |
lectures/l08-inclass.py | davidd-55/cs152fa21 | 1 | 6632748 | # ---
# jupyter:
# jupytext:
# formats: ipynb,py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.11.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# %%
import torch
import torchvision
# %%
torch.rand(3)
# %%
torch.rand(3).shape
# %%
torch.rand(16, 17)
# %%
torch.rand(16, 17).shape
# %%
torch.rand(1, 2, 3, 4, 5)
# %%
torch.rand(1, 2, 3, 4, 5).shape
# %%
a = torch.rand(5, 12)
b = torch.rand(12, 16)
# %%
a.shape, b.shape
# %%
a * b # Element-wise multiplication
# %%
c = a @ b
# %%
c.shape
# %%
c = b @ a
# %%
c = b.T @ a.T
# %%
c.shape
# %%
# MNIST : hello world
# EMNIST : extended with letters in addition to digits
# KMNIST : Kuzushiji, Japanese characters
# QMNIST : newer MNIST with better source information
data_path = "../data/"
mnist_transforms = torchvision.transforms.Compose(
[
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize((0.1307,), (0.3081,)),
]
)
train_dataset = torchvision.datasets.MNIST(
root=data_path, train=True, download=True, transform=mnist_transforms
)
# %%
train_dataset
# %%
train_dataset.data[0]
# %%
train_loader = torch.utils.data.DataLoader(train_dataset)
# %%
for (image, label) in train_loader:
print(image.shape, label.shape)
break
# %%
image
# %%
label
# %%
import matplotlib.pyplot as plt
# %%
plt.imshow(image.squeeze())# .permute((2, 1, 0))) #permute((-1, -2, 0)))
# %%
help(image)
# %%
a = torch.randn(3, 2)
b = torch.randn(2, 2)
# a, b, a * b
# %%
a @ b
# %%
image.shape
# %%
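# Build a random weight vector over the 784 flattened pixels; the next cells
# compute w.T @ x and squash it with a sigmoid: a single logistic unit.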
num_pixels = 28 * 28
w = torch.randn(num_pixels, 1)
w.shape
# %%
w.T @ image.view(num_pixels, 1)
# %%
torch.sigmoid(w.T @ image.view(num_pixels, 1))
# %%
| en | 0.256277 | # --- # jupyter: # jupytext: # formats: ipynb,py:percent # text_representation: # extension: .py # format_name: percent # format_version: '1.3' # jupytext_version: 1.11.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # %% # %% # %% # %% # %% # %% # %% # %% # %% # %% # Element-wise multiplication # %% # %% # %% # %% # %% # %% # MNIST : hello world # EMNIST : extended with letters in addition to digits # KMNIST : Kuzushiji, Japanese characters # QMNIST : newer MNIST with better source information # %% # %% # %% # %% # %% # %% # %% # %% # .permute((2, 1, 0))) #permute((-1, -2, 0))) # %% # %% # a, b, a * b # %% # %% # %% # %% # %% # %% | 2.56517 | 3 |
tensorflow/contrib/distributions/python/ops/mvn.py | returncode13/tensorflow | 1 | 6632749 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Multivariate Normal distribution classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from tensorflow.contrib.distributions.python.ops import distribution
from tensorflow.contrib.distributions.python.ops import operator_pd_cholesky
from tensorflow.contrib.distributions.python.ops import operator_pd_diag
from tensorflow.contrib.distributions.python.ops import operator_pd_full
from tensorflow.contrib.distributions.python.ops import operator_pd_vdvt_update
from tensorflow.contrib.framework.python.framework import tensor_util as contrib_tensor_util
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
__all__ = [
"MultivariateNormalDiag",
"MultivariateNormalCholesky",
"MultivariateNormalFull",
"MultivariateNormalDiagPlusVDVT",
]
class MultivariateNormalOperatorPD(distribution.Distribution):
"""The multivariate normal distribution on `R^k`.
This distribution is defined by a 1-D mean `mu` and an instance of
`OperatorPDBase`, which provides access to a symmetric positive definite
operator, which defines the covariance.
#### Mathematical details
With `C` the covariance matrix represented by the operator, the PDF of this
distribution is:
```
f(x) = (2 pi)^(-k/2) |det(C)|^(-1/2) exp(-1/2 (x - mu)^T C^{-1} (x - mu))
```
#### Examples
A single multi-variate Gaussian distribution is defined by a vector of means
of length `k`, and a covariance matrix of shape `k x k`.
Extra leading dimensions, if provided, allow for batches.
```python
# Initialize a single 3-variate Gaussian.
mu = [1, 2, 3]
chol = [[1, 0, 0.], [1, 3, 0], [1, 2, 3]]
cov = tf.contrib.distributions.OperatorPDCholesky(chol)
dist = tf.contrib.distributions.MultivariateNormalOperatorPD(mu, cov)
# Evaluate this on an observation in R^3, returning a scalar.
dist.pdf([-1, 0, 1.])
# Initialize a batch of two 3-variate Gaussians.
mu = [[1, 2, 3], [11, 22, 33.]]
chol = ... # shape 2 x 3 x 3, lower triangular, positive diagonal.
cov = tf.contrib.distributions.OperatorPDCholesky(chol)
dist = tf.contrib.distributions.MultivariateNormalOperatorPD(mu, cov)
  # Evaluate this on two observations, each in R^3, returning a length two
# tensor.
x = [[-1, 0, 1], [-11, 0, 11.]] # Shape 2 x 3.
dist.pdf(x)
```
"""
def __init__(self,
mu,
cov,
validate_args=True,
allow_nan_stats=False,
name="MultivariateNormalCov"):
"""Multivariate Normal distributions on `R^k`.
User must provide means `mu`, and an instance of `OperatorPDBase`, `cov`,
which determines the covariance.
Args:
mu: `float` or `double` tensor with shape `[N1,...,Nb, k]`, `b >= 0`.
cov: `float` or `double` instance of `OperatorPDBase` with same `dtype`
as `mu` and shape `[N1,...,Nb, k, k]`.
validate_args: Whether to validate input with asserts. If `validate_args`
is `False`, and the inputs are invalid, correct behavior is not
guaranteed.
allow_nan_stats: `Boolean`, default `False`. If `False`, raise an
exception if a statistic (e.g. mean/mode/etc...) is undefined for any
        batch member. If `True`, batch members with valid parameters leading to
undefined statistics will return NaN for this statistic.
name: The name to give Ops created by the initializer.
Raises:
TypeError: If `mu` and `cov` are different dtypes.
"""
self._allow_nan_stats = allow_nan_stats
self._validate_args = validate_args
with ops.name_scope(name):
with ops.op_scope([mu] + cov.inputs, "init"):
self._cov = cov
self._mu = self._check_mu(mu)
self._name = name
def _check_mu(self, mu):
"""Return `mu` after validity checks and possibly with assertations."""
mu = ops.convert_to_tensor(mu)
cov = self._cov
if mu.dtype != cov.dtype:
raise TypeError(
"mu and cov must have the same dtype. Found mu.dtype = %s, "
"cov.dtype = %s"
% (mu.dtype, cov.dtype))
# Try to validate with static checks.
mu_shape = mu.get_shape()
cov_shape = cov.get_shape()
if mu_shape.is_fully_defined() and cov_shape.is_fully_defined():
if mu_shape != cov_shape[:-1]:
raise ValueError(
"mu.shape and cov.shape[:-1] should match. Found: mu.shape=%s, "
"cov.shape=%s" % (mu_shape, cov_shape))
else:
return mu
    # Static checks could not be run, so possibly do dynamic checks.
if not self.validate_args:
return mu
else:
assert_same_rank = check_ops.assert_equal(
array_ops.rank(mu) + 1,
cov.rank(),
data=["mu should have rank 1 less than cov. Found: rank(mu) = ",
array_ops.rank(mu), " rank(cov) = ", cov.rank()],
)
with ops.control_dependencies([assert_same_rank]):
assert_same_shape = check_ops.assert_equal(
array_ops.shape(mu),
cov.vector_shape(),
data=["mu.shape and cov.shape[:-1] should match. "
"Found: shape(mu) = "
, array_ops.shape(mu), " shape(cov) = ", cov.shape()],
)
return control_flow_ops.with_dependencies([assert_same_shape], mu)
@property
def validate_args(self):
"""`Boolean` describing behavior on invalid input."""
return self._validate_args
@property
def allow_nan_stats(self):
"""`Boolean` describing behavior when stats are undefined."""
return self._allow_nan_stats
@property
def dtype(self):
return self._mu.dtype
def get_event_shape(self):
"""`TensorShape` available at graph construction time."""
# Recall _check_mu ensures mu and self._cov have same batch shape.
return self._cov.get_shape()[-1:]
def event_shape(self, name="event_shape"):
"""Shape of a sample from a single distribution as a 1-D int32 `Tensor`."""
# Recall _check_mu ensures mu and self._cov have same batch shape.
with ops.name_scope(self.name):
with ops.op_scope(self._cov.inputs, name):
return array_ops.pack([self._cov.vector_space_dimension()])
def batch_shape(self, name="batch_shape"):
"""Batch dimensions of this instance as a 1-D int32 `Tensor`."""
# Recall _check_mu ensures mu and self._cov have same batch shape.
with ops.name_scope(self.name):
with ops.op_scope(self._cov.inputs, name):
return self._cov.batch_shape()
def get_batch_shape(self):
"""`TensorShape` available at graph construction time."""
# Recall _check_mu ensures mu and self._cov have same batch shape.
return self._cov.get_batch_shape()
@property
def mu(self):
return self._mu
@property
def sigma(self):
"""Dense (batch) covariance matrix, if available."""
with ops.name_scope(self.name):
return self._cov.to_dense()
def mean(self, name="mean"):
"""Mean of each batch member."""
with ops.name_scope(self.name):
with ops.op_scope([self._mu], name):
return array_ops.identity(self._mu)
def mode(self, name="mode"):
"""Mode of each batch member."""
with ops.name_scope(self.name):
with ops.op_scope([self._mu], name):
return array_ops.identity(self._mu)
def variance(self, name="variance"):
"""Variance of each batch member."""
with ops.name_scope(self.name):
return self.sigma
def log_sigma_det(self, name="log_sigma_det"):
"""Log of determinant of covariance matrix."""
with ops.name_scope(self.name):
with ops.op_scope(self._cov.inputs, name):
return self._cov.log_det()
def sigma_det(self, name="sigma_det"):
"""Determinant of covariance matrix."""
with ops.name_scope(self.name):
with ops.op_scope(self._cov.inputs, name):
return math_ops.exp(self._cov.log_det())
def log_prob(self, x, name="log_prob"):
"""Log prob of observations `x` given these Multivariate Normals.
`x` is a batch vector with compatible shape if `x` is a `Tensor` whose
shape can be broadcast up to either:
    ```
self.batch_shape + self.event_shape
OR
[M1,...,Mm] + self.batch_shape + self.event_shape
```
Args:
x: Compatible batch vector with same `dtype` as this distribution.
name: The name to give this op.
Returns:
log_prob: tensor of dtype `dtype`, the log-PDFs of `x`.
"""
# Q: Why are shape requirements as stated above?
# A: The compatible shapes are precisely the ones that will broadcast to
# a shape compatible with self._cov.
# See Operator base class for notes about shapes compatible with self._cov.
with ops.name_scope(self.name):
with ops.op_scope([self._mu, x] + self._cov.inputs, name):
x = ops.convert_to_tensor(x)
contrib_tensor_util.assert_same_float_dtype((self._mu, x))
# _check_mu asserts that self.mu has same batch shape as self.cov.
# so batch shape of self.mu = that of self._cov and self, and the
# batch shape of x_centered is a broadcast version of these. If this
# broadcast results in a shape like
# [M1,...,Mm] + self.batch_shape + self.event_shape
# OR
# self.batch_shape + self.event_shape
# then subsequent operator calls are guaranteed to work.
x_centered = x - self.mu
        # Compute the quadratic form (x - mu)^T sigma^{-1} (x - mu) which
        # appears in the exponent of the pdf.
x_whitened_norm = self._cov.inv_quadratic_form_on_vectors(x_centered)
log_sigma_det = self.log_sigma_det()
log_two_pi = constant_op.constant(
math.log(2 * math.pi), dtype=self.dtype)
k = math_ops.cast(self._cov.vector_space_dimension(), self.dtype)
log_prob_value = -(log_sigma_det + k * log_two_pi + x_whitened_norm) / 2
output_static_shape = x_centered.get_shape()[:-1]
log_prob_value.set_shape(output_static_shape)
return log_prob_value
def prob(self, x, name="prob"):
"""The PDF of observations `x` under these Multivariate Normals.
`x` is a batch vector with compatible shape if `x` is a `Tensor` whose
shape can be broadcast up to either:
    ```
self.batch_shape + self.event_shape
OR
[M1,...,Mm] + self.batch_shape + self.event_shape
```
Args:
x: Compatible batch vector with same `dtype` as this distribution.
name: The name to give this op.
Returns:
prob: tensor of dtype `dtype`, the prob values of `x`.
"""
with ops.name_scope(self.name):
with ops.op_scope([self._mu, x] + self._cov.inputs, name):
return math_ops.exp(self.log_prob(x))
def entropy(self, name="entropy"):
"""The entropies of these Multivariate Normals.
Args:
name: The name to give this op.
Returns:
entropy: tensor of dtype `dtype`, the entropies.
"""
with ops.name_scope(self.name):
with ops.op_scope([self._mu] + self._cov.inputs, name):
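        # Closed form: H = 0.5 * (k * (1 + log(2*pi)) + log(det(C))).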
log_sigma_det = self.log_sigma_det()
one_plus_log_two_pi = constant_op.constant(1 + math.log(2 * math.pi),
dtype=self.dtype)
# Use broadcasting rules to calculate the full broadcast sigma.
k = math_ops.cast(self._cov.vector_space_dimension(), dtype=self.dtype)
entropy_value = (k * one_plus_log_two_pi + log_sigma_det) / 2
entropy_value.set_shape(log_sigma_det.get_shape())
return entropy_value
def sample_n(self, n, seed=None, name="sample_n"):
"""Sample `n` observations from the Multivariate Normal Distributions.
Args:
n: `Scalar`, type int32, the number of observations to sample.
seed: Python integer, the random seed.
name: The name to give this op.
Returns:
samples: `[n, ...]`, a `Tensor` of `n` samples for each
of the distributions determined by broadcasting the hyperparameters.
"""
with ops.name_scope(self.name):
with ops.op_scope([self._mu, n] + self._cov.inputs, name):
# Recall _check_mu ensures mu and self._cov have same batch shape.
broadcast_shape = self.mu.get_shape()
n = ops.convert_to_tensor(n)
shape = array_ops.concat(0, [self._cov.vector_shape(), [n]])
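        # Draw n iid standard normal vectors per batch member, then map them through
        # the covariance square root: if z ~ N(0, I) then sqrt(C) z ~ N(0, C).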
white_samples = random_ops.random_normal(shape=shape,
mean=0,
stddev=1,
dtype=self.dtype,
seed=seed)
correlated_samples = self._cov.sqrt_matmul(white_samples)
# Move the last dimension to the front
perm = array_ops.concat(0, (
array_ops.pack([array_ops.rank(correlated_samples) - 1]),
math_ops.range(0, array_ops.rank(correlated_samples) - 1)))
# TODO(ebrevdo): Once we get a proper tensor contraction op,
# perform the inner product using that instead of batch_matmul
# and this slow transpose can go away!
correlated_samples = array_ops.transpose(correlated_samples, perm)
samples = correlated_samples + self.mu
# Provide some hints to shape inference
n_val = tensor_util.constant_value(n)
final_shape = tensor_shape.vector(n_val).concatenate(broadcast_shape)
samples.set_shape(final_shape)
return samples
@property
def is_reparameterized(self):
return True
@property
def name(self):
return self._name
@property
def is_continuous(self):
return True
class MultivariateNormalDiag(MultivariateNormalOperatorPD):
"""The multivariate normal distribution on `R^k`.
This distribution is defined by a 1-D mean `mu` and a 1-D diagonal
`diag_stdev`, representing the standard deviations. This distribution
assumes the random variables, `(X_1,...,X_k)` are independent, thus no
non-diagonal terms of the covariance matrix are needed.
This allows for `O(k)` pdf evaluation, sampling, and storage.
#### Mathematical details
The PDF of this distribution is defined in terms of the diagonal covariance
determined by `diag_stdev`: `C_{ii} = diag_stdev[i]**2`.
```
f(x) = (2 pi)^(-k/2) |det(C)|^(-1/2) exp(-1/2 (x - mu)^T C^{-1} (x - mu))
```
#### Examples
A single multi-variate Gaussian distribution is defined by a vector of means
of length `k`, and the square roots of the (independent) random variables.
Extra leading dimensions, if provided, allow for batches.
```python
# Initialize a single 3-variate Gaussian with diagonal standard deviation.
mu = [1, 2, 3.]
diag_stdev = [4, 5, 6.]
dist = tf.contrib.distributions.MultivariateNormalDiag(mu, diag_stdev)
# Evaluate this on an observation in R^3, returning a scalar.
dist.pdf([-1, 0, 1])
# Initialize a batch of two 3-variate Gaussians.
mu = [[1, 2, 3], [11, 22, 33]] # shape 2 x 3
diag_stdev = ... # shape 2 x 3, positive.
dist = tf.contrib.distributions.MultivariateNormalDiag(mu, diag_stdev)
  # Evaluate this on two observations, each in R^3, returning a length two
# tensor.
x = [[-1, 0, 1], [-11, 0, 11]] # Shape 2 x 3.
dist.pdf(x)
```
"""
def __init__(
self,
mu,
diag_stdev,
validate_args=True,
allow_nan_stats=False,
name="MultivariateNormalDiag"):
"""Multivariate Normal distributions on `R^k`.
User must provide means `mu` and standard deviations `diag_stdev`.
Each batch member represents a random vector `(X_1,...,X_k)` of independent
random normals.
The mean of `X_i` is `mu[i]`, and the standard deviation is `diag_stdev[i]`.
Args:
mu: Rank `N + 1` `float` or `double` tensor with shape `[N1,...,Nb, k]`,
`b >= 0`.
diag_stdev: Rank `N + 1` `Tensor` with same `dtype` and shape as `mu`,
representing the standard deviations. Must be positive.
validate_args: Whether to validate input with asserts. If `validate_args`
is `False`,
and the inputs are invalid, correct behavior is not guaranteed.
allow_nan_stats: `Boolean`, default `False`. If `False`, raise an
exception if a statistic (e.g. mean/mode/etc...) is undefined for any
        batch member. If `True`, batch members with valid parameters leading to
undefined statistics will return NaN for this statistic.
name: The name to give Ops created by the initializer.
Raises:
TypeError: If `mu` and `diag_stdev` are different dtypes.
"""
cov = operator_pd_diag.OperatorPDSqrtDiag(
diag_stdev, verify_pd=validate_args)
super(MultivariateNormalDiag, self).__init__(
mu, cov, allow_nan_stats=allow_nan_stats, validate_args=validate_args,
name=name)
class MultivariateNormalDiagPlusVDVT(MultivariateNormalOperatorPD):
"""The multivariate normal distribution on `R^k`.
Every batch member of this distribution is defined by a mean and a lightweight
covariance matrix `C`.
#### Mathematical details
The PDF of this distribution in terms of the mean `mu` and covariance `C` is:
```
f(x) = (2 pi)^(-k/2) |det(C)|^(-1/2) exp(-1/2 (x - mu)^T C^{-1} (x - mu))
```
For every batch member, this distribution represents `k` random variables
`(X_1,...,X_k)`, with mean `E[X_i] = mu[i]`, and covariance matrix
`C_{ij} := E[(X_i - mu[i])(X_j - mu[j])]`
The user initializes this class by providing the mean `mu`, and a lightweight
definition of `C`:
```
C = SS^T = SS = (M + V D V^T) (M + V D V^T)
M is diagonal (k x k)
  V is shape (k x r), typically r << k
  D is diagonal (r x r), optional (defaults to identity).
```
This allows for `O(kr + r^3)` pdf evaluation and determinant, and `O(kr)`
sampling and storage (per batch member).
#### Examples
A single multi-variate Gaussian distribution is defined by a vector of means
of length `k`, and square root of the covariance `S = M + V D V^T`. Extra
leading dimensions, if provided, allow for batches.
```python
# Initialize a single 3-variate Gaussian with covariance square root
# S = M + V D V^T, where V D V^T is a matrix-rank 2 update.
mu = [1, 2, 3.]
diag_large = [1.1, 2.2, 3.3]
v = ... # shape 3 x 2
diag_small = [4., 5.]
dist = tf.contrib.distributions.MultivariateNormalDiagPlusVDVT(
mu, diag_large, v, diag_small=diag_small)
# Evaluate this on an observation in R^3, returning a scalar.
dist.pdf([-1, 0, 1])
# Initialize a batch of two 3-variate Gaussians. This time, don't provide
# diag_small. This means S = M + V V^T.
mu = [[1, 2, 3], [11, 22, 33]] # shape 2 x 3
diag_large = ... # shape 2 x 3
v = ... # shape 2 x 3 x 1, a matrix-rank 1 update.
dist = tf.contrib.distributions.MultivariateNormalDiagPlusVDVT(
mu, diag_large, v)
  # Evaluate this on two observations, each in R^3, returning a length two
# tensor.
x = [[-1, 0, 1], [-11, 0, 11]] # Shape 2 x 3.
dist.pdf(x)
```
"""
def __init__(
self,
mu,
diag_large,
v,
diag_small=None,
validate_args=True,
allow_nan_stats=False,
name="MultivariateNormalDiagPlusVDVT"):
"""Multivariate Normal distributions on `R^k`.
For every batch member, this distribution represents `k` random variables
`(X_1,...,X_k)`, with mean `E[X_i] = mu[i]`, and covariance matrix
`C_{ij} := E[(X_i - mu[i])(X_j - mu[j])]`
The user initializes this class by providing the mean `mu`, and a
lightweight definition of `C`:
```
C = SS^T = SS = (M + V D V^T) (M + V D V^T)
M is diagonal (k x k)
    V is shape (k x r), typically r << k
    D is diagonal (r x r), optional (defaults to identity).
```
Args:
mu: Rank `n + 1` `float` or `double` tensor with shape `[N1,...,Nn, k]`,
`n >= 0`. The means.
diag_large: Optional rank `n + 1` `float` or `double` tensor, shape
`[N1,...,Nn, k]` `n >= 0`. Defines the diagonal matrix `M`.
v: Rank `n + 1` `float` or `double` tensor, shape `[N1,...,Nn, k, r]`
`n >= 0`. Defines the matrix `V`.
diag_small: Rank `n + 1` `float` or `double` tensor, shape
`[N1,...,Nn, k]` `n >= 0`. Defines the diagonal matrix `D`. Default
is `None`, which means `D` will be the identity matrix.
validate_args: Whether to validate input with asserts. If `validate_args`
is `False`,
and the inputs are invalid, correct behavior is not guaranteed.
allow_nan_stats: `Boolean`, default `False`. If `False`, raise an
exception if a statistic (e.g. mean/mode/etc...) is undefined for any
        batch member. If `True`, batch members with valid parameters leading to
undefined statistics will return NaN for this statistic.
name: The name to give Ops created by the initializer.
"""
m = operator_pd_diag.OperatorPDDiag(diag_large, verify_pd=validate_args)
cov = operator_pd_vdvt_update.OperatorPDSqrtVDVTUpdate(
m, v, diag=diag_small, verify_pd=validate_args,
verify_shapes=validate_args)
super(MultivariateNormalDiagPlusVDVT, self).__init__(
mu, cov, allow_nan_stats=allow_nan_stats, validate_args=validate_args,
name=name)
class MultivariateNormalCholesky(MultivariateNormalOperatorPD):
"""The multivariate normal distribution on `R^k`.
This distribution is defined by a 1-D mean `mu` and a Cholesky factor `chol`.
Providing the Cholesky factor allows for `O(k^2)` pdf evaluation and sampling,
and requires `O(k^2)` storage.
#### Mathematical details
The Cholesky factor `chol` defines the covariance matrix: `C = chol chol^T`.
The PDF of this distribution is then:
```
f(x) = (2 pi)^(-k/2) |det(C)|^(-1/2) exp(-1/2 (x - mu)^T C^{-1} (x - mu))
```
#### Examples
A single multi-variate Gaussian distribution is defined by a vector of means
of length `k`, and a covariance matrix of shape `k x k`.
Extra leading dimensions, if provided, allow for batches.
```python
# Initialize a single 3-variate Gaussian with diagonal covariance.
# Note, this would be more efficient with MultivariateNormalDiag.
mu = [1, 2, 3.]
chol = [[1, 0, 0], [0, 3, 0], [0, 0, 2]]
dist = tf.contrib.distributions.MultivariateNormalCholesky(mu, chol)
# Evaluate this on an observation in R^3, returning a scalar.
dist.pdf([-1, 0, 1])
# Initialize a batch of two 3-variate Gaussians.
mu = [[1, 2, 3], [11, 22, 33]]
chol = ... # shape 2 x 3 x 3, lower triangular, positive diagonal.
dist = tf.contrib.distributions.MultivariateNormalCholesky(mu, chol)
  # Evaluate this on two observations, each in R^3, returning a length two
# tensor.
x = [[-1, 0, 1], [-11, 0, 11]] # Shape 2 x 3.
dist.pdf(x)
```
  Trainable (batch) Cholesky matrices can be created with
`tf.contrib.distributions.batch_matrix_diag_transform()`
"""
def __init__(self,
mu,
chol,
validate_args=True,
allow_nan_stats=False,
name="MultivariateNormalCholesky"):
"""Multivariate Normal distributions on `R^k`.
User must provide means `mu` and `chol` which holds the (batch) Cholesky
factors, such that the covariance of each batch member is `chol chol^T`.
Args:
mu: `(N+1)-D` `float` or `double` tensor with shape `[N1,...,Nb, k]`,
`b >= 0`.
chol: `(N+2)-D` `Tensor` with same `dtype` as `mu` and shape
`[N1,...,Nb, k, k]`. The upper triangular part is ignored (treated as
though it is zero), and the diagonal must be positive.
validate_args: Whether to validate input with asserts. If `validate_args`
is `False`, and the inputs are invalid, correct behavior is not
guaranteed.
allow_nan_stats: `Boolean`, default `False`. If `False`, raise an
exception if a statistic (e.g. mean/mode/etc...) is undefined for any
        batch member. If `True`, batch members with valid parameters leading to
undefined statistics will return NaN for this statistic.
name: The name to give Ops created by the initializer.
Raises:
TypeError: If `mu` and `chol` are different dtypes.
"""
cov = operator_pd_cholesky.OperatorPDCholesky(chol, verify_pd=validate_args)
super(MultivariateNormalCholesky, self).__init__(
mu,
cov,
allow_nan_stats=allow_nan_stats,
validate_args=validate_args,
name=name)
class MultivariateNormalFull(MultivariateNormalOperatorPD):
"""The multivariate normal distribution on `R^k`.
This distribution is defined by a 1-D mean `mu` and covariance matrix `sigma`.
Evaluation of the pdf, determinant, and sampling are all `O(k^3)` operations.
#### Mathematical details
With `C = sigma`, the PDF of this distribution is:
```
f(x) = (2 pi)^(-k/2) |det(C)|^(-1/2) exp(-1/2 (x - mu)^T C^{-1} (x - mu))
```
#### Examples
A single multi-variate Gaussian distribution is defined by a vector of means
of length `k`, and a covariance matrix of shape `k x k`.
Extra leading dimensions, if provided, allow for batches.
```python
# Initialize a single 3-variate Gaussian with diagonal covariance.
mu = [1, 2, 3.]
sigma = [[1, 0, 0], [0, 3, 0], [0, 0, 2.]]
  dist = tf.contrib.distributions.MultivariateNormalFull(mu, sigma)
# Evaluate this on an observation in R^3, returning a scalar.
dist.pdf([-1, 0, 1])
# Initialize a batch of two 3-variate Gaussians.
mu = [[1, 2, 3], [11, 22, 33.]]
sigma = ... # shape 2 x 3 x 3, positive definite.
dist = tf.contrib.distributions.MultivariateNormalFull(mu, sigma)
  # Evaluate this on two observations, each in R^3, returning a length two
# tensor.
x = [[-1, 0, 1], [-11, 0, 11.]] # Shape 2 x 3.
dist.pdf(x)
```
"""
def __init__(self,
mu,
sigma,
validate_args=True,
allow_nan_stats=False,
name="MultivariateNormalFull"):
"""Multivariate Normal distributions on `R^k`.
User must provide means `mu` and `sigma`, the mean and covariance.
Args:
mu: `(N+1)-D` `float` or `double` tensor with shape `[N1,...,Nb, k]`,
`b >= 0`.
sigma: `(N+2)-D` `Tensor` with same `dtype` as `mu` and shape
`[N1,...,Nb, k, k]`. Each batch member must be positive definite.
validate_args: Whether to validate input with asserts. If `validate_args`
is `False`, and the inputs are invalid, correct behavior is not
guaranteed.
allow_nan_stats: `Boolean`, default `False`. If `False`, raise an
exception if a statistic (e.g. mean/mode/etc...) is undefined for any
        batch member. If `True`, batch members with valid parameters leading to
undefined statistics will return NaN for this statistic.
name: The name to give Ops created by the initializer.
Raises:
TypeError: If `mu` and `sigma` are different dtypes.
"""
cov = operator_pd_full.OperatorPDFull(sigma, verify_pd=validate_args)
super(MultivariateNormalFull, self).__init__(
mu,
cov,
allow_nan_stats=allow_nan_stats,
validate_args=validate_args,
name=name)
| # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Multivariate Normal distribution classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from tensorflow.contrib.distributions.python.ops import distribution
from tensorflow.contrib.distributions.python.ops import operator_pd_cholesky
from tensorflow.contrib.distributions.python.ops import operator_pd_diag
from tensorflow.contrib.distributions.python.ops import operator_pd_full
from tensorflow.contrib.distributions.python.ops import operator_pd_vdvt_update
from tensorflow.contrib.framework.python.framework import tensor_util as contrib_tensor_util
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
__all__ = [
"MultivariateNormalDiag",
"MultivariateNormalCholesky",
"MultivariateNormalFull",
"MultivariateNormalDiagPlusVDVT",
]
class MultivariateNormalOperatorPD(distribution.Distribution):
"""The multivariate normal distribution on `R^k`.
This distribution is defined by a 1-D mean `mu` and an instance of
`OperatorPDBase`, which provides access to a symmetric positive definite
operator, which defines the covariance.
#### Mathematical details
With `C` the covariance matrix represented by the operator, the PDF of this
distribution is:
```
f(x) = (2 pi)^(-k/2) |det(C)|^(-1/2) exp(-1/2 (x - mu)^T C^{-1} (x - mu))
```
#### Examples
A single multi-variate Gaussian distribution is defined by a vector of means
of length `k`, and a covariance matrix of shape `k x k`.
Extra leading dimensions, if provided, allow for batches.
```python
# Initialize a single 3-variate Gaussian.
mu = [1, 2, 3]
chol = [[1, 0, 0.], [1, 3, 0], [1, 2, 3]]
cov = tf.contrib.distributions.OperatorPDCholesky(chol)
dist = tf.contrib.distributions.MultivariateNormalOperatorPD(mu, cov)
# Evaluate this on an observation in R^3, returning a scalar.
dist.pdf([-1, 0, 1.])
# Initialize a batch of two 3-variate Gaussians.
mu = [[1, 2, 3], [11, 22, 33.]]
chol = ... # shape 2 x 3 x 3, lower triangular, positive diagonal.
cov = tf.contrib.distributions.OperatorPDCholesky(chol)
dist = tf.contrib.distributions.MultivariateNormalOperatorPD(mu, cov)
# Evaluate this on two observations, each in R^3, returning a length two
# tensor.
x = [[-1, 0, 1], [-11, 0, 11.]] # Shape 2 x 3.
dist.pdf(x)
```
"""
def __init__(self,
mu,
cov,
validate_args=True,
allow_nan_stats=False,
name="MultivariateNormalCov"):
"""Multivariate Normal distributions on `R^k`.
User must provide means `mu`, and an instance of `OperatorPDBase`, `cov`,
which determines the covariance.
Args:
mu: `float` or `double` tensor with shape `[N1,...,Nb, k]`, `b >= 0`.
cov: `float` or `double` instance of `OperatorPDBase` with same `dtype`
as `mu` and shape `[N1,...,Nb, k, k]`.
validate_args: Whether to validate input with asserts. If `validate_args`
is `False`, and the inputs are invalid, correct behavior is not
guaranteed.
allow_nan_stats: `Boolean`, default `False`. If `False`, raise an
exception if a statistic (e.g. mean/mode/etc...) is undefined for any
batch member. If `True`, batch members with valid parameters leading to
undefined statistics will return NaN for this statistic.
name: The name to give Ops created by the initializer.
Raises:
TypeError: If `mu` and `cov` are different dtypes.
"""
self._allow_nan_stats = allow_nan_stats
self._validate_args = validate_args
with ops.name_scope(name):
with ops.op_scope([mu] + cov.inputs, "init"):
self._cov = cov
self._mu = self._check_mu(mu)
self._name = name
def _check_mu(self, mu):
"""Return `mu` after validity checks and possibly with assertations."""
mu = ops.convert_to_tensor(mu)
cov = self._cov
if mu.dtype != cov.dtype:
raise TypeError(
"mu and cov must have the same dtype. Found mu.dtype = %s, "
"cov.dtype = %s"
% (mu.dtype, cov.dtype))
# Try to validate with static checks.
mu_shape = mu.get_shape()
cov_shape = cov.get_shape()
if mu_shape.is_fully_defined() and cov_shape.is_fully_defined():
if mu_shape != cov_shape[:-1]:
raise ValueError(
"mu.shape and cov.shape[:-1] should match. Found: mu.shape=%s, "
"cov.shape=%s" % (mu_shape, cov_shape))
else:
return mu
# Static checks could not be run, so possibly do dynamic checks.
if not self.validate_args:
return mu
else:
assert_same_rank = check_ops.assert_equal(
array_ops.rank(mu) + 1,
cov.rank(),
data=["mu should have rank 1 less than cov. Found: rank(mu) = ",
array_ops.rank(mu), " rank(cov) = ", cov.rank()],
)
with ops.control_dependencies([assert_same_rank]):
assert_same_shape = check_ops.assert_equal(
array_ops.shape(mu),
cov.vector_shape(),
data=["mu.shape and cov.shape[:-1] should match. "
"Found: shape(mu) = "
, array_ops.shape(mu), " shape(cov) = ", cov.shape()],
)
return control_flow_ops.with_dependencies([assert_same_shape], mu)
@property
def validate_args(self):
"""`Boolean` describing behavior on invalid input."""
return self._validate_args
@property
def allow_nan_stats(self):
"""`Boolean` describing behavior when stats are undefined."""
return self._allow_nan_stats
@property
def dtype(self):
return self._mu.dtype
def get_event_shape(self):
"""`TensorShape` available at graph construction time."""
# Recall _check_mu ensures mu and self._cov have same batch shape.
return self._cov.get_shape()[-1:]
def event_shape(self, name="event_shape"):
"""Shape of a sample from a single distribution as a 1-D int32 `Tensor`."""
# Recall _check_mu ensures mu and self._cov have same batch shape.
with ops.name_scope(self.name):
with ops.op_scope(self._cov.inputs, name):
return array_ops.pack([self._cov.vector_space_dimension()])
def batch_shape(self, name="batch_shape"):
"""Batch dimensions of this instance as a 1-D int32 `Tensor`."""
# Recall _check_mu ensures mu and self._cov have same batch shape.
with ops.name_scope(self.name):
with ops.op_scope(self._cov.inputs, name):
return self._cov.batch_shape()
def get_batch_shape(self):
"""`TensorShape` available at graph construction time."""
# Recall _check_mu ensures mu and self._cov have same batch shape.
return self._cov.get_batch_shape()
@property
def mu(self):
return self._mu
@property
def sigma(self):
"""Dense (batch) covariance matrix, if available."""
with ops.name_scope(self.name):
return self._cov.to_dense()
def mean(self, name="mean"):
"""Mean of each batch member."""
with ops.name_scope(self.name):
with ops.op_scope([self._mu], name):
return array_ops.identity(self._mu)
def mode(self, name="mode"):
"""Mode of each batch member."""
with ops.name_scope(self.name):
with ops.op_scope([self._mu], name):
return array_ops.identity(self._mu)
def variance(self, name="variance"):
"""Variance of each batch member."""
with ops.name_scope(self.name):
return self.sigma
def log_sigma_det(self, name="log_sigma_det"):
"""Log of determinant of covariance matrix."""
with ops.name_scope(self.name):
with ops.op_scope(self._cov.inputs, name):
return self._cov.log_det()
def sigma_det(self, name="sigma_det"):
"""Determinant of covariance matrix."""
with ops.name_scope(self.name):
with ops.op_scope(self._cov.inputs, name):
return math_ops.exp(self._cov.log_det())
def log_prob(self, x, name="log_prob"):
"""Log prob of observations `x` given these Multivariate Normals.
`x` is a batch vector with compatible shape if `x` is a `Tensor` whose
shape can be broadcast up to either:
```
self.batch_shape + self.event_shape
OR
[M1,...,Mm] + self.batch_shape + self.event_shape
```
Args:
x: Compatible batch vector with same `dtype` as this distribution.
name: The name to give this op.
Returns:
log_prob: tensor of dtype `dtype`, the log-PDFs of `x`.
"""
# Q: Why are shape requirements as stated above?
# A: The compatible shapes are precisely the ones that will broadcast to
# a shape compatible with self._cov.
# See Operator base class for notes about shapes compatible with self._cov.
with ops.name_scope(self.name):
with ops.op_scope([self._mu, x] + self._cov.inputs, name):
x = ops.convert_to_tensor(x)
contrib_tensor_util.assert_same_float_dtype((self._mu, x))
# _check_mu asserts that self.mu has same batch shape as self.cov.
# so batch shape of self.mu = that of self._cov and self, and the
# batch shape of x_centered is a broadcast version of these. If this
# broadcast results in a shape like
# [M1,...,Mm] + self.batch_shape + self.event_shape
# OR
# self.batch_shape + self.event_shape
# then subsequent operator calls are guaranteed to work.
x_centered = x - self.mu
# Compute the term x^{-1} sigma^{-1} x which appears in the exponent of
# the pdf.
x_whitened_norm = self._cov.inv_quadratic_form_on_vectors(x_centered)
log_sigma_det = self.log_sigma_det()
log_two_pi = constant_op.constant(
math.log(2 * math.pi), dtype=self.dtype)
k = math_ops.cast(self._cov.vector_space_dimension(), self.dtype)
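# log pdf(x) = -0.5 * (log|C| + k * log(2 pi) + (x - mu)^T C^{-1} (x - mu)), assembled below.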
log_prob_value = -(log_sigma_det + k * log_two_pi + x_whitened_norm) / 2
output_static_shape = x_centered.get_shape()[:-1]
log_prob_value.set_shape(output_static_shape)
return log_prob_value
def prob(self, x, name="prob"):
"""The PDF of observations `x` under these Multivariate Normals.
`x` is a batch vector with compatible shape if `x` is a `Tensor` whose
shape can be broadcast up to either:
```
self.batch_shape + self.event_shape
OR
[M1,...,Mm] + self.batch_shape + self.event_shape
```
Args:
x: Compatible batch vector with same `dtype` as this distribution.
name: The name to give this op.
Returns:
prob: tensor of dtype `dtype`, the prob values of `x`.
"""
with ops.name_scope(self.name):
with ops.op_scope([self._mu, x] + self._cov.inputs, name):
return math_ops.exp(self.log_prob(x))
def entropy(self, name="entropy"):
"""The entropies of these Multivariate Normals.
Args:
name: The name to give this op.
Returns:
entropy: tensor of dtype `dtype`, the entropies.
"""
with ops.name_scope(self.name):
with ops.op_scope([self._mu] + self._cov.inputs, name):
log_sigma_det = self.log_sigma_det()
one_plus_log_two_pi = constant_op.constant(1 + math.log(2 * math.pi),
dtype=self.dtype)
# Use broadcasting rules to calculate the full broadcast sigma.
k = math_ops.cast(self._cov.vector_space_dimension(), dtype=self.dtype)
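# Differential entropy of a multivariate normal: 0.5 * (k * (1 + log(2 pi)) + log|C|).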
entropy_value = (k * one_plus_log_two_pi + log_sigma_det) / 2
entropy_value.set_shape(log_sigma_det.get_shape())
return entropy_value
def sample_n(self, n, seed=None, name="sample_n"):
"""Sample `n` observations from the Multivariate Normal Distributions.
Args:
n: `Scalar`, type int32, the number of observations to sample.
seed: Python integer, the random seed.
name: The name to give this op.
Returns:
samples: `[n, ...]`, a `Tensor` of `n` samples for each
of the distributions determined by broadcasting the hyperparameters.
"""
with ops.name_scope(self.name):
with ops.op_scope([self._mu, n] + self._cov.inputs, name):
# Recall _check_mu ensures mu and self._cov have same batch shape.
broadcast_shape = self.mu.get_shape()
n = ops.convert_to_tensor(n)
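# White noise is drawn with shape batch_shape + [k, n]; the sample dimension
# sits last here and is moved to the front after the sqrt matmul below.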
shape = array_ops.concat(0, [self._cov.vector_shape(), [n]])
white_samples = random_ops.random_normal(shape=shape,
mean=0,
stddev=1,
dtype=self.dtype,
seed=seed)
correlated_samples = self._cov.sqrt_matmul(white_samples)
# Move the last dimension to the front
perm = array_ops.concat(0, (
array_ops.pack([array_ops.rank(correlated_samples) - 1]),
math_ops.range(0, array_ops.rank(correlated_samples) - 1)))
# TODO(ebrevdo): Once we get a proper tensor contraction op,
# perform the inner product using that instead of batch_matmul
# and this slow transpose can go away!
correlated_samples = array_ops.transpose(correlated_samples, perm)
samples = correlated_samples + self.mu
# Provide some hints to shape inference
n_val = tensor_util.constant_value(n)
final_shape = tensor_shape.vector(n_val).concatenate(broadcast_shape)
samples.set_shape(final_shape)
return samples
@property
def is_reparameterized(self):
return True
@property
def name(self):
return self._name
@property
def is_continuous(self):
return True
class MultivariateNormalDiag(MultivariateNormalOperatorPD):
"""The multivariate normal distribution on `R^k`.
This distribution is defined by a 1-D mean `mu` and a 1-D diagonal
`diag_stdev`, representing the standard deviations. This distribution
assumes the random variables, `(X_1,...,X_k)` are independent, thus no
non-diagonal terms of the covariance matrix are needed.
This allows for `O(k)` pdf evaluation, sampling, and storage.
#### Mathematical details
The PDF of this distribution is defined in terms of the diagonal covariance
determined by `diag_stdev`: `C_{ii} = diag_stdev[i]**2`.
```
f(x) = (2 pi)^(-k/2) |det(C)|^(-1/2) exp(-1/2 (x - mu)^T C^{-1} (x - mu))
```
#### Examples
A single multi-variate Gaussian distribution is defined by a vector of means
of length `k`, and the square roots of the (independent) random variables.
Extra leading dimensions, if provided, allow for batches.
```python
# Initialize a single 3-variate Gaussian with diagonal standard deviation.
mu = [1, 2, 3.]
diag_stdev = [4, 5, 6.]
dist = tf.contrib.distributions.MultivariateNormalDiag(mu, diag_stdev)
# Evaluate this on an observation in R^3, returning a scalar.
dist.pdf([-1, 0, 1])
# Initialize a batch of two 3-variate Gaussians.
mu = [[1, 2, 3], [11, 22, 33]] # shape 2 x 3
diag_stdev = ... # shape 2 x 3, positive.
dist = tf.contrib.distributions.MultivariateNormalDiag(mu, diag_stdev)
# Evaluate this on two observations, each in R^3, returning a length two
# tensor.
x = [[-1, 0, 1], [-11, 0, 11]] # Shape 2 x 3.
dist.pdf(x)
```
"""
def __init__(
self,
mu,
diag_stdev,
validate_args=True,
allow_nan_stats=False,
name="MultivariateNormalDiag"):
"""Multivariate Normal distributions on `R^k`.
User must provide means `mu` and standard deviations `diag_stdev`.
Each batch member represents a random vector `(X_1,...,X_k)` of independent
random normals.
The mean of `X_i` is `mu[i]`, and the standard deviation is `diag_stdev[i]`.
Args:
mu: Rank `N + 1` `float` or `double` tensor with shape `[N1,...,Nb, k]`,
`b >= 0`.
diag_stdev: Rank `N + 1` `Tensor` with same `dtype` and shape as `mu`,
representing the standard deviations. Must be positive.
validate_args: Whether to validate input with asserts. If `validate_args`
is `False`,
and the inputs are invalid, correct behavior is not guaranteed.
allow_nan_stats: `Boolean`, default `False`. If `False`, raise an
exception if a statistic (e.g. mean/mode/etc...) is undefined for any
batch member. If `True`, batch members with valid parameters leading to
undefined statistics will return NaN for this statistic.
name: The name to give Ops created by the initializer.
Raises:
TypeError: If `mu` and `diag_stdev` are different dtypes.
"""
cov = operator_pd_diag.OperatorPDSqrtDiag(
diag_stdev, verify_pd=validate_args)
super(MultivariateNormalDiag, self).__init__(
mu, cov, allow_nan_stats=allow_nan_stats, validate_args=validate_args,
name=name)
class MultivariateNormalDiagPlusVDVT(MultivariateNormalOperatorPD):
"""The multivariate normal distribution on `R^k`.
Every batch member of this distribution is defined by a mean and a lightweight
covariance matrix `C`.
#### Mathematical details
The PDF of this distribution in terms of the mean `mu` and covariance `C` is:
```
f(x) = (2 pi)^(-k/2) |det(C)|^(-1/2) exp(-1/2 (x - mu)^T C^{-1} (x - mu))
```
For every batch member, this distribution represents `k` random variables
`(X_1,...,X_k)`, with mean `E[X_i] = mu[i]`, and covariance matrix
`C_{ij} := E[(X_i - mu[i])(X_j - mu[j])]`
The user initializes this class by providing the mean `mu`, and a lightweight
definition of `C`:
```
C = SS^T = SS = (M + V D V^T) (M + V D V^T)
M is diagonal (k x k)
V is shape (k x r), typically r << k
D is diagonal (r x r), optional (defaults to identity).
```
This allows for `O(kr + r^3)` pdf evaluation and determinant, and `O(kr)`
sampling and storage (per batch member).
#### Examples
A single multi-variate Gaussian distribution is defined by a vector of means
of length `k`, and square root of the covariance `S = M + V D V^T`. Extra
leading dimensions, if provided, allow for batches.
```python
# Initialize a single 3-variate Gaussian with covariance square root
# S = M + V D V^T, where V D V^T is a matrix-rank 2 update.
mu = [1, 2, 3.]
diag_large = [1.1, 2.2, 3.3]
v = ... # shape 3 x 2
diag_small = [4., 5.]
dist = tf.contrib.distributions.MultivariateNormalDiagPlusVDVT(
mu, diag_large, v, diag_small=diag_small)
# Evaluate this on an observation in R^3, returning a scalar.
dist.pdf([-1, 0, 1])
# Initialize a batch of two 3-variate Gaussians. This time, don't provide
# diag_small. This means S = M + V V^T.
mu = [[1, 2, 3], [11, 22, 33]] # shape 2 x 3
diag_large = ... # shape 2 x 3
v = ... # shape 2 x 3 x 1, a matrix-rank 1 update.
dist = tf.contrib.distributions.MultivariateNormalDiagPlusVDVT(
mu, diag_large, v)
# Evaluate this on two observations, each in R^3, returning a length two
# tensor.
x = [[-1, 0, 1], [-11, 0, 11]] # Shape 2 x 3.
dist.pdf(x)
```
"""
def __init__(
self,
mu,
diag_large,
v,
diag_small=None,
validate_args=True,
allow_nan_stats=False,
name="MultivariateNormalDiagPlusVDVT"):
"""Multivariate Normal distributions on `R^k`.
For every batch member, this distribution represents `k` random variables
`(X_1,...,X_k)`, with mean `E[X_i] = mu[i]`, and covariance matrix
`C_{ij} := E[(X_i - mu[i])(X_j - mu[j])]`
The user initializes this class by providing the mean `mu`, and a
lightweight definition of `C`:
```
C = SS^T = SS = (M + V D V^T) (M + V D V^T)
M is diagonal (k x k)
V is shape (k x r), typically r << k
D is diagonal (r x r), optional (defaults to identity).
```
Args:
mu: Rank `n + 1` `float` or `double` tensor with shape `[N1,...,Nn, k]`,
`n >= 0`. The means.
diag_large: Optional rank `n + 1` `float` or `double` tensor, shape
`[N1,...,Nn, k]` `n >= 0`. Defines the diagonal matrix `M`.
v: Rank `n + 1` `float` or `double` tensor, shape `[N1,...,Nn, k, r]`
`n >= 0`. Defines the matrix `V`.
diag_small: Rank `n + 1` `float` or `double` tensor, shape
`[N1,...,Nn, k]` `n >= 0`. Defines the diagonal matrix `D`. Default
is `None`, which means `D` will be the identity matrix.
validate_args: Whether to validate input with asserts. If `validate_args`
is `False`,
and the inputs are invalid, correct behavior is not guaranteed.
allow_nan_stats: `Boolean`, default `False`. If `False`, raise an
exception if a statistic (e.g. mean/mode/etc...) is undefined for any
batch member. If `True`, batch members with valid parameters leading to
undefined statistics will return NaN for this statistic.
name: The name to give Ops created by the initializer.
"""
m = operator_pd_diag.OperatorPDDiag(diag_large, verify_pd=validate_args)
cov = operator_pd_vdvt_update.OperatorPDSqrtVDVTUpdate(
m, v, diag=diag_small, verify_pd=validate_args,
verify_shapes=validate_args)
super(MultivariateNormalDiagPlusVDVT, self).__init__(
mu, cov, allow_nan_stats=allow_nan_stats, validate_args=validate_args,
name=name)
class MultivariateNormalCholesky(MultivariateNormalOperatorPD):
"""The multivariate normal distribution on `R^k`.
This distribution is defined by a 1-D mean `mu` and a Cholesky factor `chol`.
Providing the Cholesky factor allows for `O(k^2)` pdf evaluation and sampling,
and requires `O(k^2)` storage.
#### Mathematical details
The Cholesky factor `chol` defines the covariance matrix: `C = chol chol^T`.
The PDF of this distribution is then:
```
f(x) = (2 pi)^(-k/2) |det(C)|^(-1/2) exp(-1/2 (x - mu)^T C^{-1} (x - mu))
```
#### Examples
A single multi-variate Gaussian distribution is defined by a vector of means
of length `k`, and a covariance matrix of shape `k x k`.
Extra leading dimensions, if provided, allow for batches.
```python
# Initialize a single 3-variate Gaussian with diagonal covariance.
# Note, this would be more efficient with MultivariateNormalDiag.
mu = [1, 2, 3.]
chol = [[1, 0, 0], [0, 3, 0], [0, 0, 2]]
dist = tf.contrib.distributions.MultivariateNormalCholesky(mu, chol)
# Evaluate this on an observation in R^3, returning a scalar.
dist.pdf([-1, 0, 1])
# Initialize a batch of two 3-variate Gaussians.
mu = [[1, 2, 3], [11, 22, 33]]
chol = ... # shape 2 x 3 x 3, lower triangular, positive diagonal.
dist = tf.contrib.distributions.MultivariateNormalCholesky(mu, chol)
# Evaluate this on two observations, each in R^3, returning a length two
# tensor.
x = [[-1, 0, 1], [-11, 0, 11]] # Shape 2 x 3.
dist.pdf(x)
```
Trainable (batch) Cholesky matrices can be created with
`tf.contrib.distributions.batch_matrix_diag_transform()`
"""
def __init__(self,
mu,
chol,
validate_args=True,
allow_nan_stats=False,
name="MultivariateNormalCholesky"):
"""Multivariate Normal distributions on `R^k`.
User must provide means `mu` and `chol` which holds the (batch) Cholesky
factors, such that the covariance of each batch member is `chol chol^T`.
Args:
mu: `(N+1)-D` `float` or `double` tensor with shape `[N1,...,Nb, k]`,
`b >= 0`.
chol: `(N+2)-D` `Tensor` with same `dtype` as `mu` and shape
`[N1,...,Nb, k, k]`. The upper triangular part is ignored (treated as
though it is zero), and the diagonal must be positive.
validate_args: Whether to validate input with asserts. If `validate_args`
is `False`, and the inputs are invalid, correct behavior is not
guaranteed.
allow_nan_stats: `Boolean`, default `False`. If `False`, raise an
exception if a statistic (e.g. mean/mode/etc...) is undefined for any
batch member. If `True`, batch members with valid parameters leading to
undefined statistics will return NaN for this statistic.
name: The name to give Ops created by the initializer.
Raises:
TypeError: If `mu` and `chol` are different dtypes.
"""
cov = operator_pd_cholesky.OperatorPDCholesky(chol, verify_pd=validate_args)
super(MultivariateNormalCholesky, self).__init__(
mu,
cov,
allow_nan_stats=allow_nan_stats,
validate_args=validate_args,
name=name)
class MultivariateNormalFull(MultivariateNormalOperatorPD):
"""The multivariate normal distribution on `R^k`.
This distribution is defined by a 1-D mean `mu` and covariance matrix `sigma`.
Evaluation of the pdf, determinant, and sampling are all `O(k^3)` operations.
#### Mathematical details
With `C = sigma`, the PDF of this distribution is:
```
f(x) = (2 pi)^(-k/2) |det(C)|^(-1/2) exp(-1/2 (x - mu)^T C^{-1} (x - mu))
```
#### Examples
A single multi-variate Gaussian distribution is defined by a vector of means
of length `k`, and a covariance matrix of shape `k x k`.
Extra leading dimensions, if provided, allow for batches.
```python
# Initialize a single 3-variate Gaussian with diagonal covariance.
mu = [1, 2, 3.]
sigma = [[1, 0, 0], [0, 3, 0], [0, 0, 2.]]
dist = tf.contrib.distributions.MultivariateNormalFull(mu, sigma)
# Evaluate this on an observation in R^3, returning a scalar.
dist.pdf([-1, 0, 1])
# Initialize a batch of two 3-variate Gaussians.
mu = [[1, 2, 3], [11, 22, 33.]]
sigma = ... # shape 2 x 3 x 3, positive definite.
dist = tf.contrib.distributions.MultivariateNormalFull(mu, sigma)
# Evaluate this on two observations, each in R^3, returning a length two
# tensor.
x = [[-1, 0, 1], [-11, 0, 11.]] # Shape 2 x 3.
dist.pdf(x)
```
"""
def __init__(self,
mu,
sigma,
validate_args=True,
allow_nan_stats=False,
name="MultivariateNormalFull"):
"""Multivariate Normal distributions on `R^k`.
User must provide means `mu` and `sigma`, the mean and covariance.
Args:
mu: `(N+1)-D` `float` or `double` tensor with shape `[N1,...,Nb, k]`,
`b >= 0`.
sigma: `(N+2)-D` `Tensor` with same `dtype` as `mu` and shape
`[N1,...,Nb, k, k]`. Each batch member must be positive definite.
validate_args: Whether to validate input with asserts. If `validate_args`
is `False`, and the inputs are invalid, correct behavior is not
guaranteed.
allow_nan_stats: `Boolean`, default `False`. If `False`, raise an
exception if a statistic (e.g. mean/mode/etc...) is undefined for any
batch member. If `True`, batch members with valid parameters leading to
undefined statistics will return NaN for this statistic.
name: The name to give Ops created by the initializer.
Raises:
TypeError: If `mu` and `sigma` are different dtypes.
"""
cov = operator_pd_full.OperatorPDFull(sigma, verify_pd=validate_args)
super(MultivariateNormalFull, self).__init__(
mu,
cov,
allow_nan_stats=allow_nan_stats,
validate_args=validate_args,
name=name)
| en | 0.778227 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== Multivariate Normal distribution classes. The multivariate normal distribution on `R^k`. This distribution is defined by a 1-D mean `mu` and an instance of `OperatorPDBase`, which provides access to a symmetric positive definite operator, which defines the covariance. #### Mathematical details With `C` the covariance matrix represented by the operator, the PDF of this distribution is: ``` f(x) = (2 pi)^(-k/2) |det(C)|^(-1/2) exp(-1/2 (x - mu)^T C^{-1} (x - mu)) ``` #### Examples A single multi-variate Gaussian distribution is defined by a vector of means of length `k`, and a covariance matrix of shape `k x k`. Extra leading dimensions, if provided, allow for batches. ```python # Initialize a single 3-variate Gaussian. mu = [1, 2, 3] chol = [[1, 0, 0.], [1, 3, 0], [1, 2, 3]] cov = tf.contrib.distributions.OperatorPDCholesky(chol) dist = tf.contrib.distributions.MultivariateNormalOperatorPD(mu, cov) # Evaluate this on an observation in R^3, returning a scalar. dist.pdf([-1, 0, 1.]) # Initialize a batch of two 3-variate Gaussians. mu = [[1, 2, 3], [11, 22, 33.]] chol = ... # shape 2 x 3 x 3, lower triangular, positive diagonal. cov = tf.contrib.distributions.OperatorPDCholesky(chol) dist = tf.contrib.distributions.MultivariateNormalOperatorPD(mu, cov) # Evaluate this on a two observations, each in R^3, returning a length two # tensor. x = [[-1, 0, 1], [-11, 0, 11.]] # Shape 2 x 3. dist.pdf(x) ``` Multivariate Normal distributions on `R^k`. User must provide means `mu`, and an instance of `OperatorPDBase`, `cov`, which determines the covariance. Args: mu: `float` or `double` tensor with shape `[N1,...,Nb, k]`, `b >= 0`. cov: `float` or `double` instance of `OperatorPDBase` with same `dtype` as `mu` and shape `[N1,...,Nb, k, k]`. validate_args: Whether to validate input with asserts. If `validate_args` is `False`, and the inputs are invalid, correct behavior is not guaranteed. allow_nan_stats: `Boolean`, default `False`. If `False`, raise an exception if a statistic (e.g. mean/mode/etc...) is undefined for any batch member If `True`, batch members with valid parameters leading to undefined statistics will return NaN for this statistic. name: The name to give Ops created by the initializer. Raises: TypeError: If `mu` and `cov` are different dtypes. Return `mu` after validity checks and possibly with assertations. # Try to validate with static checks. # Static checks could not be run, so possibly do dyamic checks. `Boolean` describing behavior on invalid input. `Boolean` describing behavior when stats are undefined. `TensorShape` available at graph construction time. # Recall _check_mu ensures mu and self._cov have same batch shape. Shape of a sample from a single distribution as a 1-D int32 `Tensor`. # Recall _check_mu ensures mu and self._cov have same batch shape. 
Batch dimensions of this instance as a 1-D int32 `Tensor`. # Recall _check_mu ensures mu and self._cov have same batch shape. `TensorShape` available at graph construction time. # Recall _check_mu ensures mu and self._cov have same batch shape. Dense (batch) covariance matrix, if available. Mean of each batch member. Mode of each batch member. Variance of each batch member. Log of determinant of covariance matrix. Determinant of covariance matrix. Log prob of observations `x` given these Multivariate Normals. `x` is a batch vector with compatible shape if `x` is a `Tensor` whose shape can be broadcast up to either: ```` self.batch_shape + self.event_shape OR [M1,...,Mm] + self.batch_shape + self.event_shape ``` Args: x: Compatible batch vector with same `dtype` as this distribution. name: The name to give this op. Returns: log_prob: tensor of dtype `dtype`, the log-PDFs of `x`. # Q: Why are shape requirements as stated above? # A: The compatible shapes are precisely the ones that will broadcast to # a shape compatible with self._cov. # See Operator base class for notes about shapes compatible with self._cov. # _check_mu asserts that self.mu has same batch shape as self.cov. # so batch shape of self.mu = that of self._cov and self, and the # batch shape of x_centered is a broadcast version of these. If this # broadcast results in a shape like # [M1,...,Mm] + self.batch_shape + self.event_shape # OR # self.batch_shape + self.event_shape # then subsequent operator calls are guaranteed to work. # Compute the term x^{-1} sigma^{-1} x which appears in the exponent of # the pdf. The PDF of observations `x` under these Multivariate Normals. `x` is a batch vector with compatible shape if `x` is a `Tensor` whose shape can be broadcast up to either: ```` self.batch_shape + self.event_shape OR [M1,...,Mm] + self.batch_shape + self.event_shape ``` Args: x: Compatible batch vector with same `dtype` as this distribution. name: The name to give this op. Returns: prob: tensor of dtype `dtype`, the prob values of `x`. The entropies of these Multivariate Normals. Args: name: The name to give this op. Returns: entropy: tensor of dtype `dtype`, the entropies. # Use broadcasting rules to calculate the full broadcast sigma. Sample `n` observations from the Multivariate Normal Distributions. Args: n: `Scalar`, type int32, the number of observations to sample. seed: Python integer, the random seed. name: The name to give this op. Returns: samples: `[n, ...]`, a `Tensor` of `n` samples for each of the distributions determined by broadcasting the hyperparameters. # Recall _check_mu ensures mu and self._cov have same batch shape. # Move the last dimension to the front # TODO(ebrevdo): Once we get a proper tensor contraction op, # perform the inner product using that instead of batch_matmul # and this slow transpose can go away! # Provide some hints to shape inference The multivariate normal distribution on `R^k`. This distribution is defined by a 1-D mean `mu` and a 1-D diagonal `diag_stdev`, representing the standard deviations. This distribution assumes the random variables, `(X_1,...,X_k)` are independent, thus no non-diagonal terms of the covariance matrix are needed. This allows for `O(k)` pdf evaluation, sampling, and storage. #### Mathematical details The PDF of this distribution is defined in terms of the diagonal covariance determined by `diag_stdev`: `C_{ii} = diag_stdev[i]**2`. 
``` f(x) = (2 pi)^(-k/2) |det(C)|^(-1/2) exp(-1/2 (x - mu)^T C^{-1} (x - mu)) ``` #### Examples A single multi-variate Gaussian distribution is defined by a vector of means of length `k`, and the square roots of the (independent) random variables. Extra leading dimensions, if provided, allow for batches. ```python # Initialize a single 3-variate Gaussian with diagonal standard deviation. mu = [1, 2, 3.] diag_stdev = [4, 5, 6.] dist = tf.contrib.distributions.MultivariateNormalDiag(mu, diag_stdev) # Evaluate this on an observation in R^3, returning a scalar. dist.pdf([-1, 0, 1]) # Initialize a batch of two 3-variate Gaussians. mu = [[1, 2, 3], [11, 22, 33]] # shape 2 x 3 diag_stdev = ... # shape 2 x 3, positive. dist = tf.contrib.distributions.MultivariateNormalDiag(mu, diag_stdev) # Evaluate this on a two observations, each in R^3, returning a length two # tensor. x = [[-1, 0, 1], [-11, 0, 11]] # Shape 2 x 3. dist.pdf(x) ``` Multivariate Normal distributions on `R^k`. User must provide means `mu` and standard deviations `diag_stdev`. Each batch member represents a random vector `(X_1,...,X_k)` of independent random normals. The mean of `X_i` is `mu[i]`, and the standard deviation is `diag_stdev[i]`. Args: mu: Rank `N + 1` `float` or `double` tensor with shape `[N1,...,Nb, k]`, `b >= 0`. diag_stdev: Rank `N + 1` `Tensor` with same `dtype` and shape as `mu`, representing the standard deviations. Must be positive. validate_args: Whether to validate input with asserts. If `validate_args` is `False`, and the inputs are invalid, correct behavior is not guaranteed. allow_nan_stats: `Boolean`, default `False`. If `False`, raise an exception if a statistic (e.g. mean/mode/etc...) is undefined for any batch member If `True`, batch members with valid parameters leading to undefined statistics will return NaN for this statistic. name: The name to give Ops created by the initializer. Raises: TypeError: If `mu` and `diag_stdev` are different dtypes. The multivariate normal distribution on `R^k`. Every batch member of this distribution is defined by a mean and a lightweight covariance matrix `C`. #### Mathematical details The PDF of this distribution in terms of the mean `mu` and covariance `C` is: ``` f(x) = (2 pi)^(-k/2) |det(C)|^(-1/2) exp(-1/2 (x - mu)^T C^{-1} (x - mu)) ``` For every batch member, this distribution represents `k` random variables `(X_1,...,X_k)`, with mean `E[X_i] = mu[i]`, and covariance matrix `C_{ij} := E[(X_i - mu[i])(X_j - mu[j])]` The user initializes this class by providing the mean `mu`, and a lightweight definition of `C`: ``` C = SS^T = SS = (M + V D V^T) (M + V D V^T) M is diagonal (k x k) V = is shape (k x r), typically r << k D = is diagonal (r x r), optional (defaults to identity). ``` This allows for `O(kr + r^3)` pdf evaluation and determinant, and `O(kr)` sampling and storage (per batch member). #### Examples A single multi-variate Gaussian distribution is defined by a vector of means of length `k`, and square root of the covariance `S = M + V D V^T`. Extra leading dimensions, if provided, allow for batches. ```python # Initialize a single 3-variate Gaussian with covariance square root # S = M + V D V^T, where V D V^T is a matrix-rank 2 update. mu = [1, 2, 3.] diag_large = [1.1, 2.2, 3.3] v = ... # shape 3 x 2 diag_small = [4., 5.] dist = tf.contrib.distributions.MultivariateNormalDiagPlusVDVT( mu, diag_large, v, diag_small=diag_small) # Evaluate this on an observation in R^3, returning a scalar. dist.pdf([-1, 0, 1]) # Initialize a batch of two 3-variate Gaussians. 
This time, don't provide # diag_small. This means S = M + V V^T. mu = [[1, 2, 3], [11, 22, 33]] # shape 2 x 3 diag_large = ... # shape 2 x 3 v = ... # shape 2 x 3 x 1, a matrix-rank 1 update. dist = tf.contrib.distributions.MultivariateNormalDiagPlusVDVT( mu, diag_large, v) # Evaluate this on a two observations, each in R^3, returning a length two # tensor. x = [[-1, 0, 1], [-11, 0, 11]] # Shape 2 x 3. dist.pdf(x) ``` Multivariate Normal distributions on `R^k`. For every batch member, this distribution represents `k` random variables `(X_1,...,X_k)`, with mean `E[X_i] = mu[i]`, and covariance matrix `C_{ij} := E[(X_i - mu[i])(X_j - mu[j])]` The user initializes this class by providing the mean `mu`, and a lightweight definition of `C`: ``` C = SS^T = SS = (M + V D V^T) (M + V D V^T) M is diagonal (k x k) V = is shape (k x r), typically r << k D = is diagonal (r x r), optional (defaults to identity). ``` Args: mu: Rank `n + 1` `float` or `double` tensor with shape `[N1,...,Nn, k]`, `n >= 0`. The means. diag_large: Optional rank `n + 1` `float` or `double` tensor, shape `[N1,...,Nn, k]` `n >= 0`. Defines the diagonal matrix `M`. v: Rank `n + 1` `float` or `double` tensor, shape `[N1,...,Nn, k, r]` `n >= 0`. Defines the matrix `V`. diag_small: Rank `n + 1` `float` or `double` tensor, shape `[N1,...,Nn, k]` `n >= 0`. Defines the diagonal matrix `D`. Default is `None`, which means `D` will be the identity matrix. validate_args: Whether to validate input with asserts. If `validate_args` is `False`, and the inputs are invalid, correct behavior is not guaranteed. allow_nan_stats: `Boolean`, default `False`. If `False`, raise an exception if a statistic (e.g. mean/mode/etc...) is undefined for any batch member If `True`, batch members with valid parameters leading to undefined statistics will return NaN for this statistic. name: The name to give Ops created by the initializer. The multivariate normal distribution on `R^k`. This distribution is defined by a 1-D mean `mu` and a Cholesky factor `chol`. Providing the Cholesky factor allows for `O(k^2)` pdf evaluation and sampling, and requires `O(k^2)` storage. #### Mathematical details The Cholesky factor `chol` defines the covariance matrix: `C = chol chol^T`. The PDF of this distribution is then: ``` f(x) = (2 pi)^(-k/2) |det(C)|^(-1/2) exp(-1/2 (x - mu)^T C^{-1} (x - mu)) ``` #### Examples A single multi-variate Gaussian distribution is defined by a vector of means of length `k`, and a covariance matrix of shape `k x k`. Extra leading dimensions, if provided, allow for batches. ```python # Initialize a single 3-variate Gaussian with diagonal covariance. # Note, this would be more efficient with MultivariateNormalDiag. mu = [1, 2, 3.] chol = [[1, 0, 0], [0, 3, 0], [0, 0, 2]] dist = tf.contrib.distributions.MultivariateNormalCholesky(mu, chol) # Evaluate this on an observation in R^3, returning a scalar. dist.pdf([-1, 0, 1]) # Initialize a batch of two 3-variate Gaussians. mu = [[1, 2, 3], [11, 22, 33]] chol = ... # shape 2 x 3 x 3, lower triangular, positive diagonal. dist = tf.contrib.distributions.MultivariateNormalCholesky(mu, chol) # Evaluate this on a two observations, each in R^3, returning a length two # tensor. x = [[-1, 0, 1], [-11, 0, 11]] # Shape 2 x 3. dist.pdf(x) ``` Trainable (batch) Choesky matrices can be created with `tf.contrib.distributions.batch_matrix_diag_transform()` Multivariate Normal distributions on `R^k`. 
User must provide means `mu` and `chol` which holds the (batch) Cholesky factors, such that the covariance of each batch member is `chol chol^T`. Args: mu: `(N+1)-D` `float` or `double` tensor with shape `[N1,...,Nb, k]`, `b >= 0`. chol: `(N+2)-D` `Tensor` with same `dtype` as `mu` and shape `[N1,...,Nb, k, k]`. The upper triangular part is ignored (treated as though it is zero), and the diagonal must be positive. validate_args: Whether to validate input with asserts. If `validate_args` is `False`, and the inputs are invalid, correct behavior is not guaranteed. allow_nan_stats: `Boolean`, default `False`. If `False`, raise an exception if a statistic (e.g. mean/mode/etc...) is undefined for any batch member If `True`, batch members with valid parameters leading to undefined statistics will return NaN for this statistic. name: The name to give Ops created by the initializer. Raises: TypeError: If `mu` and `chol` are different dtypes. The multivariate normal distribution on `R^k`. This distribution is defined by a 1-D mean `mu` and covariance matrix `sigma`. Evaluation of the pdf, determinant, and sampling are all `O(k^3)` operations. #### Mathematical details With `C = sigma`, the PDF of this distribution is: ``` f(x) = (2 pi)^(-k/2) |det(C)|^(-1/2) exp(-1/2 (x - mu)^T C^{-1} (x - mu)) ``` #### Examples A single multi-variate Gaussian distribution is defined by a vector of means of length `k`, and a covariance matrix of shape `k x k`. Extra leading dimensions, if provided, allow for batches. ```python # Initialize a single 3-variate Gaussian with diagonal covariance. mu = [1, 2, 3.] sigma = [[1, 0, 0], [0, 3, 0], [0, 0, 2.]] dist = tf.contrib.distributions.MultivariateNormalFull(mu, chol) # Evaluate this on an observation in R^3, returning a scalar. dist.pdf([-1, 0, 1]) # Initialize a batch of two 3-variate Gaussians. mu = [[1, 2, 3], [11, 22, 33.]] sigma = ... # shape 2 x 3 x 3, positive definite. dist = tf.contrib.distributions.MultivariateNormalFull(mu, sigma) # Evaluate this on a two observations, each in R^3, returning a length two # tensor. x = [[-1, 0, 1], [-11, 0, 11.]] # Shape 2 x 3. dist.pdf(x) ``` Multivariate Normal distributions on `R^k`. User must provide means `mu` and `sigma`, the mean and covariance. Args: mu: `(N+1)-D` `float` or `double` tensor with shape `[N1,...,Nb, k]`, `b >= 0`. sigma: `(N+2)-D` `Tensor` with same `dtype` as `mu` and shape `[N1,...,Nb, k, k]`. Each batch member must be positive definite. validate_args: Whether to validate input with asserts. If `validate_args` is `False`, and the inputs are invalid, correct behavior is not guaranteed. allow_nan_stats: `Boolean`, default `False`. If `False`, raise an exception if a statistic (e.g. mean/mode/etc...) is undefined for any batch member If `True`, batch members with valid parameters leading to undefined statistics will return NaN for this statistic. name: The name to give Ops created by the initializer. Raises: TypeError: If `mu` and `sigma` are different dtypes. | 1.74776 | 2 |
write_to_file.py | arpanrec/bitwarden-export-migrate | 0 | 6632750 | <filename>write_to_file.py
import gnupg
import os
import tempfile
class EncryptAndWriteToFile:
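# Encrypts data to a recipient's public GPG key (fetched into a throwaway
# GPG home directory) and writes the ASCII-armoured result to disk.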
def __init__(self, keyid) -> None:
self.keyid = keyid
dirpath = tempfile.mkdtemp()
self.gpg = gnupg.GPG(gnupghome=os.path.abspath(dirpath))
self.gpg.encoding = 'utf-8'
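# Fetch the recipient's public key from keys.openpgp.org and mark it trusted
# so encryption needs no interactive confirmation (network access assumed).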
self.gpg.recv_keys('hkps://keys.openpgp.org', keyid)
self.gpg.trust_keys([keyid], 'TRUST_ULTIMATE')
def write(self, data, path):
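# Encrypt to the configured key id and write the ASCII-armoured ciphertext to "<path>.asc".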
encrypted_ascii_data = self.gpg.encrypt(data, self.keyid)
write_obj = WriteToFile()
write_obj.write(str(encrypted_ascii_data), f'{path}.asc')
class WriteToFile:
def write(self, data, path):
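# Pick text or binary mode from the payload type; other types are rejected.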
if isinstance(data, str):
mode = 'w'
elif isinstance(data, bytes):
mode = 'wb'
else:
raise TypeError('Unable to write data of type %s' % type(data))
with open(path, mode) as file_attach:
file_attach.write(data)
| <filename>write_to_file.py
import gnupg
import os
import tempfile
class EncryptAndWriteToFile:
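# Encrypts data to a recipient's public GPG key (fetched into a throwaway
# GPG home directory) and writes the ASCII-armoured result to disk.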
def __init__(self, keyid) -> None:
self.keyid = keyid
dirpath = tempfile.mkdtemp()
self.gpg = gnupg.GPG(gnupghome=os.path.abspath(dirpath))
self.gpg.encoding = 'utf-8'
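# Fetch the recipient's public key from keys.openpgp.org and mark it trusted
# so encryption needs no interactive confirmation (network access assumed).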
self.gpg.recv_keys('hkps://keys.openpgp.org', keyid)
self.gpg.trust_keys([keyid], 'TRUST_ULTIMATE')
def write(self, data, path):
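# Encrypt to the configured key id and write the ASCII-armoured ciphertext to "<path>.asc".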
encrypted_ascii_data = self.gpg.encrypt(data, self.keyid)
write_obj = WriteToFile()
write_obj.write(str(encrypted_ascii_data), f'{path}.asc')
class WriteToFile:
def write(self, data, path):
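# Pick text or binary mode from the payload type; other types are rejected.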
if isinstance(data, str):
mode = 'w'
elif isinstance(data, bytes):
mode = 'wb'
else:
raise TypeError('Unable to write data of type %s' % type(data))
with open(path, mode) as file_attach:
file_attach.write(data)
| none | 1 | 3.27645 | 3 |