metadata (dict) | text (string, 60–3.49M chars)
---|---
{
"source": "jkulhanek/deep-rl-pytorch",
"score": 3
} |
#### File: deep_rl/common/storage_test.py
```python
import unittest
import numpy as np
from .storage import SequenceStorage as ExperienceReplay, SequenceSampler, BatchSequenceStorage, LambdaSampler, PlusOneSampler, merge_batches
class SequenceStorageTest(unittest.TestCase):
def assertNumpyArrayEqual(self, a1, a2, msg = 'Arrays must be equal'):
if not np.array_equal(a1, a2):
self.fail(msg=f"{a1} != {a2} : " + msg)
def testShouldStoreAll(self):
replay = ExperienceReplay(4, samplers = (SequenceSampler(2),))
replay.insert(1, 0, 0.0, False)
replay.insert(5, 0, 0.0, False)
replay.insert(2, 0, 0.0, False)
replay.insert(4, 0, 0.0, False)
replay.insert(6, 0, 0.0, False)
replay.insert(7, 0, 0.0, False)
self.assertEqual(replay[0][0], 2)
self.assertEqual(replay[1][0], 4)
self.assertEqual(replay[2][0], 6)
self.assertEqual(replay[3][0], 7)
def testNegativeIndex(self):
replay = ExperienceReplay(4, samplers = (SequenceSampler(2),))
replay.insert(1, 0, 0.0, False)
replay.insert(5, 0, 0.0, False)
replay.insert(2, 0, 0.0, False)
replay.insert(4, 0, 0.0, False)
replay.insert(6, 0, 0.0, False)
replay.insert(7, 0, 0.0, False)
self.assertEqual(replay[-4][0], 2)
self.assertEqual(replay[-3][0], 4)
self.assertEqual(replay[-2][0], 6)
self.assertEqual(replay[-1][0], 7)
def testLength(self):
replay = ExperienceReplay(4, samplers = (SequenceSampler(2),))
self.assertEqual(len(replay), 0)
replay.insert(1, 0, 0.0, False)
self.assertEqual(len(replay), 1)
replay.insert(2, 0, 0.0, False)
self.assertEqual(len(replay), 2)
replay.insert(4, 0, 0.0, False)
self.assertEqual(len(replay), 3)
replay.insert(6, 0, 0.0, False)
self.assertEqual(len(replay), 4)
replay.insert(7, 0, 0.0, False)
self.assertEqual(len(replay), 4)
def testSamplerStats(self):
replay = ExperienceReplay(4, samplers = (LambdaSampler(2, lambda _, get: get(-1)[0] % 2 == 0),))
replay.insert(1, 0, 0.0, False)
replay.insert(2, 0, 0.0, False)
replay.insert(4, 0, 0.0, False)
replay.insert(6, 0, 0.0, False)
replay.insert(7, 0, 0.0, False)
self.assertEqual(replay.selector_lengths[0], 2)
def testSamplerStatsRemove(self):
replay = ExperienceReplay(4, samplers = (LambdaSampler(2, lambda _, get: get(-1)[0] % 2 == 0),))
replay.insert(6, 0, 0.0, False)
self.assertNumpyArrayEqual(replay.selector_data[:, 0], [False, False, False, False])
replay.insert(2, 0, 0.0, False)
self.assertNumpyArrayEqual(replay.selector_data[:, 0], [False, True, False, False])
replay.insert(4, 0, 0.0, False)
self.assertNumpyArrayEqual(replay.selector_data[:, 0], [False, True, True, False])
replay.insert(6, 0, 0.0, False)
self.assertNumpyArrayEqual(replay.selector_data[:, 0], [False, True, True, True])
replay.insert(7, 0, 0.0, False)
self.assertEqual(replay.selector_lengths[0], 2)
self.assertNumpyArrayEqual(replay.selector_data[:, 0], [False, False, True, True])
def testSamplingWithEpisodeEnd(self):
import numpy
numpy.random.seed(1)
replay = ExperienceReplay(4, samplers = (LambdaSampler(2, lambda _, get: get(-1)[0] % 2 == 0),))
replay.insert(6, 0, 0.0, False)
replay.insert(2, 0, 0.0, True)
replay.insert(4, 0, 0.0, False)
replay.insert(6, 0, 0.0, True)
replay.insert(7, 0, 0.0, False)
wasSampled = set()
wasFirst = set()
for _ in range(100):
batch = replay.sample(0)
wasSampled.add(batch[0][1])
wasFirst.add(batch[0][0])
self.assertEqual(batch[0].shape[0], 2)
self.assertSetEqual(wasFirst, set([4]))
self.assertSetEqual(wasSampled, set([6]))
def testResampling(self):
import numpy
numpy.random.seed(1)
replay = ExperienceReplay(4, samplers = (LambdaSampler(2, lambda _, get: get(-1)[0] % 2 == 0),))
replay.insert(6, 0, 0.0, False)
replay.insert(2, 0, 0.0, False)
replay.insert(4, 0, 0.0, False)
replay.insert(6, 0, 0.0, False)
replay.insert(7, 0, 0.0, False)
toBeSampled = set([4, 6])
wasSampled = set()
wasFirst = set()
for _ in range(100):
batch = replay.sample(0)
wasSampled.add(batch[0][1])
wasFirst.add(batch[0][0])
self.assertEqual(batch[0].shape[0], 2)
self.assertEqual(len(toBeSampled - wasSampled), 0, 'something was not sampled')
self.assertEqual(len(wasSampled), len(toBeSampled), 'something was not supposed to be sampled')
self.assertSetEqual(wasFirst, set([2,4]))
def testPlusOneSampling(self):
import numpy
numpy.random.seed(1)
replay = ExperienceReplay(4, samplers = (PlusOneSampler(2),))
replay.insert(6, 0, 0.0, False)
replay.insert(2, 0, 0.0, True)
replay.insert(4, 0, 0.0, False)
replay.insert(6, 0, 0.0, True)
replay.insert(7, 0, 0.0, False)
wasSampled = set()
wasFirst = set()
for _ in range(100):
batch = replay.sample(0)
wasSampled.add(batch[0][-1])
wasFirst.add(batch[0][0])
self.assertEqual(batch[0].shape[0], 3)
self.assertSetEqual(wasFirst, set([4]))
self.assertSetEqual(wasSampled, set([7]))
def testPlusOneResampling(self):
import numpy
numpy.random.seed(1)
replay = ExperienceReplay(4, samplers = (PlusOneSampler(2),))
replay.insert(6, 0, 0.0, False)
replay.insert(2, 0, 0.0, True)
replay.insert(4, 0, 0.0, False)
replay.insert(6, 0, 0.0, False)
replay.insert(7, 0, 0.0, False)
wasSampled = set()
wasFirst = set()
for _ in range(100):
batch = replay.sample(0)
wasSampled.add(batch[0][-1])
wasFirst.add(batch[0][0])
self.assertEqual(batch[0].shape[0], 3)
self.assertSetEqual(wasFirst, set([4]))
self.assertSetEqual(wasSampled, set([7]))
def testPlusOneShortMemory(self):
import numpy
numpy.random.seed(1)
replay = ExperienceReplay(4, samplers = (PlusOneSampler(2),))
replay.insert(1, 0, 0.0, False)
replay.insert(2, 0, 0.0, True)
for _ in range(100):
batch = replay.sample(0)
self.assertIsNone(batch)
class BatchSequenceStorageTest(unittest.TestCase):
def testStore(self):
replay = BatchSequenceStorage(2, 4, samplers = [SequenceSampler(2)])
replay.insert(np.array([1,2]), np.array([1,1]), np.array([1.0, 1.0]), np.array([False, False]))
replay.insert(np.array([3,4]), np.array([1,1]), np.array([1.0, 1.0]), np.array([False, False]))
replay.insert(np.array([5,6]), np.array([1,1]), np.array([1.0, 1.0]), np.array([False, False]))
replay.insert(np.array([7,8]), np.array([1,1]), np.array([1.0, 1.0]), np.array([False, False]))
def testSampleShape(self):
replay = BatchSequenceStorage(2, 4, samplers = [SequenceSampler(2)])
replay.insert(np.array([1,2]), np.array([1,1]), np.array([1.0, 1.0]), np.array([False, False]))
replay.insert(np.array([3,4]), np.array([1,1]), np.array([1.0, 1.0]), np.array([False, False]))
replay.insert(np.array([5,6]), np.array([1,1]), np.array([1.0, 1.0]), np.array([False, False]))
replay.insert(np.array([7,8]), np.array([1,1]), np.array([1.0, 1.0]), np.array([False, False]))
sample = replay.sample(0, batch_size=3)
self.assertEqual(sample[0].shape, (3, 2,))
self.assertEqual(sample[1].shape, (3, 2,))
self.assertEqual(sample[2].shape, (3, 2,))
self.assertEqual(sample[3].shape, (3, 2,))
class StorageUtilTest(unittest.TestCase):
def testMergeBatches(self):
batch1 = (np.ones((2,5)), [np.zeros((2,7)), np.ones((2,))])
batch2 = (np.ones((3,5)), [np.zeros((3,7)), np.ones((3,))])
merges = merge_batches(batch1, batch2)
self.assertIsInstance(merges, tuple)
self.assertIsInstance(merges[1], list)
self.assertIsInstance(merges[0], np.ndarray)
self.assertTupleEqual(merges[0].shape, (5,5))
self.assertTupleEqual(merges[1][0].shape, (5,7))
self.assertTupleEqual(merges[1][1].shape, (5,))
def testZeroBatch(self):
batch1 = (np.ones((2,5)), [np.zeros((2,7)), np.ones((2,))])
batch2 = []
merges = merge_batches(batch1, batch2)
self.assertIsInstance(merges, tuple)
self.assertIsInstance(merges[1], list)
self.assertIsInstance(merges[0], np.ndarray)
self.assertTupleEqual(merges[0].shape, (2,5))
self.assertTupleEqual(merges[1][0].shape, (2,7))
self.assertTupleEqual(merges[1][1].shape, (2,))
if __name__ == '__main__':
unittest.main()
```
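For readers skimming the tests above, here is a minimal standalone sketch (not part of the repository) of how the `SequenceStorage` API exercised by these tests might be driven directly. It assumes the constructor and `insert`/`sample` signatures shown in the tests; the transition values are arbitrary.
```python
# Hypothetical usage sketch of the SequenceStorage API exercised by the tests above.
from deep_rl.common.storage import SequenceStorage, SequenceSampler

replay = SequenceStorage(4, samplers=(SequenceSampler(2),))
for state, action, reward, done in [(1, 0, 0.0, False), (2, 0, 0.5, False),
                                    (3, 1, 1.0, True), (4, 0, 0.0, False)]:
    replay.insert(state, action, reward, done)

# Sampler 0 yields sequences of length 2; sample() may return None while the
# buffer does not yet contain a valid sequence (see testPlusOneShortMemory).
batch = replay.sample(0)
if batch is not None:
    print(batch[0])  # the sampled sequence of states, shape (2,)
```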
#### File: deep_rl/common/tensorflow_summary.py
```python
from collections import namedtuple
import tensorflow as tf
import os
def create_row(time, data, metricname):
    return namedtuple('DataRow', ('monitor',))(namedtuple('DataRowMonitor', ('l', 'r'))(time, data[metricname]))
def extract_tensorflow_summary(path, metricname = 'score'):
SUMMARY_NAMES = [metricname]
time_steps = []
metrics = { x:[] for x in SUMMARY_NAMES }
log_files = (os.path.join(path, x) for x in os.listdir(path))
for filename in log_files:
for e in tf.train.summary_iterator(filename):
if set(SUMMARY_NAMES).intersection((v.tag for v in e.summary.value)):
time_steps.append(e.step)
for v in e.summary.value:
if v.tag in SUMMARY_NAMES:
metrics[v.tag].append(v.simple_value)
return create_row(time_steps, metrics, metricname)
```
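A hedged usage sketch for the helper above: it assumes a directory of TensorFlow 1.x event files (the `tf.train.summary_iterator` API used above), and the log path is hypothetical.
```python
# Hypothetical usage: pull the 'score' series out of a directory of TF event files.
from deep_rl.common.tensorflow_summary import extract_tensorflow_summary

row = extract_tensorflow_summary('/tmp/logs/run-1', metricname='score')
steps, scores = row.monitor.l, row.monitor.r  # as packed by create_row above
print(len(steps), len(scores))
```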
#### File: deep_rl/common/torchsummary.py
```python
import torch
import torch.nn as nn
from torch.autograd import Variable
import gym.spaces
from functools import partial
from collections import OrderedDict
import numpy as np
def get_observation_shape(batch_size, space):
if isinstance(space, gym.spaces.Box):
return [(batch_size,) + space.shape]
if isinstance(space, gym.spaces.Tuple):
return sum(map(partial(get_observation_shape, batch_size), space), [])
def sample_space(sizes, dtype):
if isinstance(sizes, tuple):
if len(sizes) == 0:
return tuple()
elif type(sizes[0]) == int:
return torch.rand(*sizes).type(dtype)
else:
return tuple(sample_space(list(sizes), dtype))
elif isinstance(sizes, list):
return [sample_space(x, dtype) for x in sizes]
else:
raise Exception('Not supported')
def sum_space(sizes):
if isinstance(sizes, tuple):
if len(sizes) == 0:
return 0
elif type(sizes[0]) == int:
return np.prod(list(sizes))
else:
return sum_space(list(sizes))
elif isinstance(sizes, list):
return np.sum([sum_space(x) for x in sizes])
else:
return sizes
def shrink_shape(shape):
res = None
if isinstance(shape, tuple):
res = shrink_shape(list(shape))
if len(res) == 0 or isinstance(res[0], (tuple, list)):
res = tuple(res)
elif isinstance(shape, list):
res = [shrink_shape(x) for x in shape]
if res is not None:
if len(res) == 1:
shape = res[0]
else:
shape = res
return shape
def get_shape(tensor, shrink=False):
if shrink:
return shrink_shape(get_shape(tensor))
if isinstance(tensor, tuple):
return tuple(get_shape(list(tensor)))
elif isinstance(tensor, list):
return [get_shape(x) for x in tensor]
else:
return list(tensor.size())
def summary(model, input_size, device="cuda"):
# create properties
summary = OrderedDict()
hooks = []
registered_modules = set()
hook_dict = dict()
def register_hook(module):
def hook(module, input, output):
class_name = str(module.__class__).split(".")[-1].split("'")[0]
if module.__class__.__name__ == 'TimeDistributed':
class_name = str(module.inner.__class__).split(".")[-1].split("'")[0]
module_idx = len(summary)
m_key = "%s-%i" % (class_name, module_idx + 1)
summary[m_key] = OrderedDict()
summary[m_key]["input_shape"] = list(input[0].size())
if isinstance(output, (tuple, list)) and isinstance(output[-1], tuple):
summary[m_key]["output_shape"] = get_shape(output[:-1], True)
summary[m_key]["state_shape"] = get_shape(output[-1])
else:
summary[m_key]["output_shape"] = get_shape(output, True)
params = 0
summary[m_key]["nb_params"] = sum_space([x.size() for x in module.parameters()])
summary[m_key]["nb_trainable_params"] = sum_space([x.size() for x in module.parameters() if x.requires_grad])
if (
not isinstance(module, nn.Sequential)
and not isinstance(module, nn.ModuleList)
and not module in registered_modules
and not (module == model)
):
hook_obj = module.register_forward_hook(hook)
hooks.append(hook_obj)
hook_dict[module] = hook_obj
if module.__class__.__name__ == 'TimeDistributed':
registered_modules.add(module.inner)
if module.inner in hook_dict:
hook_obj = hook_dict.pop(module.inner)
hook_obj.remove()
hooks.remove(hook_obj)
device = device.lower()
assert device in [
"cuda",
"cpu",
], "Input device is not valid, please specify 'cuda' or 'cpu'"
if device == "cuda" and torch.cuda.is_available():
dtype = torch.cuda.FloatTensor
else:
dtype = torch.FloatTensor
# batch_size of 2 for batchnorm
x = sample_space(input_size, dtype)
# print(type(x[0]))
# register hook
model.apply(register_hook)
# make a forward pass
# print(x.shape)
model(*x)
# remove these hooks
for h in hooks:
h.remove()
print("----------------------------------------------------------------")
line_new = "{:>20} {:>25} {:>15}".format("Layer (type)", "Output Shape", "Param #")
print(line_new)
print("================================================================")
total_params = 0
total_output = 0
trainable_params = 0
for layer in summary:
# input_shape, output_shape, trainable, nb_params
line_new = "{:>20} {:>25} {:>15}".format(
layer,
str(summary[layer]["output_shape"]),
"{0:,}".format(summary[layer]["nb_params"]),
)
total_params += summary[layer]["nb_params"]
total_output += sum_space(summary[layer]["output_shape"])
trainable_params += summary[layer]["nb_trainable_params"]
print(line_new)
# assume 4 bytes/number (float on cuda).
total_input_size = abs(sum_space(input_size) * 4. / (1024 ** 2.))
total_output_size = abs(2. * total_output * 4. / (1024 ** 2.)) # x2 for gradients
total_params_size = abs(total_params * 4. / (1024 ** 2.))
total_size = total_params_size + total_output_size + total_input_size
print("================================================================")
print("Total params: {0:,}".format(total_params))
print("Trainable params: {0:,}".format(trainable_params))
print("Non-trainable params: {0:,}".format(total_params - trainable_params))
print("----------------------------------------------------------------")
print("Input size (MB): %0.2f" % total_input_size)
print("Forward/backward pass size (MB): %0.2f" % total_output_size)
print("Params size (MB): %0.2f" % total_params_size)
print("Estimated Total Size (MB): %0.2f" % total_size)
print("----------------------------------------------------------------")
# return summary
def minimal_summary(model, input_size):
# assume 4 bytes/number (float on cuda).
total_params = sum_space([x.size() for x in model.parameters()])
trainable_params = sum_space([x.size() for x in model.parameters() if x.requires_grad])
total_input_size = abs(sum_space(input_size) * 4. / (1024 ** 2.))
total_params_size = abs(total_params * 4. / (1024 ** 2.))
print("================================================================")
print("Total params: {0:,}".format(total_params))
print("Trainable params: {0:,}".format(trainable_params))
print("Non-trainable params: {0:,}".format(total_params - trainable_params))
print("----------------------------------------------------------------")
print("Input size (MB): %0.2f" % total_input_size)
print("Params size (MB): %0.2f" % total_params_size)
print("================================================================")
```
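A small sketch of how `summary` above might be called; the toy model is hypothetical, and `input_size` is a list of per-input shapes including the batch dimension, mirroring `get_observation_shape`.
```python
# Hypothetical usage of the summary helper on a tiny model (CPU only).
import torch.nn as nn
from deep_rl.common.torchsummary import summary

model = nn.Sequential(nn.Linear(4, 16), nn.ReLU(), nn.Linear(16, 2))
# One entry per model input, batch dimension included.
summary(model, [(2, 4)], device="cpu")
```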
#### File: deep-rl-pytorch/experiments/breakout_a2c.py
```python
from deep_rl import register_trainer
from deep_rl.actor_critic import A2C
from deep_rl.actor_critic.model import TimeDistributedConv
@register_trainer(max_time_steps=10e6, validation_period=None, episode_log_interval=10, save=False)
class Trainer(A2C):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.num_processes = 16
self.num_steps = 5
self.gamma = .99
def create_model(self):
return TimeDistributedConv(self.env.single_observation_space.shape[0], self.env.single_action_space.n)
def default_args():
return dict(
env_kwargs='BreakoutNoFrameskip-v4',
model_kwargs=dict()
)
```
#### File: deep-rl-pytorch/experiments/cartpole_dqn.py
```python
import gym
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import deep_rl.deepq as deepq
from deep_rl import register_trainer
class Model(nn.Module):
def __init__(self):
super().__init__()
def init_weights(m):
if type(m) == nn.Linear:
nn.init.xavier_uniform_(m.weight)
m.bias.data.fill_(0)
self.layer = nn.Linear(4, 256)
        self.advantage = nn.Linear(256, 2)
self.value = nn.Linear(256, 1)
self.apply(init_weights)
def forward(self, inputs):
features = self.layer(inputs)
features = F.relu(features)
value = self.value(features)
        advantage = self.advantage(features)
        # Dueling aggregation: subtract the per-sample mean advantage over actions.
        features = value + advantage - advantage.mean(dim=1, keepdim=True)
return features
@register_trainer(max_time_steps=100000, episode_log_interval=10)
class Trainer(deepq.DeepQTrainer):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.annealing_steps = 10000
self.preprocess_steps = 1000
self.replay_size = 50000
self.allow_gpu = False
def create_model(self):
return Model()
def create_env(self, env):
env = super().create_env(env)
class W(gym.ObservationWrapper):
def observation(self, o):
return o.astype(np.float32)
return W(env)
def default_args():
return dict(
env_kwargs=dict(id='CartPole-v0'),
model_kwargs=dict()
)
```
#### File: deep-rl-pytorch/tests/unreal_storage_test.py
```python
import unittest
import numpy as np
from deep_rl.actor_critic.unreal.storage import BatchExperienceReplay
class StorageTest(unittest.TestCase):
    def testRpZerosOnly(self):
        # NOTE: this test is disabled; the early return skips the assertions below.
        return
np.random.seed(1)
s = BatchExperienceReplay(3, 1000, 4)
s.insert(np.array([1, 5, 3]), np.array([1, 1, 2]), np.array([0, 0, 1]), np.array([0, 0, 0]))
s.insert(np.array([2, 6, 3]), np.array([1, 1, 2]), np.array([1, 0, 0]), np.array([0, 0, 0]))
s.insert(np.array([3, 7, 3]), np.array([1, 1, 2]), np.array([0, 1, 0]), np.array([0, 0, 0]))
s.insert(np.array([4, 8, 3]), np.array([1, 1, 2]), np.array([0, 0, 0]), np.array([0, 0, 0]))
sequence = s.sample_rp_sequence()
np.testing.assert_array_equal(sequence[2], np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 1, 0, 0]]))
def testRpOnesOnly(self):
np.random.seed(1)
s = BatchExperienceReplay(3, 1000, 4)
s.insert(np.array([1, 5, 3]), np.array([1, 1, 2]), np.array([0, 0, 1]), np.array([0, 0, 0]))
s.insert(np.array([2, 6, 3]), np.array([1, 1, 2]), np.array([1, 0, 0]), np.array([0, 0, 0]))
s.insert(np.array([3, 7, 3]), np.array([1, 1, 2]), np.array([0, 1, 0]), np.array([0, 0, 0]))
s.insert(np.array([4, 8, 3]), np.array([1, 1, 2]), np.array([1, 1, 1]), np.array([0, 0, 0]))
sequence = s.sample_rp_sequence()
np.testing.assert_array_equal(sequence[2], np.array([[1, 0, 0, 1], [0, 1, 0, 1], [0, 1, 0, 1]]))
def testRpNormal(self):
np.random.seed(1)
s = BatchExperienceReplay(3, 1000, 4)
s.insert(np.array([1, 5, 3]), np.array([1, 1, 2]), np.array([0, 0, 1]), np.array([0, 0, 0]))
s.insert(np.array([2, 6, 3]), np.array([1, 1, 2]), np.array([1, 0, 0]), np.array([0, 0, 0]))
s.insert(np.array([3, 7, 3]), np.array([1, 1, 2]), np.array([0, 1, 0]), np.array([0, 0, 0]))
s.insert(np.array([4, 8, 3]), np.array([1, 1, 2]), np.array([1, 1, 0]), np.array([0, 0, 0]))
sequence = s.sample_rp_sequence()
print(sequence)
np.testing.assert_array_equal(sequence[2], np.array([[1, 0, 0, 0], [0, 1, 0, 1], [0, 1, 0, 1]]))
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jkulhanek/faster-rcnn-pytorch",
"score": 2
} |
#### File: faster-rcnn-pytorch/src/loss.py
```python
import torch
from model_utils import calculate_iou, create_anchor_space, calculate_anchors, transform_parameters
import model
import torch.nn as nn
import torch.nn.functional as F
TOTAL_ANCHORS = 256
def balance_anchors(positive_anchors, negative_anchors):
# We randomly disable some positive and some negative anchors
p_samples = (positive_anchors == 1).nonzero()
n_samples = (negative_anchors == 1).nonzero()
pos_count = p_samples.size(0)
neg_count = n_samples.size(0)
negative_anchors = negative_anchors.fill_(0)
# Set those indices to 0
non_p_samples = p_samples[torch.randperm(pos_count)[(TOTAL_ANCHORS // 2):]]
n_samples = n_samples[torch.randperm(neg_count)[:(TOTAL_ANCHORS - (min(TOTAL_ANCHORS // 2, pos_count)))]]
# Note: there is a place for optimization
for elem in non_p_samples:
positive_anchors[elem[0], elem[1], elem[2], elem[3]] = 0
for elem in n_samples:
negative_anchors[elem[0], elem[1], elem[2], elem[3]] = 1
return (positive_anchors, negative_anchors, )
class RPNLoss(nn.Module):
def __init__(self, cache_anchor_space = False, lambda_multiplicator = 10):
super(RPNLoss, self).__init__()
self.lambda_multiplicator = lambda_multiplicator
self.cache_anchor_space = cache_anchor_space
# TODO: implement anchor space caching
def forward(self, input, target):
feature_size = input.size()[2:4]
size = target["size"]
batch_size = input.size()[0]
input = input.view(batch_size * len(model.anchors), -1, feature_size[0], feature_size[1])
anchor_space = create_anchor_space(size, feature_size, model.anchors)
anchor_space_size = feature_size[0] * feature_size[1] * len(model.anchors)
iou = calculate_iou(anchor_space.view(1, len(model.anchors), feature_size[0], feature_size[1], 4).repeat(batch_size, 1, 1, 1, 1), target["boxes"])
max_iou, max_iou_indices = torch.max(iou, -1)
# Calculates the cls loss
positive_labels, negative_labels = calculate_anchors(size, target["boxes"], anchor_space, iou, max_iou, ignore_out_of_range=True)
# Balance anchors
positive_labels, negative_labels = balance_anchors(positive_labels, negative_labels)
positive_labels = positive_labels.view(-1, feature_size[0], feature_size[1])
negative_labels = negative_labels.view(-1, feature_size[0], feature_size[1])
cls_loss_prep = F.cross_entropy(input[:,0:2,:,:], positive_labels.type(torch.LongTensor), reduce=False)
cls_loss_prep *= (positive_labels | negative_labels).type(torch.FloatTensor)
cls_loss = torch.sum(cls_loss_prep)
# Calculate the regression loss
max_gt_box = target["boxes"].view(batch_size, -1,1,4,1,1).repeat(1,1,len(model.anchors), 1, feature_size[0], feature_size[1]).gather(1,
max_iou_indices.view(batch_size, 1, len(model.anchors), 1, feature_size[0], feature_size[1]).repeat(1,1,1,4,1,1)) \
.view(batch_size, len(model.anchors), 4, feature_size[0], feature_size[1])
max_gt_box = max_gt_box.view(batch_size * len(model.anchors), -1, feature_size[0], feature_size[1])
tstar = transform_parameters(max_gt_box, anchor_space)
t = transform_parameters(input[:,2:,:,:], anchor_space)
reg_loss_prep = F.smooth_l1_loss(t, tstar, reduce = False)
reg_loss_prep *= positive_labels.view(len(model.anchors), feature_size[0], feature_size[1], 1).type(torch.FloatTensor)
reg_loss = torch.sum(reg_loss_prep)
ncls = batch_size * TOTAL_ANCHORS
        nreg = anchor_space_size  # number of anchor locations
        # Normalize as in the Faster R-CNN RPN loss: L = Lcls/Ncls + lambda * Lreg/Nreg
        loss = cls_loss / ncls + self.lambda_multiplicator * reg_loss / nreg
return loss
``` |
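To make the anchor balancing above concrete, here is a hedged sketch that feeds random positive/negative label tensors through `balance_anchors`; the tensor shape `(1, 9, 38, 50)` is only an illustrative batch/anchor/feature-map layout, and it assumes `src/` is on the import path.
```python
# Hypothetical sketch: after balancing, positives and negatives together
# should add up to TOTAL_ANCHORS (256) when enough candidates are available.
import torch
from loss import balance_anchors, TOTAL_ANCHORS

positive = (torch.rand(1, 9, 38, 50) > 0.95).long()  # sparse positives
negative = (torch.rand(1, 9, 38, 50) > 0.50).long()  # plenty of negatives
pos, neg = balance_anchors(positive, negative)
print(int(pos.sum()), int(neg.sum()), TOTAL_ANCHORS)
```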
{
"source": "jkulhanek/icra2017-visual-navigation",
"score": 2
} |
#### File: jkulhanek/icra2017-visual-navigation/deeprl.py
```python
import deep_rl
from threading import Lock
import os
import json
default_configuration = dict(
#visdom = dict(
# server = 'http://localhost',
# port = 8097
#),
house3d = dict(
framework_path = '/House3D', # '/House3D',
dataset_path = '~/datasets/suncg' # '/datasets/suncg'
),
models_path = '~/models',
videos_path = '~/results/videos'
)
basepath = os.path.expanduser('~/.visual_navigation')
os.makedirs(basepath, exist_ok=True)
configuration = dict(**default_configuration)
if not os.path.exists(os.path.join(basepath, 'config')):
with open(os.path.join(basepath, 'config'), 'w+') as f:
json.dump(configuration, f)
with open(os.path.join(basepath, 'config'), 'r') as f:
configuration.update(**json.load(f))
def expand_user(d):
if isinstance(d, dict):
dnew = dict()
for key, v in d.items():
if key.endswith('_path') and isinstance(v, str) and v.startswith('~'):
dnew[key] = os.path.expanduser(v)
else:
dnew[key] = expand_user(v)
return dnew
return d
configuration = expand_user(configuration)
deep_rl.configure(**configuration)
configuration = deep_rl.configuration
logger = deep_rl.common.metrics.MetricWriter(session_name='icra-tensorflow')
oldsave = logger.save
def save(path):
print('saving metrics to %s' % path)
oldsave(path)
print('metrics saved')
logger.save = save
metrics_lock = Lock()
save_path = os.path.join(configuration.get('models_path'), 'icra-tensorflow')
os.makedirs(save_path, exist_ok=True)
def get_logger():
class _LoggerProxy:
def __enter__(self):
metrics_lock.acquire()
return logger
def __exit__(self, *args, **kwargs):
metrics_lock.release()
return None
return _LoggerProxy()
``` |
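A short sketch of the intended use of the `get_logger` proxy above (the output path is hypothetical): the context manager serialises access to the shared `MetricWriter` across threads.
```python
# Hypothetical usage of the thread-safe logger proxy defined above.
from deeprl import get_logger

with get_logger() as metrics:
    # The lock is held for the duration of the block.
    metrics.save('/tmp/icra-metrics')  # goes through the wrapped save() above
```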
{
"source": "jkulhanek/lemmatag-tf2",
"score": 2
} |
#### File: jkulhanek/lemmatag-tf2/download_datasets.py
```python
from tqdm import tqdm
import requests
import math
import tarfile
import os
def download_file(url, filename=None):
# Streaming, so we can iterate over the response
r = requests.get(url, stream=True)
# Total size in bytes
total_size = int(r.headers.get('content-length', 0))
block_size = 1024
wrote = 0
with open(filename, 'wb') as f:
for data in tqdm(r.iter_content(block_size),
                         total=math.ceil(total_size / block_size),
unit='KB',
unit_scale=True):
wrote = wrote + len(data)
f.write(data)
if total_size != 0 and wrote != total_size:
print("Error, something went wrong")
def extract_file(read_filename, output_path):
tar = tarfile.open(read_filename, 'r')
tar.extractall(output_path)
# Converts CoNLL format of Universal Dependency (UD) files to LemmaTag format
# See http://universaldependencies.org/format.html
column_names = [
"ID", "FORM", "LEMMA", "UPOS", "XPOS", "FEATS", "HEAD", "DEPREL", "DEPS", "MISC"
]
column_pos = {name: i for i, name in enumerate(column_names)}
def conllu_to_lemmatag(lines, pos_column="XPOS", max_lines=None):
line_count = 0
for line in lines:
line = line.strip()
if line.startswith("#"):
continue
elif line == "":
line_count = 0
yield ""
else:
if max_lines and line_count and line_count >= max_lines:
continue
line_count += 1
tokens = line.split("\t")
yield "\t".join([tokens[column_pos["FORM"]], tokens[column_pos["LEMMA"]], tokens[column_pos[pos_column]]])
def convert_dataset(path):
allfiles = []
for root, dirs, files in os.walk(path, topdown=False):
for filename in files:
fname, ext = os.path.splitext(filename)
if ext == '.conllu':
allfiles.append(os.path.join(root, filename))
for filename in tqdm(allfiles):
fname, ext = os.path.splitext(filename)
writepath = fname + '.lemmatag'
with open(filename, 'r') as fr, open(writepath, 'w+') as fw:
fw.writelines(x + '\n' for x in conllu_to_lemmatag(fr))
def download_dataset(data_folder):
dataset_url = "https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-2837/ud-treebanks-v2.2.tgz" \
"?sequence=1&isAllowed=y"
dataset_path = os.path.join(data_folder, "ud-treebanks-v2.2.tgz")
print("Downloading dataset")
download_file(dataset_url, dataset_path)
print("Extracting dataset")
extract_file(dataset_path, data_folder)
print("Converting to LemmaTag format")
dataset_path = os.path.join(data_folder, 'ud-treebanks-v2.2')
convert_dataset(dataset_path)
print("Downloaded successfully")
def ensure_dataset_exists(data_folder):
dataset_path = os.path.join(data_folder, 'ud-treebanks-v2.2')
if not os.path.exists(dataset_path):
download_dataset(data_folder)
return True
if __name__ == "__main__":
data_folder = os.environ['DATASETS_PATH'] if 'DATASETS_PATH' in os.environ else os.path.expanduser('~/datasets')
if not ensure_dataset_exists(data_folder):
print('Dataset already exists')
# download_dataset(data_folder)
```
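The comment above describes the CoNLL-U to LemmaTag conversion; the sketch below runs `conllu_to_lemmatag` on two made-up CoNLL-U token lines to show the resulting FORM/LEMMA/XPOS columns.
```python
# Hypothetical example input; the token lines are invented for illustration.
from download_datasets import conllu_to_lemmatag

conllu_lines = [
    "# sent_id = 1",
    "1\tDogs\tdog\tNOUN\tNNS\tNumber=Plur\t2\tnsubj\t_\t_",
    "2\tbark\tbark\tVERB\tVBP\t_\t0\troot\t_\t_",
    "",  # blank line = sentence boundary
]
for out_line in conllu_to_lemmatag(conllu_lines):
    print(out_line)
# -> "Dogs\tdog\tNNS", "bark\tbark\tVBP", then an empty line for the boundary
```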
#### File: jkulhanek/lemmatag-tf2/morpho_dataset.py
```python
import os
import sys
import urllib.request
import zipfile
import tensorflow as tf
from download_datasets import ensure_dataset_exists
import numpy as np
# Loads a morphological dataset in a vertical format.
# - The data consists of three Datasets
# - train
# - dev
# - test
# - Each dataset is composed of factors (FORMS, LEMMAS, TAGS), each an
# object containing the following fields:
# - word_strings: Strings of the original words.
# - word_ids: Word ids of the original words (uses <unk> and <pad>).
# - words_map: String -> word_id map.
# - words: Word_id -> string list.
# - alphabet_map: Character -> char_id map.
# - alphabet: Char_id -> character list.
# - charseqs: Sequences of characters of the original words.
class MorphoDataset:
class Factor:
PAD = 0
UNK = 1
BOW = 2
EOW = 3
def __init__(self, characters, train=None):
self.words_map = train.words_map if train else {"<pad>": self.PAD, "<unk>": self.UNK}
self.words = train.words if train else ["<pad>", "<unk>"]
self.word_ids = []
self.word_strings = []
self.characters = characters
if characters:
self.alphabet_map = train.alphabet_map if train else {
"<pad>": self.PAD, "<unk>": self.UNK, "<bow>": self.BOW, "<eow>": self.EOW}
self.alphabet = train.alphabet if train else ["<pad>", "<unk>", "<bow>", "<eow>"]
self.charseqs = []
class FactorBatch:
def __init__(self, word_ids, charseqs=None):
self.word_ids = word_ids
self.charseqs = charseqs
class Dataset:
FORMS = 0
LEMMAS = 1
TAGS = 2
FACTORS = 3
def __init__(self, data_file, train=None, shuffle_batches=True, add_bow_eow=False, max_sentences=None, seed=42):
# Create factors
self._data = []
for f in range(self.FACTORS):
self._data.append(MorphoDataset.Factor(f in [self.FORMS, self.LEMMAS], train._data[f] if train else None))
in_sentence = False
for line in data_file:
line = line.decode("utf-8").rstrip("\r\n")
if line:
columns = line.split("\t")
for f in range(self.FACTORS):
factor = self._data[f]
if not in_sentence:
if len(factor.word_ids):
factor.word_ids[-1] = np.array(factor.word_ids[-1], np.int32)
factor.word_ids.append([])
factor.word_strings.append([])
if factor.characters:
factor.charseqs.append([])
word = columns[f]
factor.word_strings[-1].append(word)
# Character-level information
if factor.characters:
factor.charseqs[-1].append([])
if add_bow_eow:
factor.charseqs[-1][-1].append(MorphoDataset.Factor.BOW)
for c in word:
if c not in factor.alphabet_map:
if train:
c = "<unk>"
else:
factor.alphabet_map[c] = len(factor.alphabet)
factor.alphabet.append(c)
factor.charseqs[-1][-1].append(factor.alphabet_map[c])
if add_bow_eow:
factor.charseqs[-1][-1].append(MorphoDataset.Factor.EOW)
# Word-level information
if word not in factor.words_map:
if train:
word = "<unk>"
else:
factor.words_map[word] = len(factor.words)
factor.words.append(word)
factor.word_ids[-1].append(factor.words_map[word])
in_sentence = True
else:
in_sentence = False
if max_sentences is not None and len(self._data[self.FORMS].word_ids) >= max_sentences:
break
self._size = len(self._data[self.FORMS].word_ids)
self._shuffler = np.random.RandomState(seed) if shuffle_batches else None
@property
def data(self):
return self._data
def size(self):
return self._size
def batches(self, size=None):
permutation = self._shuffler.permutation(self._size) if self._shuffler else np.arange(self._size)
while len(permutation):
batch_size = min(size or np.inf, len(permutation))
batch_perm = permutation[:batch_size]
permutation = permutation[batch_size:]
batch = []
max_sentence_len = max(len(self._data[self.FORMS].word_ids[i]) for i in batch_perm)
# Word-level data
for factor in self._data:
batch.append(MorphoDataset.FactorBatch(np.zeros([batch_size, max_sentence_len], np.int32)))
for i in range(batch_size):
batch[-1].word_ids[i, :len(factor.word_ids[batch_perm[i]])] = factor.word_ids[batch_perm[i]]
# Character-level data
for f, factor in enumerate(self._data):
if not factor.characters:
continue
max_charseq_len = max(len(charseq) for i in batch_perm for charseq in factor.charseqs[i])
batch[f].charseqs = np.zeros([batch_size, max_sentence_len, max_charseq_len], np.int32)
for i in range(batch_size):
for j, charseq in enumerate(factor.charseqs[batch_perm[i]]):
batch[f].charseqs[i, j, :len(charseq)] = charseq
yield batch
def __init__(self, add_bow_eow=False, max_sentences=None):
data_folder = os.environ['DATASETS_PATH'] if 'DATASETS_PATH' in os.environ else os.path.expanduser('~/datasets')
ensure_dataset_exists(data_folder)
dataset_path = os.path.join(data_folder, 'ud-treebanks-v2.2/UD_Czech-PDT')
for dataset in ["train", "dev", "test"]:
with open(os.path.join(dataset_path, f'cs_pdt-ud-{dataset}.lemmatag'), 'rb') as dataset_file:
setattr(self, dataset, self.Dataset(dataset_file,
train=self.train if dataset != "train" else None,
shuffle_batches=dataset == "train",
add_bow_eow=add_bow_eow,
max_sentences=max_sentences))
``` |
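A hedged sketch of iterating the dataset defined above; constructing `MorphoDataset` downloads and converts the UD treebanks on first use, so this is not a lightweight call.
```python
# Hypothetical usage: load a small slice and inspect one training batch.
from morpho_dataset import MorphoDataset

morpho = MorphoDataset(add_bow_eow=True, max_sentences=100)
for batch in morpho.train.batches(size=16):
    forms = batch[morpho.train.FORMS]
    # (batch, sent_len) word ids and (batch, sent_len, char_len) character ids
    print(forms.word_ids.shape, forms.charseqs.shape)
    break
```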
{
"source": "jkulhanek/mnist-recognition-pytorch",
"score": 3
} |
#### File: mnist-recognition-pytorch/src/train.py
```python
import torch
import torch.optim as optim
import torch.nn as nn
from model import Net
import dataset
learningRate = 0.01
epochs = 2
net = Net()
optimizer = optim.SGD(net.parameters(), lr=learningRate)
criterion = nn.CrossEntropyLoss()
for epoch in range(epochs):  # loop over the dataset multiple times
running_loss = 0.0
for i, data in enumerate(dataset.trainloader, 0):
# get the inputs
inputs, labels = data
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
outputs = net(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
# print statistics
running_loss += loss.item()
if i % 2000 == 1999: # print every 2000 mini-batches
print('[%d, %5d] loss: %.3f' %
(epoch + 1, i + 1, running_loss / 2000))
running_loss = 0.0
print('Finished Training')
# Store weights
torch.save(net.state_dict(), 'net.pth')
# Run the test
def test():
correct = 0
total = 0
with torch.no_grad():
for data in dataset.testloader:
images, labels = data
outputs = net(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
print('Accuracy of the network on the 10000 test images: %d %%' % (
100 * correct / total))
test()
``` |
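Since the script stores the trained weights in `net.pth`, a natural follow-up (sketched here, not part of the file) is to reload them for inference.
```python
# Hypothetical follow-up: reload the saved MNIST weights for inference.
import torch
from model import Net

net = Net()
net.load_state_dict(torch.load('net.pth'))
net.eval()  # switch off any training-only behaviour before evaluation
```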
{
"source": "jkulhanek/robot-visual-navigation",
"score": 2
} |
#### File: python/environment/image_collection_environment.py
```python
import os
import h5py
import gym
from gym.wrappers import TimeLimit
import numpy as np
import random
ACTION_LIST = []
def move_position(position, mult=1):
rotation = position[2]
if rotation == 0:
return (position[0] + 1 * mult, position[1], rotation)
elif rotation == 1:
return (position[0], position[1] + 1 * mult, rotation)
elif rotation == 2:
return (position[0] - 1 * mult, position[1], rotation)
elif rotation == 3:
return (position[0], position[1] - 1 * mult, rotation)
def compute_complexity_distance(pointa, pointb):
ax, ay, ar = pointa
bx, by, br = pointb
return abs(ax - bx) + abs(ay - by) + abs((ar - br + 1) % 4 - 1) * 2
class ImageEnvironmentWrapper(gym.ObservationWrapper):
def __init__(self, env):
super(ImageEnvironmentWrapper, self).__init__(env)
self._stds = [51.764749543249216, 51.764749543249216, 1064.4242973195394, 1064.4242973195394]
self._means = [172.50841217557178, 172.50841217557178, 980.5952, 980.5952]
self.observation_space = gym.spaces.Tuple(
tuple([gym.spaces.Box(-1.0, 1.0, x.shape, dtype=np.float32) for x in self.observation_space.spaces]))
def observation(self, observation):
return [(o.astype(np.float32) - m) / s for o, m, s in zip(list(observation), self._means, self._stds)]
class ImageEnvironment(gym.Env):
metadata = {'render.modes': ['rgb_array']}
def __init__(self, screen_size=(84, 84), dataset_name='turtle_room', path=None, has_end_action=False, augment_images=True, **kwargs):
super(ImageEnvironment, self).__init__(**kwargs)
if path is None:
path = (os.environ['DATASETS_PATH'] if 'DATASETS_PATH' in os.environ else os.path.expanduser(
'~/datasets')) + '/%s/grid_compiled.hdf5' % dataset_name
self.path = path
self.has_end_action = has_end_action
self._file = None
self.augment_images = augment_images
self._datasetSelector = "%sx%s" % screen_size
height, width = screen_size
self.action_space = gym.spaces.Discrete(4 if not has_end_action else 5)
self.observation_space = gym.spaces.Tuple((
gym.spaces.Box(0, 255, (height, width, 3), dtype=np.uint8),
            gym.spaces.Box(0, 255, (height, width, 3), dtype=np.uint8),
gym.spaces.Box(0, 225, (height, width, 1), dtype=np.uint16),
gym.spaces.Box(0, 225, (height, width, 1), dtype=np.uint16)))
self._last_observation = None
self._initialized = False
self._random = random.Random()
self._next_task = None
self._physicalPosition = None
self.complexity = None
def _initialize(self):
if self._initialized:
return False
if self._file is None:
self._file = h5py.File(self.path, 'r')
self._positions = self._file["grid"]
self._physicalPositions = self._file["positions"]
self._images = self._file[self._datasetSelector + ("/augmented_images" if self.augment_images else "/images")]
self._depths = self._file[self._datasetSelector + ("/augmented_depths" if self.augment_images else "/depths")]
wid, hei, _, nsamples = self._positions.shape
self._allowedPoints = set()
self._goalPoints = []
self._nongoalPoints = []
for x in range(wid):
for y in range(hei):
isany = all(any(self._positions[x, y, r, i] != -1 for i in range(nsamples)) for r in range(4))
if isany:
self._allowedPoints.add((x, y))
for (x, y) in self._allowedPoints:
for r in range(4):
xn, yn, _ = move_position((x, y, r))
if (xn, yn) not in self._allowedPoints:
self._goalPoints.append((x, y, r))
else:
self._nongoalPoints.append((x, y, r))
self._initialized = True
# Prepare complexity lookup
self._complexityCache = dict()
for g in self._goalPoints:
cmpCache = []
self._complexityCache[g] = cmpCache
for i in range(wid + hei + 4 + 1):
startPoints = []
cmpCache.append(startPoints)
for x in self._nongoalPoints:
dist = compute_complexity_distance(g, x)
if dist == i:
startPoints.append(x)
def set_complexity(self, complexity=None):
self.complexity = complexity
def step(self, action):
assert self._initialized
self._position, collided = self._move(self._position, action)
terminal = self.is_goal(self._position)
reward = 1.0 if terminal else (-0.01 if collided else 0)
if self.has_end_action:
if action == 4:
if terminal:
reward = 1.0
terminal = True
else:
reward = 0
terminal = True
else:
reward = (-0.01 if collided else 0)
terminal = False
obs = None if terminal else self._observe()
self._last_observation = obs if obs is not None else tuple([np.copy(x) for x in list(self._last_observation)])
return self._last_observation, reward, terminal, dict()
def _ensure_in_grid(self, position):
x, y, _ = position
return (x, y) in self._allowedPoints
def _move(self, position, action):
x, y, r = position
if action == 0:
# Forward
npos = move_position(position, 1)
if self._ensure_in_grid(npos):
return npos, False
else:
return position, True
elif action == 1:
# Backward
npos = move_position(position, -1)
if self._ensure_in_grid(npos):
return npos, False
else:
return position, True
elif action == 2:
# Left
npos = (x, y, (r + 1) % 4)
return npos, True
elif action == 3:
# Right
npos = (x, y, (r - 1) % 4)
return npos, True
else:
return position, False
def _observe(self):
x, y, r = self._position
index = self._positions[x, y, r, self._random.randrange((self._positions[x, y, r] != -1).sum())]
self._physicalPosition = (self._physicalPositions[index], self._physicalPositions[self._goalIndex])
indexg = self._goalIndex
if self.augment_images:
irender = self._random.randrange(self._images.shape[1])
return (
self._images[index, irender, ...],
self._images[indexg, self._goalRender, ...],
np.expand_dims(self._depths[index, irender, ...], 2),
np.expand_dims(self._depths[indexg, self._goalRender, ...], 2)
)
else:
return (
self._images[index, ...],
self._images[indexg, ...],
np.expand_dims(self._depths[index, ...], 2),
np.expand_dims(self._depths[indexg, ...], 2)
)
def set_next_task(self, position, goal):
self._next_task = (position, goal)
@property
def position(self):
c, g = self._physicalPosition
return list(c[:3]), list(g[:3])
def reset(self):
self._initialize()
# Sample goal
self._goal = self.sample_goal() if self._next_task is None else self._next_task[1]
xg, yg, rg = self._goal
self._goalIndex = self._positions[xg, yg, rg, self._random.randrange((self._positions[xg, yg, rg] != -1).sum())]
if self.augment_images:
self._goalRender = self._random.randrange(self._images.shape[1])
self._position = self.sample_position(self._goal) if self._next_task is None else self._next_task[0]
self._last_observation = self._observe()
self._next_task = None
return self._last_observation
def sample_goal(self):
# Sample a goal on the edge of the grid
return self._random.choice(self._goalPoints)
def sample_position(self, goal):
choiceArray = None
if self.complexity is None:
choiceArray = self._nongoalPoints
else:
choiceArray = []
for i in range(min(len(self._complexityCache[goal]), self.complexity + 1)):
choiceArray.extend(self._complexityCache[goal][i])
return self._random.choice(choiceArray)
def is_goal(self, position):
diff = abs(self._goal[0] - position[0]) + abs(self._goal[1] - position[1])
return diff <= 1 and self._goal[2] == position[2]
def seed(self, seed=None):
self._random.seed(seed)
def close(self):
if self._file is not None:
self._file.close()
self._file = None
self._initialized = False
def browse(self):
from .browser import GoalKeyboardAgent
agent = GoalKeyboardAgent(self, [0, 1, 2, 3])
agent.show()
def render(self, mode='rgb_array', close=False):
if mode == 'rgb_array':
return self._last_observation[0]
# elif mode is 'human':
# pop up a window and render
else:
super(ImageEnvironment, self).render(mode=mode) # just raise an exception
if __name__ == "__main__":
from PIL import Image
import matplotlib
matplotlib.use("TkAgg")
import matplotlib.pyplot as plt
w = TimeLimit(ImageEnvironment(), max_episode_steps=300)
i = Image.open("../assets/images/63.png")
    # NOTE: _image_aug is an augmentation helper not defined in this file.
    plt.imshow(w._image_aug(images=[np.array(i)])[0])
plt.show()
```
#### File: python/environment/__init__.py
```python
import gym
import gym.spaces
from gym.wrappers import TimeLimit
import random
import numpy as np
from deep_rl.common.env import ScaledFloatFrame
from gym.vector import AsyncVectorEnv, SyncVectorEnv
def _createImageEnvironment(**kwargs):
from .image_collection_environment import ImageEnvironment, ImageEnvironmentWrapper
return ImageEnvironmentWrapper(TimeLimit(ImageEnvironment(**kwargs), 300))
def _createDmhouseEnvironment(**kwargs):
import dmhouse
env = gym.make('DMHouse-v1', **kwargs, renderer='software')
return ScaledFloatFrame(env)
class SingleImageWrapper(gym.ObservationWrapper):
def __init__(self, env):
super().__init__(env)
template = env.observation_space[0].spaces[0]
shape = (6,) + template.shape[1:]
self.observation_space = gym.spaces.Box(template.low.min(), template.high.max(), shape, template.dtype)
def observation(self, observation):
observations, last_reward_action = observation
observations = np.concatenate((observations[0], observations[1]), 0)
return observations
def create_multiscene(num_processes, wrap=lambda e: e, seed=None, use_dummy=False, **kwargs):
funcs = []
if seed is None:
seed = random.randint(0, 15487469)
for i in range(num_processes):
        def func(i=i):  # bind the loop index now so each env gets its own seed
import environment
env = wrap(gym.make(**kwargs))
env.seed((seed * i * 231893) % 15487469) # pseudo-independent random sequences
return env
funcs.append(func)
if use_dummy:
return SyncVectorEnv(funcs)
else:
return AsyncVectorEnv(funcs)
gym.register("TurtleLab-v0", entry_point=_createImageEnvironment, kwargs=dict(dataset_name='turtle_room'))
gym.register(
id='DMHouseCustom-v1',
entry_point=_createDmhouseEnvironment,
kwargs=dict()
)
```
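A sketch of how `create_multiscene` above might be called to build a small synchronous vector of `TurtleLab-v0` environments; it assumes the `turtle_room` HDF5 grid dataset is available locally.
```python
# Hypothetical usage: two synchronous TurtleLab environments.
from environment import create_multiscene

envs = create_multiscene(2, id="TurtleLab-v0", use_dummy=True, seed=42)
observations = envs.reset()
envs.close()
```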
#### File: robot-visual-navigation/python/stacktracer.py
```python
import threading
import time
import os
import sys
import traceback
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter
# Taken from http://bzimmer.ziclix.com/2008/12/17/python-thread-dumps/
def stacktraces():
code = []
for threadId, stack in sys._current_frames().items():
code.append("\n# ThreadID: %s" % threadId)
for filename, lineno, name, line in traceback.extract_stack(stack):
code.append('File: "%s", line %d, in %s' % (filename, lineno, name))
if line:
code.append(" %s" % (line.strip()))
return highlight("\n".join(code), PythonLexer(), HtmlFormatter(
full=False,
# style="native",
noclasses=True,
))
# This part was made by nagylzs
class TraceDumper(threading.Thread):
"""Dump stack traces into a given file periodically."""
def __init__(self, fpath, interval, auto):
"""
@param fpath: File path to output HTML (stack trace file)
@param auto: Set flag (True) to update trace continuously.
Clear flag (False) to update only if file not exists.
(Then delete the file to force update.)
@param interval: In seconds: how often to update the trace file.
"""
assert(interval > 0.1)
self.auto = auto
self.interval = interval
self.fpath = os.path.abspath(fpath)
self.stop_requested = threading.Event()
threading.Thread.__init__(self)
def run(self):
while not self.stop_requested.isSet():
time.sleep(self.interval)
if self.auto or not os.path.isfile(self.fpath):
self.stacktraces()
def stop(self):
self.stop_requested.set()
self.join()
try:
if os.path.isfile(self.fpath):
os.unlink(self.fpath)
except:
pass
def stacktraces(self):
fout = open(self.fpath, "w+")
try:
fout.write(stacktraces())
fout.flush()
finally:
fout.close()
_tracer = None
def trace_start(fpath, interval=5, auto=True):
"""Start tracing into the given file."""
global _tracer
if _tracer is None:
_tracer = TraceDumper(fpath, interval, auto)
_tracer.setDaemon(True)
_tracer.start()
else:
raise Exception("Already tracing to %s" % _tracer.fpath)
def trace_stop():
"""Stop tracing."""
global _tracer
if _tracer is None:
raise Exception("Not tracing, cannot stop.")
else:
_tracer.stop()
_tracer = None
```
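A usage sketch for the tracer above (output path hypothetical); it requires `pygments` to be installed, as imported at the top of the module.
```python
# Hypothetical usage: periodically dump all thread stacks to an HTML file.
import time
from stacktracer import trace_start, trace_stop

trace_start("/tmp/trace.html", interval=5, auto=True)
try:
    time.sleep(12)  # stand-in for the real workload being debugged
finally:
    trace_stop()    # stops the dumper thread and deletes the trace file
```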
#### File: robot-visual-navigation/python/testing_agents.py
```python
from deep_rl.core import AbstractAgent
from deep_rl import register_agent
from deep_rl import configuration
import numpy as np
import os
import random
from collections import deque
@register_agent("random", is_end = True)
class RandomAgent(AbstractAgent):
def __init__(self, *args, is_end = True, **kwargs):
self.is_end = is_end
super().__init__(*args, **kwargs)
def act(self, o):
return [random.randrange(5 if self.is_end else 4)]
@register_agent("random-end", is_end = True)
class RandomEndAgent(AbstractAgent):  # distinct name so it does not shadow RandomAgent above
def __init__(self, *args, is_end = True, **kwargs):
self.is_end = is_end
super().__init__(*args, **kwargs)
self.env = None
def set_environment(self, env):
self.env = env.unwrapped
def act(self, o):
return [random.randrange(4)] if self.env._position != self.env._goal else [4]
@register_agent("turtleroom-constant-stochastic")
class StochasticAgent(AbstractAgent):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._is_initialized = False
def _initialize(self):
if self._is_initialized:
return
self._is_initialized = True
checkpoint_dir = configuration.get('models_path')
path = os.path.join(checkpoint_dir, self.name, 'distribution.npy')
actions = np.load(path)
self._actions = actions
def act(self, obs):
return [np.random.choice(list(range(len(self._actions))), p = self._actions)]
@register_agent("shortest-path")
class ShortestPathAgent(AbstractAgent):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.env = None
self.steps = []
def set_environment(self, env):
self.env = env.unwrapped
def reset_state(self):
self.steps = []
def _compute_optimal_steps(self):
s = self._find_reversed_path(self.env._position, self.env._goal)
s.reverse()
return s
def _find_reversed_path(self, position, goal):
closed = set([position])
o = deque([(position, [])])
MAX_PATH = 38
while len(o) != 0:
position, s = o.popleft()
if position == goal:
s.append(4)
return s
if len(s) == MAX_PATH:
continue
for a in range(4):
npos, _ = self.env._move(position, a)
if npos in closed:
continue
sn = list(s)
sn.append(a)
o.append((npos, sn))
closed.add(npos)
return None
def act(self, o):
if self.env is None:
raise Exception("Must call 'set_environment'")
if len(self.steps) == 0:
self.steps = self._compute_optimal_steps()
return [self.steps.pop()]
```
#### File: map_collector/src/navigator.py
```python
import numpy as np
from controller import Controller
from wrappers import visualize
from collections import defaultdict
from math import pi
GRID_SIZE = 0.5
ROTATION_STEPS = 4
def move_position(position, rotation):
if rotation == 0:
return (position[0] + 1, position[1])
elif rotation == 1:
return (position[0], position[1] + 1)
elif rotation == 2:
return (position[0] - 1, position[1])
elif rotation == 3:
return (position[0], position[1] - 1)
class Navigator:
def __init__(self, controller):
self.controller = controller
def _move(self, position):
return self.controller.move_to((position[0] * GRID_SIZE, position[1] * GRID_SIZE))
def _rotate(self, rotation):
return self.controller.rotate_to(rotation * 2 * pi / 4)
def _can_move(self):
return not self.controller.is_occupied()
def collect(self, observation, position, rotation):
print("Collecting %s-%s" % (position, rotation))
def explore(self):
self.maze = defaultdict(lambda: 0)
self.maze[(0,0)] = 1
position = (0,0)
rotation = 0
self._explore(position, rotation)
def _explore(self, position, rotation):
self.maze[position] = 2
collect_spots = []
for i in range(4):
if self.maze[move_position(position, rotation)] == 0:
canMove = self._can_move()
state = 1 if canMove else 3
self.maze[move_position(position, rotation)] = state
if canMove:
collect_spots.append((move_position(position, rotation), rotation))
for r in range(3):
self.collect(self.controller.observe(), position, rotation + (float(r) / 3))
self._rotate(rotation + (float(r + 1) / 3))
if i != 3:
rotation = (rotation + 1) % 4
else:
self._rotate(rotation)
for i in range(4):
if len(collect_spots) > 0:
pos, rot = collect_spots.pop()
if rot == rotation:
self._move(pos)
self._explore(pos, rot)
self._move(position)
else:
collect_spots.append((pos, rot))
if i != 3:
rotation = (rotation - 1) % 4
self._rotate(rotation)
```
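A dry-run sketch of the exploration loop above using a stub controller instead of a robot, assuming the module's own `controller`/`wrappers` imports resolve. The stub reports every neighbouring cell as occupied, so `explore()` only collects the three observations per facing direction at the origin and then terminates; its interface mirrors the calls made by `Navigator`.
```python
# Hypothetical dry-run of Navigator with a stub controller standing in for the robot.
from navigator import Navigator

class StubController:
    def move_to(self, xy): pass
    def rotate_to(self, angle): pass
    def is_occupied(self): return True   # pretend every neighbouring cell is blocked
    def observe(self): return None

Navigator(StubController()).explore()  # prints "Collecting ..." for each pose at the origin
```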
#### File: robot_evaluator/src/main.py
```python
from sensor_msgs.msg import Image
from std_msgs.msg import Int32,String
from controller import Controller
from robot_agent_msgs.msg import ComputeStepRequest
from convert import convert_image
import rospy
import argparse
import numpy as np
import os
from math import pi, sqrt
import random
import sys
class Puller(object):
def __init__(self, *args, **kwargs):
super(Puller, self).__init__(*args, **kwargs)
self._target = None
self._action = None
self._observation = None
rospy.Subscriber("robot_visual_navigation/set_goal", Image, self._set_target, queue_size = 10)
rospy.Subscriber("robot_visual_navigation/action", Int32, self._set_action, queue_size = 10)
rospy.Subscriber("camera/rgb/image_raw", Image, self._set_observation, queue_size = 10)
self.reset_state_publisher = rospy.Publisher("robot_visual_navigation/reset_state", String, queue_size = 10)
def _set_target(self, msg):
self._target = msg
s = String()
s.data = "empty"
self.reset_state_publisher.publish(s)
def _set_observation(self, msg):
self._observation = msg
def _set_action(self, msg):
self._action = msg.data
def wait_for_target(self):
rate = rospy.Rate(10)
while not rospy.is_shutdown() and self._target is None:
rate.sleep()
return self._target
def wait_for_observation(self):
rate = rospy.Rate(10)
while not rospy.is_shutdown() and self._observation is None:
rate.sleep()
return self._observation
def wait_for_action(self):
rate = rospy.Rate(10)
while not rospy.is_shutdown() and self._action is None:
rate.sleep()
action = self._action
self._action = None
return action
def load_target(x, y, r):
import h5py
with h5py.File(os.path.expanduser("~/datasets/turtle_room/grid_compiled.hdf5"),"r") as f:
imageIndex = f["grid"][x, y, r, random.randrange(np.sum(f["grid"][x, y, r, :] != -1))]
targetImage = f["84x84/images"][imageIndex]
msg = convert_image(targetImage)
return msg
def angle_difference(a, b):
return (a - b + pi) % (2 * pi) - pi
def dist(pos, target):
x, y, r = pos
gx, gy, gr = target
return sqrt((gx - x) ** 2 + (gy - y) ** 2)
def is_goal(pos, target):
x, y, r = pos
gx, gy, gr = target
return ((gx - x) ** 2 + (gy - y) ** 2) < 0.3 ** 2 and abs(angle_difference(r, gr)) < 0.52
parser = argparse.ArgumentParser("robot_evaluator")
parser.add_argument("x", type=int)
parser.add_argument("y", type=int)
parser.add_argument("r", type=int)
parser.add_argument("--end", action="store_true")
args = parser.parse_args()
rospy.init_node("robot_evaluator")
rospy.loginfo("starting")
ctrl = Controller()
puller = Puller()
compute_step_client = rospy.Publisher("robot_visual_navigation/begin_compute_step", ComputeStepRequest, queue_size = 10)
reset_goal_client = rospy.Publisher('robot_visual_navigation/reset_state', String, queue_size = 10)
rospy.sleep(rospy.Duration(nsecs=100 * 1000000))
ctrl.start()
angle = 0
rospy.loginfo("running")
#reset agent service
rmsg = String()
rmsg.data = "empty"
reset_goal_client.publish(rmsg)
rospy.sleep(rospy.Duration(nsecs=500 * 1000000))
rospy.loginfo("goal cleared")
rospy.loginfo("setting target: %s, %s, %s" % (args.x, args.y, args.r))
targetmsg = load_target(args.x, args.y, args.r)
number_of_actions = 0
actions_taken = []
positions = []
real = ctrl.position_and_rotation()
positions.append(real)
while not rospy.is_shutdown():
req = ComputeStepRequest()
req.goal = targetmsg
rospy.loginfo("waiting for observation")
req.observation = puller.wait_for_observation()
req.sender = "empty"
compute_step_client.publish(req)
rospy.loginfo("waiting for action")
action = puller.wait_for_action()
rospy.loginfo("received action %s" % action)
if action == 0:
ctrl.move_by(0.2)
elif action == 1:
ctrl.move_by(-0.2)
elif action == 2:
ctrl.rotate_by(1.57)
elif action == 3:
ctrl.rotate_by(-1.57)
elif action == 4:
rospy.sleep(rospy.Duration(secs=1))
rospy.sleep(rospy.Duration(secs=0,nsecs=800 * 1000000))
number_of_actions += 1
actions_taken.append(action)
# If the goal is reached, return
phy = (args.x - 2) * 0.2, (args.y - 3) * 0.2, (args.r * 1.57 + pi) % (2 * pi) - pi
real = ctrl.position_and_rotation()
positions.append(real)
if args.end:
if action == 4:
if is_goal(real, phy):
rospy.loginfo("goal correctly signaled after %s steps" % number_of_actions)
else:
rospy.loginfo("goal incorrectly signaled after %s steps" % number_of_actions)
rospy.loginfo("position: %.2f %.2f %.2f" % real)
rospy.loginfo("goal %.2f %.2f %.2f" % phy)
# write results
with open("results.txt", "a") as f:
f.write("(%s %s %s) -> (%s %s %s) (%s %s %s) = %s (%s) %s [%s] [%s]\n" % (tuple(real) + (args.x, args.y, args.r) + phy + (is_goal(real, phy), dist(real, phy), len(actions_taken), ", ".join(map(str, actions_taken)), ", ".join(map(str, positions)))))
f.flush()
sys.exit()
elif is_goal(real, phy):
rospy.loginfo("goal reached after %s steps" % number_of_actions)
rospy.loginfo("position: %.2f %.2f %.2f" % real)
rospy.loginfo("goal %.2f %.2f %.2f" % phy)
# write results
with open("results.txt", "a") as f:
f.write("(%s %s %s) = %s [%s]\n" % (args.x, args.y, args.r, len(actions_taken), ", ".join(map(str, actions_taken))))
f.flush()
sys.exit()
``` |
{
"source": "jkulhanek/soloist",
"score": 2
} |
#### File: soloist/data/loader.py
```python
import os
import functools
import logging
from data.utils import ConcatDialogDataset, split_name, wrap_dataset_with_blacklist
RESTRICTED_DOMAINS = ['hotel', 'train', 'restaurant', 'attraction', 'taxi',
'hospital', 'police', 'rentalcar', 'flight', 'hotels',
'restaurant-search', 'flights']
DATASETS_PATH = os.path.join(os.path.expanduser(os.environ.get('DATASETS_PATH', '~/datasets')), 'soloist')
logger = logging.getLogger()
def load_dataset(name, restrict_domains=False, augment='disabled', use_blacklist=False, **kwargs):
if restrict_domains:
return load_dataset(name, domains=RESTRICTED_DOMAINS, **kwargs)
if '+' in name:
# This is a concat dataset
datasets = name.split('+')
_load_dataset = functools.partial(load_dataset, **kwargs)
datasets = list(map(_load_dataset, datasets))
return ConcatDialogDataset(datasets)
dataset_name, split = split_name(name)
from data.dataset import load_dataset as load_custom_dataset
dataset = load_custom_dataset(name, **kwargs)
if use_blacklist:
dataset = add_blacklist(dataset, name)
return dataset
def add_blacklist(dataset, name):
dataset_name, split = split_name(name)
with open(os.path.join(DATASETS_PATH, dataset_name, f'{split}-blacklist.txt'), 'r') as f:
blacklist = sorted(set(int(x.rstrip()) for x in f))
logging.warning(f'Some examples ({100 * len(blacklist) / len(dataset):.2f}%) were ignored by a blacklist.')
return wrap_dataset_with_blacklist(dataset, blacklist)
def load_backtranslation_transformation(name):
import data.backtranslation
def get_backtranslation_datasets(name):
if '+' in name:
datasets = name.split('+')
return sum(map(get_backtranslation_datasets, datasets), [])
if name.endswith('.yaml'):
return [name]
new_name, split = split_name(name)
if split in {'dev', 'val', 'train', 'test', 'validation', 'training', 'testing', 'development'}:
name = new_name
if name == 'multiwoz-2.0':
# NOTE: we do not have backtranslations for MultiWOZ 2.0
return ['multiwoz-2.1']
return [name]
backtranslation_dict = data.backtranslation.load_backtranslations(list(set(get_backtranslation_datasets(name))))
return data.backtranslation.BackTranslateAugmentation(backtranslation_dict)
``` |
{
"source": "jkulhanek/torchdata",
"score": 2
} |
#### File: torchdata/test/test_dataframe.py
```python
import os
import unittest
import warnings
from itertools import chain
import expecttest
from _utils._common_utils_for_test import create_temp_dir, reset_after_n_next_calls
from torchdata.datapipes.iter import DataFrameMaker, FileLister, FileOpener, IterableWrapper, ParquetDataFrameLoader
try:
import torcharrow
import torcharrow.dtypes as dt
HAS_TORCHARROW = True
except ImportError:
HAS_TORCHARROW = False
try:
import pyarrow
import pyarrow.parquet as parquet
HAS_PYARROW = True
except ImportError:
HAS_PYARROW = False
skipIfNoPyArrow = unittest.skipIf(not HAS_PYARROW, "no PyArrow.")
skipIfNoTorchArrow = unittest.skipIf(not HAS_TORCHARROW, "no TorchArrow.")
@skipIfNoTorchArrow
class TestDataFrame(expecttest.TestCase):
def setUp(self) -> None:
self.temp_dir = create_temp_dir()
if HAS_PYARROW:
self._write_parquet_files()
def tearDown(self) -> None:
try:
self.temp_dir.cleanup()
except Exception as e:
warnings.warn(f"TestDataFrame was not able to cleanup temp dir due to {e}")
def _write_parquet_files(self):
# Create TorchArrow DataFrames
DTYPE = dt.Struct([dt.Field("Values", dt.int32)])
df1 = torcharrow.dataframe([(i,) for i in range(10)], dtype=DTYPE)
df2 = torcharrow.dataframe([(i,) for i in range(100)], dtype=DTYPE)
# Write them as parquet files
for i, df in enumerate([df1, df2]):
fname = f"df{i}.parquet"
self._write_df_as_parquet(df, fname)
        self._write_multiple_dfs_as_parquet([df1, df2], fname="merged.parquet")
def _custom_files_set_up(self, files):
for fname, content in files.items():
temp_file_path = os.path.join(self.temp_dir.name, fname)
with open(temp_file_path, "w") as f:
f.write(content)
def _compare_dataframes(self, expected_df, actual_df):
self.assertEqual(len(expected_df), len(actual_df))
for exp, act in zip(expected_df, actual_df):
self.assertEqual(exp, act)
def _write_df_as_parquet(self, df, fname: str) -> None:
table = df.to_arrow()
parquet.write_table(table, os.path.join(self.temp_dir.name, fname))
    def _write_multiple_dfs_as_parquet(self, dfs, fname: str) -> None:
tables = [df.to_arrow() for df in dfs]
merged_table = pyarrow.concat_tables(tables)
parquet.write_table(merged_table, os.path.join(self.temp_dir.name, fname))
def test_dataframe_maker_iterdatapipe(self):
source_data = [(i,) for i in range(10)]
source_dp = IterableWrapper(source_data)
DTYPE = dt.Struct([dt.Field("Values", dt.int32)])
# Functional Test: DataPipe correctly converts into a single TorchArrow DataFrame
df_dp = source_dp.dataframe(dtype=DTYPE)
df = list(df_dp)[0]
expected_df = torcharrow.dataframe([(i,) for i in range(10)], dtype=DTYPE)
self._compare_dataframes(expected_df, df)
# Functional Test: DataPipe correctly converts into multiple TorchArrow DataFrames, based on size argument
df_dp = DataFrameMaker(source_dp, dataframe_size=5, dtype=DTYPE)
dfs = list(df_dp)
expected_dfs = [
torcharrow.dataframe([(i,) for i in range(5)], dtype=DTYPE),
torcharrow.dataframe([(i,) for i in range(5, 10)], dtype=DTYPE),
]
for exp_df, act_df in zip(expected_dfs, dfs):
self._compare_dataframes(exp_df, act_df)
# __len__ Test:
df_dp = source_dp.dataframe(dtype=DTYPE)
self.assertEqual(1, len(df_dp))
self.assertEqual(10, len(list(df_dp)[0]))
df_dp = source_dp.dataframe(dataframe_size=5, dtype=DTYPE)
self.assertEqual(2, len(df_dp))
self.assertEqual(5, len(list(df_dp)[0]))
# Reset Test:
n_elements_before_reset = 1
res_before_reset, res_after_reset = reset_after_n_next_calls(df_dp, n_elements_before_reset)
for exp_df, act_df in zip(expected_dfs[:1], res_before_reset):
self._compare_dataframes(exp_df, act_df)
for exp_df, act_df in zip(expected_dfs, res_after_reset):
self._compare_dataframes(exp_df, act_df)
def test_dataframe_maker_with_csv(self):
def get_name(path_and_stream):
return os.path.basename(path_and_stream[0]), path_and_stream[1]
csv_files = {"1.csv": "key,item\na,1\nb,2"}
self._custom_files_set_up(csv_files)
datapipe1 = FileLister(self.temp_dir.name, "*.csv")
datapipe2 = FileOpener(datapipe1, mode="b")
datapipe3 = datapipe2.map(get_name)
csv_dict_parser_dp = datapipe3.parse_csv_as_dict()
# Functional Test: Correctly generate TorchArrow DataFrame from CSV
DTYPE = dt.Struct([dt.Field("key", dt.string), dt.Field("item", dt.string)])
df_dp = csv_dict_parser_dp.dataframe(dtype=DTYPE, columns=["key", "item"])
expected_dfs = [torcharrow.dataframe([{"key": "a", "item": "1"}, {"key": "b", "item": "2"}], dtype=DTYPE)]
for exp_df, act_df in zip(expected_dfs, list(df_dp)):
self._compare_dataframes(exp_df, act_df)
# Functional: making sure DataPipe works even without `columns` input
df_dp = csv_dict_parser_dp.dataframe(dtype=DTYPE)
for exp_df, act_df in zip(expected_dfs, list(df_dp)):
self._compare_dataframes(exp_df, act_df)
@skipIfNoPyArrow
def test_parquet_dataframe_reader_iterdatapipe(self):
DTYPE = dt.Struct([dt.Field("Values", dt.int32)])
# Functional Test: read from Parquet files and output TorchArrow DataFrames
source_dp = FileLister(self.temp_dir.name, masks="df*.parquet")
parquet_df_dp = ParquetDataFrameLoader(source_dp, dtype=DTYPE)
expected_dfs = [
torcharrow.dataframe([(i,) for i in range(10)], dtype=DTYPE),
torcharrow.dataframe([(i,) for i in range(100)], dtype=DTYPE),
]
for exp_df, act_df in zip(expected_dfs, list(parquet_df_dp)):
self._compare_dataframes(exp_df, act_df)
# Functional Test: correctly read from a Parquet file that was a merged DataFrame
merged_source_dp = FileLister(self.temp_dir.name, masks="merged.parquet")
merged_parquet_df_dp = ParquetDataFrameLoader(merged_source_dp, dtype=DTYPE)
expected_merged_dfs = [torcharrow.dataframe([(i,) for i in chain(range(10), range(100))], dtype=DTYPE)]
for exp_df, act_df in zip(expected_merged_dfs, list(merged_parquet_df_dp)):
self._compare_dataframes(exp_df, act_df)
# __len__ Test: no valid length because we do not know the number of row groups in advance
with self.assertRaisesRegex(TypeError, "has no len"):
len(parquet_df_dp)
# Reset Test:
n_elements_before_reset = 1
res_before_reset, res_after_reset = reset_after_n_next_calls(parquet_df_dp, n_elements_before_reset)
for exp_df, act_df in zip(expected_dfs[:1], res_before_reset):
self._compare_dataframes(exp_df, act_df)
for exp_df, act_df in zip(expected_dfs, res_after_reset):
self._compare_dataframes(exp_df, act_df)
if __name__ == "__main__":
unittest.main()
```
#### File: iter/util/samplemultiplexer.py
```python
import random
from typing import Dict, Iterator, Optional, Sized, TypeVar
from torchdata.datapipes.iter import IterDataPipe
T_co = TypeVar("T_co", covariant=True)
class SampleMultiplexerDataPipe(IterDataPipe[T_co]):
"""
Takes a `Dict` of (IterDataPipe, Weight), and yields items by sampling from these
DataPipes with respect to their weights. When individual DataPipes are exhausted, continues to sample from
the remaining DataPipes according to their relative weights.
If you wish to maintain the same ratio of weights indefinitely, you need to ensure that the
    inputs are never exhausted, for instance by applying ``cycle`` to them.
Sampling is controlled by the provided random ``seed``. If you don't provide it, the sampling
will not be deterministic.
Args:
pipes_to_weights_dict: a `Dict` of IterDataPipes and Weights. The total weight of
unexhausted DataPipes will be normalized to 1 for the purpose of sampling.
seed: random seed to initialize the random number generator
Example:
>>> from torchdata.datapipes.iter import IterableWrapper, SampleMultiplexer
>>> source_dp1 = IterableWrapper([0] * 10)
>>> source_dp2 = IterableWrapper([1] * 10)
>>> d = {source_dp1: 99999999, source_dp2: 0.0000001}
>>> sample_mul_dp = SampleMultiplexer(pipes_to_weights_dict=d, seed=0)
>>> list(sample_mul_dp)
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
"""
def __init__(
self,
pipes_to_weights_dict: Dict[IterDataPipe[T_co], float],
seed: Optional[int] = None,
):
if not pipes_to_weights_dict:
raise ValueError("Empty dictionary passed to SampleMultiplexerDataPipe")
total_weight: float = 0
for v in pipes_to_weights_dict.values():
if v <= 0:
raise ValueError(f"Expecting a positive and non-zero weight, got {v}")
total_weight += v
self.pipes_and_weights = [(k, v / total_weight) for k, v in pipes_to_weights_dict.items()]
if seed is None:
self.random = random.Random()
else:
self.random = random.Random(seed)
self.length: Optional[int] = None
def __iter__(self) -> Iterator[T_co]:
pipes_and_weights = [(iter(k), v) for k, v in self.pipes_and_weights]
while len(pipes_and_weights) > 1:
r = self.random.random()
s: float = 0
for it, weight in pipes_and_weights:
s += weight
if r < s:
try:
item = next(it)
yield item
except StopIteration:
# remove the current stream
new_total = 1 - weight
assert new_total > 0
pipes_and_weights = [(k, v / new_total) for k, v in pipes_and_weights if k != it]
break
# only one stream left
for item in pipes_and_weights[0][0]:
yield item
def __len__(self) -> int:
if self.length is not None:
if self.length == -1:
raise TypeError(f"{type(self).__name__} instance doesn't have valid length")
return self.length
if all(isinstance(dp, Sized) for dp, _ in self.pipes_and_weights):
self.length = sum(len(dp) for dp, _ in self.pipes_and_weights)
else:
self.length = -1
return len(self)
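# Worked example of the re-normalization above (a sketch, not part of the original
# file): with weights {a: 2.0, b: 1.0, c: 1.0} the normalized weights are
# {a: 0.5, b: 0.25, c: 0.25}. If pipe `a` is exhausted first, its weight is dropped
# and the rest are re-scaled by 1 / (1 - 0.5), so `b` and `c` are then each sampled
# with probability 0.5, preserving their relative ratio.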
``` |
{
"source": "jkulhanek/viewformer",
"score": 2
} |
#### File: viewformer/commands/download_model.py
```python
import click
from viewformer.utils import pull_checkpoint
@click.command('download-model')
@click.argument('checkpoint', type=str)
def main(checkpoint: str):
print(f'Downloading checkpoint {checkpoint}')
pull_checkpoint(checkpoint, override=True)
print(f'Checkpoint {checkpoint} downloaded')
```
#### File: viewformer/commands/generate_codes.py
```python
from aparse import click
from typing import List
from viewformer.utils import SplitIndices
from viewformer.data import transform_dataset
# Use memory growth for tf
try:
import tensorflow as tf
gpus = tf.config.list_physical_devices('GPU')
if gpus:
# Currently, memory growth needs to be the same across GPUs
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
except ImportError:
pass
class LatentCodeTransformer:
def _convert_image_type(self, image):
if image.dtype == 'uint8':
image = (image.astype('float32') / 255.) * 2. - 1.
if image.shape[-1] == 3:
image = image.transpose((0, 3, 1, 2))
return image
def update_dataset_info(self, dataset_info):
dataset_info['token_image_size'] = self.image_size // self.model.config.stride
self.dataset_info = dataset_info
return dataset_info
def __init__(self, model, batch_size: int = None, device=None):
if device is not None:
model = model.to(device)
self.model = model
self.image_size = model.config.image_size
self.batch_size = batch_size if batch_size is not None else model.config.batch_size
self.device = device
def output_features(self, features):
if features is not None and 'cameras-gqn' in features:
return ['codes', 'cameras-gqn']
else:
return ['codes', 'cameras']
def __call__(self, split, dataset):
import torch
import webdataset as wds
with torch.no_grad():
dataset = wds.filters.map_(dataset, lambda x: (torch.from_numpy(x['cameras']), torch.from_numpy(self._convert_image_type(x['frames'])), [len(x['frames'])] * len(x['frames'])))
dataset = wds.filters.unbatched_(dataset)
dataset = wds.filters.batched_(dataset, self.batch_size)
past_cameras = None
past_codes = None
def update_cummulative_variable(past, value, sequence_sizes):
sequence_sizes = list(sequence_sizes)
output = []
if past is not None:
value = torch.cat([past, value], 0)
sequence_sizes = ([sequence_sizes[0]] * len(past)) + sequence_sizes
while len(sequence_sizes) > 0 and len(value) >= sequence_sizes[0]:
output.append(value[:sequence_sizes[0]])
value = value[sequence_sizes[0]:]
sequence_sizes = sequence_sizes[sequence_sizes[0]:]
past = value
return past, output
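            # Worked example of the regrouping above (illustrative, assuming
            # batch_size=4): two scenes of lengths 3 and 2 arrive flattened with
            # per-frame size markers (3, 3, 3, 2) in the first batch and (2,) in
            # the second. The first call emits the 3-frame scene and keeps one
            # leftover frame; the second call prepends that leftover, sees sizes
            # [2, 2] and emits the 2-frame scene.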
if hasattr(self.model, 'encode'):
predict_step = lambda x: self.model.encode(x.to(self.device))[-1].detach().cpu()
else:
predict_step = lambda x: self.model(x.to(self.device))[-1].detach().cpu()
for batch_id, (cameras, frames, sequence_sizes) in enumerate(dataset):
codes = predict_step(frames)
past_codes, codes = update_cummulative_variable(past_codes, codes, sequence_sizes)
past_cameras, cameras = update_cummulative_variable(past_cameras, cameras, sequence_sizes)
for cur_cameras, cur_codes in zip(cameras, codes):
yield dict(cameras=cur_cameras, codes=cur_codes)
@click.command('generate-codes')
def main(dataset: str, output: str, model: str,
shards: SplitIndices = None,
batch_size: int = None,
splits: List[str] = None,
profile_batch_id: int = None, use_gpu: bool = True):
import torch
from viewformer.utils.torch import load_model
device = 'cpu' if not use_gpu or torch.cuda.device_count() == 0 else 'cuda'
device = torch.device(device)
model = load_model(model)
transformer = LatentCodeTransformer(model, batch_size=batch_size, device=device)
transform_dataset(dataset, output, transformer,
splits=splits,
shards=shards)
if __name__ == '__main__':
main()
```
#### File: data/loaders/__init__.py
```python
from typing import Optional
from .interiornet import InteriorNetLoader
from .dataset import DatasetLoader
from .sevenscenes import SevenScenesLoader
from .colors import ColorsLoader
from .co3d import CO3DLoader
from .shapenet import ShapenetLoader
from .sm7 import SM7Loader
from viewformer.data._common import ShuffledLoader, FixedSequenceSizeLoader, ChangedImageSizeLoader
_registry = dict()
def register_loader(loader_class):
name = loader_class.__name__.lower()[:-len('Loader')]
class _Wrapped(loader_class):
def __init__(self,
shuffle_sequences: Optional[bool] = None,
shuffle_sequence_items: Optional[bool] = None,
shuffle: Optional[bool] = None,
sequence_size: Optional[int] = None,
image_size: int = None,
seed: int = None,
**kwargs):
raise NotImplementedError()
def __new__(self,
shuffle_sequences: Optional[bool] = None,
shuffle_sequence_items: Optional[bool] = None,
shuffle: Optional[bool] = None,
sequence_size: Optional[int] = None,
image_size: int = None,
seed: int = None,
**kwargs):
if seed is not None:
kwargs['seed'] = seed
seed = seed if seed is not None else 42
custom_resize = getattr(loader_class, '_custom_resize', False)
custom_shuffle = getattr(loader_class, '_custom_shuffle', False)
custom_sequence_size = getattr(loader_class, '_custom_sequence_size', False)
if custom_resize:
kwargs['image_size'] = image_size
if custom_sequence_size:
kwargs['sequence_size'] = sequence_size
if shuffle is not None:
assert shuffle_sequence_items is None
assert shuffle_sequences is None
shuffle_sequence_items = shuffle_sequences = shuffle
else:
assert shuffle is None
shuffle_sequence_items = shuffle_sequence_items or False
shuffle_sequences = shuffle_sequences or False
if custom_shuffle:
loader = loader_class(shuffle_sequences=shuffle_sequences,
shuffle_sequence_items=shuffle_sequence_items,
sequence_size=sequence_size,
seed=seed, **kwargs)
else:
loader = loader_class(**kwargs)
if shuffle_sequence_items:
loader = ShuffledLoader(loader, seed, shuffle_sequence_items=True)
if sequence_size is not None and not custom_sequence_size:
loader = FixedSequenceSizeLoader(loader, sequence_size)
if shuffle_sequences:
loader = ShuffledLoader(loader, seed, shuffle_sequences=True)
if image_size is not None and not custom_resize:
loader = ChangedImageSizeLoader(loader, image_size)
return loader
_registry[name] = _Wrapped
return _Wrapped
def build(name, *args, **kwargs):
return _registry[name](*args, **kwargs)
def get_loader(name):
return _registry[name]
def get_loader_names():
return list(_registry.keys())
def get_loaders():
return _registry
DatasetLoader = register_loader(DatasetLoader)
InteriorNetLoader = register_loader(InteriorNetLoader)
SevenScenesLoader = register_loader(SevenScenesLoader)
ColorsLoader = register_loader(ColorsLoader)
CO3DLoader = register_loader(CO3DLoader)
ShapenetLoader = register_loader(ShapenetLoader)
SM7Loader = register_loader(SM7Loader)
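# A minimal usage sketch of the registry above (the `path`/`split` arguments are
# assumptions about the individual loader classes; the remaining keyword arguments
# are the wrapper arguments handled by register_loader):
#
#   loader = build('dataset', path='/data/my-dataset', split='train',
#                  image_size=128, sequence_size=20, shuffle=True)
#
# build() looks the wrapped class up by its lowercased name without the 'Loader'
# suffix and composes ShuffledLoader / FixedSequenceSizeLoader /
# ChangedImageSizeLoader around it as requested.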
```
#### File: data/loaders/shapenet.py
```python
import os
from collections import defaultdict
from functools import partial
import numpy as np
import sys
from typing import List
from PIL import Image
from viewformer.utils.geometry import rotation_matrix_to_quaternion, quaternion_normalize
from viewformer.data._common import LazyArray
try:
from functools import cache
except ImportError:
from functools import lru_cache
cache = lru_cache()
ALL_CATEGORIES = ['cars', 'chairs']
_BLACKLIST = defaultdict(set)
_BLACKLIST['cars_train'] = {'4cce557de0c31a0e70a43c2d978e502e'}
_BLACKLIST['chairs_train'] = {
# Missing files
'18e5d3054fba58bf6e30a0dcfb43d654',
'2a197b179994b877f63e8e405d49b8ce',
'2be29f8ad81e2742eaf14273fa406ffc',
'2cb0ac27f1cdb3f0b2db0181fdb9f615',
'3d5053323021b1babbaf011bdbf27c0e',
'4a671498c6e96238bf8db636a3460ee5',
'4a89aad97f4c503068d1b9a1d97e2846',
'738188ae01887d2349bb1cbbf9a4206',
'8b552c23c064b96179368d1198f406e7',
'9505568d7a277c7bdd7092ed47061a36',
'9d0043b17b97ac694925bc492489de9c',
'b46361e53253c07b6fa2cfca412075ea',
'b88d8b5e5fbee4fa8336a02debb9923b',
'c41fe0605cfe70571c25d54737ed5c8e',
'cadf69f5353039e8593ebeeedbff73b',
'chairs_2.0_train',
'd323e6d19dc58526f2c0effc06a15c11',
'e94befd51c02533b17b431cae0dd70ed',
# Invalid poses
'8f13ac6499dfcc83f381af8194aa4242',
'7f8fc2fdc88e4ca1152b86a40777b4c',
'49d6f3affe205cc4b04cb542e2c50eb4',
'cbe006da89cca7ffd6bab114dd47e3f',
'47d13a704da37b588fda227abcbd8611',
'59c89dc89cf0d34e597976c675750537',
'2d08a64e4a257e007135fc51795b4038',
'752edd549ca958252b4875f731f71cd',
'd5b9579151041cbd9b9f2eb77f5e247e',
}
_SEQ_SIZES = {
'cars_train': (2151 - 1, 250),
'cars_test': (704, 251),
'chairs_train': (4613 - 27, 200),
'chairs_test': (1317, 251),
}
class ShapenetLoader:
_images_per_scene = dict()
def __init__(self, path: str, split: str, categories: List[str] = None, seed=None, sequences=None):
assert split in ['test', 'train']
if categories is None:
categories = ALL_CATEGORIES
self.categories = categories
self.split = split
self.path = path
self.sequences = sequences
if len(self.categories) == 1:
_, self.sequence_size = _SEQ_SIZES[f'{self.categories[0]}_{self.split}']
def num_images_per_sequence(self):
if self.sequences is not None:
return sum(([_SEQ_SIZES[f'{x}_{self.split}'][-1]] * len(self._get_seqs(x)) for x in self.categories), [])
return sum(([ln] * num for num, ln in (_SEQ_SIZES[f'{x}_{self.split}'] for x in self.categories)), [])
@cache
def __len__(self):
if self.sequences is not None:
return sum(len(self._get_seqs(x)) for x in self.categories)
return sum(num for num, ln in (_SEQ_SIZES[f'{x}_{self.split}'] for x in self.categories))
@staticmethod
def camera_to_world_matrices_to_cameras(cam_to_world):
position = cam_to_world[..., :-1, -1]
R = cam_to_world[..., :-1, :-1]
quaternion = rotation_matrix_to_quaternion(R)
quaternion = quaternion_normalize(quaternion)
return np.concatenate([position, quaternion], -1)
@cache
def _get_seqs(self, category):
xs = os.listdir(os.path.join(self.path, f'{category}_{self.split}'))
if self.sequences is not None:
xs = set(xs)
xs = [x for x in self.sequences if x in xs]
else:
xs = [x for x in xs if x not in _BLACKLIST[f'{category}_{self.split}']]
xs.sort()
return xs
def read_camera(self, category, seq_name, i):
with open(os.path.join(self.path, f'{category}_{self.split}', seq_name, 'pose', f'{i:06d}.txt'), 'r') as f:
camera_to_world_matrix = np.array(list(map(float, f.read().strip().split())), dtype=np.float32)
camera_to_world_matrix = camera_to_world_matrix.reshape((4, 4))
return self.camera_to_world_matrices_to_cameras(camera_to_world_matrix)
def read_image(self, category, seq_name, i):
return np.array(Image.open(os.path.join(self.path, f'{category}_{self.split}', seq_name, 'rgb', f'{i:06d}.png')).convert('RGB'))
def __getitem__(self, i):
# Find split
for cat in self.categories:
num, ln = _SEQ_SIZES[f'{cat}_{self.split}']
if i < num:
break
i -= num
else:
raise StopIteration()
indices = list(range(ln))
seq_name = self._get_seqs(cat)[i]
output = dict()
output['cameras'] = LazyArray(indices, partial(self.read_camera, cat, seq_name))
output['frames'] = LazyArray(indices, partial(self.read_image, cat, seq_name))
output['sequence_id'] = seq_name
return output
if __name__ == '__main__':
    ll = ShapenetLoader(sys.argv[1], 'train')  # split is a required argument
ll[0]
breakpoint()
```
#### File: viewformer/data/tfrecord_dataset.py
```python
import os
import json
import struct
import math
from typing import List, Union, Callable, Any
from functools import partial
import tensorflow as tf
import tqdm
from tensorflow.data import Dataset
from viewformer.utils import SplitIndices, dict_replace
from viewformer.utils.geometry_tf import quaternion_multiply, make_quaternion_y, make_quaternion_x
from viewformer.utils.geometry_tf import quaternion_to_euler
from ._common import get_dataset_url, get_dataset_info, expand_path
def loader_to_dataset(loader):
assert len(loader) > 0
first_batch = loader[0]
types = {k: str(x.dtype) for k, x in first_batch.items()}
shapes = {k: x.shape for k, x in first_batch.items()}
dataset = tf.data.Dataset.from_generator(lambda: loader, output_types=types, output_shapes=shapes)
return dataset
def generate_dataset_handle_existing_settings(path, settings, ignore=None):
if ignore is None:
ignore = {'features'}
else:
ignore = set(ignore)
path = os.path.join(path, 'info.json')
if os.path.exists(path):
with tf.io.gfile.GFile(path, mode='r') as f:
old_settings = json.load(f)
old_settings_str = json.dumps({k: v for k, v in old_settings.items() if not k.endswith('_size') and k not in ignore}, sort_keys=True, indent=2)
settings_str = json.dumps({k: v for k, v in settings.items() if not k.endswith('_size') and k not in ignore}, sort_keys=True, indent=2)
if old_settings_str == settings_str:
return # Ok, we can override the dataset
else:
while True:
print('There already exists a dataset with the same name, but different parameters')
print('old parameters:')
print(old_settings_str)
print('new parameters:')
print(settings_str)
print()
resp = input('Do you want to override it? [y/n]\n')
if resp.lower() == 'y':
tf.io.gfile.rmtree(os.path.dirname(path))
tf.io.gfile.makedirs(os.path.dirname(path))
break
elif resp.lower() == 'n':
exit(0)
def transform_viewpoint(v):
y, p = tf.split(v[..., 3:], 2, axis=-1)
# position, [yaw, pitch]
view_vector = [v[..., :3], tf.cos(y), tf.sin(y), tf.cos(p), tf.sin(p)]
v_hat = tf.concat(view_vector, axis=-1)
return v_hat
def transform_image(x):
return x * 2 - 1
def _load_dataset(load_dataset_fn, path: str, split: str, batch_size: int):
# def generate_distributed_dataset(input_context: tf.distribute.InputContext):
# dataset = Dataset.from_tensor_slices(paths)
# local_batch_size = input_context.get_per_replica_batch_size(batch_size)
# dataset = dataset.shard(input_context.num_input_pipelines, input_context.input_pipeline_id)
# if split == 'train':
# dataset = dataset.shuffle(1000)
# dataset = dataset.interleave(tf.data.TFRecordDataset,
# cycle_length=num_workers, block_length=1)
# dataset = load_dataset_fn(dataset, info, split)
# dataset = dataset.batch(local_batch_size)
# dataset = dataset.prefetch(prefetch)
# return dataset
# strategy = tf.distribute.get_strategy()
# return strategy.distribute_datasets_from_function(generate_distributed_dataset)
pass
def load_image_dataset(path: str, batch_size: int, image_size: int, repeat: int = None):
info = get_dataset_info(path)
assert info['frame_size'] == image_size, f'Dataset has a different image size: {info["frame_size"]} != {image_size}'
def load_split(split):
paths = [x + '.tfrecord' for x in expand_path(get_dataset_url(path, split, info))]
feature_description = {
'frames': tf.io.RaggedFeature(tf.string),
}
def parse_example(x):
x = tf.io.parse_example(x, feature_description)
return x['frames']
def preprocess_data(frame):
frame = tf.io.decode_image(frame, dtype=tf.float32)
frame = tf.ensure_shape(frame, (info['frame_size'], info['frame_size'], 3))
frame = transform_image(frame)
return frame
def _load_dataset(input_context):
dataset = Dataset.from_tensor_slices(paths)
local_batch_size = input_context.get_per_replica_batch_size(batch_size)
dataset = dataset.shard(input_context.num_input_pipelines, input_context.input_pipeline_id)
if split == 'train':
dataset = dataset.shuffle(1000)
d = dataset.interleave(tf.data.TFRecordDataset,
cycle_length=tf.data.AUTOTUNE, block_length=1)
d = d.map(parse_example, num_parallel_calls=tf.data.AUTOTUNE)
d = d.flat_map(lambda x: tf.data.Dataset.from_tensor_slices(x))
d = d.map(preprocess_data, num_parallel_calls=tf.data.AUTOTUNE)
# Note, we shuffle both the training and the validation sets
d = d.shuffle(1000)
if repeat is not None:
d = d.repeat(repeat)
d = d.batch(local_batch_size)
d = d.prefetch(tf.data.AUTOTUNE)
return d
strategy = tf.distribute.get_strategy()
return strategy.distribute_datasets_from_function(_load_dataset)
return tuple(map(load_split, ('train', 'test')))
def load_token_dataset(path: str, batch_size: int, sequence_size: int, token_image_size: int, repeat: int = None, max_samples_per_environment: int = -1, transform=None):
info = get_dataset_info(path.split(',')[0])
poses_num_dim = 5 if 'cameras-gqn' in info.get('features', set()) else 7
def load_split(training):
# shuffle = split == 'train'
paths = []
for dpath in path.split(','):
info = get_dataset_info(dpath)
split = 'train' if training else ('val' if 'val' in info.get('splits', []) else 'test')
paths.extend([x + '.tfrecord' for x in expand_path(get_dataset_url(dpath, split, info))])
feature_description = {
'cameras': tf.io.RaggedFeature(tf.float32),
'codes': tf.io.RaggedFeature(tf.int64),
}
def parse_example(x):
x = tf.io.parse_example(x, feature_description)
poses = tf.reshape(x['cameras'], [-1, poses_num_dim])
if poses_num_dim == 5:
poses = fix_legacy_gqn_cameras(poses)
tokens = tf.reshape(x['codes'], [-1, token_image_size, token_image_size])
# Shuffle train environments
# Note, we should also shuffle dev
indices = tf.range(start=0, limit=tf.shape(poses)[0], dtype=tf.int32)
shuffled_indices = tf.random.shuffle(indices)
poses = tf.gather(poses, shuffled_indices)
tokens = tf.gather(tokens, shuffled_indices)
return tf.data.Dataset.from_tensors((poses, tokens)).unbatch().batch(sequence_size, drop_remainder=True)
def _load_dataset(input_context):
dataset = Dataset.from_tensor_slices(paths)
local_batch_size = input_context.get_per_replica_batch_size(batch_size)
dataset = dataset.shard(input_context.num_input_pipelines, input_context.input_pipeline_id)
if training:
dataset = dataset.shuffle(1000)
d = dataset.interleave(tf.data.TFRecordDataset,
cycle_length=tf.data.AUTOTUNE, block_length=1)
d = d.interleave(parse_example, cycle_length=8, num_parallel_calls=tf.data.AUTOTUNE)
# Sample multiple queries per environment
def transform_environment(x, y):
env_d = (tf.data.Dataset.from_tensor_slices((x, y))
.shuffle(1000)
.batch(sequence_size, drop_remainder=True)
.take(max_samples_per_environment))
if transform is not None:
env_d = env_d.map(partial(transform, split='train' if training else 'test'))
return env_d
d = d.flat_map(transform_environment)
d = d.shuffle(1000)
if repeat is not None:
d = d.repeat(repeat)
d = d.batch(local_batch_size)
d = d.prefetch(tf.data.AUTOTUNE)
return d
strategy = tf.distribute.get_strategy()
return strategy.distribute_datasets_from_function(_load_dataset)
return tuple(map(load_split, (True, False)))
def format_image(image):
if len(tf.shape(image)) > 1 and tf.shape(image)[-3] == 3:
image = tf.transpose(image, (0, 2, 3, 1))
return image
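# The two helpers below convert between the legacy 5-dimensional GQN pose
# representation (x, y, z, yaw, pitch) and the 7-dimensional representation used
# elsewhere (3-D position followed by a unit quaternion): fix_legacy_gqn_cameras
# maps 5-D poses to 7-D, and get_legacy_gqn_representation is the inverse
# direction (summary inferred from the code below).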
def fix_legacy_gqn_cameras(poses, position_multiplier=1.0):
x, y, z, yaw, pitch = tf.unstack(poses, 5, axis=-1)
return tf.concat(
(position_multiplier * tf.stack([y, -z, -x], axis=-1),
quaternion_multiply(make_quaternion_y(math.pi - yaw), make_quaternion_x(pitch))),
-1)
def get_legacy_gqn_representation(cameras):
xyz, quaternion = tf.split(cameras, [3, 4], axis=-1)
x, y, z = tf.unstack(xyz, 3, axis=-1)
rx, ry, rz = tf.unstack(quaternion_to_euler(quaternion), 3, axis=-1)
ry = ((math.pi - ry) + math.pi) % (2 * math.pi) - math.pi
return tf.stack([-z, x, -y, ry, rx], axis=-1)
def read_shards(shard_paths, info, image_size=None,
features=None, _decode_image=True, shuffle_sequences: bool = False,
split=None):
if split is None:
split = os.path.split(next(iter(shard_paths)))[-1][len(info['name'] + '-'):]
split = split[:split.rindex('-of')]
split = split[:split.rindex('-')]
sequence_size = info.get(f'{split}_sequence_size', None)
if features is None:
features = info.get('features', {'cameras', 'frames'})
if 'codes' in features or 'code_probs' in features or 'code_probs_truncated' in features:
token_image_size = info['token_image_size']
if image_size is not None:
assert info['frame_size'] == image_size, f'Dataset has a different image size: {info["frame_size"]} != {image_size}'
# Prepare dataset
feature_description = dict()
if 'cameras' in features or 'cameras-gqn' in features:
poses_num_dim = 5 if 'cameras-gqn' in features else 7
if sequence_size is None:
feature_description['cameras'] = tf.io.RaggedFeature(tf.float32)
else:
feature_description['cameras'] = tf.io.FixedLenFeature([sequence_size * poses_num_dim], tf.float32)
if 'codes' in features:
if sequence_size is None:
feature_description['codes'] = tf.io.RaggedFeature(tf.int64)
else:
feature_description['codes'] = tf.io.FixedLenFeature([sequence_size * info['token_image_size'] ** 2], tf.int64)
if 'images' in features or 'frames' in features:
if sequence_size is None:
feature_description['frames'] = tf.io.RaggedFeature(tf.string)
else:
feature_description['frames'] = tf.io.FixedLenFeature([sequence_size], tf.string)
def parse_example(x):
output = tf.io.parse_example(x, feature_description)
if 'cameras' in features or 'cameras-gqn' in features:
poses = tf.reshape(output['cameras'], [-1, poses_num_dim])
if poses_num_dim == 5:
poses = fix_legacy_gqn_cameras(poses)
output['cameras'] = poses
if 'codes' in features:
tokens = tf.reshape(output['codes'], [-1, token_image_size, token_image_size])
output['codes'] = tokens
if 'frames' in features or 'images' in features:
frame_size = info['frame_size']
if _decode_image:
output['frames'] = tf.map_fn(partial(tf.io.decode_image, dtype=tf.uint8, expand_animations=False), output['frames'], fn_output_signature=tf.uint8)
return output
dataset = tf.data.TFRecordDataset(shard_paths)
dataset = dataset.map(parse_example, num_parallel_calls=tf.data.AUTOTUNE)
return dataset
def get_shard_filename(path, split, shard_id, size):
return f'{path}-{split}-{shard_id:06d}-of-{size:06d}.tfrecord'
def build_shard_index(tfrecord_file: str, index_file: str) -> None:
    # Build a simple offset index for a TFRecord file. Each record is stored as:
    # uint64 payload length, 4-byte CRC of the length, the payload itself, and a
    # 4-byte CRC of the payload. The index stores one "<offset> <size>" line per record.
    infile = open(tfrecord_file, "rb")
    outfile = open(index_file, "w")
    while True:
        current = infile.tell()
        byte_len = infile.read(8)
        if len(byte_len) == 0:
            break
        infile.read(4)  # skip the CRC of the length field
        proto_len = struct.unpack("q", byte_len)[0]
        infile.read(proto_len)  # skip the serialized example
        infile.read(4)  # skip the CRC of the payload
        outfile.write(str(current) + " " + str(infile.tell() - current) + "\n")
    infile.close()
    outfile.close()
def write_shard(path, data, features: List[str]):
with tf.io.TFRecordWriter(f'{path}.tfrecord.tmp') as current_writer:
for i, sequence in enumerate(data):
feature = dict()
if 'cameras' in features or 'cameras-gqn' in features:
cameras = tf.convert_to_tensor(sequence['cameras'])
if hasattr(cameras, 'numpy'):
cameras = cameras.numpy()
feature['cameras'] = tf.train.Feature(float_list=tf.train.FloatList(value=cameras.reshape([-1])))
if 'codes' in features:
value = tf.convert_to_tensor(sequence['codes'])
if value.dtype == 'int32':
value = tf.cast(value, tf.int64)
if hasattr(value, 'numpy'):
value = value.numpy()
feature['codes'] = tf.train.Feature(int64_list=tf.train.Int64List(value=tf.reshape(value, [-1])))
if 'frames' in features:
value = tf.convert_to_tensor(sequence['frames'])
value = format_image(value)
if hasattr(value[0], 'dtype') and value[0].dtype == 'uint8':
value = [x.numpy() for x in tf.map_fn(tf.image.encode_jpeg, value, dtype=tf.string)]
feature['frames'] = tf.train.Feature(bytes_list=tf.train.BytesList(value=value))
example = tf.train.Example(features=tf.train.Features(feature=feature))
current_writer.write(example.SerializeToString())
current_writer.flush()
try:
build_shard_index(f'{path}.tfrecord.tmp', f'{path}.index')
except Exception:
print(f'Failed to create index for shard: {path}.tfrecord')
tf.io.gfile.rename(f'{path}.tfrecord.tmp', f'{path}.tfrecord', overwrite=True)
```
#### File: viewformer/evaluate/evaluate_codebook.py
```python
from aparse import click, ConditionalType
import os
import tqdm
import json
from typing import Optional
from collections import OrderedDict
from itertools import chain
import tensorflow as tf
from viewformer.utils.tensorflow import load_model
from viewformer.data.loaders import get_loaders
from viewformer.data._common import resize
from viewformer.utils.metrics import LPIPSMetric, SSIMMetric, PSNRMetric, ImageRMSE
def resize_tf(images, size, method=None):
return tf.convert_to_tensor(resize(images.numpy(), size, method=method))
class Evaluator:
def __init__(self, image_size: int = None):
self.image_size = image_size
self._image_generation_metrics = [
tf.keras.metrics.MeanSquaredError('mse'),
ImageRMSE('rmse'),
tf.keras.metrics.MeanAbsoluteError('mae'),
PSNRMetric('psnr'),
LPIPSMetric('vgg', name='lpips'),
SSIMMetric('ssim')]
def update_state(self, ground_truth_images, generated_images):
image_size = self.image_size
if image_size is None:
image_size = tf.maximum(tf.shape(ground_truth_images)[-2], tf.shape(generated_images)[-2])
ground_truth_images = resize_tf(ground_truth_images, image_size)
if tf.shape(generated_images)[-2] != image_size:
            # When upsampling the generated images, we use bilinear interpolation as well
generated_images = resize_tf(generated_images, image_size, 'bilinear')
for metric in self._image_generation_metrics:
metric.update_state(ground_truth_images, generated_images)
def get_progress_bar_info(self):
return OrderedDict([
('img_rgbl1', float(next((x for x in self._image_generation_metrics if x.name == 'mae')).result())),
('img_lpips', float(next((x for x in self._image_generation_metrics if x.name == 'lpips')).result()))])
def result(self):
return OrderedDict((
(m.name, float(m.result()))
for m in chain(self._image_generation_metrics)))
def build_store_predictions(job_dir, limit: int = None):
os.makedirs(job_dir, exist_ok=True)
# assert len(os.listdir(job_dir)) == 0, f'Evaluation directory {job_dir} is not empty'
i = 0
def store_predictions(ground_truth_images, generated_images, postfix: str = ''):
nonlocal i
for gt_img, gen_img in zip(ground_truth_images, generated_images):
if limit is None or limit == -1 or i < limit:
tf.io.write_file(tf.constant(os.path.join(job_dir, f'{i:08d}-gen{postfix}.png')), tf.io.encode_png(gen_img))
tf.io.write_file(tf.constant(os.path.join(job_dir, f'{i:08d}-gt{postfix}.png')), tf.io.encode_png(gt_img))
i += 1
return store_predictions
def generate_batch_predictions(codebook_model, images):
fimages = resize_tf(images, codebook_model.config.image_size)
fimages = tf.image.convert_image_dtype(fimages, tf.float32) * 2 - 1
codes = codebook_model.encode(fimages)[-1] # [N, H', W']
generated_images = codebook_model.decode_code(codes)
generated_images = tf.clip_by_value(generated_images, -1, 1)
generated_images = tf.image.convert_image_dtype(generated_images / 2 + 0.5, tf.uint8)
return dict(
ground_truth_images=images,
generated_images=generated_images)
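# The round trip above measures pure codebook reconstruction quality: frames are
# resized to the codebook resolution, scaled to [-1, 1], encoded into discrete
# code indices, decoded back to images, clipped, and rescaled to uint8 so they
# can be compared against the untouched ground-truth frames.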
#
# Types used in argument parsing
#
def _loader_switch_cls(cls):
class Loader(cls):
# Disable image_size argument in loader classes
def __init__(self, *args, image_size=None, sequence_size=None, **kwargs):
raise NotImplementedError()
def __new__(_cls, *args, **kwargs):
# Return callback to construct Loader on the Fly
return lambda image_size, sequence_size: cls(*args, **kwargs, image_size=image_size, sequence_size=sequence_size)
return Loader
LoaderSwitch = ConditionalType('Loader', {k: _loader_switch_cls(v) for k, v in get_loaders().items()}, default='dataset')
@click.command('evaluate')
def main(loader: LoaderSwitch,
codebook_model: str,
job_dir: str,
batch_size: int,
num_eval_images: Optional[int] = None,
num_store_images: int = -1,
single_image_per_scene: bool = True,
image_size: Optional[int] = None):
codebook_model = load_model(codebook_model)
if single_image_per_scene:
loader = loader(None, None)
else:
loader = loader(None, 1)
store_predictions = build_store_predictions(job_dir, num_store_images)
evaluator = Evaluator(image_size=image_size)
dataset = tf.data.Dataset.from_generator(lambda: (x['frames'] for x in loader),
output_types=tf.uint8)
if single_image_per_scene:
dataset = dataset.map(lambda x: x[:1])
dataset = dataset.unbatch()
if num_eval_images is not None:
dataset = dataset.take(num_eval_images)
else:
num_eval_images = sum(loader.num_images_per_sequence())
dataset = dataset.batch(batch_size)
with tqdm.tqdm(total=(num_eval_images + batch_size - 1) // batch_size, desc='evaluating') as progress:
for batch in tqdm.tqdm(dataset):
batch_prediction = generate_batch_predictions(codebook_model, batch)
store_predictions(**batch_prediction)
evaluator.update_state(**batch_prediction)
progress.set_postfix(evaluator.get_progress_bar_info())
progress.update()
result = evaluator.result()
with open(os.path.join(job_dir, 'results.json'), 'w+') as f:
json.dump(result, f)
print('Results:')
for m, val in result.items():
print(f' {m}: {val:.6f}')
if __name__ == '__main__':
main()
```
#### File: viewformer/evaluate/evaluate_sevenscenes_multictx.py
```python
import os
import json
import random
from aparse import click
from typing import List, Optional
import numpy as np
import tensorflow as tf
import tqdm
from viewformer.utils.tensorflow import load_model
from viewformer.data.loaders import SevenScenesLoader
from viewformer.data.loaders.sevenscenes import ALL_SCENES
from viewformer.evaluate.evaluate_transformer_multictx import generate_batch_predictions, build_store_predictions, MultiContextEvaluator, print_metrics
from viewformer.evaluate.evaluate_sevenscenes import SceneLookup
@click.command('evaluate-sevenscenes-multictx')
def main(path: str,
transformer_model: str,
codebook_model: str,
job_dir: str,
batch_size: int,
scenes: List[str] = None,
num_eval_sequences: Optional[int] = 100,
store_ctx: bool = True,
num_store_images: int = 100):
if scenes is None:
scenes = ALL_SCENES
codebook_model = load_model(codebook_model)
all_results = dict()
model = None
for scene in scenes:
scene_lookup = SceneLookup(path, scene, 128)
if model is None or transformer_model.format(scene=scene) != transformer_model:
model = load_model(transformer_model.format(scene=scene))
def build_batch(batch):
gt_frames = batch['frames']
gt_cameras = batch['cameras']
ctx = random.sample(scene_lookup.files, 19)
ctx_cameras, ctx_frames = tuple(np.stack(y, 0) for y in zip(*(scene_lookup[x] for x in ctx)))
cameras = np.concatenate((ctx_cameras, gt_cameras), 0)[np.newaxis, ...]
frames = np.concatenate((ctx_frames, gt_frames), 0)[np.newaxis, ...]
return tf.convert_to_tensor(cameras), tf.convert_to_tensor(frames)
store_predictions = build_store_predictions(os.path.join(job_dir, scene), num_store_images)
evaluator = MultiContextEvaluator(20, image_size=128)
test_loader = SevenScenesLoader(path=path, split='test',
sequence_size=1,
image_size=128,
scenes=[scene],
_load_file_paths=True)
random_indices = random.Random(42).sample(list(range(len(test_loader))), min(len(test_loader), num_eval_sequences))
with tqdm.tqdm(total=len(random_indices), desc=f'evaluating {scene}') as progress:
for index in tqdm.tqdm(random_indices):
cameras, frames = build_batch(test_loader[index])
batch_prediction = generate_batch_predictions(model, codebook_model, frames, cameras)
evaluator.update_state(**batch_prediction)
if store_ctx:
batch_prediction['ctx'] = frames[:, :-1]
store_predictions(**batch_prediction)
progress.set_postfix(evaluator.get_progress_bar_info())
progress.update()
result = evaluator.result()
all_results[scene] = result
print(f'Results on {scene}:')
print_metrics(result)
os.makedirs(os.path.join(job_dir, scene), exist_ok=True)
with open(os.path.join(job_dir, scene, 'results.json'), 'w+') as f:
json.dump(result, f)
os.makedirs(job_dir, exist_ok=True)
with open(os.path.join(job_dir, 'results.json'), 'w+') as f:
json.dump(all_results, f)
if __name__ == '__main__':
main()
```
#### File: viewformer/evaluate/generate_images.py
```python
import os
from aparse import click
import tqdm
import tensorflow as tf
from viewformer.utils.tensorflow import load_model
from viewformer.data.loaders import DatasetLoader
from .evaluate_transformer import generate_batch_predictions, LoaderSwitch
@click.command('generate-gqn-images')
def main(dataset_path: str,
job_dir: str,
transformer_model: str,
codebook_model: str):
num_eval_sequences = 5
transformer_model = load_model(transformer_model)
codebook_model = load_model(codebook_model)
loader = DatasetLoader(dataset_path, 'test', image_size=codebook_model.config.image_size)
dataset = tf.data.Dataset.from_generator(lambda: loader,
output_types={
'frames': tf.uint8,
'cameras': tf.float32})
num_eval_sequences = num_eval_sequences if num_eval_sequences is not None else len(loader)
dataset = dataset.take(num_eval_sequences)
dataset = dataset.batch(1)
for i, batch in enumerate(tqdm.tqdm(dataset, total=num_eval_sequences, desc='generating')):
batch['frames'] = tf.concat((batch['frames'][:, :3], batch['frames'][:, -1:]), 1)
batch['cameras'] = tf.concat((batch['cameras'][:, :3], batch['cameras'][:, -1:]), 1)
batch_prediction = generate_batch_predictions(transformer_model, codebook_model, batch['frames'], batch['cameras'])
for gt_image, gen_image in zip(batch_prediction['ground_truth_images'], batch_prediction['generated_images']):
tf.io.write_file(os.path.join(job_dir, f'gen{i}.png'), tf.image.encode_png(tf.image.convert_image_dtype(gen_image, 'uint8')))
tf.io.write_file(os.path.join(job_dir, f'gt{i}.png'), tf.image.encode_png(tf.image.convert_image_dtype(gt_image, 'uint8')))
for j, img in enumerate(batch['frames'][0, :-1]):
tf.io.write_file(os.path.join(job_dir, f'c{i}_{j}.png'), tf.image.encode_png(tf.image.convert_image_dtype(img, 'uint8')))
if __name__ == '__main__':
main()
```
#### File: viewformer/models/config.py
```python
import copy
from typing import Tuple, List, Optional
from aparse import Literal
from dataclasses import dataclass, fields, field, is_dataclass
from viewformer.utils.schedules import Schedule
ModelType = Literal['codebook', 'transformer']
def asdict(obj):
dict_factory = dict
def _asdict_inner(obj, dict_factory):
if hasattr(obj, 'from_str'):
return str(obj)
elif is_dataclass(obj):
result = []
for f in fields(obj):
value = _asdict_inner(getattr(obj, f.name), dict_factory)
result.append((f.name, value))
return dict_factory(result)
elif isinstance(obj, tuple) and hasattr(obj, '_fields'):
return type(obj)(*[_asdict_inner(v, dict_factory) for v in obj])
elif isinstance(obj, (list, tuple)):
return type(obj)(_asdict_inner(v, dict_factory) for v in obj)
elif isinstance(obj, dict):
return type(obj)((_asdict_inner(k, dict_factory),
_asdict_inner(v, dict_factory))
for k, v in obj.items())
else:
return copy.deepcopy(obj)
if not is_dataclass(obj):
raise TypeError("asdict() should be called on dataclass instances")
return _asdict_inner(obj, dict_factory)
@dataclass
class ModelConfig:
model: str = field(init=False)
def __post_init__(self):
cls_name = type(self).__name__
assert cls_name.endswith('Config')
cls_name = cls_name[:-len('Config')]
cls_name = cls_name.lower()
self.model = cls_name
def asdict(self):
return asdict(self)
@classmethod
def supported_config_dict(cls):
configs = {}
if cls != ModelConfig:
configs[cls.__name__.lower()[:-len('config')]] = cls
for c in cls.__subclasses__():
configs.update(c.supported_config_dict())
return configs
@dataclass
class MIGTConfig(ModelConfig):
n_embeddings: int = 1024
n_head: int = 12
d_model: int = 768
dropout: float = 0.1
n_layer: int = 12
weight_decay: float = 0.01
label_smoothing: float = 0.0
learning_rate: float = 6.4e-4
batch_size: int = 64
gradient_clip_val: float = 0.0
sequence_size: int = 20
token_image_size: int = 8
total_steps: int = 300000
n_loss_skip: int = 4
augment_poses: Literal['no', 'relative', 'simple', 'advanced'] = 'relative'
use_dynamic_pose_loss: bool = False
localization_weight: Schedule = Schedule.from_str('1')
image_generation_weight: float = 1.
pose_multiplier: float = 1.
random_pose_multiplier: float = 1.
@property
def model_type(self):
return 'transformer'
@dataclass
class VQGANConfig(ModelConfig):
learning_rate: float = 1.584e-3
embed_dim: int = 256
n_embed: int = 1024
z_channels: int = 256
resolution: int = 256
in_channels: int = 3
out_ch: int = 3
ch: int = 128
num_res_blocks: int = 2
ch_mult: List[int] = field(default_factory=lambda: [1, 1, 2, 2, 4])
attn_resolutions: List[int] = field(default_factory=lambda: [16])
gradient_clip_val: float = .0
batch_size: int = 352
image_size: int = 128
total_steps: int = 200000
codebook_weight: float = 1.0
pixelloss_weight: float = 1.0
perceptual_weight: float = 1.0
@property
def stride(self):
return 2 ** (len(self.ch_mult) - 1)
@property
def model_type(self):
return 'codebook'
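# Example of how the registry above behaves (a sketch; only the config classes
# defined in this file are assumed to exist):
#
#   >>> sorted(ModelConfig.supported_config_dict().keys())
#   ['migt', 'vqgan']
#   >>> VQGANConfig(image_size=256).asdict()['model']
#   'vqgan'
#
# supported_config_dict() walks the subclass tree and keys each config class by
# its lowercased name without the 'Config' suffix; __post_init__ stores the same
# key in the `model` field, so asdict() round-trips it.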
```
#### File: viewformer/utils/_common.py
```python
import os
import shutil
import inspect
import logging
import tarfile
import sys
from dataclasses import dataclass
from functools import partial
from collections import Counter
class SplitIndices:
def __init__(self, indices):
if isinstance(indices, range):
self._indices = f'{indices.start}:{indices.stop}:{indices.step}'
elif isinstance(indices, list):
self._indices = ','.join(str(x) for x in indices)
elif isinstance(indices, SplitIndices):
self._indices = indices._indices
else:
self._indices = indices
@classmethod
def from_str(cls, str_val):
return SplitIndices(str_val)
def __repr__(self):
return self._indices
def __str__(self):
return self._indices
def restrict(self, b):
vals = []
if not isinstance(b, SplitIndices):
b = SplitIndices(b)
limit = b.left_limit()
for x in self._indices.split(','):
xx = [int(a) if a else None for a in x.split(':')]
if len(xx) == 1:
if xx[0] in b:
vals.append(xx[0])
elif len(xx) == 2:
xx.append(None)
if len(xx) == 3:
cur = xx[0]
if cur is None:
cur = 0
while (xx[1] is None or cur < xx[1]) and cur < limit:
if cur in b:
vals.append(cur)
cur += 1 if xx[2] is None else xx[2]
return SplitIndices(','.join(map(str, vals)))
def __contains__(self, val):
for x in self._indices.split(','):
xx = [int(a) if a else None for a in x.split(':')]
if len(xx) == 1:
if val == xx[0]:
return True
else:
continue
if len(xx) == 2:
step = 1
else:
step = xx[-1]
start, stop = xx[:2]
if start is None:
start = 0
if (val - start) % step == 0 and (stop is None or val < stop) and (start is None or val >= start):
return True
return False
def left_limit(self):
max_v = -float('inf')
for x in self._indices.split(','):
xx = [int(a) if a else None for a in x.split(':')]
            if len(xx) == 1:
                # A bare index has no stop element; skip the range handling below.
                max_v = max(max_v, xx[0] + 1)
                continue
            if xx[1] is None:
                return float('inf')
            return xx[1]
return max_v
def __iter__(self):
if self._indices == '':
return
for x in self._indices.split(','):
xx = [int(a) if a else None for a in x.split(':')]
if len(xx) == 1:
yield xx[0]
elif len(xx) == 2:
xx.append(None)
if len(xx) == 3:
cur = xx[0]
if cur is None:
cur = 0
while xx[1] is None or cur < xx[1]:
yield cur
cur += 1 if xx[2] is None else xx[2]
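# Worked example of the slice-string format parsed above (consistent with
# __iter__ and __contains__): list(SplitIndices('0:6:2,10')) gives [0, 2, 4, 10];
# `4 in SplitIndices('0:6:2')` is True while `5 in SplitIndices('0:6:2')` is
# False, because membership checks (value - start) % step against the stop.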
def is_torch_model(checkpoint):
return checkpoint.endswith('.pth') or checkpoint.endswith('.ckpt')
def batch_slice(x, ind):
if isinstance(x, tuple):
return tuple(map(partial(batch_slice, ind=ind), x))
elif isinstance(x, dict):
return x.__class__([(k, batch_slice(v, ind)) for k, v in x.items()])
return x[ind]
def batch_len(x):
if isinstance(x, tuple):
return batch_len(x[0])
elif isinstance(x, dict):
return batch_len(next(iter(x.values())))
# return x.shape[0]
return len(x)
def dict_replace(d, key, value):
d = dict(**d)
d[key] = value
return d
def single(iterator):
value = None
for x in iterator:
if value is not None:
raise RuntimeError('Iterable contains more than one item')
value = (x,)
if value is None:
raise StopIteration('Iterable contains no items')
return value[0]
def unique(iterable):
outputted = set()
for x in iterable:
if x not in outputted:
outputted.add(x)
yield x
def pull_checkpoint(checkpoint, override=False):
import requests
from tqdm import tqdm
path = f'https://data.ciirc.cvut.cz/public/projects/2022ViewFormer/checkpoints/{checkpoint}.tar.gz'
basename = os.path.split(path)[1][:-len('.tar.gz')]
local_path = os.path.expanduser(f'~/.cache/viewformer/{basename}')
if os.path.exists(local_path):
if override:
shutil.rmtree(local_path)
else:
return local_path
os.makedirs(local_path, exist_ok=True)
response = requests.get(path, stream=True)
total_size_in_bytes = int(response.headers.get('content-length', 0))
if response.status_code != 200:
raise Exception(f'Model {checkpoint} not found')
stream = response.raw
_old_read = stream.read
def _read(size):
progress_bar.update(size)
return _old_read(size)
setattr(stream, 'read', _read)
with tqdm(total=total_size_in_bytes, unit='iB', unit_scale=True) as progress_bar, \
tarfile.open(fileobj=stream, mode='r') as tfile:
tfile.extractall(local_path)
return local_path
```
#### File: viewformer/utils/metrics.py
```python
import tensorflow as tf
from tensorflow.python.util import nest
from viewformer.utils import geometry_tf as geometry
def _with_flat_batch(flat_batch_fn):
def fn(x, *args, **kwargs):
shape = tf.shape(x)
flat_batch_x = tf.reshape(x, tf.concat([[-1], shape[-3:]], axis=0))
flat_batch_r = flat_batch_fn(flat_batch_x, *args, **kwargs)
r = nest.map_structure(lambda x: tf.reshape(x, tf.concat([shape[:-3], x.shape[1:]], axis=0)),
flat_batch_r)
return r
return fn
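# _with_flat_batch collapses all leading batch dimensions into one before calling
# flat_batch_fn on tensors shaped [-1, H, W, C], then restores the original
# leading dimensions on every output via nest.map_structure. This lets the 2-D
# depthwise convolution in ssim() below accept inputs with arbitrary batch rank.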
def ssim(X, Y, K1=0.01, K2=0.03, win_size=7,
data_range=1.0, use_sample_covariance=True):
"""
Structural SIMilarity (SSIM) index between two images
Args:
X: A tensor of shape `[..., in_height, in_width, in_channels]`.
Y: A tensor of shape `[..., in_height, in_width, in_channels]`.
Returns:
The SSIM between images X and Y.
Reference:
https://github.com/scikit-image/scikit-image/blob/master/skimage/measure/_structural_similarity.py
Broadcasting is supported.
"""
X = tf.convert_to_tensor(X)
Y = tf.convert_to_tensor(Y)
ndim = 2 # number of spatial dimensions
nch = tf.shape(X)[-1]
filter_func = _with_flat_batch(tf.nn.depthwise_conv2d)
kernel = tf.cast(tf.fill([win_size, win_size, nch, 1], 1 / win_size ** 2), X.dtype)
filter_args = {'filter': kernel, 'strides': [1] * 4, 'padding': 'VALID'}
NP = win_size ** ndim
# filter has already normalized by NP
if use_sample_covariance:
cov_norm = NP / (NP - 1) # sample covariance
else:
        cov_norm = 1.0  # population covariance to match Wang et al. 2004
# compute means
ux = filter_func(X, **filter_args)
uy = filter_func(Y, **filter_args)
# compute variances and covariances
uxx = filter_func(X * X, **filter_args)
uyy = filter_func(Y * Y, **filter_args)
uxy = filter_func(X * Y, **filter_args)
vx = cov_norm * (uxx - ux * ux)
vy = cov_norm * (uyy - uy * uy)
vxy = cov_norm * (uxy - ux * uy)
R = data_range
C1 = (K1 * R) ** 2
C2 = (K2 * R) ** 2
A1, A2, B1, B2 = ((2 * ux * uy + C1,
2 * vxy + C2,
ux ** 2 + uy ** 2 + C1,
vx + vy + C2))
D = B1 * B2
S = (A1 * A2) / D
ssim = tf.reduce_mean(S, axis=[-3, -2, -1])
return ssim
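# Quick sanity check for the implementation above (a sketch, not a test from the
# original file): identical inputs yield an SSIM of 1, since then A1 == B1 and
# A2 == B2 in the expression above.
#
#   x = tf.random.uniform((2, 32, 32, 3))
#   tf.debugging.assert_near(ssim(x, x), tf.ones((2,)))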
class AllowNanMean(tf.metrics.Mean):
def __init__(self, name, dtype='float32', allow_nan=True, **kwargs):
super().__init__(name=name, dtype=dtype, **kwargs)
self.allow_nan = allow_nan
def update_state(self, values, sample_weight=None):
if self.allow_nan:
values = tf.reshape(values, (-1,))
if sample_weight is None:
sample_weight = tf.ones_like(values)
            # Compute the NaN mask before zeroing the values, otherwise the mask
            # below would always be empty and NaN entries would keep their weight.
            nan_mask = tf.math.is_nan(values)
            sample_weight = sample_weight * (1. - tf.cast(nan_mask, sample_weight.dtype))
            values = tf.where(nan_mask, tf.zeros_like(values), values)
super().update_state(values, sample_weight)
class CameraPositionError(AllowNanMean):
def __init__(self, name='pose_pos_err', **kwargs):
super().__init__(name=name, **kwargs)
def update_state(self, x1, x2):
super().update_state(tf.norm(x1[..., :3] - x2[..., :3], ord=2, axis=-1))
class CameraOrientationError(AllowNanMean):
def __init__(self, name='pose_ori_err', **kwargs):
super().__init__(name=name, **kwargs)
def update_state(self, x1, x2):
x1 = geometry.quaternion_normalize(x1[..., 3:])
x2 = geometry.quaternion_normalize(x2[..., 3:])
        # We use the arcsine formulation because it is numerically more stable
        # near zero rotation.
diff = geometry.quaternion_multiply(x1, geometry.quaternion_conjugate(x2))
theta = 2 * tf.asin(tf.linalg.norm(diff[..., 1:], axis=-1))
super().update_state(theta)
class Median(tf.metrics.Metric):
def __init__(self, name='median', **kwargs):
super().__init__(name=name, **kwargs)
self._store = None
def update_state(self, values):
values = tf.convert_to_tensor(values)
values = tf.reshape(values, (-1,))
values = tf.cast(values, self.dtype)
if self._store is None:
self._store = values
else:
self._store = tf.concat((self._store, values), 0)
def reset_states(self):
self._store = None
def result(self):
# Compute median
vals = tf.sort(self._store)
if len(vals) % 2 == 1:
return vals[(len(vals) - 1) // 2]
else:
return 0.5 * (vals[int(len(vals) // 2 - 1)] + vals[int(len(vals) // 2)])
class CameraPositionMedian(Median):
def __init__(self, name='pose_pos_median', **kwargs):
super().__init__(name=name, **kwargs)
def update_state(self, x1, x2):
super().update_state(tf.norm(x1[..., :3] - x2[..., :3], ord=2, axis=-1))
class CameraOrientationMedian(Median):
def __init__(self, name='pose_ori_median', **kwargs):
super().__init__(name=name, **kwargs)
def update_state(self, x1, x2):
x1 = geometry.quaternion_normalize(x1[..., 3:])
x2 = geometry.quaternion_normalize(x2[..., 3:])
        # We use the arcsine formulation because it is numerically more stable
        # near zero rotation.
diff = geometry.quaternion_multiply(x1, geometry.quaternion_conjugate(x2))
theta = 2 * tf.asin(tf.linalg.norm(diff[..., 1:], axis=-1))
super().update_state(theta)
class ImageRMSE(tf.keras.metrics.Mean):
def __init__(self, name=None, **kwargs):
super().__init__(name=name, **kwargs)
def update_state(self, gt_images, images):
gt_images = tf.image.convert_image_dtype(gt_images, 'float32') * 255.
images = tf.image.convert_image_dtype(images, 'float32') * 255.
val = tf.reduce_mean(tf.math.squared_difference(gt_images, images), (-1, -2, -3))
val = tf.math.sqrt(val)
super().update_state(val)
class SSIMMetric(tf.keras.metrics.Mean):
def __init__(self, name='ssim', **kwargs):
super().__init__(name=name, **kwargs)
def update_state(self, gt_images, images):
gt_images = tf.image.convert_image_dtype(gt_images, 'float32')
images = tf.image.convert_image_dtype(images, 'float32')
val = ssim(gt_images, images, 1)
super().update_state(val)
class PSNRMetric(tf.keras.metrics.Mean):
def __init__(self, name='psnr', **kwargs):
super().__init__(name=name, **kwargs)
def update_state(self, gt_images, images):
gt_images = tf.image.convert_image_dtype(gt_images, 'float32')
images = tf.image.convert_image_dtype(images, 'float32')
val = tf.image.psnr(gt_images, images, 1)
super().update_state(val)
class LPIPSMetric(tf.keras.metrics.Mean):
_lpips_pool = dict()
def __init__(self, net='vgg', name=None, **kwargs):
from viewformer.models.utils import lpips
if name is None:
name = f'lpips-{net}'
super().__init__(name=name, **kwargs)
if net not in self._lpips_pool:
self._lpips_pool[net] = lpips(net)
self.lpips = self._lpips_pool[net]
def update_state(self, gt_images, images):
gt_images = tf.image.convert_image_dtype(gt_images, 'float32')
images = tf.image.convert_image_dtype(images, 'float32')
val = self.lpips(gt_images, images)
super().update_state(val)
```
#### File: viewformer/utils/tensorflow.py
```python
from typing import List
import os
import json
import tensorflow as tf
import logging
from viewformer.models import AutoModel, load_config, ModelNotFoundError
_logger = logging.getLogger(__name__)
def shape_list(tensor: tf.Tensor) -> List[int]:
dynamic = tf.shape(tensor)
if tensor.shape == tf.TensorShape(None):
return dynamic
static = tensor.shape.as_list()
return [dynamic[i] if s is None else s for i, s in enumerate(static)]
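# shape_list mixes static and dynamic dimensions: for a tensor of static shape
# (None, 128) it returns [<scalar int32 tensor>, 128], so callers can treat known
# dimensions as Python ints and only fall back to tensors where the size is
# unknown at graph-construction time.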
def load_model(checkpoint, restore_weights: bool = True, **kwargs):
model_path, checkpoint = os.path.split(checkpoint)
if not tf.io.gfile.exists(os.path.join(model_path, 'config.json')) and '/' not in model_path:
# It could be a network checkpoint
from viewformer.utils import pull_checkpoint
model_path = pull_checkpoint(checkpoint)
if os.path.exists(os.path.join(model_path, 'model.index')):
checkpoint = 'model'
else:
checkpoint = 'model.ckpt' # Torch checkpoint
with tf.io.gfile.GFile(os.path.join(model_path, 'config.json'), mode='r') as f:
config = json.load(f)
config.update(kwargs)
config = load_config(config)
is_th = checkpoint.endswith('.pth') or checkpoint.endswith('.ckpt')
if is_th:
        _logger.warning('the loaded model is a PyTorch checkpoint')
from viewformer.models import AutoModelTH
import torch
from viewformer.utils.convert import convert_weights_th_to_tf
th_model = AutoModelTH.from_config(config)
checkpoint_data = torch.load(
tf.io.gfile.GFile(os.path.join(model_path, checkpoint), mode='rb'), map_location=torch.device('cpu'))
th_model.load_state_dict(checkpoint_data['state_dict'])
try:
model = AutoModel.from_config(th_model.config)
convert_weights_th_to_tf(th_model, model)
return model
except ModelNotFoundError:
            _logger.warning('the loaded model is not implemented for TensorFlow, we will try to load it using ONNX')
from viewformer.utils.convert import convert_model_th_to_tf
return convert_model_th_to_tf(th_model, checkpoint)
else:
model = AutoModel.from_config(config)
if restore_weights:
status = model.load_weights(os.path.join(model_path, checkpoint))
if hasattr(status, 'expect_partial'):
status.expect_partial()
return model
def batch_cat(*x, axis=0):
if isinstance(x[0], tuple):
return tuple(batch_cat(*y, axis=axis) for y in zip(*x))
elif isinstance(x[0], dict):
return x[0].__class__([(k, batch_cat(*[y[k] for y in x], axis=axis)) for k in x[0].keys()])
return tf.concat(x, axis=axis)
```
#### File: viewformer/utils/testing.py
```python
from itertools import chain
import torch
import tensorflow as tf
from .convert import zip_th_tf_parameters, convert_weights_th_to_tf
def assert_weights_same(torch_module, tf_module, atol=1e-6, rtol=1e-5):
for th_weight, tf_variable in zip_th_tf_parameters(torch_module, tf_module, permute_torch=True):
tf_weight = torch.tensor(tf_variable.numpy())
torch.testing.assert_allclose(tf_weight, th_weight, atol=atol, rtol=rtol)
def _assert_torch_modules_same(m1, m2, input_shape, atol, rtol, transform_weights=None):
flat_torch_inputs = []
def generate_input(input_shape):
if isinstance(input_shape[0], tuple):
return tuple(map(generate_input, input_shape))
else:
inp = torch.randn(input_shape, dtype=torch.float32)
inp.requires_grad = True
flat_torch_inputs.append(inp)
return inp
inp = generate_input(input_shape)
if not isinstance(inp, tuple):
inp = (inp,)
state_dict = m1.state_dict()
if transform_weights is not None:
state_dict = dict(map(transform_weights, state_dict.items()))
m2.load_state_dict(state_dict)
m1.train()
m1.zero_grad()
m2.train()
m2.zero_grad()
o1 = m1(*inp)
o2 = m2(*inp)
def assert_weights_same(m1, m2, atol=atol, rtol=rtol):
s1 = m1.state_dict()
if transform_weights is not None:
s1 = dict(map(transform_weights, s1.items()))
s2 = m2.state_dict()
for v1, v2 in zip(s1.values(), s2.values()):
torch.testing.assert_allclose(v1, v2, atol=atol, rtol=rtol)
assert_weights_same(m1, m2, atol=atol, rtol=rtol)
def assert_same(o1, o2):
if isinstance(o1, torch.Tensor):
assert o1.shape == o2.shape
torch.testing.assert_allclose(o1, o2, atol=atol, rtol=rtol)
elif isinstance(o1, (tuple, list)):
for x1, x2 in zip(o1, o2):
assert_same(x1, x2)
assert_same(o1, o2)
# Assert loss is same
def generate_losses(o1, o2):
if isinstance(o1, tuple):
weights = torch.randn((len(o1),), dtype=torch.float32)
l1, l2 = 0, 0
for w, m_o1, m_o2 in zip(weights, o1, o2):
m_o1, m_o2 = generate_losses(m_o1, m_o2)
l1 += w * m_o1.float()
l2 += w * m_o2.float()
return l1, l2
elif len(o1.shape) == 0 or len(o2) == 1:
return o1.view(tuple()), o2.view(tuple())
else:
weights = torch.randn(o1.shape, dtype=torch.float32)
l1 = (o1 * weights).mean()
l2 = (o2 * weights).mean()
return l1, l2
l1, l2 = generate_losses(o1, o2)
assert abs(l1.item() - l2.item()) < atol
# Assert weights are the same after backprop
l1.backward()
grad1_input = [x.grad.clone().detach() for x in flat_torch_inputs]
if any(True for x in m1.parameters()):
torch.optim.SGD(m1.parameters(), 0.01, 0.0, nesterov=False).step()
for p in flat_torch_inputs:
p.grad = None
l2.backward()
grad2_input = [x.grad.clone().detach() for x in flat_torch_inputs]
if any(True for x in m2.parameters()):
torch.optim.SGD(m2.parameters(), 0.01, 0.0, nesterov=False).step()
assert_weights_same(m1, m2)
# Assert gradient wrt. input is the same
for g1, g2 in zip(grad1_input, grad2_input):
torch.testing.assert_allclose(g1, g2, atol=atol, rtol=rtol)
def assert_modules_same(torch_module, tf_module, input_shape, atol=1e-5, transpose=True, transform_weights=None, rtol=1e-5):
if isinstance(tf_module, torch.nn.Module):
return _assert_torch_modules_same(torch_module, tf_module, input_shape, atol=atol, transform_weights=transform_weights, rtol=rtol)
# We will start by copying weights
flat_tf_inputs = []
flat_torch_inputs = []
def generate_input(input_shape):
if isinstance(input_shape[0], tuple):
return tuple(zip(*map(generate_input, input_shape)))
else:
inp = torch.randn(input_shape, dtype=torch.float32)
inp.requires_grad = True
flat_torch_inputs.append(inp)
if len(input_shape) == 4:
tf_inp = inp.permute(0, 2, 3, 1)
else:
tf_inp = inp
tf_inp = tf_inp.detach().clone().numpy()
tf_inp = tf.Variable(tf_inp, trainable=True)
flat_tf_inputs.append(tf_inp)
return inp, tf_inp
inp, tf_inp = generate_input(input_shape)
if not isinstance(inp, tuple):
inp = (inp,)
tf_inp = (tf_inp,)
tf_module(*tf_inp)
convert_weights_th_to_tf(torch_module, tf_module)
# tf_module(tf_inp)
torch_module.train()
torch_module.zero_grad()
torch_output = torch_module(*inp)
with tf.GradientTape() as tape:
tf_output = tf_module(*tf_inp, training=True)
assert_weights_same(torch_module, tf_module)
def assert_same(o1, o2):
if isinstance(o1, torch.Tensor):
o2 = torch.tensor(o2.numpy())
if len(o2.shape) == 4:
o2 = o2.permute(0, 3, 1, 2)
assert o1.shape == o2.shape
torch.testing.assert_allclose(o1, o2, atol=atol, rtol=rtol)
elif isinstance(o1, (tuple, list)):
for x1, x2 in zip(o1, o2):
assert_same(x1, x2)
assert_same(torch_output, tf_output)
# Assert loss is same
def generate_losses(th_output, tf_output):
if isinstance(th_output, tuple):
weights = torch.randn((len(th_output),), dtype=torch.float32)
tf_loss, th_loss = 0, 0
for w, th_o, tf_o in zip(weights, th_output, tf_output):
th_o, tf_o = generate_losses(th_o, tf_o)
tf_loss += tf.cast(tf_o, tf.float32) * w
th_loss += w * th_o.float()
return th_loss, tf_loss
elif len(th_output.shape) == 0 or len(th_output) == 1:
return th_output.view(tuple()), tf.reshape(tf_output, [])
else:
if len(tf_output.shape) == 4:
tf_output = tf.transpose(tf_output, [0, 3, 1, 2])
weights = torch.randn(th_output.shape, dtype=torch.float32)
th_loss = (th_output * weights).mean()
tf_loss = tf.reduce_mean(tf.cast(tf_output, tf.float32) * weights.numpy())
return th_loss, tf_loss
th_loss, tf_loss = generate_losses(torch_output, tf_output)
assert abs(th_loss.item() - tf_loss.numpy()) < atol
# Assert weights are the same after backprop
tf_grads = tape.gradient(tf_loss, list(chain(tf_module.trainable_variables, flat_tf_inputs)))
tf.keras.optimizers.SGD(0.01).apply_gradients(zip(tf_grads, tf_module.trainable_variables))
th_loss.backward()
if any(True for x in torch_module.parameters()):
torch.optim.SGD(torch_module.parameters(), 0.01, 0.0, nesterov=False).step()
assert_weights_same(torch_module, tf_module, atol=atol, rtol=rtol)
# Assert gradient wrt. input is the same
tf_grads = tf_grads[-len(flat_tf_inputs):]
for th_var, tf_var in zip(flat_torch_inputs, tf_grads):
if len(tf_var.shape) == 4:
tf_var = tf.transpose(tf_var, [0, 3, 1, 2])
torch.testing.assert_allclose(th_var.grad, torch.tensor(tf_var.numpy()), atol=atol, rtol=rtol)
``` |
{
"source": "jkuli-net/ConvFFT",
"score": 2
} |
#### File: jkuli-net/ConvFFT/ConvFFTTorch1.py
```python
# this is meant to be a drop in replacement for torch.conv
# functional_conv1d_fft replaces torch.nn.functional.conv1d
# Conv1d_fft replaces torch.nn.Conv1d
# supports 1d, 2d and 3d convolution
# api is not exactly matching yet
# unsupported: stride, dilation, groups, etc
# b[0,:,:] = ifft( fft(x[0,:,:]) * fft(k[0,0,:,:]) + fft(x[1,:,:]) * fft(k[1,0,:,:]) + fft(x[2,:,:]) * fft(k[2,0,:,:]) )
# b[1,:,:] = ifft( fft(x[0,:,:]) * fft(k[0,1,:,:]) + fft(x[1,:,:]) * fft(k[1,1,:,:]) + fft(x[2,:,:]) * fft(k[2,1,:,:]) )
# b[2,:,:] = ifft( fft(x[0,:,:]) * fft(k[0,2,:,:]) + fft(x[1,:,:]) * fft(k[1,2,:,:]) + fft(x[2,:,:]) * fft(k[2,2,:,:]) )
# b[3,:,:] = ifft( fft(x[0,:,:]) * fft(k[0,3,:,:]) + fft(x[1,:,:]) * fft(k[1,3,:,:]) + fft(x[2,:,:]) * fft(k[2,3,:,:]) )
# b_fft[:,0,0] += bias[:] * prod(shape)
import torch
class conv_fft_function(torch.autograd.Function):
@staticmethod
def forward(ctx, x, k, bias=None, padding = 'valid', fft_dim = 1):
#channel first format only
#if these dims are missing, need to skip the sum_reduce
if x.dim() < fft_dim + 2:
raise NotImplementedError('vector input to conv_fft expected to have shape (batch, channels, data_dim0, data_dimN)')
if k.dim() < fft_dim + 2:
raise NotImplementedError('kernel input to conv_fft expected to have shape (outchannels, inchannels, data_dim0, data_dimN)')
in_channels = k.shape[-(fft_dim + 1)]
out_channels = k.shape[-(fft_dim + 2)]
#the axes where fft is calculated
fft_axes = list(range(-fft_dim, 0))
#kernel size along fft_axes
kernel_size = k.shape[-fft_dim:]
#input, padded, and output sizes along fft_axes, padded is the size used for fft
if padding=='roll':
input_size = x.shape[-fft_dim:]
padded_size = list(x.shape[-fft_dim:])
output_size = x.shape[-fft_dim:]
if padding=='valid':
input_size = x.shape[-fft_dim:]
padded_size = list(x.shape[-fft_dim:])
output_size = [ input_size[i] - (kernel_size[i] - 1) for i in range(fft_dim) ]
if padding=='same':
input_size = x.shape[-fft_dim:]
padded_size = [ input_size[i] + (kernel_size[i] // 2) for i in range(fft_dim) ]
output_size = x.shape[-fft_dim:]
if isinstance(padding, int):
input_size = x.shape[-fft_dim:]
padded_size = [ input_size[i] + padding * 2 for i in range(fft_dim) ]
output_size = [ padding * 2 + input_size[i] - (kernel_size[i] - 1) for i in range(fft_dim) ]
#the kernel needs rolled, all other data are aligned to zero
kernel_roll = [-((size - 1) // 2) for size in kernel_size ]
kernel_unroll = [ ((size - 1) // 2) for size in kernel_size ]
#corrections to padding
# padded_size will be the size of the fft
# any larger paddings should work here
# other sizes might be faster
#'valid' and other strange paddings cause a correction to kernel_roll, other data remain aligned to zero
for i in range(fft_dim):
#for example, if you only want even size fft
#if padded_size[i] & 1:
# padded_size[i] = padded_size[i] + 1
if padding!='roll':
padded_size[i] = padded_size[i] + 31 & ~31
if padding=='valid':
offset = (min(kernel_size[i], input_size[i]) - 1) // 2
kernel_roll[i] = kernel_roll[i] + offset
kernel_unroll[i] = kernel_unroll[i] - offset
if isinstance(padding, int):
offset = (min(kernel_size[i], input_size[i]) - 1) // 2 - padding
kernel_roll[i] = kernel_roll[i] + offset
kernel_unroll[i] = kernel_unroll[i] - offset
#the kernel gets padded up to padded_size before being rolled, slightly inefficient
if fft_dim == 1:
kernel_padding = [0, padded_size[-1] - kernel_size[-1]]
if fft_dim == 2:
kernel_padding = [0, padded_size[-1] - kernel_size[-1], 0, padded_size[-2] - kernel_size[-2]]
if fft_dim == 3:
kernel_padding = [0, padded_size[-1] - kernel_size[-1], 0, padded_size[-2] - kernel_size[-2], 0, padded_size[-3] - kernel_size[-3]]
#these are used only to insert a 1 into the shape
x_fft_shape = x.shape[:-(fft_dim+1)] + (1, in_channels) + tuple(padded_size[:-1]) + (padded_size[-1] // 2 + 1,)
dz_db_fft_shape = x.shape[:-(fft_dim+1)] + (out_channels,1) + tuple(padded_size[:-1]) + (padded_size[-1] // 2 + 1,)
#outputs will be trimmed by these slices
b_slice_size = [...] + [ slice(0, output_size[i]) for i in range(fft_dim) ]
x_slice_size = [...] + [ slice(0, input_size[i]) for i in range(fft_dim) ]
k_slice_size = [...] + [ slice(0, kernel_size[i]) for i in range(fft_dim) ]
x_fft = torch.reshape(torch.fft.rfftn(x, dim=fft_axes, s=padded_size), x_fft_shape)
k_fft = torch.fft.rfftn(torch.roll(torch.nn.functional.pad(k, kernel_padding), kernel_roll, fft_axes), dim=fft_axes)
b_fft = torch.sum(x_fft * torch.conj(k_fft), dim=-(fft_dim + 1)) #sum along in_channels dim
#bias is added to zero bin of fft, it needs scaled by prod(padded_size)
if bias != None:
prod_padded_size = 1
for s in padded_size:
prod_padded_size *= s
b_fft[ (..., ) + (0, ) * fft_dim ] += bias * prod_padded_size
b = torch.fft.irfftn(b_fft, dim=fft_axes, s=padded_size)[b_slice_size]
ctx.save_for_backward(x_fft, k_fft)
ctx.my_saved_variables = [
bias,
fft_dim, dz_db_fft_shape,
padded_size,
kernel_unroll, fft_axes,
x_slice_size,
k_slice_size ]
return b
@staticmethod
def backward(ctx, dz_db):
x_fft, k_fft = ctx.saved_tensors
bias, fft_dim, dz_db_fft_shape, padded_size, kernel_unroll, fft_axes, x_slice_size, k_slice_size = ctx.my_saved_variables
dz_db_fft = torch.reshape(torch.fft.rfftn(dz_db, dim=fft_axes, s=padded_size), dz_db_fft_shape)
#the zero freq dc bin of an fft ... is the sum of the signal ...
#so dz_dbias[out_channel] = dz_db_fft[out_channel, 0, 0].real
if bias != None:
#this should instead sum all leading axes
dz_dbias = torch.sum(dz_db_fft[ (..., 0) + (0,) * fft_dim ], dim=0).real #sum along batch dim(s)
else:
dz_dbias = None
dz_dx_fft = torch.sum(dz_db_fft * k_fft, dim=-(fft_dim + 2)) #sum along out_channels dim
dz_dx = torch.fft.irfftn(dz_dx_fft, dim=fft_axes, s=padded_size)[x_slice_size]
#this should instead sum all leading axes
#reshape(-1, out_c, in_c, *fft_size)
#if i wanted broadcasted conv k=(extradim1, out, in, kernelsize), x=(extradim0, extradim1, in, kernelsize)
#sum pre-channel axes (size>1) in dz_da_fft that are 1 or missing in k_fft.shape, keepdim if 1 is present
dz_dk_fft = torch.sum( x_fft * torch.conj(dz_db_fft), dim=0 ) #sum along batch dim(s)
dz_dk = torch.roll(torch.fft.irfftn(dz_dk_fft, dim=fft_axes, s=padded_size), kernel_unroll, fft_axes)[k_slice_size]
return dz_dx, dz_dk, dz_dbias, None, None
import math
class Conv_fft(torch.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, bias=True, padding=0, device=None, dtype=torch.float32):
super(Conv_fft, self).__init__()
self.padding = padding
weight = torch.zeros((out_channels, in_channels, *kernel_size), dtype=dtype, device=device)
self.weight = torch.nn.Parameter(weight)
n = in_channels
for k in kernel_size:
n *= k
stdv = 1. / math.sqrt(n)
self.weight.data.uniform_(-stdv, stdv)
if bias:
bias = torch.zeros((out_channels,), dtype=dtype, device=device)
self.bias = torch.nn.Parameter(bias)
self.bias.data.uniform_(-stdv, stdv)
else:
self.bias = None
class Conv1d_fft(Conv_fft):
def __init__(self, *args, **kwargs):
super(Conv1d_fft, self).__init__(*args, **kwargs)
def forward(self, x):
return conv_fft_function.apply(x, self.weight, self.bias, self.padding, 1)
class Conv2d_fft(Conv_fft):
def __init__(self, *args, **kwargs):
super(Conv2d_fft, self).__init__(*args, **kwargs)
def forward(self, x):
return conv_fft_function.apply(x, self.weight, self.bias, self.padding, 2)
class Conv3d_fft(Conv_fft):
def __init__(self, *args, **kwargs):
super(Conv3d_fft, self).__init__(*args, **kwargs)
def forward(self, x):
return conv_fft_function.apply(x, self.weight, self.bias, self.padding, 3)
def functional_conv1d_fft(x, k, bias=None, padding='valid'):
return conv_fft_function.apply(x, k, bias, padding, 1)
def functional_conv2d_fft(x, k, bias=None, padding='valid'):
return conv_fft_function.apply(x, k, bias, padding, 2)
def functional_conv3d_fft(x, k, bias=None, padding='valid'):
return conv_fft_function.apply(x, k, bias, padding, 3)
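# Minimal usage sketch (illustrative only; the shapes and layer sizes below are
# arbitrary assumptions, not taken from this module).
if __name__ == '__main__':
    x = torch.randn(4, 3, 32, 32)                    # (batch, in_channels, H, W)
    conv = Conv2d_fft(3, 8, (5, 5), padding='same')  # 3 -> 8 channels, 5x5 kernel
    y = conv(x)
    print(y.shape)                                   # torch.Size([4, 8, 32, 32])
    k = torch.randn(8, 3, 5, 5)
    y2 = functional_conv2d_fft(x, k)                 # default 'valid' padding trims the edges
    print(y2.shape)                                  # torch.Size([4, 8, 28, 28])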
``` |
{
"source": "jKulrativid/DNA_cut_for_grade_XII",
"score": 3
} |
#### File: DNA_cut_for_grade_XII/CODE/graphing.py
```python
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
import random
def circle_plt(dna_stack):
    # assumes dna_stack is a list of DNA fragment strings; chart the share of each unique fragment
    labels = list()
    sizes = list()
    for fragment in sorted(set(dna_stack)):
        labels.append(fragment)
        sizes.append(dna_stack.count(fragment))
fig1, ax1 = plt.subplots()
ax1.pie(sizes, explode=None, labels=labels, autopct='%1.1f%%',
shadow=False, startangle=random.randint(0, 360))
ax1.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.
plt.show()
def horizon_bar_plt(dna_stack):
# Fixing random state for reproducibility
np.random.seed(19680801)
plt.rcdefaults()
fig, ax = plt.subplots()
# Example data
people = ('Tom', 'Dick', 'Harry', 'Slim', 'Jim')
y_pos = np.arange(len(people)) # np.arange(x) = [0, 1, 2 ... x-1]
performance = 3 + 10 * np.random.rand(len(people))
error = np.random.rand(len(people))
ax.barh(y_pos, performance, xerr=error, align='center')
ax.set_yticks(y_pos)
ax.set_yticklabels(people)
ax.invert_yaxis() # labels read top-to-bottom
ax.set_xlabel('Performance')
ax.set_title('How fast do you want to go today?')
plt.show()
def linear_plt(dna_stack):
fig, ax = plt.subplots()
if __name__ == '__main__':
# Formatting data to plotter
'''
a = ['GACCGGCCTAG', 'GATCCGGGC', 'GACC', 'GGCCTAGGATCC', 'GGGC', 'GACC', 'GGCCTAG', 'GATCC', 'GGGC']
times = [1, 3, 5, 2, 4]
circle_plt(a)
'''
horizon_bar_plt(None)
```
#### File: DNA_cut_for_grade_XII/CODE/usage_class.py
```python
class DNA:
def __init__(self, strand, direction):
self.name = 'Unnamed DNA'
self.strand = strand.upper()
self.start = direction[0] + '\''
self.stop = direction[3] + '\''
self.a, self.t, self.c, self.g, self.non_base = self.count_each()
def show_all(self):
print(self.name)
print('{}{}{}'.format(self.start, ' ' * len(self.strand), self.stop))
print('{}{}{}'.format(' '*len(self.start), self.strand, ' '*len(self.stop)))
print('{}{}{}'.format(self.start, ' ' * len(self.strand), self.stop))
def rename(self, name):
self.name = name
def show_length(self):
print('Length of {!r} = {}'.format(self.name, len(self.strand)))
def count_each(self):
a, t, c, g = 0, 0, 0, 0
non_base = 0
for base in self.strand:
if base == 'A':
a += 1
elif base == 'T':
t += 1
elif base == 'C':
c += 1
elif base == 'G':
g += 1
else:
non_base += 1
return a, t, c, g, non_base
class Enzyme(DNA):
def __init__(self, strand, direction, delimiter):
super().__init__(strand, direction)
self.name = 'Unnamed Enzyme'
self.delimiter = delimiter
self.strand, self.cut_position = self.enzyme_setting()
    # the last base of the cut fragment is the one at position self.cut_position (counting the first base as 1)
def enzyme_setting(self):
pure_dna = ''
        cnt = 0  # cnt = index of the current base in the strand
pos = None
for base in self.strand:
if base not in ('A', 'T', 'C', 'G'):
pos = cnt
else:
pure_dna += base
cnt += 1
if pos is None:
return pure_dna, 'Not found'
else:
return pure_dna, pos
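    # Illustrative example: Enzyme('cctag|g', '3to5', '|') stores strand 'CCTAGG'
    # with cut_position 5, so a matching site is cut just after its 5th base.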
class PetriDish:
def __init__(self):
self.stack = list()
def add(self, dna):
self.stack.append(dna)
def show(self):
for index, item in enumerate(self.stack):
print(f'{index+1}. {item.name}')
class DNAStack(PetriDish):
def __init__(self):
super().__init__()
class EnzymeStack(PetriDish):
def __init__(self):
super().__init__()
class CutTest:
def __init__(self, enzyme_stack):
self.history = dict()
for enzyme in enzyme_stack.stack:
self.history[enzyme.name] = 0
def cut_specific(self, dna, enzyme, cutted_stack):
cut_from = 0
for i in range(len(dna.strand) - len(enzyme.strand) + 1):
match_enzyme = True
for j in range(len(enzyme.strand)):
if dna.strand[i + j] != enzyme.strand[j]:
match_enzyme = False
break
if match_enzyme:
cut_end = i
cutted_stack.stack.append(dna.strand[cut_from: cut_end + enzyme.cut_position])
cut_from = cut_end + enzyme.cut_position
cut_end = len(dna.strand) + 1
cutted_stack.stack.append(dna.strand[cut_from: cut_end + enzyme.cut_position])
def cut_all(self, dna, enzyme_stack, cutted_stack):
cut_from = 0
for i in range(len(dna.strand)):
for n in range(len(enzyme_stack.stack)):
match_enzyme = True
for j in range(len(enzyme_stack.stack[n].strand)):
                    if i + j < len(dna.strand):
if dna.strand[i + j] != enzyme_stack.stack[n].strand[j]:
match_enzyme = False
break
else:
match_enzyme = False
break
if match_enzyme:
cut_end = i
cutted_stack.stack.append(dna.strand[cut_from: cut_end + enzyme_stack.stack[n].cut_position])
cut_from = cut_end + enzyme_stack.stack[n].cut_position
if dna.strand[cut_from: len(dna.strand)+1] != '':
cutted_stack.stack.append(dna.strand[cut_from: len(dna.strand)+1])
'''
def show_base(self):
self.count_base()
for _ in range(4):
print('{} Base: {:0>5}'.format())
def count_base(self):
n_a, n_t, n_c, n_g = 0, 0, 0, 0
for base in self.strand:
if base == 'A':
n_a += 1
elif base == 'G':
n_g += 1
'''
'''
@staticmethod
def cut_position(strand, delim):
delim_position = 0
for base in strand:
if base != delim:
delim_position += 1
elif base == delim:
if delim_position != 0:
return delim_position+1, strand.replace(delim, '').upper()
else:
return 'Not Found', strand.replace(delim, '').upper()
'''
# tester
if __name__ == '__main__':
# DNA
test_dna = DNA('gaccggcctaggatccgggc', '3to5')
test_dna.rename('Test DNA')
test_dna.show_all()
print(test_dna.a)
test_dna.show_length()
print('='*60)
# Enzyme <BamHI>
test_enzyme1 = Enzyme('cctag|g', '3to5', '|')
test_enzyme1.rename('BamHI')
test_enzyme1.show_all()
print(test_enzyme1.a)
test_enzyme1.show_length()
    print('Cut Position = {}'.format(test_enzyme1.cut_position))
# Enzyme <HaeIII>
test_enzyme2 = Enzyme('cc|gg', '3to5', '|')
test_enzyme2.rename('HaeIII')
test_enzyme2.show_all()
print(test_enzyme2.a)
test_enzyme2.show_length()
    print('Cut Position = {}'.format(test_enzyme2.cut_position))
# EnzymeStack
enzyme_stack = EnzymeStack()
enzyme_stack.add(test_enzyme1)
enzyme_stack.add(test_enzyme2)
enzyme_stack.show()
# DNA_stack
dna_keeper = DNAStack()
# cutter
cutter = CutTest(enzyme_stack)
# cut specific
cutter.cut_specific(test_dna, test_enzyme1, dna_keeper)
cutter.cut_specific(test_dna, test_enzyme2, dna_keeper)
# cut all
cutter.cut_all(test_dna, enzyme_stack, dna_keeper)
# output
print(dna_keeper.stack)
``` |
{
"source": "jkumwenda/AGP",
"score": 2
} |
#### File: AGP/backend/views.py
```python
from django.shortcuts import render
from django.contrib.auth.models import User
from rest_framework import viewsets, permissions
from .serializers import *
from .models import *
from .views_helper import *
from rest_framework.response import Response
from rest_framework.decorators import action
import json
class UserViewSet(viewsets.ModelViewSet):
queryset = User.objects.all()
serializer_class = UserSerializer
def get_queryset(self):
queryset = super().get_queryset()
data = ViewsHelper.filter_test(self, queryset)
return data
class SlotSizeViewSet(viewsets.ModelViewSet):
queryset = SlotSize.objects.all()
serializer_class = SlotSizeSerializer
class SignupViewSet(viewsets.ModelViewSet):
permission_classes = (permissions.AllowAny,)
queryset = User.objects.all()
serializer_class = UserSerializer
http_method_names = ['post']
class CountryViewSet(viewsets.ModelViewSet):
queryset = Country.objects.all()
serializer_class = CountrySerializer
class CourseViewSet(viewsets.ModelViewSet):
queryset = Course.objects.all()
serializer_class = CourseSerializer
# /COURSE/1/TYPES/
@action(methods=['GET'], detail=True)
def types(self, request, pk=None):
course = self.get_object()
serializer = CourseTypeSerializer(
course.types.all(), many=True
)
return Response(serializer.data)
class RatingViewSet(viewsets.ModelViewSet):
queryset = Rating.objects.all()
serializer_class = RatingSerializer
class TypeViewSet(viewsets.ModelViewSet):
queryset = Type.objects.all()
serializer_class = TypeSerializer
class CourseTypeViewSet(viewsets.ModelViewSet):
queryset = CourseType.objects.all()
serializer_class = CourseTypeSerializer
class HoleViewSet(viewsets.ModelViewSet):
queryset = Hole.objects.all()
serializer_class = HoleSerializer
class CourseTypeHoleViewSet(viewsets.ModelViewSet):
queryset = CourseTypeHole.objects.all()
serializer_class = CourseTypeHoleSerializer
class ClubViewSet(viewsets.ModelViewSet):
queryset = Club.objects.all()
serializer_class = ClubSerializer
class ClubCourseViewSet(viewsets.ModelViewSet):
queryset = ClubCourse.objects.all()
serializer_class = ClubCourseSerializer
class ProfileViewSet(viewsets.ModelViewSet):
queryset = Profile.objects.all()
serializer_class = ProfileSerializer
@action(methods=['POST'], detail=True)
def checkPermissions(self, request, pk=None):
        permissionList = request.data['permissionCodes']
profile = self.get_object()
profileRoles = ProfileRole.objects.filter(fk_profileid=pk)
codes = []
for profileRole in profileRoles:
rolePermissions = RolePermission.objects.filter(
fk_roleid=profileRole.fk_roleid)
codes.append([
                permission.fk_permissionid.code for permission in rolePermissions if permission.fk_permissionid.code in permissionList])
return Response(sum(codes, []))
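    # Illustrative call (the URL prefix depends on how the router registers this viewset):
    # POST .../profiles/<pk>/checkPermissions/ with {"permissionCodes": [...]} returns the
    # subset of those codes granted through any of the profile's roles (duplicates possible).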
class UserProfileViewSet(viewsets.ModelViewSet):
queryset = Profile.objects.all()
serializer_class = UserProfileSerializer
def get_queryset(self):
queryset = super().get_queryset()
data = ViewsHelper.filter_user_profile(self, queryset,
self.request.query_params.get('username'))
return data
class PlayerViewSet(viewsets.ModelViewSet):
queryset = ProfileRole.objects.all()
serializer_class = PlayerSerializer
def get_queryset(self):
queryset = super().get_queryset()
data = ViewsHelper.filter_player_profile(self, queryset)
return data
class ClubProfileViewSet(viewsets.ModelViewSet):
queryset = ClubProfile.objects.all()
serializer_class = ClubProfileSerializer
class HandicapViewSet(viewsets.ModelViewSet):
queryset = Handicap.objects.all()
serializer_class = HandicapSerializer
class RoleViewSet(viewsets.ModelViewSet):
queryset = Role.objects.all()
serializer_class = RoleSerializer
@action(methods=['POST', 'PATCH'], detail=True)
def permission(self, request, pk=None):
permission = Permission.objects.get(
pk_permissionid=request.data['pk_permissionid'])
role = self.get_object()
if request.method == 'POST':
role.Permissions.add(permission)
elif request.method == 'PATCH':
role.Permissions.remove(permission)
role.save()
serializer = RoleSerializer(role, many=False)
return Response(serializer.data)
class GenderViewSet(viewsets.ModelViewSet):
queryset = Gender.objects.all()
serializer_class = GenderSerializer
class ProfileGenderViewSet(viewsets.ModelViewSet):
permission_classes = (permissions.AllowAny,)
queryset = Gender.objects.all()
serializer_class = GenderSerializer
class ProfileRoleViewSet(viewsets.ModelViewSet):
queryset = ProfileRole.objects.all()
serializer_class = ProfileRoleSerializer
def get_queryset(self):
queryset = super().get_queryset()
data = ViewsHelper.filter_profile_role(
self, queryset, self.request.query_params.get('profile_id'))
return data
class PermissionViewSet(viewsets.ModelViewSet):
queryset = Permission.objects.all()
serializer_class = PermissionSerializer
class RolePermissionViewSet(viewsets.ModelViewSet):
queryset = RolePermission.objects.all()
serializer_class = RolePermissionSerializer
def get_queryset(self):
queryset = super().get_queryset()
data = ViewsHelper.filter_role_permission(
self, queryset, self.request.query_params.get('permission'),
self.request.query_params.get('profile_id'))
return data
class RegistrationDateViewSet(viewsets.ModelViewSet):
queryset = RegistrationDate.objects.all()
serializer_class = RegistrationDateSerializer
class FormatViewSet(viewsets.ModelViewSet):
queryset = Format.objects.all()
serializer_class = FormatSerializer
class DrawTypeViewSet(viewsets.ModelViewSet):
queryset = DrawType.objects.all()
serializer_class = DrawTypeSerializer
class RegisterViewSet(viewsets.ModelViewSet):
queryset = Register.objects.all()
serializer_class = RegisterSerializer
def get_queryset(self):
queryset = super().get_queryset()
eventId = self.request.query_params.get('event')
profile = self.request.query_params.get('profile')
return ViewsHelper.filter_scores(self, queryset, eventId, profile)
class SlotViewSet(viewsets.ModelViewSet):
queryset = Slot.objects.all()
serializer_class = SlotSerializer
class InformationViewSet(viewsets.ModelViewSet):
queryset = Information.objects.all()
serializer_class = InformationSerializer
class FieldViewSet(viewsets.ModelViewSet):
queryset = Field.objects.all()
serializer_class = FieldSerializer
class EventViewSet(viewsets.ModelViewSet):
queryset = Event.objects.all()
serializer_class = EventSerializer
def get_queryset(self):
queryset = super().get_queryset()
profileId = self.request.query_params.get('profile')
return ViewsHelper.filter_events(self, queryset, profileId)
class PublicEventViewSet(viewsets.ModelViewSet):
permission_classes = (permissions.AllowAny,)
queryset = Event.objects.all()
serializer_class = EventSerializer
class EventTypeViewSet(viewsets.ModelViewSet):
queryset = EventType.objects.all()
serializer_class = EventTypeSerializer
class EventFormatViewSet(viewsets.ModelViewSet):
queryset = EventFormat.objects.all()
serializer_class = EventFormatSerializer
class FieldViewSet(viewsets.ModelViewSet):
queryset = Field.objects.all()
serializer_class = FieldSerializer
class GameViewSet(viewsets.ModelViewSet):
queryset = Event.objects.all()
serializer_class = GameSerializer
def get_queryset(self):
queryset = super().get_queryset()
data = ViewsHelper.filter_games(
self, queryset, self.request.query_params.get('end_date'),
)
return data
class ScoreViewSet(viewsets.ModelViewSet):
queryset = Score.objects.all()
serializer_class = ScoreSerializer
def get_queryset(self):
queryset = super().get_queryset()
eventId = self.request.query_params.get('event')
profile = self.request.query_params.get('profile')
return ViewsHelper.filter_scores(self, queryset, eventId, profile)
class EventCourseTypeViewSet(viewsets.ModelViewSet):
queryset = EventCourseType.objects.all()
serializer_class = EventCourseTypeSerializer
def get_queryset(self):
queryset = super().get_queryset()
eventId = self.request.query_params.get('event')
genderId = self.request.query_params.get('gender')
return ViewsHelper.filter_event_course_types(self, queryset, eventId, genderId)
``` |
{
"source": "jkung2314/InfoSec",
"score": 2
} |
#### File: InfoSec/dmca_v3/dmca_processing.py
```python
import base64
import credentials as c
import connectEmail
import sys
import re
import datetime
import pytz
import time
from xmljson import badgerfish as bf
from xml.etree.ElementTree import fromstring
from json import dumps
from json import loads
from ipaddress import ip_network, ip_address
from trueUser import trueUser
def to_pst(dateconvert):
if not re.search(r"[0-9]+-[0-9]+-[0-9]+ [0-9]+:[0-9]+:[0-9]+.[0-9]+ GMT", \
dateconvert) is None:
fmt = '%Y-%m-%d %H:%M:%S.%f GMT'
elif not re.search(r"[0-9]+-[0-9]+-[0-9]+T[0-9]+:[0-9]+:[0-9]+Z", \
dateconvert) is None:
fmt = '%Y-%m-%dT%H:%M:%SZ'
else:
fmt = '%Y-%m-%dT%H:%M:%S.%fZ'
# convert GMT/ZULU to PST
# create 'naive' datetime object
utc_dt = datetime.datetime.strptime(dateconvert, str(fmt))
# make datetime object 'aware'
utc_dt = pytz.utc.localize(utc_dt)
# create PST time zone
pa_tz = pytz.timezone('US/Pacific')
# convert UTC to PST
pa_dt = pa_tz.normalize(utc_dt.astimezone(pa_tz))
# convert PST datetime object to a string
dateconvert = str(pa_dt)
# cut off excess time info
dateconvert = re.split('-[0-9]{2}:[0-9]{2}$', dateconvert)[0]
# check for ending decimal format
if not re.search('[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}.[0-9]+', dateconvert) is None:
dateconvert = re.split('.[0-9]+$', dateconvert)[0]
return dateconvert
def to_timestamp(date):
# convert pst date to unix timestamp
ts = time.mktime(datetime.datetime.strptime(date, "%Y-%m-%d %H:%M:%S").timetuple())
return ts
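# Illustrative round trip: to_pst('2019-07-04T19:30:00Z') yields '2019-07-04 12:30:00'
# (UTC-7 during daylight saving), and to_timestamp() of that string returns the
# corresponding Unix time interpreted in the machine's local zone.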
def formatEmailBodyToJson(emailBody):
## Flatten this email text into a single line of text suitable for translaction into json
# emails come out of google with a line limit and the continuation character is an equals sign
# look for an equals sign followed by a new line and remove that.
emailBody = emailBody.replace(b'=\n', b'')
# This data from google also has some html escape artifacts, %3D would be an equals sign, instead we
# are just left with 3D. Remove it.
emailBody = emailBody.replace(b'3D', b'')
# The media companies also pollute their xml with a bunch of garbage that makes conversion to json impossible
# Remove it. This is all found inside the <Infringement> tag.
emailBody = emailBody[:emailBody.find(b'xmlns=')] + emailBody[emailBody.find(b'.xsd\"') + 5:]
# At this stage we still have the entire email. We only want the XML data. Look for the start of the XML.
# Typically the XML ends with the closing Infringement tag so look for that to signify the end of the XML.
xmlstart = emailBody.find(b"<?xml")
xmlend = emailBody.find(b"</Infringement>") + len(b"</Infringement>")
# slice the email text into just the XML now that we have found starting and ending positions.
emailBody = emailBody[xmlstart:xmlend]
# Convert this XML into json data.
jsondata = loads(dumps((bf.data(fromstring(emailBody)))))
return jsondata
def send_email(results, start_time):
subjectLine = '**DMCA Report**'
message = results
message += '\n\n' + \
("DMCA script finished in %s seconds" % (time.time() - start_time + 1.5))
connectEmail.sendEmail(c.dmcauser, base64.b64decode(c.dmcapass), ['<EMAIL>','<EMAIL>','<EMAIL>'], subjectLine, message)
def main():
start_time = time.time()
dataholder = {}
# Initialize trueUser identification class
trueUserObj = trueUser()
# Get emails from the dmca email account
bodiesOfEmails = connectEmail.connect(c.dmcauser, base64.urlsafe_b64decode(c.dmcapass.encode()).decode(), 1, 'dmca')
# Parse each email body storing similar IPs into the dataholder variable
print("numberofemails: {}".format(len(bodiesOfEmails)))
for body in bodiesOfEmails:
# convert and truncate the email body to usable json case data
jsondata = formatEmailBodyToJson(body)
# store the fields we need from the infringement case
caseid = str(jsondata['Infringement']['Case']['ID'].get('$'))
ip = str(jsondata['Infringement']['Source']['IP_Address'].get('$'))
port = str(jsondata['Infringement']['Source']['Port'].get('$'))
tstamp = str(jsondata['Infringement']['Source']['TimeStamp'].get('$'))
fname = str(jsondata['Infringement']['Content']['Item']['FileName'].get('$'))
title = str(jsondata['Infringement']['Content']['Item']['Title'].get('$'))
# We use this so we can 'rollup' the case data with each unique IP.
# if we already have an entry in the holder for this IP append the new case to the array stored in the
# holder dictionary
# else, create a new array associated with the ip in the dictionary and populate the array with the array
# of case values.
if ip in dataholder:
dataholder[ip].append([caseid, ip, port, tstamp, fname, title])
else:
dataholder[ip] = [[caseid, ip, port, tstamp, fname, title]]
# Build the email and also look for users
msg = ''
for ip in dataholder:
msg = msg + ip # item is the IP address
msg = msg + "\n" + "---------------------------------------------------------------------------------------------\n"
for case in dataholder[ip]:
# Pythonized time object in PST
pythonizedInfringementTime = datetime.datetime.strptime(case[3], "%Y-%m-%dT%H:%M:%SZ")
utc = pytz.timezone('UTC')
infringementTime = utc.localize(pythonizedInfringementTime)
infringementTime = infringementTime.astimezone(pytz.timezone('US/Pacific'))
natted = False
# Get correct ip
network = ip_network("192.168.127.12/28")
infringementIP = case[1]
if ip_address(infringementIP) in network:
# Get netflow start + end times
netflowStartTime = datetime.datetime.strftime(infringementTime - datetime.timedelta(minutes=10), "%Y-%m-%d %H:%M")
netflowEndTime = datetime.datetime.strftime(infringementTime + datetime.timedelta(minutes=10), "%Y-%m-%d %H:%M")
trueIP = trueUserObj.getTrueIP(case[2], case[1], netflowStartTime, netflowEndTime)
natted = True
# If can't locate trueIP
if trueIP == None:
msg = msg + "CASE ID: {0}\n".format(case[0])
msg = msg + "IP: \t{0}\n".format(case[1])
msg = msg + "TRUE IP: NOT FOUND\n".format(trueIP)
msg = msg + "Port: \t{0}\n".format(case[2])
msg = msg + "TIMESTAMP: {0} Pacific\n".format(to_pst(case[3]))
msg = msg + "FILENAME: {0}\n".format(case[4])
msg = msg + "TITLE: \t{0}\n".format(case[5])
msg = msg + "USERS: \n"
msg = msg + "TRUE IP NOT FOUND, CONTINUING TO NEXT CASE..."
continue
else:
trueIP = infringementIP
msg = msg + "CASE ID: {0}\n".format(case[0])
msg = msg + "IP: \t{0}\n".format(case[1])
if natted:
msg = msg + "TRUE IP: {0}\n".format(trueIP)
msg = msg + "Port: \t{0}\n".format(case[2])
msg = msg + "TIMESTAMP: {0} Pacific\n".format(to_pst(case[3]))
msg = msg + "FILENAME: {0}\n".format(case[4])
msg = msg + "TITLE: \t{0}\n".format(case[5])
msg = msg + "USERS: \n"
# User identification
userQueryStartTime = datetime.datetime.strftime(infringementTime - datetime.timedelta(minutes=240), "%Y-%m-%d %H:%M")
userQueryEndTime = datetime.datetime.strftime(infringementTime + datetime.timedelta(minutes=120), "%Y-%m-%d %H:%M")
users = trueUserObj.getUser(trueIP, userQueryStartTime, userQueryEndTime)
# If results are found but user is null, run macaddress search
if len(users[2]) != 0 and users[0] != True:
# Print found results from user identification query, if exists
if len(users[3]) != 0:
for user in users[3]:
msg = msg + "username: {0}, userAffiliation: {1}, authsource: {2}, macaddress: {3}, authtime: {4}\n".format(user[1], user[2], user[4], user[3], user[0])
macaddrQueryStartTime = datetime.datetime.strftime(infringementTime - datetime.timedelta(minutes=10080), "%Y-%m-%d %H:%M")
macaddrQueryEndTime = datetime.datetime.strftime(infringementTime, "%Y-%m-%d %H:%M")
resultsList = trueUserObj.searchMacaddress(users[2], macaddrQueryStartTime, macaddrQueryEndTime)
if resultsList is not None:
for result in resultsList:
msg = msg + "username: {0}, userAffiliation: {1}, authsource: {2}, macaddress: {3}, authtime: {4}\n".format(result[1], result[2], result[6], result[3], result[0])
msg = msg + "\n\n"
# If results are found and there exists one non-null user identified, print rows
elif len(users[3]) != 0 and users[0] == True:
for user in users[3]:
msg = msg + "username: {0}, userAffiliation: {1}, authsource: {2}, macaddress: {3}, authtime: {4}\n".format(user[1], user[2], user[4], user[3], user[0])
msg = msg + "\n\n"
# If no results found from user or macaddress search
else:
msg = msg + "No login entries for any users found\n\n"
send_email(msg, start_time)
main()
print("done...")
```
#### File: InfoSec/dmca_v3/ldapServer.py
```python
import ldap #python-ldap
import credentials
import json
class ldapServer:
_LDAP_SERVER = ''
_LDAP_DN = ''
_LDAP_FIELDS = []
_connection = None
def __init__(self, LDAP_SERVER="", LDAP_DN="", LDAP_FIELDS=""):
        self._LDAP_SERVER = LDAP_SERVER
        self._LDAP_DN = LDAP_DN
        self._LDAP_FIELDS = LDAP_FIELDS
def setServer(self):
self._LDAP_SERVER = credentials.UCSC_LDAP_SERVER
self._LDAP_DN = credentials.UCSC_LDAP_DN
self._LDAP_FIELDS = credentials.UCSC_LDAP_FIELDS
def connect(self):
self._connection = ldap.initialize(self._LDAP_SERVER)
def search(self, uservalue):
""" search for exact match on uid or mail field with wild card """
if self._connection is None:
self.connect()
results = self._connection.search_s(self._LDAP_DN, ldap.SCOPE_SUBTREE, '(|(uid={0})(mail=*{0}*))'.format(uservalue), self._LDAP_FIELDS )
return results
def uid_search(self, username):
""" search by uid field only with an exact match"""
if self._connection is None:
self.connect()
results = self._connection.search_s(self._LDAP_DN, ldap.SCOPE_SUBTREE, '(uid={0})'.format(username), self._LDAP_FIELDS )
return results
def bind(self, username, password):
if self._connection is None:
self.connect()
self._connection.simple_bind_s(credentials.UCSC_LDAP_BIND_DN.format(username), password)
return True
def unbind(self):
self._connection.unbind()
self._connection = None
return True
```
#### File: InfoSec/dmca_v3/trueUser.py
```python
import credentials
import datetime
import pytz
from elasticsearch import Elasticsearch
import json
import sys
import ldap
from ldapServer import ldapServer
class trueUser:
# Elasticsearch object
es = None
# ldap object
ldapObj = None
# Get current timezone UTC offset
pacific_now = datetime.datetime.now(pytz.timezone('US/Pacific'))
timezone = "-0{0}:00".format(str(pacific_now.utcoffset().total_seconds()/60/60)[1])
def __init__(self):
self.es = Elasticsearch(
[credentials.es_server],
http_auth=(credentials.es_username, credentials.es_password),
port=credentials.es_port,
use_ssl=True,
verify_certs=False,
# You must have a copy of the CA cert present to use in this code
ca_certs=credentials.ca_certs
)
# Connect to UCSC ldap server
self.ldapObj = ldapServer()
self.ldapObj.setServer()
try:
self.ldapObj.connect()
self.ldapObj.bind(credentials.LDAP_USERNAME, credentials.LDAP_PASSWORD)
except ldap.INVALID_CREDENTIALS as e:
print("Invalid credentials")
except Exception as e:
print(e)
# Searches Elasticsearch for trueIP, given NATTED IP, port, start, and end time
# Returns trueIP if exists
def getTrueIP(self, infringementPort, infringementIP, queryStartTime, queryEndTime):
pacific_now = datetime.datetime.now(pytz.timezone('US/Pacific'))
timezone = "-0{0}:00".format(str(pacific_now.utcoffset().total_seconds()/60/60)[1])
netflowQuery = json.dumps({
"query": {
"bool": {
"must": [{
"query_string": {
"query": "netflow.xlate_src_port: {0} AND netflow.xlate_src_addr_ipv4: {1} AND netflow.natEvent: 1".format(infringementPort, infringementIP),
"analyze_wildcard": True,
"default_field": "*"
}
}, {
"range": {
"@timestamp": {
"gte": queryStartTime,
"lte": queryEndTime,
"format": "yyyy-MM-dd HH:mm",
"time_zone": self.timezone
}
}
}],
"filter": [],
"should": [],
"must_not": []
}
}
})
results = self.es.search(index='netflow-*', body=netflowQuery, size=2000)
if len(results['hits']['hits']) == 0:
trueIP = None
else:
trueIP = results['hits']['hits'][0]['_source']['srcip']
return trueIP
# Searches Elasticsearch for user, given trueIP, start, and end time
# Prints all authentications with matching ip and timerange
# Returned values:
# boolean value for user found, identified users list (may be empty),
# macaddress list (may be empty), and results list (may be empty)
def getUser(self, trueIP, queryStartTime, queryEndTime):
queryUser = json.dumps({
"query": {
"bool": {
"must": [
{
"match": {"srcip": trueIP}
},
{
"range": {
"@timestamp": {
"gte": queryStartTime,
"lte": queryEndTime,
"format": "yyyy-MM-dd HH:mm",
"time_zone": self.timezone
}
}
}
],
"filter": [],
"should": []
}
}
})
results = self.es.search(index='logstash-auth-*', body=queryUser, size=2000)
foundUser = False
foundUsersList = []
macaddressList = []
resultsList = []
for hit in results['hits']['hits']:
data = hit['_source']
username = data['username']
if username != 'null':
foundUsersList.append(username)
foundUser = True
timestamp = data['@timestamp']
try:
macaddress = data['macaddress']
except:
macaddress = None
authsource = data['authsource']
if username == 'null':
userAffiliation = None
if macaddress not in macaddressList:
macaddressList.append(macaddress)
else:
try:
userAffiliation = self.ldapObj.uid_search(username)[0][1]['eduPersonAffiliation'][0].decode('utf-8')
except:
userAffiliation = None
if macaddress is not None:
macaddress = macaddress.upper()
result = [timestamp, username, userAffiliation, macaddress, authsource]
resultsList.append(result)
return foundUser, foundUsersList, macaddressList, resultsList
# Searches Elasticsearch for user, given macaddress, start, and end time
# Returns list with all authentications with matching macaddress(es) and timerange (may be empty)
def searchMacaddress(self, macaddressList, queryStartTime, queryEndTime):
for macaddress in macaddressList:
query_macaddress = json.dumps({
"query": {
"bool": {
"must": [
{
"match": {"macaddress": macaddress}
},
{
"range": {
"@timestamp": {
"gte": queryStartTime,
"lte": queryEndTime,
"format": "yyyy-MM-dd HH:mm",
"time_zone": self.timezone
}
}
}
],
"filter": [],
"should": []
}
}
})
result = self.es.search(index='logstash-auth-*', body=query_macaddress, size=2000)
resultsList = []
for hit in result['hits']['hits']:
data = hit['_source']
username = data['username']
if data['macaddress'] == macaddress:
if username != 'null':
timestamp = data['@timestamp']
realMacaddress = data['macaddress'].upper()
try:
city = data['geoip']['city_name']
region = data['geoip']['region_name']
except:
city = None
region = None
authsource = data['authsource']
try:
userAffiliation = self.ldapObj.uid_search(username)[0][1]['eduPersonAffiliation'][0].decode('utf-8')
except:
userAffiliation = None
result = [timestamp, username, userAffiliation, realMacaddress, city, region, authsource]
resultsList.append(result)
return resultsList
```
#### File: InfoSec/HaveIBeenPwned/pwned.py
```python
import settings
import ldapServer
from DB import compromisedDB
from kafka import KafkaProducer
from kafka import KafkaConsumer
import json
import imaplib
import email
import re
import urllib2
import requests
import datetime
import time
database = compromisedDB()
#start connection
try:
database.connect()
except:
print ("Unable to connect to database.")
ldapObj = ldapServer.ldapServer() #New ldap object
ldapObj.connect() #Connect to server
# Connect KafkaProducer
kafkaserver = ["itsec-prod-elk-3.ucsc.edu:9092", "itsec-prod-elk-8.ucsc.edu:9092", "itsec-prod-elk-9.ucsc.edu:9092"]
topic = 'secinc'
try:
kproducer = KafkaProducer(bootstrap_servers = kafkaserver)
except Exception as ex:
raise Exception(ex)
#Login to email
M = imaplib.IMAP4_SSL('imap.gmail.com')
M.login(settings.email_login, settings.email_pass)
M.select('inbox')
#Search for any emails within the last day from "Have I Been Pwned"
date = (datetime.datetime.now().date() - datetime.timedelta(days=1)).strftime("%d-%b-%Y")
rv, data = M.search(None, 'From', "Have I Been Pwned", 'Since', date)
data[0] = data[0].split()
try:
#Get last email
mail = data[0][-1]
except IndexError:
print("No new mail from HaveIBeenPwned in the last day")
exit(1)
#Fetch message data
typ, msg_data = M.fetch(str(mail), '(RFC822)')
msg = email.message_from_string(msg_data[0][1])
msg_id = msg.get('Message-ID')
subject = msg.get('Subject')
subject = subject.replace(" ", "_")
#Determine if email has been previously processed
history = open("processed_emails.txt", "r")
if msg_id in history.read():
history.close()
exit(1)
else:
print ("PROCESSING <{0}>: <{1}>").format(datetime.datetime.now(), subject)
history = open("processed_emails.txt", "a")
history.write(str(msg_id) + "\n")
history.close()
#Decode and get Pastebin link
msg = str(msg.get_payload()[0])
code = None
link = re.findall('(https?:\/\/(?:www\.|(?!www))[a-zA-Z0-9][a-zA-Z0-9-]+[a-zA-Z0-9]\.[^\s]{2,}|www\.[a-zA-Z0-9][a-zA-Z0-9-]+[a-zA-Z0-9]\.[^\s]{2,}|https?:\/\/(?:www\.|(?!www))[a-zA-Z0-9]\.[^\s]{2,}|www\.[a-zA-Z0-9]\.[^\s]{2,})', str(msg))
for i in link:
if 'https://scrape.pastebin.com/' in i or 'https://pastebin.com/' in i:
code = i
M.close()
if code is None:
print("Pastebin Link Not Found")
exit(1)
code = code.rsplit('/', 1)[-1]
url = 'https://pastebin.com/raw/' + str(code)
try:
userList = urllib2.urlopen(url).read()
except:
print("Pastebin link expired.")
exit(1)
today = datetime.datetime.now().date()
today = str(date).replace("-", "_")
#Write to log file
r = requests.get(url, verify=False)
fName = "haveibeenpwneddump_" + today + subject + ".log"
file = open(fName, 'wb')
file.write(r.content)
file.close()
#Send json data to kafka
def kafkaSend(username, email, row):
data = {}
data['username'] = username
data['email'] = email
data['category'] = 'compromised account'
data['reason'] = 'Have I Been Pwned - Valid Credentials'
data['detection_timestamp'] = datetime.datetime.today().strftime("%Y-%m-%d %H:%M:%S")
data['logrow'] = row
# Format JSON
json_data = json.dumps(data)
# Send to Kafka
kproducer.send(topic, json_data.encode('utf-8'))
kproducer.flush()
#Function for Binding
def Bind(username, password, user, row):
result = ldapObj.bind(username, password)
if result == True:
print ("[{0}] *** Bind Successful: Valid credentials ***".format(username))
info = ldapObj.uid_search(username)
email = info[0][1]['mail'][0]
kafkaSend(username, email, row)
else:
print ("[{0}] *** Bind Failed: Invalid credentials ***".format(username))
#LDAP function for email:password format
def Fldap(username, user, password, row):
result = ldapObj.uid_search(username)
if len(result) < 1:
print ("{0} is not in campus LDAP".format(username))
else:
print ([result[0][0], username, user])
Bind(username, password, user, row)
# sleep for a little bit to avoid hammering the ldap
time.sleep(0.1)
#LDAP function for username only format
def Uldap(username, row):
result = ldapObj.uid_search(username)
if len(result) < 1:
print ("{0} is not in campus LDAP".format(username))
else:
print (result)
#Check if in Postgres database
def inDatabase(username, password):
row = database.searchUsername(username)
if row[0] == 0:
return False
else:
if password == None:
return True
else:
data = database.searchUsernamePassword(username, password)
if data[0] == 0:
return False
return True
#finish
def done():
database.close() #commit and close
exit(1)
#Process file
try:
userList = userList.strip().rsplit('\n')
except IOError as e:
print (e)
current_time = datetime.datetime.today().strftime("%Y-%m-%d %H:%M:%S")  # detection timestamp for the insert below (format assumed to match kafkaSend)
for user in userList:
row = user
if str(user).find("@") > 0:
username = user[0:str(user).find("@")]
else:
username = user
if "ucsc" in str(user):
if ":" in str(user):
password = user.split(":")
password = password[1]
domain = user.split("@")
domain = domain[1].split(":")
domain = domain[0]
else:
password = None
domain = user.split("@")
domain = domain[1]
if inDatabase(username, password) == False:
dumpName = 'Have I Been Pwned'
dateAdded = None
database.insert(username, password, domain, current_time, dumpName, dateAdded)
if password != None:
Fldap(username, user, password, row)
else:
Uldap(username, row)
else:
print (username + " LOCATED in database, ignoring...")
done()
``` |
{
"source": "jkunimune15/kodi-analysis",
"score": 3
} |
#### File: kodi-analysis/src/diameter.py
```python
import numpy as np
import matplotlib.pyplot as plt
""" E is in MeV, D in μm, vB in μm/h, τ in h, and k in (MeV)^-1 """
def E(D, τ=5, vB=2.66, k=.8, n=1.2, a=1, z=1):
return z**2*a*((2*τ*vB/D - 1)/k)**(1/n)
def D(E, τ=5, vB=2.66, k=.8, n=1.2, a=1, z=1):
return np.where(E > 0,
2*τ*vB/(1 + k*(E/(z**2*a))**n),
np.nan)
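# Worked example: with the default etch parameters above, a 14.7 MeV proton gives
# D = 2*5*2.66/(1 + 0.8*14.7**1.2) ≈ 1.26 μm, and E(1.26) recovers ≈ 14.7 MeV.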
if __name__ == '__main__':
plt.rcParams.update({'font.family': 'serif', 'font.size': 14})
x = np.linspace(1, 16)
# for k, n in [(.849, .806), (.626, .867), (.651, .830), (.651, .779), (.868, 1.322)]:
# plt.plot(x, D(x, k=k, n=n), '-')
plt.plot(x, D(x, a=1, z=1), '-k', linewidth=3)
# print(x.min(), E(3), E(1.7), x.max())
# plt.fill_between([E(1.7), x.max()], [D(x.max()), D(x.max())], [1.7, 1.7], color='b', alpha=.2)
# plt.fill_between([E(3), x.min()], [3, 3], [D(x.min()), D(x.min())], color='r', alpha=.2)
# plt.title("Relationship between incident energy and track diameter")
plt.xlabel("Energy (MeV)")
plt.ylabel("Diameter (μm)")
plt.tight_layout()
plt.show()
```
#### File: kodi-analysis/src/main.py
```python
import logging
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.colors import CenteredNorm, ListedColormap, LinearSegmentedColormap, LogNorm
import numpy as np
import os
import pandas as pd
import time
from cmap import REDS, ORANGES, YELLOWS, GREENS, CYANS, BLUES, VIOLETS, GREYS, COFFEE
from coordinate import tim_coordinates, project
from hdf5_util import load_hdf5
import segnal as mysignal
from reconstruct_2d import reconstruct, get_relative_aperture_positions
plt.rcParams["legend.framealpha"] = 1
plt.rcParams.update({'font.family': 'sans', 'font.size': 16})
e_in_bounds = 2
SKIP_RECONSTRUCTION = False
SHOW_PLOTS = False
SHOW_OFFSET = False
OBJECT_SIZE = 200e-4 # (cm)
RESOLUTION = 5e-4
EXPANSION_FACTOR = 1.20
PLOT_CONTOUR = .25
PLOT_RADIUS = 80 # (μm)
APERTURE_CONFIGURATION = 'hex'
CHARGE_FITTING = 'all'
MAX_NUM_PIXELS = 200
INPUT_FOLDER = '../scans/'
OUTPUT_FOLDER = '../results/'
SHOT = 'Shot number'
TIM = 'TIM'
APERTURE_RADIUS = 'Aperture Radius'
APERTURE_SPACING = 'Aperture Separation'
APERTURE_DISTANCE = 'L1'
MAGNIFICATION = 'Magnification'
ROTATION = 'Rotation'
ETCH_TIME = 'Etch time'
R_OFFSET = 'Offset (um)'
Θ_OFFSET = 'Offset theta (deg)'
Φ_OFFSET = 'Offset phi (deg)'
R_FLOW = 'Flow (km/s)'
Θ_FLOW = 'Flow theta (deg)'
Φ_FLOW = 'Flow phi (deg)'
CMAP = {'all': GREYS, 'lo': REDS, 'md': GREENS, 'hi': BLUES, 'xray': VIOLETS, 'synth': GREYS,
'0': GREYS, '1': REDS, '2': ORANGES, '3': YELLOWS, '4': GREENS, '5': CYANS, '6': BLUES, '7': VIOLETS}
def center_of_mass(x_bins, y_bins, N):
return np.array([
np.average((x_bins[:-1] + x_bins[1:])/2, weights=N.sum(axis=1)),
np.average((y_bins[:-1] + y_bins[1:])/2, weights=N.sum(axis=0))])
def resample(x_bins, y_bins, N):
""" double the bin size of this square 2d histogram """
n = (x_bins.size - 1)//2
x_bins = x_bins[::2]
y_bins = y_bins[::2]
Np = np.zeros((n, n))
for i in range(0, 2):
for j in range(0, 2):
Np += N[i:2*n:2,j:2*n:2]
return x_bins, y_bins, Np
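# Illustrative shapes: given 201 bin edges and a 200x200 histogram, resample returns
# 101 edges and a 100x100 histogram, each cell holding the sum of a 2x2 block of counts.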
def plot_cooked_data(xC_bins, yC_bins, NC, xI_bins, yI_bins, NI,
x0, y0, M, energy_min, energy_max, energy_cut, data, **kwargs):
""" plot the data along with the initial fit to it, and the
reconstructed superaperture.
"""
while xI_bins.size > MAX_NUM_PIXELS+1: # resample the penumbral images to increase the bin size
xC_bins, yC_bins, NC = resample(xC_bins, yC_bins, NC)
xI_bins, yI_bins, NI = resample(xI_bins, yI_bins, NI)
s0 = data[APERTURE_SPACING]*1e-4
r0 = data[APERTURE_RADIUS]*1e-4*(M + 1)
r_img = (xI_bins.max() - xI_bins.min())/2
plt.figure()
plt.pcolormesh(xC_bins, yC_bins, NC.T, vmax=np.quantile(NC, (NC.size-6)/NC.size), rasterized=True)
T = np.linspace(0, 2*np.pi)
for dx, dy in get_relative_aperture_positions(s0, r0, xC_bins.max(), mode=APERTURE_CONFIGURATION):
plt.plot(x0 + dx + r0*np.cos(T), y0 + dy + r0*np.sin(T), '--w')
plt.plot(x0 + dx + r_img*np.cos(T), y0 + dy + r_img*np.sin(T), '--w')
plt.axis('square')
plt.title(f"{energy_min:.1f} – {min(12.5, energy_max):.1f} MeV")
plt.xlabel("x (cm)")
plt.ylabel("y (cm)")
bar = plt.colorbar()
bar.ax.set_ylabel("Counts")
plt.tight_layout()
plt.figure()
plt.pcolormesh(xI_bins, yI_bins, NI.T, vmax=np.quantile(NI, (NI.size-6)/NI.size), rasterized=True)
T = np.linspace(0, 2*np.pi)
# plt.plot(x0 + r0*np.cos(T), y0 + r0*np.sin(T), '--w')
plt.axis('square')
plt.title(f"{energy_min:.1f} – {min(12.5, energy_max):.1f} MeV)")
plt.xlabel("x (cm)")
plt.ylabel("y (cm)")
bar = plt.colorbar()
bar.ax.set_ylabel("Counts")
plt.tight_layout()
for filetype in ['png', 'eps']:
plt.savefig(OUTPUT_FOLDER+f'{data[SHOT]}-tim{data[TIM]}-{energy_cut:s}-projection.{filetype}')
if SHOW_PLOTS:
plt.show()
plt.close('all')
def plot_radial_data(rI_bins, zI, r_actual, z_actual, r_uncharged, z_uncharged,
δ, Q, energy_min, energy_max, energy_cut, data, **kwargs):
plt.figure()
plt.locator_params(steps=[1, 2, 4, 5, 10])
plt.fill_between(np.repeat(rI_bins, 2)[1:-1], 0, np.repeat(zI, 2)/1e3, label="Data", color='#f9A72E')
plt.plot(r_actual, z_actual/1e3, '-', color='#0C6004', linewidth=2, label="Fit with charging")
plt.plot(r_uncharged, z_uncharged/1e3, '--', color='#0F71F0', linewidth=2, label="Fit without charging")
plt.xlim(0, rI_bins.max())
plt.ylim(0, min(zI.max()*1.05, z_actual.max()*1.20)/1e3)
plt.xlabel("Radius (cm)")
plt.ylabel("Track density (10³/cm²)")
plt.legend()
plt.title(f"{energy_min:.1f} – {min(12.5, energy_max):.1f} MeV")
plt.tight_layout()
for filetype in ['png', 'eps']:
plt.savefig(OUTPUT_FOLDER+f'{data[SHOT]}-tim{data[TIM]}-{energy_cut:s}-penumbral-lineout.{filetype}')
if SHOW_PLOTS:
plt.show()
plt.close('all')
def plot_reconstruction(x_bins, y_bins, Z, e_min, e_max, cut_name, data):
p0, (p1, θ1), (p2, θ2) = mysignal.shape_parameters(
(x_bins[:-1] + x_bins[1:])/2,
(y_bins[:-1] + y_bins[1:])/2,
Z, contour=PLOT_CONTOUR) # compute the three number summary
x0, y0 = p1*np.cos(θ1), p1*np.sin(θ1)
plt.figure() # plot the reconstructed source image
plt.locator_params(steps=[1, 2, 5, 10])
plt.pcolormesh((x_bins - x0)/1e-4, (y_bins - y0)/1e-4, Z.T, cmap=CMAP[cut_name], vmin=0, rasterized=True)
plt.contour(((x_bins[1:] + x_bins[:-1])/2 - x0)/1e-4, ((y_bins[1:] + y_bins[:-1])/2 - y0)/1e-4, Z.T, levels=[PLOT_CONTOUR*np.max(Z)], colors='w', linestyle='dashed')
# T = np.linspace(0, 2*np.pi, 144)
# R = p0 + p2*np.cos(2*(T - θ2))
# plt.plot(R*np.cos(T)/1e-4, R*np.sin(T)/1e-4, 'w--')
plt.axis('equal')
# plt.colorbar()
plt.axis('square')
if cut_name == 'synth':
pass
elif e_max is None:
plt.title("X-ray image")
else:
plt.title(f"{e_min:.1f} – {min(12.5, e_max):.1f} MeV")
plt.xlabel("x (μm)")
plt.ylabel("y (μm)")
plt.axis([-PLOT_RADIUS, PLOT_RADIUS, -PLOT_RADIUS, PLOT_RADIUS])
plt.tight_layout()
logging.info(f"saving {OUTPUT_FOLDER}{data[SHOT]}-tim{data[TIM]}-{cut_name}-reconstruction.png")
for filetype in ['png', 'eps']:
plt.savefig(OUTPUT_FOLDER+f"{data[SHOT]}-tim{data[TIM]}-{cut_name}-reconstruction.{filetype}")
j_lineout = np.argmax(np.sum(Z, axis=0))
plt.figure()
plt.plot((np.repeat(x_bins, 2)[1:-1] - x0)/1e-4, np.repeat(Z[:,j_lineout], 2))
plt.xlabel("x (μm)")
plt.ylabel("Fluence")
plt.xlim(-150, 150)
plt.ylim(0, None)
plt.tight_layout()
for filetype in ['png', 'eps']:
plt.savefig(OUTPUT_FOLDER+f"{data[SHOT]}-tim{data[TIM]}-{cut_name}-reconstruction-lineout.{filetype}")
if SHOW_PLOTS:
plt.show()
plt.close('all')
return p0, (p2, θ2)
def plot_overlaid_contors(reconstructions, projected_offset, projected_flow, data):
for i, (x_bins, y_bins, N, cmap) in enumerate(reconstructions): # convert the x and y bin edges to pixel centers
x, y = (x_bins[:-1] + x_bins[1:])/2, (y_bins[:-1] + y_bins[1:])/2
X, Y = np.meshgrid(x, y, indexing='ij')
reconstructions[i][0:2] = X, Y
if i == int(len(reconstructions)*3/4):
x0 = X[np.unravel_index(np.argmax(N), N.shape)] # calculate the centroid of the highest energy bin
y0 = Y[np.unravel_index(np.argmax(N), N.shape)]
x_off, y_off, z_off = projected_offset
x_flo, y_flo, z_flo = projected_flow
plt.figure()
plt.locator_params(steps=[1, 2, 5, 10], nbins=6)
for X, Y, N, cmap in reconstructions:
if len(reconstructions) > 3:
plt.contour((X - x0)/1e-4, (Y - y0)/1e-4, N/N.max(), levels=[PLOT_CONTOUR], colors=[cmap.colors[-1]])
else:
plt.contourf((X - x0)/1e-4, (Y - y0)/1e-4, N/N.max(), levels=[PLOT_CONTOUR, 1], colors=[cmap.colors[-1]])
if SHOW_OFFSET:
plt.plot([0, x_off/1e-4], [0, y_off/1e-4], '-k')
plt.scatter([x_off/1e-4], [y_off/1e-4], color='k')
plt.arrow(0, 0, x_flo/1e-4, y_flo/1e-4, color='k', head_width=5, head_length=5, length_includes_head=True)
plt.text(0.05, 0.95, "offset out of page = {:.3f}\nflow out of page = {:.3f}".format(
z_off/np.sqrt(x_off**2 + y_off**2 + z_off**2), z_flo/np.sqrt(x_flo**2 + y_flo**2 + z_flo**2)),
verticalalignment='top', transform=plt.gca().transAxes)
plt.axis('square')
plt.axis([-PLOT_RADIUS, PLOT_RADIUS, -PLOT_RADIUS, PLOT_RADIUS])
plt.xlabel("x (μm)")
plt.ylabel("y (μm)")
plt.title("TIM {} on shot {}".format(data[TIM], data[SHOT]))
plt.tight_layout()
print(f"saving as {OUTPUT_FOLDER}{data[SHOT]}-tim{data[TIM]}-overlaid-reconstruction.png")
for filetype in ['png', 'eps']:
plt.savefig(OUTPUT_FOLDER+f"{data[SHOT]}-tim{data[TIM]}-overlaid-reconstruction.{filetype}")
plt.close('all')
if __name__ == '__main__':
logging.basicConfig(
level=logging.INFO,
format="{asctime:s} |{levelname:4.4s}| {message:s}", style='{',
datefmt="%m-%d %H:%M",
handlers=[
logging.FileHandler(OUTPUT_FOLDER+"log.txt", encoding='utf-8'),
logging.StreamHandler(),
]
)
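# With the format above, a record looks roughly like this (illustrative only):
#   07-15 13:05 |INFO| Beginning reconstruction for TIM <tim> on shot <shot>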
try:
results = pd.read_csv(OUTPUT_FOLDER+"summary.csv", dtype={'shot': str}) # start by reading the existing data or creating a new file
except IOError:
results = pd.DataFrame(data={"shot": ['placeholder'], "tim": [0], "energy_cut": ['placeholder']}) # be explicit that shots can be str, but usually look like int
shot_list = pd.read_csv('../shot_list.csv', dtype={SHOT: str})
for i, data in shot_list.iterrows(): # iterate thru the shot list
input_filename = None
for fname in os.listdir(INPUT_FOLDER): # search for filenames that match each row
if (fname.endswith('.txt') or fname.endswith('.pkl')) \
and str(data[SHOT]) in fname and ('tim'+str(data[TIM]) in fname.lower() or 'tim' not in fname.lower()) \
and data[ETCH_TIME].replace(' ','') in fname:
input_filename = fname
print()
logging.info("Beginning reconstruction for TIM {} on shot {}".format(data[TIM], data[SHOT]))
break
if input_filename is None:
logging.info(" Could not find text file for TIM {} on shot {}".format(data[TIM], data[SHOT]))
continue
else:
output_filename = f"{data[SHOT]}-tim{data[TIM]}"
if not SKIP_RECONSTRUCTION:
reconstruction = reconstruct( # perform the 2d reconstruction
input_filename = INPUT_FOLDER+input_filename,
output_filename = OUTPUT_FOLDER+output_filename,
rA = data[APERTURE_RADIUS]/1.e4,
sA = data[APERTURE_SPACING]/1.e4,
L = data[APERTURE_DISTANCE],
M = data[MAGNIFICATION],
rotation = np.radians(data[ROTATION]),
etch_time = float(data[ETCH_TIME].strip(' h')),
aperture_configuration = APERTURE_CONFIGURATION,
aperture_charge_fitting = CHARGE_FITTING,
object_size = OBJECT_SIZE,
resolution = RESOLUTION,
expansion_factor = EXPANSION_FACTOR,
show_plots=False,
)
results = results[(results.shot != data[SHOT]) | (results.tim != data[TIM])] # clear any previous versions of this reconstruction
for result in reconstruction:
logging.info(f"a reconstruccion was completed: {result}")
results = results.append( # and save the new ones to the dataframe
dict(
shot=data[SHOT],
tim=data[TIM],
offset_magnitude=np.nan,
offset_angle=np.nan,
**result),
ignore_index=True)
results = results[results.shot != 'placeholder']
else:
logging.info('"completed" the reconstruccion ;)')
images_on_this_los = (results.shot == data[SHOT]) & (results.tim == data[TIM])
logging.info("in total we have:")
logging.info(results[images_on_this_los])
for i, result in results[images_on_this_los].iterrows(): # plot the reconstruction in each energy cut
if result.energy_cut != 'xray':
cut = result.energy_cut
xC_bins, yC_bins, NC = load_hdf5(f'{OUTPUT_FOLDER}{output_filename}-{cut}-raw')
xI_bins, yI_bins, NI = load_hdf5(f'{OUTPUT_FOLDER}{output_filename}-{cut}-projection')
plot_cooked_data(xC_bins, yC_bins, NC, xI_bins, yI_bins, NI,
data=data, **result)
try:
rI, r1, r2, zI, z1, z2 = load_hdf5(f'{OUTPUT_FOLDER}{output_filename}-{cut}-radial')
plot_radial_data(rI, zI, r1, z1, r2, z2, data=data, **result)
except IOError:
pass
x_bins, y_bins, B = load_hdf5(f'{OUTPUT_FOLDER}{output_filename}-{cut}-reconstruction')
plot_reconstruction(x_bins, y_bins, B, result.energy_min, result.energy_max, result.energy_cut, data)
for cut_set in [['0', '1', '2', '3', '4', '5', '6', '7'], ['lo', 'hi']]: # create the nested plots
filenames = []
for cut_name in cut_set:
results_in_this_cut = results[images_on_this_los & (results.energy_cut == cut_name)]
if results_in_this_cut.shape[0] >= 1:
filenames.append((f"{OUTPUT_FOLDER}{output_filename}-{cut_name}-reconstruction", CMAP[cut_name]))
if len(filenames) >= len(cut_set)*3/4:
print(f"creating the nested plot with the {cut_set} cut set")
reconstructions = []
for filename, cmap in filenames:
reconstructions.append([*load_hdf5(filename), cmap])
dxL, dyL = center_of_mass(*reconstructions[0][:3])
dxH, dyH = center_of_mass(*reconstructions[-1][:3])
dx, dy = dxH - dxL, dyH - dyL
logging.info(f"Δ = {np.hypot(dx, dy)/1e-4:.1f} μm, θ = {np.degrees(np.arctan2(dx, dy)):.1f}")
results.offset_magnitude[images_on_this_los] = np.hypot(dx, dy)/1e-4
results.offset_angle[images_on_this_los] = np.degrees(np.arctan2(dy, dx))
basis = tim_coordinates(data[TIM])
plot_overlaid_contors(
reconstructions,
project(float(data[R_OFFSET]), float(data[Θ_OFFSET]), float(data[Φ_OFFSET]), basis)*1e-4, # cm
project(float(data[R_FLOW]), float(data[Θ_FLOW]), float(data[Φ_FLOW]), basis)*1e-4, # cm/ns
data
)
break
else:
print(f"we didn't have what I need for the {cut_set} cut set")
try:
xray = np.loadtxt(INPUT_FOLDER+'KoDI_xray_data1 - {:d}-TIM{:d}-{:d}.mat.csv'.format(int(data[SHOT]), int(data[TIM]), [2,4,5].index(int(data[TIM]))+1), delimiter=',').T
except (ValueError, OSError):
xray = None
if xray is not None:
logging.info("x-ray image")
xX_bins, yX_bins = np.linspace(-100e-4, 100e-4, 101), np.linspace(-100e-4, 100e-4, 101)
p0, (p2, θ2) = plot_reconstruction(xX_bins, yX_bins, xray, None, None, "xray", data)
results = results[(results.shot != data[SHOT]) | (results.tim != data[TIM]) | (results.energy_cut != 'xray')] # clear any previous versions of this reconstruction
results = results.append( # and save the new ones to the dataframe
dict(
shot=data[SHOT],
tim=data[TIM],
energy_cut='xray',
P0_magnitude=p0/1e-4,
P2_magnitude=p2/1e-4,
P2_angle=θ2),
ignore_index=True)
results = results.sort_values(['shot', 'tim', 'energy_min', 'energy_max'], ascending=[True, True, True, False])
results.to_csv(OUTPUT_FOLDER+"summary.csv", index=False) # save the results to disk
``` |
{
"source": "jkunimune15/Map-Projections",
"score": 3
} |
#### File: src/zupplemental/generate_graticule.py
```python
import math
AXIAL_TILT = 23.43694
ANTI_TILT = 66.56306 # defined explicitly (rather than as 90 - AXIAL_TILT) because it's the easiest way to deal with roundoff
def plot_meridian(lamd, granularity, cut=0, clazz=None):
class_attr = 'class="{}" '.format(clazz) if clazz is not None else ''
tag = '\t\t\t<path {}d="M{},{}'.format(class_attr, lamd, cut-90) + 'v{}'.format(granularity)*round((180-2*cut)/granularity) + '" />'
print(tag)
def plot_parallel(phid, granularity, cut=0, clazz=None):
class_attr = 'class="{}" '.format(clazz) if clazz is not None else ''
tag = '\t\t\t<path {}d="M{},{}'.format(class_attr, cut-180, phid) + 'h{}'.format(granularity)*round((360-2*cut)/granularity) + '" />'
print(tag)
def generate_graticule(spacing, granularity, include_tropics=False, adjust_poles=False, double_dateline=False):
"""Generate a mesh of latitude and longitude lines"""
NUM_BASE = 90//spacing
cuts = [0]*(4*NUM_BASE)
if adjust_poles: #if this is True, reduce the number of meridians as you approach the pole
old_num = 1
for p in range(0, 90, spacing):
new_num = old_num*int(1/math.cos(math.radians(p+spacing/2))/old_num)
while NUM_BASE%new_num != 0: new_num -= 1
if new_num >= 2*old_num:
for i in range(len(cuts)):
if i%old_num == 0 and i%new_num != 0:
cuts[i] = 90-p
old_num = new_num
for x in range(spacing, 180, spacing):
plot_meridian(-x, granularity, cut=cuts[x//spacing%len(cuts)])
plot_meridian(x, granularity, cut=cuts[x//spacing%len(cuts)])
for y in range(spacing, 90, spacing):
plot_parallel(-y, granularity)
plot_parallel(y, granularity)
for x in [-180, 0, 180] if double_dateline else [-180, 0]:
plot_meridian(x, granularity, clazz="prime-m")
plot_parallel(0, granularity, clazz="equator")
if include_tropics:
plot_parallel(-AXIAL_TILT, granularity, clazz="tropics")
plot_parallel(AXIAL_TILT, granularity, clazz="tropics")
plot_parallel(-ANTI_TILT, granularity, clazz="circles")
plot_parallel(ANTI_TILT, granularity, clazz="circles")
def generate_backdrop(resolution, ctr_meridian=0):
"""generate a backdrop type thing that will only work in standard aspect"""
left_side = (ctr_meridian+.001)%360 - 180
middle = (ctr_meridian+180)%360 - 180
right_side = (ctr_meridian-.001)%360 - 180
tag = '\t\t\t<path d="M{:.3f},-90'.format(left_side) + 'v{}'.format(resolution)*int(180//resolution) +\
'L{:.3f},90L{:.3f},90'.format(middle, right_side) + 'v-{}'.format(resolution)*int(180//resolution) +\
'L{:.3f},-90 Z" />'.format(middle)
print(tag)
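if __name__ == '__main__':
    # Minimal usage sketch (an assumption; in the repository these functions are
    # normally driven by a wrapper script): print SVG <path> elements for a
    # graticule with 15-degree spacing drawn in 5-degree segments, plus a backdrop.
    generate_graticule(15, 5, include_tropics=True, adjust_poles=True)
    generate_backdrop(5)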
```
#### File: src/zupplemental/generate_orthodromes.py
```python
import math
import numpy as np
from helpers import obliquify, plot
PHI = (math.sqrt(5)+1)/2
ATH = math.atan(1/2)
def plot_orthodrome(phi0, lam0, tht0):
points = []
for p in range(-90, 91):
points.append(obliquify(math.radians(p), tht0, phi0, lam0))
plot(points, close=False)
def generate_orthodromes():
"""generate an icosohedral orthodromic mesh, like the Brilliant logo (#notsponsored)"""
for l in range(-180, 180, 36):
plot_orthodrome(math.pi/2, 0, math.radians(l))
for l in range(0, 360, 72):
plot_orthodrome(ATH, math.radians(l), math.pi*0.2)
plot_orthodrome(ATH, math.radians(l), math.pi*0.4)
plot_orthodrome(ATH, math.radians(l), math.pi*1.2)
plot_orthodrome(ATH, math.radians(l), math.pi*1.4)
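if __name__ == '__main__':
    # Minimal usage sketch (an assumption; normally invoked from a wrapper script):
    # emit the full icosahedral mesh as path data via helpers.plot.
    generate_orthodromes()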
``` |
{
"source": "j-kun/Irre-Katze-Level-Editor",
"score": 3
} |
#### File: Irre-Katze-Level-Editor/py/model_history.py
```python
UPDATE_LAST = 'update-last'
class History(object):
# ---------- initialize ----------
def __init__(self, model, attributesToBackup, maxSize, onUndoOrRedoListener=None):
self.model = model
self.setAttributesToBackup( attributesToBackup )
self.maxSize = maxSize
self.onLoadListener = onUndoOrRedoListener
self.clear()
def setAttributesToBackup(self, attributesToBackup):
'''attributesToBackup: an iterable of attribute specifications.
each specification is a list/tuple of up to three elements:
0: the name of the attribute
1: a function to copy the value of the attribute
2: a flag whether corresponding value of the last history entry shall be updated.
this can be useful to save a cursor.
'''
self.attributesToBackup = list()
for attrSpec in attributesToBackup:
if not isinstance(attrSpec, (tuple, list)) or len(attrSpec) == 1:
self.attributesToBackup.append((attrSpec, None, None))
elif len(attrSpec) == 2:
self.attributesToBackup.append((attrSpec[0], attrSpec[1], None))
elif len(attrSpec) == 3:
self.attributesToBackup.append(attrSpec)
else:
assert False
def clear(self):
self.history = list()
self.historyIndex = -1
# ---------- getters ----------
def __len__(self):
return len(self.history)
# ---------- save ----------
def makeBackup(self):
self.historyIndex += 1
while self.historyIndex < len(self.history):
del self.history[-1]
if len(self.history) > 0:
prior = self.history[-1]
else:
prior = None
d = dict()
for attr, copy, updateLast in self.attributesToBackup:
val = getattr(self.model, attr)
if copy != None:
val = copy(val)
d[attr] = val
if updateLast == UPDATE_LAST and prior != None:
prior[attr] = val
self.history.append(d)
while len(self.history) > self.maxSize:
del self.history[0]
self.historyIndex -= 1
# ---------- load ----------
def undo(self):
if self.historyIndex <= 0:
return False
self.historyIndex -= 1
self._load()
return True
def redo(self):
if self.historyIndex + 1 >= len(self.history):
return False
self.historyIndex += 1
self._load()
return True
def _load(self):
d = self.history[self.historyIndex]
for attr, copy, updateLast in self.attributesToBackup:
val = d[attr]
if copy != None:
val = copy(val)
setattr(self.model, attr, val)
if self.onLoadListener != None:
self.onLoadListener()
if __name__=='__main__':
class Model(object):
ATTRIBUTES_TO_BACKUP = (
# attribute name, copy function
('x', None),
)
def __init__(self):
self.x = 0
self.history = History(self, self.ATTRIBUTES_TO_BACKUP, 30)
self.history.makeBackup()
def increment(self):
self.x += 1
self.history.makeBackup()
N = 3
m = Model()
for i in range(N):
m.increment()
assert m.x == N
assert m.history.undo()
assert m.x == N - 1
assert m.history.redo()
assert m.x == N
assert not m.history.redo()
for i in range(N):
assert m.history.undo()
assert m.x == N - 1 - i
assert not m.history.undo()
assert m.x == 0
assert len(m.history) == N + 1
print("tests successful")
```
#### File: Irre-Katze-Level-Editor/py/model_object_catalog.py
```python
import os.path
import re
import logging
# other
import system
import locales
_ = locales._
def toAbsPath(relPath):
return os.path.join(os.path.split(os.path.split(__file__)[0])[0], relPath)
PATH = toAbsPath('images/gif')
ENCODING = 'windows-1252'
if system.isPython3():
def codeToChr(num):
return bytes((num,)).decode(ENCODING)
else:
def codeToChr(num):
return chr(num).decode(ENCODING)
# ========== OBJECTS ==========
OBJ_NONE = ord('!')
OBJ_START = ord('0')
OBJ_END = ord('[')
OBJ_HELIUM= ord('b')
OBJ_PIPE = ord('X')
OBJ_DRUM = ord('W')
OBJ_WAND = ord('a')
OBJ_SUN = ord('c')
OBJ_ARROW_LEFT = ord('w')
OBJ_ARROW_RIGHT = ord('x')
OBJ_ARROW_UP = ord('y')
OBJ_ARROW_DOWN = ord('z')
OBJ_BURNING_CAN = ord('u')
OBJ_FIRE = ord('#')
OBJ_RAIN = ord('$')
# ========== CATEGORIES ==========
TITLE_TO_EAT = _("To Eat")
TITLE_TO_MOVE = _("To Move")
TITLE_OBSTACLES = _("Obstacles")
TITLE_OBSTACLES_GRAVITY = _("Obstacles 1")
TITLE_OBSTACLES_FIX = _("Obstacles 2")
TITLE_MAGIC = _("Other")
TITLE_UNCATEGORIZED = _("Uncategorized")
CATEGORY_TO_EAT = (65, 66,67,68,69,70,71,72,73,74, 75, 76, 77,78, 123,125)
CATEGORY_TO_MOVE = (44,55, 45,49, 46,50, 47,89, 51,52, 53,54, 79,80, 103,104, 109,110, 112,113)
CATEGORY_OBSTACLES_GRAVITY = (
# balls
37,38,39,40,41,42,
# others
43, 81,82, 83,84, 100,101,102,
# fire
# exception: 118 [water] does *not* fall
115,116,117,118,
# invert
98,
)
CATEGORY_OBSTACLES_FIX = (
# normal
58,59,60, 61,62,
# combined objects (fix)
56, 57, 63, 90, 105, 196, 228,
# combined objects (moveable)
111, 114,
# arrows
119,120,121,122,
# deadly
35,36,
)
CATEGORY_OBSTACLES = CATEGORY_OBSTACLES_GRAVITY + CATEGORY_OBSTACLES_FIX
CATEGORY_MAGIC = (
# start, end, empty
48 ,91, 33,
# explosives
85,86, 106, 107, 108,
# magic
87,88, 97, 99,
)
def getCategory(obj):
if obj in CATEGORY_TO_EAT:
return TITLE_TO_EAT
elif obj in CATEGORY_TO_MOVE:
return TITLE_TO_MOVE
elif obj in CATEGORY_MAGIC:
return TITLE_MAGIC
elif obj in CATEGORY_OBSTACLES:
return TITLE_OBSTACLES
else:
return TITLE_UNCATEGORIZED
def getObjects():
reo = re.compile(r"^irka(?P<id>\d+)[.][a-z]+$")
l = os.listdir(PATH)
for fn in l:
if fn[0] == '.':
logging.debug(_("ignoring hidden file {fn} in {path}").format(fn=fn, path=PATH_IMAGES))
continue
m = reo.match(fn)
if not m:
logging.warning(_("invalid file {fn} in {path}").format(fn=fn, path=PATH_IMAGES))
continue
yield int(m.group('id'))
CATEGORY_ALL = list(getObjects())
CATEGORY_ALL.append(OBJ_NONE)
CATEGORY_ALL.remove(93) # open door
CATEGORY_ALL.sort()
CATEGORY_UNCATEGORIZED = list(set(CATEGORY_ALL) - (set(CATEGORY_TO_EAT)|set(CATEGORY_TO_MOVE)|set(CATEGORY_MAGIC)|set(CATEGORY_OBSTACLES)))
CATEGORY_UNCATEGORIZED.sort()
categories = (
(TITLE_TO_EAT, CATEGORY_TO_EAT),
(TITLE_TO_MOVE, CATEGORY_TO_MOVE),
(TITLE_OBSTACLES_GRAVITY, CATEGORY_OBSTACLES_GRAVITY),
(TITLE_OBSTACLES_FIX, CATEGORY_OBSTACLES_FIX),
(TITLE_MAGIC, CATEGORY_MAGIC),
)
if len(CATEGORY_UNCATEGORIZED) > 0:
categories += ((TITLE_UNCATEGORIZED, CATEGORY_UNCATEGORIZED),)
def checkNoDoubles():
n = 0
s = set()
for title, items in categories:
n += len(items)
s |= set(items)
return n == len(s)
assert checkNoDoubles()
# ========== GRAVITY ==========
GRAVITY_OBEYING = (
# everything to eat
65, 66,67,68,69,70,71,72,73,74, 75, 76, 77,78, 123,125,
# some fillings (to be moved)
44, 45, 46, 47, 79, 109,
# one container (to be moved on) [money bag]
80,
# balls
37,38,39,40,41,42,
# other obstacles
43, 81,82, 83,84, 100,101,102,
# fire
115,116,117,
# explosives
85,86, 106, 107, 108,
# magic
OBJ_DRUM,OBJ_PIPE, OBJ_WAND, OBJ_SUN,
)
GRAVITY_RESISTANT = (
# some fillings (to be moved)
51, 53, 103, 112,
# almost all containers (to be moved on)
55, 49, 50, 89, 52, 54, 104, 110, 113,
# normal not-falling obstacles
58,59,60, 61,62,
# water
118,
# combined objects (fix)
56, 57, 63, 90, 105, 196, 228,
# combined objects (moveable)
111, 114,
# arrows
119,120,121,122,
# deadly
35,36,
# doors
91, 93
)
assert set(GRAVITY_OBEYING) & set(GRAVITY_RESISTANT) == set()
# ========== MOVABILITY ==========
ATTR_MOVABLE = (
# all fillings (to be moved)
44, 45, 46, 47, 51, 53, 79, 103, 109, 112,
# some containers (to be moved on)
49, 50, 52, 80, 110, 113,
# some combined objects (after moved on)
111, 114,
# balls
37,38,39,40,41,42,
# other obstacles
43, 81,82, 83, 100,101,102,
# match, can, water
115,116, 118,
# explosives
86, 106, 107, 108,
)
ATTR_EATABLE = CATEGORY_TO_EAT + (OBJ_DRUM, OBJ_PIPE, OBJ_WAND, OBJ_SUN, 93)
ATTR_FIXED = (
# obstacles
OBJ_HELIUM, 58, 59, 60, 61, 62, 84,
# some containers (to be moved on)
55, 89, 54, 104,
# most combined objects (after moved on)
56, 57, 63, 90, 105, 196, 228,
# arrows
119,120,121,122,
# trigger for dynamite
85,
# closed door
91,
)
ATTR_KILLING = (OBJ_FIRE, OBJ_RAIN, OBJ_BURNING_CAN)
# no duplicates
##assert len(set(ATTR_MOVABLE)) == len(ATTR_MOVABLE)
##assert len(set(ATTR_EATABLE)) == len(ATTR_EATABLE)
##assert len(set(ATTR_FIXED)) == len(ATTR_FIXED)
##assert len(set(ATTR_KILLING)) == len(ATTR_KILLING)
assert len( set(ATTR_MOVABLE) | set(ATTR_EATABLE) | set(ATTR_FIXED) | set(ATTR_KILLING) ) \
== len(ATTR_MOVABLE) + len(ATTR_EATABLE) + len(ATTR_FIXED) + len(ATTR_KILLING)
# ========== COMMENTS ==========
OBJECT_DESCRIPTION_FORMAT = u"{objCode:03d} ({objChr}): {properties}"
OBJECT_DESCRIPTION_PROPERTY_SEP = u""
OBJECT_DESCRIPTION_PROPERTY_FORMAT = u"\n - {}"
def getObjectDescription(obj):
properties = formatList(getPropertiesList(obj), sep = OBJECT_DESCRIPTION_PROPERTY_SEP, form = OBJECT_DESCRIPTION_PROPERTY_FORMAT)
out = OBJECT_DESCRIPTION_FORMAT.format(objCode=obj, objChr=codeToChr(obj), properties=properties)
return out
def getPropertiesList(obj):
properties = list()
if obj in GRAVITY_OBEYING:
properties.append(_("obeys gravity"))
elif obj in GRAVITY_RESISTANT:
properties.append(_("gravity resistant"))
if obj in ATTR_MOVABLE:
properties.append(_("movable"))
elif obj in ATTR_EATABLE:
properties.append(_("eatable"))
elif obj in ATTR_FIXED:
properties.append(_("*not* movable"))
elif obj in ATTR_KILLING:
properties.append(_("kills you when stepping on it"))
tmp = getComment(obj)
if tmp != "":
properties.append(tmp)
return properties
def getComment(obj):
# special fields
if obj == OBJ_NONE:
return _("empty field")
elif obj == OBJ_START:
return _("start field")
elif obj == OBJ_END:
return _("end field (optional)")
elif obj == OBJ_PIPE:
return _("flips the board vertically")
elif obj == OBJ_HELIUM:
return _("floats upward")
# keys
elif obj == 123:
return _("last key must be eaten last (but before last bell)")
# bell
elif obj == 125:
return _("last bell must be eaten last (even after last key)")
# wand
elif obj == 97:
return _("disables gravity for two steps")
# drum
elif obj == 87:
return _("converts all 061 (=) into 040 (()")
# sun
elif obj == 99:
return _("removes all movable balls")
# red-yellow ball
elif obj == 61:
return _("is converted by 087 (W)")
elif obj == OBJ_FIRE or obj == OBJ_BURNING_CAN:
return _("kills you when stepping over it")
elif obj == OBJ_RAIN:
return _("kills you when stepping under it")
return ""
def formatList(l, form, sep):
return sep.join(form.format(x) for x in l)
# ========== TEST ==========
if __name__=='__main__':
#for i in getObjects():
# print(i)
print(TITLE_UNCATEGORIZED)
print(CATEGORY_UNCATEGORIZED)
print()
print("unkown reaction to gravity:")
print(set(CATEGORY_ALL) - set(GRAVITY_OBEYING) - set(GRAVITY_RESISTANT))
print()
print("unkown attribute (movable, eatable, fixed, killing):")
print(set(CATEGORY_ALL) - set(ATTR_MOVABLE) - set(ATTR_EATABLE) - set(ATTR_FIXED) - set(ATTR_KILLING))
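# Hypothetical extra demonstration (not in the original test code): print the
# combined description text for the helium object defined above.
print(getObjectDescription(OBJ_HELIUM))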
```
#### File: Irre-Katze-Level-Editor/py/open_directory.py
```python
import os
import subprocess
import logging
log = logging.getLogger(__name__)
# other libraries
import system
# ---------- settings ----------
WC_FILEPATH = '{filepath}'
CMD_OPEN_FILE_WINDOWS = ("cmd", "/C", "start", "", WC_FILEPATH)
CMD_OPEN_FILE_LINUX = ("xdg-open", WC_FILEPATH)
CMD_OPEN_FILE_MAC = ("open", "--", WC_FILEPATH)
# ---------- internal commands ----------
def _format_open_file_cmd(cmd, filepath):
cmd = list(cmd)
for i in range(len(cmd)):
cmd[i] = cmd[i].replace(WC_FILEPATH, filepath)
return cmd
def _run_cmd(cmd):
log.debug("executing {cmd}".format(cmd=cmd))
return subprocess.Popen(cmd)
# ---------- interface ----------
if system.isWindows():
def open_directory(path, select):
'''select=True: parent directory is opened, path (file or directory) is selected.
select=False: path (directory) is opened and nothing is selected.'''
cmd = ["explorer"]
if select:
cmd.append("/select,")
cmd.append(path)
return _run_cmd(cmd)
def open_file(path):
cmd = _format_open_file_cmd(CMD_OPEN_FILE_WINDOWS, path)
return _run_cmd(cmd)
elif system.isLinux():
def open_directory(path, select):
'''select=True: parent directory is opened, path (file or directory) is selected.
select=False: path (directory) is opened and nothing is selected.'''
if select:
dirpath, filename = os.path.split(path)
else:
dirpath = path
cmd = ["xdg-open", dirpath]
return _run_cmd(cmd)
def open_file(path):
cmd = _format_open_file_cmd(CMD_OPEN_FILE_LINUX, path)
return _run_cmd(cmd)
elif system.isMac():
#https://developer.apple.com/library/mac/documentation/Darwin/Reference/ManPages/man1/open.1.html
def open_directory(path, select):
'''select=True: parent directory is opened, path (file or directory) is selected.
select=False: path (directory) is opened and nothing is selected.'''
cmd = ["open"]
if select:
cmd.append("-R")
cmd.append("--")
cmd.append(path)
return _run_cmd(cmd)
def open_file(path):
cmd = _format_open_file_cmd(CMD_OPEN_FILE_MAC, path)
return _run_cmd(cmd)
else:
raise ValueError("unknown operating system: "+system.osName)
# ---------- test program ----------
if __name__=='__main__':
import os
def get_some_subdirectory(path):
l = os.listdir(path)
for fn in l:
ffn = os.path.join(path, fn)
if os.path.isdir(ffn):
if fn[0]!='.':
return ffn
def get_some_file(path):
l = os.listdir(path)
for fn in l:
ffn = os.path.join(path, fn)
if os.path.isfile(ffn):
if fn[0]!='.':
return ffn
return get_some_file(get_some_subdirectory(path))
path = os.path.expanduser("~")
path = get_some_subdirectory(path)
path = get_some_subdirectory(path)
path = get_some_file(path)
print(path)
open_directory(path, select=True)
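# A hedged companion example (commented out so this test program does not also
# launch an external application for the selected file):
# open_file(path)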
``` |
{
"source": "JKunst/alpe-predictor",
"score": 3
} |
#### File: JKunst/alpe-predictor/strava.py
```python
import base64
import os
import arrow
import httpx
import streamlit as st
import sweat
from bokeh.models.widgets import Div
import pandas as pd
import datetime
APP_URL = os.environ["APP_URL"]
STRAVA_CLIENT_ID = os.environ["STRAVA_CLIENT_ID"]
STRAVA_CLIENT_SECRET = os.environ["STRAVA_CLIENT_SECRET"]
STRAVA_AUTHORIZATION_URL = "https://www.strava.com/oauth/authorize"
STRAVA_API_BASE_URL = "https://www.strava.com/api/v3"
DEFAULT_ACTIVITY_LABEL = "NO_ACTIVITY_SELECTED"
STRAVA_ORANGE = "#fc4c02"
@st.cache(show_spinner=False)
def load_image_as_base64(image_path):
with open(image_path, "rb") as f:
contents = f.read()
return base64.b64encode(contents).decode("utf-8")
def powered_by_strava_logo():
base64_image = load_image_as_base64("./static/api_logo_pwrdBy_strava_horiz_light.png")
st.markdown(
f'<img src="data:image/png;base64,{base64_image}" width="100%" alt="powered by strava">',
unsafe_allow_html=True,
)
def authorization_url():
request = httpx.Request(
method="GET",
url=STRAVA_AUTHORIZATION_URL,
params={
"client_id": STRAVA_CLIENT_ID,
"redirect_uri": APP_URL,
"response_type": "code",
"approval_prompt": "auto",
"scope": "activity:read_all,profile:read_all"
}
)
return request.url
def login_header(header=None):
strava_authorization_url = authorization_url()
if header is None:
base = st
else:
col1, _, _, button = header
base = button
with col1:
pass  # powered_by_strava_logo()
base64_image = load_image_as_base64("./static/[email protected]")
base.markdown(
(
f"<a href=\"{strava_authorization_url}\">"
f" <img alt=\"strava login\" src=\"data:image/png;base64,{base64_image}\" width=\"100%\">"
f"</a>"
),
unsafe_allow_html=True,
)
def logout_header(header=None):
if header is None:
base = st
else:
_, col2, _, button = header
base = button
with col2:
powered_by_strava_logo()
if base.button("Log out"):
js = f"window.location.href = '{APP_URL}'"
html = f"<img src onerror=\"{js}\">"
div = Div(text=html)
st.bokeh_chart(div)
def logged_in_title(strava_auth, header=None):
if header is None:
base = st
else:
col, _, _, _ = header
base = col
first_name = strava_auth["athlete"]["firstname"]
last_name = strava_auth["athlete"]["lastname"]
col.markdown(f"*Welcome, {first_name} {last_name}!*")
@st.cache(show_spinner=False, suppress_st_warning=True)
def exchange_authorization_code(authorization_code):
response = httpx.post(
url="https://www.strava.com/oauth/token",
json={
"client_id": STRAVA_CLIENT_ID,
"client_secret": STRAVA_CLIENT_SECRET,
"code": authorization_code,
"grant_type": "authorization_code",
}
)
try:
response.raise_for_status()
except httpx.HTTPStatusError:
st.error("Something went wrong while authenticating with Strava. Please reload and try again")
st.experimental_set_query_params()
st.stop()
return
strava_auth = response.json()
return strava_auth
def authenticate(header=None, stop_if_unauthenticated=True):
query_params = st.experimental_get_query_params()
authorization_code = query_params.get("code", [None])[0]
if authorization_code is None:
authorization_code = query_params.get("session", [None])[0]
if authorization_code is None:
login_header(header=header)
if stop_if_unauthenticated:
st.stop()
return
else:
logout_header(header=header)
strava_auth = exchange_authorization_code(authorization_code)
logged_in_title(strava_auth, header)
st.experimental_set_query_params(session=authorization_code)
return strava_auth
def header():
col1, col2, col3 = st.columns(3)
with col3:
strava_button = st.empty()
return col1, col2, col3, strava_button
@st.cache(show_spinner=False)
def get_athlete(auth):
access_token = auth["access_token"]
response = httpx.get(
url=f"{STRAVA_API_BASE_URL}/athlete",
headers={
"Authorization": f"Bearer {access_token}",
},
)
return response.json()
@st.cache(show_spinner=False)
def get_athlete_stats(auth, id):
access_token = auth["access_token"]
response = httpx.get(
url=f"{STRAVA_API_BASE_URL}/athletes/{id}/stats",
headers={
"Authorization": f"Bearer {access_token}",
},
)
return response.json()
@st.cache(show_spinner=False)
def get_activities(auth, page=1):
access_token = auth["access_token"]
response = httpx.get(
url=f"{STRAVA_API_BASE_URL}/athlete/activities",
params={
"page": page,
},
headers={
"Authorization": f"Bearer {access_token}",
},
)
return response.json()
def activity_label(activity):
if activity["name"] == DEFAULT_ACTIVITY_LABEL:
return ""
start_date = arrow.get(activity["start_date_local"])
human_readable_date = start_date.humanize(granularity=["day"])
date_string = start_date.format("YYYY-MM-DD")
return f"{activity['name']} - {date_string} ({human_readable_date})"
def select_strava_activity(auth):
col1, col2 = st.columns([1, 3])
with col1:
page = st.number_input(
label="Activities page",
min_value=1,
help="The Strava API returns your activities in chunks of 30. Increment this field to go to the next page.",
)
with col2:
activities = get_activities(auth=auth, page=page)
if not activities:
st.info("This Strava account has no activities or you ran out of pages.")
st.stop()
default_activity = {"name": DEFAULT_ACTIVITY_LABEL, "start_date_local": ""}
activity = st.selectbox(
label="Select an activity",
options=[default_activity] + activities,
format_func=activity_label,
)
if activity["name"] == DEFAULT_ACTIVITY_LABEL:
st.write("No activity selected")
st.stop()
return
activity_url = f"https://www.strava.com/activities/{activity['id']}"
st.markdown(
f"<a href=\"{activity_url}\" style=\"color:{STRAVA_ORANGE};\">View on Strava</a>",
unsafe_allow_html=True
)
return activity
@st.cache()
def strava_user(auth):
athlete = get_athlete(auth)
id = athlete['id']
stats = get_athlete_stats(auth,id)
rr = stats['recent_ride_totals']
athlete_list = []
athlete_list.append([datetime.datetime.now(),
athlete['id'],
athlete['firstname'], athlete['lastname'], athlete['sex'], athlete['premium'],
athlete['ftp'], athlete['weight']])
athlete_list = pd.DataFrame(columns=['time','id','firstname','lastname','sex','premium','ftp','weight'], data=athlete_list)
return athlete_list
@st.cache(show_spinner=False)
def zwift_strava_activity(auth):
for page in range(1, 100): # pages 1..99; stop early once a page comes back empty
activities = get_activities(auth=auth, page=page)
if not activities:
break
activity_list = []
for activity in activities:
if activity['type'] =="VirtualRide":
activity_list.append([activity['name'], round(activity['distance']/1000,1),activity['average_watts'], activity['weighted_average_watts'], round(activity['suffer_score'],1)])
activity_list = pd.DataFrame(columns=['name','distance','average_watts','weighted_average_watts','suffer_score'], data=activity_list)
return activity_list
@st.cache(show_spinner=False)
def all_strava_activity(auth):
for page in range(1, 100): # pages 1..99; stop early once a page comes back empty
activities = get_activities(auth=auth, page=page)
if not activities:
break
activity_list = []
for activity in activities:
if activity['type'] in ["Ride", "VirtualRide"]:
try:
av_watts = activity['average_watts']
except KeyError:
av_watts = None # some rides have no power data
activity_list.append([
activity['id'],
activity['athlete']['id'],
activity['name'],
activity['distance'],
activity['moving_time'],
activity['type'],
activity['start_date'],
activity['average_speed'],
av_watts,
#activity['weighted_average_watts'],
activity['suffer_score']])
activity_list = pd.DataFrame(
data=activity_list, columns=['id','athlete','name','distance',
'moving_time',
'type',
'start_date',
'average_speed',
'average_watts',
'suffer_score'])
return activity_list
@st.cache(show_spinner=False, max_entries=30, allow_output_mutation=True)
def download_activity(activity, strava_auth):
with st.spinner(f"Downloading activity \"{activity['name']}\"..."):
return sweat.read_strava(activity["id"], strava_auth["access_token"])
``` |
{
"source": "j-kun/youtube-dl-gui",
"score": 3
} |
#### File: j-kun/youtube-dl-gui/logging_setup.py
```python
import os
import atexit
import logging, logging.config, json
getLogger = logging.getLogger
# other libraries
import metainfo
# constants
LEVEL_MAX = logging.CRITICAL*10
LEVEL_SENTINEL = logging.WARNING-1 # shall not be printed on stderr
logging.addLevelName(LEVEL_SENTINEL, 'SENTINEL')
FN_LOGGING_JSON = "logging.json"
# a function which I am missing in the logging library
def get_level_number(level):
if isinstance(level, int):
return level
try:
return int(level)
except ValueError:
return logging.getLevelName(level)
# "fake" logger because I can not log as long as the real logger is not setup
class DelayedLogger(object):
def __init__(self):
self.entries = list()
def log(self, level, msg):
self.entries.append((level, msg))
def write(self, log):
for level, msg in self.entries:
log.log(level, msg)
return log
# check whether log file is already in use
class UniqueFilenameCreator(object):
PATTERN = "_{counter}"
def __init__(self, desired_file_name):
self.ffn = desired_file_name
def create(self):
if self.is_filename_usable(self.ffn):
return self.ffn
_log.log(logging.INFO, "log file {ffn!r} is already in use.".format(ffn=self.ffn))
i = 1
path_name, ext = os.path.splitext(self.ffn)
while True:
ffn = path_name + self.PATTERN.format(counter=i) + ext
if self.is_filename_usable(ffn):
return ffn
i += 1
@staticmethod
def is_filename_usable(ffn):
if os.path.isfile(ffn):
with open(ffn, 'rt') as f:
for ln in f:
pass
if logfile.END_LINE not in ln:
# log file is (most likely) in use by other currently running instance
return False
return True
# read logging configuration file
def read_logging_configuration_file():
ffn_log_configuration = metainfo.get_config_ffn(FN_LOGGING_JSON, log=_log)
_log.log(logging.INFO, "log configuration file location: {ffn}".format(ffn=ffn_log_configuration))
with open(ffn_log_configuration, 'rt') as f:
_log_settings = json.load(f)
# specify directory for log file if not given
_fn_log_file = _log_settings['handlers']['log-file']['filename']
if os.path.isabs(_fn_log_file):
ffn_log_file = _fn_log_file
_log_directory = os.path.split(ffn_log_file)[0]
else:
_log_directory = metainfo.PATH_LOG
ffn_log_file = os.path.join(_log_directory, _fn_log_file)
# create directory for log file if not existing
if not os.path.isfile(ffn_log_file):
if not os.path.isdir(_log_directory):
os.makedirs(_log_directory)
_log.log(logging.INFO, "created directory for log file: {}".format(_log_directory))
# make sure to not use the log file of another currently running instance
ffn_log_file = UniqueFilenameCreator(ffn_log_file).create()
_log_settings['handlers']['log-file']['filename'] = ffn_log_file
_log.log(logging.DEBUG, "log file location: {}".format(ffn_log_file))
# configure logging module
logging.config.dictConfig(_log_settings)
logfile.init(_log_settings)
# enable/disable log file in log settings (does not take effect until restart)
class LogFile(object):
END_LINE = " end of log ".center(30, '=')
@staticmethod
def iter_file_handlers(_log_settings):
handlers = _log_settings['handlers']
for handler in handlers:
handler = handlers[handler]
if 'filename' in handler:
yield handler
def init(self, _log_settings):
self._ffn = _log_settings['handlers']['log-file']['filename']
for handler in self.iter_file_handlers(_log_settings):
if get_level_number(handler['level']) <= LEVEL_MAX:
self._is_enabled = True
return
self._is_enabled = False
return
def get_name(self):
return self._ffn
def get_directory(self):
return os.path.split(self.get_name())[0]
def is_enabled(self):
return self._is_enabled
def disable(self):
if not self._is_enabled:
return True
ffn_log_configuration = metainfo.get_config_ffn(FN_LOGGING_JSON, log=_log, create=True)
with open(ffn_log_configuration, 'rt') as f:
_log_settings = json.load(f)
for handler in self.iter_file_handlers(_log_settings):
handler['filename'] = "/dev/null"
handler['level'] = LEVEL_MAX+1
with open(ffn_log_configuration, 'wt') as f:
_log.log(logging.INFO, "writing log configuration file (disable logfile): {ffn}".format(ffn=ffn_log_configuration))
f.write(json.dumps(_log_settings, indent=4, sort_keys=True))
self._is_enabled = False
return True
def enable(self):
if self._is_enabled:
return True
ffn_log_configuration = metainfo.get_config_ffn(FN_LOGGING_JSON, log=_log, create=True)
with open(ffn_log_configuration, 'rt') as f:
_log_settings = json.load(f)
try:
handler = _log_settings['handlers']['log-file']
except KeyError:
try:
handler = next(self.iter_file_handlers(_log_settings))
except StopIteration:
return False
handler['filename'] = os.path.join(metainfo.PATH_LOG, 'youtube-dl-gui.log')
handler['level'] = logging.DEBUG
with open(ffn_log_configuration, 'wt') as f:
_log.log(logging.INFO, "writing log configuration file (enable logfile): {ffn}".format(ffn=ffn_log_configuration))
f.write(json.dumps(_log_settings, indent=4, sort_keys=True))
self._is_enabled = True
return True
def set_enable(self, value):
if value:
self.enable()
else:
self.disable()
assert self._is_enabled == bool(value)
def append_end_line(self):
if os.path.isfile(self.get_name()):
_log.log(LEVEL_SENTINEL, self.END_LINE)
def remove(self):
if not self._is_enabled:
return
os.remove(self.get_name())
log_directory = self.get_directory()
if len(os.listdir(log_directory))==0:
print("rmdir {0!r}".format(log_directory))
os.rmdir(log_directory)
# execute
_log = DelayedLogger()
logfile = LogFile()
read_logging_configuration_file()
_log = _log.write(logging.getLogger(__name__))
atexit.register(logfile.append_end_line) # gui.WindowMain.close is not called for example when program is closed via Keyboard Interrupt
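# A hedged usage sketch (not part of this module): importing this module runs the
# configuration above, after which other modules can simply do, e.g.
#
#   import logging_setup
#   log = logging_setup.getLogger(__name__)
#   log.info("application started")
#   logging_setup.logfile.set_enable(False) # turn the log file off for the next run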
``` |
{
"source": "jkunze/mrcyamz",
"score": 3
} |
#### File: jkunze/mrcyamz/pagination.py
```python
def getPaginationDetails(dbConnector=None, page=1, listing=None, browse=True, search_words=None):
""" Return a dict of details used to construct pagination bar
:param dbConnector: database connector
:type dbConnector: obj
:param page: Page number being served
:type page: int
:param listing: type of browse. used for redirects
:type listing: str
:param browse: is this for browse or for search
:type browse: boolean
:param search_words: what terms to search the database by
:type search_words: str
:rtype: dict
"""
details = {}
terms_per_page = 10
if browse:
details['terms'] = dbConnector.getChunkTerms(
sortBy="term_string", page=page, tpp=terms_per_page) # issue with defaults? thoughts?
details['termCount'] = dbConnector.getLengthTerms()
else:
details['terms'] = dbConnector.searchPage(string=search_words, page=page, tpp=terms_per_page)
details['termCount'] = dbConnector.searchLength(search_words)
if details['termCount'] > 0:
# which page you are on
details['page'] = page
# what type of browse we are looking at
details['listing'] = listing
# first page
details['start'] = 1
# last page
details['end'] = ((details['termCount'] // terms_per_page) + (0 if details['termCount'] % terms_per_page == 0 else 1))
# first page to have a link to
details['first'] = page - 5 if page - 5 > 0 else 1
# last page to have a link to
details['last'] = page + 6 if page + 6 < details['end'] else details['end'] + 1
# are there more pages than are showing on the left?
details['dots_left'] = details['first'] > details['start']
# are there more pages than are showing on the right?
details['dots_right'] = details['last'] < details['end']
# are there more than ten more pages to the right?
details['next_ten'] = (page + 10) <= details['end']
# are there more than ten more pages to the left?
details['prev_ten'] = (page - 10) >= details['start']
return details
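# Worked example of the arithmetic above (illustrative numbers only, no DB involved):
# with termCount=95, terms_per_page=10 and page=7 the function yields
#   end=10, first=2, last=11, dots_left=True, dots_right=False,
#   next_ten=False, prev_ten=False
# so a template would presumably render links for pages 2 through 10 with a
# leading ellipsis and no trailing one.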
```
#### File: mrcyamz/seaice/pretty.py
```python
import sys
import datetime
import json
import re
import string
from dateutil import tz
# Some JavaScripts that are embedded into some of the outputs. #
js_confirmRemoveTerm = """
function ConfirmRemoveTerm(id, concept_id) {
var r=window.confirm("Are you sure you want to delete term '" + concept_id + "'?");
if (r==true) {
x=id;
var form = document.createElement("form");
form.setAttribute("method", "post");
form.setAttribute("action", "/term=" + id + "/remove");
field = document.createElement("input");
field.setAttribute("name", "id");
field.setAttribute("value", id);
form.appendChild(field);
document.body.appendChild(form);
form.submit();
} else { x="nil"; } } """
js_confirmRemoveComment = """
function ConfirmRemoveComment(id) {
var r=window.confirm("Are you sure you want to delete your comment?");
if (r==true) {
x=id;
var form = document.createElement("form");
form.setAttribute("method", "post");
form.setAttribute("action", "/comment=" + id + "/remove");
field = document.createElement("input");
field.setAttribute("name", "id");
field.setAttribute("value", id);
form.appendChild(field);
document.body.appendChild(form);
form.submit();
} else { x="nil"; } } """
js_termAction = """
function TermAction(id, v) {
var form = document.createElement("form");
form.setAttribute("method", "post");
if (v == "up" || v == "down") {
var action = "vote";
} else {
var action = "track";
}
form.setAttribute("action", "/term=" + id + "/" + action);
field = document.createElement("input");
field.setAttribute("name", "action")
field.setAttribute("value", v);
form.appendChild(field);
document.body.appendChild(form);
form.submit();
} """
js_copyToClipboard = """
function CopyToClipboard(text) {
window.prompt("Hit Ctrl-C (Cmd-C), then Enter to copy this reference to your clipboard. " +
"Embedding the reference in your term definition or comment " +
"will create a hyperlink to this term.", text);
}
"""
# Background color to display with term class.
colorOf = {"vernacular": "#FFFF66", "canonical": "#3CEB10", "deprecated": "#E8E8E8"}
# Name of months. See :func:`seaice.pretty.printPrettyDate`.
monthOf = [
"January",
"February",
"March",
"April",
"May",
"June",
"July",
"August",
"September",
"October",
"November",
"December",
]
# A nice color: A5C6D6
tag_style = """
style="font-size: 95%;
font-family: 'Sans-Serif', Arial, serif;
color:white; background-color:#0082C3;
border-radius:4px; text-decoration:none"
"""
gtag_style = """
style="font-size: 85%;
font-family: 'Sans-Serif', Arial, serif;
color:blue; background-color:#cceeff;
border-radius:4px; text-decoration:none"
"""
ref_string = '<a href=/term={0} title="{2}">{1}</a>'
tag_string = "<a href=/tag/{0} " + tag_style + "> <b>#</b> {1} </a>"
gtag_string = '<a href=/tag/{0} title="{2}" ' + gtag_style + "> {1} </a>"
term_tag_string = '<a href=/term={0} title="{1}">{2}</a>'
# Regular expressions for string matches.
token_ref_regex = re.compile("(?<!#\{g: )([#&]+)([\w.-]+)")
# Caution: exactly one space here -----^
# The "lookbehind" regex relies on ref_norm using just one space.
# We use it to match #foo NOT inside a #{g:... construct.
# ref_regex = re.compile("#\{\s*(([gstkm])\s*:+)?\s*#*([^}|]*?)(\s*\|+\s*([^}]*?))?\s*\}")
ref_regex = re.compile("#\{\s*(([gstkm])\s*:+)?\s*([^}|]*?)(\s*\|+\s*([^}]*?))?\s*\}")
# subexpr start positions: 01 2 3 4
# _xtag_regex = re.compile('#(([a-zA-Z][a-zA-Z0-9_\-\.]*)_term)') # hack!
# tag_regex = re.compile("#([a-zA-Z][a-zA-Z0-9_\-\.]*[a-zA-Z0-9])")
# _xterm_tag_regex = re.compile('#\{\s*([a-zA-Z0-9]+)\s*:\s*(relate. to[^\{\}]*)\}') # hack!
# term_tag_regex = re.compile("#\{\s*([a-zA-Z0-9]+)\s*:\s*([^\{\}]*)\}")
permalink_regex = re.compile("^http://(.*)$")
# The "uniquerifier" (ixuniq) makes a tag string uniquer for indexing,
# improving search precision when a user clicks on a tag. The ixuniq
# string is prepended to a user-defined tag during storage normalization
# and stripped out during display normalization.
ixuniq = "xq"
ixqlen = len(ixuniq)
tagstart = "#{g: " # note: final space is important
def token_ref_norm(m):
"""Promote "&ref" to "#{t: ref} and promote "#ref" to "#{g: ref}".
:param string: The input string.
:returns: Modified plain text string.
"""
# token = m.group(1)
# if token.startswith('#'):
# return '#{g: ' + token + '}'
sigil = m.group(1)
token = m.group(2)
if sigil == "#":
return tagstart + ixuniq + token + "}"
# return '#{g: ' + ixuniq + token + '}'
elif sigil == "&":
return "#{t: " + token + "}"
else:
return sigil + token # return untouched if doubled
# def _xterm_tag_norm(db_con, m):
# """ Promote old style "#{hNNNN : relate[ds] to}" into new style
# "#{t: term string | hNNNN }".
# """
# concept_id = m.group(1)
# term = db_con.getTermByConceptId(concept_id)
# if term:
# term_string = term['term_string']
# else:
# term_string = concept_id + '(undefined)'
#
# return '#{t: %s | %s }' % (term_string, concept_id)
def refs_norm(db_con, string, force=False):
"""Resolve references in text entries before storing in DB.
First promote each simple "#ref" into "#{g: ixuniq+ref | concept_id}
and each simple "&ref" into #{t: ref | concept_id}
:param db_con: DB connection.
:type db_con: seaice.SeaIceConnector.SeaIceConnector
:param string: The input string.
:returns: Modified plain text string.
"""
string = token_ref_regex.sub(lambda m: token_ref_norm(m), string)
# now convert each curly "#{reference}
string = ref_regex.sub(lambda m: ref_norm(db_con, m, force), string)
return string
# looks a lot like printRefAsHTML, but is about how we _store_ things
def ref_norm(db_con, m, force=False):
"""Input a regular expression match and output a normalized reference.
A DB connector is required to resolve the tag string by ID.
A reference has the form #{reftype: humstring [ | IDstring ]}
- reftype is one of
t (term), g (tag), m (mtype), k (link)
- humstring is the human-readable equivalent of IDstring
- IDstring is a machine-readable string, either a concept_id or,
in the case of "k" link, a URL.
- the normalized reference will include all three parts
- normalization is based on looking up the given humstring, but
that only happens if IDstring is not present or force=True
- humstring will have "(undefined)" added if lookup fails or
"(ambiguous)" added if lookup returns more than one hit
:param db_con: DB connection.
:type db_con: seaice.SeaIceConnector.SeaIceConnector
:param m: Regular expression match.
:type m: re.MatchObject
:param force: flag to force humstring lookup
:type m: boolean
"""
(rp) = m.groups() # rp = ref parts, the part between #{ and }
# we want subexpressions 1, 2, and 4
reftype, humstring, IDstring = rp[1], rp[2], rp[4]
if not reftype:
reftype = "t" # apply default reftype
if not humstring and not IDstring: # when both are empty
return "#{}" # this is all we do for now
# If we get here, one of them is non-empty.
if reftype == "k": # an external link (URL)
if humstring and not IDstring: # assume the caller
IDstring = humstring # mixed up the order
if IDstring and not humstring: # if no humanstring
humstring = IDstring # use link text instead
return "#{k: %s | %s }" % (humstring, IDstring)
# If we get here, reftype is not k, and humstring is expected to
# reference a term_string in the dictionary. If IDstring is empty or
# force=True, humstring is looked up in order to resolve it to a unique
# IDstring (so if IDstring is wrong, delete it or use force=True to
# correct it).
#
if IDstring and not force:
return "#{%s: %s | %s}" % (reftype, humstring, IDstring)
if humstring.startswith("---"): # reserved magic string
return "#{%s: %s}" % (reftype, humstring)
# If we get here, we're going to do the lookup.
prefix = tagstart if reftype == "g" else ""
n, term = db_con.getTermByInitialTermString(prefix + humstring)
if n == 1:
term_string, concept_id = term["term_string"], term["concept_id"]
if reftype == "g":
return term_string # if found, it's already in returnable form
if n == 0:
term_string, concept_id = (humstring + "(undefined)"), "-"
elif n == 2:
term_string, concept_id = (humstring + "(ambiguous)"), "-"
# print >>sys.stderr, "n=%s, humstring=%s, term_string" % (n, humstring, term_string)
return "#{%s: %s | %s}" % (reftype, term_string, concept_id)
# this space ^ is relied on by a (fixed width) lookbehind regex
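# Illustrative end-to-end example of refs_norm/ref_norm (the concept ids shown
# are hypothetical, and assume both look-ups succeed):
#   refs_norm(db, "see &albedo and #floe")
#     -> "see #{t: albedo | h42} and #{g: xqfloe | h87}"
# If a look-up fails, the human-readable part is annotated instead, e.g.
#   "#{t: albedo(undefined) | -}"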
# Processing tags in text areas. #
def innerAnchor(
db_con, term_string, concept_id, definition, tagAsTerm, contentAsTerm=False
):
"""Input ...
A DB connector is required to resolve the concept_id to a definition.
A term_string is either a literal string or of the form
'#{g: humstring | concept_id}'.
Returns an HTML anchor with href and title defined either for a term
page (where one term is central to the page) or for search/browse results.
The returned Inner_Anchor will be of the form
[ id="copyLink" onclick="CopyToClipboard('#{X: %s | %s}');" ]
href='...' title='...'>...
Yes, that's an isolated, unmatched ">" in the return string.
:param db_con: DB connection.
:type db_con: seaice.SeaIceConnector.SeaIceConnector
"""
if definition is not None:
attribs = 'href="/term=%s" title="%s"' % (
concept_id,
processRefsAsText(definition, tagAsTerm=True).replace('"', '&quot;'),
)
else:
attribs = 'href="#" title="Click to get a reference link to this term."'
attribs += ' id="copyLink"'
if not term_string.startswith("#{g:"):
if definition is None:
attribs += ''' onclick="CopyToClipboard('#{t: %s | %s}');"''' % (
term_string,
concept_id,
)
if contentAsTerm:
return attribs + ">Term:"
else:
return attribs + ">" + term_string
# yyy compile these regex's? -- maybe not since execution is rare
t = re.sub("^#{g:\s*(%s)?" % ixuniq, "", term_string)
t = "#" + re.sub("\s*\|.*", "", t)
if definition is None:
attribs += ''' onclick="CopyToClipboard('%s');"''' % term_string
return attribs + ">" + t
def printRefAsHTML(db_con, reftype, humstring, IDstring, tagAsTerm):
"""Input reftype, human readable string, machine readable string,
and output the reference as HTML.
A DB connector is required to resolve the tag string by ID.
A reference has the form #{ reftype: humstring [ | IDstring ] }
- reftype is one of
t (term), g (tag), s (section), m (mtype), k (link)
#t (term), g (tag), e (element), v (value), m (mtype), k (link)
- humstring is the human-readable equivalent of IDstring
- IDstring is a machine-readable string, either a concept_id or,
in the case of "k" link, a URL.
- Note that the reference should have been normalized before being
stored in the database. (xxx check if that's true for API uploading)
:param db_con: DB connection.
:type db_con: seaice.SeaIceConnector.SeaIceConnector
"""
if not reftype:
reftype = "t" # apply default reftype
if not humstring and not IDstring: # when empty
return "#{}" # this is all we do for now
if reftype == "k": # an external link (URL)
if humstring and not IDstring: # assume the caller
IDstring = humstring # mixed up the order
if not humstring: # if no humanstring
humstring = IDstring # use link text instead
if not IDstring.startswith("http:"):
IDstring = "http://" + IDstring
return '<a href="%s">%s</a>' % (IDstring, humstring)
if humstring.startswith("---"): # EndRefs
if humstring.startswith("---e"):
return "<br>Elements: "
if humstring.startswith("---v"):
return "<br>Values: "
if humstring.startswith("---t"):
return "<br> "
# If we get here, reftype is not k, and IDstring (concept_id)
# is expected to reference a term in the dictionary.
#
term = db_con.getTermByConceptId(IDstring)
term_def = "Def: " + (term["definition"] if term else "(undefined)")
# yyy can we improve poor search for '#tag' query?
if reftype == "g":
# yyy in theory don't need to check before removing uniquerifier string
# as all normalized tag ids will start with it
if humstring.startswith(ixuniq): # stored index "uniquerifier" string
humstring = humstring[ixqlen:] # but remove "uniquerifier" on display
if not tagAsTerm:
humstring_lower = humstring.lower()
return gtag_string.format(humstring_lower, humstring, term_def)
else: # if tagAsTerm, format tag like a term
humstring = "#" + humstring # pointing to definition, not search
return ref_string.format(IDstring, humstring, term_def)
# xxx not using tagAsTerm -- remove?
def printRefAsText(m, tagAsTerm):
"""Input a regular expression match and return the reference as Text.
A reference has the form #{ reftype: humstring [ | IDstring ] }
- reftype is one of
t (term), g (tag), s (section), m (mtype), k (link)
#t (term), g (tag), e (element), v (value), m (mtype), k (link)
- humstring is the human-readable equivalent of IDstring
- IDstring is a machine-readable string, either a concept_id or,
in the case of "k" link, a URL.
- Note that the reference should have been normalized before being
stored in the database. (xxx check if that's true for API uploading)
:param m: Regular expression match.
:type m: re.MatchObject
"""
(rp) = m.groups() # rp = ref parts, the part between #{ and }
# we want subexpressions 1, 2, and 4
reftype, humstring, IDstring = rp[1], rp[2], rp[4]
if not reftype:
reftype = "t" # apply default reftype
if not humstring and not IDstring: # when empty
return ""
if reftype == "k": # an external link (URL)
if humstring and not IDstring: # assume the caller
IDstring = humstring # mixed up the order
if not humstring: # if no humanstring
humstring = IDstring # use link text instead
if not IDstring.startswith("http:"):
IDstring = "http://" + IDstring
return "%s (%s)" % (humstring, IDstring)
if humstring.startswith("---"): # EndRefs
if humstring.startswith("---e"):
return "\nElements: "
if humstring.startswith("---v"):
return "\nValues: "
if humstring.startswith("---t"):
return "\n "
# If we get here, reftype is not k, and IDstring (concept_id)
# is expected to reference a term in the dictionary.
#
if reftype == "g":
# yyy in theory don't need to check before removing uniquerifier string
# as all normalized tag ids will start with it
if humstring.startswith(ixuniq): # stored index "uniquerifier" string
humstring = humstring[ixqlen:] # but remove "uniquerifier" on display
return "#" + humstring
return humstring
def processTagsAsHTML(db_con, string, tagAsTerm=False):
"""Process tags in DB text entries into HTML.
:param db_con: DB connection.
:type db_con: seaice.SeaIceConnector.SeaIceConnector
:param string: The input string.
:returns: HTML-formatted string.
"""
# preserve user-defined newlines by converting to line breaks on output
# replace tags afterwards (because replacement may add newlines)
string = string.replace("\n", "\n<br>")
string = ref_regex.sub(lambda m: printRefReAsHTML(db_con, m, tagAsTerm), string)
string = string.replace("##", "#") # escape mechanism
string = string.replace("&&", "&")
return string
def printRefReAsHTML(db_con, m, tagAsTerm):
(rp) = m.groups() # rp = ref parts, the part between #{ and }
# we want subexpressions 1, 2, and 4
reftype, humstring, IDstring = rp[1], rp[2], rp[4]
return printRefAsHTML(db_con, reftype, humstring, IDstring, tagAsTerm)
def processRefsAsText(string, tagAsTerm=False):
"""Render references in DB text entries into plain text.
:param string: The input string.
:returns: tag-neutralized string.
"""
string = ref_regex.sub(lambda m: printRefAsText(m, tagAsTerm), string)
string = string.replace("##", "#") # escape mechanism
string = string.replace("&&", "&")
return string
# Pretty prints. #
def printPrettyDate(T, brief=False):
"""Format output of a timestamp.
If a small amount of time has elapsed between *T_now*
and *T*, then return the interval. **TODO:** This should
be localized based on the HTTP request.
:param T: Timestamp.
:type T: datetime.datetime
:rtype: str
"""
T = T.astimezone(tz.tzlocal())
T_elapsed = datetime.datetime.now(tz=tz.tzlocal()) - T
if T_elapsed < datetime.timedelta(seconds=30):
return "just now"
elif T_elapsed < datetime.timedelta(minutes=1):
return "%s seconds ago" % (T_elapsed.seconds)
elif T_elapsed < datetime.timedelta(hours=1):
return "%s minute%s ago" % (
T_elapsed.seconds // 60,
"" if T_elapsed.seconds // 60 == 1 else "s",
)
elif T_elapsed < datetime.timedelta(hours=24):
return "%s hour%s ago" % (
T_elapsed.seconds // 3600,
"" if T_elapsed.seconds // 3600 == 1 else "s",
)
elif T_elapsed < datetime.timedelta(days=7):
return "%s day%s ago" % (T_elapsed.days, "" if T_elapsed.days == 1 else "s")
else:
mth = monthOf[T.month - 1]
if brief:
mth = mth[0:3]
return "%s %s %s" % (T.day, mth, T.year)
def printAsJSObject(rows, fd=sys.stdout):
"""Print table rows as JSON-formatted object.
:param rows: Table rows.
:type rows: dict iterator
:param fd: File descriptor to which to output the result (default is sys.stdout).
:type fd: file
"""
for row in rows:
for (col, value) in row.items():
if type(value) == datetime.datetime:
row[col] = str(value)
print(json.dumps(rows, sort_keys=True, indent=2, separators=(",", ": ")), file=fd)
def getPrettyParagraph(db_con, text, leftMargin=8, width=60):
"""Format some text into a nice paragraph for displaying in the terminal.
Output the result directly to sys.stdout.
:param db_con: DB connection.
:type db_con: seaice.SeaIceConnector.SeaIceConnector
:param text: The paragraph.
:type text: str
:param leftMargin: Number of spaces to print before the start of each line.
:type leftMargin: int
:param width: Number of characters to print per line.
:type wdith: int
"""
lineLength = 0
fella = " " * (leftMargin - 1)
for word in text.split(" "):
if lineLength < width:
fella += word + " "
lineLength += len(word) + 1
else:
fella += word + "\n" + (" " * (leftMargin - 1))
lineLength = 0
return fella
def getPrettyTerm(db_con, row, leftMargin=5):
"""Return a term string.
:param db_con: DB connection.
:type db_con: seaice.SeaIceConnector.SeaIceConnector
:param row: Table row
:type row: dict
"""
text = " " * leftMargin + "TERM: %-26s ID: %-7d created: %s\n" % (
"%s (%d)" % (row["term_string"], row["up"] - row["down"]),
row["id"],
row["created"].strftime("%Y-%m-%d %H:%M:%S"),
)
text += (
" " * leftMargin
+ "URI: %-40s" % row["persistent_id"]
+ "Last modified: %s" % (row["modified"].strftime("%Y-%m-%d %H:%M:%S"))
)
text += "\n\n"
text += getPrettyParagraph(db_con, "DEFINITION: " + row["definition"])
text += "\n\n"
text += getPrettyParagraph(db_con, "EXAMPLES: " + row["examples"])
# text += "\n Ownership: %s" % db_con.getUserNameById(row['owner_id'])
return text
def getPrettyComment(db_con, row, leftMargin=5):
"""Return a comment string.
:param db_con: DB connection.
:type db_con: seaice.SeaIceConnector.SeaIceConnector
:param rows: Table rows.
:type rows: dict iterator
"""
return "yeah"
def printTermsPretty(db_con, rows):
"""Print term rows to terminal.
:param db_con: DB connection.
:type db_con: seaice.SeaIceConnector.SeaIceConnector
:param rows: Table rows.
:type rows: dict iterator
"""
for row in rows:
print(getPrettyTerm(db_con, row))
def printTermsAsLinks(db_con, rows):
"""Print terms as a link list (pun intended).
:param rows: Table rows.
:type rows: dict iterator
:returns: HTML-formatted string.
"""
string = ""
for row in rows:
# string += '<li><a href="/term=%s">%s</a></li>' % (row['concept_id'], row['term_string'])
string += "<li><a %s</a></li>" % innerAnchor(
db_con,
row["term_string"],
row["concept_id"],
row["definition"],
tagAsTerm=True,
)
return string
def printTermAsHTML(db_con, row, user_id=0):
"""Format a term for the term page, e.g. `this <http://seaice.herokuapp.com/term=1001>`_.
This is the main page where you can look at a term. It includes a term definition,
examples, a voting form, ownership, and other stuff.
:param db_con: DB connection.
:type db_con: seaice.SeaIceConnector.SeaIceConnector
:param row: Term row.
:type row: dict
:param user_id: Surrogate ID of user requesting the page. Defaults to 0 if session is
unauthenticated.
:type user_id: int
:returns: HTML-formatted string.
"""
vote = db_con.getVote(0 if not user_id else user_id, row["id"])
string = (
"<script>"
+ js_confirmRemoveTerm
+ js_termAction
+ js_copyToClipboard
+ "</script>"
)
# Voting
string += "<table>"
string += " <tr><td width=150px rowspan=4 align=center valign=top>"
string += (
' <a id="voteUp" title="+1" href="#up" onclick="return TermAction(%s, \'up\');">'
% row["id"]
)
string += ' <img src="/static/img/%s.png"></a><br>' % (
"up_set" if vote == 1 else "up"
)
string += " <h4>"
if row["up"] > 0:
string += ' <font color="#004d73">+%s</font> ' % row["up"]
if row["down"] > 0:
string += ' <font color="#797979">-%s</font>' % row["down"]
if row["up"] == 0 and row["down"] == 0:
string += "0"
string += " </h4>"
string += (
' <a id="voteDown" title="-1" href="#down" onclick="return TermAction(%s, \'down\');">'
% row["id"]
)
string += ' <img src="/static/img/%s.png"></a><br>' % (
"down_set" if vote == -1 else "down"
)
good = db_con.checkTracking(0 if not user_id else user_id, row["id"])
string += (
' <br><a id="star" title="Track this term" href="#star"'
+ " onclick=\"return TermAction({1}, '{0}');\">[{2}]</a><br> ".format(
("unstar" if good else "star"), row["id"], "unwatch" if good else "watch"
)
)
string += " </td></tr>\n"
iAnchor = innerAnchor(
db_con,
row["term_string"],
row["concept_id"],
None,
tagAsTerm=True,
contentAsTerm=True,
)
# Name/Class
string += " <tr>"
string += " <td valign=top width=8%><i><a {0}</a></i></td>".format(iAnchor)
string += (
' <td valign=top width=25%><font size="3"><strong><a href=\'/term='
+ row["concept_id"]
+ "'>"
+ row["term_string"]
+ "</a></strong></font><td>"
)
string += " <td valign=top width=5% rowspan=2>"
string += " <nobr><i>Class: </i></nobr><br>"
string += " </td>\n"
string += " <td valign=top width=16% rowspan=2>"
string += ' <nobr><font style="background-color:{2};border-radius:4px;"> {0} </font> <i> ({1}%)</i></nobr><br>'.format(
row["class"], int(100 * row["consensus"]), colorOf[row["class"]]
)
string += " </td>\n"
# Retrieve persistent_id
term_persistent_id = row["persistent_id"]
if term_persistent_id is None:
persistent_id = ""
permalink = ""
else:
persistent_id = term_persistent_id
permalink = permalink_regex.search(persistent_id).groups(0)[0]
# Created/modified/Owner
string += " <td valign=top width=20% rowspan=3>"
string += " <nobr><i>Created %s</i></nobr><br>" % printPrettyDate(
row["created"]
)
string += " <nobr><i>Last modified %s</i></nobr><br>" % printPrettyDate(
row["modified"]
)
string += (
" <nobr><i>Contributed by</i> %s</nobr><br>"
% db_con.getUserNameById(row["owner_id"], full=True)
)
orcid = db_con.getOrcidById(row["owner_id"])
if orcid:
string += (
" <nobr><i>ORCID</i> <a target='_blank' href='https://sandbox.orcid.org/%s'>%s</a></nobr><br>"
% (orcid, orcid)
)
if persistent_id != "":
string += " <br>"
string += (
" <nobr><i>Permalink:</i><br> " + permalink + "</nobr><br>"
)
if user_id == row["owner_id"]:
string += ' <br><a href="/term=%s/edit">[edit]</a>' % row["concept_id"]
string += """ <a id="removeTerm" title="Click to delete term" href="#"
onclick="return ConfirmRemoveTerm(%s, '%s');">[remove]</a><br>\n""" % (
row["id"],
row["concept_id"],
)
# Copy reference tag
# string += ''' <hr><a id="copyLink" title="Click to get a reference link to this term." href="#"
# onclick="CopyToClipboard('#{t: %s | %s}');">Get term link</a>''' % (row['term_string'], row['concept_id'])
string += " </td>"
string += " </tr>\n"
# Definition/Examples
string += " <tr>"
string += " <td valign=top><i>Definition:</i></td>"
string += (
" <td colspan=4 valign=top style='padding-right:36px'><font size=\"3\"> %s</font></td>"
% processTagsAsHTML(db_con, row["definition"])
)
string += " </tr>"
string += " <tr>"
string += " <td valign=top><i>Examples:</i></td>"
string += (
" <td colspan=4 valign=top style='padding-right:36px'><font size=\"3\"> %s</font></td>"
% processTagsAsHTML(db_con, row["examples"])
)
string += " </tr>"
string += "</table>"
return string
# xxx not called right now -- needed?
def printTermsAsHTML(db_con, rows, user_id=0):
"""Format search results for display on the web page.
:param db_con: DB connection.
:type db_con: seaice.SeaIceConnector.SeaIceConnector
:param row: Term rows.
:type row: dict iterator
:param user_id: Surrogate ID of user requesting the page. Defaults to 0 if session is
unauthenticated.
:type user_id: int
:returns: HTML-formatted string.
"""
string = "<script>" + js_confirmRemoveTerm + "</script><table>"
for row in rows:
string += " <tr>"
string += " <td valign=top width=75%><i>Term:</i>"
string += ' <font size="3"><strong>{0}</strong></font>'.format(
row["term_string"]
)
string += ' <a href="/term=%s">[view]</a>' % row["concept_id"]
if user_id == row["owner_id"]:
string += ' <a href="/term=%s/edit">[edit]</a>' % row["concept_id"]
string += """ <a id="removeTerm" title="Click to delete term" href="#"
onclick="return ConfirmRemoveTerm(%s, '%s');">[remove]</a>""" % (
row["id"],
row["concept_id"],
)
string += ' <i>Class:</i> <font style="background-color:{2}"> {0} </font> <i> ({1}%)</i>'.format(
row["class"], int(100 * row["consensus"]), colorOf[row["class"]]
)
string += " </td>"
string += " <td valign=top rowspan=2>"
string += " <nobr><i>Created %s</i></nobr><br>" % printPrettyDate(
row["created"]
)
string += " <nobr><i>Last modified %s</i></nobr><br>" % printPrettyDate(
row["modified"]
)
string += (
" <nobr><i>Contributed by</i> %s</nobr><br>"
% db_con.getUserNameById(row["owner_id"], full=True)
)
orcid = db_con.getOrcidById(row["owner_id"])
if orcid:
string += (
" <nobr><i>ORCID</i> <a target='_blank' href='https://sandbox.orcid.org/%s'>%s</a></nobr><br>"
% (orcid, orcid)
)
string += " </td>"
string += " </tr>"
string += " <tr>"
string += " <td valign=top>"
string += (
' <i>Definition:</i> <font size="3"> %s</font> '
% processTagsAsHTML(db_con, row["definition"])
)
string += (
' <i>Examples:</i> <font size="3"> %s</font></td>'
% processTagsAsHTML(db_con, row["examples"])
)
string += " </tr>"
string += " <tr height=16><td></td></tr>"
string += "</table>"
return string
def summarizeConsensus(consensus):
"""
Return 'high', 'medium' or 'low' as a rough indicator of consensus.
"""
cons = int(100 * consensus)
if cons >= 70:
return "high"
elif cons >= 30:
return "medium"
else:
return "low"
def printTermsAsBriefHTML(db_con, rows, user_id=0):
"""Format table rows as abbreviated HTML table, e.g.
`this <http://seaice.herokuapp.com/browse/volatile>`_.
:param db_con: DB connection.
:type db_con: seaice.SeaIceConnector.SeaIceConnector
:param row: Term rows.
:type row: dict iterator
:param user_id: Surrogate ID of user requesting the page. Defaults to 0 if session is
unauthenticated.
:type user_id: int
:returns: HTML-formatted string.
"""
string = "<table width=100%>"
string += """<tr style="background-color:#E8E8E8"><td class='col-lg-5'>Term</td>
<td class='col-lg-1' style="text-align: center">Score</td>
<td class='col-lg-1'>Consensus</td><td class='col-lg-1'>Class</td>
<td class='col-lg-2'>Contributed by</td>
<td class='col-lg-2'>Last modified</td></tr>"""
for row in rows:
iAnchor = innerAnchor(
db_con,
row["term_string"],
row["concept_id"],
row["definition"],
tagAsTerm=True,
)
# string += '''<tr><td><a title="Def: {8}" href=/term={5}>{0}</a></td><td>{1}</td><td>{2}</td>
string += "<tr><td class='col-lg-5'><a %s</a></td>" % iAnchor
orcid = db_con.getOrcidById(row["owner_id"])
name = db_con.getUserNameById(row["owner_id"], full=True)
string += """<td class='col-lg-1' style="text-align: center">{0}</td>
<td class='col-lg-1' style="text-align: center">{1}</td>
<td class='col-lg-1'><font style="background-color:{4}"> {2} </font></td>
<td class='col-lg-2'>{3}</td>
<td class='col-lg-2'>{5}</tr>""".format(
# processTagsAsHTML(db_con, row['term_string'], tagAsTerm=True),
row["up"] - row["down"],
summarizeConsensus(row["consensus"]),
row["class"],
"<a target='_blank' href='https://sandbox.orcid.org/%s'>%s</a>"
% (orcid, name)
if orcid
else name,
# row['concept_id'],
colorOf[row["class"]],
printPrettyDate(row["modified"]),
)
string += "</table>"
return string
def printCommentsAsHTML(db_con, rows, user_id=0):
"""Format comments for display on the term page.
:param db_con: DB connection.
:type db_con: seaice.SeaIceConnector.SeaIceConnector
:param row: Comment rows.
:type row: dict iterator
:param user_id: Surrogate ID of user requesting the page. Defaults to 0 if session is
unauthenticated.
:type user_id: int
:returns: HTML-formatted string.
"""
string = (
"<script>"
+ js_confirmRemoveComment
+ '</script><table style="margin-left: 50px"><tr><td><hr></td></tr>'
)
for row in rows:
string += "<tr>"
string += " <td align=left valign=top width=70%>{0}".format(
processTagsAsHTML(db_con, row["comment_string"])
)
if user_id == row["owner_id"]:
string += ' <nobr><a href="/comment=%d/edit">[edit]</a>' % row["id"]
string += (
""" <a id="removeComment" title="Click to remove this comment" href="#"
onclick="return ConfirmRemoveComment(%s);">[remove]</a></nobr>"""
% row["id"]
)
orcid = db_con.getOrcidById(row["owner_id"])
name = db_con.getUserNameById(row["owner_id"], True)
string += " - "
if orcid:
string += "<a target='_blank' href='https://sandbox.orcid.org/{0}'>{1}</a>".format(
orcid, name
)
else:
string += name
string += ' <font color="#B8B8B8"> <i>{0}</i></font>'.format(
printPrettyDate(row["created"])
)
string += "<hr></td></tr>"
string += "</table>"
return string
``` |
{
"source": "jkupferer/containers-quickstarts",
"score": 2
} |
#### File: openshift-template-deployer/operator/operator.py
```python
import kopf
import kubernetes
import os
import json
import subprocess
import yaml
operator_domain = os.environ.get('OPERATOR_DOMAIN', 'app.example.com')
config_map_label = operator_domain + '/config'
app_name_label = operator_domain + '/name'
if os.path.exists('/var/run/secrets/kubernetes.io/serviceaccount/namespace'):
kubernetes.config.load_incluster_config()
namespace = open("/var/run/secrets/kubernetes.io/serviceaccount/namespace").read()
else:
kubernetes.config.load_kube_config()
namespace = kubernetes.config.list_kube_config_contexts()[1]['context']['namespace']
core_v1_api = kubernetes.client.CoreV1Api()
custom_objects_api = kubernetes.client.CustomObjectsApi()
def owner_reference_from_resource(resource):
return dict(
apiVersion = resource['apiVersion'],
controller = True,
blockOwnerDeletion = False,
kind = resource['kind'],
name = resource['metadata']['name'],
uid = resource['metadata']['uid']
)
def process_template(owner_reference, template_name, template_namespace, template_parameters):
'''
Use `oc` to process template and produce resource list json.
'''
oc_process_cmd = [
'oc', 'process', template_namespace + '//' + template_name,
'-l', '{0}={1}'.format(app_name_label, owner_reference['name']),
'-o', 'json',
]
for k, v in template_parameters.items():
oc_process_cmd.extend(['-p', '{0}={1}'.format(k, v)])
oc_process_result = subprocess.run(oc_process_cmd, stdout=subprocess.PIPE, check=True)
resource_list = json.loads(oc_process_result.stdout)
add_owner_reference(resource_list, owner_reference)
return resource_list
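# Sketch of the command assembled above, with hypothetical template name,
# namespace and parameters (the default operator_domain is assumed):
#   oc process shared-templates//my-template \
#       -l app.example.com/name=<owner-name> -o json -p REPLICAS=2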
def add_owner_reference(resource_list, owner_reference):
'''
Add owner references to resource definition metadata.
'''
for item in resource_list['items']:
metadata = item['metadata']
if 'ownerReferences' in metadata:
if owner_reference not in metadata['ownerReferences']:
metadata['ownerReferences'].append(owner_reference)
else:
metadata['ownerReferences'] = [owner_reference]
def sanity_check_config_map(config_map):
metadata = config_map['metadata']
name = metadata['name']
    if 'data' not in config_map or 'config' not in config_map['data']:
raise kopf.PermanentError('Config map must include config data')
def deploy_app_from_config_map(config_map, logger):
'''
Deploy application based on config map
'''
sanity_check_config_map(config_map)
name = config_map['metadata']['name']
try:
config = yaml.safe_load(config_map['data']['config'])
except yaml.parser.ParserError as e:
raise kopf.PermanentError('Unable to load config YAML: {0}'.format(str(e)))
owner_reference = owner_reference_from_resource(config_map)
deploy_app(owner_reference, config, logger)
def deploy_app(owner_reference, config, logger):
logger.info("Deploying app '%s'", owner_reference['name'])
if 'template' in config:
template_name = config['template'].get('name')
template_namespace = config['template'].get('namespace', namespace)
template_parameters = config['template'].get('parameters', {})
logger.info("Processing resources from template %s//%s", template_namespace, template_name)
resource_list = process_template(owner_reference, template_name, template_namespace, template_parameters)
oc_apply_result = subprocess.run(
['oc', 'apply', '-f', '-'],
check=True,
input=json.dumps(resource_list).encode('utf-8'),
stdout=subprocess.PIPE,
)
for line in oc_apply_result.stdout.decode('utf-8').splitlines():
logger.info(line)
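# Illustrative ConfigMap "config" payload consumed by deploy_app_from_config_map
# and deploy_app (all names and parameter values below are hypothetical):
#
#   template:
#     name: my-template
#     namespace: openshift
#     parameters:
#       APP_NAME: example
#       REPLICAS: "2"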
@kopf.on.startup()
def configure(settings: kopf.OperatorSettings, **_):
# Disable scanning for CustomResourceDefinitions
settings.scanning.disabled = True
@kopf.on.create('', 'v1', 'configmaps', labels={config_map_label: kopf.PRESENT})
def on_create_config_map(body, logger, **_):
logger.info("New app ConfigMap '%s'", body['metadata']['name'])
deploy_app_from_config_map(body, logger)
``` |
{
"source": "jkupka/dash-leaflet",
"score": 2
} |
#### File: jkupka/dash-leaflet/usage_gallery.py
```python
import json
import dash
import dash_core_components as dcc
import dash_html_components as html
import dash_leaflet as dl
import settings
from dash.dependencies import Output, Input
# Mapbox setup
mapbox_url = "https://api.mapbox.com/styles/v1/mapbox/{id}/tiles/{{z}}/{{x}}/{{y}}{{r}}?access_token={access_token}"
mapbox_token = settings.MAPBOX_TOKEN
mapbox_ids = ["light-v9", "dark-v9", "streets-v9", "outdoors-v9", "satellite-streets-v9"]
# region Example 1
MAP_ID = "map-id"
BASE_LAYER_ID = "base-layer-id"
BASE_LAYER_DROPDOWN_ID = "base-layer-drop-down-id"
COORDINATE_CLICK_ID = "coordinate-click-id"
def render_example1():
comment = """ Marker with default icon, marker with custom icon, circle marker (fixed pixel radius),
circle (fixed physical radius), polyline, polygon and rectangle, all supporting tooltips and popups. """
return [
html.H1("Example 1: Basic components"),
html.P(comment),
dl.Map(id=MAP_ID, style={'width': '1000px', 'height': '500px'}, center=[56.05, 10.25], zoom=10, children=[
dl.TileLayer(id=BASE_LAYER_ID),
# Marker with tool tip and popup.
dl.Marker(position=[56, 9.8], children=[
dl.Tooltip("Marker tooltip"),
dl.Popup([
html.H1("Marker popup"),
html.P("with inline html")
])
]),
# Marker with custom icon.
dl.Marker(position=[55.94, 9.96], icon={
"iconUrl": "/assets/149059.svg",
"iconSize": [25, 25]
}, children=[
dl.Tooltip("Marker with custom icon")
]),
# Circle marker (with fixed radius in pixel).
dl.CircleMarker(center=[56.05, 10.15], radius=20, children=[
dl.Popup('Circle marker, 20px')
]),
# Circle with fixed radius in meters.
dl.Circle(center=[56.145, 10.21], radius=2000, color='rgb(255,128,0)', children=[
dl.Tooltip('Circle, 2km radius')
]),
# Polyline marker.
dl.Polyline(id='polyline', positions=[[56.06, 10.0],
[56.056, 10.01],
[56.064, 10.028],
[56.0523, 10.0717],
[56.044, 10.073]], children=[
dl.Tooltip('Polyline')
]),
# Polygon marker.
dl.Polygon(id='polygon', positions=[[56.013, 9.84],
[56.0544, 9.939],
[56.003, 10.001]], children=[
dl.Tooltip('Polygon')
]),
# Rectangle marker.
dl.Rectangle(id='rectangle', bounds=[[55.9, 10.2], [56.0, 10.5]], children=[
dl.Tooltip('Rectangle')
])]),
dcc.RadioItems(
id=BASE_LAYER_DROPDOWN_ID,
options=[{"label": i, "value": mapbox_url.format(id=i, access_token=mapbox_token)} for i in mapbox_ids],
labelStyle={'display': 'inline-block'},
value=mapbox_url.format(id="light-v9", access_token=mapbox_token)
),
html.P("Coordinate (click on map):"),
html.Div(id=COORDINATE_CLICK_ID),
]
def register_example1(app):
@app.callback(Output(BASE_LAYER_ID, "url"),
[Input(BASE_LAYER_DROPDOWN_ID, "value")])
def set_baselayer(url):
return url
@app.callback(Output(COORDINATE_CLICK_ID, 'children'),
[Input(MAP_ID, 'click_lat_lng')])
def click_coord(e):
if e is not None:
return json.dumps(e)
else:
return "-"
# endregion
# region Example 2
def render_example2():
return [
html.H1("Example 2: WMSTileLayer"),
dl.Map(style={'width': '1000px', 'height': '500px'},
center=[40, -100],
zoom=4,
children=[
dl.TileLayer(url=mapbox_url.format(id="dark-v9", access_token=mapbox_token)),
dl.WMSTileLayer(url="http://mesonet.agron.iastate.edu/cgi-bin/wms/nexrad/n0r.cgi",
layers="nexrad-n0r-900913",
format="image/png",
transparent=True),
])
]
# endregion
# region Example 3
def render_example3():
return [
html.H1("Example 3: ImageOverlay"),
dl.Map(style={'width': '1000px', 'height': '500px'},
bounds=[[40.712216, -74.22655], [40.773941, -74.12544]],
children=[
dl.TileLayer(),
dl.ImageOverlay(opacity=0.5, url="/assets/newark_nj_1922.jpg",
bounds=[[40.712216, -74.22655], [40.773941, -74.12544]])
]),
]
# endregion
# region Example 4
BUTTON_PLAY_ID = "button-play"
VIDEO_OVERLAY_ID = "video-overlay-id"
def render_example4():
return [
html.H1("Example 4: VideoOverlay"),
dl.Map(style={'width': '1000px', 'height': '500px'},
bounds=[[32, -130], [13, -100]],
children=[
dl.TileLayer(url=mapbox_url.format(id="satellite-streets-v9", access_token=mapbox_token)),
dl.VideoOverlay(id=VIDEO_OVERLAY_ID, url="/assets/patricia_nasa.webm",
bounds=[[32, -130], [13, -100]]),
]),
html.Button(id=BUTTON_PLAY_ID, children="Play/pause"),
]
def register_example4(app):
@app.callback(Output(VIDEO_OVERLAY_ID, 'play'),
[Input(BUTTON_PLAY_ID, 'n_clicks')])
def play_pause(n):
if n is None or n % 2 == 0:
return True
else:
return False
# endregion
# region Example 5
GEOTIFF_ID = "geotiff-id"
GEOTIFF_MARKER_ID = "geotiff-marker-id"
def render_example5():
# Example from https://plot.ly/python/scatter-plots-on-maps/#us-airports-map
color_domain = dict(domainMin=20, domainMax=40, colorscale=['white', 'orange', 'red'])
return [
html.H1("Example 5: GeoTIFFOverlay"),
html.P("US airports (most arrivals)"),
dl.Map(style={'width': '1000px', 'height': '500px'},
center=[25, 45],
zoom=5,
children=[
dl.TileLayer(url="https://cartodb-basemaps-{s}.global.ssl.fastly.net/dark_nolabels/{z}/{x}/{y}.png"),
dl.GeoTIFFOverlay(id=GEOTIFF_ID, interactive=True, url="/assets/tz850.tiff", band=1, opacity=0.9,
**color_domain),
dl.Colorbar(width=200, height=20, **color_domain, unit="°C", style={'color': 'white'}),
html.Div(id=GEOTIFF_MARKER_ID)
]),
]
def register_example5(app):
@app.callback(Output(GEOTIFF_MARKER_ID, 'children'),
[Input(GEOTIFF_ID, 'click_lat_lng_val')])
def geotiff_marker(x):
if x is not None:
lat, lon, val = x
return dl.Marker(position=[lat, lon], icon={
"iconUrl": "/assets/thermometer.png",
"iconSize": [40, 40],
"iconAnchor": [20, 36]
}, children=[
dl.Tooltip('{:.1f}°C'.format(val))
])
else:
return None
# endregion
app = dash.Dash(__name__, external_scripts=['https://codepen.io/chriddyp/pen/bWLwgP.css'])
# Create layout.
app.layout = html.Div(
render_example1() +
render_example2() +
render_example3() +
render_example4() +
render_example5()
)
# Bind callbacks.
register_example1(app)
register_example4(app)
register_example5(app)
if __name__ == '__main__':
app.run_server(debug=False, port=8150)
``` |
{
"source": "jku/python-tuf",
"score": 3
} |
#### File: python-tuf/tests/test_examples.py
```python
import glob
import os
import shutil
import tempfile
import unittest
from pathlib import Path
class TestRepoExamples(unittest.TestCase):
"""Unit test class for 'repo_example' scripts.
Provides a '_run_example_script' method to run (exec) a script located in
the 'repo_example' directory.
"""
@classmethod
def setUpClass(cls):
"""Locate and cache 'repo_example' dir."""
base = Path(__file__).resolve().parents[1]
cls.repo_examples_dir = base / "examples" / "repo_example"
def setUp(self):
"""Create and change into test dir.
NOTE: Test scripts are expected to create dirs/files in new CWD."""
self.original_cwd = os.getcwd()
self.base_test_dir = os.path.realpath(tempfile.mkdtemp())
os.chdir(self.base_test_dir)
def tearDown(self):
"""Change back to original dir and remove test dir, which may contain
dirs/files the test created at test-time CWD."""
os.chdir(self.original_cwd)
shutil.rmtree(self.base_test_dir)
def _run_script_and_assert_files(self, script_name, filenames_created):
"""Run script in 'repo_example' dir and assert that it created the
files corresponding to the passed filenames inside a 'tmp*' test dir at
CWD."""
script_path = str(self.repo_examples_dir / script_name)
with open(script_path, "rb") as f:
# pylint: disable=exec-used
exec(
compile(f.read(), script_path, "exec"),
{"__file__": script_path},
)
test_dirs = glob.glob("tmp*")
self.assertTrue(
len(test_dirs) == 1, f"expected 1 'tmp*' test dir, got {test_dirs}"
)
test_dir = test_dirs.pop()
for name in filenames_created:
metadata_path = Path(test_dir) / f"{name}"
self.assertTrue(
metadata_path.exists(), f"missing '{metadata_path}' file"
)
def test_basic_repo(self):
"""Run 'basic_repo.py' and assert creation of metadata files."""
self._run_script_and_assert_files(
"basic_repo.py",
[
"1.python-scripts.json",
"1.root.json",
"1.snapshot.json",
"1.targets.json",
"2.root.json",
"2.snapshot.json",
"2.targets.json",
"timestamp.json",
],
)
if __name__ == "__main__":
unittest.main()
```
#### File: python-tuf/tests/test_updater_key_rotations.py
```python
import os
import sys
import tempfile
import unittest
from dataclasses import dataclass
from typing import Dict, List, Optional, Type
from securesystemslib.signer import SSlibSigner
from tests import utils
from tests.repository_simulator import RepositorySimulator
from tests.utils import run_sub_tests_with_dataset
from tuf.api.metadata import Key, Metadata, Root
from tuf.exceptions import UnsignedMetadataError
from tuf.ngclient import Updater
@dataclass
class MdVersion:
keys: List[int]
threshold: int
sigs: List[int]
res: Optional[Type[Exception]] = None
class TestUpdaterKeyRotations(unittest.TestCase):
"""Test ngclient root rotation handling"""
# set dump_dir to trigger repository state dumps
dump_dir: Optional[str] = None
@classmethod
def setUpClass(cls) -> None:
cls.sim: RepositorySimulator
cls.metadata_dir: str
# pylint: disable-next=consider-using-with
cls.temp_dir = tempfile.TemporaryDirectory()
# Pre-create a bunch of keys and signers
cls.keys: List[Key] = []
cls.signers: List[SSlibSigner] = []
for _ in range(10):
key, signer = RepositorySimulator.create_key()
cls.keys.append(key)
cls.signers.append(signer)
@classmethod
def tearDownClass(cls) -> None:
cls.temp_dir.cleanup()
def setup_subtest(self) -> None:
# Setup repository for subtest: make sure no roots have been published
self.sim = RepositorySimulator()
self.sim.signed_roots.clear()
self.sim.root.version = 0
if self.dump_dir is not None:
# create subtest dumpdir
name = f"{self.id().split('.')[-1]}-{self.case_name}"
self.sim.dump_dir = os.path.join(self.dump_dir, name)
os.mkdir(self.sim.dump_dir)
def _run_refresh(self) -> None:
"""Create new updater, run refresh"""
if self.sim.dump_dir is not None:
self.sim.write()
# bootstrap with initial root
self.metadata_dir = tempfile.mkdtemp(dir=self.temp_dir.name)
with open(os.path.join(self.metadata_dir, "root.json"), "bw") as f:
f.write(self.sim.signed_roots[0])
updater = Updater(
self.metadata_dir,
"https://example.com/metadata/",
fetcher=self.sim,
)
updater.refresh()
# fmt: off
root_rotation_cases = {
"1-of-1 key rotation": [
MdVersion(keys=[1], threshold=1, sigs=[1]),
MdVersion(keys=[2], threshold=1, sigs=[2, 1]),
MdVersion(keys=[2], threshold=1, sigs=[2]),
],
"1-of-1 key rotation, unused signatures": [
MdVersion(keys=[1], threshold=1, sigs=[3, 1, 4]),
MdVersion(keys=[2], threshold=1, sigs=[3, 2, 1, 4]),
MdVersion(keys=[2], threshold=1, sigs=[3, 2, 4]),
],
"1-of-1 key rotation fail: not signed with old key": [
MdVersion(keys=[1], threshold=1, sigs=[1]),
MdVersion(keys=[2], threshold=1, sigs=[2, 3, 4], res=UnsignedMetadataError),
],
"1-of-1 key rotation fail: not signed with new key": [
MdVersion(keys=[1], threshold=1, sigs=[1]),
MdVersion(keys=[2], threshold=1, sigs=[1, 3, 4], res=UnsignedMetadataError),
],
"3-of-5, sign with different keycombos": [
MdVersion(keys=[0, 1, 2, 3, 4], threshold=3, sigs=[0, 2, 4]),
MdVersion(keys=[0, 1, 2, 3, 4], threshold=3, sigs=[0, 4, 1]),
MdVersion(keys=[0, 1, 2, 3, 4], threshold=3, sigs=[0, 1, 3]),
MdVersion(keys=[0, 1, 2, 3, 4], threshold=3, sigs=[0, 1, 3]),
],
"3-of-5, one key rotated": [
MdVersion(keys=[0, 1, 2, 3, 4], threshold=3, sigs=[0, 2, 4]),
MdVersion(keys=[0, 1, 3, 4, 5], threshold=3, sigs=[0, 4, 1]),
],
"3-of-5, one key rotate fails: not signed with 3 new keys": [
MdVersion(keys=[0, 1, 2, 3, 4], threshold=3, sigs=[0, 2, 4]),
MdVersion(keys=[0, 1, 3, 4, 5], threshold=3, sigs=[0, 2, 4], res=UnsignedMetadataError),
],
"3-of-5, one key rotate fails: not signed with 3 old keys": [
MdVersion(keys=[0, 1, 2, 3, 4], threshold=3, sigs=[0, 2, 4]),
MdVersion(keys=[0, 1, 3, 4, 5], threshold=3, sigs=[0, 4, 5], res=UnsignedMetadataError),
],
"3-of-5, one key rotated, with intermediate step": [
MdVersion(keys=[0, 1, 2, 3, 4], threshold=3, sigs=[0, 2, 4]),
MdVersion(keys=[0, 1, 3, 4, 5], threshold=3, sigs=[0, 2, 4, 5]),
MdVersion(keys=[0, 1, 3, 4, 5], threshold=3, sigs=[0, 4, 5]),
],
"3-of-5, all keys rotated, with intermediate step": [
MdVersion(keys=[0, 1, 2, 3, 4], threshold=3, sigs=[0, 2, 4]),
MdVersion(keys=[5, 6, 7, 8, 9], threshold=3, sigs=[0, 2, 4, 5, 6, 7]),
MdVersion(keys=[5, 6, 7, 8, 9], threshold=3, sigs=[5, 6, 7]),
],
"1-of-3 threshold increase to 2-of-3": [
MdVersion(keys=[1, 2, 3], threshold=1, sigs=[1]),
MdVersion(keys=[1, 2, 3], threshold=2, sigs=[1, 2]),
],
"1-of-3 threshold bump to 2-of-3 fails: new threshold not reached": [
MdVersion(keys=[1, 2, 3], threshold=1, sigs=[1]),
MdVersion(keys=[1, 2, 3], threshold=2, sigs=[2], res=UnsignedMetadataError),
],
"2-of-3 threshold decrease to 1-of-3": [
MdVersion(keys=[1, 2, 3], threshold=2, sigs=[1, 2]),
MdVersion(keys=[1, 2, 3], threshold=1, sigs=[1, 2]),
MdVersion(keys=[1, 2, 3], threshold=1, sigs=[1]),
],
"2-of-3 threshold decr. to 1-of-3 fails: old threshold not reached": [
MdVersion(keys=[1, 2, 3], threshold=2, sigs=[1, 2]),
MdVersion(keys=[1, 2, 3], threshold=1, sigs=[1], res=UnsignedMetadataError),
],
"1-of-2 threshold increase to 2-of-2": [
MdVersion(keys=[1], threshold=1, sigs=[1]),
MdVersion(keys=[1, 2], threshold=2, sigs=[1, 2]),
],
}
# fmt: on
@run_sub_tests_with_dataset(root_rotation_cases)
def test_root_rotation(self, root_versions: List[MdVersion]) -> None:
"""Test Updater.refresh() with various sequences of root updates
Each MdVersion in the list describes root keys and signatures of a
remote root metadata version. As an example:
MdVersion([1,2,3], 2, [1,2])
defines a root that contains keys 1, 2 and 3 with threshold 2. The
metadata is signed with keys 1 and 2.
Assert that refresh() result is expected and that local root on disk is
the expected one after all roots have been loaded from remote using the
standard client update workflow.
"""
self.setup_subtest()
# Publish all remote root versions defined in root_versions
for rootver in root_versions:
# clear root keys, signers
self.sim.root.roles[Root.type].keyids.clear()
self.sim.signers[Root.type].clear()
self.sim.root.roles[Root.type].threshold = rootver.threshold
for i in rootver.keys:
self.sim.root.add_key(Root.type, self.keys[i])
for i in rootver.sigs:
self.sim.add_signer(Root.type, self.signers[i])
self.sim.root.version += 1
self.sim.publish_root()
# run client workflow, assert success/failure
expected_error = root_versions[-1].res
if expected_error is None:
self._run_refresh()
expected_local_root = self.sim.signed_roots[-1]
else:
# failure expected: local root should be the root before last
with self.assertRaises(expected_error):
self._run_refresh()
expected_local_root = self.sim.signed_roots[-2]
# assert local root on disk is expected
with open(os.path.join(self.metadata_dir, "root.json"), "rb") as f:
self.assertEqual(f.read(), expected_local_root)
# fmt: off
non_root_rotation_cases: Dict[str, MdVersion] = {
"1-of-1 key rotation":
MdVersion(keys=[2], threshold=1, sigs=[2]),
"1-of-1 key rotation, unused signatures":
MdVersion(keys=[1], threshold=1, sigs=[3, 1, 4]),
"1-of-1 key rotation fail: not signed with new key":
MdVersion(keys=[2], threshold=1, sigs=[1, 3, 4], res=UnsignedMetadataError),
"3-of-5, one key signature wrong: not signed with 3 expected keys":
MdVersion(keys=[0, 1, 3, 4, 5], threshold=3, sigs=[0, 2, 4], res=UnsignedMetadataError),
"2-of-5, one key signature mising: threshold not reached":
MdVersion(keys=[0, 1, 3, 4, 5], threshold=3, sigs=[0, 4], res=UnsignedMetadataError),
"3-of-5, sign first combo":
MdVersion(keys=[0, 1, 2, 3, 4], threshold=3, sigs=[0, 2, 4]),
"3-of-5, sign second combo":
MdVersion(keys=[0, 1, 2, 3, 4], threshold=3, sigs=[0, 4, 1]),
"3-of-5, sign third combo":
MdVersion(keys=[0, 1, 2, 3, 4], threshold=3, sigs=[0, 1, 3]),
"3-of-5, sign fourth combo":
MdVersion(keys=[0, 1, 2, 3, 4], threshold=3, sigs=[1, 2, 3]),
"3-of-5, sign fifth combo":
MdVersion(keys=[0, 1, 2, 3, 4], threshold=3, sigs=[2, 3, 4]),
}
# fmt: on
@run_sub_tests_with_dataset(non_root_rotation_cases)
def test_non_root_rotations(self, md_version: MdVersion) -> None:
"""Test Updater.refresh() with various sequences of metadata updates
Each MdVersion in the list describes metadata keys and signatures
of a remote metadata version. As an example:
MdVersion([1,2,3], 2, [1,2])
defines a metadata that contains keys 1, 2 and 3 with threshold 2. The
metadata is signed with keys 1 and 2.
Assert that refresh() result is expected and that local metadata on disk
is the expected one after all roots have been loaded from remote using
the standard client update workflow.
"""
self.setup_subtest()
roles = ["timestamp", "snapshot", "targets"]
for role in roles:
# clear role keys, signers
self.sim.root.roles[role].keyids.clear()
self.sim.signers[role].clear()
self.sim.root.roles[role].threshold = md_version.threshold
for i in md_version.keys:
self.sim.root.add_key(role, self.keys[i])
for i in md_version.sigs:
self.sim.add_signer(role, self.signers[i])
self.sim.root.version += 1
self.sim.publish_root()
# run client workflow, assert success/failure
expected_error = md_version.res
if expected_error is None:
self._run_refresh()
# Call fetch_metadata to sign metadata with new keys
expected_local_md: Metadata = self.sim._fetch_metadata(role)
# assert local metadata role is on disk as expected
md_path = os.path.join(self.metadata_dir, f"{role}.json")
with open(md_path, "rb") as f:
data = f.read()
self.assertEqual(data, expected_local_md)
else:
# failure expected
with self.assertRaises(expected_error):
self._run_refresh()
if __name__ == "__main__":
if "--dump" in sys.argv:
TestUpdaterKeyRotations.dump_dir = tempfile.mkdtemp()
print(f"Repository dumps in {TestUpdaterKeyRotations.dump_dir}")
sys.argv.remove("--dump")
utils.configure_test_logging(sys.argv)
unittest.main()
```
#### File: python-tuf/tests/test_updater_with_simulator.py
```python
import builtins
import os
import sys
import tempfile
import unittest
from typing import Optional, Tuple
from unittest.mock import MagicMock, Mock, patch
from tests import utils
from tests.repository_simulator import RepositorySimulator
from tuf.api.metadata import SPECIFICATION_VERSION, TargetFile, Targets
from tuf.exceptions import BadVersionNumberError, UnsignedMetadataError
from tuf.ngclient import Updater
class TestUpdater(unittest.TestCase):
"""Test ngclient Updater using the repository simulator."""
# set dump_dir to trigger repository state dumps
dump_dir: Optional[str] = None
def setUp(self) -> None:
# pylint: disable-next=consider-using-with
self.temp_dir = tempfile.TemporaryDirectory()
self.metadata_dir = os.path.join(self.temp_dir.name, "metadata")
self.targets_dir = os.path.join(self.temp_dir.name, "targets")
os.mkdir(self.metadata_dir)
os.mkdir(self.targets_dir)
# Setup the repository, bootstrap client root.json
self.sim = RepositorySimulator()
with open(os.path.join(self.metadata_dir, "root.json"), "bw") as f:
root = self.sim.download_bytes(
"https://example.com/metadata/1.root.json", 100000
)
f.write(root)
if self.dump_dir is not None:
# create test specific dump directory
name = self.id().split(".")[-1]
self.sim.dump_dir = os.path.join(self.dump_dir, name)
os.mkdir(self.sim.dump_dir)
def tearDown(self) -> None:
self.temp_dir.cleanup()
def _run_refresh(self) -> Updater:
"""Creates a new updater and runs refresh."""
if self.sim.dump_dir is not None:
self.sim.write()
updater = Updater(
self.metadata_dir,
"https://example.com/metadata/",
self.targets_dir,
"https://example.com/targets/",
self.sim,
)
updater.refresh()
return updater
def test_refresh(self) -> None:
# Update top level metadata
self._run_refresh()
# New root (root needs to be explicitly signed)
self.sim.root.version += 1
self.sim.publish_root()
self._run_refresh()
# New timestamp
self.sim.update_timestamp()
self._run_refresh()
# New targets, snapshot, timestamp version
self.sim.targets.version += 1
self.sim.update_snapshot()
self._run_refresh()
targets: utils.DataSet = {
"standard case": ("targetpath", b"content", "targetpath"),
"non-asci case": ("åäö", b"more content", "%C3%A5%C3%A4%C3%B6"),
"subdirectory case": (
"a/b/c/targetpath",
b"dir target content",
"a%2Fb%2Fc%2Ftargetpath",
),
}
@utils.run_sub_tests_with_dataset(targets)
def test_targets(self, test_case_data: Tuple[str, bytes, str]) -> None:
targetpath, content, encoded_path = test_case_data
path = os.path.join(self.targets_dir, encoded_path)
updater = self._run_refresh()
# target does not exist yet, configuration is what we expect
self.assertIsNone(updater.get_targetinfo(targetpath))
self.assertTrue(self.sim.root.consistent_snapshot)
self.assertTrue(updater.config.prefix_targets_with_hash)
# Add targets to repository
self.sim.targets.version += 1
self.sim.add_target("targets", content, targetpath)
self.sim.update_snapshot()
updater = self._run_refresh()
# target now exists, is not in cache yet
info = updater.get_targetinfo(targetpath)
assert info is not None
# Test without and with explicit local filepath
self.assertIsNone(updater.find_cached_target(info))
self.assertIsNone(updater.find_cached_target(info, path))
# download target, assert it is in cache and content is correct
self.assertEqual(path, updater.download_target(info))
self.assertEqual(path, updater.find_cached_target(info))
self.assertEqual(path, updater.find_cached_target(info, path))
with open(path, "rb") as f:
self.assertEqual(f.read(), content)
# download using explicit filepath as well
os.remove(path)
self.assertEqual(path, updater.download_target(info, path))
self.assertEqual(path, updater.find_cached_target(info))
self.assertEqual(path, updater.find_cached_target(info, path))
def test_fishy_rolenames(self) -> None:
roles_to_filenames = {
"../a": "..%2Fa.json",
".": "..json",
"/": "%2F.json",
"ö": "%C3%B6.json",
}
# Add new delegated targets, update the snapshot
spec_version = ".".join(SPECIFICATION_VERSION)
targets = Targets(1, spec_version, self.sim.safe_expiry, {}, None)
for role in roles_to_filenames:
self.sim.add_delegation(
"targets", role, targets, False, ["*"], None
)
self.sim.update_snapshot()
updater = self._run_refresh()
# trigger updater to fetch the delegated metadata, check filenames
updater.get_targetinfo("anything")
local_metadata = os.listdir(self.metadata_dir)
for fname in roles_to_filenames.values():
self.assertTrue(fname in local_metadata)
def test_keys_and_signatures(self) -> None:
"""Example of the two trickiest test areas: keys and root updates"""
# Update top level metadata
self._run_refresh()
# New targets: signed with only a new key that is not in roles keys
old_signers = self.sim.signers.pop("targets")
key, signer = self.sim.create_key()
self.sim.add_signer("targets", signer)
self.sim.targets.version += 1
self.sim.update_snapshot()
with self.assertRaises(UnsignedMetadataError):
self._run_refresh()
# New root: Add the new key as targets role key
# (root changes require explicit publishing)
self.sim.root.add_key("targets", key)
self.sim.root.version += 1
self.sim.publish_root()
self._run_refresh()
# New root: Raise targets threshold to 2
self.sim.root.roles["targets"].threshold = 2
self.sim.root.version += 1
self.sim.publish_root()
with self.assertRaises(UnsignedMetadataError):
self._run_refresh()
# New targets: sign with both new and any original keys
for signer in old_signers.values():
self.sim.add_signer("targets", signer)
self.sim.targets.version += 1
self.sim.update_snapshot()
self._run_refresh()
def test_snapshot_rollback_with_local_snapshot_hash_mismatch(self) -> None:
# Test triggering snapshot rollback check on a newly downloaded snapshot
# when the local snapshot is loaded even when there is a hash mismatch
# with timestamp.snapshot_meta.
# By raising this flag on timestamp update the simulator would:
# 1) compute the hash of the new modified version of snapshot
# 2) assign the hash to timestamp.snapshot_meta
# The purpose is to create a hash mismatch between timestamp.meta and
# the local snapshot, but to have hash match between timestamp.meta and
# the next snapshot version.
self.sim.compute_metafile_hashes_length = True
# Initialize all metadata and assign targets version higher than 1.
self.sim.targets.version = 2
self.sim.update_snapshot()
self._run_refresh()
# The new targets must have a lower version than the local trusted one.
self.sim.targets.version = 1
self.sim.update_snapshot()
# During the snapshot update, the local snapshot will be loaded even if
# there is a hash mismatch with timestamp.snapshot_meta, because it will
# be considered as trusted.
# Should fail as a new version of snapshot will be fetched which lowers
# the snapshot.meta["targets.json"] version by 1 and throws an error.
with self.assertRaises(BadVersionNumberError):
self._run_refresh()
@patch.object(builtins, "open", wraps=builtins.open)
def test_not_loading_targets_twice(self, wrapped_open: MagicMock) -> None:
# Do not load targets roles more than once when traversing
# the delegations tree
# Add new delegated targets, update the snapshot
spec_version = ".".join(SPECIFICATION_VERSION)
targets = Targets(1, spec_version, self.sim.safe_expiry, {}, None)
self.sim.add_delegation("targets", "role1", targets, False, ["*"], None)
self.sim.update_snapshot()
# Run refresh, top-level roles are loaded
updater = self._run_refresh()
# Clean up calls to open during refresh()
wrapped_open.reset_mock()
# First time looking for "somepath", only 'role1' must be loaded
updater.get_targetinfo("somepath")
wrapped_open.assert_called_once_with(
os.path.join(self.metadata_dir, "role1.json"), "rb"
)
wrapped_open.reset_mock()
# Second call to get_targetinfo, all metadata is already loaded
updater.get_targetinfo("somepath")
wrapped_open.assert_not_called()
if __name__ == "__main__":
if "--dump" in sys.argv:
TestUpdater.dump_dir = tempfile.mkdtemp()
print(f"Repository Simulator dumps in {TestUpdater.dump_dir}")
sys.argv.remove("--dump")
utils.configure_test_logging(sys.argv)
unittest.main()
``` |
{
"source": "jkurdek/TensorFlowASR",
"score": 3
} |
#### File: tensorflow_asr/mwer/monotonic_rnnt_loss.py
```python
from cached_property import cached_property
import tensorflow as tf
logger = tf.get_logger()
LOG_0 = float("-inf")
class MonotonicRnntLoss(tf.keras.losses.Loss):
def __init__(
self,
blank=0,
global_batch_size=None,
name=None,
):
super().__init__(reduction=tf.keras.losses.Reduction.NONE, name=name)
self.blank = blank
self.global_batch_size = global_batch_size
def call(self, y_true, y_pred):
loss = monotonic_rnnt_loss(
logits=y_pred["logits"],
logit_length=y_pred["logits_length"],
labels=y_true["labels"],
label_length=y_true["labels_length"],
blank=self.blank,
name=self.name,
)
return tf.nn.compute_average_loss(loss, global_batch_size=self.global_batch_size)
@tf.function
def monotonic_rnnt_loss(
logits,
labels,
label_length,
logit_length,
blank=0,
name=None,
):
name = "rnnt_loss" if name is None else name
with tf.name_scope(name):
logits = tf.convert_to_tensor(logits, name="logits")
labels = tf.convert_to_tensor(labels, name="labels")
label_length = tf.convert_to_tensor(label_length, name="label_length")
logit_length = tf.convert_to_tensor(logit_length, name="logit_length")
@tf.custom_gradient
def compute_rnnt_loss_and_grad(logits_t: tf.Tensor):
"""Compute RNN-T loss and gradients."""
loss_data = MonotonicRnntData(
logits=logits_t,
labels=labels,
label_length=label_length,
logit_length=logit_length,
)
return -loss_data.log_loss, loss_data.backprop
output = compute_rnnt_loss_and_grad(logits)
return output
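# Minimal usage sketch (illustrative shapes only): index 0 of the vocabulary is
# the blank symbol, and the decoder axis of the logits has length
# max(label_length) + 1.
#
#   batch, T, U, vocab = 2, 10, 5, 32
#   logits = tf.random.normal([batch, T, U + 1, vocab])
#   labels = tf.random.uniform([batch, U], minval=1, maxval=vocab, dtype=tf.int32)
#   loss = monotonic_rnnt_loss(logits, labels,
#                              label_length=tf.fill([batch], U),
#                              logit_length=tf.fill([batch], T))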
class MonotonicRnntData:
def __init__(
self,
logits: tf.Tensor,
labels: tf.Tensor,
logit_length: tf.Tensor,
label_length: tf.Tensor,
):
super().__init__()
self._logits = logits
self._labels = labels
self._logit_length = logit_length
self._label_length = label_length
def backprop(self, loss: tf.Tensor) -> tf.Tensor:
return tf.reshape(loss, shape=[-1, 1, 1, 1]) * self.grads
@cached_property
def grads(self) -> tf.Tensor:
"""Computes gradients w.r.t logits.
Returns: tf.Tensor shape = [batch_size, input_max_len, target_max_len + 1, vocab_size + 1], dtype = tf.float32
"""
left_side = tf.exp(
tf.expand_dims(self.alpha + self.beta - tf.reshape(self.log_loss, shape=[self.batch_size, 1, 1]), axis=3)
+ self.log_probs
)
right_side = tf.concat([self.grads_blank, self.grads_truth], axis=3)
grads_logits = left_side - right_side
return grads_logits
@cached_property
def grads_truth(self) -> tf.Tensor:
"""Computes part of the RHS corresponding to k = y_u+1
Returns: tf.Tensor shape = [batch_size, input_max_len, target_max_len + 1, vocab_size], dtype = tf.float32
"""
grads_truth = tf.exp(
(
self.alpha[:, :-1, :-1]
+ self.beta[:, 1:, 1:]
- tf.reshape(self.log_loss, shape=[self.batch_size, 1, 1])
+ self.truth_probs[:, :-1, :]
)
)
grads_truth = tf.expand_dims(tf.pad(grads_truth, [[0, 0], [0, 1], [0, 1]], "CONSTANT"), axis=3)
grads_truth = (
tf.tile(grads_truth, multiples=[1, 1, 1, self.vocab_size - 1])
* tf.pad(self.one_hot_labels, [[0, 0], [0, 0], [0, 1], [0, 0]], "CONSTANT")[:, :, :, 1:]
)
return grads_truth
@cached_property
def grads_blank(self) -> tf.Tensor:
"""Computes part of the RHS corresponding to k = blank
Returns: tf.Tensor shape = [batch_size, input_max_len, target_max_len + 1, 1], dtype = tf.float32
"""
beta_expanded = tf.tensor_scatter_nd_update(
tf.pad(self.beta, [[0, 0], [0, 1], [0, 0]], "CONSTANT", constant_values=LOG_0)[:, 1:, :],
indices=tf.concat(
[
tf.reshape(tf.range(self.batch_size, dtype=tf.int32), shape=[self.batch_size, 1]),
self.last_elem_indices,
],
axis=1,
),
updates=tf.zeros(shape=[self.batch_size], dtype=tf.float32),
)
grads_blank = tf.exp(
(self.alpha + beta_expanded - tf.reshape(self.log_loss, shape=[self.batch_size, 1, 1]) + self.blank_probs)
)
return tf.expand_dims(grads_blank, axis=3)
@cached_property
def alpha(self) -> tf.Tensor:
"""Computes the forward alpha variable
Returns: tf.Tensor shape = [batch_size, input_max_len, target_max_len + 1], dtype = tf.float32
"""
def next_state(last_output, trans_probs):
blank_probs = trans_probs[0]
truth_probs = trans_probs[1]
alpha_b = last_output + blank_probs
alpha_t = tf.concat(
[LOG_0 * tf.ones(shape=[self.batch_size, 1]), last_output[:, :-1] + truth_probs], axis=1
)
alpha_next = tf.math.reduce_logsumexp(tf.stack([alpha_b, alpha_t], axis=0), axis=0)
return alpha_next
initial_alpha = tf.concat(
[
tf.zeros(shape=[self.batch_size, 1]),
tf.ones(shape=[self.batch_size, self.target_max_len - 1]) * LOG_0,
],
axis=1,
)
blank_probs_t = tf.transpose(self.blank_probs, perm=[1, 0, 2])
truth_probs_t = tf.transpose(self.truth_probs, perm=[1, 0, 2])
fwd = tf.scan(next_state, (blank_probs_t[:-1, :, :], truth_probs_t[:-1, :, :]), initializer=initial_alpha)
alpha = tf.concat([tf.expand_dims(initial_alpha, axis=0), fwd], axis=0)
alpha = tf.transpose(alpha, perm=[1, 0, 2])
return alpha
@cached_property
def beta(self) -> tf.Tensor:
"""Computes the backward beta variable.
Returns: tf.Tensor shape = [batch_size, input_max_len, target_max_len + 1], dtype = tf.float32
"""
def next_state(last_output, mask_and_trans_probs):
mask_s, blank_probs, truth_probs = mask_and_trans_probs
beta_b = last_output + blank_probs
beta_t = tf.pad(last_output[:, 1:] + truth_probs, [[0, 0], [0, 1]], "CONSTANT", constant_values=LOG_0)
beta_next = tf.math.reduce_logsumexp(tf.stack([beta_b, beta_t], axis=0), axis=0)
masked_beta_next = self.nan_to_zero(beta_next * tf.expand_dims(mask_s, axis=1)) + self.nan_to_zero(
last_output * tf.expand_dims((1.0 - mask_s), axis=1)
)
return tf.reshape(masked_beta_next, shape=tf.shape(last_output))
beta_init_val = tf.gather_nd(self.blank_probs, self.last_elem_indices, batch_dims=1)
# Initial beta for batches.
initial_beta_mask = tf.one_hot(self._label_length, depth=self.target_max_len)
initial_beta = tf.expand_dims(beta_init_val, axis=1) * initial_beta_mask + self.nan_to_zero(
LOG_0 * (1.0 - initial_beta_mask)
)
beta_mask = tf.transpose(
tf.sequence_mask(self._logit_length, maxlen=self.input_max_len, dtype=tf.float32), perm=[1, 0]
)
blank_probs_t = tf.transpose(self.blank_probs, perm=[1, 0, 2])
truth_probs_t = tf.transpose(self.truth_probs, perm=[1, 0, 2])
bwd = tf.scan(
next_state,
(beta_mask[1:, :], blank_probs_t[:-1, :, :], truth_probs_t[:-1, :, :]),
initializer=initial_beta,
reverse=True,
)
beta = tf.concat([bwd, tf.expand_dims(initial_beta, axis=0)], axis=0)
beta = tf.transpose(beta, perm=[1, 0, 2])
# remove beta entries that are beyond T and U of a given batch element
beta = beta + tf.math.log(tf.cast(self.dp_mask, dtype=tf.float32))
return beta
@cached_property
def log_loss(self) -> tf.Tensor:
"""Log loss defined by ln P(y*|x)."""
return self.beta[:, 0, 0]
@property
def dp_mask(self) -> tf.Tensor:
"""Computes mask for each elem of the batch
The mask indicates the region of interest for each batch element,
that is the area bounded by label_length[i] and logit_length[i] where i
is an index over the batch dimension.
Returns: tf.Tensor shape = [batch_size, input_max_len, target_max_len + 1, vocab_size + 1], dtype = tf.float32
"""
label_mask = tf.expand_dims(
tf.sequence_mask(self._label_length + 1, maxlen=self.target_max_len, dtype=tf.float32),
axis=1,
)
input_mask = tf.expand_dims(
tf.sequence_mask(self._logit_length, maxlen=self.input_max_len, dtype=tf.float32), axis=2
)
return label_mask * input_mask
@cached_property
def last_elem_indices(self) -> tf.Tensor:
return tf.stack([self._logit_length - 1, self._label_length], axis=1)
@cached_property
def truth_probs(self) -> tf.Tensor:
"""Log probabilites of obtaining symbol y_u+1 at each encoder step t and decoder step u.
Returns: tf.Tensor shape = [batch_size, input_max_len, target_max_len], dtype = tf.float32
"""
return tf.reduce_sum(tf.multiply(self.log_probs[:, :, :-1, :], self.one_hot_labels), axis=-1)
@cached_property
def blank_probs(self) -> tf.Tensor:
"""Log probabilites of obtaining a blank symbol at each encoder and decoder step.
Returns: tf.Tensor shape = [batch_size, input_max_len, target_max_len + 1], dtype = tf.float32
"""
return self.log_probs[:, :, :, 0]
@cached_property
def log_probs(self) -> tf.Tensor:
return tf.nn.log_softmax(self._logits)
@cached_property
def one_hot_labels(self) -> tf.Tensor:
return tf.one_hot(
tf.tile(tf.expand_dims(self._labels, axis=1), multiples=[1, self.input_max_len, 1]),
depth=self.vocab_size,
)
@cached_property
def batch_size(self) -> tf.Tensor:
return tf.shape(self._logits)[0]
@cached_property
def input_max_len(self) -> tf.Tensor:
return tf.shape(self._logits)[1]
@cached_property
def target_max_len(self) -> tf.Tensor:
return tf.shape(self._logits)[2]
@cached_property
def vocab_size(self) -> tf.Tensor:
return tf.shape(self._logits)[3]
# TO-DO: remove need for this function
def nan_to_zero(self, input_tensor: tf.Tensor) -> tf.Tensor:
return tf.where(tf.math.is_nan(input_tensor), tf.zeros_like(input_tensor), input_tensor)
``` |
{
"source": "jkur/salt",
"score": 2
} |
#### File: salt/beacons/service.py
```python
from __future__ import absolute_import
import logging
log = logging.getLogger(__name__) # pylint: disable=invalid-name
def beacon(config):
'''
Scan for the configured services and fire events
Example Config
.. code-block:: yaml
beacons:
service:
salt-master:
mysql:
The config above sets up beacons to check for
the salt-master and mysql services.
'''
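    # Each returned item maps a configured service to its current status, e.g.
    # [{'salt-master': True}, {'mysql': False}] (illustrative values).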
return [{srvc: __salt__['service.status'](srvc)} for srvc in config]
```
#### File: salt/cli/ssh.py
```python
from __future__ import print_function
from __future__ import absolute_import
import salt.client.ssh
from salt.utils import parsers
class SaltSSH(parsers.SaltSSHOptionParser):
'''
Used to Execute the salt ssh routine
'''
def run(self):
self.parse_args()
ssh = salt.client.ssh.SSH(self.config)
ssh.run()
```
#### File: salt/modules/quota.py
```python
from __future__ import absolute_import
# Import python libs
import logging
# Import salt libs
import salt.utils
from salt.exceptions import CommandExecutionError, SaltInvocationError
log = logging.getLogger(__name__)
# Define a function alias in order not to shadow built-in's
__func_alias__ = {
'set_': 'set'
}
def __virtual__():
'''
Only work on POSIX-like systems with setquota binary available
'''
if not salt.utils.is_windows() and salt.utils.which('setquota'):
return 'quota'
return False
def report(mount):
'''
Report on quotas for a specific volume
CLI Example:
.. code-block:: bash
salt '*' quota.report /media/data
'''
ret = {mount: {}}
ret[mount]['User Quotas'] = _parse_quota(mount, '-u')
ret[mount]['Group Quotas'] = _parse_quota(mount, '-g')
return ret
def _parse_quota(mount, opts):
'''
Parse the output from repquota. Requires that -u -g are passed in
'''
cmd = 'repquota -vp {0} {1}'.format(opts, mount)
out = __salt__['cmd.run'](cmd, python_shell=False).splitlines()
mode = 'header'
if '-u' in opts:
quotatype = 'Users'
elif '-g' in opts:
quotatype = 'Groups'
ret = {quotatype: {}}
for line in out:
if not line:
continue
comps = line.split()
if mode == 'header':
if 'Block grace time' in line:
blockg, inodeg = line.split(';')
blockgc = blockg.split(': ')
inodegc = inodeg.split(': ')
ret['Block Grace Time'] = blockgc[-1:]
ret['Inode Grace Time'] = inodegc[-1:]
elif line.startswith('-'):
mode = 'quotas'
elif mode == 'quotas':
if len(comps) < 8:
continue
            if comps[0] not in ret[quotatype]:
ret[quotatype][comps[0]] = {}
ret[quotatype][comps[0]]['block-used'] = comps[2]
ret[quotatype][comps[0]]['block-soft-limit'] = comps[3]
ret[quotatype][comps[0]]['block-hard-limit'] = comps[4]
ret[quotatype][comps[0]]['block-grace'] = comps[5]
ret[quotatype][comps[0]]['file-used'] = comps[6]
ret[quotatype][comps[0]]['file-soft-limit'] = comps[7]
ret[quotatype][comps[0]]['file-hard-limit'] = comps[8]
ret[quotatype][comps[0]]['file-grace'] = comps[9]
return ret
def set_(device, **kwargs):
'''
Calls out to setquota, for a specific user or group
CLI Example:
.. code-block:: bash
salt '*' quota.set /media/data user=larry block-soft-limit=1048576
salt '*' quota.set /media/data group=painters file-hard-limit=1000
'''
empty = {'block-soft-limit': 0, 'block-hard-limit': 0,
'file-soft-limit': 0, 'file-hard-limit': 0}
current = None
cmd = 'setquota'
if 'user' in kwargs:
cmd += ' -u {0} '.format(kwargs['user'])
parsed = _parse_quota(device, '-u')
if kwargs['user'] in parsed:
current = parsed['Users'][kwargs['user']]
else:
current = empty
ret = 'User: {0}'.format(kwargs['user'])
if 'group' in kwargs:
if 'user' in kwargs:
raise SaltInvocationError(
'Please specify a user or group, not both.'
)
cmd += ' -g {0} '.format(kwargs['group'])
parsed = _parse_quota(device, '-g')
if kwargs['group'] in parsed:
current = parsed['Groups'][kwargs['group']]
else:
current = empty
ret = 'Group: {0}'.format(kwargs['group'])
if not current:
raise CommandExecutionError('A valid user or group was not found')
for limit in ('block-soft-limit', 'block-hard-limit',
'file-soft-limit', 'file-hard-limit'):
if limit in kwargs:
current[limit] = kwargs[limit]
cmd += '{0} {1} {2} {3} {4}'.format(current['block-soft-limit'],
current['block-hard-limit'],
current['file-soft-limit'],
current['file-hard-limit'],
device)
result = __salt__['cmd.run_all'](cmd, python_shell=False)
if result['retcode'] != 0:
raise CommandExecutionError(
'Unable to set desired quota. Error follows: \n{0}'
.format(result['stderr'])
)
return {ret: current}
def warn():
'''
Runs the warnquota command, to send warning emails to users who
are over their quota limit.
CLI Example:
.. code-block:: bash
salt '*' quota.warn
'''
    __salt__['cmd.run']('warnquota')
def stats():
'''
Runs the quotastats command, and returns the parsed output
CLI Example:
.. code-block:: bash
salt '*' quota.stats
'''
ret = {}
out = __salt__['cmd.run']('quotastats').splitlines()
for line in out:
if not line:
continue
comps = line.split(': ')
ret[comps[0]] = comps[1]
return ret
def on(device):
'''
Turns on the quota system
CLI Example:
.. code-block:: bash
        salt '*' quota.on /media/data
'''
cmd = 'quotaon {0}'.format(device)
__salt__['cmd.run'](cmd, python_shell=False)
return True
def off(device):
'''
Turns off the quota system
CLI Example:
.. code-block:: bash
        salt '*' quota.off /media/data
'''
cmd = 'quotaoff {0}'.format(device)
__salt__['cmd.run'](cmd, python_shell=False)
return True
def get_mode(device):
'''
Report whether the quota system for this device is on or off
CLI Example:
.. code-block:: bash
        salt '*' quota.get_mode /media/data
'''
ret = {}
cmd = 'quotaon -p {0}'.format(device)
out = __salt__['cmd.run'](cmd, python_shell=False)
for line in out.splitlines():
comps = line.strip().split()
if comps[3] not in ret:
if comps[0].startswith('quotaon'):
if comps[1].startswith('Mountpoint'):
ret[comps[4]] = 'disabled'
continue
elif comps[1].startswith('Cannot'):
ret[device] = 'Not found'
return ret
continue
ret[comps[3]] = {
'device': comps[4].replace('(', '').replace(')', ''),
}
ret[comps[3]][comps[0]] = comps[6]
return ret
```
#### File: salt/states/alias.py
```python
def present(name, target):
'''
Ensures that the named alias is present with the given target or list of
targets. If the alias exists but the target differs from the previous
entry, the target(s) will be overwritten. If the alias does not exist, the
alias will be created.
name
The local user/address to assign an alias to
target
The forwarding address
'''
ret = {'name': name,
'changes': {},
'result': False,
'comment': ''}
if __salt__['aliases.has_target'](name, target):
ret['result'] = True
ret['comment'] = 'Alias {0} already present'.format(name)
return ret
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Alias {0} -> {1} is set to be added'.format(
name, target
)
return ret
if __salt__['aliases.set_target'](name, target):
ret['changes'] = {'alias': name}
ret['result'] = True
ret['comment'] = 'Set email alias {0} -> {1}'.format(name, target)
return ret
else:
ret['result'] = False
ret['comment'] = 'Failed to set alias'
return ret
def absent(name):
'''
Ensure that the named alias is absent
name
The alias to remove
'''
ret = {'name': name,
'changes': {},
'result': False,
'comment': ''}
if not __salt__['aliases.get_target'](name):
ret['result'] = True
ret['comment'] = 'Alias {0} already absent'.format(name)
return ret
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Alias {0} is set to be removed'.format(name)
return ret
if __salt__['aliases.rm_alias'](name):
ret['changes'] = {'alias': name}
ret['result'] = True
ret['comment'] = 'Removed alias {0}'.format(name)
return ret
else:
ret['result'] = False
ret['comment'] = 'Failed to remove alias'
return ret
```
#### File: salt/states/pushover.py
```python
def __virtual__():
'''
Only load if the pushover module is available in __salt__
'''
return 'pushover' if 'pushover.post_message' in __salt__ else False
def post_message(name,
user=None,
device=None,
message=None,
title=None,
priority=None,
expire=None,
retry=None,
sound=None,
api_version=1,
token=None):
'''
Send a message to a PushOver channel.
.. code-block:: yaml
pushover-message:
pushover.post_message:
- user: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
- token: <KEY>
- title: Salt Returner
- device: phone
- priority: -1
- expire: 3600
- retry: 5
The following parameters are required:
name
The unique name for this event.
user
The user or group of users to send the message to. Must be ID of user, not name
or email address.
message
The message that is to be sent to the PushOver channel.
The following parameters are optional:
title
The title to use for the message.
device
The device for the user to send the message to.
priority
The priority for the message.
expire
The message should expire after specified amount of seconds.
retry
The message should be resent this many times.
token
The token for PushOver to use for authentication,
if not specified in the configuration options of master or minion.
'''
ret = {'name': name,
'changes': {},
'result': False,
'comment': ''}
if __opts__['test']:
ret['comment'] = 'The following message is to be sent to PushOver: {0}'.format(message)
ret['result'] = None
return ret
if not user:
ret['comment'] = 'PushOver user is missing: {0}'.format(user)
return ret
if not message:
ret['comment'] = 'PushOver message is missing: {0}'.format(message)
return ret
result = __salt__['pushover.post_message'](
user=user,
message=message,
title=title,
device=device,
priority=priority,
expire=expire,
retry=retry,
token=token,
)
if result:
ret['result'] = True
ret['comment'] = 'Sent message: {0}'.format(name)
else:
ret['comment'] = 'Failed to send message: {0}'.format(name)
return ret
```
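One subtlety in `post_message()` above is ordering: the `__opts__['test']` branch returns before the `user`/`message` checks, so a dry run reports what would be sent even when required arguments are missing. A rough, stubbed illustration (the stubbed dunders and values are hypothetical):

```python
# Assumes salt is importable; dunders are stubbed here only to show control flow.
import salt.states.pushover as pushover_state

pushover_state.__salt__ = {'pushover.post_message': lambda **kwargs: True}

pushover_state.__opts__ = {'test': True}
print(pushover_state.post_message('notify-ops')['result'])   # None (dry run, no validation)

pushover_state.__opts__ = {'test': False}
print(pushover_state.post_message('notify-ops')['comment'])  # 'PushOver user is missing: None'
```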
#### File: unit/modules/brew_test.py
```python
from __future__ import absolute_import
# Import Salt Testing Libs
from salttesting import skipIf, TestCase
from salttesting.mock import MagicMock, patch, NO_MOCK, NO_MOCK_REASON
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
# Import Salt Libs
from salt.modules import brew
# Global Variables
brew.__context__ = {}
brew.__salt__ = {}
TAPS_STRING = 'homebrew/dupes\nhomebrew/science\nhomebrew/x11'
TAPS_LIST = ['homebrew/dupes', 'homebrew/science', 'homebrew/x11']
HOMEBREW_BIN = '/usr/local/bin/brew'
@skipIf(NO_MOCK, NO_MOCK_REASON)
class BrewTestCase(TestCase):
'''
TestCase for salt.modules.brew module
'''
# '_list_taps' function tests: 1
def test_list_taps(self):
'''
Tests the return of the list of taps
'''
mock_taps = MagicMock(return_value={'stdout': TAPS_STRING})
mock_user = MagicMock(return_value='foo')
mock_cmd = MagicMock(return_value='')
with patch.dict(brew.__salt__, {'file.get_user': mock_user,
'cmd.run_all': mock_taps,
'cmd.run': mock_cmd}):
self.assertEqual(brew._list_taps(), TAPS_LIST)
# '_tap' function tests: 3
@patch('salt.modules.brew._list_taps', MagicMock(return_value=TAPS_LIST))
def test_tap_installed(self):
'''
Tests if tap argument is already installed or not
'''
self.assertTrue(brew._tap('homebrew/science'))
@patch('salt.modules.brew._list_taps', MagicMock(return_value={}))
def test_tap_failure(self):
'''
Tests if the tap installation failed
'''
mock_failure = MagicMock(return_value={'retcode': 1})
mock_user = MagicMock(return_value='foo')
mock_cmd = MagicMock(return_value='')
with patch.dict(brew.__salt__, {'cmd.run_all': mock_failure,
'file.get_user': mock_user,
'cmd.run': mock_cmd,
'cmd.retcode': mock_failure}):
self.assertFalse(brew._tap('homebrew/test'))
@patch('salt.modules.brew._list_taps', MagicMock(return_value=TAPS_LIST))
def test_tap(self):
'''
Tests adding unofficial GitHub repos to the list of brew taps
'''
with patch.dict(brew.__salt__, {'cmd.retcode': MagicMock(return_value=0)}):
self.assertTrue(brew._tap('homebrew/test'))
# '_homebrew_bin' function tests: 1
def test_homebrew_bin(self):
'''
Tests the path to the homebrew binary
'''
mock_path = MagicMock(return_value='/usr/local')
with patch.dict(brew.__salt__, {'cmd.run': mock_path}):
self.assertEqual(brew._homebrew_bin(), '/usr/local/bin/brew')
# 'list_pkgs' function tests: 2
# Only tested a few basics
# Full functionality should be tested in integration phase
def test_list_pkgs_removed(self):
'''
Tests removed implementation
'''
self.assertEqual(brew.list_pkgs(removed=True), {})
def test_list_pkgs_versions_true(self):
'''
Tests if pkg.list_pkgs is already in context and is a list
'''
mock_context = {'foo': ['bar']}
with patch.dict(brew.__context__, {'pkg.list_pkgs': mock_context}):
self.assertEqual(brew.list_pkgs(versions_as_list=True),
mock_context)
# 'version' function tests: 1
def test_version(self):
'''
Tests version name returned
'''
mock_version = MagicMock(return_value='0.1.5')
with patch.dict(brew.__salt__, {'pkg_resource.version': mock_version}):
self.assertEqual(brew.version('foo'), '0.1.5')
# 'latest_version' function tests: 0
# It has not been fully implemented
# 'remove' function tests: 1
# Only tested a few basics
# Full functionality should be tested in integration phase
@patch('salt.modules.brew.list_pkgs',
MagicMock(return_value={'test': '0.1.5'}))
def test_remove(self):
'''
Tests if package to be removed exists
'''
mock_params = MagicMock(return_value=({'foo': None}, 'repository'))
with patch.dict(brew.__salt__,
{'pkg_resource.parse_targets': mock_params}):
self.assertEqual(brew.remove('foo'), {})
# 'refresh_db' function tests: 2
@patch('salt.modules.brew._homebrew_bin',
MagicMock(return_value=HOMEBREW_BIN))
def test_refresh_db_failure(self):
'''
Tests an update of homebrew package repository failure
'''
mock_user = MagicMock(return_value='foo')
mock_failure = MagicMock(return_value={'retcode': 1})
with patch.dict(brew.__salt__, {'file.get_user': mock_user,
'cmd.run_all': mock_failure}):
self.assertFalse(brew.refresh_db())
@patch('salt.modules.brew._homebrew_bin',
MagicMock(return_value=HOMEBREW_BIN))
def test_refresh_db(self):
'''
Tests a successful update of homebrew package repository
'''
mock_user = MagicMock(return_value='foo')
mock_success = MagicMock(return_value={'retcode': 0})
with patch.dict(brew.__salt__, {'file.get_user': mock_user,
'cmd.run_all': mock_success}):
self.assertTrue(brew.refresh_db())
# 'install' function tests: 1
# Only tested a few basics
# Full functionality should be tested in integration phase
def test_install(self):
'''
Tests if package to be installed exists
'''
mock_params = MagicMock(return_value=[None, None])
with patch.dict(brew.__salt__,
{'pkg_resource.parse_targets': mock_params}):
self.assertEqual(brew.install('name=foo'), {})
if __name__ == '__main__':
from integration import run_tests
run_tests(BrewTestCase, needs_daemon=False)
```
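The tests above all lean on the same idiom: `patch.dict` temporarily injects mocks into `brew.__salt__` for the duration of the `with` block and restores the original contents afterwards. A stripped-down, Salt-free version of that pattern (the file itself uses `salttesting.mock`, which wraps the same API):

```python
from unittest.mock import MagicMock, patch

# Stand-in for a module-level registry such as brew.__salt__.
registry = {}

mock_run_all = MagicMock(return_value={'stdout': 'homebrew/dupes\nhomebrew/science'})

with patch.dict(registry, {'cmd.run_all': mock_run_all}):
    # Inside the block, code that looks functions up in the registry sees the mock.
    out = registry['cmd.run_all']('brew tap')
    assert out['stdout'].splitlines() == ['homebrew/dupes', 'homebrew/science']

# patch.dict restores the dictionary on exit.
assert 'cmd.run_all' not in registry
```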
#### File: unit/modules/osxdesktop_test.py
```python
from __future__ import absolute_import
# Import Salt Testing Libs
from salttesting import TestCase, skipIf
from salttesting.mock import (
MagicMock,
patch,
NO_MOCK,
NO_MOCK_REASON
)
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
# Import Salt Libs
from salt.modules import osxdesktop
# Globals
osxdesktop.__salt__ = {}
@skipIf(NO_MOCK, NO_MOCK_REASON)
class OsxDesktopTestCase(TestCase):
'''
Test cases for salt.modules.osxdesktop
'''
# 'get_output_volume' function tests: 1
def test_get_output_volume(self):
'''
Test if it get the output volume (range 0 to 100)
'''
mock = MagicMock(return_value=True)
with patch.dict(osxdesktop.__salt__, {'cmd.run': mock}):
self.assertTrue(osxdesktop.get_output_volume())
# 'set_output_volume' function tests: 1
def test_set_output_volume(self):
'''
Test if it set the volume of sound (range 0 to 100)
'''
mock = MagicMock(return_value=True)
with patch.dict(osxdesktop.__salt__, {'cmd.run': mock}):
self.assertTrue(osxdesktop.set_output_volume('my-volume'))
# 'screensaver' function tests: 1
def test_screensaver(self):
'''
Test if it launch the screensaver
'''
mock = MagicMock(return_value=True)
with patch.dict(osxdesktop.__salt__, {'cmd.run': mock}):
self.assertTrue(osxdesktop.screensaver())
# 'lock' function tests: 1
def test_lock(self):
'''
Test if it lock the desktop session
'''
mock = MagicMock(return_value=True)
with patch.dict(osxdesktop.__salt__, {'cmd.run': mock}):
self.assertTrue(osxdesktop.lock())
# 'say' function tests: 1
def test_say(self):
'''
Test if it says some words.
'''
mock = MagicMock(return_value=True)
with patch.dict(osxdesktop.__salt__, {'cmd.run': mock}):
self.assertTrue(osxdesktop.say())
if __name__ == '__main__':
from integration import run_tests
run_tests(OsxDesktopTestCase, needs_daemon=False)
``` |
{
"source": "jkuruba/ack-sagemaker-controller",
"score": 2
} |
#### File: e2e/tests/test_hpo.py
```python
import boto3
import pytest
import logging
from typing import Dict
from e2e import service_marker, create_sagemaker_resource
from e2e.replacement_values import REPLACEMENT_VALUES
from acktest.resources import random_suffix_name
from acktest.k8s import resource as k8s
RESOURCE_PLURAL = "hyperparametertuningjobs"
HPO_JOB_STATUS_CREATED = ("InProgress", "Completed")
HPO_JOB_STATUS_STOPPED = ("Stopped", "Stopping")
def _sagemaker_client():
return boto3.client("sagemaker")
@pytest.fixture(scope="module")
def xgboost_hpojob():
resource_name = random_suffix_name("xgboost-hpojob", 32)
replacements = REPLACEMENT_VALUES.copy()
replacements["HPO_JOB_NAME"] = resource_name
reference, spec, resource = create_sagemaker_resource(
resource_plural=RESOURCE_PLURAL,
resource_name=resource_name,
spec_file="xgboost_hpojob",
replacements=replacements,
)
assert resource is not None
yield (reference, resource)
# Delete the k8s resource if not already deleted by tests
if k8s.get_resource_exists(reference):
k8s.delete_custom_resource(reference)
def get_sagemaker_hpo_job(hpo_job_name: str):
try:
hpo_desc = _sagemaker_client().describe_hyper_parameter_tuning_job(
HyperParameterTuningJobName=hpo_job_name
)
return hpo_desc
except BaseException:
logging.error(
f"SageMaker could not find an hpo job with the name {hpo_job_name}"
)
return None
@service_marker
@pytest.mark.canary
class TestHPO:
def test_create_hpo(self, xgboost_hpojob):
(reference, resource) = xgboost_hpojob
assert k8s.get_resource_exists(reference)
hpo_job_name = resource["spec"].get("hyperParameterTuningJobName", None)
assert hpo_job_name is not None
hpo_sm_desc = get_sagemaker_hpo_job(hpo_job_name)
assert (
k8s.get_resource_arn(resource) == hpo_sm_desc["HyperParameterTuningJobArn"]
)
assert hpo_sm_desc["HyperParameterTuningJobStatus"] in HPO_JOB_STATUS_CREATED
# Delete the k8s resource.
_, deleted = k8s.delete_custom_resource(reference)
assert deleted is True
hpo_sm_desc = get_sagemaker_hpo_job(hpo_job_name)
assert hpo_sm_desc["HyperParameterTuningJobStatus"] in HPO_JOB_STATUS_STOPPED
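# --- Illustrative helper (not part of the original test) ---------------------
# get_sagemaker_hpo_job() is a thin wrapper around boto3's
# describe_hyper_parameter_tuning_job. A test that needs to wait for the job to
# settle rather than assert immediately could poll the same call; the timeout
# and poll interval below are arbitrary assumptions.
def wait_for_hpo_status(hpo_job_name: str, target_statuses, timeout_s=600, poll_s=30):
    import time
    deadline = time.time() + timeout_s
    while time.time() < deadline:
        desc = _sagemaker_client().describe_hyper_parameter_tuning_job(
            HyperParameterTuningJobName=hpo_job_name
        )
        status = desc["HyperParameterTuningJobStatus"]
        if status in target_statuses:
            return status
        time.sleep(poll_s)
    raise TimeoutError(f"{hpo_job_name} did not reach {target_statuses} in {timeout_s}s")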
``` |
{
"source": "jkusner/PyJavaAnalyzer",
"score": 2
} |
#### File: PyJavaAnalyzer/analyzer/accessflags.py
```python
class AccessFlags:
ACC_PUBLIC = 0x0001
ACC_PRIVATE = 0x0002 # <- for fields/methods only
ACC_PROTECTED = 0x0004 # <- for fields/methods only
ACC_STATIC = 0x0008 # <- fields/methods only
ACC_FINAL = 0x0010 # <- fields/methods only
ACC_SYNCHRONIZED = 0x0020 # <- method only
ACC_SUPER = 0x0020 # <- class only
ACC_VOLATILE = 0x0040 # <- field
ACC_BRIDGE = 0x0040 # <- method
ACC_VARARGS = 0x0080 # <- method
ACC_TRANSIENT = 0x0080 # <- field
ACC_NATIVE = 0x0100 # method
ACC_INTERFACE = 0x0200
ACC_ABSTRACT = 0x0400 # <- class, method
ACC_STRICT = 0x0800 # method (strictfp)
ACC_SYNTHETIC = 0x1000
ACC_ANNOTATION = 0x2000 # <- class
ACC_ENUM = 0x4000
def __init__(self, short, ctype = "?"):
self.flags = []
self.raw = short
self.msg = ""
if short & AccessFlags.ACC_PUBLIC:
self.flags.append("public")
if short & AccessFlags.ACC_FINAL:
self.flags.append("final")
if short & AccessFlags.ACC_SUPER: # might be synchronized if method
if ctype == "class":
self.flags.append("super")
else:
self.flags.append("synchronized")
if short & AccessFlags.ACC_INTERFACE:
self.flags.append("interface")
if short & AccessFlags.ACC_ABSTRACT:
self.flags.append("abstract")
if short & AccessFlags.ACC_SYNTHETIC:
self.flags.append("synthetic")
if short & AccessFlags.ACC_ANNOTATION:
self.flags.append("annotation")
if short & AccessFlags.ACC_PRIVATE:
self.flags.append("private")
if short & AccessFlags.ACC_PROTECTED:
self.flags.append("protected")
if short & AccessFlags.ACC_STATIC:
self.flags.append("static")
if short & AccessFlags.ACC_VOLATILE: # might be BRIDGE if method
if ctype == "method":
self.flags.append("bridge")
else:
self.flags.append("volatile")
if short & AccessFlags.ACC_TRANSIENT: # might be varargs
if ctype == "method":
self.flags.append("varargs")
else:
self.flags.append("transient")
if short & AccessFlags.ACC_ENUM:
self.flags.append("enum")
self.msg = " ".join(self.flags)
def __str__(self):
return "raw: " + str(self.raw) + ". (" + self.msg + ")"
```
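A quick decode check for the class above, assuming the repository root is on `sys.path` so the `analyzer` package imports. 0x0021 is `ACC_PUBLIC | 0x0020`, and that second bit means `super` for classes but `synchronized` for methods:

```python
from analyzer.accessflags import AccessFlags

print(AccessFlags(0x0021, "class").msg)   # public super
print(AccessFlags(0x0021, "method").msg)  # public synchronized
print(AccessFlags(0x0019, "field").msg)   # public final static  (0x0001 | 0x0008 | 0x0010)
```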
#### File: PyJavaAnalyzer/analyzer/field.py
```python
from .accessflags import AccessFlags
from .attribute import Attribute
from .util import *
class Field:
def __init__(self, data, pool):
self.data = data
self.pool = pool
raw_flags, data = read_u2(data)
self.flags = AccessFlags(raw_flags, "field")
self.name_index, data = read_u2(data)
self.name = pool[self.name_index].msg
self.descriptor_index, data = read_u2(data)
self.descriptor = pool[self.descriptor_index].msg
print("Field[%s]: %s" % (self.name, self.descriptor))
self.attributes, data = self._read_attribs(data, pool)
self.data = data
def _read_attribs(self, data, pool):
self.attrib_count, data = read_u2(data)
attrib = []
for _ in range(self.attrib_count):
a = Attribute(data, pool)
data = a.data # use trimmed data
self.data = data
attrib.append(a)
return attrib, data
```
#### File: PyJavaAnalyzer/analyzer/util.py
```python
import struct
def read_u2(data):
"""
Reads a u2 from the data. (Unsigned short)
:param data: Binary class file data
:return: Unsigned short, trimmed data
"""
return struct.unpack(">H", data[:2])[0], data[2:]
def read_u4(data):
"""
Reads a u4 from the data. (Unsigned int)
:param data: Binary class file data
:return: Unsigned int, data
"""
return struct.unpack(">I", data[:4])[0], data[4:]
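# --- Illustrative usage (not part of the original module) --------------------
# Both readers consume a big-endian integer from the front of the buffer and
# hand back the remainder, so parsing code keeps threading the trimmed data.
if __name__ == "__main__":
    data = b"\x00\x10\x00\x00\x00\x2a<rest>"
    value, data = read_u2(data)
    print(value)  # 16
    value, data = read_u4(data)
    print(value)  # 42
    print(data)   # b'<rest>'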
``` |
{
"source": "jku-ssw/gcc-builtin-study",
"score": 3
} |
#### File: gcc-builtin-study/src/boxplot-builtins-per-project.py
```python
from include.common import *
# Obtain the data used to plot Figure 1
def get_count(gid, machine_specific, unique):
if unique:
table = 'UniqueBuiltinsPerProjectMachineSpecific'
else:
table = 'BuiltinsPerProjectMachineSpecific'
result = c.execute('SELECT usage_count FROM ' + table +' WHERE MACHINE_SPECIFIC=? AND GITHUB_PROJECT_ID=?', (1 if machine_specific else 0, gid)).fetchone()
if result is None:
return 0
else:
return result[0]
def compute(csvfile, unique=True):
dir = os.path.dirname(os.path.realpath(__file__))
csv_file = open(csvfile, 'w')
csv_file.write('category;count\n')
gids = (count[0] for count in c.execute('SELECT DISTINCT(GITHUB_PROJECT_ID) FROM BuiltinsPerProjectMachineSpecific').fetchall())
for gid in gids:
machine_specific_count = get_count(gid, True, unique)
machine_independent_count = get_count(gid, False, unique)
total_count = machine_specific_count + machine_independent_count
if machine_specific_count != 0:
csv_file.write('machine-specific;%d\n' % machine_specific_count)
if machine_independent_count != 0:
csv_file.write('machine-independent;%d\n' % machine_independent_count)
csv_file.write('total;%d\n' % total_count)
csv_file.close()
compute(os.path.join(current_dir, '..', '..', 'generated', 'unique_builtins_per_category.csv'), unique=True)
compute(os.path.join(current_dir, '..', '..', 'generated', 'builtins_per_category.csv'), unique=False)
```
#### File: src/include/common.py
```python
import sqlite3
import os
conn = sqlite3.connect(os.path.join(os.path.dirname(__file__), "../../database.db"))
c = conn.cursor()
current_dir = os.path.dirname(__file__)
def print_query_as_command(command, query, roundn=False, percentage=False):
print_as_command(command, c.execute(query).fetchone()[0], roundn, percentage)
def is_float(s):
try:
float(s)
return True
except ValueError:
return False
def is_int(s):
try:
int(s)
return True
except ValueError:
return False
def print_as_command(command, content, roundn=False, percentage=False):
formatted_content = content
if roundn:
formatted_content = '%.0f' % formatted_content
elif is_int(formatted_content):
formatted_content = '{:,.0f}'.format(formatted_content)
elif is_float(formatted_content):
formatted_content = '{:,.1f}'.format(formatted_content)
if percentage:
formatted_content = formatted_content + '\%'
print(('\\newcommand{\\%s}[0]{%s}') % (command, formatted_content, ))
def escape_latex(str):
return str.replace('#', '\#').replace('$', '\$').replace('_', '\_')
def print_tabular_start(name, caption, columns=None, nr_projects=None, columnstext=None):
if columns is not None and columnstext is not None:
print("cannot use both nr of columns and columntext!")
if nr_projects is None or nr_projects == 1:
append = ''
else:
append = ' (with at least ' + str(nr_projects) + ' projects using them)'
print("\\newcommand{\\%s}[0]{" % name)
print("\captionof{table}{%s}" % (caption + append,))
print("\\begin{tabular}{", end='')
if columns is not None:
print("l", end='')
for i in range(columns-1): print(" l", end='')
else:
print(columnstext)
print("}")
print("\\toprule{}")
def print_tabular_end(label):
print("""\\bottomrule{}
\\end{tabular}
\\label{%s}}""" % (label,))
def print_table_start(name, columns, caption, nr_projects=None):
if nr_projects is None or nr_projects == 1:
append = ''
else:
append = ' (with at least ' + str(nr_projects) + ' projects using them)'
print("\\newcommand{\\%s}[0]{" % name)
print("\\begin{table}[]")
print("\\caption{%s}" % (caption + append))
print("\\centering")
print("\\begin{tabular}{l", end='')
for i in range(columns-1): print("|l", end='')
print("}")
def print_table_end(label):
print("""\\end{tabular}
\\label{%s}
\\end{table}}""" % label)
```
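To make the LaTeX-macro helper concrete: `print_as_command` formats the value (thousands separator for integers, `%.0f` when `roundn` is set, a trailing `\%` when `percentage` is set) and wraps it in a `\newcommand`. Assuming the module imports cleanly from the `src` directory (it opens the SQLite database at import time), the output looks like:

```python
from include.common import print_as_command

print_as_command('NumProjects', 1234)
# \newcommand{\NumProjects}[0]{1,234}

print_as_command('ShareMachineSpecific', 42, percentage=True)
# \newcommand{\ShareMachineSpecific}[0]{42\%}

print_as_command('MedianBuiltins', 3.7, roundn=True)
# \newcommand{\MedianBuiltins}[0]{4}
```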
#### File: src/include/setupdefs.py
```python
import os
from .common import *
all_builtins = set()
def find_files(directory, endings):
"""
Find all files in the given directory (recursively) that end in one of the extensions.
"""
for root, dirs, files in os.walk(directory):
for basename in files:
for ending in endings:
if basename.endswith(ending):
filename = os.path.join(root, basename)
yield filename
for deffile in find_files(os.path.join(os.path.dirname(__file__), '../../defs'), ['.def']):
category = os.path.basename(deffile)[:-len('.def')]  # strip the '.def' suffix (str.rstrip removes a character set, not a suffix)
machine_specific = "architecture-specific" in deffile
with open(deffile) as f:
url = None
header = None
lines = [line.rstrip('\n') for line in f.readlines()]
for line in lines:
if line.startswith('#'):
pass # comment
elif line.startswith('%'):
(key, value) = line.split('=')
key = key.strip('%')
if key == 'url':
url = value
elif key == 'header':
header = value
else:
print(line + " is unknown!")
exit(-1)
else:
if url is None:
print(deffile + " did not define url")
exit(-1)
elif header is None:
print(deffile + " did not define header")
exit(-1)
if line in all_builtins:
print("duplicate " + line + "!")
exit(-1)
all_builtins.add(line)
query = """insert into BuiltinsUnfiltered(
BUILTIN_NAME,
BUILTIN_CATEGORY,
MACHINE_SPECIFIC,
DOCUMENTATION_URL,
DOCUMENTATION_SECTION_HEADER,
FROM_DEF
)
VALUES(?, ?, ?, ?, ?, 1)
"""
c.execute(query, (line, category, 1 if machine_specific else 0, url, header))
print("builtin: " + line)
print("category: " + category)
print("machine-specific: " + str(machine_specific))
print("url: " + url)
print("header: " + header)
for deffile in find_files(os.path.join(os.path.dirname(__file__), '../../excludes'), ['.def']):
category = os.path.basename(deffile)[:-len('.def')]  # strip the '.def' suffix (str.rstrip removes a character set, not a suffix)
with open(deffile) as f:
url = None
lines = [line.rstrip('\n') for line in f.readlines()]
for line in lines:
if line.startswith('#'):
pass # comment
else:
query = """insert into ExcludedBuiltins(
NAME,
REASON
)
VALUES(?, ?)
"""
c.execute(query, (line, category))
conn.commit()
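# --- Illustrative .def layout (hypothetical file, not from the repo) ----------
# Reading the parser above backwards gives the expected format: '#' lines are
# comments, '%url=' and '%header=' must come before the first builtin name, and
# every remaining non-empty line names exactly one builtin. For example, a
# defs/atomic.def could look like:
#
#     # GCC __atomic builtins
#     %url=https://gcc.gnu.org/onlinedocs/gcc/_005f_005fatomic-Builtins.html
#     %header=__atomic Builtins
#     __atomic_load_n
#     __atomic_store_n
#     __atomic_exchange_n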
``` |
{
"source": "JKutt/pydiso",
"score": 2
} |
#### File: JKutt/pydiso/setup.py
```python
from distutils.core import setup
from setuptools import find_packages
import sys
def configuration(parent_package="", top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration(None, parent_package, top_path)
config.set_options(
ignore_setup_xxx_py=True,
assume_default_configuration=True,
delegate_options_to_subpackages=True,
quiet=True,
)
config.add_subpackage("pydiso")
return config
metadata = dict(
name='pydiso',
version='0.0.3',
python_requires=">=3.6",
setup_requires=[
"numpy>=1.8",
"cython>=0.2",
],
install_requires=[
'numpy>=1.8',
'scipy>=0.13',
],
author='SimPEG developers',
author_email='<EMAIL>',
description="Wrapper for intel's pardiso implementation in the MKL",
keywords='sparse, solver, wrapper',
url='https://www.simpeg.xyz',
download_url='https://github.com/jcapriot/pydiso-mkl',
platforms=['Windows', 'Linux', 'Solaris', 'Mac OS-X', 'Unix'],
license='MIT License'
)
if len(sys.argv) >= 2 and (
"--help" in sys.argv[1:]
or sys.argv[1] in ("--help-commands", "egg_info", "--version", "clean")
):
# For these actions, NumPy is not required.
#
# They are required to succeed without Numpy, for example when
# pip is used to install discretize when Numpy is not yet present in
# the system.
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
else:
if (len(sys.argv) >= 2 and sys.argv[1] in ("bdist_wheel", "bdist_egg")) or (
"develop" in sys.argv
):
# bdist_wheel/bdist_egg needs setuptools
import setuptools
from numpy.distutils.core import setup
# Add the configuration to the setup dict when building
# after numpy is installed
metadata["configuration"] = configuration
setup(**metadata)
``` |
{
"source": "JKutt/simpeg",
"score": 3
} |
#### File: examples/20-published/plot_heagyetal2017_casing.py
```python
import discretize
from SimPEG import utils, maps, tests
from SimPEG.electromagnetics import mu_0, frequency_domain as FDEM, analytics
from SimPEG.electromagnetics.utils import omega
from SimPEG.utils.io_utils import download
# try:
# from pymatsolver import MumpsSolver as Solver
# print('using MumpsSolver')
# except ImportError:
try:
from pymatsolver import Pardiso as Solver
except ImportError:
from SimPEG import SolverLU as Solver
import numpy as np
import scipy.sparse as sp
import time
import os
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
from matplotlib import rcParams
import h5py
np.random.seed(42)
fontsize = 12
rcParams["font.size"] = fontsize
class PrimSecCasingExample(object):
NAME = "PrimSec_5e6Casing_50Mu_05Hz_LargeCondBody"
# -------------- SETUP MODEL PARAMS ---------------------------- #
sigmaair = 1e-8 # air
sigmaback = 1e-2 # background
sigmacasing = 5.5e6 # casing
sigmainside = 1 # inside the casing
mucasing = 50 # casing permeability
casing_l = 1000 # length of the casing
casing_d = 10e-2 # 10cm diameter casing
casing_t = 1e-2 # 1cm thickness
# layer
sigmalayer = 1.0 / 10.0
layer_z = np.r_[-1000.0, -900.0]
# 3D body
sigmablock = 2.0
block_x = np.r_[75.0, 475.0]
block_y = np.r_[-125, 125.0]
block_z = layer_z
# Survey Params
freqs = np.r_[0.5] # frequencies 0.5
dsz = -950.0 # down-hole z source location
src_a = np.r_[0.0, 0.0, dsz]
src_b = np.r_[1e4, 0.0, 0.0] # return electrode is a ring, 200
def __init__(self):
# translate casing parameters to radial distances to outer casing wall,
# inner casing wall, and casing length to z locations
self.casing_r = self.casing_d / 2.0
self.casing_a = self.casing_r - self.casing_t / 2.0 # inner radius
self.casing_b = self.casing_r + self.casing_t / 2.0 # outer radius
self.casing_z = np.r_[-self.casing_l, 0.0]
# Display skin depth so we can ensure our mesh goes further.
print(
"\nSkin Depth: {}".format(
[(500.0 / np.sqrt(self.sigmaback * _)) for _ in self.freqs]
)
)
# -------------- Model --------------------------------- #
@property
def mtrue(self):
# This is the model we are using to compute the sensitivity. Each of
# these parameters would be considered unknown in an inversion. This
# model is of a parametrized block in a layer
#
#        +--------------------------------------+
#        |                                      |
#        |                                      |
#        |              background              |
#        |                                      |
#        |                                      |
#        +--------------------+-------+---------+
#        |                    |       |         |
#        |       layer        | block |         |
#        |                    |       |         |
#        +--------------------+-------+---------+
#        |                                      |
#        |                                      |
#        |              background              |
#        |                                      |
#        |                                      |
#        +--------------------------------------+
return np.hstack(
np.r_[
np.log(self.sigmaback), # value in background
np.log(self.sigmalayer), # value in the layer
np.log(self.sigmablock), # value in the block
self.layer_z.mean(), # layer center
self.layer_z[1] - self.layer_z[0], # layer thickness
self.block_x.mean(), # block x_0
self.block_y.mean(), # block y_0
self.block_x[1] - self.block_x[0], # block dx
self.block_y[1] - self.block_y[0], # block dy
]
)
# ----------------------------------------------------------------- #
# -------------- PRIMARY PROBLEM SETUP ---------------------------- #
# ----------------------------------------------------------------- #
@property
def meshp(self):
if getattr(self, "_meshp", None) is None:
# -------------- Mesh Parameters ------------------ #
# x-direction
csx1, csx2 = 2.5e-3, 25.0 # fine cells near well bore
pfx1, pfx2 = 1.3, 1.4 # padding factors: fine -> uniform
ncx1 = np.ceil(self.casing_b / csx1 + 2) # number of fine cells
# (past casing wall)
dx2 = 1000.0 # uniform mesh out to here
npadx2 = 21 # padding out to infinity
# z-direction
csz = 0.05 # finest z-cells
nza = 10 # number of fine cells above air-earth interface
pfz = pfx2 # padding factor in z-direction
# ------------- Assemble the Cyl Mesh ------------- #
# pad nicely to second cell size
npadx1 = np.floor(np.log(csx2 / csx1) / np.log(pfx1))
hx1a = utils.meshTensor([(csx1, ncx1)])
hx1b = utils.meshTensor([(csx1, npadx1, pfx1)])
dx1 = sum(hx1a) + sum(hx1b)
dx1 = np.floor(dx1 / csx2)
hx1b *= (dx1 * csx2 - sum(hx1a)) / sum(hx1b)
# second chunk of mesh
ncx2 = np.ceil((dx2 - dx1) / csx2)
hx2a = utils.meshTensor([(csx2, ncx2)])
hx2b = utils.meshTensor([(csx2, npadx2, pfx2)])
hx = np.hstack([hx1a, hx1b, hx2a, hx2b])
# cell size, number of core cells, number of padding cells in the
# x-direction
ncz = int(np.ceil(np.diff(self.casing_z)[0] / csz)) + 10
npadzu, npadzd = 43, 43
# vector of cell widths in the z-direction
hz = utils.meshTensor([(csz, npadzd, -pfz), (csz, ncz), (csz, npadzu, pfz)])
# primary mesh
self._meshp = discretize.CylMesh(
[hx, 1.0, hz], [0.0, 0.0, -np.sum(hz[: npadzu + ncz - nza])]
)
print(
"Cyl Mesh Extent xmax: {},: zmin: {}, zmax: {}".format(
self._meshp.vectorCCx.max(),
self._meshp.vectorCCz.min(),
self._meshp.vectorCCz.max(),
)
)
return self._meshp
@property
def indActivePrimary(self):
return self.meshp.gridCC[:, 2] <= 0.0 # air cells
@property
def projectionMapPrimary(self):
return maps.Projection(nP=9, index=np.r_[0, 1, 3, 4])
@property
def primaryMapping(self):
# Setup Pimary Maps:
# we want to simulate on a physical property model that
# consists of casing in a layered background. Air cells are included.
# Our "model", that we are considering when computing the sensitivity,
# consists of the layered background and block, so the casing and air
# cells are inactive parts of the model and need to be appropriately
# injected during the construction of the primary model
if getattr(self, "_primaryMapping", None) is None:
print("Building primary mapping")
# inject parameters we want to invert for into the full casing
# model
valInactive = np.r_[
np.log(self.sigmacasing), # log conductivity of the casing
np.log(self.sigmainside), # log conductivity fluid inside
# casing
self.casing_r, # radius of the casing (to its center)
self.casing_t, # casing thickness
self.casing_z[0], # bottom of casing (at depth)
self.casing_z[1], # top of casing (at surface)
]
# inject casing parameters so they are included in the construction
# of the layered background + casing
injectCasingParams = maps.InjectActiveCells(
None, indActive=np.r_[0, 1, 4, 5], valInactive=valInactive, nC=10
)
# maps a list of casing parameters to the cyl mesh (below the
# subsurface)
paramMapPrimary = maps.ParametricCasingAndLayer(
self.meshp, indActive=self.indActivePrimary, slopeFact=1e4
)
# inject air cells
injActMapPrimary = maps.InjectActiveCells(
self.meshp, self.indActivePrimary, np.log(self.sigmaair)
)
# map from log conductivity to conductivity
expMapPrimary = maps.ExpMap(self.meshp)
# assemble the primary mapping
primaryMapping = (
expMapPrimary
* injActMapPrimary # log(sigma) --> sigma
* paramMapPrimary # log(sigma) below surface --> include air
* injectCasingParams # parametric --> casing + layered earth
* # parametric layered earth --> parametric
# layered earth + casing
self.projectionMapPrimary # grab relevant parameters from full
# model (eg. ignore block)
)
self._paramMapPrimary = paramMapPrimary
self._primaryMapping = primaryMapping
print("... done building primary mapping")
return self._primaryMapping
@property
def muModel(self):
# Mu Model
# here, we want to consider variable magnetic permeability in the
# simulation. The only permeable item in the domain is the casing.
if getattr(self, "_muModel", None) is None:
if getattr(self, "_paramMapPrimary", None) is None:
self.primaryMapping
muMap = (
maps.InjectActiveCells(self.meshp, self.indActivePrimary, mu_0)
* self._paramMapPrimary
)
muModel = muMap * np.hstack(
np.r_[
mu_0, # val Background
mu_0, # val Layer
mu_0 * self.mucasing, # val Casing
mu_0, # val inside Casing
self.layer_z.mean(), # layer center
self.layer_z[1] - self.layer_z[0], # layer thickness
self.casing_r, # casing radius
self.casing_t, # casing thickness
self.casing_z[0], # casing bottom
self.casing_z[1], # casing top
]
)
self._muModel = muModel
return self._muModel
@property
def primaryProblem(self):
if getattr(self, "_primaryProblem", None) is None:
# define a custom prop map to include variable mu that we are not
# inverting for - This will change when we improve the propmap!
print("Getting Primary Problem")
# class CasingEMPropMap(maps.PropMap):
# sigma = maps.Property(
# "Electrical Conductivity", defaultInvProp=True,
# propertyLink=('rho', maps.ReciprocalMap)
# )
# mu = maps.Property(
# "Inverse Magnetic Permeability",
# defaultVal=self.muModel,
# propertyLink=('mui', maps.ReciprocalMap)
# )
# rho = maps.Property(
# "Electrical Resistivity",
# propertyLink=('sigma', maps.ReciprocalMap)
# )
# mui = maps.Property(
# "Inverse Magnetic Permeability",
# defaultVal=1./self.muModel,
# propertyLink=('mu', maps.ReciprocalMap)
# )
# # set the problem's propmap
# FDEM.Simulation3DMagneticField.PropMap = CasingEMPropMap
# use H-J formulation for source with vertical current density and
# cylindrical symmetry (h faster on cyl --> less edges than faces)
primaryProblem = FDEM.Simulation3DMagneticField(
self.meshp, sigmaMap=self.primaryMapping
)
primaryProblem.mu = self.muModel
primaryProblem.solver = Solver
self._primaryProblem = primaryProblem
print("... done building primary problem")
return self._primaryProblem
@property
def primarySurvey(self):
if getattr(self, "_primarySurvey", None) is None:
print("Setting up primary survey")
def setupPrimarySource(plotIt=False):
# Construct a downhole source that is coupled to the casing
meshp = self.meshp
src_a = self.src_a
src_b = self.src_b
casing_a = self.casing_a
# downhole source
dg_x = np.zeros(meshp.vnF[0], dtype=complex)
dg_y = np.zeros(meshp.vnF[1], dtype=complex)
dg_z = np.zeros(meshp.vnF[2], dtype=complex)
# vertically directed wire in borehole
# go through the center of the well
dgv_indx = meshp.gridFz[:, 0] < meshp.hx.min()
dgv_indz = (meshp.gridFz[:, 2] >= src_a[2]) & (
meshp.gridFz[:, 2] <= src_b[2]
)
dgv_ind = dgv_indx & dgv_indz
dg_z[dgv_ind] = -1.0
# couple to the casing downhole - top part
dgh_indx = meshp.gridFx[:, 0] <= casing_a + meshp.hx.min() * 2
# couple to the casing downhole - bottom part
dgh_indz2 = (meshp.gridFx[:, 2] <= src_a[2]) & (
meshp.gridFx[:, 2] > src_a[2] - meshp.hz.min()
)
dgh_ind2 = dgh_indx & dgh_indz2
dg_x[dgh_ind2] = 1.0
# horizontally directed wire
sgh_indx = meshp.gridFx[:, 0] <= src_b[0]
sgh_indz = (meshp.gridFx[:, 2] > meshp.hz.min()) & (
meshp.gridFx[:, 2] < 2 * meshp.hz.min()
)
sgh_ind = sgh_indx & sgh_indz
dg_x[sgh_ind] = -1.0
# return electrode
sgv_indx = (meshp.gridFz[:, 0] > src_b[0] * 0.9) & (
meshp.gridFz[:, 0] < src_b[0] * 1.1
)
sgv_indz = (meshp.gridFz[:, 2] >= -meshp.hz.min()) & (
meshp.gridFz[:, 2] < 2 * meshp.hz.min()
)
sgv_ind = sgv_indx & sgv_indz
dg_z[sgv_ind] = 1.0
# assemble the source (downhole grounded primary)
dg = np.hstack([dg_x, dg_y, dg_z])
dg_p = [FDEM.Src.RawVec_e([], _, dg / meshp.area) for _ in self.freqs]
# if plotIt:
# # Plot the source to make sure the path is infact
# # connected
# fig, ax = plt.subplots(1, 1, figsize=(6, 4))
# meshp.plotGrid(ax=ax)
# ax.plot(meshp.gridFz[dgv_ind, 0], meshp.gridFz[dgv_ind, 2], 'rd')
# ax.plot(meshp.gridFx[dgh_ind2, 0], meshp.gridFx[dgh_ind2, 2], 'rd')
# ax.plot(meshp.gridFz[sgv_ind, 0], meshp.gridFz[sgv_ind, 2], 'rd')
# ax.plot(meshp.gridFx[sgh_ind, 0], meshp.gridFx[sgh_ind, 2], 'rd')
# ax.set_title('downhole casing source on mesh')
# ax.set_xlim([0, 1.1e4])
# ax.set_ylim([-1100., 0.5])
return dg_p
srcList = setupPrimarySource() # create primary source
self._primarySurvey = FDEM.Survey(srcList) # primary survey
print("... done building primary survey")
return self._primarySurvey
def solvePrimary(self, primaryProblem, m=None, saveFields=False):
if m is None:
m = self.mtrue
print("solving primary ...")
t0 = time.time()
primfields = primaryProblem.fields(m)
t1 = time.time()
print("Done solving primary fields, time {} ".format(t1 - t0))
return primfields
def plotPrimaryMesh(self):
fig, ax = plt.subplots(1, 1, figsize=(6, 4))
self.meshp.plotGrid(ax=ax)
plt.title("Cyl Mesh")
return ax
def plotPrimaryProperties(self):
fig, ax = plt.subplots(1, 2, figsize=(10, 4))
f = self.meshp.plotImage(
self.muModel / mu_0,
ax=ax[0],
pcolorOpts={"cmap": plt.get_cmap("viridis")},
grid=False,
)
plt.colorbar(f[0], ax=ax[0])
ax[0].set_xlim([0, 1.0])
ax[0].set_ylim([-1.5e3, 500])
ax[0].set_title("mu_r")
f = self.meshp.plotImage(
np.log10(self.primaryMapping * self.mtrue),
ax=ax[1],
pcolorOpts={"cmap": plt.get_cmap("viridis")},
grid=False,
)
plt.colorbar(f[0], ax=ax[1])
ax[1].set_xlim([0, 1.0])
ax[1].set_ylim([-1.5e3, 500])
ax[1].set_title("log10 sigma")
plt.tight_layout()
return ax
# ----------------------------------------------------------------- #
# -------------- SECONDARY PROBLEM SETUP -------------------------- #
# ----------------------------------------------------------------- #
# -------------- MESH -------------------------------------------- #
@property
def meshs(self):
if getattr(self, "_meshs", None) is None:
csx, ncx, npadx = 50, 21, 12
csy, ncy, npady = 50, 21, 12
csz, ncz, npadz = 25, 40, 14
pf = 1.5
hx = utils.meshTensor([(csx, npadx, -pf), (csx, ncx), (csx, npadx, pf)])
hy = utils.meshTensor([(csy, npady, -pf), (csy, ncy), (csy, npady, pf)])
hz = utils.meshTensor([(csz, npadz, -pf), (csz, ncz), (csz, npadz, pf)])
x0 = np.r_[-hx.sum() / 2.0, -hy.sum() / 2.0, -hz[: npadz + ncz].sum()]
self._meshs = discretize.TensorMesh([hx, hy, hz], x0=x0)
print("Secondary Mesh ... ")
print(
" xmin, xmax, zmin, zmax: ",
self._meshs.vectorCCx.min(),
self._meshs.vectorCCx.max(),
self._meshs.vectorCCy.min(),
self._meshs.vectorCCy.max(),
self._meshs.vectorCCz.min(),
self._meshs.vectorCCz.max(),
)
print(" nC, vnC", self._meshs.nC, self._meshs.vnC)
return self._meshs
@property
def indActive(self):
return self.meshs.gridCC[:, 2] <= 0.0 # air cells
@property
def injActMap(self):
return maps.InjectActiveCells(self.meshs, self.indActive, np.log(self.sigmaair))
@property
def expMap(self):
return maps.ExpMap(self.meshs)
@property
def mapping(self):
# secondary mapping
# here, we construct the parametric mapping to take the parameters
# describing the block in a layered space and map it to a conductivity
# model on our mesh
if getattr(self, "_mapping", None) is None:
print("building secondary mapping")
paramMap = maps.ParametricBlockInLayer(self.meshs, indActive=self.indActive)
self._mapping = (
self.expMap
* self.injActMap # log sigma --> sigma
* paramMap # inject air cells # block in a layered space (subsurface)
)
print("... done building secondary mapping")
return self._mapping
@property
def primaryMap2meshs(self):
if getattr(self, "_primaryMap2mesh", None) is None:
# map the primary model to the secondary mesh (layer without the
# block)
print("Building primaryMap2meshs")
paramMapPrimaryMeshs = maps.ParametricLayer(
self.meshs, indActive=self.indActive
)
self._primaryMap2mesh = (
self.expMap
* self.injActMap # log sigma --> sigma
* paramMapPrimaryMeshs # include air cells
* self.projectionMapPrimary # parametrized layer # grab correct indices
)
print("... done building primaryMap2meshs")
return self._primaryMap2mesh
# -------------- PROBLEM and SURVEY ---------------------------- #
def setupSecondaryProblem(self, mapping=None):
print("Setting up Secondary Problem")
if mapping is None:
mapping = [("sigma", maps.IdentityMap(self.meshs))]
sec_problem = FDEM.Simulation3DElectricField(self.meshs, sigmaMap=mapping)
sec_problem.Solver = Solver
print("... done setting up secondary problem")
return sec_problem
def setupSecondarySurvey(self, primaryProblem, primarySurvey, map2meshSecondary):
print("Setting up Secondary Survey")
nx = 41
ny = nx
rx_x, rx_y = 2 * [np.linspace(-2050, 2050, nx)]
self.rxlocs = utils.ndgrid([rx_x, rx_y, np.r_[-1]])
self.rx_x = self.rxlocs[:, 0].reshape(nx, ny, order="F")
self.rx_y = self.rxlocs[:, 1].reshape(nx, ny, order="F")
rx_ex = FDEM.Rx.PointElectricField(
self.rxlocs, orientation="x", component="real"
)
rx_ey = FDEM.Rx.PointElectricField(
self.rxlocs, orientation="y", component="real"
)
RxList = [rx_ex, rx_ey]
sec_src = [
FDEM.Src.PrimSecMappedSigma(
RxList,
freq,
primaryProblem,
primarySurvey,
map2meshSecondary=map2meshSecondary,
)
for freq in self.freqs
]
print("... done secondary survey")
return FDEM.Survey(sec_src)
# -------------- SOLVE ---------------------------- #
def solveSecondary(self, sec_problem, sec_survey, m, plotIt=False):
sec_problem.survey = sec_survey
print("Solving Secondary")
t0 = time.time()
fields = sec_problem.fields(m)
dpred = sec_problem.dpred(m, f=fields)
t1 = time.time()
print(" ...done. secondary time "), t1 - t0
return fields, dpred
# ----------------------------------------------------------------- #
# ------------ PLOTTING ------------------------------------------- #
# ----------------------------------------------------------------- #
def plotPrimaryFields(self, primaryFields, saveFig=False):
# Interpolate onto a cartesian mesh with uniform cell sizes (better for
# streamplots)
cs = 5.0
xmax = 1000.0
zmax = 1200.0
csx, ncx = cs, np.ceil(xmax / cs)
csz, ncz = cs, np.ceil(zmax / cs)
# define the tensor mesh
meshcart = discretize.TensorMesh(
[[(csx, ncx)], [(csx, 1)], [(csz, ncz)]], [0, -csx / 2.0, -zmax]
)
projF = self.meshp.getInterpolationMatCartMesh(meshcart, "F")
jcart = projF * primaryFields[:, "j"]
fig, ax = plt.subplots(1, 1, figsize=(6, 7.75))
f = meshcart.plotSlice(
jcart.real,
normal="Y",
v_type="F",
view="vec",
pcolor_opts={"norm": LogNorm(), "cmap": plt.get_cmap("viridis")},
stream_opts={"color": "k", "arrowsize": 2},
ax=ax,
)
plt.colorbar(f[0], label="real current density (A/m$^2$)")
ax.set_adjustable("box")
ax.axis("equal")
ax.set_ylim([-1200.0, 0.0])
ax.set_xlim([0.0, 750.0])
ax.set_title("Primary Current Density")
ax.set_xlabel("radius (m)", fontsize=fontsize)
ax.set_ylabel("z (m)", fontsize=fontsize)
if saveFig is True:
fig.savefig("primaryCurrents", dpi=300, bbox_inches="tight")
return ax
def plotSecondarySource(self, primaryFields, saveFig=False):
# get source term
secondaryProblem = self.setupSecondaryProblem(mapping=self.mapping)
secondaryProblem.solver = Solver
self.primaryProblem.solver = Solver
secondaryProblem.model = self.mtrue
secondarySurvey = self.setupSecondarySurvey(
self.primaryProblem, self.primarySurvey, self.primaryMap2meshs
)
src = secondarySurvey.source_list[0]
s_e = src.s_e(secondaryProblem, f=primaryFields)
# Mesh to interpolate onto for stream plots
cs = 5.0
csz = 0.5
xmin, xmax = -600.0, 600.0
ymin, ymax = -600.0, 600.0
zmin, zmax = -950.0 - csz / 2.0, -950.0 + csz / 2.0
ncx = np.ceil((xmax - xmin) / cs)
ncy = np.ceil((ymax - ymin) / cs)
ncz = np.ceil((zmax - zmin) / cs)
meshs_plt = discretize.TensorMesh(
[[(cs, ncx)], [(cs, ncy)], [(cs, ncz)]],
[
xmin + (xmin + xmax) / 2.0,
ymin + (ymin + ymax) / 2.0,
zmin + (zmin + zmax) / 2.0,
],
)
# Construct interpolation matrices
Px = self.meshs.getInterpolationMat(meshs_plt.gridEx, locType="Ex")
Py = self.meshs.getInterpolationMat(meshs_plt.gridEy, locType="Ey")
Pz = self.meshs.getInterpolationMat(meshs_plt.gridEz, locType="Ez")
P = sp.vstack([Px, Py, Pz])
# for regions outside of the anomalous block, the source current
# density is identically zero. For plotting, we do not want to
# interpolate into this region, so we build up masked arrays.
maskme_ex = (
(self.meshs.gridEx[:, 0] <= self.block_x[0])
| (self.meshs.gridEx[:, 0] >= self.block_x[1])
| (self.meshs.gridEx[:, 1] <= self.block_y[0])
| (self.meshs.gridEx[:, 1] >= self.block_y[1])
)
maskme_ey = (
(self.meshs.gridEy[:, 0] <= self.block_x[0])
| (self.meshs.gridEy[:, 0] >= self.block_x[1])
| (self.meshs.gridEy[:, 1] <= self.block_y[0])
| (self.meshs.gridEy[:, 1] >= self.block_y[1])
)
maskme_ez = (
(self.meshs.gridEz[:, 0] <= self.block_x[0])
| (self.meshs.gridEz[:, 0] >= self.block_x[1])
| (self.meshs.gridEz[:, 1] <= self.block_y[0])
| (self.meshs.gridEz[:, 1] >= self.block_y[1])
)
maskme_e = np.hstack([maskme_ex, maskme_ey, maskme_ez])
# interpolate down a layer
s_e_interp = s_e.real.copy()
s_e_interp[maskme_e] = np.nan
s_e_plt = P * s_e_interp
# keep masked array for stream plots
s_e_stream_cc = meshs_plt.aveE2CCV * s_e_plt
# re-assign zero for amplitude of the real current density
s_e_abs_cc = s_e_stream_cc.reshape(meshs_plt.nC, 3, order="F")
s_e_abs_cc = np.sqrt((s_e_abs_cc ** 2.0).sum(axis=1))
s_e_abs_cc[np.isnan(s_e_abs_cc)] = 0.0
s_e_stream_cc = np.ma.masked_where(np.isnan(s_e_stream_cc), s_e_stream_cc)
# plot
fig, ax = plt.subplots(1, 1, figsize=(7.5, 6))
# f = meshs_plt.plotSlice(
# np.ma.masked_where(maskme_e, s_e_plt.real),
# normal='Z',
# vType='CCv',
# view='abs',
# pcolorOpts={'cmap':plt.get_cmap('viridis')}, ax=ax
# )
f = ax.pcolormesh(
meshs_plt.vectorCCx,
meshs_plt.vectorCCy,
(s_e_abs_cc).reshape(meshs_plt.vnC[:2], order="F").T,
cmap=plt.get_cmap("viridis"),
)
ax.streamplot(
meshs_plt.vectorCCx,
meshs_plt.vectorCCy,
s_e_stream_cc[: meshs_plt.nC].reshape(meshs_plt.vnC[:2]),
s_e_stream_cc[meshs_plt.nC : meshs_plt.nC * 2].reshape(meshs_plt.vnC[:2]),
density=1.5,
color="k",
arrowsize=2,
)
ax.set_xlabel("x (m)", fontsize=fontsize)
ax.set_ylabel("y (m)", fontsize=fontsize)
cb = plt.colorbar(f, label="real current density (A/m$^2$)")
cb.formatter.set_powerlimits((0, 0))
cb.update_ticks()
ax.set_adjustable("box")
ax.axis("equal")
ax.axis([-600, 600, -600, 600])
ax.set_title("(a) -950m Depth Slice", fontsize=fontsize)
# interact(plotMe, ind=[0, meshs_plt.vnC[2]-1])
if saveFig is True:
fig.savefig("secondarySource", dpi=300)
return ax
def plotData(self, data_block, data_back, saveFig=False):
XLIM = np.r_[-1500, 1500]
YLIM = np.r_[-1500, 1500]
sec_survey = self.setupSecondarySurvey(
self.primaryProblem, self.primarySurvey, self.primaryMap2meshs
)
src = sec_survey.source_list[0]
rx0 = src.receiver_list[0]
nx = int(np.sqrt(len(rx0.locations)))
ny = nx
def plotDataFun(
ax,
plotme,
num=50,
plotBlock=True,
xlim=XLIM,
ylim=YLIM,
clim=None,
clabel="Electric Field (V/m)",
xlabel="x (m)",
ylabel="y (m)",
title=None,
):
if clim is None:
clim = np.absolute(plotme).max() * np.r_[-1.0, 1.0]
elif clim is not None:
clim = clim
f = ax.contourf(
self.rx_x,
self.rx_y,
plotme,
num,
cmap=plt.get_cmap("viridis"),
vmin=clim[0],
vmax=clim[1],
)
ax.set_adjustable("box")
ax.axis("equal")
ax.set_xlim(xlim)
ax.set_ylim(ylim)
cb = plt.colorbar(f, ax=ax, label=clabel)
cb.formatter.set_powerlimits((0, 0))
cb.update_ticks()
if xlabel is not None:
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_ylabel(ylabel)
if title is not None:
ax.set_title(title)
if plotBlock:
ax.plot(
np.r_[
self.block_x[0],
self.block_x[0],
self.block_x[1],
self.block_x[1],
self.block_x[0],
],
np.r_[
self.block_y[0],
self.block_y[1],
self.block_y[1],
self.block_y[0],
self.block_y[0],
],
color="w",
linestyle="-",
)
return ax
ncontours = 50
fig, ax = plt.subplots(2, 2, figsize=(12, 10))
ax = utils.mkvc(ax)
plotx0 = (data_block[: rx0.nD]).reshape(nx, ny, order="F")
ploty0 = (data_block[rx0.nD :]).reshape(nx, ny, order="F")
plotx1 = (data_block[: rx0.nD] - data_back[: rx0.nD]).reshape(nx, ny, order="F")
ploty1 = (data_block[rx0.nD :] - data_back[rx0.nD :]).reshape(nx, ny, order="F")
# Plotting
ax[0] = plotDataFun(ax[0], plotx0, num=ncontours, title="(a) Total E$_x$")
ax[1] = plotDataFun(ax[1], plotx1, num=ncontours, title="(c) Secondary E$_x$")
ax[2] = plotDataFun(ax[2], ploty0, num=ncontours, title="(b) Total E$_y$")
ax[3] = plotDataFun(ax[3], ploty1, num=ncontours, title="(d) Secondary E$_y$")
plt.tight_layout()
if saveFig is True:
fig.savefig("casingDpred", dpi=300)
def plotSensitivities(self, J, saveFig=False):
def plotJ(
ax,
Jv,
title,
plotGrid=False,
xlabel="x (m)",
ylabel="y (m)",
xlim=None,
ylim=None,
clim=None,
climCenter=True,
plotBlock=False,
num=30,
norm=None,
cblabel="",
):
eps = 1e-3 # just so we don't get white-spaces in the colormap
ax.axis("equal")
vlim = np.absolute(Jv).max() * np.r_[-1.0, 1.0]
if norm is None:
f = ax.contourf(
self.rx_x,
self.rx_y,
Jv,
levels=np.linspace(vlim[0], vlim[1], num),
cmap=plt.get_cmap("viridis"),
vmin=vlim[0],
vmax=vlim[1],
)
cb = plt.colorbar(f, ax=ax, label=cblabel)
cb.formatter.set_powerlimits((0, 0))
ticks = [
"{0:1.1e}".format(a)
for a in np.linspace(0.95 * vlim[0], 0.95 * vlim[1], 5)
]
ticks = [float(t) for t in ticks]
cb.set_ticks(ticks)
cb.update_ticks()
elif norm.lower() == "lognorm":
from matplotlib.colors import LogNorm
f = ax.contourf(
self.rx_x,
self.rx_y,
np.absolute(Jv),
num,
cmap=plt.get_cmap("viridis"),
norm=LogNorm(),
)
cb = plt.colorbar(f, ax=ax)
ax.set_title(title)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
if plotGrid:
self.meshs.plotSlice(
np.nan * np.ones(self.meshs.nC), normal="Z", grid=True, ax=ax
)
if xlim is not None:
ax.set_xlim(xlim)
if ylim is not None:
ax.set_ylim(ylim)
if plotBlock is True:
ax.plot(
np.r_[
self.block_x[0],
self.block_x[0],
self.block_x[1],
self.block_x[1],
self.block_x[0],
],
np.r_[
self.block_y[0],
self.block_y[1],
self.block_y[1],
self.block_y[0],
self.block_y[0],
],
color="w",
linestyle="-",
)
return ax
# Plot Conductivity contribution
plotGrid = False
plotBlock = True
ncontours = 30
xlim = np.r_[-1500, 1500]
ylim = np.r_[-1500, 1500]
nx, ny = self.rx_x.shape
nrx = len(self.rxlocs)
J_back_ex = J[0, :nrx].reshape(nx, ny, order="F")
J_back_ey = J[0, nrx:].reshape(nx, ny, order="F")
J_layer_ex = J[1, :nrx].reshape(nx, ny, order="F")
J_layer_ey = J[1, nrx:].reshape(nx, ny, order="F")
J_block_ex = J[2, :nrx].reshape(nx, ny, order="F")
J_block_ey = J[2, nrx:].reshape(nx, ny, order="F")
clabelSigs = "Sensitivity (V/m / log($\sigma$))"
fig, ax = plt.subplots(3, 2, figsize=(12, 15))
ax[0][0] = plotJ(
ax[0][0],
J_back_ex,
"(a) Sensitivity of $E_x$ wrt log($\sigma_{back}$)",
plotGrid=plotGrid,
xlim=xlim,
ylim=ylim,
plotBlock=plotBlock,
num=ncontours,
cblabel=clabelSigs,
)
ax[0][1] = plotJ(
ax[0][1],
J_back_ey,
"(b) Sensitivity of $E_y$ wrt log($\sigma_{back}$)",
plotGrid=plotGrid,
xlim=xlim,
ylim=ylim,
plotBlock=plotBlock,
num=ncontours,
cblabel=clabelSigs,
)
ax[1][0] = plotJ(
ax[1][0],
J_layer_ex,
"(c) Sensitivity of $E_x$ wrt log($\sigma_{layer}$)",
plotGrid=plotGrid,
xlim=xlim,
ylim=ylim,
plotBlock=plotBlock,
num=ncontours,
cblabel=clabelSigs,
)
ax[1][1] = plotJ(
ax[1][1],
J_layer_ey,
"(d) Sensitivity of $E_y$ wrt log($\sigma_{layer}$)",
plotGrid=plotGrid,
xlim=xlim,
ylim=ylim,
plotBlock=plotBlock,
num=ncontours,
cblabel=clabelSigs,
)
climsigblock = np.r_[-6e-8, 6e-8]
ax[2][0] = plotJ(
ax[2][0],
J_block_ex,
"(e) Sensitivity of $E_x$ wrt log($\sigma_{block}$)",
plotGrid=plotGrid,
xlim=xlim,
ylim=ylim,
clim=climsigblock,
plotBlock=plotBlock,
num=ncontours,
cblabel=clabelSigs,
)
ax[2][1] = plotJ(
ax[2][1],
J_block_ey,
"(f) Sensitivity of $E_y$ wrt log($\sigma_{block}$)",
plotGrid=plotGrid,
xlim=xlim,
ylim=ylim,
clim=climsigblock,
plotBlock=plotBlock,
num=ncontours,
cblabel=clabelSigs,
)
plt.tight_layout()
if saveFig is True:
fig.savefig("J_sigmas", dpi=300)
# Plot layer contribution
fig, ax = plt.subplots(2, 2, figsize=(12, 10))
# ax = utils.mkvc(ax)
useaxlim = True
xlim = np.r_[-1500.0, 1500.0]
ylim = np.r_[-1500.0, 1500.0]
J_z0_ex, J_z0_ey = (
J[3, :nrx].reshape(nx, ny, order="F"),
J[3, nrx:].reshape(nx, ny, order="F"),
)
J_hz_ex, J_hz_ey = (
J[4, :nrx].reshape(nx, ny, order="F"),
J[4, nrx:].reshape(nx, ny, order="F"),
)
ax[0][0] = plotJ(
ax[0][0],
J_z0_ex,
"(g) Sensitivity of $E_x$ wrt layer $z_0$",
plotGrid=plotGrid,
xlim=xlim,
ylim=ylim,
plotBlock=plotBlock,
num=ncontours,
cblabel="Sensitivity (V/m / m)",
)
ax[0][1] = plotJ(
ax[0][1],
J_z0_ey,
"(h) Sensitivity of $E_y$ wrt layer $z_0$",
plotGrid=plotGrid,
xlim=xlim,
ylim=ylim,
plotBlock=plotBlock,
num=ncontours,
cblabel="Sensitivity (V/m / m)",
)
ax[1][0] = plotJ(
ax[1][0],
J_hz_ex,
"(i) Sensitivity of $E_x$ wrt layer $h$",
plotGrid=plotGrid,
xlim=xlim,
ylim=ylim,
plotBlock=plotBlock,
num=ncontours,
cblabel="Sensitivity (V/m / m)",
)
ax[1][1] = plotJ(
ax[1][1],
J_hz_ey,
"(j) Sensitivity of $E_y$ wrt layer $h$",
plotGrid=plotGrid,
xlim=xlim,
ylim=ylim,
plotBlock=plotBlock,
num=ncontours,
cblabel="Sensitivity (V/m / m)",
)
plt.tight_layout()
if saveFig is True:
fig.savefig("J_layer", dpi=300)
# Block Geometry
fig, ax = plt.subplots(4, 2, figsize=(12, 20))
useaxlim = True
xlim = np.r_[-1500.0, 1500.0]
ylim = np.r_[-1500.0, 1500.0]
J_x0_ex = J[5, :nrx].reshape(nx, ny, order="F")
J_x0_ey = J[5, nrx:].reshape(nx, ny, order="F")
J_y0_ex = J[6, :nrx].reshape(nx, ny, order="F")
J_y0_ey = J[6, nrx:].reshape(nx, ny, order="F")
J_dx_ex = J[7, :nrx].reshape(nx, ny, order="F")
J_dx_ey = J[7, nrx:].reshape(nx, ny, order="F")
J_dy_ex = J[8, :nrx].reshape(nx, ny, order="F")
J_dy_ey = J[8, nrx:].reshape(nx, ny, order="F")
ax[0][0] = plotJ(
ax[0][0],
J_x0_ex,
"(k) Sensitivity of $E_x$ wrt block $x_0$",
plotGrid=plotGrid,
xlim=xlim,
ylim=ylim,
plotBlock=plotBlock,
num=ncontours,
cblabel="Sensitivity (V/m / m)",
)
ax[0][1] = plotJ(
ax[0][1],
J_x0_ey,
"(l) Sensitivity of $E_y$ wrt block $x_0$",
plotGrid=plotGrid,
xlim=xlim,
ylim=ylim,
plotBlock=plotBlock,
num=ncontours,
cblabel="Sensitivity (V/m / m)",
)
ax[1][0] = plotJ(
ax[1][0],
J_y0_ex,
"(m) Sensitivity of $E_x$ wrt block $y_0$",
plotGrid=plotGrid,
xlim=xlim,
ylim=ylim,
plotBlock=plotBlock,
num=ncontours,
cblabel="Sensitivity (V/m / m)",
)
ax[1][1] = plotJ(
ax[1][1],
J_y0_ey,
"(n) Sensitivity of $E_y$ wrt block $y_0$",
plotGrid=plotGrid,
xlim=xlim,
ylim=ylim,
plotBlock=plotBlock,
num=ncontours,
cblabel="Sensitivity (V/m / m)",
)
ax[2][0] = plotJ(
ax[2][0],
J_dx_ex,
"(o) Sensitivity of $E_x$ wrt block $d_x$",
plotGrid=plotGrid,
xlim=xlim,
ylim=ylim,
plotBlock=plotBlock,
num=ncontours,
cblabel="Sensitivity (V/m / m)",
)
ax[2][1] = plotJ(
ax[2][1],
J_dx_ey,
"(p) Sensitivity of $E_y$ wrt block $d_x$",
plotGrid=plotGrid,
xlim=xlim,
ylim=ylim,
plotBlock=plotBlock,
num=ncontours,
cblabel="Sensitivity (V/m / m)",
)
ax[3][0] = plotJ(
ax[3][0],
J_dy_ex,
"(q) Sensitivity of $E_x$ wrt block $d_y$",
plotGrid=plotGrid,
xlim=xlim,
ylim=ylim,
plotBlock=plotBlock,
num=ncontours,
cblabel="Sensitivity (V/m / m)",
)
ax[3][1] = plotJ(
ax[3][1],
J_dy_ey,
"(r) Sensitivity of $E_y$ wrt block $d_y$",
plotGrid=plotGrid,
xlim=xlim,
ylim=ylim,
plotBlock=plotBlock,
num=ncontours,
cblabel="Sensitivity (V/m / m)",
)
plt.tight_layout()
if saveFig is True:
fig.savefig("J_block", dpi=300)
# ---------------------------------------------------------------------- #
# ---------------- Run the example ------------------------------------- #
# ---------------------------------------------------------------------- #
def run(
self, plotIt=False, runTests=False, verbose=True, saveFields=True, saveFig=False
):
self.verbose = verbose
if plotIt is True: # Plot the Primary Model
# self.plotPrimaryMesh() # plot the mesh
self.plotPrimaryProperties() # plot mu, sigma
# Primary Simulation
self.primaryProblem.survey = self.primarySurvey
primfields = self.solvePrimary(self.primaryProblem, m=self.mtrue)
if saveFields is True:
np.save("primaryfields_" + self.NAME, primfields[:, :])
print(" saved %s" % "primaryfields_" + self.NAME)
mback = self.mtrue.copy()
mback[2] = np.log(self.sigmalayer)
# Secondary Problem and Survey
sec_problem = self.setupSecondaryProblem(mapping=self.mapping)
sec_survey = self.setupSecondarySurvey(
self.primaryProblem, self.primarySurvey, self.primaryMap2meshs
)
sec_problem.survey = sec_survey
# layered earth only (background)
background_problem = self.setupSecondaryProblem(mapping=self.primaryMap2meshs)
background_survey = self.setupSecondarySurvey(
self.primaryProblem, self.primarySurvey, self.primaryMap2meshs
)
background_problem.survey = background_survey
# -------------- Test the sensitivity ----------------------------- #
if runTests:
x0 = self.mtrue
# Test Block Model
def fun(x):
return [sec_problem.dpred(x), lambda x: sec_problem.Jvec(self.mtrue, x)]
tests.checkDerivative(fun, self.mtrue, num=2, plotIt=False)
# -------------- Calculate Fields --------------------------------- #
# Background
t0 = time.time()
print("solving background ... ")
fieldsback, dpredback = self.solveSecondary(
background_problem, background_survey, self.mtrue
)
t1 = time.time()
print("... done. dpred_back {}".format(t1 - t0))
if saveFields:
np.save("dpred_" + self.NAME + "_back", dpredback)
np.save("fields_" + self.NAME + "_back", fieldsback[:, :])
print(" saved {}".format(self.NAME + "_back"))
# with Block
t0 = time.time()
print("solving with block ... ")
fields, dpred = self.solveSecondary(sec_problem, sec_survey, self.mtrue)
print("... done. dpred {}".format(t1 - t0))
if saveFields:
np.save("dpred_" + self.NAME, dpred)
np.save("fields_" + self.NAME, fields[:, :])
print(" saved {}".format(self.NAME))
# -------------- Calculate J --------------------------------- #
# Calculate J with block
print("starting J with block")
t0 = time.time()
J = []
for i in range(len(self.mtrue)):
ei = np.zeros_like(self.mtrue)
ei[i] = 1.0
J.append(sec_problem.Jvec(self.mtrue, ei, f=fields))
J = np.vstack(J)
t1 = time.time()
print(" J {}".format(t1 - t0))
if saveFields is True:
np.save("J_" + self.NAME, J)
print(" saved {}".format("J_" + self.NAME))
return {
"primfields": primfields, # primary fields
"fieldsback": fieldsback, # fields without block
"dpredback": dpredback, # predicted data without block
"fields": fields, # fields with block
"dpred": dpred, # predicted data with block
"J": J, # sensitivity
}
class PrimSecCasingStoredResults(PrimSecCasingExample):
url = "https://storage.googleapis.com/simpeg/papers/Heagyetal2016/"
# cloudfiles = [
# 'primaryfields_PrimSec_5e6Casing_50Mu_05Hz_LargeCondBody.npy',
# 'dpred_PrimSec_5e6Casing_50Mu_05Hz_LargeCondBody_back.npy',
# 'dpred_PrimSec_5e6Casing_50Mu_05Hz_LargeCondBody.npy',
# 'J_PrimSec_5e6Casing_50Mu_05Hz_LargeCondBody.npy',
# ]
cloudfile = "Heagyetal2016Casing.hdf5"
entry_names = [
"primaryfields_h_PrimSec_5e6Casing_50Mu_05Hz_LargeCondBody",
"dpred_PrimSec_5e6Casing_50Mu_05Hz_LargeCondBody_back",
"dpred_PrimSec_5e6Casing_50Mu_05Hz_LargeCondBody",
"J_PrimSec_5e6Casing_50Mu_05Hz_LargeCondBody",
]
def removeStoredResults(self):
import shutil
print("Removing {}".format(self.filepath))
shutil.rmtree(self.filepath)
def run(self, plotIt=False, runTests=False, saveFig=False):
filepath = download(
self.url + self.cloudfile, folder="~/Downloads/simpegtemp", overwrite=True
)
self.filepath = os.path.sep.join(filepath.split(os.path.sep)[:-1])
# resultsFiles = ['{filepath}{slash}{file}'.format(
# filepath=self.filepath, slash=os.path.sep, file=file)
# for file in self.cloudfiles]
# results = [np.load(file, encoding='bytes') for file in resultsFiles]
h5f = h5py.File(filepath, "r")
results = [h5f[entry_name][:] for entry_name in self.entry_names]
results = dict(zip(["primfields", "dpredback", "dpred", "J"], results))
# Put the primary fields into a fields object
self.primaryProblem.model = self.mtrue # set the current model
self.primaryProblem.survey = self.primarySurvey
primaryFields = self.primaryProblem.fieldsPair(self.primaryProblem)
primaryFields[self.primarySurvey.source_list[0], "hSolution"] = results[
"primfields"
]
results["primfields"] = primaryFields
return results
def run(plotIt=True, runTests=False, reRun=False, saveFig=False):
"""
EM Heagyetal2016 CasingFwd3DPrimSecSrc
======================================
Computation of Sensitivities for the primary-secondary example shown in
Heagy et al 2016.
:param bool plotIt: plot results
:param bool runTests: run sensitivity tests? (slow...)
:param bool reRun: recompute results? or just download stored results
and plot
:param bool saveFig: save the figures?
"""
# recompute results?
if reRun is True:
casingExample = PrimSecCasingExample()
# or download stored results
elif reRun is False:
casingExample = PrimSecCasingStoredResults()
dataDict = casingExample.run(runTests=runTests)
# plot some things
if plotIt is True or saveFig is True:
casingExample.plotPrimaryFields(dataDict["primfields"], saveFig=saveFig)
casingExample.plotSecondarySource(dataDict["primfields"], saveFig=saveFig)
casingExample.plotData(
dataDict["dpred"], dataDict["dpredback"], saveFig=saveFig
)
casingExample.plotSensitivities(dataDict["J"], saveFig=saveFig)
if plotIt is True:
plt.show()
# remove the downloaded results
if reRun is False:
casingExample.removeStoredResults()
if __name__ == "__main__":
run(plotIt=True, runTests=False, reRun=False, saveFig=False)
```
#### File: tests/base/test_joint.py
```python
from __future__ import print_function
import unittest
import numpy as np
import scipy.sparse as sp
import discretize
from SimPEG import (
data_misfit,
maps,
utils,
regularization,
inverse_problem,
optimization,
directives,
inversion,
)
from SimPEG.electromagnetics import resistivity as DC
np.random.seed(82)
class DataMisfitTest(unittest.TestCase):
def setUp(self):
mesh = discretize.TensorMesh([30, 30], x0=[-0.5, -1.0])
sigma = np.random.rand(mesh.nC)
model = np.log(sigma)
# prob = DC.Simulation3DCellCentered(mesh, rhoMap=maps.ExpMap(mesh))
# prob1 = DC.Simulation3DCellCentered(mesh, rhoMap=maps.ExpMap(mesh))
rx = DC.Rx.Pole(utils.ndgrid([mesh.vectorCCx, np.r_[mesh.vectorCCy.max()]]))
rx1 = DC.Rx.Pole(utils.ndgrid([mesh.vectorCCx, np.r_[mesh.vectorCCy.min()]]))
src = DC.Src.Dipole(
[rx], np.r_[-0.25, mesh.vectorCCy.max()], np.r_[0.25, mesh.vectorCCy.max()]
)
src1 = DC.Src.Dipole(
[rx1], np.r_[-0.25, mesh.vectorCCy.max()], np.r_[0.25, mesh.vectorCCy.max()]
)
survey = DC.Survey([src])
simulation0 = DC.simulation.Simulation3DCellCentered(
mesh=mesh, survey=survey, rhoMap=maps.ExpMap(mesh)
)
survey1 = DC.Survey([src1])
simulation1 = DC.simulation.Simulation3DCellCentered(
mesh=mesh, survey=survey1, rhoMap=maps.ExpMap(mesh)
)
dobs0 = simulation0.make_synthetic_data(model)
dobs1 = simulation1.make_synthetic_data(model)
self.mesh = mesh
self.model = model
self.survey0 = survey
self.sim0 = simulation0
self.survey1 = survey1
self.sim1 = simulation1
# self.dmis0 = data_misfit.L2DataMisfit(self.survey0)
self.dmis0 = data_misfit.L2DataMisfit(data=dobs0, simulation=simulation0)
self.dmis1 = data_misfit.L2DataMisfit(data=dobs1, simulation=simulation1)
self.dmiscombo = self.dmis0 + self.dmis1
def test_multiDataMisfit(self):
self.dmis0.test()
self.dmis1.test()
self.dmiscombo.test(x=self.model)
def test_inv(self):
reg = regularization.Tikhonov(self.mesh)
opt = optimization.InexactGaussNewton(maxIter=10, use_WolfeCurvature=True)
invProb = inverse_problem.BaseInvProblem(self.dmiscombo, reg, opt)
directives_list = [
directives.ScalingMultipleDataMisfits_ByEig(verbose=True),
directives.AlphasSmoothEstimate_ByEig(verbose=True),
directives.BetaEstimate_ByEig(beta0_ratio=1e-2),
directives.BetaSchedule(),
]
inv = inversion.BaseInversion(invProb, directiveList=directives_list)
m0 = self.model.mean() * np.ones_like(self.model)
mrec = inv.run(m0)
def test_inv_mref_setting(self):
reg1 = regularization.Tikhonov(self.mesh)
reg2 = regularization.Tikhonov(self.mesh)
reg = reg1 + reg2
opt = optimization.ProjectedGNCG(
maxIter=10, lower=-10, upper=10, maxIterLS=20, maxIterCG=50, tolCG=1e-4
)
invProb = inverse_problem.BaseInvProblem(self.dmiscombo, reg, opt)
directives_list = [
directives.ScalingMultipleDataMisfits_ByEig(chi0_ratio=[0.01, 1.0], verbose=True),
directives.AlphasSmoothEstimate_ByEig(verbose=True),
directives.BetaEstimate_ByEig(beta0_ratio=1e-2),
directives.BetaSchedule(),
]
inv = inversion.BaseInversion(invProb, directiveList=directives_list)
m0 = self.model.mean() * np.ones_like(self.model)
mrec = inv.run(m0)
self.assertTrue(np.all(reg1.mref == m0))
self.assertTrue(np.all(reg2.mref == m0))
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "jku/warehouse",
"score": 2
} |
#### File: checks/setup_patterns/test_check.py
```python
import pretend
import pytest
import yara
from warehouse.malware.checks.setup_patterns import check as c
from warehouse.malware.models import (
MalwareCheckState,
VerdictClassification,
VerdictConfidence,
)
from .....common.db.malware import MalwareCheckFactory
from .....common.db.packaging import FileFactory
def test_initializes(db_session):
check_model = MalwareCheckFactory.create(
name="SetupPatternCheck", state=MalwareCheckState.Enabled
)
check = c.SetupPatternCheck(db_session)
assert check.id == check_model.id
assert isinstance(check._yara_rules, yara.Rules)
@pytest.mark.parametrize(
("obj", "file_url"), [(None, pretend.stub()), (pretend.stub(), None)]
)
def test_scan_missing_kwargs(db_session, obj, file_url):
MalwareCheckFactory.create(
name="SetupPatternCheck", state=MalwareCheckState.Enabled
)
check = c.SetupPatternCheck(db_session)
with pytest.raises(c.FatalCheckException):
check.scan(obj=obj, file_url=file_url)
def test_scan_non_sdist(db_session):
MalwareCheckFactory.create(
name="SetupPatternCheck", state=MalwareCheckState.Enabled
)
check = c.SetupPatternCheck(db_session)
file = FileFactory.create(packagetype="bdist_wheel")
check.scan(obj=file, file_url=pretend.stub())
assert check._verdicts == []
def test_scan_no_setup_contents(db_session, monkeypatch):
monkeypatch.setattr(
c, "fetch_url_content", pretend.call_recorder(lambda *a: pretend.stub())
)
monkeypatch.setattr(
c, "extract_file_content", pretend.call_recorder(lambda *a: None)
)
MalwareCheckFactory.create(
name="SetupPatternCheck", state=MalwareCheckState.Enabled
)
check = c.SetupPatternCheck(db_session)
file = FileFactory.create(packagetype="sdist")
check.scan(obj=file, file_url=pretend.stub())
assert len(check._verdicts) == 1
assert check._verdicts[0].check_id == check.id
assert check._verdicts[0].file_id == file.id
assert check._verdicts[0].classification == VerdictClassification.Indeterminate
assert check._verdicts[0].confidence == VerdictConfidence.High
assert (
check._verdicts[0].message
== "sdist does not contain a suitable setup.py for analysis"
)
def test_scan_benign_contents(db_session, monkeypatch):
monkeypatch.setattr(
c, "fetch_url_content", pretend.call_recorder(lambda *a: pretend.stub())
)
monkeypatch.setattr(
c,
"extract_file_content",
pretend.call_recorder(lambda *a: b"this is a benign string"),
)
MalwareCheckFactory.create(
name="SetupPatternCheck", state=MalwareCheckState.Enabled
)
check = c.SetupPatternCheck(db_session)
file = FileFactory.create(packagetype="sdist")
check.scan(obj=file, file_url=pretend.stub())
assert len(check._verdicts) == 1
assert check._verdicts[0].check_id == check.id
assert check._verdicts[0].file_id == file.id
assert check._verdicts[0].classification == VerdictClassification.Benign
assert check._verdicts[0].confidence == VerdictConfidence.Low
assert check._verdicts[0].message == "No malicious patterns found in setup.py"
def test_scan_matched_content(db_session, monkeypatch):
monkeypatch.setattr(
c, "fetch_url_content", pretend.call_recorder(lambda *a: pretend.stub())
)
monkeypatch.setattr(
c,
"extract_file_content",
pretend.call_recorder(
lambda *a: b"this looks suspicious: os.system('cat /etc/passwd')"
),
)
MalwareCheckFactory.create(
name="SetupPatternCheck", state=MalwareCheckState.Enabled
)
check = c.SetupPatternCheck(db_session)
file = FileFactory.create(packagetype="sdist")
check.scan(obj=file, file_url=pretend.stub())
assert len(check._verdicts) == 1
assert check._verdicts[0].check_id == check.id
assert check._verdicts[0].file_id == file.id
assert check._verdicts[0].classification == VerdictClassification.Threat
assert check._verdicts[0].confidence == VerdictConfidence.High
assert check._verdicts[0].message == "process_spawn_in_setup"
``` |
{
"source": "jku-win-dke/sparkInAction2-chapter05",
"score": 3
} |
#### File: main/python/pi.py
```python
import sys
from random import random
from operator import add
from pyspark.sql import SparkSession
spark = SparkSession\
.builder\
.appName("PythonPi")\
.getOrCreate()
n = 100000
def throwDarts(_): #A
x = random() * 2 - 1 #B
y = random() * 2 - 1 #B
return 1 if x ** 2 + y ** 2 <= 1 else 0 #B
count = spark.sparkContext.parallelize(range(1, n + 1), 1).map(throwDarts).reduce(add)
print("Pi is roughly %f" % (4.0 * count / n))
spark.stop()
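# --- Added illustration (not part of the original example) ---
# The same Monte Carlo estimator can be sanity-checked locally without Spark;
# the sample size below is an arbitrary choice for a quick check.
local_n = 10000
local_count = sum(throwDarts(i) for i in range(local_n))
print("Local (non-Spark) Pi estimate is roughly %f" % (4.0 * local_count / local_n))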
``` |
{
"source": "jkvakaric/krv-je-zivot",
"score": 2
} |
#### File: krvjezivot/administration/views.py
```python
from django.shortcuts import render
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404
from django.urls import reverse
from django.views import View
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from django.views.decorators.http import require_POST, require_GET
from django.views.decorators.csrf import csrf_exempt
from krvjezivot.users.models import User
from krvjezivot.users.enums import Sex
from .forms import DonationEventForm, DonationVenueForm
from .models import DonationEvent, DonationVenue, DonationInvite, ConfirmedDonation
# Create your views here.
@require_GET
@login_required
def get_donation_venues_list(request):
donation_venues_list = DonationVenue.objects.all()
return render(request, 'administration/donation-venues-list.html',
{'donation_venues_list': donation_venues_list})
class DonationVenueFormView(LoginRequiredMixin, View):
form_class = DonationVenueForm
instance = DonationVenue()
template_name = 'administration/donation_venue_add_update_partial.html'
def get(self, request, venue_id=None, *args, **kwargs):
if venue_id is not None:
dv = get_object_or_404(DonationVenue, pk=venue_id)
self.instance = dv
form = self.form_class(instance=self.instance)
return render(request, self.template_name, {
'form': form,
'venue_id': venue_id
})
def post(self, request, venue_id=None, *args, **kwargs):
form = self.form_class(request.POST)
if form.is_valid():
if venue_id is not None:
self.instance = get_object_or_404(DonationVenue, pk=venue_id)
form = self.form_class(request.POST, instance=self.instance)
form.save()
return HttpResponseRedirect(
reverse('administration:donation_venues_list'))
return render(request, self.template_name, {
'form': form,
'venue_id': venue_id
})
@require_POST
@login_required
def donation_venue_delete(request, venue_id=None, *args, **kwargs):
if venue_id is not None:
instance = get_object_or_404(DonationVenue, pk=venue_id)
instance.delete()
return HttpResponseRedirect(reverse('administration:donation_venues_list'))
@require_GET
@login_required
def get_donation_events_list(request):
donation_events_list = DonationEvent.objects.all()
return render(request, 'administration/donation_events_list.html',
{'donation_events_list': donation_events_list})
class DonationEventFormView(LoginRequiredMixin, View):
form_class = DonationEventForm
instance = DonationEvent()
template_name = 'administration/donation_event_add_update_partial.html'
def get(self, request, event_id=None, *args, **kwargs):
if event_id is not None:
de = get_object_or_404(DonationEvent, pk=event_id)
self.instance = de
form = self.form_class(instance=self.instance)
return render(request, self.template_name, {
'form': form,
'event_id': event_id
})
def post(self, request, event_id=None, *args, **kwargs):
form = self.form_class(request.POST)
if form.is_valid():
if event_id is not None:
self.instance = get_object_or_404(DonationEvent, pk=event_id)
form = self.form_class(request.POST, instance=self.instance)
evnt = form.save()
self.send_invitations(request, evnt)
return HttpResponseRedirect(
reverse('administration:donation_events_list'))
return render(request, self.template_name, {
'form': form,
'event_id': event_id
})
def send_invitations(self, request, evnt):
users = [
usr for usr in User.objects.all()
if usr.get_days_since_last_donation > 90 and usr.sex == Sex.MALE
or usr.get_days_since_last_donation > 90 and usr.sex == Sex.FEMALE
]
for user in users:
            # invite each eligible donor (not the requesting admin)
            DonationInvite.objects.create(event=evnt, user=user)
@require_POST
@login_required
def donation_event_delete(request, event_id=None, *args, **kwargs):
if event_id is not None:
instance = get_object_or_404(DonationEvent, pk=event_id)
instance.delete()
return HttpResponseRedirect(reverse('administration:donation_events_list'))
@require_GET
@login_required
def get_donation_qr_confirm(request, event_id=None, *args, **kwargs):
if event_id is not None:
instance = get_object_or_404(DonationEvent, pk=event_id)
return render(request, 'administration/qrcode_confirm.html',
{'event': instance})
@csrf_exempt
@login_required
def user_donation_qr_confirm(request, event_id=None, *args, **kwargs):
uname = request.POST.get("username", "")
user_for_confirm = get_object_or_404(User, username=uname)
evnt_for_confirm = get_object_or_404(DonationEvent, id=event_id)
invitation = get_object_or_404(
DonationInvite, event_id=event_id, user=user_for_confirm)
if invitation.confirmed is True:
ConfirmedDonation.objects.create(
user=user_for_confirm, event=evnt_for_confirm)
return HttpResponseRedirect(
reverse('administration:get_donation_qr_confirm'),
{'event': evnt_for_confirm})
```
#### File: krvjezivot/donations/views.py
```python
from django.shortcuts import render
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404
from django.urls import reverse
from django.views import View
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from django.views.decorators.http import require_POST, require_GET
from krvjezivot.users.models import User
from krvjezivot.users.enums import Sex
from krvjezivot.administration.forms import DonationEventForm, DonationVenueForm
from krvjezivot.administration.models import DonationEvent, DonationVenue, DonationInvite, ConfirmedDonation
# Create your views here.
@require_GET
@login_required
def get_donation_invites_list(request):
donation_invites_pending = DonationInvite.objects.filter(
user=request.user, confirmed=False)
donation_invites_confirmed = DonationInvite.objects.filter(
user=request.user, confirmed=True)
return render(
request, 'donations/invites_list.html', {
'donation_invites_pending': donation_invites_pending,
'donation_invites_confirmed': donation_invites_confirmed
})
@require_POST
@login_required
def donation_invite_confirm(request, invite_id=None, *args, **kwargs):
inv = get_object_or_404(DonationInvite, id=invite_id)
inv.confirmed = True
inv.save()
return HttpResponseRedirect(reverse('donations:donation_invites_list'))
@require_POST
@login_required
def donation_invite_delete(request, invite_id=None, *args, **kwargs):
if invite_id is not None:
inv = get_object_or_404(DonationInvite, id=invite_id)
inv.delete()
return HttpResponseRedirect(reverse('donations:donation_invites_list'))
@require_GET
@login_required
def get_qrcode(request):
return render(request, 'donations/qrcode.html')
@require_GET
@login_required
def get_donation_history(request, *args, **kwargs):
donations = ConfirmedDonation.objects.filter(user=request.user)
return render(request, 'donations/donations_history.html',
{'donations': donations})
```
#### File: krvjezivot/users/signals.py
```python
from django.dispatch import receiver
from allauth.account.signals import user_signed_up
from allauth.socialaccount.signals import social_account_added
@receiver(user_signed_up)
def add_full_name_for_new_user(sender, user, **kwargs):
user.full_name = user.username
user.save()
@receiver(social_account_added)
def add_setting_for_social_account(sender, sociallogin, **kwargs):
pass
``` |
{
"source": "jkvc/modapt",
"score": 2
} |
#### File: 0.data_processing/framing/1.create_trainsets.py
```python
from os.path import join
import pandas as pd
from config import DATA_DIR
from experiments.datadef import zoo
from modapt.utils import load_json, save_json
FRAMING_DATA_DIR = join(DATA_DIR, "framing_labeled")
ISSUES = zoo.get_datadef("framing").domain_names
if __name__ == "__main__":
stats = []
for issue in ISSUES:
print(">>", issue)
data = load_json(join(FRAMING_DATA_DIR, f"{issue}_labeled.json"))
ids = list(data.keys())
testsets = load_json(join(FRAMING_DATA_DIR, f"{issue}_test_sets.json"))
testsets = {setname: set(ids) for setname, ids in testsets.items()}
trainsets = {}
        # relevance train set: any sample not in the relevance test set
trainsets["relevance"] = list(
{id for id in data if (id in ids and id not in testsets["relevance"])}
)
        # primary frame train set: any sample not in the primary_frame test set that has a non-null primary frame
trainsets["primary_frame"] = list(
{
id
for id, item in data.items()
if (
id in ids
and id not in testsets["primary_frame"]
and item["primary_frame"] != 0
and item["primary_frame"] != None
)
}
)
        # primary tone train set: any sample not in the primary_tone test set that has a non-null primary tone
trainsets["primary_tone"] = list(
{
id
for id, item in data.items()
if (
id in ids
and id not in testsets["primary_tone"]
and item["primary_tone"] != 0
and item["primary_tone"] != None
)
}
)
save_json(trainsets, join(FRAMING_DATA_DIR, f"{issue}_train_sets.json"))
stat = {
"raw": len(data),
}
stat.update(
{f"train_{setname}": len(ids) for setname, ids in trainsets.items()}
)
stat.update({f"test_{setname}": len(ids) for setname, ids in testsets.items()})
stats.append(stat)
for k, v in stat.items():
print("--", k, v)
df = pd.DataFrame(stats)
df.to_csv(join(FRAMING_DATA_DIR, "stats.csv"))
```
#### File: datadef/definitions/framing.py
```python
from os.path import join
from typing import Any, Dict, List
import numpy as np
from config import DATA_DIR, KFOLD
from experiments.datadef.zoo import register_datadef
from modapt.dataset.data_sample import DataSample
from modapt.dataset.dataset_def import DatasetDefinition
from modapt.utils import load_json
from tqdm import tqdm
ISSUES = [
"climate",
"deathpenalty",
"guncontrol",
"immigration",
# "police", # FIXME
"samesex",
"tobacco",
]
ISSUE2IIDX = {issue: i for i, issue in enumerate(ISSUES)}
PRIMARY_FRAME_NAMES = [
"Economic",
"Capacity and Resources",
"Morality",
"Fairness and Equality",
"Legality, Constitutionality, Jurisdiction",
"Policy Prescription and Evaluation",
"Crime and Punishment",
"Security and Defense",
"Health and Safety",
"Quality of Life",
"Cultural Identity",
"Public Sentiment",
"Political",
"External Regulation and Reputation",
"Other",
]
def primary_frame_code_to_fidx(frame_float: float) -> int:
# see codes.json, non null frames are [1.?, 15.?], map them to [0, 14]
assert frame_float != 0
assert frame_float < 16
return int(frame_float) - 1
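# Added illustration (not in the original module): concrete examples of the
# code-to-index mapping described above, e.g. code 1.x -> 0 and code 15.x -> 14.
assert primary_frame_code_to_fidx(1.0) == 0
assert primary_frame_code_to_fidx(15.2) == 14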
PRIMARY_TONE_NAMES = [
"Pro",
"Neutral",
"Anti",
]
def primary_tone_code_to_yidx(tone_float: float) -> int:
assert tone_float >= 17 and tone_float < 20
return int(tone_float) - 17
def code_to_yidx(code: float, task: str) -> int:
if task == "primary_frame":
return primary_frame_code_to_fidx(code)
elif task == "primary_tone":
return primary_tone_code_to_yidx(code)
else:
raise NotImplementedError()
def remove_framing_text_headings(text):
lines = text.split("\n\n")
lines = lines[3:] # first 3 lines are id, "PRIMARY", title
text = "\n".join(lines)
return text
def load_all_framing_samples(
issues: List[str], split: str, task: str
) -> List[DataSample]:
assert split in ["train", "test"]
samples = []
for issue in tqdm(issues):
ids = load_json(
join(DATA_DIR, "framing_labeled", f"{issue}_{split}_sets.json")
)[task]
raw_data = load_json(join(DATA_DIR, "framing_labeled", f"{issue}_labeled.json"))
for id in ids:
samples.append(
DataSample(
id=id,
text=remove_framing_text_headings(raw_data[id]["text"]),
y_idx=code_to_yidx(raw_data[id][task], task),
domain_name=issue,
domain_idx=ISSUE2IIDX[issue],
)
)
return samples
def load_kfold_framing_samples(
issues: List[str], task: str
) -> List[Dict[str, List[DataSample]]]:
kidx2split2samples = [{"train": [], "valid": []} for _ in range(KFOLD)]
samples = load_all_framing_samples(issues, split="train", task=task)
for issue in tqdm(issues):
kfold_data = load_json(
join(DATA_DIR, "framing_labeled", f"{KFOLD}fold", f"{issue}.json")
)
for kidx, fold in enumerate(kfold_data[task]):
for split in ["train", "valid"]:
ids = set(fold[split])
selected_samples = [s for s in samples if s.id in ids]
kidx2split2samples[kidx][split].extend(selected_samples)
return kidx2split2samples
def load_splits(
issues: List[str], splits: List[str], task: str
) -> Dict[str, List[DataSample]]:
ret = {}
if "valid" in splits:
split2samples = load_kfold_framing_samples(issues, task)[0]
ret["train"] = split2samples["train"]
ret["valid"] = split2samples["valid"]
else:
ret["train"] = load_all_framing_samples(issues, "train", task)
if "test" in splits:
ret["test"] = load_all_framing_samples(issues, "test", task)
ret = {k: v for k, v in ret.items() if k in splits}
return ret
_LABELPROPS_DIR = join(DATA_DIR, "framing_labeled", "labelprops")
def load_labelprops(split, task):
if split == "valid":
split = "train" # kfold valid and train are the same set
return {
issue: np.array(labelprops)
for issue, labelprops in load_json(
join(_LABELPROPS_DIR, f"{task}.{split}.json")
).items()
}
register_datadef(
"framing",
DatasetDefinition(
domain_names=ISSUES,
label_names=PRIMARY_FRAME_NAMES,
load_splits_func=lambda issues, splits: load_splits(
issues,
splits,
"primary_frame",
),
load_labelprops_func=lambda splits: load_labelprops(splits, "primary_frame"),
),
)
register_datadef(
"framing_tone",
DatasetDefinition(
domain_names=ISSUES,
label_names=PRIMARY_TONE_NAMES,
load_splits_func=lambda issues, splits: load_splits(
issues,
splits,
"primary_tone",
),
load_labelprops_func=lambda splits: load_labelprops(splits, "primary_tone"),
),
)
```
#### File: model/logreg_config/grid_search.py
```python
import itertools
from .base import load_logreg_model_config
_LOGREG_ARCH_PREFIX = "logreg"
_CONFIG_OVERRIDES = [
("+sn", "use_source_individual_norm"),
("+kb", "use_log_labelprop_bias"),
("+lr", "use_learned_residualization"),
("+gr", "use_gradient_reversal"),
]
# tbh these are the only ones that make sense
_ARCH_FILTER = [
"logreg",
"logreg+kb",
"logreg+sn+kb",
"logreg+gr",
"logreg+lr",
"logreg+sn",
"logreg+sn+gr",
"logreg+sn+lr",
]
def load_logreg_model_config_all_archs(n_classes, n_sources):
base_config = load_logreg_model_config(_LOGREG_ARCH_PREFIX, n_classes, n_sources)
arch2configs = {}
combinations = itertools.product([False, True], repeat=len(_CONFIG_OVERRIDES))
for comb in combinations:
arch = _LOGREG_ARCH_PREFIX
config_copy = {**base_config}
for (prefix, key), value in zip(_CONFIG_OVERRIDES, comb):
if value:
arch += prefix
config_copy[key] = value
arch2configs[arch] = config_copy
filtered = {arch: arch2configs[arch] for arch in _ARCH_FILTER}
return filtered
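# Added usage sketch (not in the original module): listing the architecture
# variants this grid produces; the 15 classes / 6 sources below are assumptions
# for illustration only, not values taken from the experiments.
if __name__ == "__main__":
    for arch_name in load_logreg_model_config_all_archs(15, 6):
        print(arch_name)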
```
#### File: modapt/model/model_utils.py
```python
import torch.nn as nn
from torch.autograd import Function
class RevGrad(Function):
"""
https://github.com/rpryzant/deconfounded-lexicon-induction
"""
@staticmethod
def forward(ctx, input_):
ctx.save_for_backward(input_)
output = input_
return output
@staticmethod
def backward(ctx, grad_output): # pragma: no cover
grad_input = None
if ctx.needs_input_grad[0]:
grad_input = -grad_output
return grad_input
class ReversalLayer(nn.Module):
def __init__(self):
"""
A gradient reversal layer.
This layer has no parameters, and simply reverses the gradient
in the backward pass.
https://github.com/rpryzant/deconfounded-lexicon-induction
"""
super().__init__()
def forward(self, input_):
return RevGrad.apply(input_)
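# Added usage sketch (not part of the original module): a ReversalLayer is
# typically placed in front of a domain classifier so that gradients from the
# domain loss are negated before reaching the shared encoder. The layer sizes
# below are illustrative assumptions.
def build_example_domain_head(in_features=64, n_domains=6):
    return nn.Sequential(
        ReversalLayer(),                    # flips gradients in the backward pass
        nn.Linear(in_features, n_domains),  # simple domain classifier
    )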
``` |
{
"source": "jkveltri/midstreamMediaRedirection1",
"score": 3
} |
#### File: jkveltri/midstreamMediaRedirection1/server.py
```python
import os
import sys
import re
import datetime
from random import randint
try:
from SimpleHTTPServer import SimpleHTTPRequestHandler as Handler
from SocketServer import TCPServer as Server
from SocketServer import ThreadingMixIn
except ImportError:
from http.server import SimpleHTTPRequestHandler as Handler
from http.server import HTTPServer as Server
from socketserver import ThreadingMixIn
class ThreadingServer(ThreadingMixIn, Server):
pass
class StateTracker(object):
def __init__(self, logPath):
self.stateTable = {}
self.logPath = os.path.abspath(logPath)
log = open(self.logPath, 'w')
log.write("loadId,resReqNum,returnCode,url,date,time,dest_host,client_ip,client_port\n")
log.close()
def addRequestHistoryEvent(self, loadId, return_code, url, dest_host, client_addr):
resReqNum = self.numRequestsForUrl(loadId, url)+1
timestamp = datetime.datetime.now()
stateEntry = self.stateTable.get(loadId)
if not stateEntry:
stateEntry = {}
historyEntry = stateEntry.get(url)
if not historyEntry:
historyEntry = []
historyEntry.append({'return_code':return_code, 'timestamp':timestamp, 'dest_host':dest_host, 'client_addr':client_addr})
stateEntry[url] = historyEntry
self.stateTable[loadId] = stateEntry
log = open(self.logPath, 'a+')
log.write("%s,%d,%d,%s,%s,%s,%s,%s,%d\n" % (loadId,resReqNum,return_code,url,timestamp.date(),timestamp.time(),dest_host,client_addr[0],client_addr[1]))
log.close()
def numRequestsForUrl(self, loadId, url):
stateEntry = self.stateTable.get(loadId)
if not stateEntry:
return 0
historyEntry = stateEntry.get(url)
if not historyEntry:
return 0
return sum(1 if request['return_code'] == 200 else 0 for request in historyEntry)
counter = 0
class CustomHandler(Handler):
protocol_version = 'HTTP/1.0'
# Initialize the state tracker
#logFile = 'static/logs/log_' + datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S') + '.csv'
logFile = 'static/logs/log.csv'
stateTracker = StateTracker(logFile)
def __init__(self, req, client_addr, server):
Handler.__init__(self, req, client_addr, server)
def respond200(self, filePath, cookie=None):
try:
# Guess the MIME type of the requested file
mime = self.guess_type(filePath)
# Read the file
file = open(filePath, 'rb')
content = file.read()
file.close()
# Send response
content_length = len(content)
self.send_response(200)
self.send_header('Content-Type', mime)
self.send_header('Access-Control-Allow-Origin','https://mmr.mybluemix.net')
self.send_header('Content-Length', str(content_length))
self.send_header('Cache-Control', 'no-cache, no-store, must-revalidate')
self.end_headers()
self.wfile.write(content)
return 200
except IOError:
print(sys.exc_info()[0])
return self.respond400(404)
def respond303(self, location):
self.send_response(303)
self.send_header('Location', location)
self.send_header('Access-Control-Allow-Origin','*')
self.send_header('Content-Length', 0)
self.send_header('Cache-Control', 'no-cache, no-store, must-revalidate')
self.end_headers()
return 303
def respond400(self, code=400):
code = code if (code >= 400 and code <= 499) else 400
self.send_response(code)
self.send_header('Content-Length', 0)
self.end_headers()
return code
def separateQueryString(self, url):
if '?' not in url:
return url, None
filePath = None
queryString = None
matches = re.search('(.+)\?(.+)', url)
if matches:
filePath = matches.group(1)
queryString = matches.group(2)
return filePath, queryString
def extractQueryParamValue(self, queryString, paramName):
value = None
        # \w+ already stops at any non-word character (e.g. ',' or '&'), so no trailing pattern is needed
        reString = '.*' + re.escape(str(paramName)) + '=(\w+)'
matches = re.search(reString, queryString)
if matches:
value = matches.group(1)
return value
def lookForLoadId(self):
loadId = None
self.path, queryString = self.separateQueryString(self.path)
if(queryString):
loadId = self.extractQueryParamValue(queryString, 'loadId')
if 'Referer' in self.headers and not loadId:
refererPath, refererQueryString = self.separateQueryString(self.headers['Referer'])
if refererQueryString:
loadId = self.extractQueryParamValue(refererQueryString, 'loadId')
return loadId
def parseHostHeader(self):
subhost = None
superhost = None
hostMatches = re.search('(mmr[\d]*)?\.?(\w+\.\w+[\d\:]*)', self.headers['Host'])
if hostMatches:
subhost = hostMatches.group(1)
superhost = hostMatches.group(2)
if not subhost:
subhost = ''
return subhost,superhost
def do_GET(self):
# Look for a load id in the query string or referer
loadId = self.lookForLoadId()
# Use http in localhost test mode, https on server
devMode = 'localdevexample' in self.headers['Host'] # Edit hosts file to route localdevexample.com to 127.0.0.1
httpType = 'https' if not devMode else 'http'
# Parse the Host header
subhost, superhost = self.parseHostHeader()
"""print('--------------------')
print(self.client_address)
print(self.headers)
print(self.path)
print(queryString)
print(loadId)
print('Subhost: ' + subhost)
print('Superhost: ' + superhost)"""
returnCode = None
# Serve index file if nothing specific requested
if(self.path == '/'):
self.path += 'index.html'
        # Only the html, js, css, and csv files can be loaded without a loadId (no manifest or segments)
loadIdNotRequiredExtensions = ['.html','.js','.css','.csv']
for extension in loadIdNotRequiredExtensions:
if(self.path[-1*len(extension):] == extension):
# Assign a loadId by redirecting loads of the html pages
if(self.path.endswith('.html') and not self.path.endswith('index.html') and not loadId):
returnCode = self.respond303(self.path + '?loadId=' + str(randint(1,1000000000)))
return
# Otherwise serve the file
returnCode = self.respond200(self.path[1:])
return
# All other files require a loadId
if(not loadId):
print('Request requires a loadId, but one was not supplied')
returnCode = self.respond400(403)
return
# Determine the experiment type (vod, live, or domainName) associated with the request
manifestTypes = ['vod','live','domainName']
manifestType = None
typeMatches = re.search('\/(\w+)\/[\w\/]+.\w+', self.path);
if(typeMatches and typeMatches.group(1) in manifestTypes):
manifestType = typeMatches.group(1)
else:
returnCode = self.respond400()
return
# For .ts files, determine the segment number being requested
segPrefix = None
segNum = None
if(self.path[-3:] == '.ts'):
segMatches = re.search('.+\/([a-z,A-Z,_,-]+)(\d+).ts', self.path)
if(segMatches):
segPrefix = segMatches.group(1)
segNum = segMatches.group(2)
else:
returnCode = self.respond400()
return
# Handle live-style manifests redirects
if(manifestType == 'live'):
if(self.path[-5:] == '.m3u8'):
manifestRequestNum = CustomHandler.stateTracker.numRequestsForUrl(loadId, self.path)+1
if not('mmr' + str(manifestRequestNum) == subhost):
if(manifestRequestNum == 1 or 'mmr' + str(manifestRequestNum-1) == subhost):
redirectAddr = httpType + '://mmr' + str(manifestRequestNum) + '.' + superhost + self.path
returnCode = self.respond303(redirectAddr)
else:
returnCode = self.respond400(404)
else:
newPath = '/' + manifestType + '/' + manifestType + str(manifestRequestNum) + '.m3u8'
returnCode = self.respond200(newPath[1:])
elif(self.path[-3:] == '.ts'):
genericManifestPath = '/' + manifestType + '/' + manifestType + '.m3u8'
manifestRequestNum = CustomHandler.stateTracker.numRequestsForUrl(loadId, genericManifestPath)
currManifestPath = manifestType + '/' + manifestType + str(manifestRequestNum) + '.m3u8'
manifestFile = open(currManifestPath, 'r')
manifestContent = manifestFile.read()
manifestFile.close()
subhostNum = 0 if len('mmr') >= len(subhost) else int(subhost[len('mmr'):])
print(subhostNum)
if(self.path[len(manifestType)+2:] in manifestContent and subhostNum <= manifestRequestNum and subhostNum > 0):
print('respond 200')
returnCode = self.respond200(self.path[1:])
else:
print('respond 404')
returnCode = self.respond400(404)
# Handle VOD or domainName style domain-prefix segment redirects
elif(manifestType == 'vod' or manifestType == 'domainName'):
if(self.path[-3:] == '.ts'):
if not(subhost == 'mmr' + segNum):
returnCode = self.respond303(httpType + '://mmr' + str(segNum) + '.' + superhost + self.path)
else:
returnCode = self.respond200(self.path[1:])
# Serve the file normally
if not returnCode:
returnCode = self.respond200(self.path[1:])
# Log the request
CustomHandler.stateTracker.addRequestHistoryEvent(loadId, returnCode, self.path, self.headers['Host'], self.client_address)
# Read port selected by the cloud for our application
PORT = int(os.getenv('PORT', 8000))
# Change current directory to avoid exposure of control files
os.chdir('static')
httpd = ThreadingServer(("", PORT), CustomHandler)
httpd.timeout = 10 # Set HTTP Keep-Alive timeout
try:
print("Start serving at port %i" % PORT)
httpd.serve_forever()
except KeyboardInterrupt:
pass
httpd.server_close()
``` |
{
"source": "jkverma11/OfflineSignatureVerification",
"score": 3
} |
#### File: jkverma11/OfflineSignatureVerification/main-3.py
```python
import keras
import os, random
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import ticker
from sklearn.model_selection import train_test_split
from sklearn.metrics import log_loss
from sklearn.preprocessing import LabelEncoder
from sklearn import metrics
from keras.models import Sequential
from keras.layers import Dropout, Flatten, Conv2D, MaxPooling2D, ZeroPadding2D, Dense, Activation
from keras.optimizers import SGD, Adagrad, RMSprop, Adam
from keras.callbacks import EarlyStopping
from keras.utils import np_utils
from keras import backend as K
from keras.regularizers import l2
def root_mean_squared_error(y_true, y_pred):
"""
RMSE loss function
"""
return K.sqrt(K.mean(K.square(y_pred - y_true), axis=-1))
def get_images(fish):
"""Load files from train folder"""
fish_dir = TRAIN_DIR+'{}'.format(fish)
images = [fish+'/'+im for im in os.listdir(fish_dir)]
return images
def read_image(src):
import os
from scipy import misc
filepath=src
im=misc.imread(filepath)
import scipy.misc as mc
return mc.imresize(im,(ROWS,COLS))
TEST_DIR= os.getcwd() + '/data/test/'
TRAIN_DIR= os.getcwd() + '/data/train/'
#SIGNATURE_CLASSES = ['A', 'B', 'C','D','E','F','G','H','T','U','Y','Z']
SIGNATURE_CLASSES = []
for x in range(1,138):
SIGNATURE_CLASSES.append(str(x))
#print (SIGNATURE_CLASSES)
ROWS = 256
COLS = 128
CHANNELS = 3
files = []
y_all = []
# note: despite the name, each 'fish' below is a signature class label
for fish in SIGNATURE_CLASSES:
fish_files = get_images(fish)
files.extend(fish_files)
y_fish = np.tile(fish, len(fish_files))
y_all.extend(y_fish)
print("{0} photos of {1}".format(len(fish_files), fish))
y_all = np.array(y_all)
print(len(files))
print(len(y_all))
X_all = np.ndarray((len(files), ROWS, COLS, CHANNELS), dtype=np.uint8)
for i, im in enumerate(files):
X_all[i] = read_image(TRAIN_DIR+im)
if i%1000 == 0: print('Processed {} of {}'.format(i, len(files)))
print(X_all.shape)
# One Hot Encoding Labels
y_all = LabelEncoder().fit_transform(y_all)
y_all = np_utils.to_categorical(y_all)
train_x, valid_x, train_y, valid_y = train_test_split(X_all, y_all,
test_size=0.2, random_state=23,
stratify=y_all)
train_x, test_x, train_y, test_y = train_test_split(train_x, train_y,
test_size=0.2, random_state=20, stratify=train_y)
# to run this code, you'll need to load the following data:
# train_x, train_y
# valid_x, valid_y
# test_x, test_y
# see http://aqibsaeed.github.io/2016-09-24-urban-sound-classification-part-2/ for details
# data dimension parameters
frames = 128
bands = 256
num_channels = 3
num_labels = test_y.shape[1]
print ("Num Lables:" + str(num_labels))
# this model adapts the 5-layer CNN described in https://arxiv.org/pdf/1608.04363.pdf
# be aware, there are two main differences from that paper:
# the input here is 256x128 signature images with 3 channels => (256,128,3) tensors,
# and the receptive field size is 5x5 (f_size below)
f_size = 5
model = Sequential()
# Layer 1 - 96 filters (the paper uses 24) with a receptive field of (f,f).
# This is followed by (4,2) max-pooling over the last two dimensions and a ReLU activation function.
model.add(Conv2D(96, f_size, f_size, border_mode='same', input_shape=(bands, frames, num_channels)))
model.add(MaxPooling2D(pool_size=(4, 2)))
model.add(Activation('relu'))
# Layer 2 - 192 filters (the paper uses 48) with a receptive field of (f,f).
# Like L1 this is followed by (4,2) max-pooling and a ReLU activation function.
model.add(Conv2D(192, f_size, f_size, border_mode='same'))
model.add(MaxPooling2D(pool_size=(4, 2)))
model.add(Activation('relu'))
# Layer 3 - 192 filters with a receptive field of (f,f).
# This is followed by a ReLU but no pooling.
model.add(Conv2D(192, f_size, f_size, border_mode='valid'))
model.add(Activation('relu'))
# flatten output into a single dimension, let Keras do shape inference
model.add(Flatten())
# Layer 4 - a fully connected NN layer of 256 hidden units, L2 penalty of 0.001
model.add(Dense(256, W_regularizer=l2(0.001)))
model.add(Activation('relu'))
model.add(Dropout(0.5))
# Layer 5 - an output layer with one output unit per class, with L2 penalty,
# followed by a softmax activation function
model.add(Dense(num_labels, W_regularizer=l2(0.001)))
model.add(Dropout(0.5))
model.add(Activation('softmax'))
# create a SGD optimiser
sgd = SGD(lr=0.001, momentum=0.0, decay=0.0, nesterov=False)
# create adam optimiser
adam = Adam(lr=0.0001)
# a stopping function should the validation loss stop improving
earlystop = EarlyStopping(monitor='val_loss', patience=1, verbose=0, mode='auto')
# compile and fit model, reduce epochs if you want a result faster
# the validation set is used to identify parameter settings (epoch) that achieves
# the highest classification accuracy
model.compile(loss='categorical_crossentropy', metrics=['accuracy'], optimizer=sgd)
#model.compile(loss=root_mean_squared_error, metrics=['accuracy'], optimizer=adam)
model.fit(train_x, train_y, validation_data=(valid_x, valid_y), callbacks=[earlystop], batch_size=96, nb_epoch=5)
# finally, evaluate the model using the withheld test dataset
# determine the ROC AUC score
y_prob = model.predict_proba(test_x, verbose=0)
y_pred = np_utils.probas_to_classes(y_prob)
y_true = np.argmax(test_y, 1)
roc = metrics.roc_auc_score(test_y, y_prob)
print ("ROC:", round(roc,3))
# determine the classification accuracy
score, accuracy = model.evaluate(test_x, test_y, batch_size=96)
print("\nAccuracy = {:.2f}".format(accuracy))
``` |
{
"source": "jkvoulgaridis/Advanced-Database-Systems",
"score": 3
} |
#### File: jkvoulgaridis/Advanced-Database-Systems/sql_q1.py
```python
from pyspark.sql import SparkSession
spark = SparkSession.builder.appName("q1-sql").getOrCreate()
def get_year(timestamp):
if timestamp == None:
return None
else:
return timestamp.year
movies = spark.read.format('csv'). \
options(header = 'false' , inferSchema='true'). \
load('hdfs://master:9000/data/movies.csv')
movies.registerTempTable('movies')
#spark.udf.register('year' , get_year)
sqlString = \
"SELECT " + \
"m._c1 as Title, 100*(m._c6-m._c5)/m._c5 as Earnings, EXTRACT(YEAR FROM m._c3) as Year " + \
"FROM movies as m "+\
"INNER JOIN " +\
"( " + \
"SELECT max(100*(_c6-_c5)/_c5) as Earn, EXTRACT(YEAR FROM _c3) as Year "+\
"from movies " + \
"WHERE _c3 is not null and _c6 is not null and _c5 is not null and _c5 != 0 "+\
"group by EXTRACT(YEAR FROM _c3) " +\
") as MaxProfit "+ \
"on MaxProfit.Earn = 100*(m._c6-m._c5)/m._c5 and MaxProfit.Year = EXTRACT(YEAR FROM m._c3) " +\
"order by EXTRACT(YEAR FROM m._c3) DESC"
res = spark.sql(sqlString)
res.show()
``` |
{
"source": "jkw552403/todoist-toggle",
"score": 2
} |
#### File: jkw552403/todoist-toggle/handle_task.py
```python
import sys
import click
from workflow.notify import notify
from common import (
create_todoist_sync_client,
create_toggl_client,
create_workflow,
get_todoist_state,
get_toggl_project_map,
)
from setting import TODOIST_API_TOKEN, TOGGL_API_TOKEN
log = None
wf = None
@click.option("--track", is_flag=True)
@click.option("--complete", is_flag=True)
@click.argument("task_id", type=int)
@click.command()
def cli(task_id, complete, track):
if track and complete:
raise ValueError("Only one of track and complete can be true.")
sync_client = create_todoist_sync_client(wf.settings[TODOIST_API_TOKEN])
todoist_state = get_todoist_state(wf, sync_client)
item = next(item for item in todoist_state["items"] if item["id"] == task_id)
if track:
# TODO
# Get project name and try to map it to Toggl project
todoist_project_name = next(
(
p["name"]
for p in todoist_state["projects"]
if p["id"] == item["project_id"]
),
None,
)
toggl_client = create_toggl_client(wf.settings[TOGGL_API_TOKEN])
toggl_project_map = get_toggl_project_map(wf, toggl_client)
# If project with the same name exists, set pid to this project ID
toggl_project_id = toggl_project_map.get(todoist_project_name)
# Start a new time entry
toggl_client.start_time_entry(item["content"], toggl_project_id)
# Update notify message to show tracking project
notify(
u"Start tracking",
"{} ({})".format(
item["content"],
todoist_project_name if toggl_project_id else "No Project",
),
)
if complete:
item.complete()
sync_client.commit()
notify(u"Complete task", item[u"content"])
def main(wf):
cli()
if __name__ == u"__main__":
wf = create_workflow()
log = wf.logger
sys.exit(wf.run(main))
```
#### File: jkw552403/todoist-toggle/today.py
```python
import sys
from datetime import datetime, timedelta
import click
from common import (
add_task_item,
create_todoist_sync_client,
create_workflow,
get_todoist_state,
parse_todoist_date,
)
from setting import TODOIST_API_TOKEN
log = None
wf = None
def overdue_today(due_dt):
if due_dt:
return due_dt < datetime.today().replace(
hour=0, minute=0, second=0, microsecond=0
) + timedelta(days=1)
return False
def due_today(due_dt):
if due_dt:
return due_dt.date() == datetime.today().date()
return False
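# Added illustration (not in the original script): a task due yesterday counts
# as overdue for the Today view, but not as due today.
assert overdue_today(datetime.today() - timedelta(days=1)) is True
assert due_today(datetime.today() - timedelta(days=1)) is False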
@click.command()
def cli():
sync_client = create_todoist_sync_client(wf.settings[TODOIST_API_TOKEN])
todoist_state = get_todoist_state(wf, sync_client)
items = todoist_state["items"]
log.debug("There are {} tasks".format(len(items)))
task_data = []
for task in items:
due = task["due"]
due_date = due.get("date", None) if due else None
due_dt = parse_todoist_date(due_date) if due else None
if (
overdue_today(due_dt)
and task["is_deleted"] == 0
and task["date_completed"] is None
):
# Add values for sorting tasks
task_data.append(
(
due_today(due_dt), # due before today will be shown at the top
-len(
due_date
), # longer due date means the due date has not only date but time
due_dt,
task["priority"],
task["day_order"],
task,
)
)
log.debug("Get {} tasks in Today view".format(len(task_data)))
log.debug("Task data: {}".format(task_data))
task_data.sort()
for t in task_data:
add_task_item(wf, sync_client, t[-1])
wf.send_feedback()
def main(wf):
cli()
if __name__ == u"__main__":
wf = create_workflow()
log = wf.logger
sys.exit(wf.run(main))
``` |
{
"source": "JKWalleiee/CarND-Behavioral-Cloning-P3",
"score": 3
} |
#### File: JKWalleiee/CarND-Behavioral-Cloning-P3/cnn_models.py
```python
from keras.models import Sequential
from keras.layers import Flatten, Dense, Lambda, Convolution2D, Cropping2D
from keras.layers.pooling import MaxPooling2D
def createPreProcessingLayers():
    # Creates and returns the initial pre-processing layers (normalization and cropping).
model = Sequential()
model.add(Lambda(lambda x: (x / 255.0) - 0.5, input_shape=(160,320,3)))
model.add(Cropping2D(cropping=((50,20), (0,0))))
return model
def basic_model():
# Creates a simple linear regression model
model = Sequential()
model.add(Flatten(input_shape=(160,320,3)))
model.add(Dense(1))
return model
def leNetModel():
# Creates a LeNet model.
model = createPreProcessingLayers()
model.add(Convolution2D(6,5,5,activation='relu'))
model.add(MaxPooling2D())
model.add(Convolution2D(6,5,5,activation='relu'))
model.add(MaxPooling2D())
model.add(Flatten())
model.add(Dense(120))
model.add(Dense(84))
model.add(Dense(1))
return model
def nVidiaModel():
    # Creates the NVIDIA autonomous-vehicle team model
model = createPreProcessingLayers()
model.add(Convolution2D(24,5,5, subsample=(2,2), activation='relu'))
model.add(Convolution2D(36,5,5, subsample=(2,2), activation='relu'))
model.add(Convolution2D(48,5,5, subsample=(2,2), activation='relu'))
model.add(Convolution2D(64,3,3, activation='relu'))
model.add(Convolution2D(64,3,3, activation='relu'))
model.add(Flatten())
model.add(Dense(100))
model.add(Dense(50))
model.add(Dense(10))
model.add(Dense(1))
return model
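# Added usage sketch (not part of the original module): how one of the builders
# above might be compiled for a quick inspection. The optimizer and loss are
# assumptions here; the actual training setup lives in model.py.
if __name__ == "__main__":
    demo_model = nVidiaModel()
    demo_model.compile(optimizer='adam', loss='mse')
    demo_model.summary()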
```
#### File: JKWalleiee/CarND-Behavioral-Cloning-P3/model.py
```python
import cv2
import numpy as np
from pandas.io.parsers import read_csv
from cnn_models import basic_model, leNetModel, nVidiaModel
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
from keras.optimizers import Adam
from keras.callbacks import EarlyStopping, ModelCheckpoint
def image_brighten(image, base_change = 0.25):
# change the brightness of the image, using a (random)
# multiplication factor
hsv_image = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
brightness = np.random.uniform(base_change, 1.0)
hsv_image[:, :, 2] = hsv_image[:, :, 2] * brightness
return cv2.cvtColor(hsv_image, cv2.COLOR_HSV2RGB)
def generator(data, image_path, batch_size=32):
# Generate the batches for training and validation
path = image_path
num_samples = len(data)
while 1:
shuffle(data)
for offset in range(0, num_samples, batch_size):
batch_samples = data[offset:offset+batch_size]
images = []
angles = []
for batch_sample in batch_samples:
angle = float(batch_sample[1])
name = image_path+(batch_sample[0].strip()).split('/')[-1]
image = cv2.imread(name)
image = cv2.cvtColor(image,cv2.COLOR_BGR2RGB)
images.append(image)
angles.append(angle)
#Flip the image to augment the data
# and avoid bias to clockwise/counterclockwise
flipped_image = cv2.flip(image, 1)
images.append(flipped_image)
angles.append(-angle)
# Augment the data for the classes where abs(angle)>0.5
if ( abs(angle) > 0.5 ):
# Change the brightness of the original image
augmented_image = image_brighten(image)
images.append(augmented_image)
angles.append(angle)
# Change the brightness of the flipped image
augmented_image = image_brighten(flipped_image)
images.append(augmented_image)
angles.append(-angle)
X_data = np.array(images)
y_data = np.array(angles)
yield shuffle(X_data, y_data)
# Read the dataset from the csv file
samples_folder = "data/"
#dataset_samples = read_csv(samples_folder+"driving_log.csv", header=0, usecols=[0,1,2,3,4,5,6]).values;
dataset_samples = read_csv("my_dataset.csv", header=0, usecols=[0,1]).values;
# Split the dataset in train and validation sets
shuffle(dataset_samples)
train_samples, validation_samples = train_test_split(dataset_samples, test_size=0.2)
print(train_samples.shape)
# Create the generators
train_generator = generator(train_samples, samples_folder+"IMG/")
validation_generator = generator(validation_samples, samples_folder+"IMG/")
# Creates and compiles the model
model = nVidiaModel()
model.compile(optimizer= 'adam', loss='mse', metrics=['acc'])
# Name of the model to save
file = 'model.h5'
# Stop training when "val_loss" quantity has stopped improving.
earlystopper = EarlyStopping(patience=5, verbose=1)
#Save the (best) model after every epoch
checkpointer = ModelCheckpoint(file, monitor='val_loss', verbose=1, save_best_only=True)
# Train the model
print("Trainning")
history_object = model.fit_generator(train_generator, samples_per_epoch = 2*len(train_samples),
validation_data = validation_generator,
nb_val_samples = 2*len(validation_samples), nb_epoch=1, verbose=1)
# Save the model
print("Saving model")
model.save(file)
print("Model Saved")
``` |
{
"source": "JKWalleiee/CarND-Capstone",
"score": 3
} |
#### File: src/twist_controller/twist_controller.py
```python
import rospy
from pid import PID
from lowpass import LowPassFilter
from yaw_controller import YawController
GAS_DENSITY = 2.858
ONE_MPH = 0.44704
class Controller(object):
def __init__(self, vehicle_mass, fuel_capacity, brake_deadband, decel_limit, accel_limit,
wheel_radius, wheel_base, steer_ratio, max_lat_accel, max_steer_angle, min_speed):
self.yaw_controller = YawController(
wheel_base=wheel_base,
steer_ratio=steer_ratio,
min_speed=min_speed,
max_lat_accel=max_lat_accel,
max_steer_angle=max_steer_angle)
self.brake_deadband = brake_deadband
self.vehicle_mass = vehicle_mass
        self.fuel_capacity = fuel_capacity
self.wheel_radius = wheel_radius
self.pid = PID(kp=0.8, ki=0.1, kd=0.0, mn=decel_limit, mx=0.2)
#self.pid = PID(kp=5, ki=0.5, kd=0.5, mn=decel_limit, mx=accel_limit)
#self.pid = PID(0.15, 0.0, 0.09, mn=decel_limit, mx=accel_limit)
self.s_lpf = LowPassFilter(tau = 3, ts = 1)
self.t_lpf = LowPassFilter(tau = 3, ts = 1)
self.vel_lpf = LowPassFilter(tau = 0.5, ts = 0.2)
def reset(self):
self.pid.reset()
self.s_lpf.reset()
self.t_lpf.reset()
self.vel_lpf.reset()
def control(self, current_velocity, dbw_enabled, linear_vel, ang_vel, del_time):
if not dbw_enabled:
self.reset()
return 0.,0.,0.
lin_vel = abs(linear_vel)
vel_err = lin_vel - current_velocity
next_steer = self.yaw_controller.get_steering(lin_vel, ang_vel, current_velocity)
next_steer = self.s_lpf.filt(next_steer)
acceleration = self.pid.step(vel_err, del_time)
acceleration = self.t_lpf.filt(acceleration)
if acceleration > 0.009:
throttle = acceleration
brake = 0.0
else:
throttle = 0.0
deceleration = -acceleration
if deceleration < self.brake_deadband:
deceleration = 0.0
            # brake command is a torque: total mass (vehicle + fuel) * deceleration * wheel radius
            brake = deceleration * (self.vehicle_mass + self.fuel_capacity * GAS_DENSITY) * self.wheel_radius
# Return throttle, brake, steer
return throttle, brake, next_steer
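# Added worked example (not part of the original node): the brake command is a
# torque, i.e. total mass (vehicle + fuel) * deceleration * wheel radius. The
# numbers below are illustrative assumptions, not values read from the parameter server.
if __name__ == "__main__":
    example_mass = 1736.35 + 13.5 * GAS_DENSITY   # kg, vehicle plus a full tank
    print(example_mass * 1.0 * 0.2413)            # ~428 N*m for a 1 m/s^2 deceleration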
``` |
{
"source": "jkwang1992/rrdt",
"score": 3
} |
#### File: jkwang1992/rrdt/env.py
```python
import logging
import sys
from math import atan2, cos, sin
import pygame
from pygame.locals import *
from checkCollision import *
from helpers import *
LOGGER = logging.getLogger(__name__)
############################################################
class Env:
def __init__(self,
startPt=None,
goalPt=None,
**kwargs):
# initialize and prepare screen
self.args = MagicDict(kwargs)
self.img = pygame.image.load(self.args.image)
self.cc = CollisionChecker(self.img)
self.XDIM = self.img.get_width()
self.YDIM = self.img.get_height()
self.extra = 25
self.stats = Stats(showSampledPoint=self.args.showSampledPoint)
self.planner = self.args.planner
self.planner.args.env = self
self.pygame_init(kwargs['enable_pygame'])
##################################################
# Get starting and ending point
LOGGER.info('Select Starting Point and then Goal Point')
self.startPt = None
self.goalPt = None
while self.startPt is None or self.goalPt is None:
for e in pygame.event.get():
if e.type == MOUSEBUTTONDOWN:
mousePos = (int(e.pos[0] / self.args.scaling),
int(e.pos[1] / self.args.scaling))
if startPt is None:
if not self.collides(mousePos):
LOGGER.info(
('starting point set: ' + str(mousePos)))
startPt = mousePos
elif goalPt is None:
if not self.collides(mousePos):
LOGGER.info(('goal point set: ' + str(mousePos)))
goalPt = mousePos
elif e.type == QUIT or (e.type == KEYUP
and e.key == K_ESCAPE):
LOGGER.info("Leaving.")
return
# convert mouse pos to Node
if startPt is not None and self.startPt is None:
self.startPt = Node(startPt)
if goalPt is not None and self.goalPt is None:
self.goalPt = Node(goalPt)
self.update_screen(update_all=True)
self.planner.add_newnode(self.startPt)
##################################################
# calculate information regarding shortest path
self.c_min = dist(self.startPt.pos, self.goalPt.pos)
self.x_center = (self.startPt.pos[0] + self.goalPt.pos[0]) / 2, (
self.startPt.pos[1] + self.goalPt.pos[1]) / 2
dy = self.goalPt.pos[1] - self.startPt.pos[1]
dx = self.goalPt.pos[0] - self.startPt.pos[0]
self.angle = math.atan2(-dy, dx)
self.planner.init(
env=self,
XDIM=self.XDIM,
YDIM=self.YDIM,
startPt=self.startPt,
goalPt=self.goalPt,
**kwargs)
############################################################
def pygame_init(self, enable_pygame=True):
self.enable_pygame = enable_pygame
pygame.init()
self.fpsClock = pygame.time.Clock()
# self.fpsClock.tick(10)
self.fpsClock.tick(10000)
pygame.display.set_caption('RRTstar')
# screen.fill(white)
################################################################################
# text
pygame.font.init()
self.myfont = pygame.font.SysFont('Arial',
int(self.XDIM * 0.04 * self.args.scaling))
################################################################################
# main window
self.window = pygame.display.set_mode([
int(self.XDIM * self.args.scaling),
int((self.YDIM + self.extra) * self.args.scaling)
])
################################################################################
# background aka the room
self.background = pygame.Surface([self.XDIM, (self.YDIM + self.extra)])
self.background.blit(self.img, (0, 0))
# resize background to match windows
self.background = pygame.transform.scale(self.background, [
int(self.XDIM * self.args.scaling),
int((self.YDIM + self.extra) * self.args.scaling)
])
################################################################################
# path of RRT*
self.path_layers = pygame.Surface([
self.XDIM * self.args.scaling, (self.YDIM + self.extra) * self.args.scaling
])
self.path_layers.fill(Colour.ALPHA_CK)
self.path_layers.set_colorkey(Colour.ALPHA_CK)
################################################################################
# layers to store the solution path
self.solution_path_screen = pygame.Surface([
self.XDIM * self.args.scaling, (self.YDIM + self.extra) * self.args.scaling
])
self.solution_path_screen.fill(Colour.ALPHA_CK)
self.solution_path_screen.set_colorkey(Colour.ALPHA_CK)
################################################################################
# layers to store the sampled points
self.sampledPoint_screen = pygame.Surface([
self.XDIM * self.args.scaling, (self.YDIM + self.extra) * self.args.scaling
])
self.sampledPoint_screen.fill(Colour.ALPHA_CK)
self.sampledPoint_screen.set_colorkey(Colour.ALPHA_CK)
################################################################################
if not self.enable_pygame:
self.pygame_hide()
def pygame_show(self):
self.enable_pygame = True
def pygame_hide(self):
self.enable_pygame = False
pygame.display.iconify()
# pygame.quit()
def collides(self, p):
"""check if point is white (which means free space)"""
x = int(p[0])
y = int(p[1])
# make sure x and y is within image boundary
if (x < 0 or x >= self.img.get_width() or y < 0
or y >= self.img.get_height()):
return True
color = self.img.get_at((x, y))
pointIsObstacle = (color != pygame.Color(*Colour.white))
return pointIsObstacle
def step_from_to(self, p1, p2):
"""Get a new point from p1 to p2, according to step size."""
if self.args.ignore_step_size:
return p2
if dist(p1, p2) < self.args.epsilon:
return p2
else:
theta = atan2(p2[1] - p1[1], p2[0] - p1[0])
pos = p1[0] + self.args.epsilon * cos(
theta), p1[1] + self.args.epsilon * sin(theta)
return pos
def run(self):
"""Run until we reached the specified max nodes"""
while self.stats.valid_sample < self.args.max_number_nodes:
self.process_pygame_event()
self.update_screen()
self.planner.run_once()
self.planner.terminates_hook()
@check_pygame_enabled
def draw_path(self,
node1,
node2,
colour=Colour.path_blue,
line_modifier=1,
layer=None):
if layer is None:
layer = self.path_layers
pygame.draw.line(layer, colour, node1.pos * self.args.scaling,
node2.pos * self.args.scaling,
int(line_modifier * self.args.scaling))
@check_pygame_enabled
def draw_circle(self, pos, colour, radius, layer):
draw_pos = int(pos[0] * self.args.scaling), int(pos[1] * self.args.scaling)
pygame.draw.circle(layer, colour, draw_pos, int(radius * self.args.scaling))
@check_pygame_enabled
def process_pygame_event(self):
for e in pygame.event.get():
if e.type == QUIT or (e.type == KEYUP and e.key == K_ESCAPE):
LOGGER.info("Leaving.")
sys.exit(0)
@check_pygame_enabled
def wait_for_exit(self):
while True:
self.process_pygame_event()
############################################################
## DRAWING RELATED ##
############################################################
@check_pygame_enabled
def update_screen(self, update_all=False):
if 'refresh_cnt' not in self.__dict__:
# INIT (this section will only run when this function is first called)
self.refresh_cnt = 0
if update_all or self.args.always_refresh:
count = 0 #FORCE UPDATE
else:
count = self.refresh_cnt
self.refresh_cnt += 1
###################################################################################
def draw_start_goal_pt():
if self.startPt is not None:
self.draw_circle(
pos=self.startPt.pos,
colour=Colour.red,
radius=self.args.goal_radius,
layer=self.path_layers)
if self.goalPt is not None:
self.draw_circle(
pos=self.goalPt.pos,
colour=Colour.green,
radius=self.args.goal_radius,
layer=self.path_layers)
        # limits how often the screen is redrawn
if count % 20 == 0:
self.window.blit(self.background, (0, 0))
if count % 60 == 0:
try:
self.planner.paint()
except AttributeError:
# raise
pass
draw_start_goal_pt()
##### Tree paths
if count % 20 == 0:
self.window.blit(self.path_layers, (0, 0))
self.window.blit(self.solution_path_screen, (0, 0))
draw_start_goal_pt()
##### Sampler hook
if count % 20 == 0:
try:
self.args.sampler.paint(self.window)
except AttributeError:
pass
##### Sampled points
if count % 4 == 0:
self.sampledPoint_screen.fill(Colour.ALPHA_CK)
# Draw sampled nodes
for sampledPos in self.stats.sampledNodes:
self.draw_circle(
pos=sampledPos,
colour=Colour.red,
radius=2,
layer=self.sampledPoint_screen)
self.window.blit(self.sampledPoint_screen, (0, 0))
# remove them from list
del self.stats.sampledNodes[:]
##### Texts
if count % 10 == 0:
_cost = 'INF' if self.planner.c_max == float('inf') else round(
self.planner.c_max, 2)
if 'RRdTSampler' in self.args.sampler.__str__() and count > 0:
num_nodes = sum(
len(tree.nodes) for tree in (
*self.args.sampler.tree_manager.disjointedTrees,
self.args.sampler.tree_manager.root))
else:
num_nodes = len(self.planner.nodes)
# text = 'Cost_min: {} | Nodes: {}'.format(_cost, num_nodes)
# self.window.blit(self.myfont.render(text, False, Colour.black, Colour.white), (20,self.YDIM * self.args.scaling * 0.88))
text = 'Cost: {} | Inv.Samples: {}(con) {}(obs)'.format(
_cost, self.stats.invalid_samples_connections,
self.stats.invalid_samples_obstacles)
self.window.blit(
self.myfont.render(text, False, Colour.white, Colour.black),
(10, (self.YDIM + self.extra) * self.args.scaling * 0.95))
pygame.display.update()
```
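Several of the drawing and event methods above are wrapped in a `check_pygame_enabled` decorator that is defined elsewhere in the repository (in one of the imported helper modules, whose source is not shown here). As a rough illustration only, and purely as an assumption about its behaviour, a minimal sketch of such a decorator could look like the following; the real helper may differ.
```python
import functools

def check_pygame_enabled(func):
    """Hypothetical sketch: skip the call entirely when visualisation is off."""
    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        # no-op when the pygame visualisation has been hidden or disabled
        if not getattr(self, "enable_pygame", False):
            return None
        return func(self, *args, **kwargs)
    return wrapper
```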
#### File: rrdt/planners/birrtPlanner.py
```python
import random
import numpy as np
from overrides import overrides
from env import Node
from planners.baseSampler import Sampler
from planners.randomPolicySampler import RandomPolicySampler
from planners.rrtPlanner import RRTPlanner
class BiRRTSampler(Sampler):
@overrides
def init(self, **kwargs):
super().init(**kwargs)
self.randomSampler = RandomPolicySampler()
self.randomSampler.init(**kwargs)
@overrides
def get_next_pos(self):
# Random path
while True:
if random.random() < self.args.goalBias:
# init/goal bias
if self.args.planner.goal_tree_turn:
p = self.start_pos
else:
p = self.goal_pos
else:
p = self.randomSampler.get_next_pos()[0]
return p, self.report_success, self.report_fail
class BiRRTPlanner(RRTPlanner):
@overrides
def init(self, *argv, **kwargs):
super().init(*argv, **kwargs)
self.goal_tree_nodes = []
self.goal_tree_poses = np.empty((self.args.max_number_nodes + 50,
2)) # +50 to prevent overflow
self.goal_tree_nodes.append(self.args.env.goalPt)
self.goal_tree_poses[0] = self.args.env.goalPt.pos
self.found_solution = False
self.goal_tree_turn = False
@overrides
def run_once(self):
if self.goal_tree_turn and not self.found_solution:
# extend from goal tree
poses = self.goal_tree_poses
nodes = self.goal_tree_nodes
else:
# extend from init tree
poses = self.poses
nodes = self.nodes
self.goal_tree_turn = not self.goal_tree_turn
# check whether the two trees join together
###################################################################
###################################################################
# Get a sample that is free (not in blocked space)
rand_pos, _, _ = self.args.sampler.get_valid_next_pos()
# Found a node that is not in X_obs
idx = self.find_nearest_neighbour_idx(rand_pos, poses[:len(nodes)])
nn = nodes[idx]
# get an intermediate node according to step-size
newpos = self.args.env.step_from_to(nn.pos, rand_pos)
# check if it has a free path to nn or not
if not self.args.env.cc.path_is_free(nn.pos, newpos):
self.args.env.stats.add_invalid(obs=False)
else:
newnode = Node(newpos)
self.args.env.stats.add_free()
######################
newnode, nn = self.choose_least_cost_parent(
newnode, nn, nodes=nodes)
poses[len(nodes)] = newnode.pos
nodes.append(newnode)
# rewire to see what the newly added node can do for us
self.rewire(newnode, nodes)
self.args.env.draw_path(nn, newnode)
###################################################################
# check if the two trees join
if not self.found_solution:
if nodes is self.nodes:
other_poses = self.goal_tree_poses
other_nodes = self.goal_tree_nodes
else:
other_poses = self.poses
other_nodes = self.nodes
distances = np.linalg.norm(
other_poses[:len(self.nodes)] - newpos, axis=1)
if min(distances) < self.args.epsilon:
idx = np.argmin(distances)
if self.args.env.cc.path_is_free(other_poses[idx], newpos):
self.found_solution = True
# get the two closest nodes
if nodes is self.nodes:
init_tree_node = newnode
goal_tree_node = other_nodes[idx]
else:
init_tree_node = other_nodes[idx]
goal_tree_node = newnode
_nextnode = goal_tree_node # keep track of original parent
_old_parent = _nextnode.parent
# trees joined! Flip all the parents to children
nn = init_tree_node
assert init_tree_node in self.nodes
assert goal_tree_node in self.goal_tree_nodes
to_be_removed = []
while _old_parent is not None:
_old_parent = _nextnode.parent
_nextnode, nn = self.choose_least_cost_parent(
_nextnode, nn=nn, nodes=self.nodes)
self.rewire(_nextnode, nodes=self.nodes)
self.poses[len(self.nodes)] = _nextnode.pos
self.nodes.append(_nextnode)
to_be_removed.append(_nextnode)
nn = _nextnode
_nextnode = _old_parent
if self.goalPt.parent is not None:
if self.goalPt.parent.cost < self.c_max:
self.c_max = self.goalPt.parent.cost
self.draw_solution_path()
@overrides
def paint(self):
drawn_nodes_pairs = set()
for nodes in (self.nodes, self.goal_tree_nodes):
for n in nodes:
if n.parent is not None:
new_set = frozenset({n, n.parent})
if new_set not in drawn_nodes_pairs:
drawn_nodes_pairs.add(new_set)
self.args.env.draw_path(n, n.parent)
```
#### File: rrdt/planners/particleFilterSampler.py
```python
import logging
import math
import random
import numpy as np
import pygame
from overrides import overrides
from planners.baseSampler import Sampler
from planners.randomPolicySampler import RandomPolicySampler
from randomness import NormalRandomnessManager
"""
IDEAS / TODOS:
- Physics engine to bounce off wall
- ✔ Bias toward goalPt
- ✔ Using von mises distribution
- Wall following?
- RESTART
- ✔ Random tree node restart (lowest energy)
- ✔ Random tree node restart (particles with energy < specified amount)
- ✔ Re-sampling according to particle weight/energy
- Random free space restart (NEED TO CONNECT DIFFERENT TREES TOGETHER)
- Keep structure of underlying map to restart
Use linear gradient
| /
| /
| /
| /
| /
| /
|/________________
Use a factor of diff
|- - - - - - - - -
| _____
| ____/
| ____/
| __/
| _/
| /
|/_______________________
"""
LOGGER = logging.getLogger(__name__)
ENERGY_MIN = 0
ENERGY_MAX = 10
ENERGY_START = 7
ENERGY_COLLISION_LOSS = 1
RANDOM_RESTART_PARTICLES_ENERGY_UNDER = 1.5
RANDOM_RESTART_EVERY = 30
RESAMPLE_RESTART_EVERY = 0 # 200
class ParticleManager:
def __init__(self, num_particles, startPt, goalPt, args):
self.num_particles = num_particles
self.init_energy()
self.particles = []
self.local_samplers_to_be_rstart = []
self.goalPt = goalPt
self.args = args
for _ in range(self.num_particles):
self.particles.append(
Particle(
direction=random.uniform(0, math.pi * 2),
pos=startPt))
def add_to_restart(self, lsampler):
if lsampler not in self.local_samplers_to_be_rstart:
self.local_samplers_to_be_rstart.append(lsampler)
def init_energy(self):
self.particles_energy = np.ones(self.num_particles)
self.particles_energy *= ENERGY_START
self.resync_prob()
def size(self):
return self.num_particles
def modify_energy(self, idx=None, particle_ref=None, factor=None, set_val=None):
# TODO: sometimes the bookkeeping might go out of sync (and cause an error in np.random.choice). Investigate this.
# keep track how much energy this operation would modify,
# so we can change the energy_sum accordingly
if idx is None:
# get idx from particle ref
try:
idx = self.particles.index(particle_ref)
except ValueError:
return
old_energy = self.particles_energy[idx]
if set_val is not None:
self.particles_energy[idx] = set_val
elif factor is not None:
if False:
# NOTE WE ARE NOT DOING THIS FOR NOW
self.particles_energy[idx] *= factor
else:
# TODO: maybe redo this nasty logic
factor -= 1
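# after the shift above, `factor` is a signed fraction of headroom to move:
# a positive value pulls the energy toward ENERGY_MAX in proportion to the
# remaining gap, while a negative value pulls it toward ENERGY_MIN and then
# additionally applies the flat ENERGY_COLLISION_LOSS penalty below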
if factor > 0:
diff = ENERGY_MAX - self.particles_energy[idx]
self.particles_energy[idx] += diff*factor
elif factor < 0:
diff = self.particles_energy[idx] - ENERGY_MIN
self.particles_energy[idx] += diff*factor
self.particles_energy[idx] = max(0, self.particles_energy[idx] - ENERGY_COLLISION_LOSS)
else:
raise Exception("Nothing set in modify_energy")
delta = self.particles_energy[idx] - old_energy
self.cur_energy_sum += delta
def confirm(self, idx, pos):
self.particles[idx].confirm(pos)
def new_pos(self, idx, pos, dir):
return self.particles[idx].try_new_pos((pos[0], pos[1]), dir)
def get_pos(self, idx):
return self.particles[idx].pos
def get_dir(self, idx):
return self.particles[idx].direction
def get_prob(self):
return self.particles_energy / self.cur_energy_sum
def resync_prob(self):
self.cur_energy_sum = self.particles_energy.sum()
def random_restart_lowest(self):
"""
Restart the particle with the lowest energy.
"""
self.args.env.stats.lscampler_restart_counter += 1
min_idx = np.argmin(self.particles_energy)
p = self.particles_energy[min_idx]
randomPt = self.args.env.nodes[random.randint(0, len(self.args.env.nodes) - 1)].pos
self.particles[min_idx] = Particle(pos=randomPt)
self.modify_energy(min_idx, set_val=ENERGY_START)
return p
def random_restart_specific_value(self):
"""
Restart all the particles that have less energy
than a specified amount, at a random location
chosen from the existing tree nodes.
"""
self.args.env.stats.lscampler_restart_counter += 1
tmp = []
for i in range(self.size()):
if self.particles_energy[i] < RANDOM_RESTART_PARTICLES_ENERGY_UNDER:
tmp.append(self.particles_energy[i])
randomPt = self.args.planner.nodes[random.randint(0, len(self.args.planner.nodes) - 1)].pos
self.particles[i] = Particle(pos=randomPt)
self.modify_energy(i, set_val=ENERGY_START)
return tmp
def new_pos_in_free_space(self):
"""Return a particle that is in free space (from map)"""
self.args.env.stats.lscampler_restart_counter += 1
while True:
new_p = random.random() * self.args.env.XDIM, random.random() * self.args.env.YDIM
self.args.env.stats.add_sampled_node(new_p)
if self.args.env.collides(new_p):
self.args.env.stats.add_invalid(obs=True)
else:
self.args.env.stats.add_free()
break
return new_p
def random_free_space_restart(self):
"""
Restart all the particles that have less energy
than a specified amount, at a random location
in the map that is free.
Might not work well for a non-disjoint tree.
"""
tmp = []
for i in range(self.size()):
if self.particles_energy[i] < RANDOM_RESTART_PARTICLES_ENERGY_UNDER:
tmp.append(self.particles_energy[i])
randomPt = self.new_pos_in_free_space()
self.particles[i] = Particle(pos=randomPt)
self.modify_energy(i, set_val=ENERGY_START)
return tmp
def weighted_resampling(self):
"""
Resample to the same number of particles as before,
based on the current particles' energy/weighting.
"""
self.args.env.stats.lscampler_restart_counter += 1
prob = self.get_prob()
new_particles = []
for _ in range(self.size()):
choice = np.random.choice(range(self.size()), p=prob)
new_particles.append(Particle(pos=self.particles[choice].pos))
self.particles[:] = new_particles
self.init_energy()
############################################################
## Particles ##
############################################################
class Particle:
def __init__(self, direction=None, pos=None):
self.restart(direction=direction, pos=pos)
def restart(self, direction=None, pos=None):
if direction is None:
# I will generate one if you don't give me one!
direction = random.uniform(0, math.pi * 2)
if pos is None:
# I can't really get started... can I?
raise Exception("No pos given")
# self.energy = 1
self.direction = direction
self.pos = np.copy(pos)
self._trying_this_pos = np.copy(pos)
self._trying_this_dir = None
def try_new_pos(self, new_pos, new_dir):
# pos is a 2 index list-type object
# Do nothing with new_pos for now as we confirm our final location via callback
#################################################################################
# new_dir is a scalar (for now; TODO: generalise to arbitrary dimensions later)
self._trying_this_dir = new_dir
def confirm(self, pos):
# to confirm the final location of newly added tree node
self.pos = pos
self.direction = self._trying_this_dir
############################################################
## Sampler ##
############################################################
class ParticleFilterSampler(Sampler):
@overrides
def __init__(self, supressVisitedArea=True):
self.supressVisitedArea = supressVisitedArea
self._last_prob = None
self.counter = 0
self._c_random = 0
self._c_resample = 0
@overrides
def init(self, **kwargs):
super().init(**kwargs)
# For benchmark stats tracking
self.args.env.stats.lscampler_restart_counter = 0
self.args.env.stats.lscampler_randomwalk_counter = 0
self.randomSampler = RandomPolicySampler()
self.randomSampler.init(**kwargs)
self.randomnessManager = NormalRandomnessManager()
# probability layer
self.particles_layer = pygame.Surface(
(self.args.XDIM * self.args.scaling, self.args.YDIM * self.args.scaling),
pygame.SRCALPHA)
self.p_manager = ParticleManager(num_particles=16,
startPt=self.start_pos,
goalPt=self.goal_pos,
args=self.args)
@overrides
def report_fail(self, idx, **kwargs):
if idx >= 0:
self.p_manager.modify_energy(idx=idx, factor=0.7)
@overrides
def report_success(self, idx, **kwargs):
self.p_manager.confirm(idx, kwargs['pos'])
self.p_manager.modify_energy(idx=idx, factor=1)
def randomWalk(self, idx):
self.args.env.stats.lscampler_randomwalk_counter +=1
# Randomly bias toward goal direction
if random.random() < self.args.goalBias:
dx = self.goal_pos[0] - self.p_manager.get_pos(idx)[0]
dy = self.goal_pos[1] - self.p_manager.get_pos(idx)[1]
goal_direction = math.atan2(dy, dx)
new_direction = self.randomnessManager.draw_normal(origin=goal_direction, kappa=1.5)
else:
new_direction = self.randomnessManager.draw_normal(origin=self.p_manager.get_dir(idx), kappa=1.5)
# scale the half norm by a factor of epsilon
# Using this: https://docs.scipy.org/doc/scipy-0.15.1/reference/generated/scipy.stats.halfnorm.html
# factor = self.randomnessManager.draw_half_normal(self.args.epsilon, scale=self.args.epsilon * 0.5)
factor = self.args.epsilon
x, y = self.p_manager.get_pos(idx)
x += math.cos(new_direction) * factor
y += math.sin(new_direction) * factor
self.p_manager.new_pos(idx=idx,
pos=(x, y),
dir=new_direction)
return (x, y)
def get_random_choice(self):
prob = self.p_manager.get_prob()
self._last_prob = prob # this will be used to paint particles
try:
choice = np.random.choice(range(self.p_manager.size()), p=prob)
except ValueError as e:
# NOTE: don't know why the probability got out of sync... We notify the user, then try to re-sync the prob
LOGGER.error("!! probability got exception '{}'... trying to re-sync prob again.".format(e))
self.p_manager.resync_prob()
prob = self.p_manager.get_prob()
self._last_prob = prob
choice = np.random.choice(range(self.p_manager.size()), p=prob)
return choice
@overrides
def get_next_pos(self):
self.counter += 1
self._c_random += 1
self._c_resample += 1
# if self._c_random > RANDOM_RESTART_EVERY and RANDOM_RESTART_EVERY > 0:
# _p = self.p_manager.random_restart_lowest()
# print("Rand restart at counter {}, with p {}".format(self.counter, _p))
# self._c_random = 0
if self._c_random > RANDOM_RESTART_EVERY > 0:
_p = self.p_manager.random_restart_specific_value()
if _p:
LOGGER.debug("Rand restart at counter {}, with p {}".format(self.counter, _p))
self._c_random = 0
self.p_manager.weighted_resampling()
LOGGER.debug("Resampling at counter {}".format(self.counter))
self._c_resample = 0
LOGGER.debug(self.p_manager.get_prob())
if random.random() < 0:
LOGGER.debug('rand')
p = self.randomSampler.get_next_pos()
choice = -1
else:
# get a node to random walk
choice = self.get_random_choice()
p = self.randomWalk(choice)
self.last_particle = p
return (p, lambda c=choice, **kwargs: self.report_success(c, **kwargs),
lambda c=choice, **kwargs: self.report_fail(c, **kwargs))
############################################################
## FOR PAINTING ##
############################################################
@staticmethod
def get_color_transists(value, max_prob, min_prob):
denominator = max_prob - min_prob
if denominator == 0:
denominator = 1 # prevent division by zero
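# linearly map `value` from min_prob -> 40 up to max_prob -> 220; the caller
# then clamps the result into [50, 255] before using it as a colour channel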
return 220 - 180 * (1 - (value - min_prob) / denominator)
@overrides
def paint(self, window):
if self._last_prob is None:
return
max_num = self._last_prob.max()
min_num = self._last_prob.min()
for i, p in enumerate(self.p_manager.particles):
self.particles_layer.fill((255, 128, 255, 0))
# get a transition from green to red
c = self.get_color_transists(self._last_prob[i], max_num, min_num)
c = max(min(255, c), 50)
color = (c, c, 0)
self.args.env.draw_circle(pos=p.pos, colour=color, radius=4, layer=self.particles_layer)
window.blit(self.particles_layer, (0, 0))
##### Texts
# text = 'L.S.Walk:{}Res:{}'.format(self.rrt.stats.lscampler_randomwalk_counter, self.rrt.stats.lscampler_restart_counter)
# window.blit(self.rrt.myfont.render(text, False, Colour.black, Colour.white), (self.rrt.XDIM * self.rrt.SCALING * 0.5, self.rrt.YDIM * self.rrt.SCALING * 0.95))
```
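The `ParticleManager` above keeps a per-particle energy vector and draws particles in proportion to it (see `get_prob`, `get_random_choice`, and `weighted_resampling`). As a standalone illustration only, with made-up values that stand in for `particles_energy` and `ENERGY_START`, the core of that energy-weighted selection can be sketched with plain NumPy:
```python
import numpy as np

# hypothetical energies for 5 particles (stand-ins for `particles_energy`)
energies = np.array([7.0, 1.5, 0.2, 9.0, 4.3])

# normalise energies into a probability vector, as in ParticleManager.get_prob()
prob = energies / energies.sum()

# draw one particle index in proportion to its energy (cf. get_random_choice)
choice = np.random.choice(len(energies), p=prob)

# "weighted resampling": rebuild the population by sampling with replacement,
# then reset every energy back to a common starting value (cf. init_energy)
new_indices = np.random.choice(len(energies), size=len(energies), p=prob)
energies = np.full(len(energies), 7.0)
print(choice, new_indices)
```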
#### File: rrdt/planners/rrtPlanner.py
```python
from checkCollision import *
from helpers import *
class RRTPlanner:
"""This planner is largely a RRT planner, though with extra features."""
def __init__(self, **kwargs):
self.args = MagicDict(kwargs)
self.poses = np.empty((self.args.max_number_nodes + 50,
2)) # +50 to prevent overflow
self.c_max = float('inf')
# this dict temporarily stores the distance of a new node to all others
# so that in the next run other functions (e.g. choose_least_cost_parent and rewire)
# can take advantage of the already computed values
self._new_node_dist_to_all_others = {}
self.nodes = []
self.args.env = None # will be set by env itself
def init(self, *argv, **kwargs):
# self.args.env = kwargs['RRT']
self.args.sampler.init(*argv, **kwargs)
self.startPt = kwargs['startPt']
self.goalPt = kwargs['goalPt']
def run_once(self):
# Get a sample that is free (not in blocked space)
rand_pos, report_success, report_fail = self.args[
'sampler'].get_valid_next_pos()
# Found a node that is not in X_obs
idx = self.find_nearest_neighbour_idx(rand_pos,
self.poses[:len(self.nodes)])
nn = self.nodes[idx]
# get an intermediate node according to step-size
newpos = self.args.env.step_from_to(nn.pos, rand_pos)
# check if it has a free path to nn or not
if not self.args.env.cc.path_is_free(nn.pos, newpos):
self.args.env.stats.add_invalid(obs=False)
report_fail(pos=rand_pos, free=False)
else:
newnode = Node(newpos)
self.args.env.stats.add_free()
self.args.sampler.add_tree_node(newnode.pos)
report_success(pos=newnode.pos, nn=nn, rand_pos=rand_pos)
######################
newnode, nn = self.choose_least_cost_parent(
newnode, nn, nodes=self.nodes)
self.add_newnode(newnode)
# rewire to see what the newly added node can do for us
self.rewire(newnode, self.nodes)
self.args.env.draw_path(nn, newnode)
if dist(newnode.pos, self.goalPt.pos) < self.args.goal_radius:
if newnode.cost < self.c_max:
self.c_max = newnode.cost
self.goalPt.parent = newnode
newnode.children.append(self.goalPt.parent)
self.draw_solution_path()
def add_newnode(self, node):
self.poses[len(self.nodes)] = node.pos
self.nodes.append(node)
def choose_least_cost_parent(self, newnode, nn=None, nodes=None):
"""Given a new node, a node from root, return a node from root that
has the least cost (toward the newly added node)"""
if nn is not None:
_newnode_to_nn_cost = dist(newnode.pos, nn.pos)
self._new_node_dist_to_all_others = {}
for p in nodes:
_newnode_to_p_cost = dist(newnode.pos, p.pos)
self._new_node_dist_to_all_others[(newnode,
p)] = _newnode_to_p_cost
if _newnode_to_p_cost <= self.args[
'radius'] and self.args.env.cc.path_is_free(newnode.pos, p.pos):
# This is another valid parent. Check if it's better than our current one.
if nn is None or (p.cost + _newnode_to_p_cost <
nn.cost + _newnode_to_nn_cost):
nn = p
_newnode_to_nn_cost = _newnode_to_p_cost
if nn is None:
raise LookupError(
"ERROR: Provided nn=None, and cannot find any valid nn by this function. This newnode is not close to the root tree...?"
)
newnode.cost = nn.cost + dist(nn.pos, newnode.pos)
newnode.parent = nn
nn.children.append(newnode)
return newnode, nn
def rewire(self, newnode, nodes, already_rewired=None):
"""Reconsider parents of nodes that had change, so that the optimiality would change instantly"""
if len(nodes) < 1:
return
if already_rewired is None:
already_rewired = {newnode}
for n in (x for x in nodes if x not in already_rewired):
if len(already_rewired) <= 1:
_newnode_to_n_cost = self._new_node_dist_to_all_others[newnode,
n]
else:
_newnode_to_n_cost = dist(newnode.pos, n.pos)
if (n != newnode.parent
and _newnode_to_n_cost <= self.args.radius
and self.args.env.cc.path_is_free(n.pos, newnode.pos)
and newnode.cost + _newnode_to_n_cost < n.cost):
# draw over the old wire
self.args.env.draw_path(n, n.parent, Colour.white)
reconsider = (n.parent, *n.children)
n.parent.children.remove(n)
n.parent = newnode
newnode.children.append(n)
n.cost = newnode.cost + _newnode_to_n_cost
already_rewired.add(n)
self.args.env.draw_path(n, newnode, Colour.path_blue)
self.rewire(n, reconsider, already_rewired=already_rewired)
def terminates_hook(self):
"""For planner to process anything when planning terminates.
RRT does nothing."""
pass
@staticmethod
def find_nearest_neighbour_idx(pos, poses):
# Make use of numpy's fast vectorised operations to find all distances in one go.
distances = np.linalg.norm(poses - pos, axis=1)
return np.argmin(distances)
############################################################
## DRAWING RELATED ##
############################################################
def paint(self):
# these had already been drawn
drawn_nodes_pairs = set()
self.args.env.path_layers.fill(Colour.ALPHA_CK)
# Draw path trees
for n in self.nodes:
if n.parent is not None:
new_set = frozenset({n, n.parent})
if new_set not in drawn_nodes_pairs:
drawn_nodes_pairs.add(new_set)
self.args.env.draw_path(n, n.parent)
self.draw_solution_path()
@check_pygame_enabled
def draw_solution_path(self):
if self.c_max == float('inf'):
# nothing to do
return
# redraw new path
self.args.env.solution_path_screen.fill(Colour.ALPHA_CK)
nn = self.goalPt.parent
self.c_max = nn.cost
while nn != self.startPt:
self.args.env.draw_path(
nn,
nn.parent,
colour=Colour.blue,
line_modifier=5,
layer=self.args.env.solution_path_screen)
nn = nn.parent
self.args.env.window.blit(self.args.env.path_layers, (0, 0))
self.args.env.window.blit(self.args.env.solution_path_screen, (0, 0))
``` |
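For reference, the pre-allocated `self.poses` buffer plus the vectorised lookup in `find_nearest_neighbour_idx` boil down to the following NumPy pattern; the numbers below are invented for the example and are independent of the repository.
```python
import numpy as np

# pre-allocate a pose buffer, mirroring how RRTPlanner keeps `self.poses`
max_nodes = 100
poses = np.empty((max_nodes + 50, 2))
num_nodes = 0

def add_pose(pos):
    """Append a position into the next free row of the buffer."""
    global num_nodes
    poses[num_nodes] = pos
    num_nodes += 1

for p in ([10.0, 10.0], [40.0, 25.0], [5.0, 60.0]):
    add_pose(p)

# vectorised nearest-neighbour query, as in find_nearest_neighbour_idx
rand_pos = np.array([12.0, 14.0])
distances = np.linalg.norm(poses[:num_nodes] - rand_pos, axis=1)
nearest_idx = int(np.argmin(distances))
print(nearest_idx)  # -> 0, the pose closest to rand_pos
```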
{
"source": "jkwang1992/sbp-env",
"score": 2
} |
#### File: sbp-env/planners/birrtPlanner.py
```python
import numpy as np
from overrides import overrides
from env import Node
from planners.rrtPlanner import RRTPlanner
from utils import planner_registry
# noinspection PyAttributeOutsideInit
class BiRRTPlanner(RRTPlanner):
r"""The bidrectional RRT* planner, or sometimes it's also referred to as the
*RRT-Connect\**.
The class :class:`~planners.birrtPlanner.BiRRTPlanner` uses an adapted version of the
random policy sampler that makes it suitable for use in both the start and goal
trees, which is implemented in :class:`~samplers.birrtSampler.BiRRTSampler`.
"""
@overrides
def init(self, **kwargs):
super().init(**kwargs)
self.goal_tree_nodes = []
self.goal_tree_poses = np.empty(
(
self.args.max_number_nodes * 2 + 50, # +50 to prevent overflow
kwargs["num_dim"],
)
)
self.goal_tree_nodes.append(self.args.env.goal_pt)
self.goal_tree_poses[0] = self.args.env.goal_pt.pos
self.found_solution = False
self.goal_tree_turn = False
@overrides
def run_once(self):
if self.goal_tree_turn and not self.found_solution:
# extend from goal tree
poses = self.goal_tree_poses
nodes = self.goal_tree_nodes
else:
# extend from init tree
poses = self.poses
nodes = self.nodes
self.goal_tree_turn = not self.goal_tree_turn
# check whether the two trees join together
###################################################################
# Get a sample that is free (not in blocked space)
rand_pos, report_success, report_fail = self.args.sampler.get_valid_next_pos()
# Found a node that is not in X_obs
idx = self.find_nearest_neighbour_idx(rand_pos, poses[: len(nodes)])
nn = nodes[idx]
# get an intermediate node according to step-size
newpos = self.args.env.step_from_to(nn.pos, rand_pos)
# check if it has a free path to nn or not
if not self.args.env.cc.visible(nn.pos, newpos):
self.args.env.stats.add_invalid(obs=False)
report_fail(pos=rand_pos, free=False)
else:
newnode = Node(newpos)
self.args.env.stats.add_free()
report_success(pos=newnode.pos, nn=nn, rand_pos=rand_pos)
newnode, nn = self.choose_least_cost_parent(newnode, nn, nodes=nodes)
poses[len(nodes)] = newnode.pos
nodes.append(newnode)
# rewire to see what the newly added node can do for us
self.rewire(newnode, nodes)
###################################################################
# check if the two trees join
if not self.found_solution:
if nodes is self.nodes:
other_poses = self.goal_tree_poses
other_nodes = self.goal_tree_nodes
else:
other_poses = self.poses
other_nodes = self.nodes
distances = np.linalg.norm(
other_poses[: len(self.nodes)] - newpos, axis=1
)
if min(distances) < self.args.epsilon:
idx = np.argmin(distances)
if self.args.env.cc.visible(other_poses[idx], newpos):
self.found_solution = True
# get the two closest nodes
if nodes is self.nodes:
init_tree_node = newnode
goal_tree_node = other_nodes[idx]
else:
init_tree_node = other_nodes[idx]
goal_tree_node = newnode
_nextnode = goal_tree_node # keep track of original parent
_old_parent = _nextnode.parent
# trees joined! Flip all the parents to children
nn = init_tree_node
assert init_tree_node in self.nodes
assert goal_tree_node in self.goal_tree_nodes
to_be_removed = []
while _old_parent is not None:
_old_parent = _nextnode.parent
_nextnode, nn = self.choose_least_cost_parent(
_nextnode, nn=nn, nodes=self.nodes
)
self.rewire(_nextnode, nodes=self.nodes)
self.poses[len(self.nodes)] = _nextnode.pos
self.nodes.append(_nextnode)
to_be_removed.append(_nextnode)
nn = _nextnode
_nextnode = _old_parent
if self.goal_pt.parent is not None:
if self.goal_pt.parent.cost < self.c_max:
self.c_max = self.goal_pt.parent.cost
def pygame_birrt_planner_paint(planner):
"""Visualisation function for BiRRT
:param planner: planner to be visualised
"""
from utils.common import Colour
planner.args.env.path_layers.fill(Colour.ALPHA_CK)
drawn_nodes_pairs = set()
for nodes in (planner.nodes, planner.goal_tree_nodes):
for n in nodes:
if n.parent is not None:
new_set = frozenset({n, n.parent})
if new_set not in drawn_nodes_pairs:
drawn_nodes_pairs.add(new_set)
planner.args.env.draw_path(n, n.parent)
if planner.goal_pt.parent is not None:
planner.visualiser.draw_solution_path()
from planners.rrtPlanner import klampt_draw_nodes_paint_func
def klampt_birrt_paint(planner):
"""Visualiser paint function for BiRRT
:param planner: the planner to be visualised
"""
for c, nodes in (
((1, 0, 0, 1), planner.nodes),
((0, 0, 1, 1), planner.goal_tree_nodes),
):
klampt_draw_nodes_paint_func(planner, nodes, c)
# start register
planner_registry.register_planner(
"birrt",
planner_class=BiRRTPlanner,
visualise_pygame_paint=pygame_birrt_planner_paint,
visualise_klampt_paint=klampt_birrt_paint,
sampler_id="birrt_sampler",
)
# finish register
```
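The join test in `run_once` above reduces to a single vectorised distance check of the newly added position against every pose in the opposite tree, followed by a visibility check on the closest one. A toy, self-contained version of just the distance part (all numbers below are invented for the example) looks like this:
```python
import numpy as np

# toy stand-ins: poses already in the *other* tree, and a freshly added node
other_poses = np.array([[10.0, 10.0], [40.0, 25.0], [5.0, 60.0]])
newpos = np.array([9.5, 11.0])
epsilon = 2.0  # stand-in for self.args.epsilon

# same test BiRRTPlanner.run_once uses to decide whether the two trees meet
distances = np.linalg.norm(other_poses - newpos, axis=1)
if distances.min() < epsilon:
    idx = int(np.argmin(distances))
    print("trees can be joined through node", idx)  # -> node 0
```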
#### File: sbp-env/planners/prmPlanner.py
```python
from typing import List
import networkx as nx
import numpy as np
from overrides import overrides
import tqdm
from env import Node
from planners.rrtPlanner import RRTPlanner
from samplers import prmSampler
from utils import planner_registry
volume_of_unit_ball = {
1: 2,
2: 3.142,
3: 4.189,
4: 4.935,
5: 5.264,
6: 5.168,
7: 4.725,
8: 4.059,
9: 3.299,
10: 2.550,
}
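# Note: these constants match the closed-form volume of a d-dimensional unit
# ball, V_d = pi**(d/2) / gamma(d/2 + 1); for instance, with the standard library:
#   import math
#   round(math.pi ** (3 / 2) / math.gamma(3 / 2 + 1), 3)  # -> 4.189, i.e. 4*pi/3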
def nearest_neighbours(
nodes: List[Node], poses: np.ndarray, pos: np.ndarray, radius: float
):
"""A helper function to find the nearest neighbours from a roadmap
:param nodes: the list of nodes to search against
:param poses: array of positions
:param pos: the position of interest
:param radius: the maximum radius of distance
"""
distances = np.linalg.norm(poses[: len(nodes)] - pos, axis=1)
neighbours = []
for i, d in enumerate(distances):
if d < radius:
neighbours.append(nodes[i])
return neighbours
class PRMPlanner(RRTPlanner):
"""
Probabilistic Roadmap motion planner, a multi-query sampling-based planner.
"""
@overrides
def init(self, *argv, **kwargs):
super().init(*argv, **kwargs)
self.args.env.stats.invalid_samples_connections = "-- "
self.d_threshold = self.args.epsilon
self.gamma = (
1 + np.power(2, kwargs["num_dim"]) * (1 + 1.0 / kwargs["num_dim"]) * 10000
)
self.graph = nx.DiGraph()
self.graph.add_node(self.args.env.start_pt)
self.args.env.end_state = None
@overrides
def run_once(self):
rand_pos, _, _ = self.args.sampler.get_valid_next_pos()
self.args.env.stats.add_free()
self.add_newnode(Node(rand_pos))
def clear_graph(self):
"""Clear the current roadmap graph"""
self.graph = nx.DiGraph()
self.graph.add_node(self.args.env.start_pt)
self.args.env.end_state = None
def build_graph(self):
"""Performs the graph building process where
.. math::
G = (V, E).
"""
n = len(self.nodes)
radius = self.gamma * np.power(np.log(n + 1) / (n + 1), 1 / self.args.num_dim)
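# `radius` shrinks roughly like (log n / n) ** (1 / d) as the roadmap grows,
# the usual PRM*-style connection-radius schedule, scaled here by `self.gamma`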
for v in tqdm.tqdm(self.nodes, desc="Building graph"):
m_near = nearest_neighbours(self.nodes, self.poses, v.pos, radius)
for m_g in m_near:
if m_g is v:
continue
# check if the path between (m_g, v) defined by the motion model is collision-free
if not self.args.env.cc.visible(m_g.pos, v.pos):
continue
self.graph.add_weighted_edges_from(
[(m_g, v, self.args.env.dist(m_g.pos, v.pos))]
)
def get_nearest_free(self, node: Node, neighbours: List[Node]):
"""Internal method to get the closest existing node that is free to connects
to the given node.
:param node: the node of interest
:param neighbours: the list of nodes to search against
"""
nn = None
min_cost = 999999
for n in neighbours:
if n is self.args.env.start_pt or n is self.args.env.goal_pt or n is node:
continue
if not self.args.env.cc.visible(node.pos, n.pos):
continue
if nn is None:
nn = n
min_cost = self.args.env.dist(node.pos, n.pos)
else:
_cost = self.args.env.dist(node.pos, n.pos)
if _cost < min_cost:
min_cost = _cost
nn = n
return nn
def get_solution(self):
"""Build the solution path"""
# get the two nodes that are closest to start/goal and have free routes
m_near = nearest_neighbours(
self.nodes, self.poses, self.args.sampler.start_pos, self.args.epsilon
)
start = self.get_nearest_free(self.args.env.start_pt, m_near)
m_near = nearest_neighbours(
self.nodes, self.poses, self.args.sampler.goal_pos, self.args.epsilon
)
goal = self.get_nearest_free(self.args.env.goal_pt, m_near)
if start is None or goal is None or not nx.has_path(self.graph, start, goal):
return float("inf")
solution_path = nx.shortest_path(self.graph, start, goal)
solution_path[0].cost = self.args.env.dist(solution_path[0].pos, start.pos)
for i in range(1, len(solution_path)):
solution_path[i].parent = solution_path[i - 1]
solution_path[i].cost = solution_path[i - 1].cost + self.args.env.dist(
solution_path[i].pos, solution_path[i - 1].pos
)
self.c_max = goal.cost
self.args.env.goal_pt.parent = goal
start.parent = self.args.env.start_pt
self.visualiser.draw_solution_path()
return self.c_max
def pygame_prm_planner_paint(planner):
"""Visualisation function to paint for planner
:param planner: the planner to visualise
"""
for n in planner.nodes:
planner.args.env.draw_circle(
pos=n.pos,
colour=(0, 0, 255),
radius=1.4,
layer=planner.args.env.path_layers,
)
def pygame_prm_planner_paint_when_terminate(planner):
"""Visualisation function to paint for planner when termiante
:param planner: the planner to visualise
"""
from utils.common import Colour
planner.build_graph()
# draw all edges
for n1, n2 in planner.graph.edges():
planner.args.env.draw_path(n1, n2, Colour.path_blue)
planner.get_solution()
planner.args.env.update_screen()
input("\nPress Enter to quit...")
def klampt_prm_paint(planner) -> None:
"""Visualiser paint function for PRM
:param planner: the planner to be visualised
"""
colour = (1, 0, 0, 1)
for n in planner.nodes:
planner.args.env.draw_node(
planner.args.env.cc.get_eef_world_pos(n.pos), colour=colour
)
for edge in planner.graph.edges:
# draw each roadmap edge between the end-effector world positions of its two endpoints
n1, n2 = edge
planner.args.env.draw_path(
planner.args.env.cc.get_eef_world_pos(n1.pos),
planner.args.env.cc.get_eef_world_pos(n2.pos),
colour=colour,
)
def klampt_prm_planner_paint_when_terminate(planner):
"""Visualisation function to paint for planner when termiante
:param planner: the planner to visualise
"""
from utils.common import Colour
planner.build_graph()
# draw all edges
for n1, n2 in planner.graph.edges():
planner.args.env.draw_path(n1, n2, Colour.path_blue)
planner.get_solution()
planner.args.env.update_screen()
input("\nPress Enter to quit...")
# start register
planner_registry.register_planner(
"prm",
planner_class=PRMPlanner,
visualise_pygame_paint=pygame_prm_planner_paint,
visualise_pygame_paint_terminate=pygame_prm_planner_paint_when_terminate,
visualise_klampt_paint=klampt_prm_paint,
visualise_klampt_paint_terminate=klampt_prm_planner_paint_when_terminate,
sampler_id=prmSampler.sampler_id,
)
# finish register
```
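As a quick illustration of the `nearest_neighbours` helper above, the same one-shot distance computation can be exercised on toy data; the names and values below are invented for the example, and the helper is redefined locally so the snippet is self-contained.
```python
import numpy as np

# toy roadmap: three labelled nodes with matching rows in a pose array
nodes = ["a", "b", "c"]
poses = np.array([[0.0, 0.0], [3.0, 4.0], [10.0, 0.0]])

def nearest_neighbours(nodes, poses, pos, radius):
    # same idea as the helper above: one vectorised distance computation,
    # then keep every node that falls inside the radius
    distances = np.linalg.norm(poses[: len(nodes)] - pos, axis=1)
    return [n for n, d in zip(nodes, distances) if d < radius]

print(nearest_neighbours(nodes, poses, np.array([1.0, 1.0]), radius=5.0))
# -> ['a', 'b']
```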
#### File: sbp-env/samplers/birrtSampler.py
```python
import random
from overrides import overrides
from samplers.baseSampler import Sampler
from samplers.randomPolicySampler import RandomPolicySampler
from utils import planner_registry
# noinspection PyAttributeOutsideInit
class BiRRTSampler(Sampler):
r"""
The sampler that is used internally by :class:`~planners.birrtPlanner.BiRRTPlanner`.
Internally, :class:`~samplers.birrtSampler.BiRRTSampler`
uses :class:`~samplers.randomPolicySampler.RandomPolicySampler` to draw from its
supported random methods.
The main difference lies in the epsilon biasing when
.. math::
p \sim \mathcal{U}(0,1) < \epsilon,
where the sampler will bias towards the correct **start** or **goal** tree
depending on the current tree :math:`\mathcal{T}_\text{current}` that
:class:`~samplers.birrtSampler.BiRRTSampler`
is currently planning for (in contrast to always biasing towards the goal tree).
That is, :math:`p \sim \mathcal{U}(0,1)` is first drawn, then :math:`q_\text{new}`
is given by
.. math::
q_\text{new} =
\begin{cases}
q \sim \mathcal{U}(0,1)^d & \text{if } p < \epsilon\\
q_\text{target} & \text{if } \mathcal{T}_\text{current} \equiv \mathcal{
T}_{start}\\
q_\text{start} & \text{if } \mathcal{T}_\text{current} \equiv \mathcal{
T}_{target}\text{.}
\end{cases}
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.random_method = (
kwargs["random_method"] if "random_method" in kwargs else "pseudo_random"
)
@overrides
def init(self, **kwargs):
"""The delayed **initialisation** method"""
super().init(**kwargs)
self.randomSampler = RandomPolicySampler(random_method=self.random_method)
self.randomSampler.init(use_radian=self.use_radian, **kwargs)
def set_use_radian(self, value=True):
"""Overrides the super class method such that the value will be passed to the
internal :class:`samplers.randomPolicySampler.RandomPolicySampler`
:param value: whether to use radian
"""
self.use_radian = value
self.randomSampler.use_radian = value
@overrides
def get_next_pos(self) -> Sampler.GetNextPosReturnType:
"""Get next sampled position"""
# Random path
while True:
if random.random() < self.args.goalBias:
# init/goal bias
if self.args.planner.goal_tree_turn:
p = self.start_pos
else:
p = self.goal_pos
else:
p = self.randomSampler.get_next_pos()[0]
return p, self.report_success, self.report_fail
# start register
planner_registry.register_sampler(
"birrt_sampler",
sampler_class=BiRRTSampler,
)
# finish register
```
#### File: sbp-env/tests/test_4d_arm_collision_checker.py
```python
from unittest import TestCase
import numpy as np
from collisionChecker import RobotArm4dCollisionChecker
from tests.test_image_space_collision_checker import (
mock_image_as_np,
create_test_image,
pt,
)
from utils.common import Stats, MagicDict
class TestRobotArm4dCollisionChecker(TestCase):
def setUp(self) -> None:
self.cc = RobotArm4dCollisionChecker(
create_test_image(),
stick_robot_length_config=[0.1, 0.1],
stats=Stats(),
args=MagicDict(),
)
self.target = mock_image_as_np == 255
self.target = self.target.astype(self.cc.image.dtype)
def test_get_image_shape(self):
self.assertEqual(self.cc.get_image_shape(), self.target.T.shape)
def test_get_dimension(self):
self.assertEqual(self.cc.get_dimension(), 4)
def test_create_ranges(self):
starts = np.array([1, 5, 3, 19])
ends = np.array([-11, 54, 32, -219])
multi_linspace = self.cc.create_ranges(starts, ends, 10)
# assert lengths are equal
self.assertEqual(multi_linspace.shape[1], 10)
self.assertEqual(multi_linspace.shape[0], len(starts))
# assert they start and end at the specified locations
self.assertTrue(np.isclose(multi_linspace[:, 0], starts).all())
self.assertTrue(np.isclose(multi_linspace[:, -1], ends).all())
def test_interpolate_configs(self):
starts = np.random.rand(4) * 10
ends = np.random.rand(4) * 10
out = self.cc._interpolate_configs(starts, ends)
self.assertTrue(out.shape[0] >= 2)
for i in range(out.shape[0]):
self.assertEqual(len(out[i, :]), 4)
# the first two configs are pixel location, which will always be int
self.assertTrue(
np.isclose(out[0, :2].astype(np.uint8), starts[:2].astype(np.uint8)).all()
)
# the last two configs are rotational joints which will be in reals
self.assertTrue(np.isclose(out[0, 2:], starts[2:]).all())
# the first two configs are pixel location, which will always be int
self.assertTrue(
np.isclose(out[-1, :2].astype(np.uint8), ends[:2].astype(np.uint8)).all()
)
# the last two configs are rotational joints which will be in reals
self.assertTrue(np.isclose(out[-1, 2:], ends[2:]).all())
def test_visible(self):
self.assertTrue(self.cc.visible(pt(0.5, 2.5, 1, 2.5), pt(1.5, 3.5, 0, -1.5)))
self.assertTrue(self.cc.visible(pt(0.5, 2.5, -1, 1), pt(0.5, 3.5, 2, 0.4)))
def test_not_visible(self):
self.assertFalse(self.cc.visible(pt(0.5, 2.5, 1, 2.5), pt(1.5, 5.5, 0, -1.5)))
self.assertFalse(self.cc.visible(pt(0.5, 0.5, -1, 1), pt(3.5, 0.5, 2, 0.4)))
def test_feasible(self):
self.assertTrue(self.cc.feasible(pt(0.5, 2.5, 1, 2.5)))
self.assertTrue(self.cc.feasible(pt(1.5, 3.5, 0, -1.5)))
self.assertTrue(self.cc.feasible(pt(0.5, 3.5, 2, 0.4)))
def test_not_feasible(self):
self.assertFalse(self.cc.feasible((3.5, 0.5, 1, 1)))
self.assertFalse(self.cc.feasible((-1.5, 3.5, 1, 1)))
self.assertFalse(self.cc.feasible((1.78, 4.5, 1, 1)))
```
#### File: sbp-env/tests/test_env.py
```python
from unittest import TestCase
import numpy as np
import env
import visualiser
from planners.basePlanner import Planner
from samplers.baseSampler import Sampler
from main import generate_args
from tests.common_vars import DummyPlannerClass
from utils import planner_registry
class TestGenerateArgs(TestCase):
def test_missing_argument(self):
with self.assertRaises(TypeError):
generate_args()
with self.assertRaises(TypeError):
generate_args(planner_id="rrt")
with self.assertRaises(TypeError):
generate_args(map_fname="maps/4d.png")
# should not raise error
generate_args(planner_id="rrt", map_fname="maps/4d.png")
# test error if the planner id has not been registered yet
with self.assertRaises(ValueError):
generate_args(planner_id="my_planner_id", map_fname="maps/4d.png")
# test that after the planner id is registered, it will work.
planner_registry.register_planner(
planner_id="my_planner_id",
planner_class=DummyPlannerClass,
sampler_id="random",
)
generate_args(planner_id="my_planner_id", map_fname="maps/4d.png")
def test_actual_planning(self):
visualiser.VisualiserSwitcher.choose_visualiser("base")
args = generate_args(
planner_id="rrt",
map_fname="maps/test.png",
start_pt=np.array([25, 123]),
goal_pt=np.array([225, 42]),
)
args.no_display = True
e = env.Env(args, fixed_seed=0)
ori_method = e.planner.run_once
# prepare an exception to escape from the planning loop
class PlanningSuccess(Exception):
pass
def planner_run_once_with_side_effect(*args, **kwargs):
# pass through to planner
ori_method(*args, **kwargs)
if e.planner.c_max < float("inf"):
raise PlanningSuccess()
# patch the planner run_once such that it will terminate as soon as the
# planning problem is finished.
e.planner.run_once = planner_run_once_with_side_effect
with self.assertRaises(PlanningSuccess):
e.run()
def test_get_attribute(self):
visualiser.VisualiserSwitcher.choose_visualiser("pygame")
args = generate_args(
planner_id="rrt",
map_fname="maps/test.png",
start_pt=np.array([25, 123]),
goal_pt=np.array([225, 42]),
)
args.no_display = True
e = env.Env(args, fixed_seed=0)
# test get planner
assert isinstance(e.planner, Planner)
# test get sampler
assert isinstance(e.sampler, Sampler)
```
#### File: sbp-env/tests/test_nearbyPolicySampler.py
```python
from copy import deepcopy
from unittest import TestCase
import numpy as np
import visualiser
from env import Env
from planners.rrdtPlanner import Node
from samplers.nearbyPolicySampler import NearbyPolicySampler
from tests.common_vars import template_args
from utils import planner_registry
class TestNearbyPolicySampler(TestCase):
def setUp(self) -> None:
args = deepcopy(template_args)
visualiser.VisualiserSwitcher.choose_visualiser("base")
# setup to use the correct sampler
args["sampler"] = NearbyPolicySampler(prob_block_size=10)
# use some suitable planner
args["planner_data_pack"] = planner_registry.PLANNERS["rrt"]
self.env = Env(args)
self.sampler = self.env.args.sampler
def test_likelihood_increases(self):
pt, report_success, report_fail = self.sampler.get_next_pos()
target_pt = np.array([10, 10])
nn = Node(np.array([20, 20]))
rand_pos = np.array([5, 5])
prev_val_1 = None
prev_val_2 = None
prev_val_3 = None
for i in range(20):
report_success(
pos=target_pt,
nn=nn,
rand_pos=rand_pos,
)
# val_1 comes from the target point (probability should increase)
val_1 = self.sampler.prob_vector[
tuple((target_pt / self.sampler.PROB_BLOCK_SIZE).astype(int))
]
if prev_val_1 is not None:
# the likelihood should increase
self.assertTrue(val_1 > prev_val_1)
prev_val_1 = val_1
# val_2 comes from the nearest neighbour point (probability should
# increase)
val_2 = self.sampler.prob_vector[
tuple((nn.pos / self.sampler.PROB_BLOCK_SIZE).astype(int))
]
if prev_val_2 is not None:
# the likelihood should increase
self.assertTrue(val_2 > prev_val_2)
prev_val_2 = val_2
# val_3 comes from the desired rand pos (which hasn't been reached yet,
# so it should not be affected)
val_3 = self.sampler.prob_vector[
tuple((rand_pos / self.sampler.PROB_BLOCK_SIZE).astype(int))
]
if prev_val_3 is not None:
# the likelihood should remain the same
self.assertTrue(val_3 == prev_val_3)
prev_val_3 = val_3
```
#### File: sbp-env/tests/test_prmSampler.py
```python
from copy import deepcopy
from unittest import TestCase
import numpy as np
import visualiser
from env import Env
from planners.rrdtPlanner import Node
from samplers.prmSampler import PRMSampler
from tests.common_vars import template_args
from utils import planner_registry
class TestPRMSampler(TestCase):
def setUp(self) -> None:
args = deepcopy(template_args)
visualiser.VisualiserSwitcher.choose_visualiser("base")
# setup to use the correct sampler
args["sampler"] = PRMSampler()
# use some suitable planner
args["planner_data_pack"] = planner_registry.PLANNERS["prm"]
self.env = Env(args)
self.sampler = self.env.args.sampler
def test_init(self):
# method that does not exists
with self.assertRaises(ValueError):
PRMSampler(random_method="foo bar")
# with supported method
PRMSampler(random_method="fast")
def test_get_next_pos(self):
# test that by default there should be no goal bias
self.assertEqual(self.sampler.args.goalBias, 0)
np.random.seed(0)
# test all results are different
results = set()
for i in range(100):
results.add(Node(self.sampler.get_next_pos()[0]))
self.assertEqual(len(results), 100)
```
#### File: sbp-env/utils/csv_stats_logger.py
```python
import os
import logging
from datetime import datetime
import io
import csv
class CsvFormatter(logging.Formatter):
def __init__(self):
super().__init__()
self.output = io.StringIO()
self.writer = csv.writer(self.output, quoting=csv.QUOTE_ALL)
def format(self, record):
# self.writer.writerow([record.levelname, record.msg])
self.writer.writerow(record.msg)
data = self.output.getvalue()
self.output.truncate(0)
self.output.seek(0)
return data.strip()
def get_non_existing_filename(fname_template="runs/%Y-%m-%d_%H-%M{}.csv"):
"""Return a string that represent a path to file that does not exists.
The filename is the current timestamp."""
suffix_num = 0
while True:
fname = datetime.now().strftime(
fname_template.format("" if suffix_num == 0 else f".{suffix_num}")
)
if not os.path.exists(fname):
break
suffix_num += 1
return fname
def setup_csv_stats_logger(file_name, logger_name="CSV_STATS"):
# create any missing intermediate folder
os.makedirs(os.path.dirname(file_name), exist_ok=True)
# setup a special logger for the logging stats
csv_logger = logging.getLogger(logger_name)
csv_logger.setLevel(logging.INFO)
# only use our own file handler, and disable propagation to the console
csv_logger.propagate = False
# setup our handler
fh = logging.FileHandler(file_name)
fh.setFormatter(CsvFormatter())
csv_logger.addHandler(fh)
``` |
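A typical way to use this helper is to set up the CSV logger once and then log a list per row, which `CsvFormatter` writes out as one quoted CSV line. The sketch below assumes the sbp-env `utils` package is on the import path and that the column names are whatever the caller chooses; the values shown are made up for the example.
```python
import logging
from utils.csv_stats_logger import setup_csv_stats_logger, get_non_existing_filename

# create runs/<timestamp>.csv (or a .N-suffixed variant if it already exists)
setup_csv_stats_logger(get_non_existing_filename())
csv_logger = logging.getLogger("CSV_STATS")

# each .info() call takes a list of fields and is written as one CSV row
csv_logger.info(["planner", "nodes", "cost"])
csv_logger.info(["rrt", 1500, 123.4])
```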
{
"source": "JK-Warriors/Heimdallr",
"score": 2
} |
#### File: Heimdallr/python/alert_sqlserver.py
```python
import os
import sys
import string
import time
import datetime
import MySQLdb
import logging
import logging.config
logging.config.fileConfig("etc/logger.ini")
logger = logging.getLogger("alert_sqlserver")
path='./include'
sys.path.insert(0,path)
import functions as func
import sendmail
import sendsms_fx
import sendsms_api
send_mail_max_count = func.get_option('send_mail_max_count')
send_mail_sleep_time = func.get_option('send_mail_sleep_time')
mail_to_list_common = func.get_option('send_mail_to_list')
send_sms_max_count = func.get_option('send_sms_max_count')
send_sms_sleep_time = func.get_option('send_sms_sleep_time')
sms_to_list_common = func.get_option('send_sms_to_list')
g_alert = str(func.get_option('alert'))
#################################################################################################
def gen_alert_sqlserver_status(server_id):
if g_alert != "1":
return -1
sql="""SELECT a.server_id,
a.connect,
a.processes,
a.processes_running,
a.processes_waits,
a.create_time,
a.host,
a.port,
b.alarm_processes,
b.alarm_processes_running,
alarm_processes_waits,
b.threshold_warning_processes,
b.threshold_warning_processes_running,
b.threshold_warning_processes_waits,
b.threshold_critical_processes,
threshold_critical_processes_running,
threshold_critical_processes_waits,
b.send_mail,
b.send_mail_to_list,
b.send_sms,
b.send_sms_to_list,
b.send_wx,
b.tags,
'sqlserver' AS db_type
FROM sqlserver_status a, db_cfg_sqlserver b
WHERE a.server_id = b.id
and a.server_id = %s """ %(server_id)
result=func.mysql_query(sql)
if result != 0:
for line in result:
server_id=line[0]
connect=line[1]
processes=line[2]
processes_running=line[3]
processes_waits=line[4]
create_time=line[5]
host=line[6]
port=line[7]
alarm_processes=line[8]
alarm_processes_running=line[9]
alarm_processes_waits=line[10]
threshold_warning_processes=line[11]
threshold_warning_processes_running=line[12]
threshold_warning_processes_waits=line[13]
threshold_critical_processes=line[14]
threshold_critical_processes_running=line[15]
threshold_critical_processes_waits=line[16]
send_mail=line[17]
send_mail_to_list=line[18]
send_sms=line[19]
send_sms_to_list=line[20]
send_wx=line[21]
tags=line[22]
db_type=line[23]
if send_mail_to_list is None or send_mail_to_list.strip()=='':
send_mail_to_list = mail_to_list_common
if send_sms_to_list is None or send_sms_to_list.strip()=='':
send_sms_to_list = sms_to_list_common
if connect != 1:
send_mail = func.update_send_mail_status(server_id,db_type,'connect',send_mail,send_mail_max_count)
send_sms = func.update_send_sms_status(server_id,db_type,'connect',send_sms,send_sms_max_count)
send_wx = func.update_send_wx_status(server_id,db_type,'connect',send_wx)
func.add_alert(server_id,tags,host,port,create_time,db_type,'connect','down','critical','sqlserver server down',send_mail,send_mail_to_list,send_sms,send_sms_to_list,send_wx)
func.update_db_status('connect','3',server_id, host, db_type,create_time,'connect','down','critical')
func.update_db_status('sessions','-1',server_id, host, db_type,'','','','')
func.update_db_status('actives','-1',server_id, host, db_type,'','','','')
func.update_db_status('waits','-1',server_id, host, db_type,'','','','')
func.update_db_status('repl','-1',server_id, host, db_type,'','','','')
func.update_db_status('repl_delay','-1',server_id, host, db_type,'','','','')
else:
func.check_if_ok(server_id,tags,host,port,create_time,db_type,'connect','up','sqlserver server up',send_mail,send_mail_to_list,send_sms,send_sms_to_list,send_wx)
func.update_db_status('connect','1',server_id, host, db_type,create_time,'connect','up','ok')
if int(alarm_processes)==1:
if int(processes)>=int(threshold_critical_processes):
#send_mail = func.update_send_mail_status(server_id,db_type,'processes',send_mail,send_mail_max_count)
#send_sms = func.update_send_sms_status(server_id,db_type,'processes',send_sms,send_sms_max_count)
#func.add_alert(server_id,tags,host,port,create_time,db_type,'processes',processes,'critical','too many processes',send_mail,send_mail_to_list,send_sms,send_sms_to_list,send_wx)
func.update_db_status('sessions',3,server_id, host, db_type,create_time,'processes',processes,'critical')
elif int(processes)>=int(threshold_warning_processes):
#send_mail = func.update_send_mail_status(server_id,db_type,'processes',send_mail,send_mail_max_count)
#send_sms = func.update_send_sms_status(server_id,db_type,'processes',send_sms,send_sms_max_count)
#func.add_alert(server_id,tags,host,port,create_time,db_type,'processes',processes,'warning','too many processes',send_mail,send_mail_to_list,send_sms,send_sms_to_list,send_wx)
func.update_db_status('sessions',2,server_id, host, db_type,create_time,'processes',processes,'warning')
else:
func.update_db_status('sessions',1,server_id, host, db_type,create_time,'processes',processes,'ok')
func.check_if_ok(server_id,tags,host,port,create_time,db_type,'processes',processes,'processes ok',send_mail,send_mail_to_list,send_sms,send_sms_to_list,send_wx)
if int(alarm_processes_running)==1:
if int(processes_running)>=int(threshold_critical_processes_running):
#send_mail = func.update_send_mail_status(server_id,db_type,'processes_running',send_mail,send_mail_max_count)
#send_sms = func.update_send_sms_status(server_id,db_type,'processes_running',send_sms,send_sms_max_count)
#func.add_alert(server_id,tags,host,port,create_time,db_type,'processes_running',processes_running,'critical','too many processes running',send_mail,send_mail_to_list,send_sms,send_sms_to_list,send_wx)
func.update_db_status('actives',3,server_id, host, db_type,create_time,'processes_running',processes_running,'critical')
elif int(processes_running)>=int(threshold_warning_processes_running):
#send_mail = func.update_send_mail_status(server_id,db_type,'processes_running',send_mail,send_mail_max_count)
#send_sms = func.update_send_sms_status(server_id,db_type,'processes_running',send_sms,send_sms_max_count)
#func.add_alert(server_id,tags,host,port,create_time,db_type,'processes_running',processes_running,'critical','too many processes running',send_mail,send_mail_to_list,send_sms,send_sms_to_list,send_wx)
func.update_db_status('actives',2,server_id, host, db_type,create_time,'processes_running',processes_running,'warning')
else:
func.update_db_status('actives',1,server_id, host, db_type,create_time,'processes_running',processes_running,'ok')
func.check_if_ok(server_id,tags,host,port,create_time,db_type,'processes_running',processes_running,'processes running ok',send_mail,send_mail_to_list,send_sms,send_sms_to_list,send_wx)
if int(alarm_processes_waits)==1:
if int(processes_waits)>=int(threshold_critical_processes_waits):
#send_mail = func.update_send_mail_status(server_id,db_type,'processes_waits',send_mail,send_mail_max_count)
#send_sms = func.update_send_sms_status(server_id,db_type,'processes_waits',send_sms,send_sms_max_count)
#func.add_alert(server_id,tags,host,port,create_time,db_type,'processes_waits',processes_waits,'critical','too many processes waits',send_mail,send_mail_to_list,send_sms,send_sms_to_list,send_wx)
func.update_db_status('waits',3,server_id, host, db_type,create_time,'processes_waits',processes_waits,'critical')
elif int(processes_waits)>=int(threshold_warning_processes_waits):
#send_mail = func.update_send_mail_status(server_id,db_type,'processes_waits',send_mail,send_mail_max_count)
#send_sms = func.update_send_sms_status(server_id,db_type,'processes_waits',send_sms,send_sms_max_count)
#func.add_alert(server_id,tags,host,port,create_time,db_type,'processes_waits',processes_waits,'warning','too many processes waits',send_mail,send_mail_to_list,send_sms,send_sms_to_list,send_wx)
func.update_db_status('waits',2,server_id, host, db_type,create_time,'processes_waits',processes_waits,'warning')
else:
func.update_db_status('waits',1,server_id, host, db_type,create_time,'processes_waits',processes_waits,'ok')
func.check_if_ok(server_id,tags,host,port,create_time,db_type,'processes_waits',processes_waits,'processes waits ok',send_mail,send_mail_to_list,send_sms,send_sms_to_list,send_wx)
else:
pass
def gen_alert_sqlserver_mirror(server_id, mirror_role):
if g_alert != "1":
return -1
sql = """SELECT a.server_id,
a.connect,
a.create_time,
a.host,
a.port,
b.send_mail,
b.send_mail_to_list,
b.send_sms,
b.send_sms_to_list,
b.send_wx,
b.tags,
'sqlserver' AS db_type
FROM sqlserver_status a, db_cfg_sqlserver b
WHERE a.server_id = b.id
and a.server_id = %s """ %(server_id)
result=func.mysql_query(sql)
if result != 0:
for line in result:
server_id=line[0]
connect=line[1]
create_time=line[2]
host=line[3]
port=line[4]
send_mail=line[5]
send_mail_to_list=line[6]
send_sms=line[7]
send_sms_to_list=line[8]
send_wx=line[9]
tags=line[10]
db_type=line[11]
if send_mail_to_list is None or send_mail_to_list.strip()=='':
send_mail_to_list = mail_to_list_common
if send_sms_to_list is None or send_sms_to_list.strip()=='':
send_sms_to_list = sms_to_list_common
if mirror_role==1:
send_mail = func.update_send_mail_status(server_id,db_type,'mirroring_role',send_mail,send_mail_max_count)
send_sms = func.update_send_sms_status(server_id,db_type,'mirroring_role',send_sms,send_sms_max_count)
send_wx = func.update_send_wx_status(server_id,db_type,'mirroring_role',send_wx)
func.add_alert(server_id,tags,host,port,create_time,db_type,'mirroring_role',mirror_role,'critical','Database role is NOT match!',send_mail,send_mail_to_list,send_sms,send_sms_to_list,send_wx)
func.update_db_status('repl',3,server_id, host, db_type,create_time,'mirroring_role',mirror_role,'critical')
else:
func.check_if_ok(server_id,tags,host,port,create_time,db_type,'mirroring_role',mirror_role,'Database role is OK!',send_mail,send_mail_to_list,send_sms,send_sms_to_list,send_wx)
func.update_db_status('repl',1,server_id, host, db_type,create_time,'mirroring_role',mirror_role,'ok')
else:
pass
``` |
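The alert functions above repeatedly compare a metric (processes, running processes, waits) against a warning and a critical threshold before updating the status tables. A condensed, standalone sketch of that classification pattern is shown below; the function and parameter names are made up for the example and are not part of Heimdallr itself.
```python
def classify(value, warning_threshold, critical_threshold):
    """Return 'critical', 'warning', or 'ok' for a metric, mirroring the
    threshold comparisons used in the alert functions above."""
    if value >= critical_threshold:
        return "critical"
    if value >= warning_threshold:
        return "warning"
    return "ok"

print(classify(120, warning_threshold=80, critical_threshold=150))  # -> warning
```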
{
"source": "jkwening/bus_wmata",
"score": 3
} |
#### File: bus_wmata/old_wmata_code/wmata_api.py
```python
import requests
class WmataApi:
"""WMATA API class."""
bus_routes_url = 'https://api.wmata.com/Bus.svc/json/jRoutes'
bus_pos_url = 'https://api.wmata.com/Bus.svc/json/jBusPositions'
bus_path_url = 'https://api.wmata.com/Bus.svc/json/jRouteDetails'
bus_schedule_url = 'https://api.wmata.com/Bus.svc/json/jRouteSchedule'
bus_scheduled_stop_url = 'https://api.wmata.com/Bus.svc/json/jStopSchedule'
bus_nearby_stop_url = 'https://api.wmata.com/Bus.svc/json/jStops'
bus_incidents_url = 'https://api.wmata.com/Incidents.svc/json/BusIncidents'
@classmethod
def get_bus_routes(cls, api_key: str):
"""Returns list of all bus stop info, route, and schedule data.
Returns:
- Routes: Array containing route variant information: Name,
RouteID, and LineDescription.
"""
headers = {
'api_key': api_key
}
r = requests.get(cls.bus_routes_url, headers=headers)
return r.json()
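# Hypothetical usage (the key below is a placeholder, not a real credential):
#   routes = WmataApi.get_bus_routes(api_key="<YOUR_WMATA_KEY>")["Routes"]
#   route_ids = [r["RouteID"] for r in routes]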
@classmethod
def get_bus_position(cls, api_key: str, route_id: str = None,
lat: int = None, lon: int = None,
radius: int = None):
"""
Returns bus positions for the given route, with an optional
search radius. If no parameters are specified, all bus positions
are returned. Note: the RouteId parameter only accepts base route names
and no variations. i.e. 10A not 10Av1 or 10Av2.
Request parameters:
- RouteId (opt.): Base bus route, e.g. 70, 10A.
- Lat (opt.): Latitude, required if Longitude and Radius specified.
- Lon (opt.): Longitude, required if Latitude and Radius specified.
- Radius (opt.): Radius (m) to include in search area, required if
Latitude and Longitude are specified.
Returns:
- BusPositions: Array containing bus position information:
DateTime, Deviation, DirectionText, Lat, Lon, RouteID,
TripEndTime, TripHeadsign, TripID, TripStartTime, and
VehicleID.
"""
headers = {
'api_key': api_key
}
# configure optional parameters
params = dict()
if route_id is not None:
params['RouteID'] = route_id
if lat is not None:
params['Lat'] = lat
if lon is not None:
params['Lon'] = lon
if radius is not None:
params['Radius'] = radius
r = requests.get(cls.bus_pos_url, params=params, headers=headers)
return r.json()
@classmethod
def get_bus_path_details(cls, api_key: str, route_id: str,
date: str = None):
"""
        Returns the set of ordered latitude/longitude points along a route
        variant along with the list of stops served.
Request parameters:
- RouteId: Base bus route, e.g. 70, 10A.
- Date (opt.): Date in YYYY-MM-DD format for which to retrieve
route and stop information. Defaults to today's date unless
specified.
Returns:
- Direction0/Direction1: Structures describing path/stop
information, which most will return both but a few that run in
a loop will return only for Direction0 and NULL content for
Direction1: DirectionText, Shape, Stops, and TripHeadsign.
- Name: Descriptive name for the route
- RouteID: Bus route variant (e.g.: 10A, 10Av1, etc.)
"""
headers = {
'api_key': api_key
}
# configure parameters
params = dict()
params['RouteID'] = route_id
if date is not None:
params['Date'] = date
r = requests.get(cls.bus_path_url, params=params, headers=headers)
return r.json()
@classmethod
def get_bus_schedule(cls, api_key: str, route_id: str,
date: str = None, variation: bool = None):
"""
Returns schedule for a given route variant for a given route.
Request parameters:
- RouteId: Base bus route, e.g. 70, 10A.
- Date (opt.): Date in YYYY-MM-DD format for which to retrieve
route and stop information. Defaults to today's date unless
specified.
- IncludingVariations (opt.): Whether or not to include variations
if a base route is specified in RouteId. For example, if B30
is specified and IncludingVariations is set to true, data for
all variations of B30 such as B30v1, B30v2, etc. will be
returned.
Returns:
- Direction0/Direction1: Arrays containing trip direction
information, which most will return both but a few that run in
a loop will return only for Direction0 and NULL content for
Direction1: EndTime, RouteID, StartTime, StopTimes,
TripDirectionText, TripHeadsign, and TripID.
- Name: Descriptive name for the route
"""
headers = {
'api_key': api_key
}
# configure parameters
params = dict()
params['RouteID'] = route_id
if date is not None:
params['Date'] = date
if variation is not None:
params['IncludingVariations'] = variation
r = requests.get(cls.bus_schedule_url, params=params, headers=headers)
return r.json()
@classmethod
def get_bus_scheduled_stop(cls, api_key, stop_id: str, date: str = None):
"""
Returns a set of buses scheduled at a stop for a given date.
Request parameters:
- StopID: 7-digit regional stop ID.
- Date (opt.): Date in YYYY-MM-DD format for which to retrieve
schedule. Defaults to today's date unless specified.
Returns:
- ScheduleArrivals: Array containing scheduled arrival information:
DirectionNum, EndTime, RouteID, ScheduleTime,
TripDirectionText,TripHeadsign, and TripID
- Stop: Dict describing stop information: Lat, Lon, Name, Routes,
and StopID
"""
headers = {
'api_key': api_key
}
# configure parameters
params = dict()
params['StopID'] = stop_id
if date is not None:
params['Date'] = date
r = requests.get(cls.bus_scheduled_stop_url, params=params, headers=headers)
return r.json()
@classmethod
def get_bus_nearby_stops(cls, api_key: str, lat: int = None,
lon: int = None, radius: int = None):
"""
Returns a list of nearby bus stops based on latitude, longitude,
and radius. Omit all parameters to retrieve a list of all stops.
Request parameters:
- Lat (opt.): Latitude, required if Longitude and Radius specified.
- Lon (opt.): Longitude, required if Latitude and Radius specified.
- Radius (opt.): Radius (m) to include in search area, required if
Latitude and Longitude are specified.
Returns:
- Stops: Array containing stop information: Lat, Lon, Name,
Routes, and StopID.
"""
headers = {
'api_key': api_key
}
# configure optional parameters
params = dict()
if lat is not None:
params['Lat'] = lat
if lon is not None:
params['Lon'] = lon
if radius is not None:
params['Radius'] = radius
r = requests.get(cls.bus_nearby_stop_url, params=params,
headers=headers)
return r.json()
@classmethod
def get_incidents(cls, api_key, route_id=None):
"""Returns a set of reported bus incidents/delays for a given Route.
Omit the Route to return all reported items. Note that the Route
        parameter accepts only base route names and no variants, i.e.:
use 10A instead of 10Av1 and 10Av2.
Returns:
BusIncidents: Array containing bus incidents information:
DateUpdated, Description, IncidentId, IncidentType, and
RoutesAffected.
"""
headers = {
'api_key': api_key
}
# configure optional parameters
params = dict()
if route_id is not None:
params['RouteID'] = route_id
r = requests.get(cls.bus_incidents_url, params=params, headers=headers)
return r.json()
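# --- Usage sketch (added for illustration; not part of the original module) ---
# Requires network access and a valid WMATA API key; 'demo_key' is a placeholder.
if __name__ == '__main__':
    key = 'demo_key'
    routes = WmataApi.get_bus_routes(api_key=key)
    positions = WmataApi.get_bus_position(api_key=key, route_id='70')
    print(routes.get('Routes', [])[:3])
    print(positions.get('BusPositions', [])[:1])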
```
#### File: bus_wmata/tests/wmata_test.py
```python
import unittest
from requests import codes
# project modules
import wmata
class MyTestCase(unittest.TestCase):
def test_validate_key(self):
for sub, key in wmata.API_KEYS.items():
self.assertTrue(
wmata.validate_key(api_key=key),
msg=f'Failed validating {sub}'
)
def test_get_bus_position(self):
# case route_id = 70
resp = wmata.get_bus_position(route_id='70')
self.assertTrue(
resp.status_code == codes.ok,
msg=f'[Route_ID-70] Failed with status code: {resp.status_code}'
)
self.assertTrue(resp.json(), msg=f'[Route_ID-70] JSON is empty: {resp.json}')
r = resp.json()
self.assertTrue(
'BusPositions' in r.keys(),
msg=f'[Route_ID-70] BusPositions not in: {resp.json()}'
)
# case lat=39.191525, lon=-76.672821, radius=1000 (meters)
resp = wmata.get_bus_position(lat=39.191525, lon=-76.672821, radius=1000)
self.assertTrue(
resp.status_code == codes.ok,
msg=f'[Lat,Lon,Radius] Failed with status code: {resp.status_code}'
)
self.assertTrue(resp.json(), msg=f'[Lat,Lon,Radius] JSON is empty: {resp.json}')
r = resp.json()
self.assertTrue(
'BusPositions' in r.keys(),
msg=f'[Lat,Lon,Radius] BusPositions not in: {resp.json()}'
)
def test_get_path_details(self):
# case route_id = 70
resp = wmata.get_path_details(route_id='70')
self.assertTrue(
resp.status_code == codes.ok,
msg=f'[Route_ID-70] Failed with status code: {resp.status_code}'
)
self.assertTrue(resp.json(), msg=f'[Route_ID-70] JSON is empty: {resp.json}')
r = resp.json()
# validate response elements
expected_elements = [
'Direction0', 'Direction1', 'Name', 'RouteID'
]
for e in expected_elements:
self.assertTrue(
e in r.keys(),
msg=f'[Route_ID-70] response element {e} not in: {resp.json()}'
)
def test_get_routes(self):
# case: get_routes()
resp = wmata.get_routes()
self.assertTrue(
resp.status_code == codes.ok,
msg=f'Failed with status code: {resp.status_code}'
)
self.assertTrue(resp.json(), msg=f'JSON is empty: {resp.json}')
r = resp.json()
# validate response elements
expected_elements = [
'Routes'
]
for e in expected_elements:
self.assertTrue(
e in r.keys(),
msg=f'[Route_ID-70] response element {e} not in: {resp.json()}'
)
# case: get_route_ids()
route_ids = wmata.get_route_ids(resp.json())
expected_routes = [
'10A', '10B', '70'
]
for r in expected_routes:
self.assertIn(
r, route_ids,
msg=f'{r} not in {route_ids}'
)
def test_get_schedule(self):
# case route_id = 10A
resp = wmata.get_schedule(route_id='10A', including_variations=True)
self.assertTrue(
resp.status_code == codes.ok,
msg=f'[Route_ID-10A] Failed with status code: {resp.status_code}'
)
self.assertTrue(resp.json(), msg=f'[Route_ID-10A] JSON is empty: {resp.json}')
r = resp.json()
# validate response elements
expected_elements = [
'Direction0', 'Direction1', 'Name'
]
for e in expected_elements:
self.assertTrue(
e in r.keys(),
msg=f'[Route_ID-10A] response element {e} not in: {resp.json()}'
)
def test_get_stop_schedule(self):
# case stop_id = 1001195
resp = wmata.get_stop_schedule(stop_id='1001195')
self.assertTrue(
resp.status_code == codes.ok,
msg=f'[StopID-1001195] Failed with status code: {resp.status_code}'
)
self.assertTrue(resp.json(), msg=f'[StopID-1001195] JSON is empty: {resp.json}')
r = resp.json()
# validate response elements
expected_elements = [
'ScheduleArrivals', 'Stop'
]
for e in expected_elements:
self.assertTrue(
e in r.keys(),
msg=f'[StopID-1001195] response element {e} not in: {resp.json()}'
)
def test_get_stops(self):
# case lat=39.191525, lon=-76.672821, radius=1000 (meters)
resp = wmata.get_stops(lat=39.191525, lon=-76.672821, radius=1000)
self.assertTrue(
resp.status_code == codes.ok,
msg=f'[Lat,Lon,Radius] Failed with status code: {resp.status_code}'
)
self.assertTrue(resp.json(), msg=f'[Lat,Lon,Radius] JSON is empty: {resp.json}')
r = resp.json()
self.assertTrue(
'Stops' in r.keys(),
            msg=f'[Lat,Lon,Radius] Stops not in: {resp.json()}'
)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jkwesio/ppi-fiberdistr",
"score": 3
} |
#### File: jkwesio/ppi-fiberdistr/emissionsCalcV4.py
```python
import pandas as pd
import numpy as np
class en_emissions(): # energy & emissions
def __init__(cls,xls,fProd,rLevel,f2pYld,pulpYld,f2pVolNew,pbpVolNew,consCollNew,exportNew,demandNew):
# xls (str) - name of Excel spreadsheet to pull data from
# fProd (list) - list of products in current scenario
# rLevel (df) - recycled content level by product
# f2pYld (df) - fiber to pulp yield by pulp product; indexed by fiber
# pulpYld (df) - pulp to product yield; pulp as index
# f2pVolNew (df) - fiber to pulp volume (in short tons); indexed by pulp name
# pbpVolNew (df) - pulp by product volume; indexed by pulp name
# consCollNew (df) - domestic consumption, collection, and recovery by product
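        # exportNew (df) - new export from US (in short tons); indexed by rec fiber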
# demandNew (df) - new demand by product; indexed by rec level
uC = 0.907185 # unit conversion of MM US ton to Mg/metric ton
cls.fProd = fProd
cls.fProdM = fProd + ['Market']
cls.rLevel = rLevel
cls.f2pYld = f2pYld
cls.pulpYld = pulpYld
cls.f2pVolNew = f2pVolNew * uC
cls.pbpVolNew = pbpVolNew * uC
cls.consCollNew = consCollNew * uC
cls.exportNew = exportNew * uC
cls.demandNew = {t: demandNew[t] * uC for t in demandNew.keys()}
with pd.ExcelFile(xls) as x:
# Old data
cls.f2pVolOld = pd.read_excel(x, 'OldData', usecols="A:I", skiprows=1, nrows=21, index_col=0)
cls.f2pVolOld.iloc[:,:-1] = cls.f2pVolOld.iloc[:,:-1] * uC * 1000
cls.f2pVolNew = cls.f2pVolNew.assign(TransCode=cls.f2pVolOld['TransCode'].values)
cls.pbpVolOld = pd.read_excel(x, 'OldData', usecols="K:R", skiprows=1, nrows=14, index_col=0)
cls.pbpVolOld.columns = [x[:-2] for x in cls.pbpVolOld.columns] # has .1 after column names for pandas duplicate
cls.pbpVolOld.iloc[:,:-1] = cls.pbpVolOld.iloc[:,:-1] * uC * 1000
cls.pbpVolNew = cls.pbpVolNew.assign(TransCode=cls.pbpVolOld['TransCode'].values)
cls.prodLD = pd.read_excel(x, 'OldData', usecols="K:Q", skiprows=19, nrows=5, index_col=0) * uC * 1000
cls.prodDemand = pd.read_excel(x, 'OldData', usecols="A:G", skiprows=26, nrows=1, index_col=0) * uC * 1000
cls.consCollOld = pd.read_excel(x, 'OldData', usecols="K:Q", skiprows=29, nrows=3, index_col=0) * uC * 1000
cls.exportOld = pd.read_excel(x, 'OldData', usecols="E:G", skiprows=31, nrows=16, index_col=0)
cls.exportOld.iloc[:,:-1] = cls.exportOld.iloc[:,:-1] * uC * 1000
cls.exportNew = cls.exportNew.assign(TransCode=cls.exportOld['TransCode'].values)
cls.fiberType = pd.read_excel(x, 'OldData', usecols="A:B", skiprows=31, nrows=20, index_col=0)
cls.rFiber = cls.f2pVolOld.index[:16]
cls.vFiber = cls.f2pVolOld.index[16:]
cls.rPulp = [p for p in cls.pbpVolOld.index if 'Rec' in p]
cls.vPulp = [q for q in cls.pbpVolOld.index if 'Vir' in q]
cls.fPulp = [f for f in cls.pbpVolOld.index]
# Emissions Info
cls.chemicals = pd.read_excel(x, 'nonFiber', usecols="A:B,E:L", skiprows=2, nrows=42, index_col=0)
cls.eolEmissions = pd.read_excel(x, 'EmTables', usecols="A:G", skiprows=2, nrows=3, index_col=0)
cls.bfEI = pd.read_excel(x, 'EmTables', usecols="J:P", skiprows=2, nrows=3, index_col=0)
            cls.bfEI.columns = [x[:-2] for x in cls.bfEI.columns] # strip the .1 pandas appends to duplicate column names
cls.bioPct = pd.read_excel(x, 'EmTables', usecols="J:P", skiprows=8, nrows=2, index_col=0)
cls.pwpEI = pd.read_excel(x, 'EmTables', usecols="O:P", skiprows=14, nrows=5, index_col=0)
cls.bfCO2 = pd.read_excel(x, 'EmTables', usecols="A:G", skiprows=9, nrows=2, index_col=0)
cls.fuelTable = pd.read_excel(x, 'EmTables', usecols="A:M", skiprows=15, nrows=13, index_col=0)
cls.fuelTable = cls.fuelTable.fillna(0)
cls.rsdlModes = pd.read_excel(x, 'EmTables', usecols="A:G", skiprows=32, nrows=6, index_col=0)
cls.rsdlbio = pd.read_excel(x, 'EmTables', usecols="A:H", skiprows=41, nrows=4, index_col=0)
cls.rsdlbio = cls.rsdlbio.fillna(0)
cls.rsdlfos = pd.read_excel(x, 'EmTables', usecols="A:H", skiprows=48, nrows=4, index_col=0)
cls.rsdlfos = cls.rsdlfos.fillna(0)
cls.transPct = pd.read_excel(x, 'EmTables', usecols="L:P", skiprows=32, nrows=11, index_col=0)
cls.transKM = pd.read_excel(x, 'EmTables', usecols="L:P", skiprows=46, nrows=11, index_col=0)
cls.transUMI = pd.read_excel(x, 'EmTables', usecols="L:P", skiprows=59, nrows=1, index_col=0)
cls.woodint = pd.read_excel(x, 'EmTables', usecols="A:H", skiprows=58, nrows=1, index_col=0)
cls.wtotalGHGb0 = pd.read_excel(x, 'EmTables', usecols="A:K", skiprows=62, nrows=6, index_col=0)
cls.wtotalGHGb1 = pd.read_excel(x, 'EmTables', usecols="A:K", skiprows=71, nrows=6, index_col=0)
cls.wbioGHGb0 = pd.read_excel(x, 'EmTables', usecols="A:K", skiprows=80, nrows=6, index_col=0)
cls.wbioGHGb1 = pd.read_excel(x, 'EmTables', usecols="A:K", skiprows=89, nrows=6, index_col=0)
cls.wfosGHGb0 = pd.read_excel(x, 'EmTables', usecols="A:K", skiprows=98, nrows=6, index_col=0)
cls.wfosGHGb1 = pd.read_excel(x, 'EmTables', usecols="A:K", skiprows=107, nrows=6, index_col=0)
cls.chinaVals = pd.read_excel(x, 'EmTables', usecols="L:M", skiprows=66, nrows=3, index_col=0)
cls.chinaCons = pd.read_excel(x, 'EmTables', usecols="L:M", skiprows=72, nrows=6, index_col=0)
cls.fYield = pd.read_excel(x, 'EmTables', usecols="L:N", skiprows=81, nrows=5, index_col=0)
def calculateTrans(cls,transVol):
# transVol [df] - item, volume (in Mg) by product, TransCode; indexed by fiberCode or other label
# transPct [df] - % traversed for transMode by transCode; indexed by transCode
# transKM [df] - distance traversed for transMode by transCode; indexed by transCode
# transUMI [s] - unit impact by mode (truck, train, boat); indexed by "transUMI"
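        # per product t, summed over shipment rows and transport modes m:
        #   transImpact[t] += volume[t] * transPct[transCode, m] * transKM[transCode, m] * transUMI[m]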
transImpact = pd.Series(0, index = cls.fProd)
tC = transVol['TransCode']
tC = tC[(tC != 0) & (tC != 1)] # index non-zero/non-NaN elements only
transVol = transVol.loc[tC.index]
for t in cls.fProd:
for m in cls.transUMI.columns:
transImpact[t] += sum(transVol[t] * cls.transPct.loc[tC,m].values * cls.transKM.loc[tC,m].values * cls.transUMI[m].values * 1)
return transImpact
def calculateChem(cls,chemicals,prodDemand):
# chemicals [df] - nonfiber name, % use by product, transCode, impact factor; indexed by number
# prodDemand [df] - total demand; indexed by product
chemImpact = pd.Series(0, index = cls.fProd, name = 'chemImp')
chemVol = pd.DataFrame(0, index = chemicals.index, columns = cls.fProd)
for t in cls.fProd:
chemImpact[t] = sum(prodDemand[t].values * chemicals[t] * chemicals['Impact Factor'])
chemVol[t] = chemicals[t] * prodDemand[t].values
chemVol = chemVol.join(chemicals['TransCode'])
chemTrans = pd.Series(cls.calculateTrans(chemVol), name = 'chemTrans')
chemImpact = pd.DataFrame(chemImpact)
return pd.concat([chemImpact, chemTrans], axis=1)
def calculateEoL(cls,eolEmissions,consColl):
# eolEmissions [df] - biogenic and fossil CO2 emission factors & transportation code by product; indexed by bio/fosCO2
# consColl [df] - domestic consumption, collection, and recovery by product; indexed by name
prod2landfill = pd.Series(consColl.loc['Domestic Consumption'] - consColl.loc['Recovery Volume'],
index = cls.fProd, name = 'prod2landfill')
mrf2landfill = pd.Series(consColl.loc['Collection Volume'] - consColl.loc['Recovery Volume'],
index = cls.fProd, name = 'mrf2landfill')
bioEoL = pd.Series(prod2landfill * eolEmissions.loc['bioCO2'], index = cls.fProd, name = 'bioEoL')
mrf2landfill = pd.DataFrame(mrf2landfill) # works b/c all prods have same TransCode
transEoL = pd.Series(cls.calculateTrans(mrf2landfill.T.assign(TransCode=eolEmissions.loc['TransCode'].values[0])),
index = cls.fProd, name = 'eolTrans')
fesTransEoL = pd.Series(prod2landfill * eolEmissions.loc['fossilCO2'] + transEoL, index = cls.fProd,
name = 'fesTransEoL')
bftEoL = pd.Series(bioEoL + fesTransEoL, name = 'bftEoL')
return pd.concat([bioEoL, fesTransEoL, bftEoL, transEoL], axis=1)
def getEnergyYldCoeff(cls,f2pVol,pbpVol):
# f2pVol [df] - recycled fiber to pulp (in Mg); indexed by fiber code
# pbpVol [df] - pulp by product (in Mg); indexed by pulp name
#
# PYCoeff [s] - pulp yield coeffient; indexed by pulp
f2pByPulp = pd.Series(0, index = pbpVol.index, name = 'fiber2pulp')
for p in cls.rPulp:
f2pByPulp[p] = sum([f2pVol.loc[cls.rFiber,t].sum() for t in cls.fProdM
if cls.fProdM.index(t) == cls.rPulp.index(p)])
for q in cls.vPulp:
f2pByPulp[q] = sum([f2pVol.loc[cls.vFiber,t].sum() for t in cls.fProdM
if cls.fProdM.index(t) == cls.vPulp.index(q)])
pulpProd = pd.Series([pbpVol.loc[i].sum() for i in pbpVol.index], index = pbpVol.index, name = 'pulpProd')
PYCoeff = (pd.Series(f2pByPulp / pulpProd, name = 'pulpYldCoeff'))
PYCoeff.replace([np.inf, -np.inf], np.nan, inplace=True)
PYCoeff = PYCoeff.fillna(0)
return PYCoeff
def getEnergyPulpPct(cls,pbpVol):
# pbpVol [df] - pulp by product (in Mg); indexed by pulp name
#
# pulpPct [df] - % of rec/vir pulp used in product; indexed by pulp name
pulpPct = pbpVol.copy().drop(['TransCode'], axis=1)
for t in pulpPct.columns:
rTotalPulp = pulpPct.loc[cls.rPulp,t].sum()
vTotalPulp = pulpPct.loc[cls.vPulp,t].sum()
pulpPct.loc[cls.rPulp,t] = pulpPct.loc[cls.rPulp,t] / rTotalPulp
pulpPct.loc[cls.vPulp,t] = pulpPct.loc[cls.vPulp,t] / vTotalPulp
return pulpPct.fillna(0)
def getEnergyMultiProd(cls,PYMult,pulpPct):
# PYMult [s] - pulp yield multiplier; indexed by pulp name
# pulpPct [df] - % of rec/vir pulp used in product; indexed by pulp name
#
# (return) [df] - rec/vir yield multiprod by product; index by r/vYldMultiProd
rYldMultiProd = pd.Series([sum(pulpPct.loc[cls.rPulp,t] * PYMult[cls.rPulp]) for t in cls.fProd],
index = cls.fProd, name = 'rYldMultiProd')
vYldMultiProd = pd.Series([sum(pulpPct.loc[cls.vPulp,t] * PYMult[cls.vPulp]) for t in cls.fProd],
index = cls.fProd, name = 'vYldMultiProd')
rYldMultiProd.replace([np.inf, -np.inf], np.nan, inplace=True)
vYldMultiProd.replace([np.inf, -np.inf], np.nan, inplace=True)
return pd.concat([rYldMultiProd.fillna(0), vYldMultiProd.fillna(0)], axis=1)
def calculateEnergy(cls,pbpVol,prodLD,multiProd,pwpEI,paperEI):
# prodLD (df) - demand by product; indexed by % recycled content level
# bfEI (df) - bio & fes energy intensity fitting parameters by product; indexed by name
# bioPct (df) - bio fitting parameter for PWP; indexed by name
# pwpEI (df) - energy intensity of PWP pulp; indexed by pulp name
# paperEI (df) - paper production energy intensity; indexed by 'PPE'
# pbpVol (df) - pulp by product (in Mg); indexed by pulp name
# multiProd (df) - rec/vir yield multiprod by product; indexed by product
bioEnergy = pd.Series(0, index = cls.fProd, name = "bioEnergy")
fesEnergy = pd.Series(0, index = cls.fProd, name = 'fesEnergy')
totalEnergy = pd.Series(0, index = cls.fProd, name = 'totalEnergy')
for t in cls.fProd:
bioEnergy[t] = sum(prodLD[t].values[:len(cls.rLevel[t])] *
sum([r * cls.bfEI.loc['bioEI b1',t] + cls.bfEI.loc['bioEI b0',t] for r in cls.rLevel[t]]))
fesEnergy[t] = sum(prodLD[t].values[:len(cls.rLevel[t])] *
cls.bfEI.loc['fesEI',t] * multiProd.loc[t,'rYldMultiProd'])
            if 'P&W' in t or 'News' in t:
avgrecPct = sum(prodLD[t].values[:len(cls.rLevel[t])] * cls.rLevel[t]) / prodLD[t].sum()
bioPctPW = avgrecPct * cls.bioPct.loc['bioPct b1',t] + cls.bioPct.loc['bioPct b0',t]
pulpProdEnergy = sum([pbpVol.loc[p,t] * pwpEI.loc[p].values[0] for p in pwpEI.index])
ppEnergy = pulpProdEnergy + prodLD[t].sum() * paperEI.values[0]
bioEnergy[t] = bioPctPW * ppEnergy
fesEnergy[t] = (1 - bioPctPW) * ppEnergy * multiProd.loc[t,'rYldMultiProd']
totalEnergy[t] = bioEnergy[t] + fesEnergy[t]
return pd.concat([bioEnergy, fesEnergy, totalEnergy], axis=1)
def calculateProduction(cls,calcEnergy):
# calcEnergy (df) - bio, fes, and total energy from calculateEnergy; indexed by product
# bfCO2 (df) - bio & fes CO2 fitting parameters; indexed by product
bioCO2 = pd.Series(0, index = cls.fProd, name = 'bioCO2')
fesCO2 = pd.Series(0, index = cls.fProd, name = 'fesCO2')
totalCO2 = pd.Series(0, index = cls.fProd, name = 'totalCO2')
for t in cls.fProd:
bioCO2[t] = calcEnergy.loc[t,'bioEnergy'] * cls.bfCO2.loc['bioCO2 b1',t]
fesCO2[t] = calcEnergy.loc[t,'fesEnergy'] * cls.bfCO2.loc['fesCO2 b1',t]
totalCO2[t] = bioCO2[t] + fesCO2[t]
return pd.concat([bioCO2, fesCO2, totalCO2], axis=1)
def calculateFuel(cls,calcEnergy):
# calcEnergy (df) - bio, fes, and total energy from calculateEnergy; indexed by product
# fuelTable (df) - fuel impact by product; indexed by fuel type
fuels = cls.fuelTable.index
bioFI = pd.Series(0, index = cls.fProd, name = 'bioFuelImp')
fesFI = pd.Series(0, index = cls.fProd, name = 'fesFuelImp')
fuelImp = pd.Series(0, index = cls.fProd, name = 'fuelImp')
for t in cls.fProd:
bioFI[t] = calcEnergy.loc[t,'bioEnergy'] * sum([cls.fuelTable.loc[f,t] * cls.fuelTable.loc[f,'Upstream Impact Factor']
for f in fuels if cls.fuelTable.loc[f,'Fuel Type'] == 1])
fesFI[t] = calcEnergy.loc[t,'fesEnergy'] * sum([cls.fuelTable.loc[f,t] * cls.fuelTable.loc[f,'Upstream Impact Factor']
for f in fuels if cls.fuelTable.loc[f,'Fuel Type'] == 2])
fuelImp[t] = bioFI[t] + fesFI[t]
fuelTransVol = cls.fuelTable.copy()
fuel1 = [f for f in fuels if cls.fuelTable.loc[f,'Fuel Type'] == 1]
fuel2 = [f for f in fuels if cls.fuelTable.loc[f,'Fuel Type'] == 2]
for t in cls.fProd:
fuelTransVol.loc[fuel1,t] = [calcEnergy.loc[t,'bioEnergy'] * cls.fuelTable.loc[f,t] * cls.fuelTable.loc[f,'FU/GJ']
for f in fuel1]
fuelTransVol.loc[fuel2,t] = [calcEnergy.loc[t,'fesEnergy'] * cls.fuelTable.loc[f,t] * cls.fuelTable.loc[f,'FU/GJ']
for f in fuel2]
fuelTrans = pd.Series(cls.calculateTrans(fuelTransVol), name = 'fuelTrans')
return pd.concat([bioFI, fesFI, fuelImp, fuelTrans], axis=1)
def calculateResidual(cls,pbpVol,f2pVol):
# pbpVol [df] - pulp by product (in Mg); indexed by pulp name
# f2pVol [df] - recycled fiber to pulp (in Mg); indexed by fiber code
# f2pYld [df] - fiber to pulp yield by pulp product; indexed by fiber
# pulpYld [df] - pulp to product yield; indexed by pulp
# rsdlModes [df] - residual treatments modes; indexed by residual type
# rsdlbio [df] - transport and biogenic emissions factors; indexed by residual treatment mode
# rsdlfos [df] - transport and fossil emissions factors; indexed by residual treatment mode
pulpProd = pd.Series(0, index = cls.rPulp + cls.vPulp, name = 'pulpProduced')
fiberRes = pd.Series(0, index = cls.rPulp + cls.vPulp, name = 'fiberResidue')
for p in cls.rPulp: # order of fPulp must match order of r/vPulp
pulpProd[p] = sum([(f2pVol.loc[cls.rFiber,t].mul(cls.f2pYld.loc[cls.rFiber,t])).sum() for t in cls.fProdM
if cls.fProdM.index(t) == cls.rPulp.index(p)])
fiberRes[p] = sum([(f2pVol.loc[cls.rFiber,t].mul(1 - cls.f2pYld.loc[cls.rFiber,t])).sum() for t in cls.fProdM
if cls.fProdM.index(t) == cls.rPulp.index(p)])
for q in cls.vPulp:
pulpProd[q] = sum([(f2pVol.loc[cls.vFiber,t].mul(cls.f2pYld.loc[cls.vFiber,t])).sum() for t in cls.fProdM
if cls.fProdM.index(t) == cls.vPulp.index(q)])
fiberRes[q] = sum([(f2pVol.loc[cls.vFiber,t].mul(1 - cls.f2pYld.loc[cls.vFiber,t])).sum() for t in cls.fProdM
if cls.fProdM.index(t) == cls.vPulp.index(q)])
pulpUP = pbpVol.iloc[:,:-1].div(pulpProd, axis=0).fillna(0) # pulpUsePct
rFiberRsd = pd.Series((pulpUP.loc[cls.rPulp].mul(fiberRes[cls.rPulp], axis=0)).sum(), index = cls.fProd, name = 'rFiberRsd')
rPulpRsd = pd.Series((pulpUP.loc[cls.rPulp].mul(1 - cls.pulpYld.iloc[:,0].loc[cls.rPulp], axis=0)).sum(), index = cls.fProd, name = 'rPulpRsd')
rTotalRsd = rFiberRsd + rPulpRsd
vFiberRsd = pd.Series((pulpUP.loc[cls.vPulp].mul(fiberRes[cls.vPulp], axis=0)).sum(), index = cls.fProd, name = 'vFiberRsd')
vPulpRsd = pd.Series((pulpUP.loc[cls.vPulp].mul(1 - cls.pulpYld.iloc[:,0].loc[cls.vPulp], axis=0)).sum(), index = cls.fProd, name = 'vPulpRsd')
vTotalRsd = vFiberRsd + vPulpRsd
rsdlType = cls.rsdlModes.index
rsdlQuantity = pd.DataFrame(0, index = rsdlType, columns = cls.fProd)
for rt in rsdlType:
if cls.rsdlModes.loc[rt,'Input Base'] == 1:
rsdlQuantity.loc[rt,:] = rTotalRsd * cls.rsdlModes.loc[rt,'Intensity']
if cls.rsdlModes.loc[rt,'Input Base'] == 2:
rsdlQuantity.loc[rt,:] = vTotalRsd * cls.rsdlModes.loc[rt,'Intensity']
rsdlMode = cls.rsdlModes.columns[:-2]
rsdlModeVol = {rM: pd.DataFrame(0, index = rsdlType, columns = cls.fProd)
for rM in rsdlMode}
for rM in rsdlMode:
rsdlModeVol[rM] = rsdlQuantity.mul(cls.rsdlModes[rM], axis=0)
rsdlModeVol[rM] = rsdlModeVol[rM].assign(TransCode=cls.rsdlbio.loc[rM,'TransCode'] * np.ones(len(rsdlType)))
rsdlModeVol[rM].replace([np.inf, -np.inf], np.nan, inplace=True) # TODO: what happens to make this inf?
            rsdlModeVol[rM] = rsdlModeVol[rM].fillna(0)
bioImp = pd.Series(0, index = cls.fProd, name = 'bioImp')
fosImp = pd.Series(0, index = cls.fProd, name = 'fossilImp')
for t in cls.fProd:
bioImp[t] = sum([rsdlModeVol[rM][t].sum() * cls.rsdlbio.loc[rM,t] for rM in rsdlMode])
fosImp[t] = sum([rsdlModeVol[rM][t].sum() * cls.rsdlfos.loc[rM,t] for rM in rsdlMode])
biofosImp = pd.Series(bioImp + fosImp, name = 'bio+fos')
rsdlTrans = pd.Series(0, index = cls.fProd, name = 'rsdlTrans')
for rM in rsdlMode:
rsdlTrans += cls.calculateTrans(rsdlModeVol[rM])
return pd.concat([bioImp, fosImp, biofosImp, rsdlTrans], axis=1)
def getExportTrans(cls,transVol):
transImpact = pd.Series(0, index = transVol.columns[:-1])
tC = transVol['TransCode']
tC = tC[(tC != 0) & (tC != 1)] # index non-zero/non-NaN elements only
transVol = transVol.loc[tC.index]
for n in transVol.columns[:-1]:
for m in cls.transUMI.columns:
transImpact[n] += sum(transVol[n] * cls.transPct.loc[tC,m].values * cls.transKM.loc[tC,m].values * cls.transUMI[m].values)
return transImpact.values
def calculateExport(cls,exportOld,exportNew):
# exportOld [df] old export from US; indexed by rec fiber
# exportNew [df] new export from US; indexed by rec fiber
impChange = pd.Series(0, index = cls.fYield.index, name = 'impChangeByGroup')
sumChange = pd.Series(0, index = cls.fYield.index, name = 'sumNetChange')
for r in impChange.index:
typeMask = cls.fiberType[cls.fiberType['fiberType'] == r].index
# impChange[r] = (exportOld.loc[typeMask, 'exportOld'] - exportNew.loc[typeMask, 'exportNew']).sum()
impChange[r] = (exportNew.loc[typeMask, 'exportNew'] - exportOld.loc[typeMask, 'exportOld']).sum()
sumChange[r] = impChange[r] * (1 - cls.fYield.loc[r,'US'] / cls.fYield.loc[r,'China'])
beta = sumChange.sum() / (cls.chinaCons.loc['totalVir'].values + cls.chinaCons.loc['domesticRec'].values +
cls.chinaCons.loc['importRec-US'].values + cls.chinaCons.loc['importRec-nonUS'].values)
# chinaTrans = cls.getExportTrans(exportOld) - cls.getExportTrans(exportNew)
chinaTrans = cls.getExportTrans(exportNew) - cls.getExportTrans(exportOld)
return cls.chinaVals.loc['Production'] * cls.chinaVals.loc['Energy Intensity'] * cls.chinaVals.loc['Emission Factor'] * beta + chinaTrans
def getForestVirginGHG(cls,virCons,woodint,slope,intercept):
# virCons [df] change in virgin consumption; products as columns
# woodint [df] intervals of virgin wood consumption
# slope [s] b1 value for GHG emissions
# intercept[s] b0 value for GHG emissions
for n in range(1,len(woodint.columns)):
if (woodint[n].values <= virCons) & (virCons < woodint[n+1].values):
return virCons * slope[n] + intercept[n]
return 0 # catch values outside of interval
def calculateForest(cls,virCons,forYear):
# virCons [float] change in virgin consumption, sum of all products
# forYear [int] forest year length for cumulative emissions calcs; 10-90 by ten
deltaTotalGHG = pd.Series(cls.getForestVirginGHG(virCons / 1e6, cls.woodint, cls.wtotalGHGb1[forYear], cls.wtotalGHGb0[forYear]),
name = 'totalGHG') * 1e6
deltabioGHG = pd.Series(cls.getForestVirginGHG(virCons / 1e6, cls.woodint, cls.wbioGHGb1[forYear], cls.wbioGHGb0[forYear]),
name = 'bioGHG') * 1e6
deltafosGHG = pd.Series(cls.getForestVirginGHG(virCons / 1e6, cls.woodint, cls.wfosGHGb1[forYear], cls.wfosGHGb0[forYear]),
name = 'fosGHG') * 1e6
return pd.concat([deltaTotalGHG, deltabioGHG, deltafosGHG], axis=1)
def calculateEmissions(cls):
# xls [df] - name of Excel spreadsheet to pull data from
# fProd [df] - list of products in current scenario
# rL [dict] - recycled content level by product
# f2pYld [df] - fiber to pulp yield by pulp product; indexed by fiber
# pulpYld [df] - pulp to product yield; indexed by pulp
# f2pVolNew [df] - fiber to pulp volume (in Mg); indexed by fiber code
# pbpVolNew [df] - pulp by product volume; indexed by pulp name
# consCollNew [df] - domestic consumption, collection, and recovery by product
pulpNames = cls.rPulp + cls.vPulp
mvO = [cls.pbpVolOld.loc[p] for p in pulpNames if 'Deinked' in p or 'Market' in p]
marketVolOld = pd.concat([mvO[0],mvO[1]], axis=1).T
mvN = [cls.pbpVolNew.loc[p] for p in pulpNames if 'Deinked' in p or 'Market' in p]
marketVolNew = pd.concat([mvN[0],mvN[1]], axis=1).T
# Chemical
chemImp = cls.calculateChem(cls.chemicals, cls.prodDemand)
# EoL
oldEoL = cls.calculateEoL(cls.eolEmissions, cls.consCollOld)
newEoL = cls.calculateEoL(cls.eolEmissions, cls.consCollNew)
# Energy
oldPulpPct = cls.getEnergyPulpPct(cls.pbpVolOld)
newPulpPct = cls.getEnergyPulpPct(cls.pbpVolNew)
oldPYCoeff = cls.getEnergyYldCoeff(cls.f2pVolOld, cls.pbpVolOld)
newPYCoeff = cls.getEnergyYldCoeff(cls.f2pVolNew, cls.pbpVolNew)
oldYldMultiplier = (oldPYCoeff / oldPYCoeff).fillna(0)
newYldMultiplier = (newPYCoeff / oldPYCoeff).fillna(0)
oldMP = cls.getEnergyMultiProd(oldYldMultiplier, oldPulpPct)
newMP = cls.getEnergyMultiProd(newYldMultiplier, newPulpPct)
oldEnergy = cls.calculateEnergy(cls.pbpVolOld, cls.prodLD, oldMP, cls.pwpEI.iloc[:-1], cls.pwpEI.iloc[-1])
newEnergy = cls.calculateEnergy(cls.pbpVolNew, cls.demandNew, newMP, cls.pwpEI.iloc[:-1], cls.pwpEI.iloc[-1])
# Production
oldProd = cls.calculateProduction(oldEnergy)
newProd = cls.calculateProduction(newEnergy)
# Fuel
oldFuel = cls.calculateFuel(oldEnergy)
newFuel = cls.calculateFuel(newEnergy)
# Residual
oldRsdl = cls.calculateResidual(cls.pbpVolOld, cls.f2pVolOld)
newRsdl = cls.calculateResidual(cls.pbpVolNew, cls.f2pVolNew)
# Transportation
oldFiberTrans = pd.Series(cls.calculateTrans(cls.f2pVolOld), name = 'fiberTrans')
oldMarketTrans = pd.Series(cls.calculateTrans(marketVolOld), name = 'marketTrans')
oldTrans = pd.concat([oldFiberTrans, oldMarketTrans, chemImp['chemTrans'], oldFuel['fuelTrans'],
oldRsdl['rsdlTrans'], oldEoL['eolTrans']], axis=1)
newFiberTrans = pd.Series(cls.calculateTrans(cls.f2pVolNew), name = 'fiberTrans')
newMarketTrans = pd.Series(cls.calculateTrans(marketVolNew), name = 'marketTrans')
newTrans = pd.concat([newFiberTrans, newMarketTrans, chemImp['chemTrans'], newFuel['fuelTrans'],
newRsdl['rsdlTrans'], newEoL['eolTrans']], axis=1)
# Export
exportImp = cls.calculateExport(cls.exportOld,cls.exportNew)
# FASOM/LURA
forestGHG = cls.calculateForest(cls.f2pVolNew.iloc[:,:-1].loc[cls.vFiber].sum().sum() -
cls.f2pVolOld.iloc[:,:-1].loc[cls.vFiber].sum().sum(), 90)
# Summary calcs for plotting
oldSums = pd.concat([pd.Series(chemImp['chemImp'], name='chemImp'),
pd.Series(oldFuel['bioFuelImp'], name='fuelbio'),
pd.Series(oldFuel['fesFuelImp'], name='fuelfos'),
pd.Series(oldProd['totalCO2'], name='prodImp'),
pd.Series(oldProd['bioCO2'], name='prodbio'),
pd.Series(oldProd['fesCO2'], name='prodfos'),
pd.Series(oldEnergy['totalEnergy'], name='energy'),
pd.Series(oldEnergy['bioEnergy'], name='energybio'),
pd.Series(oldEnergy['fesEnergy'], name='energyfos'),
pd.Series(oldRsdl['bio+fos'], name='residImp'),
pd.Series(oldRsdl['bioImp'], name='residbio'),
pd.Series(oldRsdl['fossilImp'], name='residfos'),
pd.Series(oldEoL['bftEoL'], name='eolImp'),
pd.Series(oldEoL['bioEoL'], name='eolbio'),
pd.Series(oldEoL['fesTransEoL'], name='eolfos'),
pd.Series(oldProd['bioCO2'] + oldRsdl['bioImp'] + oldEoL['bioEoL'], name='bioCO2'),
pd.Series(oldTrans.sum(axis=1) + chemImp['chemImp'] + oldFuel['fuelImp'] +
oldProd['fesCO2'] + oldRsdl['fossilImp'] + oldEoL['fesTransEoL'], name='fossilCO2'),
pd.Series(oldProd['bioCO2'] + oldRsdl['bioImp'], name='g2gbio'),
pd.Series(oldProd['fesCO2'] + oldRsdl['fossilImp'] + oldTrans.sum(axis=1), name='g2gfos')], axis=1)
oldSums = pd.concat([oldSums, pd.Series(oldSums['bioCO2'] + oldSums['fossilCO2'], name='totalImp')], axis=1)
oldSums = pd.concat([oldSums, pd.Series(oldSums['totalImp'] / cls.prodLD.sum(), name='unitImp')], axis=1, sort=True)
newSums = pd.concat([pd.Series(chemImp['chemImp'], name='chemImp'),
pd.Series(newFuel['bioFuelImp'], name='fuelbio'),
pd.Series(newFuel['fesFuelImp'], name='fuelfos'),
pd.Series(newProd['totalCO2'], name='prodImp'),
pd.Series(newProd['bioCO2'], name='prodbio'),
pd.Series(newProd['fesCO2'], name='prodfos'),
pd.Series(newEnergy['totalEnergy'], name='energy'),
pd.Series(newEnergy['bioEnergy'], name='energybio'),
pd.Series(newEnergy['fesEnergy'], name='energyfos'),
pd.Series(newRsdl['bio+fos'], name='residImp'),
pd.Series(newRsdl['bioImp'], name='residbio'),
pd.Series(newRsdl['fossilImp'], name='residfos'),
pd.Series(newEoL['bftEoL'], name='eolImp'),
pd.Series(newEoL['bioEoL'], name='eolbio'),
pd.Series(newEoL['fesTransEoL'], name='eolfos'),
pd.Series(newProd['bioCO2'] + newRsdl['bioImp'] + newEoL['bioEoL'], name='bioCO2'),
pd.Series(newTrans.sum(axis=1) + chemImp['chemImp'] + newFuel['fuelImp'] +
newProd['fesCO2'] + newRsdl['fossilImp'] + newEoL['fesTransEoL'], name='fossilCO2'),
pd.Series(newProd['bioCO2'] + newRsdl['bioImp'], name='g2gbio'),
pd.Series(newProd['fesCO2'] + newRsdl['fossilImp'] + newTrans.sum(axis=1), name='g2gfos')],axis=1)
newSums = pd.concat([newSums, pd.Series(newSums['bioCO2'] + newSums['fossilCO2'], name='totalImp')], axis=1)
newSums = pd.concat([newSums, pd.Series(newSums['totalImp'] / cls.prodLD.sum(), name='unitImp')], axis=1, sort=True)
return {k: v for k,v in zip(['old','new','forest','trade','oldenergy','newenergy'],
[oldSums,newSums,forestGHG,exportImp,oldEnergy,newEnergy])}
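# --- Usage sketch (added for illustration; not part of the original module) ---
# All inputs below are hypothetical and must be prepared by the caller to match
# the shapes documented in __init__ (DataFrames of volumes, yields, and demand).
# emis = en_emissions('scenario.xlsx', fProd, rLevel, f2pYld, pulpYld,
#                     f2pVolNew, pbpVolNew, consCollNew, exportNew, demandNew)
# summary = emis.calculateEmissions()
# summary['new']['totalImp']  # total GHG impact by product for the new scenario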
``` |
{
"source": "jkwieser/personality-detection-text",
"score": 3
} |
#### File: jkwieser/personality-detection-text/essay.py
```python
import re
# a class for our Essays. Could be required for further development.
# to be extended in further versions
class Essay:
def __init__(self, text, cEXT, cNEU, cAGR, cCON, cOPN):
self.text = text
self.cEXT = cEXT
self.cNEU = cNEU
self.cAGR = cAGR
self.cCON = cCON
self.cOPN = cOPN
self.clean_text = self.remove_unwanted_chars(text)
self.words = self.get_all_words(text)
self.scentences = self.create_scentences(text)
self.filtered_text = ""
self.w2v_words = []
self.glove = {}
    # a method to split documents into sentences
    # possible further improvement: if ".", "!" or "?" is missing,
    # split long chunks of words into sentences (see paper: )
def create_scentences(self, text):
scentences = re.split("(?<=[.!?]) +", text)
return scentences
# as defined in the paper, all text is changed to lowercase
    # and all characters other than ASCII letters, digits, exclamation marks,
    # and single and double quotation marks are removed # I also added "?"
def remove_unwanted_chars(self, text):
allowed_chars = """ 0123456789abcdefghijklmnopqrstuvwxyz!"".?"""
clean_text = text.lower()
for c in clean_text:
if allowed_chars.find(c) == -1:
clean_text = clean_text.replace(c, "")
else:
pass
return clean_text
def get_all_words(self, text):
regex = re.compile("""[^A-Za-z ]""")
text = regex.sub('', text)
text = text.split()
return text
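# --- Usage sketch (added for illustration; not part of the original module) ---
# The sample text and personality labels below are made up.
if __name__ == "__main__":
    sample = Essay("I love hiking. Do you? It was great!", "y", "n", "y", "n", "y")
    print(sample.scentences)  # ['I love hiking.', 'Do you?', 'It was great!']
    print(sample.clean_text)  # 'i love hiking. do you? it was great!'
    print(sample.words[:3])   # ['I', 'love', 'hiking']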
``` |
{
"source": "jkwill87/mapi",
"score": 2
} |
#### File: mapi/mapi/providers.py
```python
import re
from abc import abstractmethod
from datetime import datetime as dt
from os import environ
from mapi import log
from mapi.compatibility import AbstractClass, ustr
from mapi.endpoints import *
from mapi.exceptions import (
MapiException,
MapiNotFoundException,
MapiProviderException,
)
from mapi.metadata import *
from mapi.utils import year_expand
__all__ = [
"API_ALL",
"API_MOVIE",
"API_TELEVISION",
"OMDb",
"Provider",
"provider_factory",
"TMDb",
"TVDb",
]
API_TELEVISION = {"tvdb"}
API_MOVIE = {"tmdb", "omdb"}
API_ALL = API_TELEVISION | API_MOVIE
class Provider(AbstractClass):
"""ABC for Providers, high-level interfaces for metadata media providers.
"""
def __init__(self, **options):
"""Initializes the provider."""
cls_name = self.__class__.__name__
self._api_key = options.get(
"api_key", environ.get("API_KEY_%s" % cls_name.upper())
)
self._cache = options.get("cache", True)
@abstractmethod
def search(self, id_key=None, **parameters):
pass
@property
def api_key(self):
return self._api_key
@property
def cache(self):
return self._cache
def has_provider(provider):
"""Verifies that module has support for requested API provider."""
return provider.lower() in API_ALL
def has_provider_support(provider, media_type):
"""Verifies if API provider has support for requested media type."""
if provider.lower() not in API_ALL:
return False
provider_const = "API_" + media_type.upper()
return provider in globals().get(provider_const, {})
def provider_factory(provider, **options):
"""Factory function for DB Provider concrete classes."""
providers = {"tmdb": TMDb, "tvdb": TVDb, "omdb": OMDb}
try:
return providers[provider.lower()](**options)
except KeyError:
msg = "Attempted to initialize non-existing DB Provider"
log.error(msg)
raise MapiException(msg)
class OMDb(Provider):
"""Queries the OMDb API.
"""
def __init__(self, **options):
super(OMDb, self).__init__(**options)
if not self.api_key:
raise MapiProviderException("OMDb require API key")
def search(self, id_key=None, **parameters):
title = parameters.get("title")
year = parameters.get("year")
id_imdb = id_key or parameters.get("id_imdb")
if id_imdb:
results = self._lookup_movie(id_imdb)
elif title:
results = self._search_movie(title, year)
else:
raise MapiNotFoundException
for result in results:
yield result
def _lookup_movie(self, id_imdb):
response = omdb_title(self.api_key, id_imdb, cache=self._cache)
try:
date = dt.strptime(response["Released"], "%d %b %Y").strftime(
"%Y-%m-%d"
)
except (KeyError, ValueError):
if response.get("Year") in (None, "N/A"):
date = None
else:
date = "%s-01-01" % response["Year"]
meta = MetadataMovie(
title=response["Title"],
date=date,
synopsis=response["Plot"],
id_imdb=id_imdb,
)
if meta["synopsis"] == "N/A":
del meta["synopsis"]
yield meta
def _search_movie(self, title, year):
year_from, year_to = year_expand(year)
found = False
page = 1
page_max = 10 # each page yields a maximum of 10 results
while True:
try:
response = omdb_search(
api_key=self.api_key,
media_type="movie",
query=title,
page=page,
cache=self.cache,
)
except MapiNotFoundException:
break
for entry in response["Search"]:
if year_from <= int(entry["Year"]) <= year_to:
for result in self._lookup_movie(entry["imdbID"]):
yield result
found = True
if page >= page_max:
break
page += 1
if not found:
raise MapiNotFoundException
class TMDb(Provider):
"""Queries the TMDb API.
"""
def __init__(self, **options):
super(TMDb, self).__init__(**options)
if not self.api_key:
raise MapiProviderException("TMDb requires an API key")
def search(self, id_key=None, **parameters):
"""Searches TMDb for movie metadata."""
id_tmdb = id_key or parameters.get("id_tmdb")
id_imdb = parameters.get("id_imdb")
title = parameters.get("title")
year = parameters.get("year")
if id_tmdb:
results = self._search_id_tmdb(id_tmdb)
elif id_imdb:
results = self._search_id_imdb(id_imdb)
elif title:
results = self._search_title(title, year)
else:
raise MapiNotFoundException
for result in results:
yield result
def _search_id_imdb(self, id_imdb):
response = tmdb_find(
self.api_key, "imdb_id", id_imdb, cache=self.cache
)["movie_results"][0]
yield MetadataMovie(
title=response["title"],
date=response["release_date"],
synopsis=response["overview"],
media="movie",
id_tmdb=response["id"],
)
def _search_id_tmdb(self, id_tmdb):
assert id_tmdb
response = tmdb_movies(self.api_key, id_tmdb, cache=self.cache)
yield MetadataMovie(
title=response["title"],
date=response["release_date"],
synopsis=response["overview"],
media="movie",
id_tmdb=ustr(id_tmdb),
)
def _search_title(self, title, year):
assert title
found = False
year_from, year_to = year_expand(year)
page = 1
page_max = 5 # each page yields a maximum of 20 results
while True:
response = tmdb_search_movies(
self.api_key, title, year, page=page, cache=self.cache
)
for entry in response["results"]:
try:
meta = MetadataMovie(
title=entry["title"],
date=entry["release_date"],
synopsis=entry["overview"],
id_tmdb=ustr(entry["id"]),
)
except ValueError:
continue
if year_from <= int(meta["year"]) <= year_to:
yield meta
found = True
if page == response["total_pages"]:
break
elif page >= page_max:
break
page += 1
if not found:
raise MapiNotFoundException
class TVDb(Provider):
"""Queries the TVDb API.
"""
def __init__(self, **options):
super(TVDb, self).__init__(**options)
if not self.api_key:
raise MapiProviderException("TVDb requires an API key")
self.token = "" if self.cache else self._login()
def _login(self):
return tvdb_login(self.api_key)
def search(self, id_key=None, **parameters):
"""Searches TVDb for movie metadata.
TODO: Consider making parameters for episode ids
"""
episode = parameters.get("episode")
id_tvdb = id_key or parameters.get("id_tvdb")
id_imdb = parameters.get("id_imdb")
season = parameters.get("season")
series = parameters.get("series")
date = parameters.get("date")
date_fmt = r"(19|20)\d{2}(-(?:0[1-9]|1[012])(-(?:[012][1-9]|3[01]))?)?"
try:
if id_tvdb and date:
results = self._search_tvdb_date(id_tvdb, date)
elif id_tvdb:
results = self._search_id_tvdb(id_tvdb, season, episode)
elif id_imdb:
results = self._search_id_imdb(id_imdb, season, episode)
elif series and date:
if not re.match(date_fmt, date):
raise MapiProviderException(
"Date format must be YYYY-MM-DD"
)
results = self._search_series_date(series, date)
elif series:
results = self._search_series(series, season, episode)
else:
raise MapiNotFoundException
for result in results:
yield result
except MapiProviderException:
if not self.token:
log.info(
"Result not cached; logging in and reattempting search"
)
self.token = self._login()
for result in self.search(id_key, **parameters):
yield result
else:
raise
def _search_id_imdb(self, id_imdb, season=None, episode=None):
series_data = tvdb_search_series(
self.token, id_imdb=id_imdb, cache=self.cache
)
id_tvdb = series_data["data"][0]["id"]
return self._search_id_tvdb(id_tvdb, season, episode)
def _search_id_tvdb(self, id_tvdb, season=None, episode=None):
assert id_tvdb
found = False
series_data = tvdb_series_id(self.token, id_tvdb, cache=self.cache)
page = 1
while True:
episode_data = tvdb_series_id_episodes_query(
self.token,
id_tvdb,
episode,
season,
page=page,
cache=self.cache,
)
for entry in episode_data["data"]:
try:
yield MetadataTelevision(
series=series_data["data"]["seriesName"],
season=ustr(entry["airedSeason"]),
episode=ustr(entry["airedEpisodeNumber"]),
date=entry["firstAired"],
title=entry["episodeName"].split(";", 1)[0],
synopsis=(entry["overview"] or "")
.replace("\r\n", "")
.replace(" ", "")
.strip(),
media="television",
id_tvdb=ustr(id_tvdb),
)
found = True
except (AttributeError, ValueError):
continue
if page == episode_data["links"]["last"]:
break
page += 1
if not found:
raise MapiNotFoundException
def _search_series(self, series, season, episode):
assert series
found = False
series_data = tvdb_search_series(self.token, series, cache=self.cache)
for series_id in [entry["id"] for entry in series_data["data"][:5]]:
try:
for data in self._search_id_tvdb(series_id, season, episode):
found = True
yield data
except MapiNotFoundException:
continue # may not have requested episode or may be banned
if not found:
raise MapiNotFoundException
def _search_tvdb_date(self, id_tvdb, date):
found = False
for meta in self._search_id_tvdb(id_tvdb):
if meta["date"] and meta["date"].startswith(date):
found = True
yield meta
if not found:
raise MapiNotFoundException
def _search_series_date(self, series, date):
assert series and date
series_data = tvdb_search_series(self.token, series, cache=self.cache)
tvdb_ids = [entry["id"] for entry in series_data["data"]][:5]
found = False
for tvdb_id in tvdb_ids:
try:
for result in self._search_tvdb_date(tvdb_id, date):
yield result
found = True
except MapiNotFoundException:
continue
if not found:
raise MapiNotFoundException
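# --- Usage sketch (added for illustration; not part of the original module) ---
# Requires a valid OMDb API key and network access; the key below is a placeholder.
if __name__ == "__main__":
    provider = provider_factory("omdb", api_key="YOUR_OMDB_API_KEY")
    for result in provider.search(title="Blade Runner", year=1982):
        print(result)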
```
#### File: mapi/mapi/utils.py
```python
import random
import re
from os import path
from sys import version_info
import requests_cache
from appdirs import user_cache_dir
from requests.adapters import HTTPAdapter
from mapi import log
from mapi.compatibility import ustr
__all__ = [
"AGENT_ALL",
"AGENT_CHROME",
"AGENT_EDGE",
"AGENT_IOS",
"CACHE_PATH",
"clean_dict",
"clear_cache",
"d2l",
"get_session",
"get_user_agent",
"request_json",
"year_expand",
"year_parse",
]
AGENT_CHROME = (
"Mozilla/5.0 (iPhone; CPU iPhone OS 10_0_1 like Mac OS X) AppleWebKit/601.1"
" (KHTML, like Gecko) CriOS/53.0.2785.86 Mobile/14A403 Safari/601.1.46"
)
AGENT_EDGE = (
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like "
"Gecko) Chrome/51.0.2704.79 Safari/537.36 Edge/14.14393"
)
AGENT_IOS = (
"Mozilla/5.0 (iPhone; CPU iPhone OS 10_0_1 like Mac OS X) "
"AppleWebKit/602.1.50 (KHTML, like Gecko) Version/10.0 Mobile/14A403 "
"Safari/602.1"
)
AGENT_ALL = (AGENT_CHROME, AGENT_EDGE, AGENT_IOS)
CACHE_PATH = path.join(
user_cache_dir(), "mapi-py%d.sqlite" % version_info.major
)
def clean_dict(target_dict, whitelist=None):
"""Convenience function that removes a dicts keys that have falsy values."""
assert isinstance(target_dict, dict)
return {
ustr(k).strip(): ustr(v).strip()
for k, v in target_dict.items()
if v not in (None, Ellipsis, [], (), "")
and (not whitelist or k in whitelist)
}
def clear_cache():
"""Clears requests-cache cache."""
get_session().cache.clear()
def d2l(d):
"""Convenience function that converts a dict into a sorted tuples list."""
return sorted([(k, v) for k, v in d.items()])
def get_session():
"""Convenience function that returns request-cache session singleton."""
if not hasattr(get_session, "session"):
get_session.session = requests_cache.CachedSession(
cache_name=CACHE_PATH.rstrip(".sqlite"),
expire_after=518400, # 6 days
)
adapter = HTTPAdapter(max_retries=3)
get_session.session.mount("http://", adapter)
get_session.session.mount("https://", adapter)
return get_session.session
def get_user_agent(platform=None):
"""Convenience function that looks up a user agent string, random if N/A."""
if isinstance(platform, ustr):
platform = platform.upper()
return {"chrome": AGENT_CHROME, "edge": AGENT_EDGE, "ios": AGENT_IOS}.get(
platform, random.choice(AGENT_ALL)
)
def request_json(
url, parameters=None, body=None, headers=None, cache=True, agent=None
):
"""
Queries a url for json data.
    Note: responses are cached transparently for six days via the
    requests_cache CachedSession returned by get_session.
"""
assert url
session = get_session()
log.info("-" * 80)
log.info("url: %s", url)
if isinstance(headers, dict):
headers = clean_dict(headers)
else:
headers = dict()
if isinstance(parameters, dict):
parameters = d2l(clean_dict(parameters))
if body:
method = "POST"
headers["content-type"] = "application/json"
headers["user-agent"] = get_user_agent(agent)
headers["content-length"] = ustr(len(body))
else:
method = "GET"
headers["user-agent"] = get_user_agent(agent)
initial_cache_state = session._is_cache_disabled # yes, i'm a bad person
try:
session._is_cache_disabled = not cache
response = session.request(
url=url,
params=parameters,
json=body,
headers=headers,
method=method,
timeout=1,
)
status = response.status_code
content = response.json() if status // 100 == 2 else None
cache = getattr(response, "from_cache", False)
except Exception as e:
content = None
status = 500
log.debug(e, exc_info=True)
else:
log.debug("method: %s", method)
log.debug("headers: %r", headers)
log.debug("parameters: %r", parameters)
log.debug("cache: %r", cache)
log.info("status: %d", status)
log.debug("content: %s", content)
finally:
session._is_cache_disabled = initial_cache_state
return status, content
def year_parse(s):
"""Parses a year from a string."""
regex = r"((?:19|20)\d{2})(?:$|[-/]\d{2}[-/]\d{2})"
try:
year = int(re.findall(regex, ustr(s))[0])
except IndexError:
year = None
return year
def year_expand(s):
"""Parses a year or dash-delimited year range."""
regex = r"^((?:19|20)\d{2})?(\s*-\s*)?((?:19|20)\d{2})?$"
try:
start, dash, end = re.match(regex, ustr(s)).groups()
start = start or 1900
end = end or 2099
except AttributeError:
return 1900, 2099
return (int(start), int(end)) if dash else (int(start), int(start))
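# --- Quick sanity checks (added for illustration; not part of the original module) ---
if __name__ == "__main__":
    assert year_parse("2015-08-01") == 2015
    assert year_parse("1977") == 1977
    assert year_expand("2000-2010") == (2000, 2010)
    assert year_expand("2015") == (2015, 2015)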
```
#### File: tests/metadata/test_metadata_television.py
```python
import pytest
def test_str(television_metadata):
s = str(television_metadata)
assert s == "Adventure Time - 05x03 - Five More Short Graybles"
def test_format(television_metadata):
s = format(
television_metadata, "{series} - S{season:02}E{episode:02} - {title}"
)
assert s == "Adventure Time - S05E03 - Five More Short Graybles"
def test_format__missing_episode(television_metadata):
television_metadata["episode"] = None
s = str(television_metadata)
assert s == "Adventure Time - 05x - Five More Short Graybles"
def test_format__missing_title(television_metadata):
television_metadata["title"] = None
s = str(television_metadata)
assert s == "Adventure Time - 05x03"
def test_format__multi_episode(television_metadata):
television_metadata["episode"] = (3, 4)
assert isinstance(television_metadata["episode"], int)
s = str(television_metadata)
assert s == "Adventure Time - 05x03 - Five More Short Graybles"
def test_invalid_media(television_metadata):
with pytest.raises(ValueError):
television_metadata["media"] = "yolo"
def test_invalid_field(television_metadata):
with pytest.raises(KeyError):
television_metadata["yolo"] = "hi"
```
#### File: tests/providers/test_providers_omdb.py
```python
import pytest
from mock import patch
from mapi.exceptions import MapiNotFoundException, MapiProviderException
from mapi.providers import OMDb
from tests import JUNK_TEXT, MOVIE_META
def test_omdb_provider__api_key__missing():
with patch.dict("os.environ", {}, clear=True):
with pytest.raises(MapiProviderException):
OMDb()
def test_omdb_provider__api_key__env_fallback_ok():
with patch.dict("os.environ", {"API_KEY_OMDB": JUNK_TEXT}, clear=True):
OMDb() # should not raise exception
@pytest.mark.usefixtures("omdb_provider")
@pytest.mark.parametrize("meta", MOVIE_META)
def test_omdb_provider__search__id_imdb(meta, omdb_provider):
results = list(omdb_provider.search(id_imdb=meta["id_imdb"]))
assert len(results) == 1
result = results[0]
assert meta["title"] == result["title"]
@pytest.mark.usefixtures("omdb_provider")
def test_omdb_provider__search__id_imdb__no_hits(omdb_provider):
with pytest.raises(MapiNotFoundException):
next(omdb_provider.search(id_imdb=JUNK_TEXT, cache=False))
@pytest.mark.usefixtures("omdb_provider")
@pytest.mark.parametrize("meta", MOVIE_META)
def test_omdb_provider__search__title(meta, omdb_provider):
found = False
results = list(omdb_provider.search(title=meta["title"]))
for result in results:
if result["id_imdb"] == meta["id_imdb"]:
found = True
break
assert found is True
@pytest.mark.usefixtures("omdb_provider")
def test_omdb_provider__search__no_hits(omdb_provider):
with pytest.raises(MapiNotFoundException):
next(omdb_provider.search(title=JUNK_TEXT, cache=False))
@pytest.mark.usefixtures("omdb_provider")
def test_omdb_provider__search__missing(omdb_provider):
with pytest.raises(MapiNotFoundException):
next(omdb_provider.search())
```
#### File: tests/providers/test_providers.py
```python
import pytest
from mapi.exceptions import MapiException
from mapi.providers import (
TMDb,
TVDb,
has_provider,
has_provider_support,
provider_factory,
)
def test_has_provider__true():
assert has_provider("tmdb") is True
assert has_provider("tvdb") is True
def test_has_provider__missing():
assert has_provider("imdb") is False
def test_has_provider_support__true():
assert has_provider_support("tmdb", "movie") is True
assert has_provider_support("tvdb", "television") is True
def test_has_provider_support__missing():
assert has_provider_support("tmdb", "television") is False
assert has_provider_support("tvdb", "movie") is False
def test_has_provider_support__valid_mtype():
assert has_provider_support("imdb", "movie") is False
def test_has_provider_support__invalid_mtype():
assert has_provider_support("tmdb", "media_type_subtitle") is False
@pytest.mark.usefixtures("tmdb_api_key")
def test_provider_factory__tmdb(tmdb_api_key):
client = provider_factory("tmdb", api_key=tmdb_api_key)
assert isinstance(client, TMDb)
@pytest.mark.usefixtures("tvdb_api_key")
def test_provider_factory__tvdb(tvdb_api_key):
client = provider_factory("tvdb", api_key=tvdb_api_key)
assert isinstance(client, TVDb)
def test_non_existant():
with pytest.raises(MapiException):
provider_factory("yolo")
```
#### File: tests/providers/test_providers_tvdb.py
```python
import pytest
from mapi.exceptions import MapiProviderException
from tests import TELEVISION_META
@pytest.mark.usefixtures("tmdb_provider")
@pytest.mark.parametrize("meta", TELEVISION_META)
def test_tvdb_provider__search__id_tvdb(tvdb_provider, meta):
results = list(tvdb_provider.search(id_tvdb=meta["id_tvdb"]))
assert meta["id_tvdb"] == results[0]["id_tvdb"]
@pytest.mark.usefixtures("tmdb_provider")
@pytest.mark.parametrize("meta", TELEVISION_META)
def test_tvdb_provider__search__id_tvdb_season(tvdb_provider, meta):
results = tvdb_provider.search(id_tvdb=meta["id_tvdb"], season=1)
all_season_1 = all(entry["season"] == 1 for entry in results)
assert all_season_1 is True
@pytest.mark.usefixtures("tmdb_provider")
@pytest.mark.parametrize("meta", TELEVISION_META)
def test_tvdb_provider__search__id_tvdb_episode(tvdb_provider, meta):
results = tvdb_provider.search(id_tvdb=meta["id_tvdb"], episode=2)
all_episode_2 = all(entry["episode"] == 2 for entry in results)
assert all_episode_2 is True
@pytest.mark.usefixtures("tmdb_provider")
@pytest.mark.parametrize("meta", TELEVISION_META)
def test_tvdb_provider__search__id_tvdb_season_episode(tvdb_provider, meta):
results = list(
tvdb_provider.search(id_tvdb=meta["id_tvdb"], season=1, episode=3)
)
assert len(results) == 1
assert results[0]["season"] == 1
assert results[0]["episode"] == 3
@pytest.mark.usefixtures("tmdb_provider")
@pytest.mark.parametrize("meta", TELEVISION_META)
def test_tvdb_provider__search__id_imdb(tvdb_provider, meta):
found = False
results = tvdb_provider.search(id_imdb=meta["id_imdb"])
for result in results:
if result["id_tvdb"] == meta["id_tvdb"]:
found = True
break
assert found is True
@pytest.mark.usefixtures("tmdb_provider")
@pytest.mark.parametrize("meta", TELEVISION_META)
def test_tvdb_provider__search__id_imdb_season(tvdb_provider, meta):
results = tvdb_provider.search(id_imdb=meta["id_imdb"], season=1)
all_season_1 = all(entry["season"] == 1 for entry in results)
assert all_season_1 is True
@pytest.mark.usefixtures("tmdb_provider")
@pytest.mark.parametrize("meta", TELEVISION_META)
def test_tvdb_provider__search__id_imdb_episode(tvdb_provider, meta):
results = tvdb_provider.search(id_imdb=meta["id_imdb"], episode=2)
all_episode_2 = all(entry["episode"] == 2 for entry in results)
assert all_episode_2 is True
@pytest.mark.usefixtures("tmdb_provider")
@pytest.mark.parametrize("meta", TELEVISION_META)
def test_tvdb_provider__search__id_imdb_season_episode(tvdb_provider, meta):
results = list(
tvdb_provider.search(id_imdb=meta["id_imdb"], season=1, episode=3)
)
assert results[0]["season"] == 1
assert results[0]["episode"] == 3
@pytest.mark.usefixtures("tmdb_provider")
@pytest.mark.parametrize("meta", TELEVISION_META)
def test_tvdb_provider__search__series(tvdb_provider, meta):
found = False
results = tvdb_provider.search(series=meta["series"])
for result in results:
if result["id_tvdb"] == meta["id_tvdb"]:
found = True
break
assert found is True
@pytest.mark.usefixtures("tmdb_provider")
def test_tvdb_provider__search__series_deep(tvdb_provider):
results = tvdb_provider.search(
series="House Rules (au)", season=6, episode=6
)
assert any(r["id_tvdb"] == "269795" for r in results)
@pytest.mark.usefixtures("tmdb_provider")
@pytest.mark.parametrize("meta", TELEVISION_META)
def test_tvdb_provider__search__title_season(tvdb_provider, meta):
results = tvdb_provider.search(series=meta["series"], season=1)
all_season_1 = all(entry["season"] == 1 for entry in results)
assert all_season_1 is True
@pytest.mark.usefixtures("tmdb_provider")
@pytest.mark.parametrize("meta", TELEVISION_META)
def test_tvdb_provider__search__title_season_episode(tvdb_provider, meta):
results = list(
tvdb_provider.search(series=meta["series"], season=1, episode=3)
)
assert results[0]["season"] == 1
assert results[0]["episode"] == 3
@pytest.mark.usefixtures("tmdb_provider")
def test_tvdb_provider__search_series_date__year(tvdb_provider):
results = list(
tvdb_provider.search(series="The Daily Show", date="2017-11-01")
)
assert len(results) == 1
assert results[0]["title"] == "<NAME>"
@pytest.mark.usefixtures("tmdb_provider")
def test_tvdb_provider__search_series_date__partial(tvdb_provider):
results = list(tvdb_provider.search(series="The Daily Show", date="2017"))
assert results
assert any(r["title"] == "<NAME>" for r in results)
def test_tvdb_provider__search_series_date__invalid_format(tvdb_provider):
with pytest.raises(MapiProviderException):
next(tvdb_provider.search(series="The Daily Show", date="13"))
``` |
{
"source": "jkwill87/teletype",
"score": 3
} |
#### File: teletype/io/common.py
```python
from __future__ import print_function
from re import sub
from sys import stdout
from teletype import codes
try:
input = raw_input
except NameError:
pass
__all__ = [
"erase_lines",
"erase_screen",
"hide_cursor",
"move_cursor",
"show_cursor",
"strip_format",
"style_format",
"style_print",
"style_input",
]
def erase_lines(n=1):
""" Erases n lines from the screen and moves the cursor up to follow
"""
for _ in range(n):
print(codes.CURSOR["up"], end="")
print(codes.CURSOR["eol"], end="")
stdout.flush()
def erase_screen():
""" Clears all text from the screen
"""
print(codes.CURSOR["clear"], end="")
stdout.flush()
def move_cursor(cols=0, rows=0):
""" Moves the cursor the given number of columns and rows
The cursor is moved right when cols is positive and left when negative.
    The cursor is moved down when rows is positive and up when negative.
"""
if cols == 0 and rows == 0:
return
commands = ""
commands += codes.CURSOR["up" if rows < 0 else "down"] * abs(rows)
commands += codes.CURSOR["left" if cols < 0 else "right"] * abs(cols)
if commands:
print(commands, end="")
stdout.flush()
def show_cursor():
""" Shows the cursor indicator
"""
print(codes.CURSOR["show"], end="")
stdout.flush()
def hide_cursor():
""" Hides the cursor indicator; remember to call show_cursor before exiting
"""
print(codes.CURSOR["hide"], end="")
stdout.flush()
def strip_format(text):
""" Returns text with all control sequences removed
"""
return sub(r"(\x9B|\x1B\[)[0-?]*[ -/]*[@-~]", "", text)
def style_format(text, style, reset=True):
""" Wraps texts in terminal control sequences
Style can be passed as either a collection or space delimited string.
Valid styles can be found in the codes module. Invalid or unsuported styles
will just be ignored.
"""
if not style:
return text
if isinstance(style, str):
style = style.split(" ")
prefix = ""
for s in style:
prefix += codes.COLOURS.get(s, "")
prefix += codes.HIGHLIGHTS.get(s, "")
prefix += codes.MODES.get(s, "")
if reset:
text += codes.MODES["reset"]
return prefix + text
def style_print(*values, **options):
""" A convenience function that applies style_format to text before printing
"""
style = options.pop("style", None)
values = tuple(style_format(value, style) for value in values)
print(*values, **options)
def style_input(prompt=None, style=None):
""" A convenience function that applies style_format before get user input
"""
if style:
prompt = style_format(prompt, style)
return input(prompt)
```
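A short usage sketch of the helpers above (illustrative only; it assumes `teletype.io` re-exports the names in `__all__`, and that the style names used exist in the `codes` module, since unknown styles are simply ignored):
```python
from teletype.io import erase_lines, hide_cursor, show_cursor, style_format, style_print

hide_cursor()
style_print("downloading", "metadata", style="bold")  # prints both words in bold
print(style_format("done", "green"))                  # format a string without printing it
erase_lines(2)                                        # clear the two lines printed above
show_cursor()
```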
#### File: teletype/io/posix.py
```python
from __future__ import print_function
from sys import stdin
from termios import TCSADRAIN, tcgetattr, tcsetattr
from tty import setraw
from teletype import codes
__all__ = ["get_key"]
def get_key(raw=False):
""" Gets a single key from stdin
"""
file_descriptor = stdin.fileno()
state = tcgetattr(file_descriptor)
chars = []
try:
setraw(stdin.fileno())
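        # arrow/function keys arrive as multi-byte escape sequences
        # (ESC == 27, '[' == 91, '3' == 51 for delete), so up to three bytes are
        # read and the joined sequence is looked up in codes.KEYS_FLIPPED below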
for i in range(3):
char = stdin.read(1)
ordinal = ord(char)
chars.append(char)
if i == 0 and ordinal != 27:
break
elif i == 1 and ordinal != 91:
break
elif i == 2 and ordinal != 51:
break
finally:
tcsetattr(file_descriptor, TCSADRAIN, state)
result = "".join(chars)
return result if raw else codes.KEYS_FLIPPED.get(result, result)
```
#### File: teletype/io/windows.py
```python
from msvcrt import getch, kbhit # pylint: disable=import-error
from teletype.codes import KEYS_FLIPPED, SCAN_CODES
__all__ = ["get_key"]
def get_key(raw=False):
""" Gets a single key from stdin
"""
while True:
try:
if kbhit():
char = getch()
ordinal = ord(char)
if ordinal in (0, 224):
extension = ord(getch())
scan_code = ordinal + extension * 256
result = SCAN_CODES[scan_code]
break
else:
result = char.decode()
break
except KeyboardInterrupt:
return "ctrl-c"
return result if raw else KEYS_FLIPPED.get(result, result)
``` |
{
"source": "jkwon045/CS170Project01",
"score": 4
} |
#### File: jkwon045/CS170Project01/8PuzzleSolver.py
```python
import copy
GOALSTATE = [['1', '2', '3'], ['4', '5', '6'], ['7', '8', ' ']]
INITIALSTATE = [['8', '7', '1'], ['6', ' ', '2'], ['5', '4', '3']]
EXPANDEDSTATES = list()
NUMEXPANSIONS = 0
MAXQUEUESIZE = 0
DIMENSIONS = 3
EXPANDFLAG = False
#A class to define a point on the puzzle
class Point:
_x = 0
_y = 0
def __init__ ( self, x = 0, y = 0 ):
self._x = x
self._y = y
def set( self, x = None, y = None ):
if x is not None:
self._x = x
if y is not None:
self._y = y
def getX ( self ):
return self._x
def getY ( self ):
return self._y
def disp ( self ):
print( self._x, self._y )
#A class to define the 8 puzzle itself, or any arbitrary puzzle if needed
class Puzzle:
#init should be a 2d array
def __init__( self, dim = DIMENSIONS, init = INITIALSTATE ):
self._state = list(init)
for i in range(dim):
for j in range(dim):
if ( init[i][j] == ' ' ):
self._blank = Point(i, j)
#Checks if one puzzle state is equal to another
def __eq__( self, other ):
if ( self._blank != other._blank ):
return False
for i in range ( DIMENSIONS ):
for j in range ( DIMENSIONS ):
if ( self._state[i][j] != other._state[i][j] ):
return False
return True
def getState( self ):
return self._state
def getBlank( self ):
return (self._blank.getX(), self._blank.getY())
#Moves the location of the blank left
def moveBlankLeft( self ):
cX = self._blank.getX() #the current X
cY = self._blank.getY() #the current Y
newState = list(self._state)
if ( cY > 0 ):
newState[cX][cY], newState[cX][cY-1] = newState[cX][cY-1], newState[cX][cY]
self._blank.set( x = cX, y = (cY - 1 ))
#Moves the location of the blank Right
def moveBlankRight( self ):
cX = self._blank.getX() #the current X
cY = self._blank.getY() #the current Y
newState = list(self._state)
if ( cY < DIMENSIONS-1 ):
newState[cX][cY], newState[cX][cY+1] = newState[cX][cY+1], newState[cX][cY]
self._blank.set( y = (cY + 1) )
def moveBlankUp( self ):
cX = self._blank.getX() #the current X
cY = self._blank.getY() #the current Y
if ( cX > 0 ):
self._state[cX][cY], self._state[cX-1][cY] = self._state[cX-1][cY], self._state[cX][cY]
self._blank.set( x = (cX - 1) )
def moveBlankDown( self ):
cX = self._blank.getX() #the current X
cY = self._blank.getY() #the current Y
if ( cX < DIMENSIONS-1 ):
self._state[cX][cY], self._state[cX+1][cY] = self._state[cX+1][cY], self._state[cX][cY]
self._blank.set( x = (cX + 1) )
def disp( self ):
for i in self._state:
print(i, '\n')
#use copy.deepcopy in order to create new instances of class :)
class Node:
def __init__(self, state, g_n = 0):
self._state = copy.deepcopy(state)
self._g_n = g_n
self._f_n = g_n
def __eq__ ( self, other ):
return self._state == other._state
def getState(self):
return self._state.getState()
def getNumMoves(self):
return self._g_n
def getWeight(self):
return self._f_n
def setWeight(self, val):
self._f_n = val
def setPrevState( self, state ):
self._prevState = copy.deepcopy(state)
def moveBlankRight( self ):
self._state.moveBlankRight()
self._g_n += 1
return self
def moveBlankLeft( self ):
self._state.moveBlankLeft()
self._g_n += 1
return self
def moveBlankUp( self ):
self._state.moveBlankUp()
self._g_n += 1
return self
def moveBlankDown(self ):
self._state.moveBlankDown()
self._g_n += 1
return self
def goalTest( self ):
return self._state.getState() == GOALSTATE
def disp( self ):
self._state.disp()
def hasBeenExpanded(node):
for i in range(len(EXPANDEDSTATES)):
if ( node.getState() == EXPANDEDSTATES[i].getState() ):
return True
EXPANDEDSTATES.append(copy.deepcopy(node))
return False
def dequeue( nodesList ):
minWeight = nodesList[0].getWeight()
minloc = 0
for i in range ( 1, len( nodesList )):
if( nodesList[i].getWeight() < minWeight ):
            minloc = i
            minWeight = nodesList[i].getWeight()
return minloc
def expand( node ):
possibleMoves = list()
if(EXPANDFLAG):
print("\nNow we are expanding: ")
node.disp()
print("It has a value of g(n): ", node.getNumMoves(), "and h(n): ", node.getWeight() - node.getNumMoves())
global NUMEXPANSIONS
up = copy.deepcopy(node).moveBlankUp()
down = copy.deepcopy(node).moveBlankDown()
left = copy.deepcopy(node).moveBlankLeft()
right = copy.deepcopy(node).moveBlankRight()
possibleMoves.append(up)
possibleMoves.append(down)
possibleMoves.append(left)
possibleMoves.append(right)
indexToRemove = list()
for i in range(len(possibleMoves)):
appended = False
if (possibleMoves[i].getState() == node.getState()):
indexToRemove.append(i)
elif ( hasBeenExpanded(possibleMoves[i]) ):
indexToRemove.append(i)
possibleMoves[i].setPrevState(node.getState())
    #indices are already in ascending order; reverse them so deletions don't shift later positions
if(len(indexToRemove) > 0):
indexToRemove.reverse()
for i in range( len( indexToRemove ) ):
del possibleMoves[indexToRemove[i]]
if(len(possibleMoves) > 0):
NUMEXPANSIONS+=1
return possibleMoves
def queueingFunction( nodesToEnqueue, index, nodesList , heurestic ):
global MAXQUEUESIZE
if ( nodesList is None ):
nodesList = list()
for i in range(len(nodesToEnqueue)):
nodesToEnqueue[i].setWeight(nodesToEnqueue[i].getNumMoves() + heurestic(nodesToEnqueue[i]))
nodesList.insert(index, nodesToEnqueue[i])
index+=1
if( len(nodesList) > MAXQUEUESIZE ):
MAXQUEUESIZE = len(nodesList)
return nodesList
#general best-first search: dequeue the lowest-f(n) node, goal-test it, expand it, and enqueue the children with f(n) = g(n) + h(n)
def generalSearch( problem, heurestic):
nodes = list()
nodes.append(Node(state = problem))
while( True ):
if( len(nodes) == 0 ):
print('fail')
return None
index = dequeue(nodes)
a = copy.deepcopy(nodes.pop(index))
if ( a.goalTest() ):
print("Goal!")
return a
expanded = expand(a)
nodes = queueingFunction(expanded, index, nodes, heurestic)
def uniformSearchHeuristic( problem ):
return 0
def misplacedTilesHeuristic( problem ):
misplacedTiles = 0
check = problem.getState()
for i in range(DIMENSIONS):
for j in range(DIMENSIONS):
if ( check[i][j] != GOALSTATE[i][j] and GOALSTATE[i][j] != ' '):
misplacedTiles+=1
return misplacedTiles
def absVal( a ):
if( a < 0 ):
a *= -1
return a
def manhattanDistanceHeuristic( problem ):
totalDistance = 0
check = problem.getState()
for i in range(DIMENSIONS):
for j in range(DIMENSIONS):
if( check[i][j] != ' ' ):
goalX = (int(check[i][j])-1) % DIMENSIONS
goalY = (int(check[i][j])-1) // DIMENSIONS
distX = absVal(goalX - j)
distY = absVal(goalY - i)
totalDistance+= (distX + distY)
return absVal(totalDistance)
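# Worked example (hypothetical board, for illustration only): for the state
# [['1','2','3'],['4','5','6'],['7',' ','8']] the tile '8' is one column away from
# its goal, so manhattanDistanceHeuristic and misplacedTilesHeuristic both return 1.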
def choosePuzzle():
response = -1
    while( response > 2 or response < 1 ):
        response = int(input("Press 1 to use the default puzzle, or 2 to create your own\n"))
        if( response > 2 or response < 1):
print(response, " is an invalid option\n")
if( response == 1 ):
return Puzzle()
else:
inputs = list()
print("Please use spaces to separate each value, and use 0 as your blank")
inputs.append(input("Please input the first row\t"))
inputs.append(input("Please input the second row\t"))
inputs.append(input("Please input the third row\t"))
puzzInit = list()
for i in inputs:
check = i.split()
if ( '0' in check ):
blankLoc = check.index('0')
check[blankLoc] = ' '
puzzInit.append(check)
return Puzzle( DIMENSIONS, puzzInit )
def chooseHeuristic():
print("Please choose your heuristic")
print(" 1. Uniform Search Hueristic (WARNING: MAY TAKE A LONG TIME)")
print(" 2. Misplaced Tiles Heuristic")
print(" 3. Manhattan Distance Heuristic")
response = -1
    while(response < 1 or response > 3):
        response = int(input("Please enter your choice: "))
        if( response > 3 or response < 1):
print(response, " is an invalid option\n")
if (response == 1):
return uniformSearchHeuristic
elif (response == 2):
return misplacedTilesHeuristic
else:
return manhattanDistanceHeuristic
def setExpand():
val = int(input("Enter 1 to display the node that is expanding, otherwise press 2\n"))
global EXPANDFLAG
if( val == 1 ):
EXPANDFLAG = True
else:
EXPANDFLAG = False
def main():
problem = choosePuzzle()
h = chooseHeuristic()
setExpand()
a = generalSearch(problem, h)
if( a is not None):
print("\nTo solve this problem, the search algorithm expanded nodes", NUMEXPANSIONS, "times")
print("The max number of nodes in the queue at any time was", MAXQUEUESIZE )
print("The depth of the goal node was", a.getNumMoves())
main()
``` |
{
"source": "jkwon045/cs172-project",
"score": 3
} |
#### File: jkwon045/cs172-project/web-crawler.py
```python
from bs4 import BeautifulSoup
import threading
import requests
import sys
import time
def get_restrictions(url):
    robots_txt = url + "robots.txt"
robots_page = requests.get(robots_txt)
parsed_robots_txt = BeautifulSoup(robots_page.content, 'html.parser')
string_parsed_robots_txt = parsed_robots_txt.string
general_user_agent_location = parsed_robots_txt.string.find('User-agent: *')
string_parsed_robots_txt = string_parsed_robots_txt[general_user_agent_location:]
restricted = []
for line in string_parsed_robots_txt.split('\n'):
if ( line.find('Disallow:') > -1 ):
path = line[line.find('Disallow:')+len('Disallow:'):]
if (path.find('#') > -1):
path = path[:path.find('#')-1]
path = path.strip()
restricted.append(path)
return restricted
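# Example of the parsing above (illustrative line, not fetched from a real site):
# a robots.txt entry "Disallow: /search/  # internal" is reduced to the path "/search/".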
def main():
# should take seed from command line argument, but for now it is hardcoded
#seed = sys.argv[1]
#num_pages = sys.argv[2]
    #hops = sys.argv[3]
seed = "https://www.fda.gov/"
link = seed[:len(seed)-1]
list_restrictions = get_restrictions("https://www.fda.gov/")
list_links = []
level_dictionary = { seed:1 }
level = 0
cnt = 0
list_links.append(seed)
for i in list_links:
print("Current Page: ", i)
if ( level_dictionary[i] > 5 ):
break
current_page = requests.get(i)
if(current_page.status_code != 200 ):
continue
html_code = BeautifulSoup(current_page.content, 'html.parser')
#for tag in html_code.findAll('p'):
# print(tag.get_text())
for tag in html_code.findAll('a', href=True):
if ( link + tag['href'] not in list_links ):
if ( len(tag['href']) > 0 and tag['href'] not in list_restrictions and tag['href'][0] == '/'):
if ( len(tag['href']) > 1 ):
if ( tag['href'][1] != '['):
list_links.append(link+tag['href'])
level_dictionary[link+tag['href']] = level_dictionary[i]+1
else:
continue
else:
list_links.append(link+tag['href'])
level_dictionary[link+tag['href']] = level_dictionary[i]+1
cnt += 1
time.sleep(30)
main()
``` |
{
"source": "jkwong888/dataflow-gcp-logs",
"score": 2
} |
#### File: jkwong888/dataflow-gcp-logs/categorize_logs.py
```python
from __future__ import absolute_import
from __future__ import division
import argparse
import csv
import json
import sys
import logging
import apache_beam as beam
from apache_beam.metrics.metric import Metrics
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import SetupOptions
class ParseLogEntry(beam.DoFn):
"""Parses the log entry into a Python dictionary.
The human-readable time string is not used here.
"""
def __init__(self):
# TODO(BEAM-6158): Revert the workaround once we can pickle super() on py3.
# super(ParseGameEventFn, self).__init__()
beam.DoFn.__init__(self)
self.num_parse_errors = Metrics.counter(self.__class__, 'num_parse_errors')
def process(self, elem):
try:
#print(elem)
row = json.loads(elem)
# categorize by the kubernetes deployment that generated the log
log_labels = row['labels']
log_cat = log_labels['k8s-pod/app_kubernetes_io/instance']
yield (log_cat, row)
except: # pylint: disable=bare-except
# Log and count parse errors
self.num_parse_errors.inc()
logging.error('Parse error on "%s"' % elem, sys.exc_info()[0])
# [END extract_and_sum_score]
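# Illustrative input line (assumed shape): a JSON entry such as
# {"labels": {"k8s-pod/app_kubernetes_io/instance": "my-app"}, ...} is keyed by
# "my-app"; entries missing that label fall into the parse-error counter above.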
# [START main]
def run(argv=None, save_main_session=True):
"""Main entry point; defines and runs the user_score pipeline."""
parser = argparse.ArgumentParser()
    # Path to the exported Cloud Logging files to read (e.g. a Cloud Storage glob);
    # the flag is required and has no default.
parser.add_argument(
'--input',
type=str,
required=True,
help='Path to the log bucket')
parser.add_argument(
'--output',
type=str,
required=True,
help='Path to the output file(s).')
args, pipeline_args = parser.parse_known_args(argv)
options = PipelineOptions(pipeline_args)
# We use the save_main_session option because one or more DoFn's in this
# workflow rely on global context (e.g., a module imported at module level).
options.view_as(SetupOptions).save_main_session = save_main_session
with beam.Pipeline(options=options) as p:
( # pylint: disable=expression-not-assigned
p
| 'ReadInputText' >> beam.io.ReadFromText(args.input)
| 'ParseLogEntry' >> beam.ParDo(ParseLogEntry())
| 'Group' >> beam.GroupByKey()
| 'getKeys' >> beam.Keys()
| 'Write' >> beam.io.WriteToText(args.output)
)
# [END main]
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
run()
``` |
{
"source": "jkx19/MRQA",
"score": 2
} |
#### File: jkx19/MRQA/cls.py
```python
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
from model.prefix import BertForQuestionAnswering, BertPrefixModel
from model.prefix import DebertaPrefixModel
from transformers import AutoTokenizer, AutoConfig, DebertaForQuestionAnswering
from transformers.models.bert.configuration_bert import BertConfig
from transformers.trainer_pt_utils import get_parameter_names
from transformers.trainer_utils import set_seed
import torch
from torch.optim import AdamW
from tqdm import tqdm
import argparse
import os
import sys
import json
from data.mrqa_dataset import MRQA
class Train_API():
def __init__(self, args) -> None:
# parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
# model_args, data_args, training_args = parser.parse_args_into_dataclasses()
self.batch_size = args.batch_size*torch.cuda.device_count()
if args.model == 'bert':
self.model_name = f'bert-{args.model_size}-uncased'
elif args.model == 'deberta':
self.model_name = 'microsoft/deberta-xlarge'
config = AutoConfig.from_pretrained(
self.model_name,
revision='main',
)
config.dropout =args.dropout
tokenizer = AutoTokenizer.from_pretrained(
self.model_name,
revision='main',
use_fast=True,
)
config.num_labels = 2
config.pre_seq_len = args.pre_seq_len
config.mid_dim = args.mid_dim
method = args.method
if args.model == 'bert':
if method == 'prefix':
self.model = BertPrefixModel.from_pretrained(
self.model_name,
config=config,
revision='main',
)
elif method == 'finetune':
self.model = BertForQuestionAnswering.from_pretrained(
self.model_name,
config=config,
revision='main',
)
elif args.model == 'deberta':
if method == 'prefix':
self.model = DebertaPrefixModel.from_pretrained(
self.model_name,
config=config,
revision='main',
)
elif method == 'finetune':
self.model = DebertaForQuestionAnswering.from_pretrained(
self.model_name,
config=config,
revision='main',
)
dataset = MRQA(tokenizer, self.batch_size)
# exit()
self.eval_example = dataset.eval_example
self.eval_dataset = dataset.eval_dataset
self.train_loader = dataset.train_loader
self.eval_loader = dataset.eval_loader
        self.device = torch.device('cuda:0')
self.epoch = args.epoch
self.adam_beta1 = 0.9
self.adam_beta2 = 0.999
self.adam_epsilon = 1e-8
self.weight_decay = 0
self.gamma = args.gamma
self.lr = args.lr
self.seed = args.seed
self.compute_metric = dataset.compute_metric
self.post_process_function = dataset.post_process_function
def get_optimizer(self):
decay_parameters = get_parameter_names(self.model, [torch.nn.LayerNorm])
decay_parameters = [name for name in decay_parameters if "bias" not in name]
optimizer_grouped_parameters = [
{
"params": [p for n, p in self.model.named_parameters() if n in decay_parameters],
"weight_decay": self.weight_decay,
},
{
"params": [p for n, p in self.model.named_parameters() if n not in decay_parameters],
"weight_decay": 0.0,
},
]
optimizer_kwargs = {
"betas": (self.adam_beta1, self.adam_beta2),
"eps": self.adam_epsilon,
}
optimizer_kwargs["lr"] = self.lr
self.optimizer = AdamW(optimizer_grouped_parameters, **optimizer_kwargs)
def get_schedular(self):
pass
def train(self):
self.get_optimizer()
self.scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer=self.optimizer, gamma=self.gamma)
pbar = tqdm(total=(len(self.train_loader) + len(self.eval_loader))*self.epoch)
if torch.cuda.device_count() > 1:
self.model = torch.nn.DataParallel(self.model)
self.model.to(self.device)
best_dev_result = 0
best_result = None
best_model = None
for epoch in range(self.epoch):
# Train
total_loss = 0
self.model.train()
for batch_idx, batch in enumerate(self.train_loader):
batch = {k:v.to(self.device) for k,v in batch.items()}
output = self.model(**batch)
loss = torch.sum(output.loss)
# loss = output.loss
total_loss += loss.item()
loss.backward()
self.optimizer.step()
self.optimizer.zero_grad()
pbar.update(1)
self.scheduler.step()
result = self.evaluate(pbar)
eval_f1 = result['eval_f1']
if eval_f1 > best_dev_result:
best_dev_result = eval_f1
best_result = result
best_model = self.model.prefix_encoder
pbar.set_description(f'Train_loss: {total_loss:.0f}, Eval_F1: {eval_f1:.2f}')
        torch.save(best_model, f'checkpoints/prefix_{self.seed}_{best_dev_result}_{self.model_name}')
return best_result
def evaluate(self, pbar: tqdm):
self.model.eval()
with torch.no_grad():
start, end = [],[]
for batch_idx, batch in enumerate(self.eval_loader):
batch = {k:v.to(self.device) for k,v in batch.items()}
output = self.model(**batch)
start_logits, end_logits = output.start_logits, output.end_logits
start.append(start_logits)
end.append(end_logits)
pbar.update(1)
start_logits = np.array(torch.cat(start).cpu())
end_logits = np.array(torch.cat(end).cpu())
eval_preds = self.post_process_function(self.eval_example, self.eval_dataset, (start_logits, end_logits))
metrics = self.compute_metric(eval_preds)
for key in list(metrics.keys()):
if not key.startswith(f"eval_"):
metrics[f"eval_{key}"] = metrics.pop(key)
return metrics
def predict(self):
self.model.eval()
with torch.no_grad():
start, end = [],[]
for batch_idx, batch in enumerate(self.eval_loader):
batch = {k:v.to(self.device) for k,v in batch.items()}
output = self.model(**batch)
start_logits, end_logits = output.start_logits, output.end_logits
start.append(start_logits)
end.append(end_logits)
start_logits = np.array(torch.cat(start).cpu())
end_logits = np.array(torch.cat(end).cpu())
preds = self.post_process_function(self.eval_example, self.eval_dataset, (start_logits, end_logits))
out_file = open('output/prediction.json', 'w')
predictions = dict((p["id"], p["prediction_text"]) for p in preds[0])
json.dump(predictions, out_file)
def construct_args():
parser = argparse.ArgumentParser()
parser.add_argument('--lr', type=float, default=2e-2)
parser.add_argument('--gamma', type=float, default=0.95)
parser.add_argument('--batch_size', type=int, default=8)
parser.add_argument('--pre_seq_len', type=int, default=8)
parser.add_argument('--mid_dim', type=int, default=512)
parser.add_argument('--model', type=str, choices=['bert', 'deberta'], default='bert')
parser.add_argument('--model_size', type=str, choices=['base', 'large'], default='base')
parser.add_argument('--method', type=str, choices=['finetune', 'prefix'], default='prefix')
parser.add_argument('--epoch', type=int, default=10)
parser.add_argument('--dropout', type=float, default=0.2)
parser.add_argument('--cuda', type=str, default='5')
parser.add_argument('--seed', type=int, default=44)
args = parser.parse_args()
return args
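# Example invocation (illustrative; device and hyperparameters depend on your setup):
#   python cls.py --model bert --model_size base --method prefix --epoch 10 --cuda 0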
if __name__ == '__main__':
args = construct_args()
os.environ["CUDA_VISIBLE_DEVICES"] = args.cuda
set_seed(args.seed)
train_api = Train_API(args)
result = train_api.train()
sys.stdout = open('result.txt', 'a')
print(args)
print(result)
``` |
{
"source": "jkx19/P_tuning",
"score": 2
} |
#### File: PT-Fewshot/pet/task_helpers.py
```python
import math
from abc import ABC
from collections import defaultdict
from typing import Dict, List, Optional, Any
import torch
import re
import numpy as np
from torch.nn import CrossEntropyLoss
from pet.utils import (
InputFeatures,
InputExample,
get_verbalization_ids,
chunks,
trim_input_ids,
remove_final_punc,
lowercase_first,
)
class TaskHelper(ABC):
"""
    A helper class that provides custom training and evaluation methods for tasks that do not fit in PET's default
schema, for example because they require more than two sequences of text, different evaluation metrics or
verbalizers consisting of multiple tokens.
"""
def __init__(self, wrapper):
"""
Create a new task helper.
:param wrapper: The wrapper for the language model being used.
"""
self.wrapper = wrapper
self.output = None
def train_step(
self, batch: Dict[str, torch.Tensor], **kwargs
) -> Optional[torch.Tensor]:
"""
Custom implementation of the train step for this task.
:param batch: a batch of examples
:return: a scalar loss tensor
"""
pass
def eval_step(
self, batch: Dict[str, torch.Tensor], **kwargs
) -> Optional[torch.Tensor]:
"""
Custom implementation of the eval step for this task.
:param batch: a batch of examples
:return: a tensor of logits
"""
pass
def add_special_input_features(
self, input_example: InputExample, input_features: InputFeatures
) -> None:
"""
Add special features to the ``meta`` dictionary of a feature set
:param input_example: the input example considered
:param input_features: the set of features corresponding to this example
"""
pass
def add_features_to_dict(
self, features: List[InputFeatures], feature_dict: Dict[str, torch.Tensor]
) -> None:
"""
Add special features from the ``meta`` dictionary of a sequence of features to the corresponding dictionary
:param features: the sequence of features
:param feature_dict: the dictionary that stores aggregated feature views as tensors
"""
pass
def get_sequence_classifier_inputs(self, example: InputExample) -> Dict[str, Any]:
"""
Get the inputs for sequence classification. Override this method if the input for the task considered is of a
more complicated form than `text_a` or `text_a [SEP] text_b`.
:param example: the input example
:return: the dictionary of inputs
"""
pass
class MultiRcTaskHelper(TaskHelper):
"""A custom task helper for the MultiRC dataset."""
def add_special_input_features(
self, input_example: InputExample, input_features: InputFeatures
) -> None:
input_features.meta["question_idx"] = input_example.meta["question_idx"]
def add_features_to_dict(
self, features: List[InputFeatures], feature_dict: Dict[str, torch.Tensor]
) -> None:
feature_dict["question_idx"] = torch.tensor(
[f.meta["question_idx"] for f in features], dtype=torch.long
)
class CopaTaskHelper(TaskHelper):
"""A custom task helper for the COPA dataset."""
def train_step(self, batch, **kwargs) -> Optional[torch.Tensor]:
inputs = self.wrapper.generate_default_inputs(batch)
mask = batch["labels"].unsqueeze(1)
correct_targets = (
batch["choice1_token_ids"] * (1 - mask) + batch["choice2_token_ids"] * mask
)
wrong_targets = batch["choice1_token_ids"] * mask + batch[
"choice2_token_ids"
] * (1 - mask)
prediction_scores = self.wrapper.model(**inputs)[0].view(
-1, self.wrapper.model.model.config.vocab_size
)
loss_fct = CrossEntropyLoss()
loss_correct_label = loss_fct(prediction_scores, correct_targets.view(-1))
loss_wrong_label = loss_fct(prediction_scores, wrong_targets.view(-1))
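        # hinge-style margin: the correct choice must beat the wrong one by at
        # least 1 in LM loss, with negative values clamped to zero just below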
loss = 1 + loss_correct_label - loss_wrong_label
loss[loss < 0] = 0
return loss
def eval_step(
self,
batch: Dict[str, torch.Tensor],
decoding_strategy: str = "default",
**kwargs,
):
assert (
batch["input_ids"].shape[0] == 1
), "eval_step() for COPA is only implemented for batch_size=1"
log_probs = []
for choice in ["choice1", "choice2"]:
labels = batch[f"{choice}_token_ids"]
log_prob = self._get_choice_log_probability(
batch, labels, decoding_strategy=decoding_strategy
)
log_probs.append(log_prob)
return torch.tensor([log_probs])
def _get_choice_log_probability(
self, batch, target_sequence, decoding_strategy: str = "default"
):
# adjust the number of masks
num_masks = sum(1 for tok_id in target_sequence[0] if tok_id != -100)
input_ids = trim_input_ids(
batch["input_ids"],
num_masks=num_masks,
pad_token_id=self.wrapper.tokenizer.pad_token_id,
mask_token_id=self.wrapper.tokenizer.mask_token_id,
)
log_probabilities = []
original_batch = {}
while True:
masks = [
(idx, tok_id)
for idx, tok_id in enumerate(target_sequence[0])
if tok_id != -100
]
if not masks: # there are no masks left to process, we are done
break
original_batch["input_ids"] = input_ids
original_batch["attention_mask"] = torch.tensor(
[[1] * len(input_ids[0])], dtype=torch.long
).cuda()
original_batch["block_flag"] = batch["block_flag"]
inputs = self.wrapper.generate_default_inputs(original_batch)
outputs = self.wrapper.model(**inputs)
next_token_logits = torch.nn.Softmax(dim=2)(outputs[0])[0]
mask_pos, masked_id = None, None
max_prob = None
for m_pos, m_id in masks:
m_prob = next_token_logits[m_pos][m_id].item()
if max_prob is None or m_prob > max_prob:
max_prob = m_prob
mask_pos, masked_id = m_pos, m_id
log_probabilities.append(math.log(max_prob))
input_ids[0][mask_pos] = masked_id
target_sequence[0][mask_pos] = -100
return sum(log_probabilities)
def add_special_input_features(
self, input_example: InputExample, input_features: InputFeatures
) -> None:
mask_start = input_features.input_ids.index(
self.wrapper.tokenizer.mask_token_id
)
for choice in ["choice1", "choice2"]:
choice_text = input_example.meta[choice]
choice_token_ids = get_verbalization_ids(
choice_text, self.wrapper.tokenizer, force_single_token=False
)
mask_end = mask_start + len(choice_token_ids)
input_features.meta[f"{choice}_token_ids"] = [-100] * len(
input_features.input_ids
)
input_features.meta[f"{choice}_token_ids"][
mask_start:mask_end
] = choice_token_ids
def add_features_to_dict(
self, features: List[InputFeatures], feature_dict: Dict[str, torch.Tensor]
) -> None:
for choice in ["choice1", "choice2"]:
feature_dict[f"{choice}_token_ids"] = torch.tensor(
[f.meta[f"{choice}_token_ids"] for f in features], dtype=torch.long
)
class WscTaskHelper(TaskHelper):
"""A custom task helper for the Wsc dataset."""
def __init__(self, wrapper):
super().__init__(wrapper)
self.id_to_target = []
def add_special_input_features(
self, input_example: InputExample, input_features: InputFeatures
) -> None:
mask_start = input_features.input_ids.index(
self.wrapper.tokenizer.mask_token_id
)
num_masks = input_features.input_ids.count(self.wrapper.tokenizer.mask_token_id)
mask_end = mask_start + num_masks
target = input_example.meta["span1_text"]
input_features.meta["target"] = target
target_token_ids = get_verbalization_ids(
target, self.wrapper.tokenizer, force_single_token=False
)
input_features.meta["target_token_ids"] = [-100] * len(input_features.input_ids)
# we also predict <pad> tokens at the missing positions
target_token_ids += [self.wrapper.tokenizer.pad_token_id] * (
num_masks - len(target_token_ids)
)
input_features.meta["target_token_ids"][mask_start:mask_end] = target_token_ids
def add_features_to_dict(
self, features: List[InputFeatures], feature_dict: Dict[str, torch.Tensor]
) -> None:
feature_dict["target_id"] = torch.tensor(
[len(self.id_to_target) + idx for idx, f in enumerate(features)],
dtype=torch.long,
)
self.id_to_target += [f.meta["target"] for f in features]
feature_dict["target_token_ids"] = torch.tensor(
[f.meta["target_token_ids"] for f in features], dtype=torch.long
)
def train_step(self, batch, **kwargs) -> Optional[torch.Tensor]:
inputs = self.wrapper.generate_default_inputs(batch)
inputs["labels"] = batch["target_token_ids"]
loss = self.wrapper.model(**inputs)[0]
return loss
def eval_step(
self,
batch: Dict[str, torch.Tensor],
decoding_strategy: str = "default",
**kwargs,
):
assert (
batch["input_ids"].shape[0] == 1
), "eval_step() for COPA is only implemented for batch_size=1"
input_ids = batch["input_ids"]
origin_batch = batch
orig_mask_positions = [
idx
for idx, input_id in enumerate(input_ids[0])
if input_id == self.wrapper.tokenizer.mask_token_id
]
while True:
mask_positions = [
idx
for idx, input_id in enumerate(input_ids[0])
if input_id == self.wrapper.tokenizer.mask_token_id
]
if not mask_positions: # there are no masks left to process, we are done
input_ids = input_ids[0].detach().cpu().tolist()
output_actual = self.wrapper.tokenizer.decode(
[
input_id
for idx, input_id in enumerate(input_ids)
if idx in orig_mask_positions
and input_id not in self.wrapper.tokenizer.all_special_ids
]
)
output_expected = self.id_to_target[batch["target_id"][0].item()]
# transform both outputs as described in the T5 paper
output_actual = output_actual.lower().strip()
output_actual = [w for w in re.split("[^a-zA-Z]", output_actual) if w]
output_expected = output_expected.lower().strip()
output_expected = [
w for w in re.split("[^a-zA-Z]", output_expected) if w
]
# compare outputs
if all(x in output_expected for x in output_actual) or all(
x in output_actual for x in output_expected
):
return torch.tensor([[0, 1]])
return torch.tensor([[1, 0]])
origin_batch["input_ids"] = input_ids
inputs = self.wrapper.generate_default_inputs(origin_batch)
outputs = self.wrapper.model(**inputs)
next_token_logits = outputs[0]
next_token_logits = torch.nn.Softmax(dim=2)(next_token_logits)
next_token_logits = next_token_logits[0].detach().cpu().numpy()
most_confident = ()
most_confident_score = -1
for mask_position in mask_positions:
ntl = next_token_logits[mask_position]
top_token_id = np.argmax(ntl)
top_score = ntl[top_token_id]
if top_score > most_confident_score:
most_confident_score = top_score
most_confident = (mask_position, top_token_id)
input_ids[0][most_confident[0]] = most_confident[1]
``` |
{
"source": "jkx19/SeqLabel",
"score": 2
} |
#### File: SeqLabel/CoNLL03/cli.py
```python
import datasets
from datasets.load import load_metric, load_dataset, load_dataset_builder
import numpy as np
import torch
from torch import Tensor
import torch.nn
import torch.nn.functional as F
from torch.utils.data.dataloader import DataLoader
from torch.utils.data.sampler import RandomSampler
from torch.optim import AdamW
from transformers import TrainingArguments, HfArgumentParser
from model.deberta import DebertaV2ForTokenClassification
from transformers.trainer_pt_utils import get_parameter_names
from tqdm import tqdm
from dataclasses import dataclass, field
from typing import Optional
import sys
import os
import argparse
from data.conll_dataset import CoNLL
from model.prefix import BertForTokenClassification, BertPrefixModel, DeBertaV2PrefixModel
from trainer import Trainer
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
"""
model_name_or_path: str = field(
default='bert-base-uncased',
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
cache_dir: Optional[str] = field(
default=None,
metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
)
model_revision: str = field(
default="main",
metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
)
use_auth_token: bool = field(
default=False,
metadata={
"help": "Will use the token generated when running `transformers-cli login` (necessary to use this script "
"with private models)."
},
)
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
task_name: Optional[str] = field(default="ner", metadata={"help": "The name of the task (ner, pos...)."})
dataset_name: Optional[str] = field(
default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
)
dataset_config_name: Optional[str] = field(
default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
)
train_file: Optional[str] = field(
default=None, metadata={"help": "The input training data file (a csv or JSON file)."}
)
validation_file: Optional[str] = field(
default=None,
metadata={"help": "An optional input evaluation data file to evaluate on (a csv or JSON file)."},
)
test_file: Optional[str] = field(
default=None,
metadata={"help": "An optional input test data file to predict on (a csv or JSON file)."},
)
text_column_name: Optional[str] = field(
default=None, metadata={"help": "The column name of text to input in the file (a csv or JSON file)."}
)
label_column_name: Optional[str] = field(
default=None, metadata={"help": "The column name of label to input in the file (a csv or JSON file)."}
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
)
preprocessing_num_workers: Optional[int] = field(
default=None,
metadata={"help": "The number of processes to use for the preprocessing."},
)
pad_to_max_length: bool = field(
default=False,
metadata={
"help": "Whether to pad all samples to model maximum sentence length. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
"efficient on GPU but very bad for TPU."
},
)
max_train_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
},
)
max_eval_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
},
)
max_predict_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of prediction examples to this "
"value if set."
},
)
label_all_tokens: bool = field(
default=False,
metadata={
"help": "Whether to put the label for one word on all tokens of generated by that word or just on the "
"one (in which case the other tokens will have a padding index)."
},
)
return_entity_level_metrics: bool = field(
default=False,
metadata={"help": "Whether to return all the entity levels during evaluation or just the overall ones."},
)
# METRIC: F1 score
# Note: the main reason for abandoning LAMA was to fit this metric
class Trainer_API:
def __init__(self, args) -> None:
# parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
# self.model_args, self.data_args, self.training_args = parser.parse_args_into_dataclasses()
self.task = args.task
assert self.task in ['pos', 'chunk', 'ner']
self.device = torch.device('cuda:0')
device_num = torch.cuda.device_count() if torch.cuda.is_available() else 1
self.batch_size = args.batch_size * device_num
self.epoch = args.epoch
self.adam_beta1 = 0.9
self.adam_beta2 = 0.999
self.adam_epsilon = 1e-8
self.weight_decay = 0
self.gamma = 0.99
self.lr = args.lr
if args.model == 'bert':
self.model_name = f'bert-{args.model_size}-uncased'
elif args.model == 'deberta':
self.model_name = 'microsoft/deberta-xlarge-v2'
raw_data = load_dataset('data/load_dataset.py')
dataset = CoNLL(self.task, raw_data, self.model_name)
self.train_dataset = dataset.train_data
self.dev_dataset = dataset.dev_data
self.test_dataset = dataset.test_data
self.ignore_columns = dataset.ignore_columns
self.tokenizer = dataset.tokenizer
self.data_collator = dataset.data_collator
self.compute_metrics = dataset.compute_metrics
self.lm_config = dataset.config
self.method = args.method
if args.method == 'prefix':
self.lm_config.hidden_dropout_prob = args.dropout
self.lm_config.pre_seq_len = args.pre_seq_len
self.lm_config.mid_dim = args.mid_dim
if 'deberta' in self.model_name:
self.model = DeBertaV2PrefixModel.from_pretrained(
self.model_name,
config=self.lm_config,
revision='main',
)
elif 'bert' in self.model_name:
self.model = BertPrefixModel.from_pretrained(
self.model_name,
config=self.lm_config,
revision='main',
)
elif args.method == 'finetune':
if 'deberta' in self.model_name:
self.model = DebertaV2ForTokenClassification.from_pretrained(
self.model_name,
config=self.lm_config,
revision='main',
)
elif 'bert' in self.model_name:
self.model = BertForTokenClassification.from_pretrained(
self.model_name,
config=self.lm_config,
revision='main',
)
self.train_loader = self.get_data_loader(self.train_dataset)
self.dev_loader = self.get_data_loader(self.dev_dataset)
self.test_loader = self.get_data_loader(self.test_dataset)
max_dev_len = max([batch['labels'].shape[1] for _, batch in enumerate(self.dev_loader)])
max_test_len = max([batch['labels'].shape[1] for _, batch in enumerate(self.test_loader)])
self.max_seq_len = max(max_dev_len, max_test_len)
def get_sampler(self, dataset) -> Optional[torch.utils.data.sampler.Sampler]:
generator = torch.Generator()
generator.manual_seed(int(torch.empty((), dtype=torch.int64).random_().item()))
# Build the sampler.
return RandomSampler(dataset, generator=generator)
def get_data_loader(self, dataset: datasets.arrow_dataset.Dataset) -> DataLoader:
dataset = dataset.remove_columns(self.ignore_columns)
sampler = self.get_sampler(dataset)
return DataLoader(
dataset,
batch_size=self.batch_size,
sampler=sampler,
collate_fn=self.data_collator,
drop_last=False,
num_workers=0,
pin_memory=True,
)
def get_optimizer(self):
decay_parameters = get_parameter_names(self.model, [torch.nn.LayerNorm])
decay_parameters = [name for name in decay_parameters if "bias" not in name]
optimizer_grouped_parameters = [
{
"params": [p for n, p in self.model.named_parameters() if n in decay_parameters],
"weight_decay": self.weight_decay,
},
{
"params": [p for n, p in self.model.named_parameters() if n not in decay_parameters],
"weight_decay": 0.0,
},
]
optimizer_kwargs = {
"betas": (self.adam_beta1, self.adam_beta2),
"eps": self.adam_epsilon,
}
optimizer_kwargs["lr"] = self.lr
self.optimizer = AdamW(optimizer_grouped_parameters, **optimizer_kwargs)
def get_schedular(self):
pass
def pad_tensor(self, tensor: torch.Tensor, pad_index: int):
r'''
        Pad the (batched) result tensor to max length for concatenation, using the given pad index
'''
max_size = self.max_seq_len
old_size = tensor.shape
new_size = list(old_size)
new_size[1] = max_size
new_tensor = tensor.new_zeros(tuple(new_size)) + pad_index
new_tensor[:, : old_size[1]] = tensor
return new_tensor
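    # e.g. (illustrative shapes) a (2, 5) label tensor with self.max_seq_len == 8
    # comes back as (2, 8), with the three new positions filled with pad_index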
def train(self):
self.get_optimizer()
self.scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer=self.optimizer, gamma=self.gamma)
pbar = tqdm(total=len(self.train_loader)*self.epoch)
if torch.cuda.device_count() > 1:
self.model = torch.nn.DataParallel(self.model)
self.model.to(self.device)
best_dev_result = 0
best_test_result = 0
for epoch in range(self.epoch):
# Train
total_loss = 0
self.model.train()
for batch_idx, batch in enumerate(self.train_loader):
batch = {k:v.to(self.device) for k,v in batch.items()}
output = self.model(**batch)
loss = torch.sum(output.loss)
# loss = output.loss
total_loss += loss.item()
loss.backward()
self.optimizer.step()
self.optimizer.zero_grad()
pbar.update(1)
self.scheduler.step()
# Evaluate
dev_result = self.eval()
test_result = self.test()
if best_dev_result < dev_result["f1"]:
best_dev_result = dev_result["f1"]
best_test_result = test_result
best_head = self.model.classifier.state_dict()
pbar.set_description(f'Train_loss: {total_loss:.1f}, Eval_F1: {dev_result["f1"]:.3f}, Test_F1: {test_result["f1"]:.3f},')
pbar.close()
if self.method == 'finetune':
torch.save(best_head, 'checkpoints/classifier.pkl')
return best_test_result
def eval(self):
self.model.eval()
with torch.no_grad():
labels, prediction = [], []
for batch_idx, batch in enumerate(self.dev_loader):
batch = {k:v.to(self.device) for k,v in batch.items()}
output = self.model(**batch)
loss,logits = output.loss, output.logits
logits = self.pad_tensor(logits, -100)
prediction.append(logits)
batch_label = self.pad_tensor(batch['labels'], -100)
labels.append(batch_label)
prediction = torch.cat(prediction)
labels = torch.cat(labels)
result = self.compute_metrics((np.array(prediction.cpu()), np.array(labels.cpu())))
return result
def test(self):
self.model.eval()
with torch.no_grad():
labels, prediction = [], []
for batch_idx, batch in enumerate(self.test_loader):
batch = {k:v.to(self.device) for k,v in batch.items()}
output = self.model(**batch)
loss,logits = output.loss, output.logits
logits = self.pad_tensor(logits, -100)
prediction.append(logits)
batch_label = self.pad_tensor(batch['labels'], -100)
labels.append(batch_label)
prediction = torch.cat(prediction)
labels = torch.cat(labels)
result = self.compute_metrics((np.array(prediction.cpu()), np.array(labels.cpu())))
return result
def construct_args():
parser = argparse.ArgumentParser()
parser.add_argument("--lr", type=float, default=5e-5)
parser.add_argument('--batch_size', type=int, default=4)
parser.add_argument('--task', type=str, choices=['pos', 'chunk', 'ner'], default='ner')
parser.add_argument('--pre_seq_len', type=int, default=3)
parser.add_argument('--mid_dim', type=int, default=512)
parser.add_argument('--model', type=str,choices=['bert', 'deberta'], default='deberta')
parser.add_argument('--model_size', type=str, choices=['base', 'large'], default='base')
parser.add_argument('--method', type=str, choices=['prefix', 'finetune'], default='prefix')
parser.add_argument('--epoch', type=int, default=30)
parser.add_argument('--dropout', type=float, default=0.1)
args = parser.parse_args()
return args
def main():
args = construct_args()
os.environ["CUDA_VISIBLE_DEVICES"] = '7'
train_api = Trainer_API(args)
result = train_api.train()
sys.stdout = open('result.txt', 'a')
print(args)
print(result)
if __name__ == '__main__':
main()
``` |
{
"source": "jkxing/pytorch3d",
"score": 2
} |
#### File: nerf/nerf/eval_video_utils.py
```python
import math
from typing import Tuple
import torch
from pytorch3d.renderer import PerspectiveCameras, look_at_view_transform
from torch.utils.data.dataset import Dataset
def generate_eval_video_cameras(
train_dataset,
n_eval_cams: int = 100,
trajectory_type: str = "figure_eight",
trajectory_scale: float = 0.2,
scene_center: Tuple[float, float, float] = (0.0, 0.0, 0.0),
up: Tuple[float, float, float] = (0.0, 0.0, 1.0),
) -> Dataset[torch.Tensor]:
"""
Generate a camera trajectory for visualizing a NeRF model.
Args:
train_dataset: The training dataset object.
n_eval_cams: Number of cameras in the trajectory.
trajectory_type: The type of the camera trajectory. Can be one of:
circular: Rotating around the center of the scene at a fixed radius.
figure_eight: Figure-of-8 trajectory around the center of the
central camera of the training dataset.
trefoil_knot: Same as 'figure_eight', but the trajectory has a shape
of a trefoil knot (https://en.wikipedia.org/wiki/Trefoil_knot).
figure_eight_knot: Same as 'figure_eight', but the trajectory has a shape
of a figure-eight knot
(https://en.wikipedia.org/wiki/Figure-eight_knot_(mathematics)).
trajectory_scale: The extent of the trajectory.
up: The "up" vector of the scene (=the normal of the scene floor).
Active for the `trajectory_type="circular"`.
scene_center: The center of the scene in world coordinates which all
the cameras from the generated trajectory look at.
Returns:
        A list of per-frame dictionaries (each holding a camera) which can be used as the test dataset
"""
if trajectory_type in ("figure_eight", "trefoil_knot", "figure_eight_knot"):
cam_centers = torch.cat(
[e["camera"].get_camera_center() for e in train_dataset]
)
# get the nearest camera center to the mean of centers
mean_camera_idx = (
((cam_centers - cam_centers.mean(dim=0)[None]) ** 2)
.sum(dim=1)
.min(dim=0)
.indices
)
# generate the knot trajectory in canonical coords
time = torch.linspace(0, 2 * math.pi, n_eval_cams + 1)[:n_eval_cams]
if trajectory_type == "trefoil_knot":
traj = _trefoil_knot(time)
elif trajectory_type == "figure_eight_knot":
traj = _figure_eight_knot(time)
elif trajectory_type == "figure_eight":
traj = _figure_eight(time)
traj[:, 2] -= traj[:, 2].max()
# transform the canonical knot to the coord frame of the mean camera
traj_trans = (
train_dataset[mean_camera_idx]["camera"]
.get_world_to_view_transform()
.inverse()
)
traj_trans = traj_trans.scale(cam_centers.std(dim=0).mean() * trajectory_scale)
traj = traj_trans.transform_points(traj)
elif trajectory_type == "circular":
cam_centers = torch.cat(
[e["camera"].get_camera_center() for e in train_dataset]
)
# fit plane to the camera centers
plane_mean = cam_centers.mean(dim=0)
cam_centers_c = cam_centers - plane_mean[None]
if up is not None:
            # use the up vector instead of the plane through the camera centers
plane_normal = torch.FloatTensor(up)
else:
cov = (cam_centers_c.t() @ cam_centers_c) / cam_centers_c.shape[0]
_, e_vec = torch.symeig(cov, eigenvectors=True)
plane_normal = e_vec[:, 0]
plane_dist = (plane_normal[None] * cam_centers_c).sum(dim=-1)
cam_centers_on_plane = cam_centers_c - plane_dist[:, None] * plane_normal[None]
cov = (
cam_centers_on_plane.t() @ cam_centers_on_plane
) / cam_centers_on_plane.shape[0]
_, e_vec = torch.symeig(cov, eigenvectors=True)
traj_radius = (cam_centers_on_plane ** 2).sum(dim=1).sqrt().mean()
angle = torch.linspace(0, 2.0 * math.pi, n_eval_cams)
traj = traj_radius * torch.stack(
(torch.zeros_like(angle), angle.cos(), angle.sin()), dim=-1
)
traj = traj @ e_vec.t() + plane_mean[None]
else:
raise ValueError(f"Unknown trajectory_type {trajectory_type}.")
# point all cameras towards the center of the scene
R, T = look_at_view_transform(
eye=traj,
at=(scene_center,), # (1, 3)
up=(up,), # (1, 3)
device=traj.device,
)
# get the average focal length and principal point
focal = torch.cat([e["camera"].focal_length for e in train_dataset]).mean(dim=0)
p0 = torch.cat([e["camera"].principal_point for e in train_dataset]).mean(dim=0)
# assemble the dataset
test_dataset = [
{
"image": None,
"camera": PerspectiveCameras(
focal_length=focal[None],
principal_point=p0[None],
R=R_[None],
T=T_[None],
),
"camera_idx": i,
}
for i, (R_, T_) in enumerate(zip(R, T))
]
return test_dataset
def _figure_eight_knot(t: torch.Tensor, z_scale: float = 0.5):
x = (2 + (2 * t).cos()) * (3 * t).cos()
y = (2 + (2 * t).cos()) * (3 * t).sin()
z = (4 * t).sin() * z_scale
return torch.stack((x, y, z), dim=-1)
def _trefoil_knot(t: torch.Tensor, z_scale: float = 0.5):
x = t.sin() + 2 * (2 * t).sin()
y = t.cos() - 2 * (2 * t).cos()
z = -(3 * t).sin() * z_scale
return torch.stack((x, y, z), dim=-1)
def _figure_eight(t: torch.Tensor, z_scale: float = 0.5):
x = t.cos()
y = (2 * t).sin() / 2
z = t.sin() * z_scale
return torch.stack((x, y, z), dim=-1)
```
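A small, hedged sketch of the canonical trajectory helpers above (the import path is an assumption based on this file living at `nerf/nerf/eval_video_utils.py`; adjust it to your checkout): sampling one period of `_figure_eight` or `_trefoil_knot` gives the raw (x, y, z) path that `generate_eval_video_cameras` later rescales and re-orients around the mean training camera.
```python
import math
import torch

from nerf.eval_video_utils import _figure_eight, _trefoil_knot  # assumed import path

t = torch.linspace(0, 2 * math.pi, 100)
fig8 = _figure_eight(t)        # (100, 3) canonical figure-of-eight path
knot = _trefoil_knot(t)        # (100, 3) canonical trefoil-knot path
print(fig8.shape, knot.shape)  # torch.Size([100, 3]) torch.Size([100, 3])
```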
#### File: pytorch3d/tests/test_chamfer.py
```python
import unittest
from collections import namedtuple
import numpy as np
import torch
import torch.nn.functional as F
from common_testing import TestCaseMixin, get_random_cuda_device
from pytorch3d.loss import chamfer_distance
from pytorch3d.structures.pointclouds import Pointclouds
# Output of init_pointclouds
points_normals = namedtuple(
"points_normals", "p1_lengths p2_lengths cloud1 cloud2 p1 p2 n1 n2 weights"
)
class TestChamfer(TestCaseMixin, unittest.TestCase):
def setUp(self) -> None:
super().setUp()
torch.manual_seed(1)
@staticmethod
def init_pointclouds(
N, P1, P2, device, requires_grad: bool = True, allow_empty: bool = True
):
"""
Create 2 pointclouds object and associated padded points/normals tensors by
starting from lists. The clouds and tensors have the same data. The
leaf nodes for the clouds are a list of tensors. The padded tensor can be
used directly as a leaf node.
"""
low = 0 if allow_empty else 1
p1_lengths = torch.randint(low, P1, size=(N,), dtype=torch.int64, device=device)
p2_lengths = torch.randint(low, P2, size=(N,), dtype=torch.int64, device=device)
P1 = p1_lengths.max().item()
P2 = p2_lengths.max().item()
weights = torch.rand((N,), dtype=torch.float32, device=device)
# list of points and normals tensors
p1 = torch.rand((N, P1, 3), dtype=torch.float32, device=device)
p2 = torch.rand((N, P2, 3), dtype=torch.float32, device=device)
n1 = torch.rand((N, P1, 3), dtype=torch.float32, device=device)
n2 = torch.rand((N, P2, 3), dtype=torch.float32, device=device)
n1 /= n1.norm(dim=-1, p=2, keepdim=True)
n2 /= n2.norm(dim=-1, p=2, keepdim=True)
p1_list = []
p2_list = []
n1_list = []
n2_list = []
for i in range(N):
l1 = p1_lengths[i]
l2 = p2_lengths[i]
p1_list.append(p1[i, :l1].clone())
p2_list.append(p2[i, :l2].clone())
n1_list.append(n1[i, :l1].clone())
n2_list.append(n2[i, :l2].clone())
# Set requires_grad for all tensors in the lists and
# padded tensors.
if requires_grad:
for p in p2_list + p1_list + n1_list + n2_list + [p1, p2, n1, n2]:
p.requires_grad = True
# Create pointclouds objects
cloud1 = Pointclouds(points=p1_list, normals=n1_list)
cloud2 = Pointclouds(points=p2_list, normals=n2_list)
# Return pointclouds objects and padded tensors
return points_normals(
p1_lengths=p1_lengths,
p2_lengths=p2_lengths,
cloud1=cloud1,
cloud2=cloud2,
p1=p1,
p2=p2,
n1=n1,
n2=n2,
weights=weights,
)
@staticmethod
def chamfer_distance_naive_pointclouds(p1, p2, device="cpu"):
"""
Naive iterative implementation of nearest neighbor and chamfer distance.
x and y are assumed to be pointclouds objects with points and optionally normals.
This functions supports heterogeneous pointclouds in a batch.
Returns lists of the unreduced loss and loss_normals.
"""
x = p1.points_padded()
y = p2.points_padded()
N, P1, D = x.shape
P2 = y.size(1)
x_lengths = p1.num_points_per_cloud()
y_lengths = p2.num_points_per_cloud()
x_normals = p1.normals_padded()
y_normals = p2.normals_padded()
return_normals = x_normals is not None and y_normals is not None
# Initialize all distances to + inf
dist = torch.ones((N, P1, P2), dtype=torch.float32, device=device) * np.inf
x_mask = (
torch.arange(P1, device=x.device)[None] >= x_lengths[:, None]
) # shape [N, P1]
y_mask = (
torch.arange(P2, device=y.device)[None] >= y_lengths[:, None]
) # shape [N, P2]
is_x_heterogeneous = (x_lengths != P1).any()
is_y_heterogeneous = (y_lengths != P2).any()
# Only calculate the distances for the points which are not masked
for n in range(N):
for i1 in range(x_lengths[n]):
for i2 in range(y_lengths[n]):
dist[n, i1, i2] = torch.sum((x[n, i1, :] - y[n, i2, :]) ** 2)
x_dist = torch.min(dist, dim=2)[0] # (N, P1)
y_dist = torch.min(dist, dim=1)[0] # (N, P2)
if is_x_heterogeneous:
x_dist[x_mask] = 0.0
if is_y_heterogeneous:
y_dist[y_mask] = 0.0
loss = [x_dist, y_dist]
lnorm = [x.new_zeros(()), x.new_zeros(())]
if return_normals:
x_index = dist.argmin(2).view(N, P1, 1).expand(N, P1, 3)
y_index = dist.argmin(1).view(N, P2, 1).expand(N, P2, 3)
lnorm1 = 1 - torch.abs(
F.cosine_similarity(
x_normals, y_normals.gather(1, x_index), dim=2, eps=1e-6
)
)
lnorm2 = 1 - torch.abs(
F.cosine_similarity(
y_normals, x_normals.gather(1, y_index), dim=2, eps=1e-6
)
)
if is_x_heterogeneous:
lnorm1[x_mask] = 0.0
if is_y_heterogeneous:
lnorm2[y_mask] = 0.0
lnorm = [lnorm1, lnorm2] # [(N, P1), (N, P2)]
return loss, lnorm
@staticmethod
def chamfer_distance_naive(x, y, x_normals=None, y_normals=None):
"""
Naive iterative implementation of nearest neighbor and chamfer distance.
Returns lists of the unreduced loss and loss_normals. This naive
        version only supports homogeneous pointclouds in a batch.
"""
N, P1, D = x.shape
P2 = y.size(1)
device = x.device
return_normals = x_normals is not None and y_normals is not None
dist = torch.zeros((N, P1, P2), dtype=torch.float32, device=device)
for n in range(N):
for i1 in range(P1):
for i2 in range(P2):
dist[n, i1, i2] = torch.sum((x[n, i1, :] - y[n, i2, :]) ** 2)
loss = [
torch.min(dist, dim=2)[0], # (N, P1)
torch.min(dist, dim=1)[0], # (N, P2)
]
lnorm = [x.new_zeros(()), x.new_zeros(())]
if return_normals:
x_index = dist.argmin(2).view(N, P1, 1).expand(N, P1, 3)
y_index = dist.argmin(1).view(N, P2, 1).expand(N, P2, 3)
lnorm1 = 1 - torch.abs(
F.cosine_similarity(
x_normals, y_normals.gather(1, x_index), dim=2, eps=1e-6
)
)
lnorm2 = 1 - torch.abs(
F.cosine_similarity(
y_normals, x_normals.gather(1, y_index), dim=2, eps=1e-6
)
)
lnorm = [lnorm1, lnorm2] # [(N, P1), (N, P2)]
return loss, lnorm
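    # The tests below assemble the chamfer distance from these per-point minima as
    # mean_i min_j ||x_i - y_j||^2 + mean_j min_i ||x_i - y_j||^2 (optionally weighted),
    # matching what the vectorized chamfer_distance under test computes.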
def test_chamfer_point_batch_reduction_mean(self):
"""
Compare output of vectorized chamfer loss with naive implementation
for the default settings (point_reduction = "mean" and batch_reduction = "mean")
and no normals.
This tests only uses homogeneous pointclouds.
"""
N, max_P1, max_P2 = 7, 10, 18
device = get_random_cuda_device()
points_normals = TestChamfer.init_pointclouds(N, max_P1, max_P2, device)
p1 = points_normals.p1
p2 = points_normals.p2
weights = points_normals.weights
p11 = p1.detach().clone()
p22 = p2.detach().clone()
p11.requires_grad = True
p22.requires_grad = True
P1 = p1.shape[1]
P2 = p2.shape[1]
pred_loss, pred_loss_norm = TestChamfer.chamfer_distance_naive(p1, p2)
# point_reduction = "mean".
loss, loss_norm = chamfer_distance(p11, p22, weights=weights)
pred_loss = pred_loss[0].sum(1) / P1 + pred_loss[1].sum(1) / P2
pred_loss *= weights
pred_loss = pred_loss.sum() / weights.sum()
self.assertClose(loss, pred_loss)
self.assertTrue(loss_norm is None)
# Check gradients
self._check_gradients(loss, None, pred_loss, None, p1, p11, p2, p22)
def test_chamfer_vs_naive_pointcloud(self):
"""
Test the default settings for chamfer_distance
(point reduction = "mean" and batch_reduction="mean") but with heterogeneous
pointclouds as input. Compare with the naive implementation of chamfer
which supports heterogeneous pointcloud objects.
"""
N, max_P1, max_P2 = 3, 70, 70
device = get_random_cuda_device()
points_normals = TestChamfer.init_pointclouds(N, max_P1, max_P2, device)
weights = points_normals.weights
x_lengths = points_normals.p1_lengths
y_lengths = points_normals.p2_lengths
# Chamfer with tensors as input for heterogeneous pointclouds.
cham_tensor, norm_tensor = chamfer_distance(
points_normals.p1,
points_normals.p2,
x_normals=points_normals.n1,
y_normals=points_normals.n2,
x_lengths=points_normals.p1_lengths,
y_lengths=points_normals.p2_lengths,
weights=weights,
)
# Chamfer with pointclouds as input.
pred_loss, pred_norm_loss = TestChamfer.chamfer_distance_naive_pointclouds(
points_normals.cloud1, points_normals.cloud2, device=device
)
# Mean reduction point loss.
pred_loss[0] *= weights.view(N, 1)
pred_loss[1] *= weights.view(N, 1)
pred_loss_mean = (
pred_loss[0].sum(1) / x_lengths + pred_loss[1].sum(1) / y_lengths
)
pred_loss_mean = pred_loss_mean.sum()
pred_loss_mean /= weights.sum()
# Mean reduction norm loss.
pred_norm_loss[0] *= weights.view(N, 1)
pred_norm_loss[1] *= weights.view(N, 1)
pred_norm_loss_mean = (
pred_norm_loss[0].sum(1) / x_lengths + pred_norm_loss[1].sum(1) / y_lengths
)
pred_norm_loss_mean = pred_norm_loss_mean.sum() / weights.sum()
self.assertClose(pred_loss_mean, cham_tensor)
self.assertClose(pred_norm_loss_mean, norm_tensor)
self._check_gradients(
cham_tensor,
norm_tensor,
pred_loss_mean,
pred_norm_loss_mean,
points_normals.cloud1.points_list(),
points_normals.p1,
points_normals.cloud2.points_list(),
points_normals.p2,
points_normals.cloud1.normals_list(),
points_normals.n1,
points_normals.cloud2.normals_list(),
points_normals.n2,
x_lengths,
y_lengths,
)
def test_chamfer_pointcloud_object_withnormals(self):
N = 5
P1, P2 = 100, 100
device = get_random_cuda_device()
reductions = [
("sum", "sum"),
("mean", "sum"),
("sum", "mean"),
("mean", "mean"),
("sum", None),
("mean", None),
]
for (point_reduction, batch_reduction) in reductions:
# Reinitialize all the tensors so that the
# backward pass can be computed.
points_normals = TestChamfer.init_pointclouds(
N, P1, P2, device, allow_empty=False
)
# Chamfer with pointclouds as input.
cham_cloud, norm_cloud = chamfer_distance(
points_normals.cloud1,
points_normals.cloud2,
point_reduction=point_reduction,
batch_reduction=batch_reduction,
)
# Chamfer with tensors as input.
cham_tensor, norm_tensor = chamfer_distance(
points_normals.p1,
points_normals.p2,
x_lengths=points_normals.p1_lengths,
y_lengths=points_normals.p2_lengths,
x_normals=points_normals.n1,
y_normals=points_normals.n2,
point_reduction=point_reduction,
batch_reduction=batch_reduction,
)
self.assertClose(cham_cloud, cham_tensor)
self.assertClose(norm_cloud, norm_tensor)
self._check_gradients(
cham_tensor,
norm_tensor,
cham_cloud,
norm_cloud,
points_normals.cloud1.points_list(),
points_normals.p1,
points_normals.cloud2.points_list(),
points_normals.p2,
points_normals.cloud1.normals_list(),
points_normals.n1,
points_normals.cloud2.normals_list(),
points_normals.n2,
points_normals.p1_lengths,
points_normals.p2_lengths,
)
def test_chamfer_pointcloud_object_nonormals(self):
N = 5
P1, P2 = 100, 100
device = get_random_cuda_device()
reductions = [
("sum", "sum"),
("mean", "sum"),
("sum", "mean"),
("mean", "mean"),
("sum", None),
("mean", None),
]
for (point_reduction, batch_reduction) in reductions:
# Reinitialize all the tensors so that the
# backward pass can be computed.
points_normals = TestChamfer.init_pointclouds(
N, P1, P2, device, allow_empty=False
)
# Chamfer with pointclouds as input.
cham_cloud, _ = chamfer_distance(
points_normals.cloud1,
points_normals.cloud2,
point_reduction=point_reduction,
batch_reduction=batch_reduction,
)
# Chamfer with tensors as input.
cham_tensor, _ = chamfer_distance(
points_normals.p1,
points_normals.p2,
x_lengths=points_normals.p1_lengths,
y_lengths=points_normals.p2_lengths,
point_reduction=point_reduction,
batch_reduction=batch_reduction,
)
self.assertClose(cham_cloud, cham_tensor)
self._check_gradients(
cham_tensor,
None,
cham_cloud,
None,
points_normals.cloud1.points_list(),
points_normals.p1,
points_normals.cloud2.points_list(),
points_normals.p2,
lengths1=points_normals.p1_lengths,
lengths2=points_normals.p2_lengths,
)
def test_chamfer_point_reduction_mean(self):
"""
Compare output of vectorized chamfer loss with naive implementation
for point_reduction = "mean" and batch_reduction = None.
"""
N, max_P1, max_P2 = 7, 10, 18
device = get_random_cuda_device()
points_normals = TestChamfer.init_pointclouds(N, max_P1, max_P2, device)
p1 = points_normals.p1
p2 = points_normals.p2
p1_normals = points_normals.n1
p2_normals = points_normals.n2
weights = points_normals.weights
p11 = p1.detach().clone()
p22 = p2.detach().clone()
p11.requires_grad = True
p22.requires_grad = True
P1 = p1.shape[1]
P2 = p2.shape[1]
pred_loss, pred_loss_norm = TestChamfer.chamfer_distance_naive(
p1, p2, x_normals=p1_normals, y_normals=p2_normals
)
# point_reduction = "mean".
loss, loss_norm = chamfer_distance(
p11,
p22,
x_normals=p1_normals,
y_normals=p2_normals,
weights=weights,
batch_reduction=None,
point_reduction="mean",
)
pred_loss_mean = pred_loss[0].sum(1) / P1 + pred_loss[1].sum(1) / P2
pred_loss_mean *= weights
self.assertClose(loss, pred_loss_mean)
pred_loss_norm_mean = (
pred_loss_norm[0].sum(1) / P1 + pred_loss_norm[1].sum(1) / P2
)
pred_loss_norm_mean *= weights
self.assertClose(loss_norm, pred_loss_norm_mean)
# Check gradients
self._check_gradients(
loss, loss_norm, pred_loss_mean, pred_loss_norm_mean, p1, p11, p2, p22
)
def test_chamfer_point_reduction_sum(self):
"""
Compare output of vectorized chamfer loss with naive implementation
for point_reduction = "sum" and batch_reduction = None.
"""
N, P1, P2 = 7, 10, 18
device = get_random_cuda_device()
points_normals = TestChamfer.init_pointclouds(N, P1, P2, device)
p1 = points_normals.p1
p2 = points_normals.p2
p1_normals = points_normals.n1
p2_normals = points_normals.n2
weights = points_normals.weights
p11 = p1.detach().clone()
p22 = p2.detach().clone()
p11.requires_grad = True
p22.requires_grad = True
pred_loss, pred_loss_norm = TestChamfer.chamfer_distance_naive(
p1, p2, x_normals=p1_normals, y_normals=p2_normals
)
loss, loss_norm = chamfer_distance(
p11,
p22,
x_normals=p1_normals,
y_normals=p2_normals,
weights=weights,
batch_reduction=None,
point_reduction="sum",
)
pred_loss_sum = pred_loss[0].sum(1) + pred_loss[1].sum(1)
pred_loss_sum *= weights
self.assertClose(loss, pred_loss_sum)
pred_loss_norm_sum = pred_loss_norm[0].sum(1) + pred_loss_norm[1].sum(1)
pred_loss_norm_sum *= weights
self.assertClose(loss_norm, pred_loss_norm_sum)
# Check gradients
self._check_gradients(
loss, loss_norm, pred_loss_sum, pred_loss_norm_sum, p1, p11, p2, p22
)
def _check_gradients(
self,
loss,
loss_norm,
pred_loss,
pred_loss_norm,
x1,
x2,
y1,
y2,
xn1=None, # normals
xn2=None, # normals
yn1=None, # normals
yn2=None, # normals
lengths1=None,
lengths2=None,
):
"""
x1 and x2 can have different types based on the leaf node used in the calculation:
e.g. x1 may be a list of tensors whereas x2 is a padded tensor.
This also applies for the pairs: (y1, y2), (xn1, xn2), (yn1, yn2).
"""
grad_loss = torch.rand(loss.shape, device=loss.device, dtype=loss.dtype)
        # Loss for normals is optional. Initialize to 0.
norm_loss_term = pred_norm_loss_term = 0.0
if loss_norm is not None and pred_loss_norm is not None:
grad_normals = torch.rand(
loss_norm.shape, device=loss.device, dtype=loss.dtype
)
norm_loss_term = loss_norm * grad_normals
pred_norm_loss_term = pred_loss_norm * grad_normals
l1 = (loss * grad_loss) + norm_loss_term
l1.sum().backward()
l2 = (pred_loss * grad_loss) + pred_norm_loss_term
l2.sum().backward()
self._check_grad_by_type(x1, x2, lengths1)
self._check_grad_by_type(y1, y2, lengths2)
# If leaf nodes for normals are passed in, check their gradients.
if all(n is not None for n in [xn1, xn2, yn1, yn2]):
self._check_grad_by_type(xn1, xn2, lengths1)
self._check_grad_by_type(yn1, yn2, lengths2)
def _check_grad_by_type(self, x1, x2, lengths=None):
"""
x1 and x2 can be of different types e.g. list or tensor - compare appropriately
based on the types.
"""
error_msg = "All values for gradient checks must be tensors or lists of tensors"
if all(isinstance(p, list) for p in [x1, x2]):
# Lists of tensors
for i in range(len(x1)):
self.assertClose(x1[i].grad, x2[i].grad)
elif isinstance(x1, list) and torch.is_tensor(x2):
self.assertIsNotNone(lengths) # lengths is required
# List of tensors vs padded tensor
for i in range(len(x1)):
self.assertClose(x1[i].grad, x2.grad[i, : lengths[i]], atol=1e-7)
self.assertTrue(x2.grad[i, lengths[i] :].sum().item() == 0.0)
elif all(torch.is_tensor(p) for p in [x1, x2]):
# Two tensors
self.assertClose(x1.grad, x2.grad)
else:
raise ValueError(error_msg)
def test_chamfer_joint_reduction(self):
"""
Compare output of vectorized chamfer loss with naive implementation
when batch_reduction in ["mean", "sum"] and
point_reduction in ["mean", "sum"].
"""
N, max_P1, max_P2 = 7, 10, 18
device = get_random_cuda_device()
points_normals = TestChamfer.init_pointclouds(N, max_P1, max_P2, device)
p1 = points_normals.p1
p2 = points_normals.p2
p1_normals = points_normals.n1
p2_normals = points_normals.n2
weights = points_normals.weights
P1 = p1.shape[1]
P2 = p2.shape[1]
pred_loss, pred_loss_norm = TestChamfer.chamfer_distance_naive(
p1, p2, x_normals=p1_normals, y_normals=p2_normals
)
# batch_reduction = "sum", point_reduction = "sum".
loss, loss_norm = chamfer_distance(
p1,
p2,
x_normals=p1_normals,
y_normals=p2_normals,
weights=weights,
batch_reduction="sum",
point_reduction="sum",
)
pred_loss[0] *= weights.view(N, 1)
pred_loss[1] *= weights.view(N, 1)
pred_loss_sum = pred_loss[0].sum(1) + pred_loss[1].sum(1) # point sum
pred_loss_sum = pred_loss_sum.sum() # batch sum
self.assertClose(loss, pred_loss_sum)
pred_loss_norm[0] *= weights.view(N, 1)
pred_loss_norm[1] *= weights.view(N, 1)
pred_loss_norm_sum = pred_loss_norm[0].sum(1) + pred_loss_norm[1].sum(
1
) # point sum.
pred_loss_norm_sum = pred_loss_norm_sum.sum() # batch sum
self.assertClose(loss_norm, pred_loss_norm_sum)
# batch_reduction = "mean", point_reduction = "sum".
loss, loss_norm = chamfer_distance(
p1,
p2,
x_normals=p1_normals,
y_normals=p2_normals,
weights=weights,
batch_reduction="mean",
point_reduction="sum",
)
pred_loss_sum /= weights.sum()
self.assertClose(loss, pred_loss_sum)
pred_loss_norm_sum /= weights.sum()
self.assertClose(loss_norm, pred_loss_norm_sum)
# batch_reduction = "sum", point_reduction = "mean".
loss, loss_norm = chamfer_distance(
p1,
p2,
x_normals=p1_normals,
y_normals=p2_normals,
weights=weights,
batch_reduction="sum",
point_reduction="mean",
)
pred_loss_mean = pred_loss[0].sum(1) / P1 + pred_loss[1].sum(1) / P2
pred_loss_mean = pred_loss_mean.sum()
self.assertClose(loss, pred_loss_mean)
pred_loss_norm_mean = (
pred_loss_norm[0].sum(1) / P1 + pred_loss_norm[1].sum(1) / P2
)
pred_loss_norm_mean = pred_loss_norm_mean.sum()
self.assertClose(loss_norm, pred_loss_norm_mean)
# batch_reduction = "mean", point_reduction = "mean". This is the default.
loss, loss_norm = chamfer_distance(
p1,
p2,
x_normals=p1_normals,
y_normals=p2_normals,
weights=weights,
batch_reduction="mean",
point_reduction="mean",
)
pred_loss_mean /= weights.sum()
self.assertClose(loss, pred_loss_mean)
pred_loss_norm_mean /= weights.sum()
self.assertClose(loss_norm, pred_loss_norm_mean)
# Error when batch_reduction is not in ["mean", "sum"] or None.
with self.assertRaisesRegex(ValueError, "batch_reduction must be one of"):
chamfer_distance(p1, p2, weights=weights, batch_reduction="max")
# Error when point_reduction is not in ["mean", "sum"].
with self.assertRaisesRegex(ValueError, "point_reduction must be one of"):
chamfer_distance(p1, p2, weights=weights, point_reduction=None)
def test_incorrect_weights(self):
N, P1, P2 = 16, 64, 128
device = get_random_cuda_device()
p1 = torch.rand(
(N, P1, 3), dtype=torch.float32, device=device, requires_grad=True
)
p2 = torch.rand(
(N, P2, 3), dtype=torch.float32, device=device, requires_grad=True
)
weights = torch.zeros((N,), dtype=torch.float32, device=device)
loss, loss_norm = chamfer_distance(
p1, p2, weights=weights, batch_reduction="mean"
)
self.assertClose(loss.cpu(), torch.zeros(()))
self.assertTrue(loss.requires_grad)
self.assertClose(loss_norm.cpu(), torch.zeros(()))
self.assertTrue(loss_norm.requires_grad)
loss, loss_norm = chamfer_distance(
p1, p2, weights=weights, batch_reduction=None
)
self.assertClose(loss.cpu(), torch.zeros((N, N)))
self.assertTrue(loss.requires_grad)
self.assertClose(loss_norm.cpu(), torch.zeros((N, N)))
self.assertTrue(loss_norm.requires_grad)
weights = torch.ones((N,), dtype=torch.float32, device=device) * -1
with self.assertRaises(ValueError):
loss, loss_norm = chamfer_distance(p1, p2, weights=weights)
weights = torch.zeros((N - 1,), dtype=torch.float32, device=device)
with self.assertRaises(ValueError):
loss, loss_norm = chamfer_distance(p1, p2, weights=weights)
def test_incorrect_inputs(self):
N, P1, P2 = 7, 10, 18
device = get_random_cuda_device()
points_normals = TestChamfer.init_pointclouds(N, P1, P2, device)
p1 = points_normals.p1
p2 = points_normals.p2
p1_normals = points_normals.n1
# Normals of wrong shape
with self.assertRaisesRegex(ValueError, "Expected normals to be of shape"):
chamfer_distance(p1, p2, x_normals=p1_normals[None])
# Points of wrong shape
with self.assertRaisesRegex(ValueError, "Expected points to be of shape"):
chamfer_distance(p1[None], p2)
# Lengths of wrong shape
with self.assertRaisesRegex(ValueError, "Expected lengths to be of shape"):
chamfer_distance(p1, p2, x_lengths=torch.tensor([1, 2, 3], device=device))
# Points are not a tensor or Pointclouds
with self.assertRaisesRegex(ValueError, "Pointclouds objects or torch.Tensor"):
chamfer_distance(x=[1, 1, 1], y=[1, 1, 1])
@staticmethod
def chamfer_with_init(
batch_size: int,
P1: int,
P2: int,
return_normals: bool,
homogeneous: bool,
device="cpu",
):
points_normals = TestChamfer.init_pointclouds(batch_size, P1, P2, device=device)
l1 = points_normals.p1_lengths
l2 = points_normals.p2_lengths
if homogeneous:
# Set lengths to None so in Chamfer it assumes
# there is no padding.
l1 = l2 = None
torch.cuda.synchronize()
def loss():
loss, loss_normals = chamfer_distance(
points_normals.p1,
points_normals.p2,
x_lengths=l1,
y_lengths=l2,
x_normals=points_normals.n1,
y_normals=points_normals.n2,
weights=points_normals.weights,
)
torch.cuda.synchronize()
return loss
@staticmethod
def chamfer_naive_with_init(
batch_size: int, P1: int, P2: int, return_normals: bool, device="cpu"
):
points_normals = TestChamfer.init_pointclouds(batch_size, P1, P2, device=device)
torch.cuda.synchronize()
def loss():
loss, loss_normals = TestChamfer.chamfer_distance_naive(
points_normals.p1,
points_normals.p2,
x_normals=points_normals.n1,
y_normals=points_normals.n2,
)
torch.cuda.synchronize()
return loss
```
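The tests above exercise `chamfer_distance` against a naive double loop for every reduction mode. As a quick orientation (not part of the test file), here is a minimal usage sketch of the public API with padded tensors and per-cloud lengths; the shapes are arbitrary assumptions:
```python
import torch
from pytorch3d.loss import chamfer_distance

# Two batches of padded point clouds of shape (N, P, 3), with per-cloud lengths.
x = torch.randn(4, 100, 3)
y = torch.randn(4, 80, 3)
x_lengths = torch.tensor([100, 90, 75, 100])
y_lengths = torch.tensor([80, 80, 60, 40])

# Default reductions: point_reduction="mean", batch_reduction="mean".
loss, loss_normals = chamfer_distance(x, y, x_lengths=x_lengths, y_lengths=y_lengths)
print(loss)          # scalar tensor
print(loss_normals)  # None, since no normals were passed
```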
#### File: pytorch3d/tests/test_io_gltf.py
```python
import unittest
from math import radians
import numpy as np
import torch
from common_testing import TestCaseMixin, get_pytorch3d_dir, get_tests_dir
from PIL import Image
from pytorch3d.io import IO
from pytorch3d.io.experimental_gltf_io import MeshGlbFormat
from pytorch3d.renderer import (
AmbientLights,
BlendParams,
FoVPerspectiveCameras,
PointLights,
RasterizationSettings,
look_at_view_transform,
rotate_on_spot,
)
from pytorch3d.renderer.mesh import (
HardPhongShader,
MeshRasterizer,
MeshRenderer,
TexturesVertex,
)
from pytorch3d.structures import Meshes
from pytorch3d.transforms import axis_angle_to_matrix
from pytorch3d.vis.texture_vis import texturesuv_image_PIL
DATA_DIR = get_tests_dir() / "data"
TUTORIAL_DATA_DIR = get_pytorch3d_dir() / "docs/tutorials/data"
DEBUG = False
def _load(path, **kwargs) -> Meshes:
io = IO()
io.register_meshes_format(MeshGlbFormat())
return io.load_mesh(path, **kwargs)
def _render(
mesh: Meshes,
name: str,
dist: float = 3.0,
elev: float = 10.0,
azim: float = 0,
image_size: int = 256,
pan=None,
RT=None,
use_ambient=False,
):
device = mesh.device
if RT is not None:
R, T = RT
else:
R, T = look_at_view_transform(dist, elev, azim)
if pan is not None:
R, T = rotate_on_spot(R, T, pan)
cameras = FoVPerspectiveCameras(device=device, R=R, T=T)
raster_settings = RasterizationSettings(
image_size=image_size, blur_radius=0.0, faces_per_pixel=1
)
# Init shader settings
if use_ambient:
lights = AmbientLights(device=device)
else:
lights = PointLights(device=device)
lights.location = torch.tensor([0.0, 0.0, 2.0], device=device)[None]
blend_params = BlendParams(
sigma=1e-1,
gamma=1e-4,
background_color=torch.tensor([1.0, 1.0, 1.0], device=device),
)
# Init renderer
renderer = MeshRenderer(
rasterizer=MeshRasterizer(cameras=cameras, raster_settings=raster_settings),
shader=HardPhongShader(
device=device, lights=lights, cameras=cameras, blend_params=blend_params
),
)
output = renderer(mesh)
image = (output[0, ..., :3].cpu().numpy() * 255).astype(np.uint8)
if DEBUG:
Image.fromarray(image).save(DATA_DIR / f"glb_{name}_.png")
return image
class TestMeshGltfIO(TestCaseMixin, unittest.TestCase):
def test_load_apartment(self):
"""
This is the example habitat example scene from inside
http://dl.fbaipublicfiles.com/habitat/habitat-test-scenes.zip
The scene is "already lit", i.e. the textures reflect the lighting
already, so we want to render them with full ambient light.
"""
self.skipTest("Data not available")
glb = DATA_DIR / "apartment_1.glb"
self.assertTrue(glb.is_file())
device = torch.device("cuda:0")
mesh = _load(glb, device=device)
if DEBUG:
texturesuv_image_PIL(mesh.textures).save(DATA_DIR / "out_apartment.png")
for i in range(19):
# random locations in the apartment
eye = ((np.random.uniform(-6, 0.5), np.random.uniform(-8, 2), 0),)
at = ((np.random.uniform(-6, 0.5), np.random.uniform(-8, 2), 0),)
up = ((0, 0, -1),)
RT = look_at_view_transform(eye=eye, at=at, up=up)
_render(mesh, f"apartment_eau{i}", RT=RT, use_ambient=True)
for i in range(12):
# panning around the inner room from one location
pan = axis_angle_to_matrix(torch.FloatTensor([0, radians(30 * i), 0]))
_render(mesh, f"apartment{i}", 1.0, -90, pan, use_ambient=True)
def test_load_cow(self):
"""
Load the cow as converted to a single mesh in a glb file.
"""
glb = DATA_DIR / "cow.glb"
self.assertTrue(glb.is_file())
device = torch.device("cuda:0")
mesh = _load(glb, device=device)
self.assertEqual(mesh.device, device)
self.assertEqual(mesh.faces_packed().shape, (5856, 3))
self.assertEqual(mesh.verts_packed().shape, (3225, 3))
mesh_obj = _load(TUTORIAL_DATA_DIR / "cow_mesh/cow.obj")
self.assertClose(
mesh_obj.get_bounding_boxes().cpu(), mesh_obj.get_bounding_boxes()
)
self.assertClose(
mesh.textures.verts_uvs_padded().cpu(), mesh_obj.textures.verts_uvs_padded()
)
self.assertClose(
mesh.textures.faces_uvs_padded().cpu(), mesh_obj.textures.faces_uvs_padded()
)
self.assertClose(
mesh.textures.maps_padded().cpu(), mesh_obj.textures.maps_padded()
)
if DEBUG:
texturesuv_image_PIL(mesh.textures).save(DATA_DIR / "out_cow.png")
image = _render(mesh, "cow", azim=4)
with Image.open(DATA_DIR / "glb_cow.png") as f:
expected = np.array(f)
self.assertClose(image, expected)
def test_load_cow_no_texture(self):
"""
Load the cow as converted to a single mesh in a glb file.
"""
glb = DATA_DIR / "cow.glb"
self.assertTrue(glb.is_file())
device = torch.device("cuda:0")
mesh = _load(glb, device=device, include_textures=False)
self.assertEqual(len(mesh), 1)
self.assertIsNone(mesh.textures)
self.assertEqual(mesh.faces_packed().shape, (5856, 3))
self.assertEqual(mesh.verts_packed().shape, (3225, 3))
mesh_obj = _load(TUTORIAL_DATA_DIR / "cow_mesh/cow.obj")
self.assertClose(
mesh_obj.get_bounding_boxes().cpu(), mesh_obj.get_bounding_boxes()
)
mesh.textures = TexturesVertex(0.5 * torch.ones_like(mesh.verts_padded()))
image = _render(mesh, "cow_gray")
with Image.open(DATA_DIR / "glb_cow_gray.png") as f:
expected = np.array(f)
self.assertClose(image, expected)
```
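For orientation (not part of the test file), the glb loading pattern used by the `_load` helper above reduces to registering the experimental format on an `IO` object; the file path below is a placeholder assumption:
```python
import torch
from pytorch3d.io import IO
from pytorch3d.io.experimental_gltf_io import MeshGlbFormat

io = IO()
io.register_meshes_format(MeshGlbFormat())

# Hypothetical path; replace with a real .glb file.
mesh = io.load_mesh("path/to/scene.glb", device=torch.device("cpu"))
print(mesh.verts_packed().shape, mesh.faces_packed().shape)
```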
#### File: pytorch3d/tests/test_laplacian_matrices.py
```python
import unittest
import torch
from common_testing import TestCaseMixin, get_random_cuda_device
from pytorch3d.ops import cot_laplacian, laplacian, norm_laplacian
from pytorch3d.structures.meshes import Meshes
class TestLaplacianMatrices(TestCaseMixin, unittest.TestCase):
def setUp(self) -> None:
super().setUp()
torch.manual_seed(1)
def init_mesh(self) -> Meshes:
V, F = 32, 64
device = get_random_cuda_device()
# random vertices
verts = torch.rand((V, 3), dtype=torch.float32, device=device)
# random valid faces (no self circles, e.g. (v0, v0, v1))
faces = torch.stack([torch.randperm(V) for f in range(F)], dim=0)[:, :3]
faces = faces.to(device=device)
return Meshes(verts=[verts], faces=[faces])
def test_laplacian(self):
mesh = self.init_mesh()
verts = mesh.verts_packed()
edges = mesh.edges_packed()
V, E = verts.shape[0], edges.shape[0]
L = laplacian(verts, edges)
Lnaive = torch.zeros((V, V), dtype=torch.float32, device=verts.device)
for e in range(E):
e0, e1 = edges[e]
Lnaive[e0, e1] = 1
            # symmetric
Lnaive[e1, e0] = 1
deg = Lnaive.sum(1).view(-1, 1)
deg[deg > 0] = 1.0 / deg[deg > 0]
Lnaive = Lnaive * deg
diag = torch.eye(V, dtype=torch.float32, device=mesh.device)
Lnaive.masked_fill_(diag > 0, -1)
self.assertClose(L.to_dense(), Lnaive)
def test_cot_laplacian(self):
mesh = self.init_mesh()
verts = mesh.verts_packed()
faces = mesh.faces_packed()
V = verts.shape[0]
eps = 1e-12
L, inv_areas = cot_laplacian(verts, faces, eps=eps)
Lnaive = torch.zeros((V, V), dtype=torch.float32, device=verts.device)
inv_areas_naive = torch.zeros((V, 1), dtype=torch.float32, device=verts.device)
for f in faces:
v0 = verts[f[0], :]
v1 = verts[f[1], :]
v2 = verts[f[2], :]
A = (v1 - v2).norm()
B = (v0 - v2).norm()
C = (v0 - v1).norm()
s = 0.5 * (A + B + C)
face_area = (s * (s - A) * (s - B) * (s - C)).clamp_(min=1e-12).sqrt()
inv_areas_naive[f[0]] += face_area
inv_areas_naive[f[1]] += face_area
inv_areas_naive[f[2]] += face_area
A2, B2, C2 = A * A, B * B, C * C
cota = (B2 + C2 - A2) / face_area / 4.0
cotb = (A2 + C2 - B2) / face_area / 4.0
cotc = (A2 + B2 - C2) / face_area / 4.0
Lnaive[f[1], f[2]] += cota
Lnaive[f[2], f[0]] += cotb
Lnaive[f[0], f[1]] += cotc
            # symmetric
Lnaive[f[2], f[1]] += cota
Lnaive[f[0], f[2]] += cotb
Lnaive[f[1], f[0]] += cotc
idx = inv_areas_naive > 0
inv_areas_naive[idx] = 1.0 / inv_areas_naive[idx]
self.assertClose(inv_areas, inv_areas_naive)
self.assertClose(L.to_dense(), Lnaive)
def test_norm_laplacian(self):
mesh = self.init_mesh()
verts = mesh.verts_packed()
edges = mesh.edges_packed()
V, E = verts.shape[0], edges.shape[0]
eps = 1e-12
L = norm_laplacian(verts, edges, eps=eps)
Lnaive = torch.zeros((V, V), dtype=torch.float32, device=verts.device)
for e in range(E):
e0, e1 = edges[e]
v0 = verts[e0]
v1 = verts[e1]
w01 = 1.0 / ((v0 - v1).norm() + eps)
Lnaive[e0, e1] += w01
Lnaive[e1, e0] += w01
self.assertClose(L.to_dense(), Lnaive)
```
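For orientation (not part of the test file), the same Laplacian ops imported above can be called directly on a toy one-triangle mesh; the mesh below is an illustrative assumption:
```python
import torch
from pytorch3d.ops import cot_laplacian, laplacian, norm_laplacian
from pytorch3d.structures.meshes import Meshes

# A single triangle as a toy mesh.
verts = torch.tensor([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
faces = torch.tensor([[0, 1, 2]])
mesh = Meshes(verts=[verts], faces=[faces])

L_uniform = laplacian(mesh.verts_packed(), mesh.edges_packed())             # sparse (V, V)
L_cot, inv_areas = cot_laplacian(mesh.verts_packed(), mesh.faces_packed())  # sparse (V, V), (V, 1)
L_norm = norm_laplacian(mesh.verts_packed(), mesh.edges_packed())           # sparse (V, V)
print(L_uniform.to_dense())
print(L_cot.to_dense(), inv_areas.squeeze())
print(L_norm.to_dense())
```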
#### File: pytorch3d/tests/test_sample_farthest_points.py
```python
import unittest
import numpy as np
import torch
from common_testing import (
TestCaseMixin,
get_pytorch3d_dir,
get_random_cuda_device,
get_tests_dir,
)
from pytorch3d.io import load_obj
from pytorch3d.ops.sample_farthest_points import (
sample_farthest_points,
sample_farthest_points_naive,
)
from pytorch3d.ops.utils import masked_gather
DATA_DIR = get_tests_dir() / "data"
TUTORIAL_DATA_DIR = get_pytorch3d_dir() / "docs/tutorials/data"
DEBUG = False
class TestFPS(TestCaseMixin, unittest.TestCase):
def _test_simple(self, fps_func, device="cpu"):
# fmt: off
points = torch.tensor(
[
[
[-1.0, -1.0], # noqa: E241, E201
[-1.3, 1.1], # noqa: E241, E201
[ 0.2, -1.1], # noqa: E241, E201
[ 0.0, 0.0], # noqa: E241, E201
[ 1.3, 1.3], # noqa: E241, E201
[ 1.0, 0.5], # noqa: E241, E201
[-1.3, 0.2], # noqa: E241, E201
[ 1.5, -0.5], # noqa: E241, E201
],
[
[-2.2, -2.4], # noqa: E241, E201
[-2.1, 2.0], # noqa: E241, E201
[ 2.2, 2.1], # noqa: E241, E201
[ 2.1, -2.4], # noqa: E241, E201
[ 0.4, -1.0], # noqa: E241, E201
[ 0.3, 0.3], # noqa: E241, E201
[ 1.2, 0.5], # noqa: E241, E201
[ 4.5, 4.5], # noqa: E241, E201
],
],
dtype=torch.float32,
device=device,
)
# fmt: on
expected_inds = torch.tensor([[0, 4], [0, 7]], dtype=torch.int64, device=device)
out_points, out_inds = fps_func(points, K=2)
self.assertClose(out_inds, expected_inds)
# Gather the points
expected_inds = expected_inds[..., None].expand(-1, -1, points.shape[-1])
self.assertClose(out_points, points.gather(dim=1, index=expected_inds))
# Different number of points sampled for each pointcloud in the batch
expected_inds = torch.tensor(
[[0, 4, 1], [0, 7, -1]], dtype=torch.int64, device=device
)
out_points, out_inds = fps_func(points, K=[3, 2])
self.assertClose(out_inds, expected_inds)
# Gather the points
expected_points = masked_gather(points, expected_inds)
self.assertClose(out_points, expected_points)
def _test_compare_random_heterogeneous(self, device="cpu"):
N, P, D, K = 5, 20, 5, 8
points = torch.randn((N, P, D), device=device, dtype=torch.float32)
out_points_naive, out_idxs_naive = sample_farthest_points_naive(points, K=K)
out_points, out_idxs = sample_farthest_points(points, K=K)
self.assertTrue(out_idxs.min() >= 0)
self.assertClose(out_idxs, out_idxs_naive)
self.assertClose(out_points, out_points_naive)
for n in range(N):
self.assertEqual(out_idxs[n].ne(-1).sum(), K)
# Test case where K > P
K = 30
points1 = torch.randn((N, P, D), dtype=torch.float32, device=device)
points2 = points1.clone()
points1.requires_grad = True
points2.requires_grad = True
lengths = torch.randint(low=1, high=P, size=(N,), device=device)
out_points_naive, out_idxs_naive = sample_farthest_points_naive(
points1, lengths, K=K
)
out_points, out_idxs = sample_farthest_points(points2, lengths, K=K)
self.assertClose(out_idxs, out_idxs_naive)
self.assertClose(out_points, out_points_naive)
for n in range(N):
# Check that for heterogeneous batches, the max number of
            # selected points is at most the length
self.assertTrue(out_idxs[n].ne(-1).sum() <= lengths[n])
self.assertTrue(out_idxs[n].max() <= lengths[n])
# Check there are no duplicate indices
val_mask = out_idxs[n].ne(-1)
vals, counts = torch.unique(out_idxs[n][val_mask], return_counts=True)
self.assertTrue(counts.le(1).all())
# Check gradients
grad_sampled_points = torch.ones((N, K, D), dtype=torch.float32, device=device)
loss1 = (out_points_naive * grad_sampled_points).sum()
loss1.backward()
loss2 = (out_points * grad_sampled_points).sum()
loss2.backward()
self.assertClose(points1.grad, points2.grad, atol=5e-6)
def _test_errors(self, fps_func, device="cpu"):
N, P, D, K = 5, 40, 5, 8
points = torch.randn((N, P, D), device=device)
wrong_batch_dim = torch.randint(low=1, high=K, size=(K,), device=device)
        # K has a different batch dimension to points
with self.assertRaisesRegex(ValueError, "K and points must have"):
sample_farthest_points_naive(points, K=wrong_batch_dim)
        # lengths has a different batch dimension to points
with self.assertRaisesRegex(ValueError, "points and lengths must have"):
sample_farthest_points_naive(points, lengths=wrong_batch_dim, K=K)
def _test_random_start(self, fps_func, device="cpu"):
N, P, D, K = 5, 40, 5, 8
points = torch.randn((N, P, D), dtype=torch.float32, device=device)
out_points, out_idxs = fps_func(points, K=K, random_start_point=True)
# Check the first index is not 0 or the same number for all batch elements
# when random_start_point = True
self.assertTrue(out_idxs[:, 0].sum() > 0)
self.assertFalse(out_idxs[:, 0].eq(out_idxs[0, 0]).all())
def _test_gradcheck(self, fps_func, device="cpu"):
N, P, D, K = 2, 10, 3, 2
points = torch.randn(
(N, P, D), dtype=torch.float32, device=device, requires_grad=True
)
lengths = torch.randint(low=1, high=P, size=(N,), device=device)
torch.autograd.gradcheck(
fps_func,
(points, lengths, K),
check_undefined_grad=False,
eps=2e-3,
atol=0.001,
)
def test_sample_farthest_points_naive(self):
device = get_random_cuda_device()
self._test_simple(sample_farthest_points_naive, device)
self._test_errors(sample_farthest_points_naive, device)
self._test_random_start(sample_farthest_points_naive, device)
self._test_gradcheck(sample_farthest_points_naive, device)
def test_sample_farthest_points_cpu(self):
self._test_simple(sample_farthest_points, "cpu")
self._test_errors(sample_farthest_points, "cpu")
self._test_compare_random_heterogeneous("cpu")
self._test_random_start(sample_farthest_points, "cpu")
self._test_gradcheck(sample_farthest_points, "cpu")
def test_sample_farthest_points_cuda(self):
device = get_random_cuda_device()
self._test_simple(sample_farthest_points, device)
self._test_errors(sample_farthest_points, device)
self._test_compare_random_heterogeneous(device)
self._test_random_start(sample_farthest_points, device)
self._test_gradcheck(sample_farthest_points, device)
def test_cuda_vs_cpu(self):
"""
Compare cuda vs cpu on a complex object
"""
obj_filename = TUTORIAL_DATA_DIR / "cow_mesh/cow.obj"
K = 250
# Run on CPU
device = "cpu"
points, _, _ = load_obj(obj_filename, device=device, load_textures=False)
points = points[None, ...]
out_points_cpu, out_idxs_cpu = sample_farthest_points(points, K=K)
# Run on GPU
device = get_random_cuda_device()
points_cuda = points.to(device)
out_points_cuda, out_idxs_cuda = sample_farthest_points(points_cuda, K=K)
# Check that the indices from CUDA and CPU match
self.assertClose(out_idxs_cpu, out_idxs_cuda.cpu())
# Check there are no duplicate indices
val_mask = out_idxs_cuda[0].ne(-1)
vals, counts = torch.unique(out_idxs_cuda[0][val_mask], return_counts=True)
self.assertTrue(counts.le(1).all())
# Plot all results
if DEBUG:
# mplot3d is required for 3d projection plots
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d # noqa: F401
# Move to cpu and convert to numpy for plotting
points = points.squeeze()
out_points_cpu = out_points_cpu.squeeze().numpy()
out_points_cuda = out_points_cuda.squeeze().cpu().numpy()
# Farthest point sampling CPU
fig = plt.figure(figsize=plt.figaspect(1.0 / 3))
ax1 = fig.add_subplot(1, 3, 1, projection="3d")
ax1.scatter(*points.T, alpha=0.1)
ax1.scatter(*out_points_cpu.T, color="black")
ax1.set_title("FPS CPU")
# Farthest point sampling CUDA
ax2 = fig.add_subplot(1, 3, 2, projection="3d")
ax2.scatter(*points.T, alpha=0.1)
ax2.scatter(*out_points_cuda.T, color="red")
ax2.set_title("FPS CUDA")
# Random Sampling
random_points = np.random.permutation(points)[:K]
ax3 = fig.add_subplot(1, 3, 3, projection="3d")
ax3.scatter(*points.T, alpha=0.1)
ax3.scatter(*random_points.T, color="green")
ax3.set_title("Random")
# Save image
filename = "DEBUG_fps.jpg"
filepath = DATA_DIR / filename
plt.savefig(filepath)
@staticmethod
def sample_farthest_points_naive(N: int, P: int, D: int, K: int, device: str):
device = torch.device(device)
pts = torch.randn(
N, P, D, dtype=torch.float32, device=device, requires_grad=True
)
grad_pts = torch.randn(N, K, D, dtype=torch.float32, device=device)
torch.cuda.synchronize()
def output():
out_points, _ = sample_farthest_points_naive(pts, K=K)
loss = (out_points * grad_pts).sum()
loss.backward()
torch.cuda.synchronize()
return output
@staticmethod
def sample_farthest_points(N: int, P: int, D: int, K: int, device: str):
device = torch.device(device)
pts = torch.randn(
N, P, D, dtype=torch.float32, device=device, requires_grad=True
)
grad_pts = torch.randn(N, K, D, dtype=torch.float32, device=device)
torch.cuda.synchronize()
def output():
out_points, _ = sample_farthest_points(pts, K=K)
loss = (out_points * grad_pts).sum()
loss.backward()
torch.cuda.synchronize()
return output
```
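For orientation (not part of the test file), a minimal sketch of calling `sample_farthest_points` outside the test harness; the shapes and `K` are arbitrary assumptions:
```python
import torch
from pytorch3d.ops.sample_farthest_points import sample_farthest_points

points = torch.randn(2, 1000, 3)     # (N, P, D)
lengths = torch.tensor([1000, 750])  # optional per-cloud lengths
sampled, idx = sample_farthest_points(points, lengths=lengths, K=64)
print(sampled.shape)  # (2, 64, 3)
print(idx.shape)      # (2, 64); slots would be -1 if K exceeded a cloud's length
```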
#### File: pytorch3d/tests/test_se3.py
```python
import unittest
import numpy as np
import torch
from common_testing import TestCaseMixin
from pytorch3d.common.compat import qr
from pytorch3d.transforms.rotation_conversions import random_rotations
from pytorch3d.transforms.se3 import se3_exp_map, se3_log_map
from pytorch3d.transforms.so3 import so3_exp_map, so3_log_map, so3_rotation_angle
class TestSE3(TestCaseMixin, unittest.TestCase):
precomputed_log_transform = torch.tensor(
[
[0.1900, 2.1600, -0.1700, 0.8500, -1.9200, 0.6500],
[-0.6500, -0.8200, 0.5300, -1.2800, -1.6600, -0.3000],
[-0.0900, 0.2000, -1.1200, 1.8600, -0.7100, 0.6900],
[0.8000, -0.0300, 1.4900, -0.5200, -0.2500, 1.4700],
[-0.3300, -1.1600, 2.3600, -0.6900, 0.1800, -1.1800],
[-1.8000, -1.5800, 0.8400, 1.4200, 0.6500, 0.4300],
[-1.5900, 0.6200, 1.6900, -0.6600, 0.9400, 0.0800],
[0.0800, -0.1400, 0.3300, -0.5900, -1.0700, 0.1000],
[-0.3300, -0.5300, -0.8800, 0.3900, 0.1600, -0.2000],
[1.0100, -1.3500, -0.3500, -0.6400, 0.4500, -0.5400],
],
dtype=torch.float32,
)
precomputed_transform = torch.tensor(
[
[
[-0.3496, -0.2966, 0.8887, 0.0000],
[-0.7755, 0.6239, -0.0968, 0.0000],
[-0.5258, -0.7230, -0.4481, 0.0000],
[-0.7392, 1.9119, 0.3122, 1.0000],
],
[
[0.0354, 0.5992, 0.7998, 0.0000],
[0.8413, 0.4141, -0.3475, 0.0000],
[-0.5395, 0.6852, -0.4894, 0.0000],
[-0.9902, -0.4840, 0.1226, 1.0000],
],
[
[0.6664, -0.1679, 0.7264, 0.0000],
[-0.7309, -0.3394, 0.5921, 0.0000],
[0.1471, -0.9255, -0.3489, 0.0000],
[-0.0815, 0.8719, -0.4516, 1.0000],
],
[
[0.1010, 0.9834, -0.1508, 0.0000],
[-0.8783, 0.0169, -0.4779, 0.0000],
[-0.4674, 0.1807, 0.8654, 0.0000],
[0.2375, 0.7043, 1.4159, 1.0000],
],
[
[0.3935, -0.8930, 0.2184, 0.0000],
[0.7873, 0.2047, -0.5817, 0.0000],
[0.4747, 0.4009, 0.7836, 0.0000],
[-0.3476, -0.0424, 2.5408, 1.0000],
],
[
[0.7572, 0.6342, -0.1567, 0.0000],
[0.1039, 0.1199, 0.9873, 0.0000],
[0.6449, -0.7638, 0.0249, 0.0000],
[-1.2885, -2.0666, -0.1137, 1.0000],
],
[
[0.6020, -0.2140, -0.7693, 0.0000],
[-0.3409, 0.8024, -0.4899, 0.0000],
[0.7221, 0.5572, 0.4101, 0.0000],
[-0.7550, 1.1928, 1.8480, 1.0000],
],
[
[0.4913, 0.3548, 0.7954, 0.0000],
[0.2013, 0.8423, -0.5000, 0.0000],
[-0.8474, 0.4058, 0.3424, 0.0000],
[-0.1003, -0.0406, 0.3295, 1.0000],
],
[
[0.9678, -0.1622, -0.1926, 0.0000],
[0.2235, 0.9057, 0.3603, 0.0000],
[0.1160, -0.3917, 0.9128, 0.0000],
[-0.4417, -0.3111, -0.9227, 1.0000],
],
[
[0.7710, -0.5957, -0.2250, 0.0000],
[0.3288, 0.6750, -0.6605, 0.0000],
[0.5454, 0.4352, 0.7163, 0.0000],
[0.5623, -1.5886, -0.0182, 1.0000],
],
],
dtype=torch.float32,
)
def setUp(self) -> None:
super().setUp()
torch.manual_seed(42)
np.random.seed(42)
@staticmethod
def init_log_transform(batch_size: int = 10):
"""
Initialize a list of `batch_size` 6-dimensional vectors representing
randomly generated logarithms of SE(3) transforms.
"""
device = torch.device("cuda:0")
log_rot = torch.randn((batch_size, 6), dtype=torch.float32, device=device)
return log_rot
@staticmethod
def init_transform(batch_size: int = 10):
"""
Initialize a list of `batch_size` 4x4 SE(3) transforms.
"""
device = torch.device("cuda:0")
transform = torch.zeros(batch_size, 4, 4, dtype=torch.float32, device=device)
transform[:, :3, :3] = random_rotations(
batch_size, dtype=torch.float32, device=device
)
transform[:, 3, :3] = torch.randn(
(batch_size, 3), dtype=torch.float32, device=device
)
transform[:, 3, 3] = 1.0
return transform
def test_se3_exp_output_format(self, batch_size: int = 100):
"""
Check that the output of `se3_exp_map` is a valid SE3 matrix.
"""
transform = se3_exp_map(TestSE3.init_log_transform(batch_size=batch_size))
R = transform[:, :3, :3]
T = transform[:, 3, :3]
rest = transform[:, :, 3]
Rdet = R.det()
# check det(R)==1
self.assertClose(Rdet, torch.ones_like(Rdet), atol=1e-4)
# check that the translation is a finite vector
self.assertTrue(torch.isfinite(T).all())
# check last column == [0,0,0,1]
last_col = rest.new_zeros(batch_size, 4)
last_col[:, -1] = 1.0
self.assertClose(rest, last_col)
def test_compare_with_precomputed(self):
"""
Compare the outputs against precomputed results.
"""
self.assertClose(
se3_log_map(self.precomputed_transform),
self.precomputed_log_transform,
atol=1e-4,
)
self.assertClose(
self.precomputed_transform,
se3_exp_map(self.precomputed_log_transform),
atol=1e-4,
)
def test_se3_exp_singularity(self, batch_size: int = 100):
"""
Tests whether the `se3_exp_map` is robust to the input vectors
with low L2 norms, where the algorithm is numerically unstable.
"""
# generate random log-rotations with a tiny angle
log_rot = TestSE3.init_log_transform(batch_size=batch_size)
log_rot_small = log_rot * 1e-6
log_rot_small.requires_grad = True
transforms = se3_exp_map(log_rot_small)
# tests whether all outputs are finite
self.assertTrue(torch.isfinite(transforms).all())
# tests whether all gradients are finite and not None
loss = transforms.sum()
loss.backward()
self.assertIsNotNone(log_rot_small.grad)
self.assertTrue(torch.isfinite(log_rot_small.grad).all())
def test_se3_log_singularity(self, batch_size: int = 100):
"""
Tests whether the `se3_log_map` is robust to the input matrices
whose rotation angles and translations are close to the numerically
unstable region (i.e. matrices with low rotation angles
and 0 translation).
"""
# generate random rotations with a tiny angle
device = torch.device("cuda:0")
identity = torch.eye(3, device=device)
rot180 = identity * torch.tensor([[1.0, -1.0, -1.0]], device=device)
r = [identity, rot180]
r.extend(
[
qr(identity + torch.randn_like(identity) * 1e-6)[0]
+ float(i > batch_size // 2) * (0.5 - torch.rand_like(identity)) * 1e-8
# this adds random noise to the second half
# of the random orthogonal matrices to generate
# near-orthogonal matrices
for i in range(batch_size - 2)
]
)
r = torch.stack(r)
# tiny translations
t = torch.randn(batch_size, 3, dtype=r.dtype, device=device) * 1e-6
# create the transform matrix
transform = torch.zeros(batch_size, 4, 4, dtype=torch.float32, device=device)
transform[:, :3, :3] = r
transform[:, 3, :3] = t
transform[:, 3, 3] = 1.0
transform.requires_grad = True
# the log of the transform
log_transform = se3_log_map(transform, eps=1e-4, cos_bound=1e-4)
# tests whether all outputs are finite
self.assertTrue(torch.isfinite(log_transform).all())
# tests whether all gradients are finite and not None
loss = log_transform.sum()
loss.backward()
self.assertIsNotNone(transform.grad)
self.assertTrue(torch.isfinite(transform.grad).all())
def test_se3_exp_zero_translation(self, batch_size: int = 100):
"""
Check that `se3_exp_map` with zero translation gives
the same result as corresponding `so3_exp_map`.
"""
log_transform = TestSE3.init_log_transform(batch_size=batch_size)
log_transform[:, :3] *= 0.0
transform = se3_exp_map(log_transform, eps=1e-8)
transform_so3 = so3_exp_map(log_transform[:, 3:], eps=1e-8)
self.assertClose(
transform[:, :3, :3], transform_so3.permute(0, 2, 1), atol=1e-4
)
self.assertClose(
transform[:, 3, :3], torch.zeros_like(transform[:, :3, 3]), atol=1e-4
)
def test_se3_log_zero_translation(self, batch_size: int = 100):
"""
Check that `se3_log_map` with zero translation gives
the same result as corresponding `so3_log_map`.
"""
transform = TestSE3.init_transform(batch_size=batch_size)
transform[:, 3, :3] *= 0.0
log_transform = se3_log_map(transform, eps=1e-8, cos_bound=1e-4)
log_transform_so3 = so3_log_map(transform[:, :3, :3], eps=1e-8, cos_bound=1e-4)
self.assertClose(log_transform[:, 3:], -log_transform_so3, atol=1e-4)
self.assertClose(
log_transform[:, :3], torch.zeros_like(log_transform[:, :3]), atol=1e-4
)
def test_se3_exp_to_log_to_exp(self, batch_size: int = 10000):
"""
Check that `se3_exp_map(se3_log_map(A))==A` for
a batch of randomly generated SE(3) matrices `A`.
"""
transform = TestSE3.init_transform(batch_size=batch_size)
# Limit test transforms to those not around the singularity where
# the rotation angle~=pi.
nonsingular = so3_rotation_angle(transform[:, :3, :3]) < 3.134
transform = transform[nonsingular]
transform_ = se3_exp_map(
se3_log_map(transform, eps=1e-8, cos_bound=0.0), eps=1e-8
)
self.assertClose(transform, transform_, atol=0.02)
def test_se3_log_to_exp_to_log(self, batch_size: int = 100):
"""
Check that `se3_log_map(se3_exp_map(log_transform))==log_transform`
for a randomly generated batch of SE(3) matrix logarithms `log_transform`.
"""
log_transform = TestSE3.init_log_transform(batch_size=batch_size)
log_transform_ = se3_log_map(se3_exp_map(log_transform, eps=1e-8), eps=1e-8)
self.assertClose(log_transform, log_transform_, atol=1e-1)
def test_bad_se3_input_value_err(self):
"""
Tests whether `se3_exp_map` and `se3_log_map` correctly return
a ValueError if called with an argument of incorrect shape, or with
        a tensor containing illegal values.
"""
device = torch.device("cuda:0")
for size in ([5, 4], [3, 4, 5], [3, 5, 6]):
log_transform = torch.randn(size=size, device=device)
with self.assertRaises(ValueError):
se3_exp_map(log_transform)
for size in ([5, 4], [3, 4, 5], [3, 5, 6], [2, 2, 3, 4]):
transform = torch.randn(size=size, device=device)
with self.assertRaises(ValueError):
se3_log_map(transform)
# Test the case where transform[:, :, :3] != 0.
transform = torch.rand(size=[5, 4, 4], device=device) + 0.1
with self.assertRaises(ValueError):
se3_log_map(transform)
@staticmethod
def se3_expmap(batch_size: int = 10):
log_transform = TestSE3.init_log_transform(batch_size=batch_size)
torch.cuda.synchronize()
def compute_transforms():
se3_exp_map(log_transform)
torch.cuda.synchronize()
return compute_transforms
@staticmethod
def se3_logmap(batch_size: int = 10):
log_transform = TestSE3.init_transform(batch_size=batch_size)
torch.cuda.synchronize()
def compute_logs():
se3_log_map(log_transform)
torch.cuda.synchronize()
return compute_logs
```
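For orientation (not part of the test file), the exp/log round trip checked above can be reproduced directly; the batch size and scaling are arbitrary assumptions:
```python
import torch
from pytorch3d.transforms.se3 import se3_exp_map, se3_log_map

# Small random logs (first 3 = translation, last 3 = rotation) to stay away
# from the angle close to pi singularity discussed in the tests above.
log_transform = 0.5 * torch.randn(8, 6)
transform = se3_exp_map(log_transform)  # (8, 4, 4), last column [0, 0, 0, 1]
recovered = se3_log_map(transform)
print((log_transform - recovered).abs().max())  # expected to be small
```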
#### File: pytorch3d/tests/test_symeig3x3.py
```python
import unittest
import torch
from common_testing import TestCaseMixin, get_random_cuda_device
from pytorch3d.common.workaround import symeig3x3
from pytorch3d.transforms.rotation_conversions import random_rotations
class TestSymEig3x3(TestCaseMixin, unittest.TestCase):
TEST_BATCH_SIZE = 1024
@staticmethod
def create_random_sym3x3(device, n):
random_3x3 = torch.randn((n, 3, 3), device=device)
random_3x3_T = torch.transpose(random_3x3, 1, 2)
random_sym_3x3 = (random_3x3 * random_3x3_T).contiguous()
return random_sym_3x3
@staticmethod
def create_diag_sym3x3(device, n, noise=0.0):
        # Create purely diagonal matrices
random_diag_3x3 = torch.randn((n, 3), device=device).diag_embed()
# Make them 'almost' diagonal
random_diag_3x3 += noise * TestSymEig3x3.create_random_sym3x3(device, n)
return random_diag_3x3
def setUp(self) -> None:
super().setUp()
torch.manual_seed(42)
self._gpu = get_random_cuda_device()
self._cpu = torch.device("cpu")
def test_is_eigen_gpu(self):
test_input = self.create_random_sym3x3(self._gpu, n=self.TEST_BATCH_SIZE)
self._test_is_eigen(test_input)
def test_is_eigen_cpu(self):
test_input = self.create_random_sym3x3(self._cpu, n=self.TEST_BATCH_SIZE)
self._test_is_eigen(test_input)
def _test_is_eigen(self, test_input, atol=1e-04, rtol=1e-02):
"""
Verify that values and vectors produced are really eigenvalues and eigenvectors
and can restore the original input matrix with good precision
"""
eigenvalues, eigenvectors = symeig3x3(test_input, eigenvectors=True)
self.assertClose(
test_input,
eigenvectors @ eigenvalues.diag_embed() @ eigenvectors.transpose(-2, -1),
atol=atol,
rtol=rtol,
)
def test_eigenvectors_are_orthonormal_gpu(self):
test_input = self.create_random_sym3x3(self._gpu, n=self.TEST_BATCH_SIZE)
self._test_eigenvectors_are_orthonormal(test_input)
def test_eigenvectors_are_orthonormal_cpu(self):
test_input = self.create_random_sym3x3(self._cpu, n=self.TEST_BATCH_SIZE)
self._test_eigenvectors_are_orthonormal(test_input)
def _test_eigenvectors_are_orthonormal(self, test_input):
"""
Verify that eigenvectors are an orthonormal set
"""
eigenvalues, eigenvectors = symeig3x3(test_input, eigenvectors=True)
batched_eye = torch.zeros_like(test_input)
batched_eye[..., :, :] = torch.eye(3, device=batched_eye.device)
self.assertClose(
batched_eye, eigenvectors @ eigenvectors.transpose(-2, -1), atol=1e-06
)
def test_is_not_nan_or_inf_gpu(self):
test_input = self.create_random_sym3x3(self._gpu, n=self.TEST_BATCH_SIZE)
self._test_is_not_nan_or_inf(test_input)
def test_is_not_nan_or_inf_cpu(self):
test_input = self.create_random_sym3x3(self._cpu, n=self.TEST_BATCH_SIZE)
self._test_is_not_nan_or_inf(test_input)
def _test_is_not_nan_or_inf(self, test_input):
eigenvalues, eigenvectors = symeig3x3(test_input, eigenvectors=True)
self.assertTrue(torch.isfinite(eigenvalues).all())
self.assertTrue(torch.isfinite(eigenvectors).all())
def test_degenerate_inputs_gpu(self):
self._test_degenerate_inputs(self._gpu)
def test_degenerate_inputs_cpu(self):
self._test_degenerate_inputs(self._cpu)
def _test_degenerate_inputs(self, device):
"""
Test degenerate case when input matrices are diagonal or near-diagonal
"""
# Purely diagonal case
test_input = self.create_diag_sym3x3(device, self.TEST_BATCH_SIZE)
self._test_is_not_nan_or_inf(test_input)
self._test_is_eigen(test_input)
self._test_eigenvectors_are_orthonormal(test_input)
# Almost-diagonal case
test_input = self.create_diag_sym3x3(device, self.TEST_BATCH_SIZE, noise=1e-4)
self._test_is_not_nan_or_inf(test_input)
self._test_is_eigen(test_input)
self._test_eigenvectors_are_orthonormal(test_input)
def test_gradients_cpu(self):
self._test_gradients(self._cpu)
def test_gradients_gpu(self):
self._test_gradients(self._gpu)
def _test_gradients(self, device):
"""
        Tests if gradients pass through without any problems (infs, nans, etc.) and
also performs gradcheck (compares numerical and analytical gradients)
"""
test_random_input = self.create_random_sym3x3(device, n=16)
test_diag_input = self.create_diag_sym3x3(device, n=16)
test_almost_diag_input = self.create_diag_sym3x3(device, n=16, noise=1e-4)
test_input = torch.cat(
(test_random_input, test_diag_input, test_almost_diag_input)
)
test_input.requires_grad = True
with torch.autograd.detect_anomaly():
eigenvalues, eigenvectors = symeig3x3(test_input, eigenvectors=True)
loss = eigenvalues.mean() + eigenvectors.mean()
loss.backward()
test_random_input.requires_grad = True
# Inputs are converted to double to increase the precision of gradcheck.
torch.autograd.gradcheck(
symeig3x3, test_random_input.double(), eps=1e-6, atol=1e-2, rtol=1e-2
)
def _test_eigenvalues_and_eigenvectors(
self, test_eigenvectors, test_eigenvalues, atol=1e-04, rtol=1e-04
):
test_input = (
test_eigenvectors.transpose(-2, -1)
@ test_eigenvalues.diag_embed()
@ test_eigenvectors
)
test_eigenvalues_sorted, _ = torch.sort(test_eigenvalues, dim=-1)
eigenvalues, eigenvectors = symeig3x3(test_input, eigenvectors=True)
self.assertClose(
test_eigenvalues_sorted,
eigenvalues,
atol=atol,
rtol=rtol,
)
self._test_is_not_nan_or_inf(test_input)
self._test_is_eigen(test_input, atol=atol, rtol=rtol)
self._test_eigenvectors_are_orthonormal(test_input)
def test_degenerate_eigenvalues_gpu(self):
self._test_degenerate_eigenvalues(self._gpu)
def test_degenerate_eigenvalues_cpu(self):
self._test_degenerate_eigenvalues(self._cpu)
def _test_degenerate_eigenvalues(self, device):
"""
Test degenerate eigenvalues like zero-valued and with 2-/3-multiplicity
"""
# Error tolerances for degenerate values are increased as things might become
# numerically unstable
deg_atol = 1e-3
deg_rtol = 1.0
# Construct random orthonormal sets
test_eigenvecs = random_rotations(n=self.TEST_BATCH_SIZE, device=device)
# Construct random eigenvalues
test_eigenvals = torch.randn(
(self.TEST_BATCH_SIZE, 3), device=test_eigenvecs.device
)
self._test_eigenvalues_and_eigenvectors(
test_eigenvecs, test_eigenvals, atol=deg_atol, rtol=deg_rtol
)
# First eigenvalue is always 0.0 here: [0.0 X Y]
test_eigenvals_with_zero = test_eigenvals.clone()
test_eigenvals_with_zero[..., 0] = 0.0
self._test_eigenvalues_and_eigenvectors(
test_eigenvecs, test_eigenvals_with_zero, atol=deg_atol, rtol=deg_rtol
)
# First two eigenvalues are always the same here: [X X Y]
test_eigenvals_with_multiplicity2 = test_eigenvals.clone()
test_eigenvals_with_multiplicity2[..., 1] = test_eigenvals_with_multiplicity2[
..., 0
]
self._test_eigenvalues_and_eigenvectors(
test_eigenvecs,
test_eigenvals_with_multiplicity2,
atol=deg_atol,
rtol=deg_rtol,
)
# All three eigenvalues are the same here: [X X X]
test_eigenvals_with_multiplicity3 = test_eigenvals_with_multiplicity2.clone()
test_eigenvals_with_multiplicity3[..., 2] = test_eigenvals_with_multiplicity2[
..., 0
]
self._test_eigenvalues_and_eigenvectors(
test_eigenvecs,
test_eigenvals_with_multiplicity3,
atol=deg_atol,
rtol=deg_rtol,
)
def test_more_dimensions(self):
"""
Tests if function supports arbitrary leading dimensions
"""
repeat = 4
test_input = self.create_random_sym3x3(self._cpu, n=16)
test_input_4d = test_input[None, ...].expand((repeat,) + test_input.shape)
eigenvalues, eigenvectors = symeig3x3(test_input, eigenvectors=True)
eigenvalues_4d, eigenvectors_4d = symeig3x3(test_input_4d, eigenvectors=True)
eigenvalues_4d_gt = eigenvalues[None, ...].expand((repeat,) + eigenvalues.shape)
eigenvectors_4d_gt = eigenvectors[None, ...].expand(
(repeat,) + eigenvectors.shape
)
self.assertClose(eigenvalues_4d_gt, eigenvalues_4d)
self.assertClose(eigenvectors_4d_gt, eigenvectors_4d)
``` |
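For orientation (not part of the test file), a minimal sketch of `symeig3x3` on a batch of symmetric 3x3 matrices; here the symmetric inputs are built with a matmul rather than the elementwise product used in `create_random_sym3x3`:
```python
import torch
from pytorch3d.common.workaround import symeig3x3

A = torch.randn(16, 3, 3)
A_sym = A @ A.transpose(-2, -1)  # symmetric (and PSD) 3x3 matrices
evals, evecs = symeig3x3(A_sym, eigenvectors=True)  # eigenvalues in ascending order
# Reconstruct the inputs from the spectral decomposition.
recon = evecs @ evals.diag_embed() @ evecs.transpose(-2, -1)
print((A_sym - recon).abs().max())  # expected to be small
```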
{
"source": "jkxing/stylized-neural-painting",
"score": 3
} |
#### File: jkxing/stylized-neural-painting/GUI.py
```python
import tkinter as tk
from tkinter import ttk
from PIL import Image, ImageTk
from tkinter import filedialog as fd
from tkinter import messagebox as mb
import matplotlib.pyplot as plt
import numpy as np
import cv2
class GUI(tk.Frame):
def __init__(self, parent = None):
tk.Frame.__init__(self, parent)
self.parent = parent
self.img_path = ''
self.save_path = ''
self.frame0 = tk.Frame(self, bd = 10)
self.frame0.pack()
self.path_label = tk.Label(self.frame0, text = '')
self.path_label.pack(side='left')
self.browseButton = tk.Button(self.frame0, text = 'Browse', command = self.openfile)
self.browseButton.pack(side = 'left')
self.slider_var = tk.IntVar()
self.slider = tk.Scale(self, from_=1, to=20, orient= 'horizontal', variable = self.slider_var, command = self.slider_changed)
self.slider.pack(pady = 10)
self.goButton = tk.Button(self, text = 'Paint', command = self.go, width = 20)
self.goButton.pack(pady = 10)
self.addButton = tk.Button(self, text = 'Add Area', command = self.add_area, width = 20)
self.addButton.pack(pady = 10)
self.saveButton = tk.Button(self, text = 'Save as...', command = self.savefile, width = 20)
self.saveButton.pack(pady = 10)
self.mark_val = 1
self.oval_size = 1
def paint(self, event):
python_green = "#476042"
x1, y1 = ( event.x - self.oval_size ), ( event.y - self.oval_size )
x2, y2 = ( event.x + self.oval_size ), ( event.y + self.oval_size )
for x in range(x1, x2+1) :
for y in range(y1, y2 + 1):
self.image_mask[y][x][0] = self.mark_val
self.image_mask[y][x][1] = self.mark_val
self.image_mask[y][x][2] = self.mark_val
self.canvas.create_oval( x1, y1, x2, y2, fill = python_green )
def add_area(self):
self.mark_val += 1
def slider_changed(self, event):
self.oval_size = self.slider_var.get()
# print(self.slider_var.get())
def go(self):
if (len(self.img_path) == 0):
            mb.showinfo('No image selected', 'Please browse an image to paint')
return
# img = plt.imread(self.img_path)
img = ImageTk.PhotoImage(Image.open(self.img_path))
offspring = tk.Toplevel()
offspring.title(self.img_path.split('/')[-1])
offspring.geometry('%sx%s' % (img.width()+10, img.height()+10))
self.image_mask = np.zeros((img.height(), img.width(), 3))
self.canvas = tk.Canvas(offspring, width=img.width(), height=img.height(),
borderwidth=0, highlightthickness=0)
self.canvas.pack(expand=True)
self.canvas.img = img # Keep reference in case this code is put into a function.
self.canvas.create_image(0, 0, image=img, anchor=tk.NW)
self.canvas.bind( "<B1-Motion>", self.paint )
offspring.mainloop()
def openfile(self):
self.img_path = fd.askopenfilename()
self.path_label.config(text = self.img_path)
def savefile(self):
self.save_path = fd.asksaveasfilename()
if len(self.save_path) == 0 :
mb.showinfo('Give destination', 'Please give a destination path')
return
cv2.imwrite(self.save_path, self.image_mask)
with open(self.save_path[:-4]+'.npy', 'wb') as f:
np.save(f, np.array(self.image_mask))
if __name__ == '__main__':
root = tk.Tk()
root.geometry('%sx%s' % (400, 300))
gui = GUI(root)
gui.pack()
root.mainloop()
``` |
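The `savefile` handler writes the drawn mask twice: once as an image via OpenCV and once as a raw NumPy array. A minimal sketch (not part of the repository) of reading that array back; the path is a placeholder:
```python
import numpy as np

# Hypothetical path matching what the GUI's "Save as..." dialog produced.
mask = np.load("my_mask.npy")
print(mask.shape)       # (image height, image width, 3)
print(np.unique(mask))  # 0 for unmarked pixels, 1..N for each added area
```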
{
"source": "jkyamog/Azure-Sentinel",
"score": 2
} |
#### File: Data Connectors/AzureFunctionCloudflare/state_manager.py
```python
from azure.storage.fileshare.aio import ShareClient
from azure.storage.fileshare.aio import ShareFileClient
from azure.core.exceptions import ResourceNotFoundError
import asyncio
class StateManagerAsync:
def __init__(self, connection_string, share_name='funcstatemarkershare', file_path='funcstatemarkerfile'):
self.connection_string = connection_string
self.share_name = share_name
self.file_path = file_path
self._lock = asyncio.Lock()
def _get_file_cli(self):
return ShareFileClient.from_connection_string(conn_str=self.connection_string, share_name=self.share_name, file_path=self.file_path)
def _get_share_cli(self):
return ShareClient.from_connection_string(conn_str=self.connection_string, share_name=self.share_name)
async def post(self, marker_text: str, validate_upload=True, tries=2):
async with self._lock:
if not validate_upload:
await self._upload_file(marker_text)
else:
count = 0
validated = False
while count < tries:
await self._upload_file(marker_text)
validated = await self._validate(marker_text)
count += 1
if validated:
break
if not validated:
raise Exception(f'File {self.share_name}/{self.file_path} was not saved correctly. Please update file manually.')
async def _upload_file(self, text):
file_cli = self._get_file_cli()
async with file_cli:
try:
await file_cli.upload_file(text, validate_content=True)
except ResourceNotFoundError:
share_cli = self._get_share_cli()
async with share_cli:
await share_cli.create_share()
await file_cli.upload_file(text, validate_content=True)
async def get(self):
file_cli = self._get_file_cli()
async with file_cli:
try:
cor = await file_cli.download_file()
f = await cor.readall()
return f.decode()
except ResourceNotFoundError:
return None
async def _validate(self, text):
content = await self.get()
return content == text
``` |
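A minimal sketch (not part of the repository) of driving this class from async code; the module name, connection string, and marker value are placeholder assumptions:
```python
import asyncio
from state_manager import StateManagerAsync  # module name assumed from the file path above

async def main():
    # Placeholder connection string; use the storage account of the Function App.
    state = StateManagerAsync(connection_string="<storage-connection-string>")
    last_marker = await state.get()           # None on the first run
    print("previous marker:", last_marker)
    await state.post("2021-01-01T00:00:00Z")  # validated upload (default tries=2)

asyncio.run(main())
```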
{
"source": "JKYang01/algorithm004-05",
"score": 4
} |
#### File: Week 1/id_215/215-Week01.py
```python
class Solution:
def removeDuplicates(self, nums: List[int]) -> int:
# First, I think we can just list(set(num)) to get the result
# but the type erro occurs, seems like we cannot use the set() function
# Scond, I conder about using the pointer to get the result
# we set i as pointer and move to the end when the value of the next elment
# is the same to the previous element, then we delete it
# The time complexity is O(n) because of one loop
i = 0
while i < len(nums): # the pionter
if nums[i]==nums[i-1]:
del(nums[i])
else:
i+=1
return i
# Another solution (from discussion):
# set two pointers i nad j, the pionter i store the unduplicated result
# let the pionter j move around
# def (self, nums:list[int])->int:
# i,j=1,1 # set two pointers
# while j < len (nums):
# if nums[i-1]!=nums[j]:
# nums[i]=nums[j] # # the pionter move from i to j under this condition
# i+=1 # the pinter i go to next
# j+=1 # move j under while condition
# return i
# leet code #21
class Solution:
# these two are linked lists not array and they are not iterable
# so we cannot use the for loop and we have to get the value by using .next
def mergeTwoLists(self, l1: ListNode, l2: ListNode) -> ListNode:
# the edge cases:
if l1 is None:
return l2
elif l2 is None:
return l1
elif l1.val <l2.val:
l1.next = self.mergeTwoLists(l1.next,l2) # recursively merge
return l1
else:
            l2.next = self.mergeTwoLists(l1, l2.next)
            return l2
    def mergeTwoLists2(self, l1: ListNode, l2: ListNode) -> ListNode:
        # set a dummy reference node named prev that will not change
prev = ListNode(0)
head = prev
while l1 and l2:
if l1.val<=l2.val:
head.next=l1 # the next node of head is node in l1
l1 = l1.next
else:
head.next=l2 # the next node is node in l2
l2= l2.next
head = head.next # append node and move to next
if l1 is None and l2 is not None:
head.next = l2
elif l1 is not None and l2 is None:
head.next = l1
        # return the head of the merged list
        return prev.next  # not 'head': head has advanced to the tail during the loop
``` |
{
"source": "JKybelka/SpectralCluster",
"score": 3
} |
#### File: SpectralCluster/spectralcluster/refinement.py
```python
import abc
import numpy as np
from scipy.ndimage import gaussian_filter
DEFAULT_REFINEMENT_SEQUENCE = [
"CropDiagonal",
"GaussianBlur",
"RowWiseThreshold",
"Symmetrize",
"Diffuse",
"RowWiseNormalize",
]
class RefinementOptions(object):
"""Refinement options for the affinity matrix."""
def __init__(self,
gaussian_blur_sigma=1,
p_percentile=0.95,
thresholding_soft_multiplier=0.01,
thresholding_with_row_max=True,
refinement_sequence=DEFAULT_REFINEMENT_SEQUENCE):
"""Initialization of the refinement arguments.
Args:
gaussian_blur_sigma: sigma value of the Gaussian blur operation
p_percentile: the p-percentile for the row wise thresholding
      thresholding_soft_multiplier: the multiplier for the soft threshold; if this
        value is 0, then it is hard thresholding
thresholding_with_row_max: if true, we use row_max * p_percentile as row
wise threshold, instead of doing a percentile-based thresholding
refinement_sequence: a list of strings for the sequence of refinement
operations to apply on the affinity matrix
"""
self.gaussian_blur_sigma = gaussian_blur_sigma
self.p_percentile = p_percentile
self.thresholding_soft_multiplier = thresholding_soft_multiplier
self.thresholding_with_row_max = thresholding_with_row_max
self.refinement_sequence = refinement_sequence
class AffinityRefinementOperation(metaclass=abc.ABCMeta):
"""Refinement of the affinity matrix."""
def check_input(self, affinity):
"""Check the input to the refine() method.
Args:
affinity: the input affinity matrix.
Raises:
TypeError: if affinity has wrong type
ValueError: if affinity has wrong shape, etc.
"""
if not isinstance(affinity, np.ndarray):
raise TypeError("affinity must be a numpy array")
shape = affinity.shape
if len(shape) != 2:
raise ValueError("affinity must be 2-dimensional")
if shape[0] != shape[1]:
raise ValueError("affinity must be a square matrix")
@abc.abstractmethod
def refine(self, affinity):
"""An abstract method to perform the refinement operation.
Args:
affinity: the affinity matrix, of size (n_samples, n_samples)
Returns:
a matrix of the same size as affinity
"""
pass
class CropDiagonal(AffinityRefinementOperation):
"""Crop the diagonal.
Replace diagonal element by the max non-diagonal value of row.
After this operation, the matrix has similar properties to a standard
Laplacian matrix. This also helps to avoid the bias during Gaussian blur and
normalization.
"""
def refine(self, affinity):
self.check_input(affinity)
refined_affinity = np.copy(affinity)
np.fill_diagonal(refined_affinity, 0.0)
di = np.diag_indices(refined_affinity.shape[0])
refined_affinity[di] = refined_affinity.max(axis=1)
return refined_affinity
class GaussianBlur(AffinityRefinementOperation):
"""Apply Gaussian blur."""
def __init__(self, sigma=1):
self.sigma = sigma
def refine(self, affinity):
self.check_input(affinity)
return gaussian_filter(affinity, sigma=self.sigma)
class RowWiseThreshold(AffinityRefinementOperation):
"""Apply row wise thresholding."""
def __init__(self,
p_percentile=0.95,
thresholding_soft_multiplier=0.01,
thresholding_with_row_max=False):
self.p_percentile = p_percentile
self.multiplier = thresholding_soft_multiplier
self.thresholding_with_row_max = thresholding_with_row_max
def refine(self, affinity):
self.check_input(affinity)
refined_affinity = np.copy(affinity)
if self.thresholding_with_row_max:
# Row_max based thresholding
row_max = refined_affinity.max(axis=1)
row_max = np.expand_dims(row_max, axis=1)
is_smaller = refined_affinity < (row_max * self.p_percentile)
else:
# Percentile based thresholding
row_percentile = np.percentile(
refined_affinity, self.p_percentile * 100, axis=1)
row_percentile = np.expand_dims(row_percentile, axis=1)
is_smaller = refined_affinity < row_percentile
refined_affinity = (refined_affinity * np.invert(is_smaller)) + (
refined_affinity * self.multiplier * is_smaller)
return refined_affinity
class Symmetrize(AffinityRefinementOperation):
"""The Symmetrization operation."""
def refine(self, affinity):
self.check_input(affinity)
return np.maximum(affinity, np.transpose(affinity))
class Diffuse(AffinityRefinementOperation):
"""The diffusion operation."""
def refine(self, affinity):
self.check_input(affinity)
return np.matmul(affinity, np.transpose(affinity))
class RowWiseNormalize(AffinityRefinementOperation):
"""The row wise max normalization operation."""
def refine(self, affinity):
self.check_input(affinity)
refined_affinity = np.copy(affinity)
row_max = refined_affinity.max(axis=1)
refined_affinity /= np.expand_dims(row_max, axis=1)
return refined_affinity
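# Minimal usage sketch (added for illustration, not part of the original module):
# chain a few of the refinement operations above on a toy 3x3 affinity matrix.
if __name__ == "__main__":
    toy_affinity = np.array([[1.0, 0.2, 0.1],
                             [0.2, 1.0, 0.4],
                             [0.1, 0.4, 1.0]])
    refined = CropDiagonal().refine(toy_affinity)
    refined = GaussianBlur(sigma=1).refine(refined)
    refined = RowWiseNormalize().refine(refined)
    print(refined)  # still a (3, 3) matrix, with each row scaled to a max of 1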
``` |
{
"source": "jkyeung/fonttools-intro",
"score": 4
} |
#### File: fonttools-intro/examples/00-ttfont-template.py
```python
import sys
from fontTools.ttLib import TTFont
# sets 'fontPath' variable to use the font path you pass in
fontPath = sys.argv[1]
# MAIN FUNCTION
def main(fontPath):
# open font with TTFont
font = TTFont(fontPath)
# font['name'] gets the name table,
# getName gets a specified name ID (1), platform (3 for Windows), and platEncID (1)
name1 = str(font['name'].getName(1, 3, 1))
# print the result
print(f"\n\t→ name1 is '{name1}'\n")
# help(font['name']) # use help() for more methods on a given table
# run main function
main(fontPath)
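# To run (sketch; the font path is just an example):
#   python 00-ttfont-template.py path/to/SomeFont.ttf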
```
#### File: fonttools-intro/examples/01-ttfont-name-updates.py
```python
import sys
from fontTools.ttLib import TTFont
# sets 'fontPath' variable to use the font path you pass in
fontPath = sys.argv[1]
newFamilyName = sys.argv[2]
print(fontPath)
print(newFamilyName)
# GET / SET NAME HELPER FUNCTIONS
def getFontNameID(font, ID, platformID=3, platEncID=1):
name = str(font['name'].getName(ID, platformID, platEncID))
return name
def setFontNameID(font, ID, newName):
print(f"\n\t• name {ID}:")
    winIDs = {"platformID": 3, "platEncID": 1, "langID": 0x409}
    macIDs = {"platformID": 1, "platEncID": 0, "langID": 0x0}
oldMacName = font['name'].getName(ID, *macIDs.values())
oldWinName = font['name'].getName(ID, *winIDs.values())
if oldMacName != newName:
print(f"\n\t\t Mac name was '{oldMacName}'")
font['name'].setName(newName, ID, *macIDs.values())
print(f"\n\t\t Mac name now '{newName}'")
if oldWinName != newName:
print(f"\n\t\t Win name was '{oldWinName}'")
font['name'].setName(newName, ID, *winIDs.values())
print(f"\n\t\t Win name now '{newName}'")
# MAIN FUNCTION
def main(fontPath, newFamilyName):
# open font with TTFont
font = TTFont(fontPath)
# useful to go backwards to start with name16, the Typographic Family name
# (sadly, name16 is not in every font; you may have to manually pass in an "Old Name" for other font files)
# Name 16: Typographic Family name – "Recursive"
name16 = getFontNameID(font, 16) # provides the basic family name
newName16 = name16.replace(name16, newFamilyName) # replaces family name
setFontNameID(font, 16, newName16)
# Name 6: PostScript name – "Recursive-SansLinearLight"
name6 = getFontNameID(font, 6)
newName6 = name6.replace(name6.replace(" ",""), newFamilyName.replace(" ","")) # update name, make sure you have no spaces
setFontNameID(font, 6, newName6)
# Name 4: Full font name – "Recursive Sans Linear Light"
name4 = getFontNameID(font, 4)
newName4 = name4.replace(name4, newFamilyName)
setFontNameID(font, 4, newName4)
# Name 3: Unique font identifier – "1.053;ARRW;Recursive-SansLinearLight"
name3 = getFontNameID(font, 3)
newName3 = name3.replace(name6, newName6) # name 3 includes name6 as a substring
setFontNameID(font, 3, newName3)
# Name 1: Font Family name – "Recursive Sans Linear Light"
name1 = getFontNameID(font, 1) # provides the basic family name
newName1 = name1.replace(name1, newFamilyName) # replaces family name
setFontNameID(font, 1, newName1)
# make new path to save to (assume old family name is in path)
savePath = fontPath.replace(name16.replace(" ",""), newFamilyName.replace(" ",""))
font.save(savePath)
main(fontPath, newFamilyName)
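# To run (sketch; the arguments are examples): pass a font path and the new family name
#   python 01-ttfont-name-updates.py path/to/SomeFont.ttf "New Family Name"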
```
#### File: fonttools-intro/examples/02-make-trial-font.py
```python
import os
import sys
from fontTools.ttLib import TTFont
from fontTools.subset import main as subsetter
import struct
# GET / SET NAME HELPER FUNCTIONS
def getFontNameID(font, ID, platformID=3, platEncID=1):
name = str(font['name'].getName(ID, platformID, platEncID))
return name
def setFontNameID(font, ID, newName):
print(f"\n\t• name {ID}:")
    winIDs = {"platformID": 3, "platEncID": 1, "langID": 0x409}
    macIDs = {"platformID": 1, "platEncID": 0, "langID": 0x0}
oldMacName = font['name'].getName(ID, *macIDs.values())
oldWinName = font['name'].getName(ID, *winIDs.values())
if oldMacName != newName:
print(f"\n\t\t Mac name was '{oldMacName}'")
font['name'].setName(newName, ID, *macIDs.values())
print(f"\n\t\t Mac name now '{newName}'")
if oldWinName != newName:
print(f"\n\t\t Win name was '{oldWinName}'")
font['name'].setName(newName, ID, *winIDs.values())
print(f"\n\t\t Win name now '{newName}'")
def listUnicodeRanges(unicodeRanges):
# remove "U+"" from ranges
unicodeRanges = unicodeRanges.replace("U+", "").replace(" ", "")
# create set
unicodesIncluded = set()
# split up separate ranges by commas
for unicodeChunk in unicodeRanges.split(","):
# if it's a range...
if "-" in unicodeChunk:
# get start and end of range
start, end = unicodeChunk.split("-")
# go through range and add each value to the set
for unicodeInteger in range(int(start,16), int(end,16)+1):
unicodesIncluded.add(unicodeInteger)
# if it's a single unicode...
else:
unicodesIncluded.add(int(unicodeChunk,16))
return unicodesIncluded
def main():
# get arguments from argparse
args = parser.parse_args()
for fontPath in args.fontPaths:
# open font at TTFont object
ttfont = TTFont(fontPath)
filetype = fontPath.split(".")[-1]
# get set of unicode ints in font
rangeInFont = {x for x in ttfont["cmap"].getBestCmap()}
unicodesToKeep = listUnicodeRanges(args.unicodes)
unicodesToHide = {intUnicode for intUnicode in rangeInFont if intUnicode not in unicodesToKeep}
# get cmap of font, find unicode for glyph with name of replacerGlyph
try:
if "U+" in args.replacer:
replacerGlyphUnicode = args.replacer.replace("U+","")
else:
replacerGlyphUnicode = list(ttfont["cmap"].buildReversed()[args.replacer])[0]
unicodesToKeep.add(replacerGlyphUnicode)
if replacerGlyphUnicode in unicodesToHide:
unicodesToHide.remove(replacerGlyphUnicode) # TODO: check if this fails if item not in set
except KeyError:
print("\nReplacer glyph has no unicode; try checking the font file to copy in an exact name.\n")
print("Stopping execution.\n")
break
# make path of newly-subset font
tempSubsetPath = fontPath.replace(f".{filetype}",f".subset.{filetype}")
# subset input font to remove glyphs that are being hidden
sys.argv = [None, fontPath, f'--unicodes={args.unicodes}', '--name-IDs="*"', '--notdef-outline', f'--output-file={tempSubsetPath}']
subsetter() # this is what actually does the subsetting and writes the output file
# -------------------------------------------------------------------------------------------------
# then, add many additional unicodes to the replacer glyph to cover all diacritics, etc
for table in ttfont['cmap'].tables:
for c in unicodesToHide:
table.cmap[c] = args.replacer
# -------------------------------------------------------------------------------------------------
# update font names
familyName = getFontNameID(ttfont, 16)
nameSuffix = args.suffix
# MUST check if familyName is not 'None', or this doesn't work (e.g. can't just check if None)
if familyName != 'None':
newFamName = familyName + f" {nameSuffix}"
setFontNameID(ttfont, 16, newFamName)
else:
familyName = getFontNameID(ttfont, 1)
newFamName = familyName + f" {nameSuffix}"
print("familyName is", familyName)
# UPDATE NAME ID 6, postscript name
# Format: FamilynameTrial-Stylename
currentPsName = getFontNameID(ttfont, 6)
newPsName = currentPsName.replace('-',f'{nameSuffix}-')
setFontNameID(ttfont, 6, newPsName)
# UPDATE NAME ID 4, full font name
# Format: Familyname Trial Stylename
currentFullName = getFontNameID(ttfont, 4)
newFullName = currentFullName.replace(familyName,f'{familyName} {nameSuffix}')
setFontNameID(ttfont, 4, newFullName)
# UPDATE NAME ID 3, unique font ID
# Format: 1.001;ARRW;FamilynameTrial-Stylename
currentUniqueName = getFontNameID(ttfont, 3)
newUniqueName = currentUniqueName.replace('-',f'{nameSuffix}-')
setFontNameID(ttfont, 3, newUniqueName)
        # UPDATE NAME ID 1, font family name
        # Format: Familyname Trial OR Familyname Trial Style (if not Regular, Italic, Bold, or Bold Italic)
currentFamName = getFontNameID(ttfont, 1)
newFamNameOne = currentFamName.replace(familyName,newFamName)
setFontNameID(ttfont, 1, newFamNameOne)
# -------------------------------------------------------------------------------------------------
# save font with ".trial" added to name
ttfont.save(tempSubsetPath.replace(f".subset.{filetype}",f".trial.{filetype}"))
# clean up temp subset font
os.remove(tempSubsetPath)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='Make a "trial font" from OpenType font files, keeping characters for specified unicodes, while hiding the rest.')
parser.add_argument('fontPaths',
help='Path(s) to font file(s)',
nargs="+")
parser.add_argument("-u", "--unicodes",
default="U+0020-0039, U+003A-005A, U+0061-007A, U+2018-201D, U+005B, U+005D",
help='String of unicodes or unicode ranges to keep, comma-separated. Default is a basic Latin set: "U+0020-0039, U+003A-005A, U+0061-007A, U+2018-201D, U+005B, U+005D"')
parser.add_argument('-r','--replacer',
default="X",
                        help='Name of glyph that will replace the hidden unicodes. If you wish to use a unicode, start with "U+" like "U+0058". The glyph must be in the font & cannot be ".notdef". Default: "X". ')
parser.add_argument('-s','--suffix',
default="Trial",
help='Suffix to add to trial font names. Default: "Trial".')
main()
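# To run (sketch; the font path and option values are examples):
#   python 02-make-trial-font.py path/to/SomeFont.ttf -u "U+0020-005A" -r "X" -s "Trial"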
``` |
{
"source": "jkyeung/python-itoolkit",
"score": 2
} |
#### File: itoolkit/transport/direct.py
```python
import sys
from .base import XmlServiceTransport
try:
from . import _direct
_available = hasattr(_direct, '_xmlservice')
except ImportError:
# For Sphinx build
_available = False
__all__ = [
'DirectTransport'
]
class DirectTransport(XmlServiceTransport):
"""Call XMLSERVICE directly in-process using _ILECALL
Args:
**kwargs: Base transport options. See `XmlServiceTransport`.
"""
def __init__(self, **kwargs):
super(DirectTransport, self).__init__(**kwargs)
def call(self, tk):
if not _available:
raise RuntimeError("Not supported on this platform")
data = _direct._xmlservice(tk.xml_in(), self.ctl, self.ipc)
if sys.version_info >= (3, 0):
return data.decode('utf-8')
else:
return data
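    # Usage sketch (an assumption based on the itoolkit package layout, not taken
    # from this file; only works on IBM i where the _direct extension is available):
    #   from itoolkit import iToolKit, iCmd
    #   itool = iToolKit()
    #   itool.add(iCmd('rtvjoba', 'RTVJOBA USRLIBL(?) SYSLIBL(?)'))
    #   itool.call(DirectTransport())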
``` |
{
"source": "jkyeung/rpc-source-transport",
"score": 3
} |
#### File: rpc-source-transport/src/server.py
```python
import os
from SimpleXMLRPCServer import SimpleXMLRPCServer
from SimpleXMLRPCServer import SimpleXMLRPCRequestHandler
from base64 import b64encode, b64decode
import os400
import file400
SERVER_IP = '10.x.x.x' # SYSTEMA
PORT = 8000
# RTVMBRD is a little helper CLP that wraps the RTVMBRD command.
rtvmbrd = os400.Program('RTVMBRD', 'MISEXE',
(('c', 10), ('c', 10), ('c', 10), ('c', 10), ('c', 512)))
def mbr_text(lib, fil, mbr):
rtvmbrd(lib, fil, mbr, 'TEXT', '')
return rtvmbrd[4]
def mbr_srctype(lib, fil, mbr):
rtvmbrd(lib, fil, mbr, 'SRCTYPE', '')
return rtvmbrd[4]
# Restrict to a particular path.
class RequestHandler(SimpleXMLRPCRequestHandler):
rpc_paths = ('/RPC2',)
# Create server.
shutdown_request = False
server = SimpleXMLRPCServer((SERVER_IP, PORT), requestHandler=RequestHandler)
def shutdown():
global shutdown_request
shutdown_request = True
return 'Shutdown request submitted.'
server.register_function(shutdown)
def put(from_srctype, from_text, data, to_lib, to_file, to_mbr):
# Try to clear the member. If that fails, assume the member doesn't
# exist and try to add it.
if os.system("clrpfm {}/{} {}".format(to_lib, to_file, to_mbr)):
if os.system("addpfm {}/{} {}".format(to_lib, to_file, to_mbr)):
return 'Could not add member.'
# Copy the received data into the member. Note that the source data
# has been Base64-encoded to make it safe for XML transport.
    f = file400.File400(to_file, 'a', lib=to_lib, mbr=to_mbr)
for line in data:
f['SRCSEQ'] = line[0]
f['SRCDAT'] = line[1]
f['SRCDTA'] = b64decode(line[2])
f.write()
f.close()
# Change the source type and member text.
template = "chgpfm {}/{} {} srctype({}) text('{}')"
os.system(template.format(
to_lib, to_file, to_mbr, from_srctype, from_text.replace("'", "''")))
return 'Transfer successful.'
server.register_function(put)
def get(from_lib, from_file, from_mbr):
data = []
    f = file400.File400(from_file, lib=from_lib, mbr=from_mbr)
try:
f.posf()
except file400.error:
return 'Could not read member.'
# If it looks like a source member, Base64-encode the data.
if f.fieldList() == ('SRCSEQ', 'SRCDAT', 'SRCDTA'):
while not f.readn():
data.append((f['SRCSEQ'], f['SRCDAT'], b64encode(f['SRCDTA'])))
# For any other member, just grab all the fields as-is.
else:
while not f.readn():
data.append(f.get())
from_text = mbr_text(from_lib, from_file, from_mbr)
from_srctype = mbr_srctype(from_lib, from_file, from_mbr)
return from_srctype, from_text, data
server.register_function(get)
# Run the server's main loop.
while not shutdown_request:
server.handle_request()
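# Client-side sketch (not part of this file; Python 2 xmlrpclib assumed, matching
# the Python 2 SimpleXMLRPCServer used above; library/member names are examples):
#   import xmlrpclib
#   proxy = xmlrpclib.ServerProxy('http://10.x.x.x:8000/RPC2')
#   srctype, text, data = proxy.get('MYLIB', 'QRPGLESRC', 'MYMBR')
#   proxy.put(srctype, text, data, 'OTHERLIB', 'QRPGLESRC', 'MYMBR')
#   proxy.shutdown()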
``` |
{
"source": "jkyleung/python_stl_mesh",
"score": 3
} |
#### File: jkyleung/python_stl_mesh/circle.py
```python
import numpy as np
from stl import mesh
import math
import matplotlib.pyplot as plt
class Circle():
def __init__(self, center=[0,0,0], radius=1, details=10):
self.center = center
self.radius = radius
self.details = details
self.vertices = []
angle = math.pi * 2 / details
temp_pt = [radius, 0, 0]
for i in range(details):
# rotation matrix
R = np.array([[math.cos(angle*i), math.sin(angle*i), 0],
[-math.sin(angle*i), math.cos(angle*i), 0],
[0, 0, 1]])
self.vertices.append(np.dot(temp_pt, R)+center)
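# Example (added for illustration): a circle approximated by 20 vertices of radius 2
#   c = Circle(center=[0, 0, 0], radius=2, details=20)
#   len(c.vertices)  # -> 20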
```
#### File: jkyleung/python_stl_mesh/stl_cylinder.py
```python
import numpy as np
from stl import mesh
from cylinder import Cylinder
def make_cylinder(pt1, pt2, radius, details=50):
c = Cylinder(pt1, pt2, radius, details)
faces = []
v_base = c.base_circle.vertices
for i in range(details):
faces.append(np.array([c.base_circle.center, v_base[i], v_base[(i+1)%details]]))
v_top = c.top_circle.vertices
for i in range(details):
faces.append([c.top_circle.center, v_top[i], v_top[(i+1)%details]])
for i in range(details):
faces.append([v_base[i], v_base[(i+1)%details], v_top[i]])
faces.append([v_base[(i+1)%details], v_top[(i+1)%details], v_top[i]])
f = np.array(faces)
cylinder = mesh.Mesh(np.zeros(f.shape[0], dtype=mesh.Mesh.dtype))
for i in range(f.shape[0]):
cylinder.vectors[i] = faces[i]
cylinder.save('cylinder.stl')
if __name__ == '__main__':
make_cylinder([0,0,0], [1,1,1], 1)
```
#### File: jkyleung/python_stl_mesh/stl_sphere.py
```python
import numpy as np
from stl import mesh
from sphere import Sphere
def make_sphere(pt, radius, details=30):
s = Sphere(pt, radius, details)
faces = []
v_top = s.top
v_base = s.base
for i in range(details):
faces.append(np.array([v_top, s.circle_list[0].vertices[i], s.circle_list[0].vertices[(i+1)%details]]))
for i in range(details):
faces.append(np.array([v_base, s.circle_list[-1].vertices[i], s.circle_list[-1].vertices[(i+1)%details]]))
if details > 3:
for i in range(details-3):
for j in range(details):
faces.append([s.circle_list[i].vertices[j], s.circle_list[i].vertices[(j+1)%details], s.circle_list[i+1].vertices[j]])
faces.append([s.circle_list[i].vertices[(j+1)%details], s.circle_list[i+1].vertices[j], s.circle_list[i+1].vertices[(j+1)%details]])
f = np.array(faces)
sphere = mesh.Mesh(np.zeros(f.shape[0], dtype=mesh.Mesh.dtype))
for i in range(f.shape[0]):
sphere.vectors[i] = faces[i]
sphere.save('sphere.stl')
if __name__ == '__main__':
make_sphere([0,0,0], 1)
``` |
{
"source": "jkylling/fdb-kubernetes-operator",
"score": 3
} |
#### File: sample-apps/data-loader/app.py
```python
import argparse
import random
import uuid
import fdb
fdb.api_version(600)
@fdb.transactional
def write_batch(tr, batch_size, value_size):
prefix = uuid.uuid4()
for index in range(1, batch_size+1):
key = fdb.tuple.pack((prefix, index))
value = []
for _ in range(0, value_size):
value.append(random.randint(0, 255))
tr[key] = bytes(value)
pass
def load_data(keys, batch_size, value_size):
batch_count = int(keys / batch_size)
db = fdb.open()
for batch in range(1, batch_count+1):
print('Writing batch %d' % batch)
write_batch(db, batch_size, value_size)
pass
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Load random data into FDB")
parser.add_argument('--keys', type=int, help='Number of keys to generate', default=100000)
parser.add_argument('--batch-size', type=int, help='Number of keys to write in each transaction', default=10)
parser.add_argument('--value-size', type=int, help='Number of bytes to include in each value', default=1000)
args = parser.parse_args()
load_data(args.keys, args.batch_size, args.value_size)
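# To run (sketch; values are the defaults and a reachable FoundationDB cluster is assumed):
#   python app.py --keys 100000 --batch-size 10 --value-size 1000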
``` |
{
"source": "j-kyoda/tbldifconv",
"score": 3
} |
#### File: j-kyoda/tbldifconv/tbldifconv.py
```python
import argparse
import base64
import re
def parse_entry(lines):
"""Read lines and make entry object
Arguments:
lines -- entry lines
Returns:
entry object
"""
entry = {}
for line in lines:
line = line.replace('\n', '').replace('\r', '')
if ':: ' in line:
(key, value) = line.split(':: ')
value = base64.b64decode(value).decode('utf-8')
elif ': ' in line:
(key, value) = line.split(': ')
else:
continue
if key not in entry:
entry[key] = []
entry[key].append(value)
return entry
def adjust_entry_ldap(entry, base_path):
"""Adjust entry for LDAP
Arguments:
entry -- entry object
base_path -- ldap base path
Returns:
Nothing.
"""
if 'mail' not in entry:
return
mail = entry['mail'][0]
# remove attribute if exist
rm_keys = ['modifytimestamp', 'birthyear', 'birthday']
for rm_key in rm_keys:
if rm_key in entry:
del entry[rm_key]
# append attribute
if 'cn' not in entry:
entry['cn'] = [mail]
if 'sn' not in entry:
entry['sn'] = [mail]
# replace attribute
if 'dn' in entry:
entry['dn'] = [f'mail={mail},{base_path}']
def adjust_entry_thunderbird(entry):
"""Adjust entry for Thunderbird
Arguments:
entry -- entry object
Returns:
Nothing.
"""
if 'mail' not in entry:
return
mail = entry['mail'][0]
# append
if 'modifytimestamp' not in entry:
entry['modifytimestamp'] = ['0']
# remove
    if 'cn' in entry:
        if entry['cn'] == [mail]:  # attribute values are stored as lists
            del entry['cn']
    if 'sn' in entry:
        if entry['sn'] == [mail]:
            del entry['sn']
# replace attribute
if 'dn' in entry:
if 'cn' in entry:
cn = entry['cn'][0]
entry['dn'] = [f"cn={cn},mail={mail}"]
else:
entry['dn'] = [f'mail={mail}']
def dump_entry_for_ldap(entry):
"""Dump entry for LDAP ldif
Arguments:
entry -- entry object
Returns:
Nothing.
"""
# formatting
lines = []
for (key, values) in entry.items():
for value in values:
line = f'{key}: {value}'
lines.append(line)
lines.append('')
# output
for line in lines:
print(line)
def dump_entry_for_thunderbird(entry):
"""Dump entry for Thunderbird ldif
Arguments:
entry -- entry object
Returns:
Nothing.
"""
# formatting
    reg = re.compile(r'^[-\w\d\s\.@?]+$', flags=re.ASCII)
lines = []
for (key, values) in entry.items():
for value in values:
need_escape = False
if key != 'dn':
if not reg.match(value):
need_escape = True
else:
if 'cn' in entry:
cn = entry['cn'][0]
if not reg.match(cn):
need_escape = True
if need_escape:
b = value.encode('utf-8')
value_ = base64.b64encode(b).decode('utf-8')
line = f'{key}:: {value_}'
else:
line = f'{key}: {value}'
lines.append(line)
lines.append('')
# output
for line in lines:
print(line)
def convert(fobj, base_path=''):
"""Convert ldif
Arguments:
fobj -- Thunderbird ldif file object
base_path -- ldap base path(when convert to LDAP ldif)
Returns:
Nothing.
"""
lines = []
for line in fobj:
line = line.replace('\n', '').replace('\r', '')
if line:
lines.append(line)
continue
# convert and dump
someone = parse_entry(lines)
if base_path:
# Thunderbird -> LDAP
adjust_entry_ldap(someone, base_path)
dump_entry_for_ldap(someone)
else:
# LDAP -> Thunderbird
adjust_entry_thunderbird(someone)
dump_entry_for_thunderbird(someone)
lines = []
def main():
"""Main routine
Parse arguments and call subrouteine.
"""
parser = argparse.ArgumentParser(
description='Convert Thunderbird address ldif to your LDAP ldif,'
' or the reverse.')
parser.add_argument('-b',
metavar='BASE_PATH',
dest='base_path',
default='',
help='ldap base path')
parser.add_argument('-f',
metavar='FILE',
dest='fname',
type=argparse.FileType(),
required=True,
help='ldif file')
args = parser.parse_args()
convert(args.fname, args.base_path)
if __name__ == '__main__':
main()
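# To run (sketch; the base path and file names are examples):
#   Thunderbird -> LDAP:  python tbldifconv.py -f thunderbird.ldif -b "ou=people,dc=example,dc=com"
#   LDAP -> Thunderbird:  python tbldifconv.py -f ldap.ldif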
``` |
{
"source": "j-kyoda/vcfldifconv",
"score": 3
} |
#### File: j-kyoda/vcfldifconv/vcfconv.py
```python
import argparse
import base64
import quopri
def get_line(f):
"""Get logically line which separated physically
Arguments:
f -- file object
Returns:
Yield logically line
"""
chain_next = False # QUOTED-PRINTABLE
chain_now = False # chain flag
chain_previous = False # BASE64
chain_stop = False # BASE64
chanks = []
for chank in f:
chank = chank.replace('\n', '').replace('\r', '')
chain_next = False
if chank.endswith('='):
chank = chank[:-1]
chain_next = True
chain_previous = False
if chank.startswith(' '):
chank = chank[1:]
chain_previous = True
chain_stop = False
        if chank == '':
chain_stop = True
if chanks and not chain_now and not chain_previous:
line = ''.join(chanks)
chanks = []
yield line
if chain_next:
# chain next
chanks.append(chank)
chain_now = True
continue
if chain_previous:
# start chain
chanks.append(chank)
chain_now = True
continue
if chain_stop:
# stop chain
line = ''.join(chanks)
chanks = []
chain_now = False
yield line
continue
if chanks and chain_now is True:
# no more chain next
chanks.append(chank)
line = ''.join(chanks)
chanks = []
chain_now = False
yield line
continue
if chanks:
# no chain
line = ''.join(chanks)
chanks = []
yield line
chanks.append(chank)
continue
if chanks:
# no chain
line = ''.join(chanks)
chanks = []
yield line
def parse_tag(tag):
"""parse vCard tag
Arguments:
tag -- vCard tag
Returns:
        Return tag dictionary.
"""
head = {}
for (idx, name) in enumerate(tag.split(';')):
if idx == 0:
head['name'] = name
continue
if '=' in name:
(tag, val) = name.split('=', maxsplit=1)
head[tag] = val
continue
if 'TYPE' not in head:
head['TYPE'] = []
head['TYPE'].append(name)
return head
def parse_line(line):
"""parse vCard line
Arguments:
line -- vCard line
Returns:
        Return tuple (tag dictionary, value).
"""
(tag, value) = line.split(':', maxsplit=1)
head = parse_tag(tag)
if 'ENCODING' in head:
if head['ENCODING'] == 'QUOTED-PRINTABLE':
value = quopri.decodestring(value)
elif head['ENCODING'] == 'BASE64':
value = base64.b64decode(value)
if 'CHARSET' in head:
enc = head['CHARSET']
value = value.decode(enc)
return (head, value)
def parse_entry(lines):
"""Make entry from vCard lines
Arguments:
lines -- vCard lines
Returns:
        Return entry object.
"""
entry = {'mail': []}
for line in lines:
t = line.split(':', maxsplit=1)
if len(t) != 2:
continue
# analyze line
(head, value) = parse_line(line)
name = head['name']
# givenName
# sn
if name == 'N':
(val1, val2, dummy) = value.split(';', maxsplit=2)
if val1:
entry['sn'] = val1
if val2:
entry['givenName'] = val2
continue
# cn
if name == 'FN':
entry['cn'] = value
continue
# mail
if name == 'EMAIL':
entry['mail'].append(value)
continue
# description
if name == 'NOTE':
entry['description'] = value
continue
# tel
if name == 'TEL':
            if 'CELL' in head['TYPE']:
entry['mobile'] = value
continue
elif 'HOME' in head['TYPE']:
entry['homePhone'] = value
continue
elif 'WORK' in head['TYPE']:
entry['telephoneNumber'] = value
continue
else:
entry['homePhone'] = value
continue
if name == 'ADR':
if 'HOME' in head['TYPE']:
entry['homePostalAddress'] = value.replace(';', ' ').strip()
continue
elif 'WORK' in head['TYPE']:
entry['mozillaWorkStreet2'] = value.replace(';', ' ').strip()
continue
else:
entry['homePostalAddress'] = value.replace(';', ' ').strip()
continue
return entry
def split_entry(fobj):
"""Read entry and return ldif object
Arguments:
fobj -- vcf file object
Returns:
Yield vCard entry
"""
flag = False
lines = []
for line in get_line(fobj):
if 'BEGIN:VCARD' in line:
flag = True
lines = []
if flag:
lines.append(line)
if 'END:VCARD' in line:
flag = False
yield parse_entry(lines)
def dump_person(entry, base_path):
"""Dump person
Arguments:
entry -- entry
base_path -- ldap base path
Returns:
Nothing.
"""
lines = []
for mail in entry['mail']:
lines.append(f'dn: mail={mail},{base_path}')
lines.append('objectclass: top')
lines.append('objectclass: person')
lines.append('objectclass: organizationalPerson')
lines.append('objectclass: inetOrgPerson')
lines.append('objectclass: mozillaAbPersonAlpha')
lines.append(f'mail: {mail}')
for tag in ['givenName', 'sn', 'cn',
'mobile', 'homePhone', 'telephoneNumber',
'homePostalAddress', 'mozillaWorkStreet2']:
if tag in entry:
lines.append(f'{tag}: {entry[tag]}')
lines.append('')
print('\n'.join(lines))
def convert(fobj, base_path=''):
"""Convert vCard to ldif
Arguments:
fobj -- vcf file object
base_path -- ldap base path
Returns:
Nothing.
"""
for person in split_entry(fobj):
if len(person['mail']) > 0:
dump_person(person, base_path)
def main():
"""Main routine
Parse arguments and call subroutine
"""
    parser = argparse.ArgumentParser(
        description='Convert a vCard (VCF) address book into an LDAP ldif.')
parser.add_argument('-b',
metavar='BASE_PATH',
dest='base_path',
default='',
help='ldap base path')
parser.add_argument('-f',
metavar='FILE',
dest='fname',
type=argparse.FileType(),
required=True,
help='VCF file')
args = parser.parse_args()
convert(args.fname, args.base_path)
if __name__ == '__main__':
main()
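# To run (sketch; the base path and file name are examples):
#   python vcfconv.py -f contacts.vcf -b "ou=people,dc=example,dc=com" > addressbook.ldif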
``` |
{
"source": "jkyrouac/ACT",
"score": 2
} |
#### File: act/tests/test_clean.py
```python
import act
def test_clean():
ceil_ds = act.io.armfiles.read_netcdf([act.tests.EXAMPLE_CEIL1])
ceil_ds.clean.cleanup(clean_arm_state_vars=['detection_status'])
    # Check that global attributes are removed
global_attributes = ['qc_bit_comment',
'qc_bit_1_description',
'qc_bit_1_assessment',
'qc_bit_2_description',
                         'qc_bit_2_assessment',
'qc_bit_3_description',
'qc_bit_3_assessment',
'qc_bit_4_description',
'qc_bit_4_assessment']
for glb_att in global_attributes:
assert glb_att not in ceil_ds.attrs.keys()
# Check that CF attributes are set including new flag_assessments
assert 'flag_masks' in ceil_ds['qc_first_cbh'].attrs.keys()
assert 'flag_meanings' in ceil_ds['qc_first_cbh'].attrs.keys()
assert 'flag_assessments' in ceil_ds['qc_first_cbh'].attrs.keys()
# Check the value of flag_assessments is as expected
assert (ceil_ds['qc_first_cbh'].attrs['flag_assessments'] ==
['Bad', 'Bad', 'Bad'])
    # Check that the ancillary_variables attribute is being added
assert 'qc_first_cbh' in ceil_ds['first_cbh'].attrs['ancillary_variables'].split()
# Check that state field is updated to CF
assert 'flag_values' in ceil_ds['detection_status'].attrs.keys()
assert 'flag_meanings' in ceil_ds['detection_status'].attrs.keys()
assert 'detection_status' in ceil_ds['first_cbh'].attrs['ancillary_variables'].split()
ceil_ds.close()
```
#### File: act/tests/test_plotting.py
```python
import matplotlib
matplotlib.use('Agg')
import act.io.armfiles as arm
import act.discovery.get_files as get_data
import act.tests.sample_files as sample_files
import act.corrections.ceil as ceil
import pytest
import glob
import matplotlib.pyplot as plt
import os
import boto3
from act.plotting import TimeSeriesDisplay
from botocore.handlers import disable_signing
@pytest.mark.mpl_image_compare(tolerance=30)
def test_plot():
# Process MET data to get simple LCL
files = sample_files.EXAMPLE_MET_WILDCARD
met = arm.read_netcdf(files)
met_temp = met.temp_mean
met_rh = met.rh_mean
met_lcl = (20. + met_temp / 5.) * (100. - met_rh) / 1000.
met['met_lcl'] = met_lcl * 1000.
met['met_lcl'].attrs['units'] = 'm'
met['met_lcl'].attrs['long_name'] = 'LCL Calculated from SGP MET E13'
    # Plot data
display = TimeSeriesDisplay(met)
display.add_subplots((3,), figsize=(15, 10))
display.plot('wspd_vec_mean', subplot_index=(0, ))
display.plot('temp_mean', subplot_index=(1, ))
display.plot('rh_mean', subplot_index=(2, ))
return display.fig
@pytest.mark.mpl_image_compare(tolerance=30)
def test_multidataset_plot_tuple():
conn = boto3.resource('s3')
conn.meta.client.meta.events.register('choose-signer.s3.*', disable_signing)
bucket = conn.Bucket('act-tests')
if not os.path.isdir((os.getcwd() + '/data/')):
os.makedirs((os.getcwd() + '/data/'))
for item in bucket.objects.all():
bucket.download_file(item.key, (os.getcwd() + '/data/' + item.key))
ceil_ds = arm.read_netcdf('data/sgpceilC1.b1*')
sonde_ds = arm.read_netcdf(
sample_files.EXAMPLE_MET_WILDCARD)
ceil_ds = ceil.correct_ceil(ceil_ds, -9999.)
# You can use tuples if the datasets in the tuple contain a
# datastream attribute. This is required in all ARM datasets.
display = TimeSeriesDisplay(
(ceil_ds, sonde_ds), subplot_shape=(2,), figsize=(15, 10))
display.plot('backscatter', 'sgpceilC1.b1', subplot_index=(0,))
display.plot('temp_mean', 'sgpmetE13.b1', subplot_index=(1,))
display.day_night_background('sgpmetE13.b1', subplot_index=(1,))
plt.show()
return display.fig
@pytest.mark.mpl_image_compare(tolerance=30)
def test_multidataset_plot_dict():
conn = boto3.resource('s3')
conn.meta.client.meta.events.register('choose-signer.s3.*', disable_signing)
bucket = conn.Bucket('act-tests')
if not os.path.isdir((os.getcwd() + '/data/')):
os.makedirs((os.getcwd() + '/data/'))
for item in bucket.objects.all():
bucket.download_file(item.key, (os.getcwd() + '/data/' + item.key))
ceil_ds = arm.read_netcdf('data/sgpceilC1.b1*')
sonde_ds = arm.read_netcdf(
sample_files.EXAMPLE_MET_WILDCARD)
ceil_ds = ceil.correct_ceil(ceil_ds, -9999.)
display = TimeSeriesDisplay(
{'ceiliometer': ceil_ds, 'rawinsonde': sonde_ds},
subplot_shape=(2,), figsize=(15, 10))
display.plot('backscatter', 'ceiliometer', subplot_index=(0,))
display.plot('temp_mean', 'rawinsonde', subplot_index=(1,))
display.day_night_background('rawinsonde', subplot_index=(1,))
plt.show()
return display.fig
```
#### File: act/tests/test_utils.py
```python
import act
import xarray as xr
import numpy as np
from datetime import datetime
def test_dates_between():
start_date = '20190101'
end_date = '20190110'
date_list = act.utils.datetime_utils.dates_between(start_date, end_date)
answer = [datetime(2019, 1, 1),
datetime(2019, 1, 2),
datetime(2019, 1, 3),
datetime(2019, 1, 4),
datetime(2019, 1, 5),
datetime(2019, 1, 6),
datetime(2019, 1, 7),
datetime(2019, 1, 8),
datetime(2019, 1, 9),
datetime(2019, 1, 10)]
assert date_list == answer
def add_in_nan():
# Make a 1D array with a 4 day gap in the data
time_list = [np.datetime64(datetime(2019, 1, 1)),
np.datetime64(datetime(2019, 1, 2)),
np.datetime64(datetime(2019, 1, 3)),
np.datetime64(datetime(2019, 1, 4)),
np.datetime64(datetime(2019, 1, 9))]
data = np.linspace(0, 8, 5)
time_list = xr.DataArray(time_list)
data = xr.DataArray(data)
data_filled, time_filled = act.utils.data_utils.add_in_nan(
time_list, data)
    assert np.allclose(np.asarray(data_filled),
                       np.array([0, 2, 4, 6,
                                 np.nan, np.nan, np.nan, np.nan,
                                 8]),
                       equal_nan=True)
    time_answer = [datetime(2019, 1, 1),
                   datetime(2019, 1, 2),
                   datetime(2019, 1, 3),
                   datetime(2019, 1, 4),
                   datetime(2019, 1, 5),
                   datetime(2019, 1, 6),
                   datetime(2019, 1, 7),
                   datetime(2019, 1, 8),
                   datetime(2019, 1, 9)]
    # compare the filled time axis (not the original 5-element time_list)
    assert list(np.asarray(time_filled)) == [np.datetime64(t) for t in time_answer]
``` |
{
"source": "J-Kyu/Python-Log",
"score": 2
} |
#### File: Python-Log/source/_defaults.py
```python
PLOG_HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKCYAN = '\033[96m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
BLACK = '\033[30m'
RED = '\033[31m'
GREEN = '\033[32m'
YELLOW = '\033[33m'
MAGENTA = '\033[35m'
WHITE = '\033[37m'
# format
PLOG_FORMAT_MSG = '[%(asctime)s ]%(_module)-12s|%(levelname)-8s| %(message)s'
PLOG_FORMAT_DATE = '%Y-%m-%d %H:%M:%S'
# level
PLOG_CRITICAL_NO = 50 # 50
PLOG_CRITICAL_ICON = u'\U0001F480'
PLOG_ERROR_NO = 40 # 40
PLOG_ERROR_ICON = u'\U0000274C'  # cross mark
PLOG_WARNING_NO = 30 # 30
PLOG_WARNING_ICON = u'\U000026A0'
PLOG_INFO_NO = 20 # 20
PLOG_INFO_ICON = u'\U00002139'
PLOG_DEBUG_NO = 10 # 10
PLOG_DEBUG_ICON = u'\U0001F41E'
PLOG_NOTSET_NO = 0 #0
PLOG_NOTSET_ICON = u'\U0001F47D'
PLOG_LEVEL_DICT = {'critical':PLOG_CRITICAL_NO, 'error': PLOG_ERROR_NO, 'warning':PLOG_WARNING_NO, 'info':PLOG_INFO_NO,'debug':PLOG_DEBUG_NO ,'none':PLOG_NOTSET_NO}
# unit
PLOG_UNIT_SIZE = ['B','KB','MB','GB']
PLOG_UNIT_TIME = ['SECOND','MINUTE','HOUR','DAY','WEEK',':']
def CheckParameters(**kwargs):
# filename
if 'filename' in kwargs:
filename = kwargs['filename']
else:
filename = 'log.log'
# set format of logger
if 'format' in kwargs:
format = kwargs['format']
else:
format = PLOG_FORMAT_MSG
if 'datefmt' in kwargs:
datefmt = kwargs['datefmt']
else:
datefmt =PLOG_FORMAT_DATE
if 'filter' in kwargs:
filter = kwargs['filter']
else:
filter = (lambda record : True)
# level
    if 'level' in kwargs:
        if kwargs['level'] in PLOG_LEVEL_DICT:
            level = PLOG_LEVEL_DICT[kwargs['level']]
        elif isinstance(kwargs['level'], int):
            level = kwargs['level']
        else:
            raise Exception('WrongLevelInput')
    else:
        level = PLOG_NOTSET_NO  # default so 'level' is always defined
return filename, format, datefmt, level, filter
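# Example (sketch, not part of the original module):
#   CheckParameters(filename='app.log', level='debug')
#   -> ('app.log', PLOG_FORMAT_MSG, PLOG_FORMAT_DATE, 10, <default filter>)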
```
#### File: Python-Log/source/_PAbsFileHandler.py
```python
class PAbsFileHandler:
def __init__(self):
self.fileHandler = None
def GetFileHandler(self):
return self.fileHandler
``` |
{
"source": "jkznst/CenterNet",
"score": 2
} |
#### File: src/lib/oss_logger.py
```python
import logging
import os
import oss2
import sys
from utils.oss_tools import OSS_Bucket
def setup_logger(name, save_dir, distributed_rank):
logger = logging.getLogger(name)
logger.setLevel(logging.DEBUG)
# don't log results for the non-master process
if distributed_rank > 0:
return logger
ch = logging.StreamHandler(stream=sys.stdout)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(asctime)s %(name)s %(levelname)s: %(message)s")
ch.setFormatter(formatter)
logger.addHandler(ch)
if save_dir:
if OSS_Bucket.oss_bucket:
fh = OSSLoggingHandler(os.path.join(save_dir, "log.txt"))
formatter = logging.Formatter("%(asctime)s %(name)s %(levelname)s: %(message)s\n")
else:
fh = logging.FileHandler(os.path.join(save_dir, "log.txt"), mode='w')
fh.setLevel(logging.DEBUG)
fh.setFormatter(formatter)
logger.addHandler(fh)
return logger
class OSSLoggingHandler(logging.StreamHandler):
def __init__(self, log_file):
super(OSSLoggingHandler, self).__init__()
self._bucket = OSS_Bucket.bucket
self._log_file = log_file
if self._bucket.object_exists(self._log_file):
self._bucket.delete_object(self._log_file)
# raise ValueError('log file {} exists, Please check!'.format(self._log_file))
self._pos = self._bucket.append_object(self._log_file, 0, '')
def emit(self, record):
msg = self.format(record)
try:
self._pos = self._bucket.append_object(self._log_file, self._pos.next_position, msg)
except Exception as e:
print(e)
if isinstance(e, oss2.exceptions.PositionNotEqualToLength):
raise ValueError('log file [{}] has changed, Please check!'.format(self._log_file))
class RedirectStdout(object):
    def __init__(self, save_dir):  # keep references to the original stdout/stderr streams
self.fname = os.path.join(save_dir, 'stdout.txt')
self.file = None
self.out = sys.stdout
self.err = sys.stderr
    def start(self):  # redirect stdout/stderr to the file
self.file = open(self.fname, 'w', encoding='utf-8')
sys.stderr = self.file
sys.stdout = self.file
    def end(self):  # restore the original stdout/stderr streams
if self.file:
sys.stdout = self.out
sys.stderr = self.err
self.file.close()
if __name__ == '__main__':
OSS_Bucket.set(True)
logger = setup_logger('test_logger', 'niding-nd', 0)
for i in range(100):
logger.info(','.join(['CCC']*i))
``` |
{
"source": "jkznst/detectron2",
"score": 2
} |
#### File: SixDPose/sixdpose/crpnet.py
```python
import math
import numpy as np
from typing import List, Tuple
import torch
from fvcore.nn import sigmoid_focal_loss_jit, smooth_l1_loss
from torch import nn
from torch.nn import functional as F
from detectron2.data.detection_utils import convert_image_to_rgb
from detectron2.layers import ShapeSpec, batched_nms, cat
from detectron2.structures import Boxes, ImageList, Instances, pairwise_iou
from detectron2.utils.events import get_event_storage
from detectron2.modeling import META_ARCH_REGISTRY, detector_postprocess, build_backbone, build_anchor_generator
from detectron2.modeling.box_regression import Box2BoxTransform
from detectron2.modeling.matcher import Matcher
__all__ = ["CRPNet"]
def permute_to_N_HWA_K(tensor, K):
"""
Transpose/reshape a tensor from (N, (A x K), H, W) to (N, (HxWxA), K)
"""
assert tensor.dim() == 4, tensor.shape
N, _, H, W = tensor.shape
tensor = tensor.view(N, -1, K, H, W)
tensor = tensor.permute(0, 3, 4, 1, 2)
tensor = tensor.reshape(N, -1, K) # Size=(N,HWA,K)
return tensor
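# Shape example (for illustration): with N=2, A=9 anchors, K=4 and a 10x10 feature map,
# an input of shape (2, 36, 10, 10) becomes (2, 900, 4), i.e. (N, H*W*A, K).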
def permute_all_cls_and_box_to_N_HWA_K_and_concat(box_cls, box_delta, kpt_delta,
num_classes=80, num_kpt=17):
"""
Rearrange the tensor layout from the network output, i.e.:
list[Tensor]: #lvl tensors of shape (N, A x K, Hi, Wi)
to per-image predictions, i.e.:
Tensor: of shape (N x sum(Hi x Wi x A), K)
"""
# for each feature level, permute the outputs to make them be in the
# same format as the labels. Note that the labels are computed for
# all feature levels concatenated, so we keep the same representation
# for the objectness and the box_delta and the kpt_delta
box_cls_flattened = [permute_to_N_HWA_K(x, num_classes) for x in box_cls]
box_delta_flattened = [permute_to_N_HWA_K(x, 4) for x in box_delta]
kpt_delta_flattened = [permute_to_N_HWA_K(x, num_kpt * 2) for x in kpt_delta]
# concatenate on the first dimension (representing the feature levels), to
# take into account the way the labels were generated (with all feature maps
# being concatenated as well)
box_cls = cat(box_cls_flattened, dim=1).view(-1, num_classes)
box_delta = cat(box_delta_flattened, dim=1).view(-1, 4)
kpt_delta = cat(kpt_delta_flattened, dim=1).view(-1, num_kpt * 2)
return box_cls, box_delta, kpt_delta
# Value for clamping large dw and dh predictions. The heuristic is that we clamp
# such that dw and dh are no larger than what would transform a 16px box into a
# 1000px box (based on a small anchor, 16px, and a typical image size, 1000px).
_DEFAULT_SCALE_CLAMP = math.log(1000.0 / 16)
@torch.jit.script
class Box2KptTransform(object):
"""
    The box-to-kpt transform used in CRPNet. Each keypoint is parameterized by
    2 deltas (dx, dy) relative to a reference box: the keypoint location is
    obtained by shifting the box's center by the offset (dx * width, dy * height).
"""
def __init__(
self, weights: Tuple[float, float, float, float], scale_clamp: float = _DEFAULT_SCALE_CLAMP
):
"""
Args:
weights (4-element tuple): Scaling factors that are applied to the
(dx, dy, dw, dh) deltas. In Fast R-CNN, these were originally set
such that the deltas have unit variance; now they are treated as
hyperparameters of the system.
scale_clamp (float): When predicting deltas, the predicted box scaling
factors (dw and dh) are clamped such that they are <= scale_clamp.
"""
self.weights = weights
self.scale_clamp = scale_clamp
def get_deltas(self, src_boxes, target_kpts):
"""
Get box regression transformation deltas (dx, dy, dw, dh) that can be used
to transform the `src_boxes` into the `target_kpts`. That is, the relation
``target_kpts == self.apply_deltas(deltas, src_boxes)`` is true (unless
any delta is too large and is clamped).
Args:
src_boxes (Tensor): source boxes, e.g., object proposals, shape (N, 4)
target_kpts (Tensor): target of the transformation, e.g., ground-truth
keypoints. shape (N, K, 3)
"""
assert isinstance(src_boxes, torch.Tensor), type(src_boxes)
assert isinstance(target_kpts, torch.Tensor), type(target_kpts)
# print(src_boxes[0:10])
# print(target_kpts[0:10])
# print(src_boxes.device)
# print(target_kpts.device)
src_widths = src_boxes[:, 2] - src_boxes[:, 0]
src_heights = src_boxes[:, 3] - src_boxes[:, 1]
src_ctr_x = src_boxes[:, 0] + 0.5 * src_widths
src_ctr_y = src_boxes[:, 1] + 0.5 * src_heights
# target_widths = target_kpts[:, 2] - target_kpts[:, 0]
# target_heights = target_kpts[:, 3] - target_kpts[:, 1]
target_kpt_x = target_kpts[..., 0]
target_kpt_y = target_kpts[..., 1]
wx, wy, ww, wh = self.weights
# print(self.weights)
# print(target_kpt_x[0])
# print(src_ctr_x[0])
# print(src_widths[0])
# print(target_kpt_x.shape)
# print(src_ctr_x.shape)
dx = wx * (target_kpt_x - src_ctr_x[:, None]) / src_widths[:, None]
dy = wy * (target_kpt_y - src_ctr_y[:, None]) / src_heights[:, None]
# dw = ww * torch.log(target_widths / src_widths)
# dh = wh * torch.log(target_heights / src_heights)
deltas = torch.stack((dx, dy), dim=2).reshape(target_kpts.size(0), -1) # shape (N, K x 2)
# print(deltas.size())
assert (src_widths > 0).all().item(), "Input boxes to Box2KptTransform are not valid!"
return deltas
def apply_deltas(self, deltas, boxes):
"""
Apply transformation `deltas` (dx, dy) to `boxes`.
Args:
deltas (Tensor): transformation deltas of shape (N, A*k*2), where A >= 1.
deltas[i] represents A potentially different class-specific
box transformations for the single box boxes[i].
boxes (Tensor): boxes to transform, of shape (N, 4)
return:
keypoints of shape (N, K, 3)
"""
boxes = boxes.to(deltas.dtype)
# print(deltas.size())
# print(boxes.size())
widths = boxes[:, 2] - boxes[:, 0]
heights = boxes[:, 3] - boxes[:, 1]
ctr_x = boxes[:, 0] + 0.5 * widths
ctr_y = boxes[:, 1] + 0.5 * heights
wx, wy, ww, wh = self.weights
dx = deltas[:, 0::2] / wx
dy = deltas[:, 1::2] / wy
# dw = deltas[:, 2::4] / ww
# dh = deltas[:, 3::4] / wh
# Prevent sending too large values into torch.exp()
# dw = torch.clamp(dw, max=self.scale_clamp)
# dh = torch.clamp(dh, max=self.scale_clamp)
pred_kpt_x = dx * widths[:, None] + ctr_x[:, None]
pred_kpt_y = dy * heights[:, None] + ctr_y[:, None]
# pred_w = torch.exp(dw) * widths[:, None]
# pred_h = torch.exp(dh) * heights[:, None]
kpt_score = torch.ones((deltas.size(0), deltas.size(1) // 2), device=deltas.device)
pred_kpts = torch.stack((pred_kpt_x, pred_kpt_y, kpt_score), dim=2)
return pred_kpts
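    # Round-trip sketch (for illustration): with weights (1., 1., 1., 1.), a single box
    # [[0., 0., 10., 10.]] and one target keypoint [[[7.5, 5.0, 2.0]]], get_deltas yields
    # [[0.25, 0.0]], and apply_deltas maps those deltas back to (x, y) = (7.5, 5.0)
    # with the returned visibility score fixed to 1.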
@META_ARCH_REGISTRY.register()
class CRPNet(nn.Module):
"""
Implement CRPNet.
"""
def __init__(self, cfg):
super().__init__()
# fmt: off
self.num_classes = cfg.MODEL.CRPNET.NUM_CLASSES
self.in_features = cfg.MODEL.CRPNET.IN_FEATURES
self.num_kpt = cfg.MODEL.CRPNET.NUM_KEYPOINTS
self.cascade_regression = cfg.MODEL.CRPNET.CASCADE_REGRESSION
# Loss parameters:
self.focal_loss_alpha = cfg.MODEL.CRPNET.FOCAL_LOSS_ALPHA
self.focal_loss_gamma = cfg.MODEL.CRPNET.FOCAL_LOSS_GAMMA
self.smooth_l1_loss_beta = cfg.MODEL.CRPNET.SMOOTH_L1_LOSS_BETA
self.kpt_loss_weight = cfg.MODEL.CRPNET.KPT_WEIGHT
# Inference parameters:
self.score_threshold = cfg.MODEL.CRPNET.SCORE_THRESH_TEST
self.topk_candidates = cfg.MODEL.CRPNET.TOPK_CANDIDATES_TEST
self.nms_threshold = cfg.MODEL.CRPNET.NMS_THRESH_TEST
self.max_detections_per_image = cfg.TEST.DETECTIONS_PER_IMAGE
# Vis parameters
self.vis_period = cfg.VIS_PERIOD
self.input_format = cfg.INPUT.FORMAT
# fmt: on
self.backbone = build_backbone(cfg)
backbone_shape = self.backbone.output_shape()
feature_shapes = [backbone_shape[f] for f in self.in_features]
self.head = CRPNetHead(cfg, feature_shapes)
self.anchor_generator = build_anchor_generator(cfg, feature_shapes)
# Matching and loss
self.box2box_transform = Box2BoxTransform(weights=cfg.MODEL.CRPNET.BBOX_REG_WEIGHTS)
self.box2kpt_transform = Box2KptTransform(weights=cfg.MODEL.CRPNET.BBOX_REG_WEIGHTS)
self.anchor_matcher = Matcher(
cfg.MODEL.CRPNET.IOU_THRESHOLDS,
cfg.MODEL.CRPNET.IOU_LABELS,
allow_low_quality_matches=True,
)
self.register_buffer("pixel_mean", torch.Tensor(cfg.MODEL.PIXEL_MEAN).view(-1, 1, 1))
self.register_buffer("pixel_std", torch.Tensor(cfg.MODEL.PIXEL_STD).view(-1, 1, 1))
"""
In Detectron1, loss is normalized by number of foreground samples in the batch.
When batch size is 1 per GPU, #foreground has a large variance and
using it lead to lower performance. Here we maintain an EMA of #foreground to
stabilize the normalizer.
"""
self.loss_normalizer = 100 # initialize with any reasonable #fg that's not too small
self.loss_normalizer_momentum = 0.9
@property
def device(self):
return self.pixel_mean.device
def visualize_training(self, batched_inputs, results):
"""
A function used to visualize ground truth images and final network predictions.
It shows ground truth bounding boxes on the original image and up to 20
predicted object bounding boxes on the original image.
Args:
batched_inputs (list): a list that contains input to the model.
results (List[Instances]): a list of #images elements.
"""
from detectron2.utils.visualizer import Visualizer
assert len(batched_inputs) == len(
results
), "Cannot visualize inputs and results of different sizes"
storage = get_event_storage()
max_boxes = 20
image_index = 0 # only visualize a single image
img = batched_inputs[image_index]["image"]
img = convert_image_to_rgb(img.permute(1, 2, 0), self.input_format)
v_gt = Visualizer(img, None)
v_gt = v_gt.overlay_instances(boxes=batched_inputs[image_index]["instances"].gt_boxes)
anno_img = v_gt.get_image()
processed_results = detector_postprocess(results[image_index], img.shape[0], img.shape[1])
predicted_boxes = processed_results.pred_boxes.tensor.detach().cpu().numpy()
v_pred = Visualizer(img, None)
v_pred = v_pred.overlay_instances(boxes=predicted_boxes[0:max_boxes])
prop_img = v_pred.get_image()
vis_img = np.vstack((anno_img, prop_img))
vis_img = vis_img.transpose(2, 0, 1)
vis_name = f"Top: GT bounding boxes; Bottom: {max_boxes} Highest Scoring Results"
storage.put_image(vis_name, vis_img)
def forward(self, batched_inputs):
"""
Args:
batched_inputs: a list, batched outputs of :class:`DatasetMapper` .
Each item in the list contains the inputs for one image.
For now, each item in the list is a dict that contains:
* image: Tensor, image in (C, H, W) format.
* instances: Instances
Other information that's included in the original dicts, such as:
* "height", "width" (int): the output resolution of the model, used in inference.
See :meth:`postprocess` for details.
Returns:
dict[str: Tensor]:
mapping from a named loss to a tensor storing the loss. Used during training only.
"""
images = self.preprocess_image(batched_inputs)
features = self.backbone(images.tensor)
features = [features[f] for f in self.in_features]
# print(features[0].size())
anchors = self.anchor_generator(features)
# print(anchors[0].tensor.size())
pred_logits, pred_anchor_deltas, pred_kpt_deltas = self.head(features)
# Transpose the Hi*Wi*A dimension to the middle:
pred_logits = [permute_to_N_HWA_K(x, self.num_classes) for x in pred_logits]
pred_anchor_deltas = [permute_to_N_HWA_K(x, 4) for x in pred_anchor_deltas]
pred_kpt_deltas = [permute_to_N_HWA_K(x, self.num_kpt * 2) for x in pred_kpt_deltas]
if self.training:
assert "instances" in batched_inputs[0], "Instance annotations are missing in training!"
gt_instances = [x["instances"].to(self.device) for x in batched_inputs]
gt_labels, gt_boxes, gt_keypoints = self.label_anchors(anchors, gt_instances)
# print(gt_classes.size())
# print(gt_anchors_reg_deltas.size())
# print(gt_kpt_reg_deltas.size())
losses = self.losses(anchors, pred_logits, gt_labels, pred_anchor_deltas, gt_boxes,
pred_kpt_deltas, gt_keypoints)
if self.vis_period > 0:
storage = get_event_storage()
if storage.iter % self.vis_period == 0:
results = self.inference(
anchors, pred_logits, pred_anchor_deltas, pred_kpt_deltas, images.image_sizes
)
self.visualize_training(batched_inputs, results)
return losses
else:
results = self.inference(anchors, pred_logits, pred_anchor_deltas, pred_kpt_deltas, images.image_sizes)
processed_results = []
for results_per_image, input_per_image, image_size in zip(
results, batched_inputs, images.image_sizes
):
height = input_per_image.get("height", image_size[0])
width = input_per_image.get("width", image_size[1])
r = detector_postprocess(results_per_image, height, width)
processed_results.append({"instances": r})
return processed_results
def losses(self, anchors, pred_logits, gt_labels, pred_anchor_deltas, gt_boxes,
pred_kpt_deltas, gt_keypoints):
"""
Args:
anchors (list[Boxes]): a list of #feature level Boxes
gt_labels, gt_boxes, gt_keypoints: see output of :meth:`CRPNet.label_anchors`.
Their shapes are (N, R) and (N, R, 4) and (N, R, num_kpt x 2), respectively, where R is
the total number of anchors across levels, i.e. sum(Hi x Wi x Ai)
pred_logits, pred_anchor_deltas, pred_kpt_deltas: list[Tensor], one per level. Each
has shape (N, Hi * Wi * Ai, K or 4 or num_kpt x 2)
Returns:
dict[str: Tensor]:
mapping from a named loss to a scalar tensor
storing the loss. Used during training only. The dict keys are:
"loss_cls" and "loss_box_reg"
"""
num_images = len(gt_labels)
gt_labels = torch.stack(gt_labels) # (N, R)
anchors = type(anchors[0]).cat(anchors).tensor # (R, 4)
gt_anchor_deltas = [self.box2box_transform.get_deltas(anchors, k) for k in gt_boxes]
gt_anchor_deltas = torch.stack(gt_anchor_deltas) # (N, R, 4)
if self.cascade_regression:
predicted_boxes = [self.box2box_transform.apply_deltas(d, anchors) for d in cat(pred_anchor_deltas, dim=1)]
predicted_boxes = torch.stack(predicted_boxes)
# TODO: test if we should use gt bbox or pred bbox
# gt_kpt_deltas = [self.box2kpt_transform.get_deltas(b, k) for b, k in zip(gt_boxes, gt_keypoints)]
gt_kpt_deltas = [self.box2kpt_transform.get_deltas(b, k) for b, k in zip(predicted_boxes, gt_keypoints)]
# print(gt_kpt_reg_deltas_i[0])
# test_kpt = self.box2kpt_transform.apply_deltas(
# gt_kpt_reg_deltas_i, matched_gt_boxes.tensor
# )
# print(test_kpt[0])
else:
gt_kpt_deltas = [self.box2kpt_transform.get_deltas(anchors, k) for k in gt_keypoints]
gt_kpt_deltas = torch.stack(gt_kpt_deltas) # (N, R, num_kpt x 2)
valid_mask = gt_labels >= 0
pos_mask = (gt_labels >= 0) & (gt_labels != self.num_classes)
num_pos_anchors = pos_mask.sum().item()
get_event_storage().put_scalar("num_pos_anchors", num_pos_anchors / num_images)
self.loss_normalizer = self.loss_normalizer_momentum * self.loss_normalizer + (
1 - self.loss_normalizer_momentum
) * max(num_pos_anchors, 1)
# classification and regression loss
gt_labels_target = F.one_hot(gt_labels[valid_mask], num_classes=self.num_classes + 1)[
:, :-1
] # no loss for the last (background) class
loss_cls = sigmoid_focal_loss_jit(
cat(pred_logits, dim=1)[valid_mask],
gt_labels_target.to(pred_logits[0].dtype),
alpha=self.focal_loss_alpha,
gamma=self.focal_loss_gamma,
reduction="sum",
)
loss_box_reg = smooth_l1_loss(
cat(pred_anchor_deltas, dim=1)[pos_mask],
gt_anchor_deltas[pos_mask],
beta=self.smooth_l1_loss_beta,
reduction="sum",
)
loss_kpt_reg = smooth_l1_loss(
cat(pred_kpt_deltas, dim=1)[pos_mask],
gt_kpt_deltas[pos_mask],
beta=self.smooth_l1_loss_beta,
reduction="sum",
)
return {
"loss_cls": loss_cls / self.loss_normalizer,
"loss_box_reg": loss_box_reg / self.loss_normalizer,
"loss_kpt_reg": loss_kpt_reg / self.loss_normalizer / self.num_kpt * self.kpt_loss_weight,
}
@torch.no_grad()
def label_anchors(self, anchors, gt_instances):
"""
Args:
anchors (list[Boxes]): A list of #feature level Boxes.
The Boxes contains anchors of this image on the specific feature level.
gt_instances (list[Instances]): a list of N `Instances`s. The i-th
`Instances` contains the ground-truth per-instance annotations
for the i-th input image.
Returns:
gt_labels: list[Tensor]:
List of #img tensors. i-th element is a vector of labels whose length is
the total number of anchors across all feature maps (sum(Hi * Wi * A)).
Label values are in {-1, 0, ..., K}, with -1 means ignore, and K means background.
matched_gt_boxes (list[Tensor]):
i-th element is a Rx4 tensor, where R is the total number of anchors across
feature maps. The values are the matched gt boxes for each anchor.
Values are undefined for those anchors not labeled as foreground.
matched_gt_kpts (list[Tensor]):
i-th element is a Rx(num_kptx2) tensor, where R is the total number of anchors across
feature maps. The values are the matched gt keypoints for each anchor.
Values are undefined for those anchors not labeled as foreground.
"""
anchors = Boxes.cat(anchors) # Rx4
gt_labels = []
matched_gt_boxes = []
matched_gt_kpts = []
for gt_per_image in gt_instances:
match_quality_matrix = pairwise_iou(gt_per_image.gt_boxes, anchors)
matched_idxs, anchor_labels = self.anchor_matcher(match_quality_matrix)
del match_quality_matrix
if len(gt_per_image) > 0:
matched_gt_boxes_i = gt_per_image.gt_boxes.tensor[matched_idxs]
matched_gt_kpts_i = gt_per_image.gt_keypoints.tensor[matched_idxs]
gt_labels_i = gt_per_image.gt_classes[matched_idxs]
# Anchors with label 0 are treated as background.
gt_labels_i[anchor_labels == 0] = self.num_classes
# Anchors with label -1 are ignored.
gt_labels_i[anchor_labels == -1] = -1
else:
matched_gt_boxes_i = torch.zeros_like(anchors.tensor)
matched_gt_kpts_i = torch.zeros((anchors.tensor.size(0), self.num_kpt, 3), device=matched_gt_boxes_i.device)
gt_labels_i = torch.zeros_like(matched_idxs) + self.num_classes
gt_labels.append(gt_labels_i)
matched_gt_boxes.append(matched_gt_boxes_i)
matched_gt_kpts.append(matched_gt_kpts_i)
return gt_labels, matched_gt_boxes, matched_gt_kpts
def inference(self, anchors, pred_logits, pred_anchor_deltas, pred_kpt_deltas, image_sizes):
"""
Arguments:
anchors (list[Boxes]): A list of #feature level Boxes.
The Boxes contain anchors of this image on the specific feature level.
pred_logits, pred_anchor_deltas: list[Tensor], one per level. Each
has shape (N, Hi * Wi * Ai, K or 4)
image_sizes (List[torch.Size]): the input image sizes
Returns:
results (List[Instances]): a list of #images elements.
"""
results = []
for img_idx, image_size in enumerate(image_sizes):
pred_logits_per_image = [x[img_idx] for x in pred_logits]
bbox_deltas_per_image = [x[img_idx] for x in pred_anchor_deltas]
kpt_deltas_per_image = [x[img_idx] for x in pred_kpt_deltas]
results_per_image = self.inference_single_image(
anchors, pred_logits_per_image, bbox_deltas_per_image,
kpt_deltas_per_image, tuple(image_size)
)
results.append(results_per_image)
return results
def inference_single_image(self, anchors, box_cls, box_delta, kpt_delta, image_size):
"""
Single-image inference. Return bounding-box detection results by thresholding
on scores and applying non-maximum suppression (NMS).
Arguments:
box_cls (list[Tensor]): list of #feature levels. Each entry contains
tensor of size (H x W x A, C)
box_delta (list[Tensor]): Same shape as 'box_cls' except that C becomes 4.
kpt_delta (list[Tensor]): Same shape as 'box_delta' except that 4 becomes K x 2.
anchors (list[Boxes]): list of #feature levels. Each entry contains
a Boxes object, which contains all the anchors for that
image in that feature level.
image_size (tuple(H, W)): a tuple of the image height and width.
Returns:
Same as `inference`, but for only one image.
"""
boxes_all = []
kpts_all = []
scores_all = []
class_idxs_all = []
# Iterate over every feature level
for box_cls_i, box_reg_i, kpt_reg_i, anchors_i in zip(box_cls, box_delta, kpt_delta, anchors):
# (HxWxAxK,)
box_cls_i = box_cls_i.flatten().sigmoid_()
# Keep top k top scoring indices only.
num_topk = min(self.topk_candidates, box_reg_i.size(0))
# torch.sort is actually faster than .topk (at least on GPUs)
predicted_prob, topk_idxs = box_cls_i.sort(descending=True)
predicted_prob = predicted_prob[:num_topk]
topk_idxs = topk_idxs[:num_topk]
# filter out the proposals with low confidence score
keep_idxs = predicted_prob > self.score_threshold
predicted_prob = predicted_prob[keep_idxs]
topk_idxs = topk_idxs[keep_idxs]
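            # The per-level logits were flattened as (anchor, class), so integer division
            # by the number of classes recovers the anchor index and the remainder the class.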
anchor_idxs = topk_idxs // self.num_classes
classes_idxs = topk_idxs % self.num_classes
box_reg_i = box_reg_i[anchor_idxs]
kpt_reg_i = kpt_reg_i[anchor_idxs]
anchors_i = anchors_i[anchor_idxs]
# predict boxes
predicted_boxes = self.box2box_transform.apply_deltas(box_reg_i, anchors_i.tensor)
if self.cascade_regression:
predicted_kpts = self.box2kpt_transform.apply_deltas(kpt_reg_i, predicted_boxes)
else:
predicted_kpts = self.box2kpt_transform.apply_deltas(kpt_reg_i, anchors_i.tensor)
boxes_all.append(predicted_boxes)
kpts_all.append(predicted_kpts)
scores_all.append(predicted_prob)
class_idxs_all.append(classes_idxs)
boxes_all, kpts_all, scores_all, class_idxs_all = [
cat(x) for x in [boxes_all, kpts_all, scores_all, class_idxs_all]
]
keep = batched_nms(boxes_all, scores_all, class_idxs_all, self.nms_threshold)
keep = keep[: self.max_detections_per_image]
result = Instances(image_size)
result.pred_boxes = Boxes(boxes_all[keep])
result.scores = scores_all[keep]
result.pred_classes = class_idxs_all[keep]
result.pred_keypoints = kpts_all[keep]
return result
def preprocess_image(self, batched_inputs):
"""
Normalize, pad and batch the input images.
"""
images = [x["image"].to(self.device) for x in batched_inputs]
images = [(x - self.pixel_mean) / self.pixel_std for x in images]
images = ImageList.from_tensors(images, self.backbone.size_divisibility)
return images
class CRPNetHead(nn.Module):
"""
    The head used in CRPNet for object classification, box regression, and pose estimation.
It has three subnets for the three tasks, with a common structure but separate parameters.
"""
def __init__(self, cfg, input_shape: List[ShapeSpec]):
super().__init__()
# fmt: off
in_channels = input_shape[0].channels
num_classes = cfg.MODEL.CRPNET.NUM_CLASSES
num_convs = cfg.MODEL.CRPNET.NUM_CONVS
prior_prob = cfg.MODEL.CRPNET.PRIOR_PROB
num_anchors = build_anchor_generator(cfg, input_shape).num_cell_anchors
num_kpt = cfg.MODEL.CRPNET.NUM_KEYPOINTS
# fmt: on
assert (
len(set(num_anchors)) == 1
), "Using different number of anchors between levels is not currently supported!"
num_anchors = num_anchors[0]
cls_subnet = []
bbox_subnet = []
kpt_subnet = []
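        # Each subnet stacks depthwise-separable blocks: a 3x3 depthwise conv
        # (groups=in_channels) followed by a 1x1 pointwise conv, each with a ReLU.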
for _ in range(num_convs):
cls_subnet.append(
nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1,
groups=in_channels))
cls_subnet.append(nn.ReLU())
cls_subnet.append(
nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1))
cls_subnet.append(nn.ReLU())
bbox_subnet.append(
nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1,
groups=in_channels))
bbox_subnet.append(nn.ReLU())
bbox_subnet.append(
nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1))
bbox_subnet.append(nn.ReLU())
kpt_subnet.append(
nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1,
groups=in_channels))
kpt_subnet.append(nn.ReLU())
kpt_subnet.append(
nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1))
kpt_subnet.append(nn.ReLU())
self.cls_subnet = nn.Sequential(*cls_subnet)
self.bbox_subnet = nn.Sequential(*bbox_subnet)
self.kpt_subnet = nn.Sequential(*kpt_subnet)
self.cls_score = nn.Conv2d(
in_channels, num_anchors * num_classes, kernel_size=3, stride=1, padding=1
)
self.bbox_pred = nn.Conv2d(in_channels, num_anchors * 4, kernel_size=3, stride=1, padding=1)
self.kpt_pred = nn.Conv2d(in_channels, num_anchors * num_kpt * 2,
kernel_size=3, stride=1, padding=1)
# Initialization
for modules in [self.cls_subnet, self.bbox_subnet, self.kpt_subnet, self.cls_score, self.bbox_pred, self.kpt_pred]:
for layer in modules.modules():
if isinstance(layer, nn.Conv2d):
torch.nn.init.normal_(layer.weight, mean=0, std=0.01)
torch.nn.init.constant_(layer.bias, 0)
# Use prior in model initialization to improve stability
bias_value = -math.log((1 - prior_prob) / prior_prob)
torch.nn.init.constant_(self.cls_score.bias, bias_value)
def forward(self, features):
"""
Arguments:
features (list[Tensor]): FPN feature map tensors in high to low resolution.
                Each tensor in the list corresponds to a different feature level.
Returns:
logits (list[Tensor]): #lvl tensors, each has shape (N, AxC, Hi, Wi).
The tensor predicts the classification probability
at each spatial position for each of the A anchors and C object
classes.
bbox_reg (list[Tensor]): #lvl tensors, each has shape (N, Ax4, Hi, Wi).
The tensor predicts 4-vector (dx,dy,dw,dh) box
regression values for every anchor. These values are the
relative offset between the anchor and the ground truth box.
kpt_reg (list[Tensor]): #lvl tensors, each has shape (N, AxKx2, Hi, Wi).
The tensor predicts Kx2-vector (dx,dy) keypoint
regression values for every anchor. These values are the
relative offset between the anchor and the ground truth keypoint.
"""
logits = []
bbox_reg = []
kpt_reg = []
for feature in features:
# print(feature.size())
logits.append(self.cls_score(self.cls_subnet(feature)))
bbox_reg.append(self.bbox_pred(self.bbox_subnet(feature)))
kpt_reg.append(self.kpt_pred(self.kpt_subnet(feature)))
return logits, bbox_reg, kpt_reg
class SSDHead(nn.Module):
"""
The head used in SSD for object classification and box regression.
Does not share parameters across feature levels.
"""
def __init__(self, cfg, input_shape: List[ShapeSpec]):
super().__init__()
# fmt: off
in_channels = [f.channels for f in input_shape]
num_classes = cfg.MODEL.RETINANET.NUM_CLASSES
prior_prob = cfg.MODEL.RETINANET.PRIOR_PROB
num_anchors = build_anchor_generator(cfg, input_shape).num_cell_anchors
# fmt: on
assert (
len(set(num_anchors)) == 1
), "Using different number of anchors between levels is not currently supported!"
num_anchors = num_anchors[0]
# Use prior in model initialization to improve stability
bias_value = -math.log((1 - prior_prob) / prior_prob)
for i, in_channel in enumerate(in_channels):
cls_score = nn.Conv2d(
in_channel, num_anchors * num_classes, kernel_size=3, stride=1, padding=1
)
torch.nn.init.normal_(cls_score.weight, mean=0, std=0.01)
torch.nn.init.constant_(cls_score.bias, bias_value)
self.add_module("p{}_cls_score".format(i + 3), cls_score)
bbox_pred = nn.Conv2d(
in_channel, num_anchors * 4, kernel_size=3, stride=1, padding=1
)
torch.nn.init.normal_(bbox_pred.weight, mean=0, std=0.01)
torch.nn.init.constant_(bbox_pred.bias, 0)
self.add_module("p{}_bbox_pred".format(i + 3), bbox_pred)
def forward(self, features):
"""
Arguments:
features (list[Tensor]): FPN feature map tensors in high to low resolution.
                Each tensor in the list corresponds to a different feature level.
Returns:
logits (list[Tensor]): #lvl tensors, each has shape (N, AxK, Hi, Wi).
The tensor predicts the classification probability
at each spatial position for each of the A anchors and K object
classes.
bbox_reg (list[Tensor]): #lvl tensors, each has shape (N, Ax4, Hi, Wi).
The tensor predicts 4-vector (dx,dy,dw,dh) box
regression values for every anchor. These values are the
relative offset between the anchor and the ground truth box.
"""
logits = []
bbox_reg = []
for i, feature in enumerate(features):
cls_score = getattr(self, "p{}_cls_score".format(i + 3))
bbox_pred = getattr(self, "p{}_bbox_pred".format(i + 3))
logits.append(cls_score(feature))
bbox_reg.append(bbox_pred(feature))
return logits, bbox_reg
```
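The `losses()` method above divides every loss term by an exponential moving average of the number of positive anchors. A minimal sketch of that update rule follows; the momentum and the per-batch positive-anchor counts are illustrative assumptions, not values taken from this repository:
```python
# Hedged sketch of the EMA loss normalizer used in CRPNet.losses() above.
momentum = 0.9           # stands in for self.loss_normalizer_momentum (assumed value)
loss_normalizer = 100.0  # assumed initial value

for num_pos_anchors in [120, 80, 0, 60]:  # made-up per-batch counts
    loss_normalizer = momentum * loss_normalizer + (1 - momentum) * max(num_pos_anchors, 1)
    print(round(loss_normalizer, 2))
# Each loss (loss_cls, loss_box_reg, loss_kpt_reg) is then divided by this smoothed value,
# which keeps the normalization stable even when a batch has no positive anchors.
```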
#### File: SixDPose/sixdpose/resneth.py
```python
import math
import numpy as np
import fvcore.nn.weight_init as weight_init
import torch
import torch.nn.functional as F
from torch import nn
from detectron2.layers import (
Conv2d,
DeformConv,
FrozenBatchNorm2d,
ModulatedDeformConv,
ShapeSpec,
get_norm,
)
from detectron2.modeling import Backbone, BACKBONE_REGISTRY, ResNetBlockBase
__all__ = [
"BottleneckBlock",
"DeformBottleneckBlock",
"BasicStem",
"make_stage",
"ResNet",
"build_resneth_backbone",
"build_crpnet_resneth_fpn_backbone",
]
# class ResNetBlockBase(nn.Module):
# def __init__(self, in_channels, out_channels, stride):
# """
# The `__init__` method of any subclass should also contain these arguments.
# Args:
# in_channels (int):
# out_channels (int):
# stride (int):
# """
# super().__init__()
# self.in_channels = in_channels
# self.out_channels = out_channels
# self.stride = stride
# def freeze(self):
# for p in self.parameters():
# p.requires_grad = False
# FrozenBatchNorm2d.convert_frozen_batchnorm(self)
# return self
class BottleneckBlock(ResNetBlockBase):
def __init__(
self,
in_channels,
out_channels,
*,
bottleneck_channels,
stride=1,
num_groups=1,
norm="BN",
stride_in_1x1=False,
dilation=1,
):
"""
Args:
norm (str or callable): a callable that takes the number of
channels and return a `nn.Module`, or a pre-defined string
(one of {"FrozenBN", "BN", "GN"}).
stride_in_1x1 (bool): when stride==2, whether to put stride in the
first 1x1 convolution or the bottleneck 3x3 convolution.
"""
super().__init__(in_channels, out_channels, stride)
if (in_channels != out_channels) or (stride > 1):
self.shortcut = Conv2d(
in_channels,
out_channels,
kernel_size=1,
stride=stride,
bias=False,
norm=get_norm(norm, out_channels),
)
else:
self.shortcut = None
# The original MSRA ResNet models have stride in the first 1x1 conv
# The subsequent fb.torch.resnet and Caffe2 ResNe[X]t implementations have
# stride in the 3x3 conv
stride_1x1, stride_3x3 = (stride, 1) if stride_in_1x1 else (1, stride)
self.conv1 = Conv2d(
in_channels,
bottleneck_channels,
kernel_size=1,
stride=stride_1x1,
bias=False,
norm=get_norm(norm, bottleneck_channels),
)
self.conv2 = Conv2d(
bottleneck_channels,
bottleneck_channels,
kernel_size=3,
stride=stride_3x3,
padding=1 * dilation,
bias=False,
groups=num_groups,
dilation=dilation,
norm=get_norm(norm, bottleneck_channels),
)
self.conv3 = Conv2d(
bottleneck_channels,
out_channels,
kernel_size=1,
bias=False,
norm=get_norm(norm, out_channels),
)
for layer in [self.conv1, self.conv2, self.conv3, self.shortcut]:
if layer is not None: # shortcut can be None
weight_init.c2_msra_fill(layer)
# Zero-initialize the last normalization in each residual branch,
# so that at the beginning, the residual branch starts with zeros,
# and each residual block behaves like an identity.
# See Sec 5.1 in "Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour":
# "For BN layers, the learnable scaling coefficient γ is initialized
# to be 1, except for each residual block's last BN
# where γ is initialized to be 0."
# nn.init.constant_(self.conv3.norm.weight, 0)
# TODO this somehow hurts performance when training GN models from scratch.
# Add it as an option when we need to use this code to train a backbone.
def forward(self, x):
out = self.conv1(x)
out = F.relu_(out)
out = self.conv2(out)
out = F.relu_(out)
out = self.conv3(out)
if self.shortcut is not None:
shortcut = self.shortcut(x)
else:
shortcut = x
out += shortcut
out = F.relu_(out)
return out
class DeformBottleneckBlock(ResNetBlockBase):
def __init__(
self,
in_channels,
out_channels,
*,
bottleneck_channels,
stride=1,
num_groups=1,
norm="BN",
stride_in_1x1=False,
dilation=1,
deform_modulated=False,
deform_num_groups=1,
):
"""
Similar to :class:`BottleneckBlock`, but with deformable conv in the 3x3 convolution.
"""
super().__init__(in_channels, out_channels, stride)
self.deform_modulated = deform_modulated
if in_channels != out_channels:
self.shortcut = Conv2d(
in_channels,
out_channels,
kernel_size=1,
stride=stride,
bias=False,
norm=get_norm(norm, out_channels),
)
else:
self.shortcut = None
stride_1x1, stride_3x3 = (stride, 1) if stride_in_1x1 else (1, stride)
self.conv1 = Conv2d(
in_channels,
bottleneck_channels,
kernel_size=1,
stride=stride_1x1,
bias=False,
norm=get_norm(norm, bottleneck_channels),
)
if deform_modulated:
deform_conv_op = ModulatedDeformConv
# offset channels are 2 or 3 (if with modulated) * kernel_size * kernel_size
offset_channels = 27
else:
deform_conv_op = DeformConv
offset_channels = 18
self.conv2_offset = Conv2d(
bottleneck_channels,
offset_channels * deform_num_groups,
kernel_size=3,
stride=stride_3x3,
padding=1 * dilation,
dilation=dilation,
)
self.conv2 = deform_conv_op(
bottleneck_channels,
bottleneck_channels,
kernel_size=3,
stride=stride_3x3,
padding=1 * dilation,
bias=False,
groups=num_groups,
dilation=dilation,
deformable_groups=deform_num_groups,
norm=get_norm(norm, bottleneck_channels),
)
self.conv3 = Conv2d(
bottleneck_channels,
out_channels,
kernel_size=1,
bias=False,
norm=get_norm(norm, out_channels),
)
for layer in [self.conv1, self.conv2, self.conv3, self.shortcut]:
if layer is not None: # shortcut can be None
weight_init.c2_msra_fill(layer)
nn.init.constant_(self.conv2_offset.weight, 0)
nn.init.constant_(self.conv2_offset.bias, 0)
def forward(self, x):
out = self.conv1(x)
out = F.relu_(out)
if self.deform_modulated:
offset_mask = self.conv2_offset(out)
offset_x, offset_y, mask = torch.chunk(offset_mask, 3, dim=1)
offset = torch.cat((offset_x, offset_y), dim=1)
mask = mask.sigmoid()
out = self.conv2(out, offset, mask)
else:
offset = self.conv2_offset(out)
out = self.conv2(out, offset)
out = F.relu_(out)
out = self.conv3(out)
if self.shortcut is not None:
shortcut = self.shortcut(x)
else:
shortcut = x
out += shortcut
out = F.relu_(out)
return out
def make_stage(block_class, num_blocks, first_stride, **kwargs):
"""
Create a resnet stage by creating many blocks.
Args:
block_class (class): a subclass of ResNetBlockBase
num_blocks (int):
first_stride (int): the stride of the first block. The other blocks will have stride=1.
A `stride` argument will be passed to the block constructor.
kwargs: other arguments passed to the block constructor.
Returns:
list[nn.Module]: a list of block module.
"""
blocks = []
use_dilation = kwargs["dilation"]
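    # When dilation is enabled for this stage, the per-block dilation cycles through
    # 2, 3, 4 (i % 3 + 2); otherwise every block uses dilation 1.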
for i in range(num_blocks):
kwargs["dilation"] = 1 if not use_dilation else (i % 3 + 2)
blocks.append(block_class(stride=first_stride if i == 0 else 1, **kwargs))
kwargs["in_channels"] = kwargs["out_channels"]
return blocks
class CSPStage(nn.Module):
def __init__(self, block_class, num_blocks, first_stride, **kwargs):
"""
Create a csp-resnet stage by creating many blocks.
Args:
block_class (class): a subclass of ResNetBlockBase
num_blocks (int):
first_stride (int): the stride of the first block. The other blocks will have stride=1.
A `stride` argument will be passed to the block constructor.
kwargs: other arguments passed to the block constructor.
"""
super().__init__()
self.blocks = []
self.stride = first_stride
use_dilation = kwargs["dilation"]
in_channels = kwargs["in_channels"]
out_channels = kwargs["out_channels"]
self.out_channels = out_channels
norm = kwargs["norm"]
if first_stride == 1: # stage 2
self.pre_conv = Conv2d(
in_channels,
in_channels,
kernel_size=1,
stride=1,
bias=False,
norm=get_norm(norm, in_channels),
)
weight_init.c2_msra_fill(self.pre_conv)
for i in range(num_blocks):
kwargs["dilation"] = 1 if not use_dilation else (i % 3 + 2)
kwargs["out_channels"] = out_channels // 2
self.blocks.append(block_class(stride=1, **kwargs))
kwargs["in_channels"] = kwargs["out_channels"]
self.after_conv = Conv2d(
out_channels // 2,
out_channels // 2,
kernel_size=1,
stride=1,
bias=False,
norm=get_norm(norm, out_channels // 2),
)
weight_init.c2_msra_fill(self.after_conv)
self.csp_conv = Conv2d(
in_channels,
out_channels // 2,
kernel_size=1,
stride=1,
bias=False,
norm=get_norm(norm, out_channels // 2),
)
weight_init.c2_msra_fill(self.csp_conv)
elif first_stride > 1: # stage 3 4 5
self.transition_conv1 = Conv2d(
in_channels,
in_channels // 2,
kernel_size=1,
stride=1,
bias=False,
norm=get_norm(norm, in_channels // 2),
)
weight_init.c2_msra_fill(self.transition_conv1)
self.transition_conv2 = Conv2d(
in_channels // 2,
in_channels // 2,
kernel_size=3,
stride=first_stride,
padding=1,
bias=False,
norm=get_norm(norm, in_channels // 2),
)
weight_init.c2_msra_fill(self.transition_conv2)
self.pre_conv = Conv2d(
in_channels // 2,
in_channels,
kernel_size=1,
stride=1,
bias=False,
norm=get_norm(norm, in_channels),
)
weight_init.c2_msra_fill(self.pre_conv)
for i in range(num_blocks - 1):
kwargs["dilation"] = 1 if not use_dilation else (i % 3 + 2)
kwargs["out_channels"] = out_channels // 2
self.blocks.append(block_class(stride=1, **kwargs))
kwargs["in_channels"] = kwargs["out_channels"]
self.after_conv = Conv2d(
out_channels // 2,
out_channels // 2,
kernel_size=1,
stride=1,
bias=False,
norm=get_norm(norm, out_channels // 2),
)
weight_init.c2_msra_fill(self.after_conv)
self.csp_conv = Conv2d(
in_channels // 2,
out_channels // 2,
kernel_size=1,
stride=1,
bias=False,
norm=get_norm(norm, out_channels // 2),
)
weight_init.c2_msra_fill(self.csp_conv)
self.blocks = nn.Sequential(*self.blocks)
def forward(self, x):
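        # CSP forward: half of the output channels is produced by the bottleneck blocks
        # (pre_conv -> blocks -> after_conv), the other half bypasses them via csp_conv;
        # the stride > 1 branch first downsamples with the two transition convs.
        # The two halves are concatenated along the channel dimension.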
if self.stride == 1:
out = self.pre_conv(x)
out = F.relu_(out)
for block in self.blocks:
out = block(out)
            out = self.after_conv(out)
out = F.relu_(out)
csp = self.csp_conv(x)
csp = F.relu_(csp)
return torch.cat((out, csp), dim=1)
elif self.stride > 1:
x = self.transition_conv1(x)
x = F.relu_(x)
x = self.transition_conv2(x)
x = F.relu_(x)
csp = self.csp_conv(x)
csp = F.relu_(csp)
out = self.pre_conv(x)
out = F.relu_(out)
for block in self.blocks:
out = block(out)
out = self.after_conv(out)
out = F.relu_(out)
return torch.cat((out, csp), dim=1)
def freeze(self):
for p in self.parameters():
p.requires_grad = False
FrozenBatchNorm2d.convert_frozen_batchnorm(self)
return self
class BasicStem(nn.Module):
def __init__(self, in_channels=3, out_channels=64, norm="BN"):
"""
Args:
norm (str or callable): a callable that takes the number of
channels and return a `nn.Module`, or a pre-defined string
(one of {"FrozenBN", "BN", "GN"}).
"""
super().__init__()
self.conv1 = Conv2d(
in_channels,
out_channels,
kernel_size=7,
stride=2,
padding=3,
bias=False,
norm=get_norm(norm, out_channels),
)
weight_init.c2_msra_fill(self.conv1)
def forward(self, x):
x = self.conv1(x)
x = F.relu_(x)
# x = F.max_pool2d(x, kernel_size=3, stride=2, padding=1)
return x
@property
def out_channels(self):
return self.conv1.out_channels
@property
def stride(self):
return 2 # = stride 2 conv
class ResNet(Backbone):
def __init__(self, stem, stages, num_classes=None, out_features=None):
"""
Args:
stem (nn.Module): a stem module
stages (list[list[ResNetBlock]]): several (typically 4) stages,
each contains multiple :class:`ResNetBlockBase`.
num_classes (None or int): if None, will not perform classification.
out_features (list[str]): name of the layers whose outputs should
be returned in forward. Can be anything in "stem", "linear", or "res2" ...
If None, will return the output of the last layer.
"""
super(ResNet, self).__init__()
self.stem = stem
self.num_classes = num_classes
current_stride = self.stem.stride
self._out_feature_strides = {"stem": current_stride}
self._out_feature_channels = {"stem": self.stem.out_channels}
self.stages_and_names = []
for i, blocks in enumerate(stages):
for block in blocks:
assert isinstance(block, ResNetBlockBase), block
curr_channels = block.out_channels
stage = nn.Sequential(*blocks)
name = "res" + str(i + 2)
self.add_module(name, stage)
self.stages_and_names.append((stage, name))
self._out_feature_strides[name] = current_stride = int(
current_stride * np.prod([k.stride for k in blocks])
)
self._out_feature_channels[name] = blocks[-1].out_channels
if num_classes is not None:
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.linear = nn.Linear(curr_channels, num_classes)
# Sec 5.1 in "Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour":
# "The 1000-way fully-connected layer is initialized by
# drawing weights from a zero-mean Gaussian with standard deviation of 0.01."
nn.init.normal_(self.linear.weight, std=0.01)
name = "linear"
if out_features is None:
out_features = [name]
self._out_features = out_features
assert len(self._out_features)
children = [x[0] for x in self.named_children()]
for out_feature in self._out_features:
assert out_feature in children, "Available children: {}".format(", ".join(children))
def forward(self, x):
outputs = {}
x = self.stem(x)
if "stem" in self._out_features:
outputs["stem"] = x
for stage, name in self.stages_and_names:
x = stage(x)
if name in self._out_features:
outputs[name] = x
if self.num_classes is not None:
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.linear(x)
if "linear" in self._out_features:
outputs["linear"] = x
return outputs
def output_shape(self):
return {
name: ShapeSpec(
channels=self._out_feature_channels[name], stride=self._out_feature_strides[name]
)
for name in self._out_features
}
class CSPResNet(Backbone):
def __init__(self, stem, stages, num_classes=None, out_features=None):
"""
Args:
stem (nn.Module): a stem module
stages (list[CSPStage]): several (typically 4) stages,
each contains multiple :class:`ResNetBlockBase`.
num_classes (None or int): if None, will not perform classification.
out_features (list[str]): name of the layers whose outputs should
be returned in forward. Can be anything in "stem", "linear", or "res2" ...
If None, will return the output of the last layer.
"""
super(CSPResNet, self).__init__()
self.stem = stem
self.num_classes = num_classes
current_stride = self.stem.stride
self._out_feature_strides = {"stem": current_stride}
self._out_feature_channels = {"stem": self.stem.out_channels}
self.stages_and_names = []
for i, stage in enumerate(stages):
curr_channels = stage.out_channels
name = "res" + str(i + 2)
self.add_module(name, stage)
self.stages_and_names.append((stage, name))
self._out_feature_strides[name] = current_stride = int(
current_stride * stage.stride)
self._out_feature_channels[name] = stage.out_channels
if num_classes is not None:
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.linear = nn.Linear(curr_channels, num_classes)
# Sec 5.1 in "Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour":
# "The 1000-way fully-connected layer is initialized by
# drawing weights from a zero-mean Gaussian with standard deviation of 0.01."
nn.init.normal_(self.linear.weight, std=0.01)
name = "linear"
if out_features is None:
out_features = [name]
self._out_features = out_features
assert len(self._out_features)
children = [x[0] for x in self.named_children()]
for out_feature in self._out_features:
assert out_feature in children, "Available children: {}".format(", ".join(children))
def forward(self, x):
outputs = {}
x = self.stem(x)
if "stem" in self._out_features:
outputs["stem"] = x
for stage, name in self.stages_and_names:
x = stage(x)
if name in self._out_features:
outputs[name] = x
if self.num_classes is not None:
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.linear(x)
if "linear" in self._out_features:
outputs["linear"] = x
return outputs
def output_shape(self):
return {
name: ShapeSpec(
channels=self._out_feature_channels[name], stride=self._out_feature_strides[name]
)
for name in self._out_features
}
@BACKBONE_REGISTRY.register()
def build_resneth_backbone(cfg, input_shape):
"""
Create a ResNet-h instance from config.
Returns:
ResNet: a :class:`ResNet` instance.
"""
# need registration of new blocks/stems?
norm = cfg.MODEL.RESNETH.NORM
stem = BasicStem(
in_channels=input_shape.channels,
out_channels=cfg.MODEL.RESNETH.STEM_OUT_CHANNELS,
norm=norm,
)
freeze_at = cfg.MODEL.RESNETH.FREEZE_AT
if freeze_at >= 1:
for p in stem.parameters():
p.requires_grad = False
stem = FrozenBatchNorm2d.convert_frozen_batchnorm(stem)
# fmt: off
out_features = cfg.MODEL.RESNETH.OUT_FEATURES
depth = cfg.MODEL.RESNETH.DEPTH
num_groups = cfg.MODEL.RESNETH.NUM_GROUPS
# width_per_group = cfg.MODEL.RESNETH.WIDTH_PER_GROUP
bottleneck_channels = [32, 64, 128, 256]
in_channels = [64, 128, 256, 512]
out_channels = [128, 256, 512, 1024]
stride_in_1x1 = cfg.MODEL.RESNETH.STRIDE_IN_1X1
dilation_on_per_stage = cfg.MODEL.RESNETH.DILATION_ON_PER_STAGE
deform_on_per_stage = cfg.MODEL.RESNETH.DEFORM_ON_PER_STAGE
deform_modulated = cfg.MODEL.RESNETH.DEFORM_MODULATED
deform_num_groups = cfg.MODEL.RESNETH.DEFORM_NUM_GROUPS
# fmt: on
# assert res5_dilation in {1, 2}, "res5_dilation cannot be {}.".format(res5_dilation)
num_blocks_per_stage = {50: [3, 4, 6, 3], 101: [3, 4, 23, 3], 152: [3, 8, 36, 3]}[depth]
stages = []
# Avoid creating variables without gradients
# It consumes extra memory and may cause allreduce to fail
out_stage_idx = [{"res2": 2, "res3": 3, "res4": 4, "res5": 5}[f] for f in out_features]
max_stage_idx = max(out_stage_idx)
for idx, stage_idx in enumerate(range(2, max_stage_idx + 1)):
# dilation = res5_dilation if stage_idx == 5 else 1
first_stride = 1 if idx == 0 else 2
stage_kargs = {
"num_blocks": num_blocks_per_stage[idx],
"first_stride": first_stride,
"in_channels": in_channels[idx],
"bottleneck_channels": bottleneck_channels[idx],
"out_channels": out_channels[idx],
"num_groups": num_groups,
"norm": norm,
"stride_in_1x1": stride_in_1x1,
"dilation": dilation_on_per_stage[idx],
}
if deform_on_per_stage[idx]:
stage_kargs["block_class"] = DeformBottleneckBlock
stage_kargs["deform_modulated"] = deform_modulated
stage_kargs["deform_num_groups"] = deform_num_groups
else:
stage_kargs["block_class"] = BottleneckBlock
blocks = make_stage(**stage_kargs)
if freeze_at >= stage_idx:
for block in blocks:
block.freeze()
stages.append(blocks)
return ResNet(stem, stages, out_features=out_features)
@BACKBONE_REGISTRY.register()
def build_cspresneth_backbone(cfg, input_shape):
"""
Create a CSP-ResNet-h instance from config.
Returns:
CSPResNet: a :class:`CSPResNet` instance.
"""
# need registration of new blocks/stems?
norm = cfg.MODEL.RESNETH.NORM
stem = BasicStem(
in_channels=input_shape.channels,
out_channels=cfg.MODEL.RESNETH.STEM_OUT_CHANNELS,
norm=norm,
)
freeze_at = cfg.MODEL.RESNETH.FREEZE_AT
if freeze_at >= 1:
for p in stem.parameters():
p.requires_grad = False
stem = FrozenBatchNorm2d.convert_frozen_batchnorm(stem)
# fmt: off
out_features = cfg.MODEL.RESNETH.OUT_FEATURES
depth = cfg.MODEL.RESNETH.DEPTH
num_groups = cfg.MODEL.RESNETH.NUM_GROUPS
# width_per_group = cfg.MODEL.RESNETH.WIDTH_PER_GROUP
bottleneck_channels = [32, 64, 128, 256]
in_channels = [64, 128, 256, 512]
out_channels = [128, 256, 512, 1024]
stride_in_1x1 = cfg.MODEL.RESNETH.STRIDE_IN_1X1
dilation_on_per_stage = cfg.MODEL.RESNETH.DILATION_ON_PER_STAGE
deform_on_per_stage = cfg.MODEL.RESNETH.DEFORM_ON_PER_STAGE
deform_modulated = cfg.MODEL.RESNETH.DEFORM_MODULATED
deform_num_groups = cfg.MODEL.RESNETH.DEFORM_NUM_GROUPS
# fmt: on
# assert res5_dilation in {1, 2}, "res5_dilation cannot be {}.".format(res5_dilation)
num_blocks_per_stage = {50: [3, 4, 6, 3], 101: [3, 4, 23, 3], 152: [3, 8, 36, 3]}[depth]
stages = []
# Avoid creating variables without gradients
# It consumes extra memory and may cause allreduce to fail
out_stage_idx = [{"res2": 2, "res3": 3, "res4": 4, "res5": 5}[f] for f in out_features]
max_stage_idx = max(out_stage_idx)
for idx, stage_idx in enumerate(range(2, max_stage_idx + 1)):
# dilation = res5_dilation if stage_idx == 5 else 1
first_stride = 1 if idx == 0 else 2
stage_kargs = {
"num_blocks": num_blocks_per_stage[idx],
"first_stride": first_stride,
"in_channels": in_channels[idx],
"bottleneck_channels": bottleneck_channels[idx],
"out_channels": out_channels[idx],
"num_groups": num_groups,
"norm": norm,
"stride_in_1x1": stride_in_1x1,
"dilation": dilation_on_per_stage[idx],
}
if deform_on_per_stage[idx]:
stage_kargs["block_class"] = DeformBottleneckBlock
stage_kargs["deform_modulated"] = deform_modulated
stage_kargs["deform_num_groups"] = deform_num_groups
else:
stage_kargs["block_class"] = BottleneckBlock
csp_stage = CSPStage(**stage_kargs)
if freeze_at >= stage_idx:
csp_stage.freeze()
stages.append(csp_stage)
return CSPResNet(stem, stages, out_features=out_features)
class LastLevelMaxPool(nn.Module):
"""
This module is used in the original FPN to generate a downsampled
P6 feature from P5.
"""
def __init__(self):
super().__init__()
self.num_levels = 1
self.in_feature = "p5"
def forward(self, x):
return [F.max_pool2d(x, kernel_size=1, stride=2, padding=0)]
class LastLevelP6P7(nn.Module):
"""
This module is used in RetinaNet to generate extra layers, P6 and P7 from
C5 feature.
"""
def __init__(self, in_channels, out_channels, in_feature="res5"):
super().__init__()
self.num_levels = 2
self.in_feature = in_feature
self.p6 = nn.Conv2d(in_channels, out_channels, 3, 2, 1)
self.p7 = nn.Conv2d(out_channels, out_channels, 3, 2, 1)
for module in [self.p6, self.p7]:
weight_init.c2_xavier_fill(module)
def forward(self, c5):
p6 = self.p6(c5)
p7 = self.p7(F.relu_(p6))
return [p6, p7]
class FPN_resneth(Backbone):
"""
This module implements Feature Pyramid Network.
It creates pyramid features built on top of some input feature maps.
"""
def __init__(
self, bottom_up, in_features, out_channels, norm="", top_block=None, fuse_type="sum"
):
"""
Args:
bottom_up (Backbone): module representing the bottom up subnetwork.
Must be a subclass of :class:`Backbone`. The multi-scale feature
maps generated by the bottom up network, and listed in `in_features`,
are used to generate FPN levels.
in_features (list[str]): names of the input feature maps coming
from the backbone to which FPN is attached. For example, if the
backbone produces ["res2", "res3", "res4"], any *contiguous* sublist
of these may be used; order must be from high to low resolution.
out_channels (int): number of channels in the output feature maps.
norm (str): the normalization to use.
top_block (nn.Module or None): if provided, an extra operation will
be performed on the output of the last (smallest resolution)
FPN output, and the result will extend the result list. The top_block
further downsamples the feature map. It must have an attribute
"num_levels", meaning the number of extra FPN levels added by
this block, and "in_feature", which is a string representing
its input feature (e.g., p5).
fuse_type (str): types for fusing the top down features and the lateral
ones. It can be "sum" (default), which sums up element-wise; or "avg",
which takes the element-wise mean of the two.
"""
super(FPN_resneth, self).__init__()
assert isinstance(bottom_up, Backbone)
# Feature map strides and channels from the bottom up network (e.g. ResNet)
input_shapes = bottom_up.output_shape()
in_strides = [input_shapes[f].stride for f in in_features]
in_channels = [input_shapes[f].channels for f in in_features]
_assert_strides_are_log2_contiguous(in_strides)
lateral_convs = []
output_convs = []
use_bias = norm == ""
for idx, in_channels in enumerate(in_channels):
lateral_norm = get_norm(norm, out_channels)
output_norm = get_norm(norm, out_channels)
lateral_conv = Conv2d(
in_channels, out_channels, kernel_size=1, bias=use_bias, norm=lateral_norm
)
output_conv = Conv2d(
out_channels,
out_channels,
kernel_size=3,
stride=1,
padding=1,
bias=use_bias,
norm=output_norm,
)
weight_init.c2_xavier_fill(lateral_conv)
weight_init.c2_xavier_fill(output_conv)
stage = int(math.log2(in_strides[idx])) + 1
self.add_module("fpn_lateral{}".format(stage), lateral_conv)
self.add_module("fpn_output{}".format(stage), output_conv)
lateral_convs.append(lateral_conv)
output_convs.append(output_conv)
# Place convs into top-down order (from low to high resolution)
# to make the top-down computation in forward clearer.
self.lateral_convs = lateral_convs[::-1]
self.output_convs = output_convs[::-1]
self.top_block = top_block
self.in_features = in_features
self.bottom_up = bottom_up
# Return feature names are "p<stage>", like ["p2", "p3", ..., "p6"]
self._out_feature_strides = {"p{}".format(int(math.log2(s)) + 1): s for s in in_strides}
# top block output feature maps.
if self.top_block is not None:
for s in range(stage, stage + self.top_block.num_levels):
self._out_feature_strides["p{}".format(s + 1)] = 2 ** s
self._out_features = list(self._out_feature_strides.keys())
self._out_feature_channels = {k: out_channels for k in self._out_features}
self._size_divisibility = in_strides[-1]
assert fuse_type in {"avg", "sum"}
self._fuse_type = fuse_type
@property
def size_divisibility(self):
return self._size_divisibility
def forward(self, x):
"""
Args:
input (dict[str->Tensor]): mapping feature map name (e.g., "res5") to
feature map tensor for each feature level in high to low resolution order.
Returns:
dict[str->Tensor]:
mapping from feature map name to FPN feature map tensor
in high to low resolution order. Returned feature names follow the FPN
paper convention: "p<stage>", where stage has stride = 2 ** stage e.g.,
["p2", "p3", ..., "p6"].
"""
# Reverse feature maps into top-down order (from low to high resolution)
bottom_up_features = self.bottom_up(x)
x = [bottom_up_features[f] for f in self.in_features[::-1]]
results = []
prev_features = self.lateral_convs[0](x[0])
results.append(self.output_convs[0](prev_features))
for features, lateral_conv, output_conv in zip(
x[1:], self.lateral_convs[1:], self.output_convs[1:]
):
top_down_features = F.interpolate(prev_features, scale_factor=2, mode="nearest")
lateral_features = lateral_conv(features)
prev_features = lateral_features + top_down_features
if self._fuse_type == "avg":
prev_features /= 2
results.insert(0, output_conv(prev_features))
if self.top_block is not None:
top_block_in_feature = bottom_up_features.get(self.top_block.in_feature, None)
if top_block_in_feature is None:
top_block_in_feature = results[self._out_features.index(self.top_block.in_feature)]
results.extend(self.top_block(top_block_in_feature))
assert len(self._out_features) == len(results)
return dict(zip(self._out_features, results))
def output_shape(self):
return {
name: ShapeSpec(
channels=self._out_feature_channels[name], stride=self._out_feature_strides[name]
)
for name in self._out_features
}
# class SSD_resneth(Backbone):
# """
# This module implements Feature Pyramid Network.
# It creates pyramid features built on top of some input feature maps.
# """
# def __init__(
# self, bottom_up, in_features, out_channels, norm="", top_block=None, fuse_type="sum"
# ):
# """
# Args:
# bottom_up (Backbone): module representing the bottom up subnetwork.
# Must be a subclass of :class:`Backbone`. The multi-scale feature
# maps generated by the bottom up network, and listed in `in_features`,
# are used to generate FPN levels.
# in_features (list[str]): names of the input feature maps coming
# from the backbone to which FPN is attached. For example, if the
# backbone produces ["res2", "res3", "res4"], any *contiguous* sublist
# of these may be used; order must be from high to low resolution.
# out_channels (int): number of channels in the output feature maps.
# norm (str): the normalization to use.
# top_block (nn.Module or None): if provided, an extra operation will
# be performed on the output of the last (smallest resolution)
# FPN output, and the result will extend the result list. The top_block
# further downsamples the feature map. It must have an attribute
# "num_levels", meaning the number of extra FPN levels added by
# this block, and "in_feature", which is a string representing
# its input feature (e.g., p5).
# fuse_type (str): types for fusing the top down features and the lateral
# ones. It can be "sum" (default), which sums up element-wise; or "avg",
# which takes the element-wise mean of the two.
# """
# super(SSD_resneth, self).__init__()
# assert isinstance(bottom_up, Backbone)
# # Feature map strides and channels from the bottom up network (e.g. ResNet)
# input_shapes = bottom_up.output_shape()
# in_strides = [input_shapes[f].stride for f in in_features]
# in_channels = [input_shapes[f].channels for f in in_features]
# _assert_strides_are_log2_contiguous(in_strides)
# output_convs = []
# use_bias = norm == ""
# for idx, in_channel in enumerate(in_channels):
# output_norm = get_norm(norm, out_channels)
# output_conv = Conv2d(
# in_channel, out_channels, kernel_size=1, bias=use_bias, norm=output_norm
# )
# weight_init.c2_xavier_fill(output_conv)
# stage = int(math.log2(in_strides[idx])) + 1
# self.add_module("ssd_output{}".format(stage), output_conv)
# output_convs.append(output_conv)
# # Place convs into top-down order (from low to high resolution)
# # to make the top-down computation in forward clearer.
# self.output_convs = output_convs[::-1]
# self.top_block = top_block
# self.in_features = in_features
# self.bottom_up = bottom_up
# # Return feature names are "p<stage>", like ["p2", "p3", ..., "p6"]
# self._out_feature_strides = {"p{}".format(int(math.log2(s)) + 1): s for s in in_strides}
# # top block output feature maps.
# if self.top_block is not None:
# for s in range(stage, stage + self.top_block.num_levels):
# self._out_feature_strides["p{}".format(s + 1)] = 2 ** s
# self._out_features = list(self._out_feature_strides.keys())
# self._out_feature_channels = {k: out_channels for k in self._out_features}
# self._size_divisibility = in_strides[-1]
# assert fuse_type in {"avg", "sum"}
# self._fuse_type = fuse_type
# @property
# def size_divisibility(self):
# return self._size_divisibility
# def forward(self, x):
# """
# Args:
# input (dict[str->Tensor]): mapping feature map name (e.g., "res5") to
# feature map tensor for each feature level in high to low resolution order.
# Returns:
# dict[str->Tensor]:
# mapping from feature map name to FPN feature map tensor
# in high to low resolution order. Returned feature names follow the FPN
# paper convention: "p<stage>", where stage has stride = 2 ** stage e.g.,
# ["p2", "p3", ..., "p6"].
# """
# # Reverse feature maps into top-down order (from low to high resolution)
# bottom_up_features = self.bottom_up(x)
# x = [bottom_up_features[f] for f in self.in_features[::-1]]
# results = []
# prev_features = x[0]
# results.append(self.output_convs[0](prev_features))
# for features, output_conv in zip(
# x[1:], self.output_convs[1:]
# ):
# prev_features = features
# results.insert(0, output_conv(prev_features))
# if self.top_block is not None:
# top_block_in_feature = bottom_up_features.get(self.top_block.in_feature, None)
# if top_block_in_feature is None:
# top_block_in_feature = results[self._out_features.index(self.top_block.in_feature)]
# results.extend(self.top_block(top_block_in_feature))
# assert len(self._out_features) == len(results)
# return dict(zip(self._out_features, results))
# def output_shape(self):
# return {
# name: ShapeSpec(
# channels=self._out_feature_channels[name], stride=self._out_feature_strides[name]
# )
# for name in self._out_features
# }
class SSD_resneth(Backbone):
"""
    This module implements an SSD-style multi-scale feature extractor: it returns the
    selected bottom-up feature maps directly (no lateral or top-down fusion) and
    optionally appends extra levels through `top_block`.
"""
def __init__(
self, bottom_up, in_features, out_channels, norm="", top_block=None, fuse_type="sum"
):
"""
Args:
bottom_up (Backbone): module representing the bottom up subnetwork.
Must be a subclass of :class:`Backbone`. The multi-scale feature
maps generated by the bottom up network, and listed in `in_features`,
are used to generate FPN levels.
in_features (list[str]): names of the input feature maps coming
from the backbone to which FPN is attached. For example, if the
backbone produces ["res2", "res3", "res4"], any *contiguous* sublist
of these may be used; order must be from high to low resolution.
out_channels (int): number of channels in the output feature maps.
norm (str): the normalization to use.
top_block (nn.Module or None): if provided, an extra operation will
be performed on the output of the last (smallest resolution)
FPN output, and the result will extend the result list. The top_block
further downsamples the feature map. It must have an attribute
"num_levels", meaning the number of extra FPN levels added by
this block, and "in_feature", which is a string representing
its input feature (e.g., p5).
fuse_type (str): types for fusing the top down features and the lateral
ones. It can be "sum" (default), which sums up element-wise; or "avg",
which takes the element-wise mean of the two.
"""
super(SSD_resneth, self).__init__()
assert isinstance(bottom_up, Backbone)
# Feature map strides and channels from the bottom up network (e.g. ResNet)
input_shapes = bottom_up.output_shape()
in_strides = [input_shapes[f].stride for f in in_features]
in_channels = [input_shapes[f].channels for f in in_features]
_assert_strides_are_log2_contiguous(in_strides)
self.top_block = top_block
self.in_features = in_features
self.bottom_up = bottom_up
# Return feature names are "p<stage>", like ["p2", "p3", ..., "p6"]
self._out_feature_strides = {"p{}".format(int(math.log2(s)) + 1): s for s in in_strides}
# top block output feature maps.
stage = 5
if self.top_block is not None:
for s in range(stage, stage + self.top_block.num_levels):
self._out_feature_strides["p{}".format(s + 1)] = 2 ** s
self._out_features = list(self._out_feature_strides.keys())
self._out_feature_channels = {"p3": 256, "p4": 512, "p5": 1024,
"p6": out_channels, "p7": out_channels}
self._size_divisibility = in_strides[-1]
assert fuse_type in {"avg", "sum"}
self._fuse_type = fuse_type
@property
def size_divisibility(self):
return self._size_divisibility
def forward(self, x):
"""
Args:
input (dict[str->Tensor]): mapping feature map name (e.g., "res5") to
feature map tensor for each feature level in high to low resolution order.
Returns:
dict[str->Tensor]:
mapping from feature map name to FPN feature map tensor
in high to low resolution order. Returned feature names follow the FPN
paper convention: "p<stage>", where stage has stride = 2 ** stage e.g.,
["p2", "p3", ..., "p6"].
"""
# Reverse feature maps into top-down order (from low to high resolution)
bottom_up_features = self.bottom_up(x)
x = [bottom_up_features[f] for f in self.in_features[::-1]]
results = []
prev_features = x[0]
results.append(prev_features)
for features in x[1:]:
prev_features = features
results.insert(0, prev_features)
if self.top_block is not None:
top_block_in_feature = bottom_up_features.get(self.top_block.in_feature, None)
if top_block_in_feature is None:
top_block_in_feature = results[self._out_features.index(self.top_block.in_feature)]
results.extend(self.top_block(top_block_in_feature))
assert len(self._out_features) == len(results)
return dict(zip(self._out_features, results))
def output_shape(self):
return {
name: ShapeSpec(
channels=self._out_feature_channels[name], stride=self._out_feature_strides[name]
)
for name in self._out_features
}
def _assert_strides_are_log2_contiguous(strides):
"""
    Assert that each stride is 2x its preceding stride, i.e. "contiguous in log2".
"""
for i, stride in enumerate(strides[1:], 1):
assert stride == 2 * strides[i - 1], "Strides {} {} are not log2 contiguous".format(
stride, strides[i - 1]
)
@BACKBONE_REGISTRY.register()
def build_crpnet_resneth_fpn_backbone(cfg, input_shape: ShapeSpec):
"""
Args:
cfg: a detectron2 CfgNode
Returns:
backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`.
"""
bottom_up = build_resneth_backbone(cfg, input_shape)
in_features = cfg.MODEL.FPN.IN_FEATURES
out_channels = cfg.MODEL.FPN.OUT_CHANNELS
in_channels_p6p7 = bottom_up.output_shape()["res5"].channels
backbone = FPN_resneth(
bottom_up=bottom_up,
in_features=in_features,
out_channels=out_channels,
norm=cfg.MODEL.FPN.NORM,
top_block=LastLevelP6P7(in_channels_p6p7, out_channels),
fuse_type=cfg.MODEL.FPN.FUSE_TYPE,
)
return backbone
@BACKBONE_REGISTRY.register()
def build_crpnet_cspresneth_fpn_backbone(cfg, input_shape: ShapeSpec):
"""
Args:
cfg: a detectron2 CfgNode
Returns:
backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`.
"""
bottom_up = build_cspresneth_backbone(cfg, input_shape)
in_features = cfg.MODEL.FPN.IN_FEATURES
out_channels = cfg.MODEL.FPN.OUT_CHANNELS
in_channels_p6p7 = bottom_up.output_shape()["res5"].channels
backbone = FPN_resneth(
bottom_up=bottom_up,
in_features=in_features,
out_channels=out_channels,
norm=cfg.MODEL.FPN.NORM,
top_block=LastLevelP6P7(in_channels_p6p7, out_channels),
fuse_type=cfg.MODEL.FPN.FUSE_TYPE,
)
return backbone
@BACKBONE_REGISTRY.register()
def build_crpnet_resneth_ssd_backbone(cfg, input_shape: ShapeSpec):
"""
Args:
cfg: a detectron2 CfgNode
Returns:
backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`.
"""
bottom_up = build_resneth_backbone(cfg, input_shape)
in_features = cfg.MODEL.FPN.IN_FEATURES
out_channels = cfg.MODEL.FPN.OUT_CHANNELS
in_channels_p6p7 = bottom_up.output_shape()["res5"].channels
backbone = SSD_resneth(
bottom_up=bottom_up,
in_features=in_features,
out_channels=out_channels,
norm=cfg.MODEL.FPN.NORM,
top_block=LastLevelP6P7(in_channels_p6p7, out_channels),
fuse_type=cfg.MODEL.FPN.FUSE_TYPE,
)
return backbone
@BACKBONE_REGISTRY.register()
def build_crpnet_resnet_fpn_backbone(cfg, input_shape: ShapeSpec):
"""
Args:
cfg: a detectron2 CfgNode
Returns:
backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`.
"""
from detectron2.modeling import build_resnet_backbone, FPN
bottom_up = build_resnet_backbone(cfg, input_shape)
in_features = cfg.MODEL.FPN.IN_FEATURES
out_channels = cfg.MODEL.FPN.OUT_CHANNELS
in_channels_p6p7 = bottom_up.output_shape()["res5"].channels
backbone = FPN(
bottom_up=bottom_up,
in_features=in_features,
out_channels=out_channels,
norm=cfg.MODEL.FPN.NORM,
top_block=LastLevelP6P7(in_channels_p6p7, out_channels),
fuse_type=cfg.MODEL.FPN.FUSE_TYPE,
)
return backbone
@BACKBONE_REGISTRY.register()
def build_hcrnet_resneth_fpn_backbone(cfg, input_shape: ShapeSpec):
"""
Args:
cfg: a detectron2 CfgNode
Returns:
backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`.
"""
bottom_up = build_resneth_backbone(cfg, input_shape)
in_features = cfg.MODEL.FPN.IN_FEATURES
out_channels = cfg.MODEL.FPN.OUT_CHANNELS
backbone = FPN_resneth(
bottom_up=bottom_up,
in_features=in_features,
out_channels=out_channels,
norm=cfg.MODEL.FPN.NORM,
top_block=LastLevelMaxPool(),
fuse_type=cfg.MODEL.FPN.FUSE_TYPE,
)
return backbone
```
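A short sketch of how `FPN_resneth` (and `SSD_resneth`) name their output levels. The stride values assume `in_features = ["res3", "res4", "res5"]` under the stride-2 `BasicStem` above (no max-pool), which puts those stages at strides 4/8/16; these inputs are assumptions for illustration only:
```python
import math

in_strides = [4, 8, 16]  # assumed strides of res3, res4, res5 with the stride-2 stem

# Same naming rule as FPN_resneth.__init__: "p<log2(stride) + 1>".
out_feature_strides = {"p{}".format(int(math.log2(s)) + 1): s for s in in_strides}

# LastLevelP6P7 appends two extra levels after the last bottom-up stage.
stage = int(math.log2(in_strides[-1])) + 1  # 5
for s in range(stage, stage + 2):
    out_feature_strides["p{}".format(s + 1)] = 2 ** s

print(out_feature_strides)  # {'p3': 4, 'p4': 8, 'p5': 16, 'p6': 32, 'p7': 64}
```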
#### File: utils/icp/icp_utils.py
```python
from ..renderer import opengl_utils
import numpy as np
from ..pysixd import transform
from sklearn.neighbors import NearestNeighbors
def rgbd_to_point_cloud(K, depth):
vs, us = depth.nonzero()
zs = depth[vs, us]
xs = ((us - K[0, 2]) * zs) / float(K[0, 0])
ys = ((vs - K[1, 2]) * zs) / float(K[1, 1])
pts = np.array([xs, ys, zs]).T
return pts
def nearest_neighbor(src, dst):
'''
Find the nearest (Euclidean) neighbor in dst for each point in src
Input:
src: Nxm array of points
dst: Nxm array of points
Output:
distances: Euclidean distances of the nearest neighbor
indices: dst indices of the nearest neighbor
'''
assert src.shape == dst.shape
neigh = NearestNeighbors(n_neighbors=1)
neigh.fit(dst)
distances, indices = neigh.kneighbors(src, return_distance=True)
return distances.ravel(), indices.ravel()
def best_fit_transform(A, B, depth_only=False, no_depth=False):
'''
Calculates the least-squares best-fit transform that maps corresponding points A to B in m spatial dimensions
Input:
A: Nxm numpy array of corresponding points
B: Nxm numpy array of corresponding points
Returns:
T: (m+1)x(m+1) homogeneous transformation matrix that maps A on to B
R: mxm rotation matrix
t: mx1 translation vector
'''
assert A.shape == B.shape
# get number of dimensions
m = A.shape[1]
# translate points to their centroids
centroid_A = np.mean(A, axis=0)
centroid_B = np.mean(B, axis=0)
AA = A - centroid_A
BB = B - centroid_B
    if depth_only and not no_depth:
R = np.eye(3)
t = centroid_B.T - centroid_A.T
# t = np.array([0, 0, t[2]])
else:
# rotation matrix
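        # Kabsch/Procrustes step: the SVD of the covariance of the centered point sets
        # yields the rotation that minimizes the squared alignment error.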
H = np.dot(AA.T, BB)
U, S, Vt = np.linalg.svd(H)
R = np.dot(Vt.T, U.T)
# special reflection case
if np.linalg.det(R) < 0:
Vt[m - 1, :] *= -1
R = np.dot(Vt.T, U.T)
t = centroid_B.T - np.dot(R, centroid_A.T)
        if no_depth and not depth_only:
t = np.array([t[0], t[1], 0])
T = np.identity(m + 1)
T[:m, :m] = R
T[:m, m] = t
return T, R, t
def icp(A, B, init_pose=None, max_iterations=200, tolerance=0.001, verbose=False, depth_only=False, no_depth=False):
assert A.shape == B.shape
# get number of dimensions
m = A.shape[1]
# make points homogeneous, copy them to maintain the originals
src = np.ones((m + 1, A.shape[0]))
dst = np.ones((m + 1, B.shape[0]))
src[:m, :] = np.copy(A.T)
dst[:m, :] = np.copy(B.T)
# apply the initial pose estimation
if init_pose is not None:
src = np.dot(init_pose, src)
prev_error = 0
for i in range(max_iterations):
# find the nearest neighbors between the current source and destination points
distances, indices = nearest_neighbor(src[:m, :].T, dst[:m, :].T)
# compute the transformation between the current source and nearest destination points
T, _, _ = best_fit_transform(src[:m, :].T, dst[:m, indices].T, depth_only=depth_only, no_depth=no_depth)
# update the current source
src = np.dot(T, src)
mean_error = np.mean(distances)
# print mean_error
# check error
if np.abs(prev_error - mean_error) < tolerance:
break
prev_error = mean_error
# calculate final transformation
T, _, _ = best_fit_transform(A, src[:m, :].T, depth_only=depth_only, no_depth=no_depth)
    if verbose:
        import matplotlib.pyplot as plt
        from mpl_toolkits.mplot3d import Axes3D  # noqa: F401 (registers the '3d' projection)
        ax = plt.figure().add_subplot(111, projection='3d')
        ax.scatter(src[0, :], src[1, :], src[2, :], label='estimated', marker='.', c='red')
        plt.legend()
        plt.show()
return T, distances, i
class ICPRefiner:
def __init__(self, model, im_size):
self.renderer = opengl_utils.DepthRender(model, im_size)
self.im_size = im_size
def refine(self, depth_crop, R_est, t_est, K_test, depth_only=False, no_depth=False, max_mean_dist_factor=2.0):
depth = self.renderer.render(self.im_size, 100, 10000, K_test, R_est, t_est)
synthetic_pts = rgbd_to_point_cloud(K_test, depth)
centroid_synthetic_pts = np.mean(synthetic_pts, axis=0)
        try:
            max_mean_dist = np.max(np.linalg.norm(synthetic_pts - centroid_synthetic_pts, axis=1))
        except ValueError:
            # The rendered depth map produced no points; fall back to the input pose.
            return (R_est, t_est)
real_depth_pts = rgbd_to_point_cloud(K_test, depth_crop)
real_synmean_dist = np.linalg.norm(real_depth_pts - centroid_synthetic_pts, axis=1)
real_depth_pts = real_depth_pts[real_synmean_dist < max_mean_dist_factor * max_mean_dist]
if len(real_depth_pts) < len(synthetic_pts) / 20.:
print('not enough visible points')
R_refined = R_est
t_refined = t_est
else:
N = 3000
sub_idcs_real = np.random.choice(len(real_depth_pts), np.min([len(real_depth_pts), len(synthetic_pts), N]))
sub_idcs_syn = np.random.choice(len(synthetic_pts), np.min([len(real_depth_pts), len(synthetic_pts), N]))
T, distances, iterations = icp(synthetic_pts[sub_idcs_syn], real_depth_pts[sub_idcs_real], tolerance=0.0000005, depth_only=depth_only, no_depth=no_depth)
            if no_depth:
angle, _, _ = transform.rotation_from_matrix(T)
angle_change_limit = 20 * np.pi / 180.
if np.abs(angle) > angle_change_limit:
T = np.eye(4)
H_est = np.zeros((4, 4))
# R_est, t_est is from model to camera
H_est[3, 3] = 1
H_est[:3, 3] = t_est
H_est[:3, :3] = R_est
H_est_refined = np.dot(T, H_est)
R_refined = H_est_refined[:3, :3]
t_refined = H_est_refined[:3, 3]
return R_refined, t_refined
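# Hedged usage sketch (the model, image size and camera intrinsics below are
# placeholders, not values from this repository):
#   refiner = ICPRefiner(model, im_size=(640, 480))
#   R_refined, t_refined = refiner.refine(depth_crop, R_est, t_est, K_test)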
``` |
{
"source": "jkznst/maskrcnn-benchmark",
"score": 2
} |
#### File: roi_heads/bb8keypoint_offset_head/roi_bb8keypoint_offset_predictors.py
```python
from torch import nn
class FastRCNNPredictor(nn.Module):
def __init__(self, config, pretrained=None):
super(FastRCNNPredictor, self).__init__()
stage_index = 4
stage2_relative_factor = 2 ** (stage_index - 1)
res2_out_channels = config.MODEL.RESNETS.RES2_OUT_CHANNELS
num_inputs = res2_out_channels * stage2_relative_factor
num_bb8keypoints = config.MODEL.ROI_BB8KEYPOINT_OFFSET_HEAD.NUM_CLASSES
self.avgpool = nn.AvgPool2d(kernel_size=7, stride=7)
self.bb8keypoint_offset_pred = nn.Linear(num_inputs, num_bb8keypoints * 2)
nn.init.normal_(self.bb8keypoint_offset_pred.weight, mean=0, std=0.001)
nn.init.constant_(self.bb8keypoint_offset_pred.bias, 0)
def forward(self, x):
x = self.avgpool(x)
x = x.view(x.size(0), -1)
bb8keypoint_offset_pred = self.bb8keypoint_offset_pred(x)
return bb8keypoint_offset_pred
class FPNPredictor(nn.Module):
def __init__(self, cfg):
super(FPNPredictor, self).__init__()
num_classes = cfg.MODEL.ROI_BB8KEYPOINT_OFFSET_HEAD.NUM_CLASSES
representation_size = cfg.MODEL.ROI_BOX_HEAD.MLP_HEAD_DIM
self.bb8keypoint_offset_pred = nn.Linear(representation_size, num_classes * 2)
nn.init.normal_(self.bb8keypoint_offset_pred.weight, std=0.001)
for l in [self.bb8keypoint_offset_pred]:
nn.init.constant_(l.bias, 0)
def forward(self, x):
bb8keypoint_deltas = self.bb8keypoint_offset_pred(x)
return bb8keypoint_deltas
_ROI_BB8KEYPOINT_OFFSET_PREDICTOR = {
"FastRCNNPredictor": FastRCNNPredictor,
"FPNPredictor": FPNPredictor,
}
def make_roi_bb8keypoint_offset_predictor(cfg):
func = _ROI_BB8KEYPOINT_OFFSET_PREDICTOR[cfg.MODEL.ROI_BB8KEYPOINT_OFFSET_HEAD.PREDICTOR]
return func(cfg)
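# Hedged usage sketch: cfg.MODEL.ROI_BB8KEYPOINT_OFFSET_HEAD.PREDICTOR selects the class, e.g.
#   predictor = make_roi_bb8keypoint_offset_predictor(cfg)
#   deltas = predictor(box_features)  # (num_rois, NUM_CLASSES * 2) keypoint offsets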
``` |
{
"source": "jkznst/pvnet",
"score": 2
} |
#### File: lib/utils/draw_utils.py
```python
import torch
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from transforms3d.euler import euler2mat
from skimage.io import imsave
def visualize_bounding_box(rgb, corners_pred, corners_targets=None, centers_pred=None, centers_targets=None, save=False, save_fn=None):
'''
:param rgb: torch tensor with size [b,3,h,w] or numpy array with size [b,h,w,3]
:param corners_pred: [b,1,8,2]
:param corners_targets: [b,1,8,2] or None
:param centers_pred: [b,1,2] or None
:param centers_targets: [b,1,2] or None
:param save:
:param save_fn:
:return:
'''
if isinstance(rgb, torch.Tensor):
rgb = rgb.permute(0, 2, 3, 1).detach().cpu().numpy()
rgb = rgb.astype(np.uint8)
batch_size = corners_pred.shape[0]
for idx in range(batch_size):
_, ax = plt.subplots(1)
ax.imshow(rgb[idx])
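        # Draw the projected 3D bounding box as two closed polylines over its 8 corners
        # (predictions in blue; ground truth, if given, in green below).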
ax.add_patch(
patches.Polygon(xy=corners_pred[idx, 0][[0, 1, 3, 2, 0, 4, 6, 2]], fill=False, linewidth=1, edgecolor='b'))
ax.add_patch(
patches.Polygon(xy=corners_pred[idx, 0][[5, 4, 6, 7, 5, 1, 3, 7]], fill=False, linewidth=1, edgecolor='b'))
if corners_targets is not None:
ax.add_patch(patches.Polygon(xy=corners_targets[idx, 0][[0, 1, 3, 2, 0, 4, 6, 2]], fill=False, linewidth=1,
edgecolor='g'))
ax.add_patch(patches.Polygon(xy=corners_targets[idx, 0][[5, 4, 6, 7, 5, 1, 3, 7]], fill=False, linewidth=1,
edgecolor='g'))
if centers_pred is not None:
ax.plot(centers_pred[idx, 0, 0],centers_pred[idx, 0, 1],'*')
if centers_targets is not None:
            ax.plot(centers_targets[idx, 0, 0], centers_targets[idx, 0, 1], '*')
if not save:
plt.show()
else:
plt.savefig(save_fn.format(idx))
plt.close()
def visualize_mask(mask_pred,mask_gt, save=False, save_fn=None):
'''
:param mask_pred: [b,h,w]
:param mask_gt: [b,h,w]
:return:
'''
b,h,w=mask_gt.shape
for bi in range(b):
img=np.zeros([h,w,3],np.uint8)
img[np.logical_and(mask_gt[bi],mask_pred[bi])]=np.asarray([0,255,0])
img[np.logical_and(np.logical_not(mask_gt[bi]),mask_pred[bi])]=np.asarray([[[255,255,0]]])
img[np.logical_and(np.logical_not(mask_pred[bi]),mask_gt[bi])]=np.asarray([[[255,0,0]]])
plt.imshow(img)
if not save:
plt.show()
else:
plt.savefig(save_fn.format(bi))
plt.close()
def visualize_overlap_mask(img,mask,save_fn):
b,h,w=mask.shape
for bi in range(b):
img[bi][mask[bi]>0]//=2
img[bi][mask[bi]>0]+=np.asarray([0,128,0],np.uint8)
imsave(save_fn.format(bi),img[bi])
def visualize_points_3d(pts1,pts2,K,h=480,w=640):
'''
:param pts1: [pn,3] prediction
:param pts2: [pn,3] target
:param K: [3,3]
:return:
'''
def get_pts_img(pts,R,T):
img_pts,_=pts_to_img_pts(pts,np.identity(3),np.zeros(3),K)
pts_img=img_pts_to_pts_img(img_pts,h,w)
trans_pts=np.matmul(pts-T,R.transpose())+T
trans_img_pts,_=pts_to_img_pts(trans_pts,np.identity(3),np.zeros(3),K)
trans_pts_img=img_pts_to_pts_img(trans_img_pts,h,w)
return pts_img,trans_pts_img
def get_img(pts_img1,pts_img2):
img=np.zeros([h,w,3],np.uint8)
img[np.logical_and(pts_img1>0,pts_img2>0)]=np.asarray([0,255,0],np.uint8)
img[np.logical_and(pts_img1>0,pts_img2==0)]=np.asarray([255,255,0],np.uint8)
img[np.logical_and(pts_img1==0,pts_img2>0)]=np.asarray([255,0,0],np.uint8)
return img
T=np.mean(np.concatenate([pts1,pts2],0),0)[None,:]
R=euler2mat(np.pi/2,0,0,'syzx')
pts_img1, trans_pts_img1=get_pts_img(pts1,R,T)
pts_img2, trans_pts_img2=get_pts_img(pts2,R,T)
overlap1=get_img(pts_img1,pts_img2)
overlap2=get_img(trans_pts_img1,trans_pts_img2)
return overlap1,overlap2
def visualize_mask_multi_class(mask_pred, mask_gt, colors, save=False, save_fn=None):
'''
:param mask_pred: [b,h,w]
:param mask_gt: [b,h,w]
:param colors: [cn,3]
:return:
'''
b,h,w=mask_gt.shape
cn,_=colors.shape
for bi in range(b):
img_pred=np.zeros([h,w,3],np.uint8)
for ci in range(cn):
img_pred[mask_pred[bi]==ci]=colors[ci]
img_gt=np.zeros([h,w,3],np.uint8)
for ci in range(cn):
img_gt[mask_gt[bi]==ci]=colors[ci]
        plt.subplot(121)
plt.imshow(img_pred)
        plt.subplot(122)
plt.imshow(img_gt)
if not save:
plt.show()
else:
plt.savefig(save_fn.format(bi))
plt.close()
def visualize_hypothesis(rgb, hyp_pts, hyp_counts, pts_target, save=False, save_fn=None):
'''
:param rgb: b,h,w
:param hyp_pts: b,hn,vn,2
:param hyp_counts: b,hn,vn
:param pts_target: b,vn,2
:param save:
:param save_fn:
:return:
'''
b,hn,vn,_=hyp_pts.shape
_,h,w,_=rgb.shape
for bi in range(b):
for vi in range(vn):
cur_hyp_counts=hyp_counts[bi,:,vi] # [hn]
cur_hyp_pts=hyp_pts[bi,:,vi] # [hn,2]
# mask=np.logical_and(np.logical_and(cur_hyp_pts[:,0]>-w*0.5,cur_hyp_pts[:,0]<w*1.5),
# np.logical_and(cur_hyp_pts[:,1]>-h*0.5,cur_hyp_pts[:,1]<h*1.5))
mask=np.logical_and(np.logical_and(cur_hyp_pts[:,0]>0,cur_hyp_pts[:,0]<w*1.0),
np.logical_and(cur_hyp_pts[:,1]>0,cur_hyp_pts[:,1]<h*1.0))
cur_hyp_pts[np.logical_not(mask)]=0.0
cur_hyp_counts[np.logical_not(mask)]=0
cur_hyp_counts=cur_hyp_counts.astype(np.float32)
colors=(cur_hyp_counts/cur_hyp_counts.max())#[:,None]#*np.array([[255,0,0]])
plt.figure(figsize=(10,8))
plt.imshow(rgb[bi])
plt.scatter(cur_hyp_pts[:,0],cur_hyp_pts[:,1],c=colors,s=0.1,cmap='viridis')
# plt.plot(pts_target[bi,vi,0],pts_target[bi,vi,1],'*',c='r')
if save:
plt.savefig(save_fn.format(bi,vi))
else:
plt.show()
plt.close()
def visualize_voting_ellipse(rgb,mean,var,target,save=False, save_fn=None):
'''
:param rgb: b,h,w,3
:param mean: b,vn,2
:param var: b,vn,2,2
:param save:
:param save_fn:
:return:
'''
b,vn,_=mean.shape
    red=np.array([1.0,0.0,0.0])
    yellow=np.asarray([1.0,1.0,0.0])
num=5
for bi in range(b):
for vi in range(vn):
_, ax = plt.subplots(1, figsize=(10, 8))
cov=var[bi,vi]
w,v=np.linalg.eig(cov)
for k in range(num):
size=w*k*3
                elp = patches.Ellipse(mean[bi, vi], size[0], size[1], np.arctan2(v[1, 0], v[0, 0]) / np.pi * 180, fill=False, color=red/num*(num-k)+yellow/num*k)
                ax.add_patch(elp)
            ax.scatter(mean[bi,vi,0],mean[bi,vi,1], marker='*', c=[red], s=8)
ax.imshow(rgb[bi])
if save:
plt.savefig(save_fn.format(bi))
else:
plt.show()
plt.close()
def visualize_vanishing_points(rgb, van_cens, save=False, save_fn=None):
b,h,w,_=rgb.shape
cen=van_cens[:,3,:] # [b,3]
van=van_cens[:,:3,:] # [b,3,3]
cen/=cen[:,2:]
for bi in range(b):
dir_2d=[]
for di in range(3):
dir=(van[bi,di,:]-cen[bi]*van[bi,di,2])[:2]
dir/=np.linalg.norm(dir)
dir_2d.append(dir)
        dir_2d=np.asarray(dir_2d)*20 # [3,2]
_, ax = plt.subplots(1)
ax.imshow(rgb[bi])
ax.add_patch(patches.Arrow(x=cen[bi,0],y=cen[bi,1],dx=dir_2d[0,0],dy=dir_2d[0,1],linewidth=2,edgecolor='r'))
ax.add_patch(patches.Arrow(x=cen[bi,0],y=cen[bi,1],dx=dir_2d[1,0],dy=dir_2d[1,1],linewidth=2,edgecolor='g'))
ax.add_patch(patches.Arrow(x=cen[bi,0],y=cen[bi,1],dx=dir_2d[2,0],dy=dir_2d[2,1],linewidth=2,edgecolor='b'))
if save:
plt.savefig(save_fn.format(bi))
else:
plt.show()
plt.close()
def visualize_points(rgb, pts_target, pts_pred=None, save=False, save_fn=None):
'''
:param rgb: torch tensor with size [b,3,h,w] or numpy array with size [b,h,w,3]
:param pts_target: [b,pn,2]
:param pts_pred: [b,pn,2]
:param save:
:param save_fn:
:return:
'''
if isinstance(rgb, torch.Tensor):
rgb = rgb.permute(0, 2, 3, 1).detach().cpu().numpy()
rgb = rgb.astype(np.uint8)
batch_size = pts_target.shape[0]
for idx in range(batch_size):
_, ax = plt.subplots(1)
ax.imshow(rgb[idx])
ax.plot(pts_target[idx,:,0],pts_target[idx,:,1],'*')
if pts_pred is not None:
ax.plot(pts_pred[idx,:,0],pts_pred[idx,:,1],'*')
if not save:
plt.show()
else:
plt.savefig(save_fn.format(idx))
plt.close()
def visualize_keypoints(rgb, pts_target, pts_pred=None, save=False, save_fn=None):
rgb=rgb.astype(np.uint8)
batch_size=rgb.shape[0]
for bi in range(batch_size):
_, ax = plt.subplots(1)
ax.imshow(rgb[bi])
ax.scatter(pts_target[bi,:,0],pts_target[bi,:,1],c=np.arange(pts_target.shape[1]))
if pts_pred is not None:
ax.scatter(pts_pred[bi,:,0],pts_pred[bi,:,1],c=np.arange(pts_pred.shape[1]))
if not save:
plt.show()
else:
plt.savefig(save_fn.format(bi))
plt.close()
def imagenet_to_uint8(rgb,torch_format=True):
'''
:param rgb: [b,3,h,w]
:return:
'''
if torch_format:
if len(rgb.shape)==4:
rgb = rgb.transpose(0, 2, 3, 1)
else:
rgb = rgb.transpose(1, 2, 0)
rgb *= np.asarray([0.229, 0.224, 0.225])[None, None, :]
rgb += np.asarray([0.485, 0.456, 0.406])[None, None, :]
rgb *= 255
rgb = rgb.astype(np.uint8)
return rgb
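# A minimal usage sketch (not part of the original module; the input is made up):
#   rgb_norm = np.random.randn(2, 3, 64, 64)    # a normalized batch in torch [b,3,h,w] layout
#   rgb_uint8 = imagenet_to_uint8(rgb_norm)     # -> array of shape (2, 64, 64, 3), dtype uint8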
def write_points(filename, pts, colors=None):
has_color=pts.shape[1]>=6
with open(filename, 'w') as f:
for i,pt in enumerate(pts):
if colors is None:
if has_color:
f.write('{} {} {} {} {} {}\n'.format(pt[0],pt[1],pt[2],int(pt[3]),int(pt[4]),int(pt[5])))
else:
f.write('{} {} {}\n'.format(pt[0],pt[1],pt[2]))
else:
if colors.shape[0]==pts.shape[0]:
f.write('{} {} {} {} {} {}\n'.format(pt[0],pt[1],pt[2],int(colors[i,0]),int(colors[i,1]),int(colors[i,2])))
else:
f.write('{} {} {} {} {} {}\n'.format(pt[0],pt[1],pt[2],int(colors[0]),int(colors[1]),int(colors[2])))
def img_pts_to_pts_img(im_pts, img_row, img_col):
pts_img=np.zeros([img_row,img_col],dtype=np.float32)
for pt in im_pts:
x,y = round(pt[0]), round(pt[1])
x=int(img_col-1 if x>=img_col else x)
y=int(img_row-1 if y>=img_row else y)
x=0 if x<0 else x
y=0 if y<0 else y
pts_img[y,x]=1.0
return pts_img
def img_pts_to_pts_img_colors(img, im_pts, img_rgbs):
pts_img=img.copy()
img_row,img_col,_=img.shape
for pt,rgb in zip(im_pts,img_rgbs):
x,y = round(pt[0]), round(pt[1])
x=int(img_col-1 if x>=img_col else x)
y=int(img_row-1 if y>=img_row else y)
x=0 if x<0 else x
y=0 if y<0 else y
pts_img[y,x]=rgb
return pts_img
def pts_to_img_pts(pts,R,T,K):
img_pts=np.matmul(np.matmul(pts,R.transpose())+T[None,:],K.transpose())
img_dpt=img_pts[:,2]
img_pts=img_pts[:,:2]/img_pts[:,2:]
return img_pts,img_dpt
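# A small illustrative sketch (not part of the original module): project a few 3D points with a
# hypothetical pinhole matrix K, then rasterize them into a binary [h,w] map.
#   K = np.array([[500., 0., 320.], [0., 500., 240.], [0., 0., 1.]])
#   pts3d = np.array([[0.0, 0.0, 2.0], [0.1, -0.1, 2.5]])
#   img_pts, img_dpt = pts_to_img_pts(pts3d, np.identity(3), np.zeros(3), K)
#   pts_img = img_pts_to_pts_img(img_pts, 480, 640)   # ones at the projected pixel locations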
``` |
{
"source": "jl00/PyPPM",
"score": 2
} |
#### File: PyPPM/ppmpy/rprofile.py
```python
from __future__ import division
from __future__ import print_function
from builtins import str
from builtins import range
from past.utils import old_div
from builtins import object
import struct
import logging
import os
import re
import sys
try:
import numpy as np
except ImportError:
print("numpy is required for reading rprofiles")
logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger('rp_info')
log.setLevel(logging.DEBUG)
center_types = [(k, 'f8') for k in ('phi', 'theta', 'x', 'y', 'z')]
normal_types = [(k, 'f8') for k in ('x', 'y', 'z')]
class rprofile_set(object):
def __init__(self, source, **kwargs):
"""Load a set of RProfiles. Can pass a `path` to a set of files, or a list of `files`. Passing
the `lazy=True` makes everything operate only from disk, without loading things into memory
(for large datasets).
`source` is the path to a directory containing profiles or a list of RProfile files to open
`stride` the iteration increment. Every `stride` element is looked at during iteration. Default is 1.
`first_dump` the first dump to iterate from or None
`last_dump` the last dump to iterate through (inclusive)
There is no `.get` method... you must iterate through the files like this:
.. code-block:: python
:linenos:
rp_set = lcse.rprofile_set(path=targetpath, lazy=True, logging=True)
for rp in rp_set:
rp.get("fv_hi")
rp.get_table("fv")
"""
self.path = source
self.files = source if isinstance(source, list) else []
self.lazy = kwargs.get('lazy', True)
self.stride = kwargs.get('stride', 1)
self.first_dump = kwargs.get('first_dump')
self.last_dump = kwargs.get('last_dump')
self._logging = kwargs.get('logging')
self.log = log if self._logging else None
self.ray_profiles = {}
self._current_ix = 0
self._current = None
if self.path:
self.files = self.get_file_list_for_path(self.path) if os.path.isdir(self.path) else [self.path]
        dump_re = re.compile(r'(.*)-([\d]{4})\.bobaaa')
self.dump_map = dict((int(dump_re.match(f).groups()[1]), f) for f in self.files if dump_re.match(f))
self.file_map = dict((f, int(dump_re.match(f).groups()[1])) for f in self.files if dump_re.match(f))
self.dumps = list(self.dump_map.keys())
self.dumps.sort()
def __iter__(self):
self._current_ix = self.dumps.index(self.first_dump) if self.first_dump else 0
return self
    # Python 2 compatibility shim
    def next(self):
        return self.__next__()
    def __next__(self):
if self._current_ix < len(self.dumps):
dump = self.dumps[self._current_ix]
if self.last_dump and dump > self.last_dump:
raise StopIteration()
rp = self.ray_profiles.get(dump, rprofile(self.dump_map[dump], lazy=self.lazy, logging=self._logging))
if not self.lazy and (dump not in self.ray_profiles):
self.ray_profiles[dump] = rp
self._current = rp
self._current_ix += self.stride
return rp
else:
raise StopIteration()
# def reset(self):
#
# if first_dump:
# self.start = self.dumps.index(first_dump)
#
# self._current_ix = self.start
# self._current = None
def get_dump(self, dump=None):
""" Get a new `rprofile` instance for `dump`. These are NOT cached internally."""
if self.dumps and dump is None:
dump = self.dumps[-1]
elif dump not in self.dump_map:
return None
return self.ray_profiles.get(dump, rprofile(self.dump_map[dump], lazy=self.lazy, logging=self._logging))
def get_file_list_for_path(self, path):
""" Return a list of RProfiles at the given path"""
filenames = [os.path.join(path, f) for f in os.listdir(path) if f.startswith('RProfile') and f.endswith('.bobaaa')]
filenames.sort()
return filenames
# def load_files(self, filenames):
# """ Loads `filenames` """
#
# # This should add to the existing
#
# self.files = filenames
# self.files.sort()
# self.ray_profiles = [rprofile(f, lazy=self.lazy, logging=self._logging) for f in self.files]
def check_for_new(self, path=None):
"""Check path for new files"""
current_files = self.get_file_list_for_path(self.path or path)
new_files = [f for f in current_files if f not in self.files]
self.files.extend(new_files)
self.files.sort()
return len(new_files) > 0
class rprofile(object):
"""
`rprofile.header_attrs` is a dictionary of header attributes
"""
header_var_list = [
dict(name='version', pos=0, type='i'),
dict(name='cell_ct_low', pos=1, type='i'),
dict(name='nbuckets', pos=2, type='i'),
dict(name='dump', pos=3, type='i'),
dict(name='sizeof_float', pos=4, type='i'),
dict(name='has_centers', pos=5, type='i'),
dict(name='has_corners', pos=6, type='i'),
dict(name='has_normals', pos=7, type='i'),
dict(name='isrestart', pos=8, type='i', min_ver=12),
dict(name='var_ct_low', pos=9, type='i'),
dict(name='var_ct_high', pos=10, type='i'),
dict(name='cell_ct_high', pos=11, type='i'),
dict(name='ncpucores', pos=12, type='i'),
dict(name='ntxbricks', pos=13, type='i'),
dict(name='ntybricks', pos=14, type='i'),
dict(name='ntzbricks', pos=15, type='i'),
dict(name='nxteams', pos=16, type='i'),
dict(name='nyteams', pos=17, type='i'),
dict(name='nzteams', pos=18, type='i'),
dict(name='nx', pos=19, type='i'),
dict(name='ny', pos=20, type='i'),
dict(name='nz', pos=21, type='i'),
dict(name='nsugar', pos=22, type='i'),
dict(name='nbdy', pos=23, type='i'),
dict(name='nfluids', pos=24, type='i'),
dict(name='nvars', pos=25, type='i'),
dict(name='nhalfwaves', pos=26, type='i'),
dict(name='maxrad', pos=27, type='i'),
dict(name='nteamsinbunch', pos=28, type='i'),
dict(name='ndumps', pos=29, type='i'),
dict(name='ndumpstodo', pos=30, type='i'),
dict(name='nrminrad', pos=31, type='i'),
dict(name='nrmaxrad', pos=32, type='i'),
dict(name='iburn', pos=33, type='i', min_ver=12),
dict(name='imuffledbdry', pos=34, type='i', min_ver=12),
dict(name='ireflectbdry', pos=35, type='i', min_ver=12),
# fheader (the offsets are in the fheader)
dict(name='radin0', pos=0, type='f', help='Gravity completely off inside this radius'),
dict(name='radinner', pos=1, type='f', help='Gravity starts turning off inside this radius'),
dict(name='radbase', pos=2, type='f', help='Bot convect zone'),
dict(name='radtop', pos=3, type='f', help='Top convect zone'),
dict(name='radouter', pos=4, type='f', help='Grav starts turning off outside this radius'),
dict(name='radout0', pos=5, type='f', help='Gravity completely off outside this radius'),
dict(name='radmax', pos=6, type='f', help='distance from center of grid to nearest edge'),
dict(name='dlayerbot', pos=7, type='f', help='thickness of flame zone'),
dict(name='dlayertop', pos=8, type='f', help='thickness of transition @ top of convect zone'),
dict(name='totallum', pos=9, type='f'),
dict(name='grav00base', pos=10, type='f'),
dict(name='rho00base', pos=11, type='f'),
dict(name='prs00base', pos=12, type='f'),
dict(name='gammaconv', pos=13, type='f'),
dict(name='gammabelow', pos=14, type='f'),
dict(name='gammaabove', pos=15, type='f'),
dict(name='gravconst', pos=16, type='f'),
dict(name='rhoconv', pos=17, type='f'),
dict(name='rhoabove', pos=18, type='f'),
dict(name='airmu', pos=19, type='f', min_ver=13),
dict(name='cldmu', pos=20, type='f', min_ver=13),
dict(name='fkair', pos=21, type='f', min_ver=13),
dict(name='fkcld', pos=22, type='f', min_ver=13),
dict(name='atomicnoair', pos=23, type='f', min_ver=13),
dict(name='atomicnocld', pos=24, type='f', min_ver=13),
# Global T-history
dict(name='time', pos=31+0, type='f'),
dict(name='timerescaled', pos=31+1, type='f'),
dict(name='bubbleheight', pos=31+2, type='f'),
dict(name='spikeheight', pos=31+3, type='f'),
dict(name='cycl', pos=31+4, type='f', min_ver=12),
dict(name='dt', pos=31+5, type='f'),
dict(name='courmx', pos=31+6, type='f'),
dict(name='urbubmx', pos=31+7, type='f'),
dict(name='urspkmn', pos=31+8, type='f'),
dict(name='ekmx', pos=31+9, type='f'),
dict(name='ekrmx', pos=31+10, type='f'),
dict(name='ektmx', pos=31+11, type='f'),
dict(name='ekurmn', pos=31+12, type='f'),
dict(name='ekurmx', pos=31+13, type='f'),
dict(name='eiurmn', pos=31+14, type='f'),
dict(name='eiurmx', pos=31+15, type='f'),
dict(name='Hurmn', pos=31+16, type='f'),
dict(name='Hurmx', pos=31+17, type='f'),
dict(name='ekurspkmn', pos=31+18, type='f'),
dict(name='ekurbubmx', pos=31+19, type='f'),
dict(name='eiurspkmn', pos=31+20, type='f'),
dict(name='eiurbubmx', pos=31+21, type='f'),
dict(name='Hurspkmn', pos=31+22, type='f'),
dict(name='Hurbubmx', pos=31+23, type='f'),
dict(name='ekbubmx', pos=31+24, type='f'),
dict(name='ekrbubmx', pos=31+25, type='f'),
dict(name='ektbubmx', pos=31+26, type='f'),
dict(name='ekspkmx', pos=31+27, type='f'),
dict(name='ekrspkmx', pos=31+28, type='f'),
dict(name='ektspkmx', pos=31+29, type='f'),
# Args images
dict(name='ai_vort', pos=64+0, type='f', len=2),
dict(name='ai_divu', pos=64+2, type='f', len=2),
dict(name='ai_s', pos=64+4, type='f', len=2),
dict(name='ai_fv', pos=64+6, type='f', len=2),
dict(name='ai_rho', pos=64+8, type='f', len=2),
dict(name='ai_p', pos=64+10, type='f', len=2),
dict(name='ai_ux', pos=64+12, type='f', len=2),
dict(name='ai_uy', pos=64+14, type='f', len=2),
dict(name='ai_uz', pos=64+16, type='f', len=2),
]
def __init__(self, filename, lazy=True, **kwargs):
"""Create a ray profile reader object.
`lazy` means only the header is loaded on open
"""
logging = kwargs.get('logging')
self._filename = filename
self.lazy = lazy
self.version = None
self.bucket_count = 0
self._centers = None
self._corners = None
self._normals = None
self._cache = {}
self._variable_map = {}
self._names = []
self._data = []
self.header_attrs = {}
if logging:
import logging
logging.basicConfig(level=logging.DEBUG)
self.log = logging.getLogger('rp_info')
self.log.setLevel(logging.DEBUG)
else:
self.log = None
if str(filename).isdigit():
filename = 'RProfile-01-%04i.bobaaa' % int(filename)
if self.log: self.log.info("Opening %s" % filename)
f = open(filename, 'rb')
header = f.read(128)
'''
if header[:8] != 'LCSE:RPS':
if self.log: self.log.warn('File %s is not a new Ray Profile, try an older rp_info.py' % filename)
f.close()
raise Exception('Unsupported file version')
'''
self.version = struct.unpack("i", header[8:12])[0]
f.seek(0)
if self.version < 8:
raise Exception('Unsupported version %i' % self.version)
elif self.version == 8:
self._header_size = 128
hlen = 8
self.header_var_list = self.header_var_list[:8]
# header = struct.unpack(hlen * "i", header[8:8+4*hlen])
# self.header_attrs['version'] = header[0]
# self.header_attrs['cell_ct_low'] = header[1]
# self.header_attrs['nbuckets'] = header[2]
# self.header_attrs['dump'] = header[3]
# self.header_attrs['sizeof_float'] = header[4]
# self.header_attrs['has_centers'] = header[5]
# self.header_attrs['has_corners'] = header[6]
# self.header_attrs['has_normals'] = header[7]
elif self.version > 8:
self._header_size = 1024
hlen = 127
header = f.read(self._header_size)
# Bug fixes
# Using the header info from v9
# if self.version < 11:
# self._init_v9()
# self._init_legacy()
# raw_header = struct.unpack(hlen * "i", header[8:8+4*hlen])
# raw_fheader = struct.unpack(hlen * "f", header[8+4*hlen:8+8*hlen])
# self.header_attrs.update([(k, raw_header[i]) for i, k in enumerate(self._header_names)])
# self.header_attrs.update([(k, raw_fheader[i]) for i, k in enumerate(self._fheader_names)])
# self.header_attrs.update([(k, raw_fheader[32 + i]) for i, k in enumerate(self._fheader_names2)])
# self.header_attrs.update([(k, (raw_fheader[64 + 2*i], raw_fheader[64 + 2*i + 1] )) for i, k in enumerate(self._argsimg_names)])
#elif self.version <= 12:
hmap = dict(i=struct.unpack(hlen * "i", header[8 : 8 + 4 * hlen]),
f=struct.unpack(hlen * "f", header[8 + 4 * hlen : 8 * (1 + hlen)]))
for var in self.header_var_list:
name = var['name']
pos = var['pos']
var_type = var['type']
var_len = var.get('len', 1)
min_ver = var.get('min_ver', 0)
if self.version < min_ver:
continue
# A slight offset problem
if self.version == 11 and var_type == 'f' and pos > 30: # and pos < 64:
pos = pos + 1
attr = hmap[var_type][pos] if var_len == 1 else hmap[var_type][pos : pos + var_len]
self.header_attrs[name] = attr
# Fix header problems
if self.version == 8:
self.header_attrs['cell_ct_high'] = 2 * self.header_attrs['cell_ct_low']
self.header_attrs['var_ct_high'] = 1
self.header_attrs['var_ct_low'] = 14
if self.version == 9:
self.header_attrs['cell_ct_low'] -= 2
self.header_attrs['cell_ct_high'] -= 4
if self.version < 12:
self.header_attrs['isreflectbdry'] = 1
self.header_attrs['ismuffledbdry'] = 0
if self.version > 13:
self.header_attrs['has_corners'] = False
self.bucket_count = self.header_attrs['nbuckets']
self.dump = self.header_attrs['dump']
self.buckets = self.header_attrs['nbuckets']
if self.version > 10:
self._init_v11()
if not self.lazy:
                f = open(self._filename, 'rb')
self._data = f.read()
f.close()
else:
self._init_legacy()
if self.version == 8:
self._init_v8()
else:
self._init_v9()
for k in ['has_centers', 'has_corners', 'has_normals']:
self.header_attrs[k] = self.header_attrs.get(k, 0) == 1
float_type = 'f8' if self.header_attrs.get('sizeof_float') == 8 else 'f4'
self._dtypes_hi = [('j_hi', 'i4')]
self._dtypes_hi.extend([(n, float_type) for n in self._names_hi])
self._col_names_hi = ['j_hi'] + self._names_hi
self._dtypes = [('j', 'i4')]
self._dtypes.extend([(n, float_type) for n in self._names])
self._col_names = ['j'] + self._names
if self.lazy:
log.warn("Lazy Loading not supported for v %i" % self.version)
self._load(f)
f.close()
def _load(self, f):
nbuckets = self.header_attrs.get('nbuckets')
cell_ct_low = self.header_attrs.get('cell_ct_low')
cell_ct_high = self.header_attrs.get('cell_ct_high')
# Read the high resolution table
self._data_hi = np.fromfile(f, dtype=self._dtypes_hi, count=cell_ct_high*(nbuckets+1))
# Read the low resolution table
self._data_low = np.fromfile(f, dtype=self._dtypes, count=cell_ct_low*(nbuckets+1))
if self.header_attrs.get('has_centers'):
vals = 3 if self.version > 12 else 5
            self._centers = np.fromfile(f, dtype=np.float64, count=vals * nbuckets).reshape((vals, -1), order='F')
if self.header_attrs.get('has_normals'):
self._normals = np.fromfile(f, dtype=np.float64, count=9*nbuckets).reshape((3, 3, -1), order='F')
if self.header_attrs.get('has_corners'):
self._corners = np.fromfile(f, dtype=np.float64, count=9*nbuckets).reshape((3, 3, -1), order='F')
def get_centers(self):
""" Get centers of the buckets as an array of x, y, z """
if self._centers is None and self.version >= 11:
self._centers = self._get_array('centers')
if self.version < 13:
centers = self._centers[2:]
self._centers = old_div(-centers, np.sqrt((centers * centers).sum(0)))
return self._centers
def get_corners(self):
""" Get corners of the buckets as an array of (xyz,side #, bucket #) """
if self._corners is None and self.version >= 11:
normals = self.get_normals()
self._corners = np.zeros((3, 3, normals.shape[2]))
self._corners[:,0,:] = np.cross(normals[:,0,:], normals[:,1,:], axis=0)
self._corners[:,1,:] = np.cross(normals[:,1,:], normals[:,2,:], axis=0)
self._corners[:,2,:] = np.cross(normals[:,2,:], normals[:,0,:], axis=0)
self._corners[:,:,:] /= np.sqrt((self._corners * self._corners).sum(0))
return self._corners
def get_normals(self):
""" Get normals of the buckets as an array of (x/y/z coordinate, side #, bucket #).
"""
if self._normals is None and self.version >= 11:
self._normals = self._get_array('normals')
normals_len = np.sqrt((self._normals * self._normals).sum(0))
normals_ix = normals_len > 0.0
self._normals[:, normals_ix] = old_div(self._normals[:, normals_ix], normals_len[normals_ix])
return self._normals
def get_cell_volumes(self):
"""
Get an array of dimension (`bucket_ct`, `cell_ct_low`) containing the volume of each
totopo shaped cell.
"""
volumes = []
ys = self.get('y')
normals = self.get_normals()
a, b, c = normals[:,0,:], normals[:,1,:], normals[:,2,:]
ang_a = np.pi - np.arccos((a * b).sum(0))
ang_b = np.pi - np.arccos((b * c).sum(0))
ang_c = np.pi - np.arccos((c * a).sum(0))
angles = np.vstack([ang_a, ang_b, ang_c])
dr = (ys[1] - ys[0])
r_sq = dr * (ys**2)
bucket_angles = angles.sum(0) - np.pi
for i in range(0, self.bucket_count):
vols = r_sq * bucket_angles[i]
volumes.append(vols.reshape((-1,1)))
return np.hstack(volumes)
def get_table(self, var):
"""Get a table of dimension (4, ncells, nbuckets+1) containg all buckets
(including the global average bucket zero). The first dimension contains the
statastical information: average, min, max, sd.
"""
if var not in self._variable_names:
print('Variable %s not found in table. Available variables are %s' % (var, self._variable_names))
return
if self.version >= 11:
return self._get_array(var)
else:
return self._get_legacy(var)
def get(self, var):
"""Get the global bucket for variable `var` or get header attribute `var`.
Use `get_table(self, var)` to get the same variable but for all buckets.
If the global bucket is returned an array of dimension (4, ncells) is returned.
The first dimension contains avg, min, max, sd.
"""
if var in self.header_attrs:
return self.header_attrs.get(var)
if self.version >= 11:
return self._get_array(var, global_only=True)
else:
return self._get_legacy(var, global_only=True)
def _get_array(self, var, global_only=False):
if var not in self._variable_map:
return None
if var in self._cache:
return self._cache[var]
offset, dtype, count, shape = self._variable_map[var]
# print self._variable_map[var], global_only
if global_only and len(shape) == 3 and shape[2] == self.bucket_count + 1:
count = shape[0] * shape[1]
shape = shape[:2]
if self.lazy:
            f = open(self._filename, 'rb')
f.seek(offset)
data = np.fromfile(f, dtype=dtype, count=count).reshape(shape, order='F')
f.close()
else:
data = np.frombuffer(self._data[offset:], dtype=dtype,
count=count).reshape(shape, order='F')
if not global_only:
self._cache[var] = data
return data
def _get_legacy(self, var, global_only=False):
if var in self._col_names_hi:
data_array = self._data_hi
radial = self.header_attrs.get('cell_ct_high')
elif var in self._col_names:
radial = self.header_attrs.get('cell_ct_low')
data_array = self._data_low
else:
#print var, self._col_names_hi, self._col_names, self.header_attrs
raise Exception("Attribute '%s' not found, look in .get_variables() and .get_attributes()" % var)
if var in self._legacy_remap:
remap_vars = self._legacy_remap[var]
data_out = np.zeros((len(remap_vars), radial, self.bucket_count + 1), order='F')
var_array = []
for i, v in enumerate(remap_vars):
data_out[i,:,:] = data_array[:][v].reshape((radial, -1), order='F')
data = data_out
else:
data = data_array[:][var].reshape((radial, -1), order='F')
if var in ['y', 'j', 'j_hi', 'y_hi']:
data = data[:,0]
elif global_only:
data = data[:,:,0]
# The old format was backwards
if self.version < 9:
if len(data.shape) == 3:
data = data[:,::-1,:]
elif len(data.shape) == 2:
data = data[:,::-1]
else:
data = data[::-1]
return data
def get_attributes(self):
attrs = list(self.header_attrs.keys())
attrs.sort()
return attrs
def get_variables(self):
return self._variable_names
def _init_v8(self):
self._header_names = ['version', 'nradial_low', 'nbuckets', 'dump',
'sizeof_float', 'has_centers', 'has_corners', 'has_normals']
self._names_hi = ['y_hi', 'fv_hi', 'fvmn_hi', 'fvmx_hi', 'fvsd_hi']
self._names = ['counts', 'y',
'fv', 'fvmn','fvmx','fvsd',
'rho', 'rhomn', 'rhomx', 'rhosd',
'rhourbubble', 'rhourbubblemn', 'rhourbubblemx', 'rhourbubblesd',
'rhourspike', 'rhourspikemn', 'rhourspikemx', 'rhourspikesd',
'p', 'pmn', 'pmx', 'psd',
'ux','uxmn','uxmx','uxsd',
'uy', 'uymn', 'uymx', 'uysd',
'uz', 'uzmn', 'uzmx', 'uzsd',
'ekr', 'ekrmn','ekrmx','ekrsd',
'ekt','ektmn','ektmx','ektsd',
'ek', 'ekmn','ekmx','eksd',
'ekur','ekurmn','ekurmx','ekursd',
'eiur', 'eiurmn', 'eiurmx', 'eiursd',
'hur', 'hurmn', 'hurmx', 'hursd']
self._header_arrays = ['normals', 'centers', 'corners']
def _init_v9(self):
self._init_v8()
self._names.extend(['ceul', 'ceulmn', 'ceulmx', 'ceulsd',
'mach', 'machmn', 'machmx', 'machsd',
'enuc', 'enucmn', 'enucmx', 'enucsd',
'fnuc', 'fnucmn', 'fnucmx', 'fnucsd',
'dy', 'dymn', 'dymx', 'dysd'])
def _init_legacy(self):
''' Initialize internals for old versions (< 11)'''
buckets_total = 1 + self.bucket_count
var_ct_high = self.header_attrs.get('var_ct_high')
var_ct_low = self.header_attrs.get('var_ct_low')
cell_ct_high = self.header_attrs.get('cell_ct_high')
cell_ct_low = self.header_attrs.get('cell_ct_low')
sizeof_float = self.header_attrs.get('sizeof_float')
float_type = np.float64 if sizeof_float == 8 else np.float32
self._variable_list = [('centers', float_type, sizeof_float, (5, self.bucket_count)),
('normals', float_type, sizeof_float, (3, 3, self.bucket_count)),
('corners', float_type, sizeof_float, (3, 3, self.bucket_count))]
offset = self._header_size
# Integer Arrays (j, h_hi)
offset += 4 * buckets_total * (cell_ct_high + cell_ct_low)
# Float array, yav + counts + 4 * nvars. No high counts
offset += 8 * buckets_total * ((1 + 4 * var_ct_high) * cell_ct_high + (2 + 4 * var_ct_low) * cell_ct_low)
for name, dtype, sizeof, shape in self._variable_list:
count = np.prod(shape)
size = sizeof * count
self._variable_map[name] = (offset, dtype, count, shape)
offset += size
self._variable_names = list(self._variable_map.keys())
self._variable_names.sort()
# Variable meta
self._legacy_remap = dict(fv_hi=('fv_hi', 'fvmn_hi','fvmx_hi','fvsd_hi'),
fv=('fv', 'fvmn','fvmx','fvsd'),
rho=('rho', 'rhomn', 'rhomx', 'rhosd'),
rhourbubble=('rhourbubble', 'rhourbubblemn', 'rhourbubblemx', 'rhourbubblesd'),
rhourspike=('rhourspike', 'rhourspikemn', 'rhourspikemx', 'rhourspikesd'),
p=('p', 'pmn', 'pmx', 'psd'),
ux=('ux','uxmn','uxmx','uxsd'),
uy=('uy', 'uymn', 'uymx', 'uysd'),
uz=('uz', 'uzmn', 'uzmx', 'uzsd'),
ekr=('ekr', 'ekrmn','ekrmx','ekrsd'),
ekt=('ekt','ektmn','ektmx','ektsd'),
ek=('ek', 'ekmn','ekmx','eksd'),
ekur=('ekur','ekurmn','ekurmx','ekursd'),
eiur=('eiur', 'eiurmn', 'eiurmx', 'eiursd'),
hur=('hur', 'hurmn', 'hurmx', 'hursd'),
ceul=('ceul', 'ceulmn', 'ceulmx', 'ceulsd'),
mach=('mach', 'machmn', 'machmx', 'machsd'),
enuc=('enuc', 'enucmn', 'enucmx', 'enucsd'),
fnuc=('fnuc', 'fnucmn', 'fnucmx', 'fnucsd'),
dy=('dy', 'dymn', 'dymx', 'dysd'))
self._legacy_order = ['counts', 'y', 'fv', 'rho', 'rhourbubble', 'rhourspike',
'p', 'ux', 'uy', 'uz', 'ekr', 'ekt', 'ek', 'ekur', 'eiur', 'hur',
'ceul', 'mach', 'enuc', 'fnuc', 'dy',]
self._variable_names = list(self._variable_map.keys()) + self._legacy_order + ['fv_hi', 'y_hi']
self._variable_names.sort()
def _init_v11(self):
cell_ct_high = self.header_attrs.get('cell_ct_high')
cell_ct_low = self.header_attrs.get('cell_ct_low')
buckets_total = 1 + self.bucket_count
sizeof_float = self.header_attrs.get('sizeof_float')
float_type = np.float64 if sizeof_float == 8 else np.float32
int_type = np.int32
vals = 3 if self.version > 12 else 5
# name, size_in_bytes, <array dimensions>
self._variable_list = [('centers', float_type, sizeof_float, (vals, self.bucket_count)),
('normals', float_type, sizeof_float, (3, 3, self.bucket_count)),
('corners', float_type, sizeof_float, (3, 3, self.bucket_count)),
('j_hi', int_type, 4, (cell_ct_high,)),
('y_hi', float_type, sizeof_float, (cell_ct_high,)),
('fv_hi', float_type, sizeof_float, (4, cell_ct_high, buckets_total)),
('j', int_type, 4, (cell_ct_low,)),
('y', float_type, sizeof_float, (cell_ct_low,)),
('counts', float_type, sizeof_float, (4, cell_ct_low, buckets_total)),
('fv', float_type, sizeof_float, (4, cell_ct_low, buckets_total)),
('rho', float_type, sizeof_float, (4, cell_ct_low, buckets_total)),
('rhobubble', float_type, sizeof_float, (4, cell_ct_low, buckets_total)),
('rhospike', float_type, sizeof_float, (4, cell_ct_low, buckets_total)),
('rhourbubble', float_type, sizeof_float, (4, cell_ct_low, buckets_total)),
('rhourspike', float_type, sizeof_float, (4, cell_ct_low, buckets_total)),
('p', float_type, sizeof_float, (4, cell_ct_low, buckets_total)),
('ux', float_type, sizeof_float, (4, cell_ct_low, buckets_total)),
('uy', float_type, sizeof_float, (4, cell_ct_low, buckets_total)),
('uz', float_type, sizeof_float, (4, cell_ct_low, buckets_total)),
('ceul', float_type, sizeof_float, (4, cell_ct_low, buckets_total)),
('mach', float_type, sizeof_float, (4, cell_ct_low, buckets_total)),
('enuc', float_type, sizeof_float, (4, cell_ct_low, buckets_total)),
('fnuc', float_type, sizeof_float, (4, cell_ct_low, buckets_total)),
('dy', float_type, sizeof_float, (4, cell_ct_low, buckets_total)),
('ekr', float_type, sizeof_float, (4, cell_ct_low, buckets_total)),
('ekt', float_type, sizeof_float, (4, cell_ct_low, buckets_total)),
('ek', float_type, sizeof_float, (4, cell_ct_low, buckets_total)),
('ekur', float_type, sizeof_float, (4, cell_ct_low, buckets_total)),
('eiur', float_type, sizeof_float, (4, cell_ct_low, buckets_total)),
('hur', float_type, sizeof_float, (4, cell_ct_low, buckets_total)),
]
# present_vars = [v['name'] for v in self.header_var_list if self.version >= v.get('min_ver', self.version)]
# TODO: HACK UGH
skip_vars = []
if self.version > 12:
skip_vars.append('corners')
offset = self._header_size
for name, dtype, sizeof, shape in self._variable_list:
if name in skip_vars:
continue
# print (name, offset, dtype, sizeof, shape)
count = np.prod(shape)
size = sizeof * count
self._variable_map[name] = (offset, dtype, count, shape)
offset += size
# print (name, offset, dtype, count, shape, sizeof)
self._variable_names = list(self._variable_map.keys())
self._variable_names.sort()
def main():
'''Simple demo main function'''
if len(sys.argv) < 2:
print("Specify filename")
return
path = sys.argv[1]
if os.path.isdir(path):
rp_set = rprofile_set(sys.argv[1])
print(rp_set.ray_profiles)
else:
rp = rprofile(sys.argv[1], logging=True)
log.info('File version %i (real%i), with %i buckets and %i radial bins for dump %i' %
(rp.version, rp.get('sizeof_float'), rp.get('nbuckets'), rp.get('cell_ct_low'), rp.get('dump')))
header_keys = list(rp.header_attrs.keys())
header_keys.sort()
for k in header_keys:
print("%s: %s" % (k, rp.header_attrs[k]))
d = rp.get('fv')
print(d)
# print rp.get('j_hi', bucket=0)
print(rp.get_table('y'))
return
print("ceul")
print(rp.get('ceul'))
print("mach")
print(rp.get('mach'))
print("enuc")
print(rp.get('enuc'))
print("fnuc")
print(rp.get('fnucmx'))
print("dy")
print(rp.get('dy'))
if __name__ == "__main__":
main()
``` |
{
"source": "jl090909/test1",
"score": 3
} |
#### File: jl090909/test1/testc2.py
```python
import time
from selenium import webdriver
from selenium.webdriver.common.by import By
import os
class CT1():
def test(self):
burl="http://www.businesstransforms.com"
driverLocation = "C:\\dj\\env1\\chromedriver.exe"
os.environ["webdriver.chrome.driver"] = driverLocation
driver = webdriver.Chrome(driverLocation)
driver.maximize_window()
driver.get(burl)
driver.implicitly_wait(10)
loginlink=driver.find_element(By.XPATH,"//a[contains(text(),'Authenticate')]")
loginlink.click()
time.sleep(3)
loginlink=driver.find_element(By.XPATH,"//div[@id='navbarSupportedContent']/ul[1]//a[@href='/accounts/login/']")
loginlink.click()
time.sleep(5)
chromeTest = CT1()
chromeTest.test()
``` |
{
"source": "JL1829/EmployeeSalaryPrediction",
"score": 3
} |
#### File: src/features/build_features.py
```python
import numpy as np
import pandas as pd
class FeatureGenerator(object):
"""A feature engineering generator to create additional feature to DataFrame
Group by each label encoded column, compute the:
* mean
* max
* min
* Standard Deviation
* median
of label column
Parameter
-------------
data: object, a dataset object created by `preprocessing.py`
Method
-------------
add_group_stats(self):
        group by each label encoded column, and compute the grouped statistics.
        fill any NaN value with 0.
        Return: grouped-statistics DataFrame merged with the original DataFrame.
Example
-------------
>>> feature_engineering = True
>>> if feature_engineering:
FeatureGenerator(data).add_group_stats()
"""
def __init__(self, data:object):
"""initializes class and creates groupby object for data"""
self.data = data
self.cat_cols = data.cat_cols
self.groups = data.train_df.groupby(self.cat_cols)
def add_group_stats(self):
"""adds group statistics to data stored in data object"""
group_stats_df = self._get_group_stats()
group_stats_df.reset_index(inplace=True)
# merge derived columns to original df
self.data.train_df = self._merge_new_cols(self.data.train_df, group_stats_df, self.cat_cols, fillna=True)
self.data.test_df = self._merge_new_cols(self.data.test_df, group_stats_df, self.cat_cols, fillna=True)
# update column list
group_stats_cols = ['group_mean_salary', 'group_max_salary', 'group_min_salary', 'group_std_salary', 'group_median_salary']
self._extend_col_lists(self.data, cat_cols=group_stats_cols)
def _get_group_stats(self):
"""calculate group statistics"""
target_col = self.data.target_col
group_stats_df = pd.DataFrame({'group_mean_salary': self.groups[target_col].mean()})
group_stats_df['group_max_salary'] = self.groups[target_col].max()
group_stats_df['group_min_salary'] = self.groups[target_col].min()
group_stats_df['group_std_salary'] = self.groups[target_col].std()
group_stats_df['group_median_salary'] = self.groups[target_col].median()
return group_stats_df
def _merge_new_cols(self, df, new_cols_df, keys, fillna=False):
"""Merges engineered features with original df"""
DataFrame = pd.merge(df, new_cols_df, on=keys, how='left')
if fillna:
DataFrame.fillna(0, inplace=True)
return DataFrame
def _extend_col_lists(self, data, cat_cols=[], num_cols=[]):
"""addes engineered features cols to data cols lists"""
data.num_cols.extend(num_cols)
data.cat_cols.extend(cat_cols)
data.feature_cols.extend(num_cols + cat_cols)
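# A toy sketch of the groupby/merge pattern implemented above (illustrative only; the column
# names here are made up and are not the project's schema):
#   df = pd.DataFrame({'jobType': ['A', 'A', 'B'], 'salary': [10, 20, 30]})
#   stats = pd.DataFrame({'group_mean_salary': df.groupby(['jobType'])['salary'].mean()})
#   merged = pd.merge(df, stats.reset_index(), on=['jobType'], how='left')
#   # every row now carries its group's mean salary as an extra feature column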
``` |
{
"source": "JL1829/LeetCode",
"score": 3
} |
#### File: LeetCode/src/findNumberIn2DArray.py
```python
def findNumberIn2DArray(matrix, target):
i = len(matrix) - 1
j = 0
while i >= 0 and j < len(matrix[0]):
if matrix[i][j] > target:
i -= 1
elif matrix[i][j] < target:
j += 1
elif matrix[i][j] == target:
return True
return False
if __name__ == '__main__':
array = [
[1, 4, 7, 11, 15],
[2, 5, 8, 12, 19],
[3, 6, 9, 16, 22],
[10, 13, 14, 17, 24],
[18, 21, 23, 26, 30]
]
print(f"For the array: {array}\n, return {findNumberIn2DArray(matrix=array, target=3)}"
f"\n For the array: {array}\n, return {findNumberIn2DArray(matrix=array, target=20)}")
```
#### File: LeetCode/src/flipBT.py
```python
class TreeNode:
"""DocString Placeholder"""
def __init__(self, value):
self.value = value
self.left = None
self.right = None
def __repr__(self):
return f"Tree Node value: {self.value}"
root = TreeNode(4)
root.left = TreeNode(2)
root.right = TreeNode(7)
root.left.left = TreeNode(1)
root.left.right = TreeNode(3)
root.right.left = TreeNode(6)
root.right.right = TreeNode(9)
def preOrder(node):
if not node:
return []
return [node.value] + preOrder(node.left) + preOrder(node.right)
print(f"Pre Order: {preOrder(root)}")
def invertTree(node):
if not node:
return
node.left, node.right = invertTree(node.right), invertTree(node.left)
return node
print(f"Inverting Tree")
invertTree(node=root)
print(f"Pre Order after invert: {preOrder(root)}")
```
#### File: LeetCode/src/hasCycle.py
```python
class ListNode:
def __init__(self, value):
self.value = value
self.next = None
def __repr__(self):
return f"List Node with value: {self.value}"
node1 = ListNode(3)
node2 = ListNode(2)
node3 = ListNode(0)
node4 = ListNode(-4)
node1.next = node2
node2.next = node3
node3.next = node4
node4.next = node2
node5 = ListNode(1)
node5.next = ListNode(2)
node5.next.next = ListNode(3)
def hasCycle(node):
if not node or not node.next:
return False
slow = node
fast = node.next
while slow != fast:
if not fast or not fast.next:
return False
slow = slow.next
fast = fast.next.next
return True
if __name__ == '__main__':
print(f"For Linked List started with {node1}, have loop? \n {hasCycle(node=node1)}\n")
print(f"For Linked List started with {node5}, have loop? \n {hasCycle(node=node5)}\n")
```
#### File: LeetCode/src/isPalindromeLinkedList.py
```python
class ListNode:
def __init__(self, value):
self.value = value
self.next = None
def __repr__(self):
return f"List Node Value: {self.value}"
head = ListNode(1)
head.next = ListNode(2)
head.next.next = ListNode(2)
head.next.next.next = ListNode(1)
head1 = ListNode(1)
head1.next = ListNode(2)
def convertToList(node):
value = []
while node:
value.append(node.value)
node = node.next
return value
if __name__ == '__main__':
array = convertToList(node=head)
array2 = convertToList(node=head1)
print(array == array[::-1])
print(array2 == array2[::-1])
```
#### File: LeetCode/src/majorityElement.py
```python
from collections import Counter
def majorityElement(nums):
counter = Counter(nums)
temp = list(set(nums))
result = -1
for item in temp:
if counter[item] > len(nums) / 2:
result = item
break
return result
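# An alternative sketch, not part of the original solution: Boyer-Moore voting finds the same
# majority element in O(n) time and O(1) extra space; the final count() check keeps the
# "-1 if no majority" behaviour of majorityElement above.
def majorityElementVoting(nums):
    candidate, count = None, 0
    for num in nums:
        if count == 0:
            candidate = num
        count += 1 if num == candidate else -1
    # verify, because the input is not guaranteed to contain a majority element
    return candidate if nums and nums.count(candidate) > len(nums) / 2 else -1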
if __name__ == '__main__':
testArray = [1, 2, 5, 9, 5, 9, 5, 5, 5]
print(majorityElement(nums=testArray))
testArray = [3, 2]
print(majorityElement(nums=testArray))
testArray = [2, 2, 1, 1, 1, 2, 2]
print(majorityElement(nums=testArray))
```
#### File: LeetCode/src/maxDepthBT.py
```python
from collections import deque
class TreeNode:
def __init__(self, value):
self.value = value
self.left = None
self.right = None
def __repr__(self):
return f"Tree Node with Value: {self.value}"
def maxDepth(root):
if root is None:
return 0
queue = deque()
queue.append([1, root])
while queue:
depth, node = queue.popleft()
if node.left:
queue.append((depth + 1, node.left))
if node.right:
queue.append((depth + 1, node.right))
return depth
if __name__ == '__main__':
Treenode = TreeNode(3)
Treenode.left = TreeNode(9)
Treenode.right = TreeNode(20)
Treenode.right.left = TreeNode(15)
Treenode.right.right = TreeNode(7)
print(f"The max depth of this Binary Tree is: \n {maxDepth(root=Treenode)}")
```
#### File: LeetCode/src/maxSlidingWindow.py
```python
def maxSlidingWindow(array, size):
n = len(array)
i = 0
ans = []
while size <= n:
ans.append(max(array[i:size]))
i += 1
size += 1
return ans
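# A sketch of the standard O(n) alternative (not the brute-force approach above): keep a deque of
# indices whose values decrease from front to back, so the front index always holds the current
# window maximum. Assumes size >= 1, like maxSlidingWindow.
from collections import deque
def maxSlidingWindowDeque(array, size):
    result = []
    window = deque()  # indices; array values at these indices are strictly decreasing
    for i, value in enumerate(array):
        while window and array[window[-1]] <= value:
            window.pop()  # smaller values can never be a future maximum
        window.append(i)
        if window[0] <= i - size:
            window.popleft()  # drop the index that has slid out of the window
        if i >= size - 1:
            result.append(array[window[0]])
    return result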
if __name__ == '__main__':
nums1 = [1, 3, -1, -3, 5, 3, 6, 7]
print(maxSlidingWindow(array=nums1, size=3))
nums2 = []
print(maxSlidingWindow(array=nums2, size=3))
```
#### File: LeetCode/src/maxWaterContainer.py
```python
points = [1, 8, 6, 2, 5, 4, 8, 3, 7]
def max_area(height):
low, high = 0, len(height) - 1
answer = 0
while low < high:
area = min(height[low], height[high]) * (high - low)
answer = max(answer, area)
if height[low] < height[high]:
low += 1
else:
high -= 1
return answer
print(f"With input height points: {points}"
f"\n The maximum area: {max_area(height=points)}")
# official answer
def maxArea(height):
n = len(height)
low = 0
high = n - 1
maxarea = (high - low) * min(height[low], height[high])
while low < high:
if height[low] < height[high]:
low += 1
else:
high -= 1
maxarea = max(maxarea, (high - low) * min(height[low], height[high]))
return maxarea
```
#### File: LeetCode/src/mergeTwoLinkedList.py
```python
class ListNode:
def __init__(self, value=0, next=None):
self.value = value
self.next = next
def __repr__(self):
return f"Linked List Node with value: {self.value}"
head1 = ListNode(1)
head1.next = ListNode(2)
head1.next.next = ListNode(4)
head2 = ListNode(1)
head2.next = ListNode(3)
head2.next.next = ListNode(4)
def printLinkedList(head):
while head:
print(head.value)
head = head.next
# print(f"{printLinkedList(head1)}")
def mergeTwoLinkedList(l1, l2):
prehead = ListNode(-1)
prev = prehead
while l1 and l2:
if l1.value <= l2.value:
prev.next = l1
l1 = l1.next
else:
prev.next = l2
l2 = l2.next
prev = prev.next
prev.next = l1 if l1 is not None else l2
return prehead.next
mergeTwoLinkedList(l1=head1, l2=head2)
print(f"{printLinkedList(head1)}")
```
#### File: LeetCode/src/minStack.py
```python
import math
class MinStack:
"""docString placeholder"""
def __init__(self):
self.stack = []
self.helperStack = [math.inf]
def push(self, x):
self.stack.append(x)
self.helperStack.append(min(x, self.helperStack[-1]))
def pop(self):
self.stack.pop()
self.helperStack.pop()
def top(self):
return self.stack[-1]
def getMin(self):
return self.helperStack[-1]
myStack = MinStack()
myStack.push(-2)
myStack.push(0)
myStack.push(-3)
print(myStack.getMin())
myStack.pop()
print(myStack.top())
print(myStack.getMin())
```
#### File: LeetCode/src/minSubArrayLen.py
```python
nums = [2, 3, 1, 2, 4, 3]
s = 7
# two pointer method
def minSubArrayLen_slidingWindow(sub, array):
if not array:
return 0
n = len(array)
answer = n + 1
start, end = 0, 0
total = 0
while end < n:
total += array[end]
while total >= sub:
answer = min(answer, end - start + 1)
total -= array[start]
start += 1
end += 1
return 0 if answer == n + 1 else answer
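# Worked trace (illustrative) for s=7 and nums=[2, 3, 1, 2, 4, 3]: `end` advances until the running
# total reaches 7 (window [2, 3, 1, 2]), then `start` advances while the total is still >= 7; the
# shortest qualifying window found this way is [4, 3], so the answer is 2.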
print(f"For input: {nums}, and target: {s}"
f"\n The output: {minSubArrayLen_slidingWindow(sub=s, array=nums)}")
```
#### File: LeetCode/src/moveZeros.py
```python
def moveZeros(array):
n = len(array)
left = right = 0
while right < n:
if array[right] != 0:
            # swap the non-zero element forward to the left pointer's position
array[left], array[right] = array[right], array[left]
left += 1
right += 1
return array
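# A short usage sketch (not part of the original file):
if __name__ == '__main__':
    print(moveZeros([0, 1, 0, 3, 12]))  # expected: [1, 3, 12, 0, 0]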
```
#### File: LeetCode/src/removeKdigits.py
```python
def removeKdigits(numbers, target):
stack = []
remain = len(numbers) - target
for digit in numbers:
while target and stack and stack[-1] > digit:
stack.pop()
target -= 1
stack.append(digit)
return ''.join(stack[:remain]).lstrip('0') or '0'
num1 = '1432219'
num2 = '10200'
num3 = '10'
k1 = 3
k2 = 1
k3 = 2
print(f"For number: {num1} with target: {k1}"
f" The min output is {removeKdigits(numbers=num1, target=k1)}")
print(f"For number: {num2} with target: {k2}"
f" The min output is {removeKdigits(numbers=num2, target=k2)}")
print(f"For number: {num3} with target: {k3}"
f" The min output is {removeKdigits(numbers=num3, target=k3)}")
```
#### File: LeetCode/src/reverseString.py
```python
def reverseString(string):
left = 0
right = len(string) - 1
while left < right:
string[left], string[right] = string[right], string[left]
left += 1
right -= 1
return string
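# A short usage sketch (not part of the original file):
if __name__ == '__main__':
    print(reverseString(list("hello")))  # expected: ['o', 'l', 'l', 'e', 'h']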
```
#### File: LeetCode/src/rotateArrayKtimes_right.py
```python
def rotateArray(array, k):
if not array:
return []
left = 0
right = len(array) - 1
while left < right:
array[left], array[right] = array[right], array[left]
left += 1
right -= 1
left = 0
right = k - 1
while left < right:
array[left], array[right] = array[right], array[left]
left += 1
right -= 1
left = k
right = len(array) - 1
while left < right:
array[left], array[right] = array[right], array[left]
left += 1
right -= 1
return array
"""
If you find yourself repeating the same code many times,
it is time to extract it into a function.
"""
# 2nd approach
def rotateArray_2(array, k):
if not array:
return []
def reverse(receiveArr, start_index, end_index):
while start_index < end_index:
receiveArr[start_index], receiveArr[end_index] = receiveArr[end_index], receiveArr[start_index]
start_index += 1
end_index -= 1
k = k % len(array)
reverse(receiveArr=array, start_index=0, end_index=len(array) - 1)
reverse(receiveArr=array, start_index=0, end_index=k - 1)
reverse(receiveArr=array, start_index=k, end_index=len(array) - 1)
return array
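# A short usage sketch (not part of the original file) showing both implementations rotate the
# array k positions to the right and agree with each other:
if __name__ == '__main__':
    print(rotateArray([1, 2, 3, 4, 5, 6, 7], 3))    # expected: [5, 6, 7, 1, 2, 3, 4]
    print(rotateArray_2([1, 2, 3, 4, 5, 6, 7], 3))  # expected: [5, 6, 7, 1, 2, 3, 4]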
```
#### File: LeetCode/src/setZeros.py
```python
matrix1 = [[1, 1, 1], [1, 0, 1], [1, 1, 1]]
matrix2 = [[0, 1, 2, 0], [3, 4, 5, 2], [1, 3, 1, 5]]
def setZeros(matrix):
if not matrix:
return []
# as single row or single col could have multiple zero
# so in order to take out the duplicate
# we use set()
# the row going to be set zero
rows = set()
# the column going to be set zero
cols = set()
# find the 0 index
for i in range(len(matrix)):
for j in range(len(matrix[0])):
if matrix[i][j] == 0:
rows.add(i)
cols.add(j)
# now set row to be zero
for row in rows:
matrix[row] = [0] * len(matrix[0])
# now set column to be zero
# careful about the index
for col in cols:
# vertical direction
for i in range(len(matrix)):
matrix[i][col] = 0
return matrix
print(setZeros(matrix=matrix1))
print(setZeros(matrix=matrix2))
```
#### File: LeetCode/src/transpose.py
```python
def transpose(array):
ROW, COL = len(array), len(array[0])
answer = [[None] * ROW for _ in range(COL)]
for r, row in enumerate(array):
for c, value in enumerate(row):
answer[c][r] = value
return answer
if __name__ == '__main__':
matrix = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
print(transpose(array=matrix))
matrix = [[1, 2, 3], [4, 5, 6]]
print(transpose(array=matrix))
```
#### File: LeetCode/src/tree.py
```python
class TreeNode:
def __init__(self, value):
self.value = value
self.left = None
self.right = None
def __repr__(self):
return f"Tree Node with value: {self.value}"
root = TreeNode(1)
root.left = TreeNode(2)
root.right = TreeNode(3)
# The traversals below are recursive
def preOrder(root):
if not root:
return []
return [root.value] + preOrder(root.left) + preOrder(root.right)
print(f"Pre Order of a Tree called root: {preOrder(root)}")
def inorder(root):
if not root:
return []
return inorder(root.left) + [root.value] + inorder(root.right)
print(f"In Order traversal of a tree call root: {inorder(root)}")
def postOrder(root):
if not root:
return []
return postOrder(root.left) + postOrder(root.right) + [root.value]
print(f"Post Order Traversal of a tree call root: {postOrder(root)}")
# Iterative traversal
def iteratePreOrder(root):
if not root: return []
result = []
stack = [root]
while stack:
cur = stack.pop()
result.append(cur.value)
if cur.right:
stack.append(cur.right)
if cur.left:
stack.append(cur.left)
return result
print(f"Iterate throught: {iteratePreOrder(root)}")
# flip binary tree
```
#### File: LeetCode/src/twoSum.py
```python
def twoSum(array, target):
hashtable = {}
for i, num in enumerate(array):
if target - num in hashtable:
return [hashtable[target - num], i]
hashtable[array[i]] = i
return []
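# A short usage sketch (not part of the original file):
if __name__ == '__main__':
    print(twoSum([2, 7, 11, 15], 9))  # expected: [0, 1]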
```
#### File: LeetCode/src/twoSum_to_nSum.py
```python
nums = [3, 2, 4]
def twoSum(array, target):
array.sort()
low, high = 0, len(array) - 1
while low < high:
res = array[low] + array[high]
if res < target:
low += 1
elif res > target:
high -= 1
elif res == target:
return [low, high]
print(twoSum(nums, 9))
def two_sum_duplicate(array, target):
array.sort()
result = []
left, right = 0, len(array) - 1
while left < right:
total = array[left] + array[right]
if total < target:
left += 1
elif total > target:
right -= 1
else:
result.append([array[left], array[right]])
left += 1
right -= 1
return result
nums2 = [1, 3, 1, 2, 2, 3]
print(f"With incoming array: {nums2}, and target: 4")
print(f"Return tuple(duplicate) is: {two_sum_duplicate(array=nums2, target=4)}")
def two_sum_non_duplicate(array, target):
# time complexity: O(N log N)
array.sort()
lo, hi = 0, len(array) - 1
result = []
while lo < hi:
total = array[lo] + array[hi]
left, right = array[lo], array[hi]
if total < target:
while lo < hi and array[lo] == left:
lo += 1
elif total > target:
while lo < hi and array[hi] == right:
hi -= 1
else:
result.append([left, right])
while lo < hi and array[lo] == left:
lo += 1
while lo < hi and array[hi] == right:
hi -= 1
return result
print(f"Return Tuple without duplicate: {two_sum_non_duplicate(array=nums2, target=4)}")
def two_sum_modify(array, start, target):
# time complexity: O(N log N)
array.sort()
lo = start
hi = len(array) - 1
result = []
while lo < hi:
total = array[lo] + array[hi]
left, right = array[lo], array[hi]
if total < target:
while lo < hi and array[lo] == left:
lo += 1
elif total > target:
while lo < hi and array[hi] == right:
hi -= 1
else:
result.append([left, right])
while lo < hi and array[lo] == left:
lo += 1
while lo < hi and array[hi] == right:
hi -= 1
return result
def three_sum(array, target):
array.sort()
n = len(array)
result = []
    for i in range(n):
        # skip duplicate first elements so the same triple is not produced twice
        # (note: reassigning the loop variable inside a `for` loop has no effect in Python)
        if i > 0 and array[i] == array[i - 1]:
            continue
        tuples = two_sum_modify(array, i + 1, target - array[i])
        for item in tuples:
            item.append(array[i])
            result.append(item)
return result
nums4 = [-1, 0, 1, 2, -1, 4]
print(f"\n 3 Sums with nums: {nums4}, and target: 0: \n {three_sum(array=nums4, target=0)}")
```
#### File: src/utilis/linkedList.py
```python
class ListNode:
"""
The basic node in Linked List
"""
def __init__(self, value=None):
self.value = value
self.next = None
def __repr__(self):
return f"List Node with value: {self.value}"
class LinkedList:
"""
    A singly linked list supporting the following methods:
    append, appendLeft, len() and iteration (more to be added)
"""
def __init__(self, maxsize=None):
self.maxsize = maxsize
self.head = ListNode()
self.TailNode = None
self.length = 0
def __len__(self):
return self.length
def __repr__(self):
return f"Single Linked List with length: {self.length}\n" \
f"And with full element: {self._printValue()}"
def append(self, value):
if self.maxsize is not None and len(self) >= self.maxsize:
raise Exception("Linked List is FULL!")
node = ListNode(value)
TailNode = self.TailNode
if TailNode is None:
self.head.next = node
else:
TailNode.next = node
self.TailNode = node
self.length += 1
def appendLeft(self, value):
if self.maxsize is not None and len(self) >= self.maxsize:
raise Exception("Linked List is FULL!")
node = ListNode(value)
        # if the original linked list is empty, inserting the first element also needs to set TailNode
if self.TailNode is None:
self.TailNode = node
headNode = self.head.next
self.head.next = node
node.next = headNode
self.length += 1
def __iter__(self):
for node in self._iter_node():
yield node.value
def _iter_node(self):
"""
Traversal from head to tail
"""
currentNode = self.head.next
while currentNode is not self.TailNode:
yield currentNode
currentNode = currentNode.next
if currentNode is not None:
yield currentNode
def _printValue(self):
value = []
node = self.head.next
while node:
value.append(node.value)
node = node.next
return value
if __name__ == '__main__':
ll = LinkedList()
ll.append(1)
ll.append(2)
print(ll)
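    # A short extra demo (not in the original file) exercising appendLeft and iteration:
    ll.appendLeft(0)
    print(list(ll))  # expected: [0, 1, 2]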
``` |
{
"source": "jl223vy/FAERS-data-toolkit",
"score": 3
} |
#### File: jl223vy/FAERS-data-toolkit/faersPreprocess.py
```python
import os
import warnings
import pandas as pd
import numpy as np
# local directory to save files.
data_dir = "FAERSdata"
directoryPath = os.getcwd() + '/' + data_dir
# ignore warnings
warnings.filterwarnings('ignore')
def processDemo():
for filename in os.listdir(directoryPath):
if "DEMO" in filename.upper() and "TXT" in filename.upper():
print("Process " + filename)
demo_df = pd.read_csv(directoryPath + "/" + filename, low_memory=False, sep='$', error_bad_lines=False)
# keep primaryid, caseid, age, sex, wt
demo_df.drop(
['caseversion', 'i_f_code', 'lit_ref', 'event_dt', 'auth_num', 'fda_dt', 'age_grp', 'e_sub',
'rept_dt', 'to_mfr', 'reporter_country', 'mfr_dt', 'init_fda_dt', 'rept_cod', 'mfr_num',
'mfr_sndr', 'occp_cod', 'occr_country'], inplace=True, axis=1, errors='ignore')
# process sex
demo_df['sex'] = demo_df['sex'].fillna('UNK')
sex_map = {'M': "0", 'F': "1", 'UNK': "2"}
demo_df['sex'] = demo_df['sex'].map(sex_map)
# process age
demo_df = demo_df[pd.notnull(demo_df['age'])]
# unified age unit
demo_df = demo_df[demo_df.age_cod != 'dec'].reset_index(drop=True)
demo_df['age'] = demo_df['age'].apply(pd.to_numeric, errors='coerce')
            demo_df['age'] = np.where(demo_df['age_cod'] == 'MON', demo_df['age'] * 1 / 12, demo_df['age'])  # month
demo_df['age'] = np.where(demo_df['age_cod'] == 'WK', demo_df['age'] * 1 / 52, demo_df['age']) # week
demo_df['age'] = np.where(demo_df['age_cod'] == 'DY', demo_df['age'] * 1 / 365, demo_df['age']) # day
demo_df['age'] = np.where(demo_df['age_cod'] == 'HR', demo_df['age'] * 1 / 8760, demo_df['age']) # hour
demo_df = demo_df.drop(['age_cod'], axis=1)
# age discretization and label encode
# Newborn, Infant, Child Preschool, Child, Adolescent, Young Adult, Adult,Middle Aged, Aged, Aged+
age_bins = [0, 1, 2, 5, 12, 18, 24, 44, 64, 79, 123]
demo_df['age'] = pd.cut(demo_df.age, age_bins, labels=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
demo_df = demo_df.dropna(axis=0, subset=["age"]) # drop unreasonable age <0 or >123
# process weight(wt)
demo_df = demo_df[pd.notnull(demo_df['wt'])]
# unified weight unit
demo_df['wt'] = demo_df['wt'].apply(pd.to_numeric, errors='coerce')
demo_df['wt'] = np.where(demo_df['wt_cod'] == 'LBS', demo_df['wt'] * 0.453592, demo_df['wt']) # pounds
demo_df['wt'] = np.where(demo_df['wt_cod'] == 'GMS', demo_df['wt'] * 0.001, demo_df['wt']) # grams
demo_df = demo_df.drop(['wt_cod'], axis=1)
# weight discretization and label encode
wt_bins = [0, 5, 10, 40, 50, 60, 70, 80, 90, 100, 150, 200, 300]
demo_df['wt'] = pd.cut(demo_df.wt, wt_bins, labels=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12])
demo_df = demo_df.dropna(axis=0, subset=["wt"]) # drop unreasonable weight <0 or >300
# save file
demo_df.to_csv(directoryPath + "/" + filename[:-4] + '.csv', header=True, index=False)
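# Illustrative sketch (not executed by this script) of the pd.cut discretization used above:
#   ages = pd.Series([0.5, 30, 70, 150])
#   pd.cut(ages, [0, 1, 2, 5, 12, 18, 24, 44, 64, 79, 123], labels=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
#   # -> [1, 7, 9, NaN]; the NaN (age outside (0, 123]) is removed by the dropna() call above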
def processDrug():
for filename in os.listdir(directoryPath):
if "DRUG" in filename.upper() and "TXT" in filename.upper():
print("Process " + filename)
drug_df = pd.read_csv(directoryPath + "/" + filename, low_memory=False, sep="$", error_bad_lines=False)
# keep primaryid, caseid, role_cod, drugname
drug_df.drop(
['drug_seq', 'val_vbm', 'dose_vbm', 'dose_form', 'dose_amt', 'dose_unit', 'cum_dose_chr', 'prod_ai',
'cum_dose_unit', 'dechal', 'rechal', 'lot_num', 'exp_dt', 'nda_num', 'route', 'dose_freq'],
inplace=True, axis=1, errors='ignore')
# process role_cod label encode
drug_df = drug_df[pd.notnull(drug_df['role_cod'])]
rolecod_map = {'PS': '0', 'SS': '1', 'C': '2', 'I': '3'}
drug_df['role_cod'] = drug_df['role_cod'].map(rolecod_map)
# process drugname
drug_df = drug_df[pd.notnull(drug_df['drugname'])]
drug_df['drugname'] = drug_df['drugname'].str.strip().str.lower() # to lowercase
drug_df = drug_df[~drug_df['drugname'].isin(['unknown'])] # drop unknown
drug_df['drugname'] = drug_df['drugname'].str.replace('\\', '/') # fix slashes
drug_df['drugname'] = drug_df['drugname'].map(
lambda x: x[:-1] if str(x).endswith(".") else x) # fix ending with period
# save file
drug_df.to_csv(directoryPath + "/" + filename[:-4] + '.csv', header=True, index=False)
def processReac():
for filename in os.listdir(directoryPath):
if "REAC" in filename.upper() and "TXT" in filename.upper():
print("Process " + filename)
reac_df = pd.read_csv(directoryPath + "/" + filename, low_memory=False, sep="$", error_bad_lines=False)
# keep primaryid, caseid, pt
reac_df.drop(['drug_rec_act'], inplace=True, axis=1, errors='ignore')
# process pt
reac_df = reac_df[pd.notnull(reac_df['pt'])]
reac_df['pt'] = reac_df['pt'].str.strip().str.lower() # to lowercase
reac_df = reac_df[~reac_df['pt'].isin(['unknown'])] # drop unknown
reac_df['pt'] = reac_df['pt'].map(
lambda x: x[:-1] if str(x).endswith(".") else x) # fix ending with period
# save file
reac_df.to_csv(directoryPath + "/" + filename[:-4] + '.csv', header=True, index=False)
def processOutc():
for filename in os.listdir(directoryPath):
if "OUTC" in filename.upper() and "TXT" in filename.upper():
print("Process " + filename)
outc_df = pd.read_csv(directoryPath + "/" + filename, low_memory=False, sep="$", error_bad_lines=False)
# process outc_cod
outc_df = outc_df[pd.notnull(outc_df['outc_cod'])]
outc_df = outc_df[outc_df['outc_cod'].isin(['DE', 'LT', 'HO', 'DS', 'CA', 'RI', 'OT'])]
outccod_map = {'DE': '0', 'LT': '1', 'HO': '2', 'DS': '3', 'CA': '4', 'RI': '5', 'OT': '6'}
outc_df['outc_cod'] = outc_df['outc_cod'].map(outccod_map)
# save file
outc_df.to_csv(directoryPath + "/" + filename[:-4] + '.csv', header=True, index=False)
def main():
processDemo()
processDrug()
processReac()
# processOutc()
if __name__ == '__main__':
main()
``` |
{
"source": "jl223vy/Helper_for_CommissaryInChargeOfStudies",
"score": 3
} |
#### File: Helper_for_CommissaryInChargeOfStudies/Src/pyHelper.py
```python
import xlrd
import os
import traceback
def open_excel(file):
try:
data = xlrd.open_workbook(file)
return data
except Exception as e:
print(str(e))
def excel_table_byname(file, colnameindex=0, by_name="Sheet1"):  # read the class roster from an Excel sheet
    data = open_excel(file)
    table = data.sheet_by_name(by_name)  # get the worksheet
    nrows = table.nrows  # total number of rows in the sheet
    colnames = table.row_values(colnameindex)  # header row, e.g. ['学号', '姓名'] (student ID, name)
list = []
    for rownum in range(1, nrows):  # start from the second row of the sheet
row = table.row_values(rownum)
if row:
app = {}
for i in range(len(colnames)):
                app[colnames[i]] = row[i]  # pair each header with its cell value
list.append(app)
return list
def get_files_name(file_dir):  # collect all folder and file names under the directory to be checked
L = []
for roots, dirs, files in os.walk(file_dir):
        # dirs is a list of the names of all folders in this directory (not including nested subdirectories)
if dirs:
for d in dirs:
L.append(d)
        # files is a list of all the files in this directory
if files:
for f in files:
L.append(f)
return L
def check_stu(excel_file="", open_dir=""):  # check which students have submitted their homework
tables = excel_table_byname(excel_file)
filesList = get_files_name(open_dir)
out = ""
cnt = 0
for dic in tables:
stuNo = dic["学号"]
flag = 1
for file in filesList:
if file.find(stuNo) != -1:
flag = 0
break
if flag:
cnt += 1
out += dic["学号"] + " " + dic["姓名"] + '\n'
if cnt == 0:
out = "已交齐!"
else:
total = len(tables)
submit = cnt
out = "=====作业查交情况=====\n班级总人数:" + str(total) \
+ " 人\n上交作业人数:" + str(total - submit) + " 人\n未交作业人数:" \
+ str(submit) + " 人\n======================\n未交作业同学名单:\n\n" \
+ out
return out
def modi_all(excel_file="", open_dir="", sep=" "):  # batch rename files into a standard format
tables = excel_table_byname(excel_file)
    filesList = os.listdir(open_dir)  # all files and folders under the target directory
cnt = 0
try:
for file in filesList:
for dic in tables:
stuNo = dic["学号"]
if file.find(stuNo) != -1:
                    dot = file.rfind('.')
                    if dot != -1:
                        newName = dic["学号"] + sep + dic["姓名"] + file[dot:]
                    elif os.path.isdir(open_dir + '\\' + str(file)):
                        newName = dic["学号"] + sep + dic["姓名"]
                    else:
                        # regular file without an extension: use the bare formatted name
                        newName = dic["学号"] + sep + dic["姓名"]
os.rename(os.path.join(open_dir, file),
os.path.join(open_dir, newName))
cnt += 1
break
return "批量重命名文件 " + str(cnt) + " 个。"
except:
return "批量重命名文件失败!失败原因:\n" + traceback.format_exc()
def add_front(open_dir="", add=""):  # batch add a prefix to file names
filesList = os.listdir(open_dir)
cnt = 0
try:
for file in filesList:
newName = add + file[0:]
cnt += 1
os.rename(open_dir + '\\' + str(file),
open_dir + '\\' + newName)
return "批量添加前缀重命名文件 " + str(cnt) + " 个。"
except:
return "批量添加前缀重命名文件失败!失败原因:\n" + traceback.format_exc()
def add_back(open_dir="", add=""):  # batch add a suffix to file names
filesList = os.listdir(open_dir)
cnt = 0
try:
for file in filesList:
dot = file.rfind(".")
if dot != -1:
newName = file[0:dot] + add + file[dot:]
else:
newName = file[0:] + add
cnt += 1
os.rename(open_dir + '\\' + str(file),
open_dir + '\\' + newName)
return "批量添加后缀重命名文件 " + str(cnt) + " 个。"
except:
return "批量添加后缀重命名文件失败!失败原因:\n" + traceback.format_exc()
def main():
n = input()
ls = n.split("$")
    if n[0] == '1':  # check homework submissions
        print(check_stu(ls[1], ls[2]))
    elif n[0] == '2':  # basic batch rename
        if ls[1] == '1':  # user-specified separator
            print(modi_all(ls[2], ls[3], ls[4]))
        elif ls[1] == '2':  # default separator
            print(modi_all(ls[2], ls[3]))
    elif n[0] == '3':  # extended rename
        if ls[1] == '1':  # add a prefix
            print(add_front(ls[2], ls[3]))
        elif ls[1] == '2':  # add a suffix
print(add_back(ls[2], ls[3]))
if __name__ == '__main__':
main()
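# Minimal usage sketch (file and folder names are hypothetical). main() reads one line from
# stdin whose fields are separated by "$", as parsed above:
#   "1$roster.xlsx$./homework"        -> check_stu("roster.xlsx", "./homework")
#   "2$1$roster.xlsx$./homework$_"    -> modi_all("roster.xlsx", "./homework", "_")
#   "3$1$./homework$hw1_"             -> add_front("./homework", "hw1_")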
``` |
{
"source": "jl2264/Flood44",
"score": 3
} |
#### File: jl2264/Flood44/Task2F.py
```python
import datetime
import matplotlib.pyplot as plt
from floodsystem.datafetcher import fetch_measure_levels
from floodsystem.stationdata import build_station_list, update_water_levels
from floodsystem.flood import stations_highest_rel_level
from floodsystem.plot import plot_water_level_with_fit
import numpy as np
def run():
stations = build_station_list()
update_water_levels(stations)
N = 5
dt = 2
p = 4
    stations_N = stations_highest_rel_level(stations, N)
    station_names = []
    for i in stations_N:
        station_names.append(i[0])
    stations_to_plot = []
    for station in stations:
        if station.name in station_names:
            stations_to_plot.append(station)
    for station in stations_to_plot:
dates, levels = fetch_measure_levels(station.measure_id, dt=datetime.timedelta(days=dt))
if len(dates) == 0:
print(station.name,'data error')
else:
plot_water_level_with_fit(station, dates, levels, p)
if __name__ == "__main__":
print("*** Task 2F: CUED Part IA Flood Warning System ***")
run()
```
#### File: jl2264/Flood44/test_1D.py
```python
from floodsystem.geo import rivers_with_station
from floodsystem.station import MonitoringStation
def test_rivers_with_station_no_duplicate():
stations = []
s1 = MonitoringStation(
station_id=1,
measure_id=1,
label='label1',
coord=(float(52.2053), float(0.1218)),
typical_range='typical_range',
river='river1',
town='town')
s2 = MonitoringStation(
station_id=2,
measure_id=1,
label='label2',
coord=(float(2.2053), float(10.1218)),
typical_range='typical_range',
river='river2',
town='town')
s3 = MonitoringStation(
station_id=3,
measure_id=1,
label='label3',
coord=(float(52.2153), float(0.1318)),
typical_range='typical_range',
river='river3',
town='town')
stations.append(s1)
stations.append(s2)
stations.append(s3)
list_test_1D = rivers_with_station(stations)
assert len(list_test_1D) == 3 # all of the station rivers
def test_rivers_with_station_with_duplicate():
stations = []
s1 = MonitoringStation(
station_id=1,
measure_id=1,
label='label1',
coord=(float(52.2053), float(0.1218)),
typical_range='typical_range',
river='river1',
town='town')
s2 = MonitoringStation(
station_id=2,
measure_id=1,
label='label2',
coord=(float(2.2053), float(10.1218)),
typical_range='typical_range',
river='river2',
town='town')
s3 = MonitoringStation(
station_id=3,
measure_id=1,
label='label3',
coord=(float(52.2153), float(0.1318)),
typical_range='typical_range',
river='river1',
town='town')
stations.append(s1)
stations.append(s2)
stations.append(s3)
list_test_1D = rivers_with_station(stations)
assert len(list_test_1D) == 2 # all of the station rivers with one duplicate
assert list_test_1D[0] == s1.river
assert list_test_1D[1] == s2.river
assert s1.river == s3.river
```
#### File: jl2264/Flood44/test_2G.py
```python
from floodsystem.station import MonitoringStation
import datetime
from floodsystem.stationdata import build_station_list, update_water_levels
from floodsystem.flood import stations_highest_rel_level
from floodsystem.plot import plot_water_level_with_fit, plot_water_levels, relative_level_rising_rate
from dateutil.tz import tzutc
from dataclasses import replace
from os import remove
import matplotlib
import numpy as np
def test_relative_level_rising_rate():
    # create fake station s1 (has the same typical range and latest level as Hayes Basin, though this is not necessary)
s1 = MonitoringStation(
station_id=1,
measure_id=1,
label='Test Station',
coord=(float(52.2053), float(0.1218)),
typical_range=(0.91,0.97),
river='river',
town='town')
s1.latest_level = 1.536
#create arbitrary dates
dates_2G = [datetime.datetime(2022, 2, 28, 12, 0, tzinfo=tzutc()), datetime.datetime(2022, 2, 28, 11, 45, tzinfo=tzutc()),
datetime.datetime(2022, 2, 28, 11, 30, tzinfo=tzutc()), datetime.datetime(2022, 2, 28, 11, 15, tzinfo=tzutc()),
datetime.datetime(2022, 2, 28, 11, 0, tzinfo=tzutc()), datetime.datetime(2022, 2, 28, 10, 45, tzinfo=tzutc()),
datetime.datetime(2022, 2, 28, 10, 30, tzinfo=tzutc()), datetime.datetime(2022, 2, 28, 10, 15, tzinfo=tzutc()),
datetime.datetime(2022, 2, 28, 10, 0, tzinfo=tzutc()), datetime.datetime(2022, 2, 28, 9, 45, tzinfo=tzutc()),
datetime.datetime(2022, 2, 28, 9, 30, tzinfo=tzutc()), datetime.datetime(2022, 2, 28, 9, 15, tzinfo=tzutc()),
datetime.datetime(2022, 2, 28, 9, 0, tzinfo=tzutc()), datetime.datetime(2022, 2, 28, 8, 45, tzinfo=tzutc()),
datetime.datetime(2022, 2, 28, 8, 30, tzinfo=tzutc()), datetime.datetime(2022, 2, 28, 8, 15, tzinfo=tzutc()),
datetime.datetime(2022, 2, 28, 8, 0, tzinfo=tzutc()), datetime.datetime(2022, 2, 28, 7, 45, tzinfo=tzutc()),
datetime.datetime(2022, 2, 28, 7, 30, tzinfo=tzutc()), datetime.datetime(2022, 2, 28, 7, 15, tzinfo=tzutc()),
datetime.datetime(2022, 2, 28, 7, 0, tzinfo=tzutc()), datetime.datetime(2022, 2, 28, 6, 45, tzinfo=tzutc()),
datetime.datetime(2022, 2, 28, 6, 30, tzinfo=tzutc()), datetime.datetime(2022, 2, 28, 6, 15, tzinfo=tzutc()),
datetime.datetime(2022, 2, 28, 6, 0, tzinfo=tzutc()), datetime.datetime(2022, 2, 28, 5, 45, tzinfo=tzutc()),
datetime.datetime(2022, 2, 28, 5, 30, tzinfo=tzutc()), datetime.datetime(2022, 2, 28, 5, 15, tzinfo=tzutc()),
datetime.datetime(2022, 2, 28, 5, 0, tzinfo=tzutc()), datetime.datetime(2022, 2, 28, 4, 45, tzinfo=tzutc()),
datetime.datetime(2022, 2, 28, 4, 30, tzinfo=tzutc()), datetime.datetime(2022, 2, 28, 4, 15, tzinfo=tzutc()),
datetime.datetime(2022, 2, 28, 4, 0, tzinfo=tzutc()), datetime.datetime(2022, 2, 28, 3, 45, tzinfo=tzutc()),
datetime.datetime(2022, 2, 28, 3, 30, tzinfo=tzutc()), datetime.datetime(2022, 2, 28, 3, 15, tzinfo=tzutc()),
datetime.datetime(2022, 2, 28, 3, 0, tzinfo=tzutc()), datetime.datetime(2022, 2, 28, 2, 45, tzinfo=tzutc()),
datetime.datetime(2022, 2, 28, 2, 30, tzinfo=tzutc()), datetime.datetime(2022, 2, 28, 2, 15, tzinfo=tzutc()),
datetime.datetime(2022, 2, 28, 2, 0, tzinfo=tzutc()), datetime.datetime(2022, 2, 28, 1, 45, tzinfo=tzutc()),
datetime.datetime(2022, 2, 28, 1, 30, tzinfo=tzutc()), datetime.datetime(2022, 2, 28, 1, 15, tzinfo=tzutc()),
datetime.datetime(2022, 2, 28, 1, 0, tzinfo=tzutc()), datetime.datetime(2022, 2, 28, 0, 45, tzinfo=tzutc())]
    # water level at station s1 does not change (constant level = 1)
levels_2G = [1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00,
1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00,
1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00,
1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00,
1.00, 1.00, 1.00, 1.00, 1.00, 1.00]
#rate of relative water level rise should be zero
assert relative_level_rising_rate(s1, dates_2G, levels_2G) == 0
test_relative_level_rising_rate()
``` |
{
"source": "JL2718/python-mnemonic",
"score": 3
} |
#### File: python-mnemonic/mnemonic/__main__.py
```python
from mnemonic import Mnemonic
import sys,os,argparse
def main() -> None:
parser = argparse.ArgumentParser(description='Encode/Decode mnemonics')
    parser.add_argument('-i', '--input', dest='typein', choices=['generate', 'hex', 'entropy', 'mnemonic', 'stamp'], default='generate', help="generate mnemonic or input type at stdin")
    parser.add_argument('-o', '--output', dest='typeout', choices=['entropy', 'hex', 'mnemonic', 'stamp', 'seed', 'key'], default='mnemonic', help="type of output to print to stdout")
    parser.add_argument('-l', '--language', dest='lang', choices=[f.split('.')[0] for f in os.listdir(Mnemonic._get_directory())], default='english')
    parser.add_argument('-s', '--strength', dest='strength', type=int, choices=[128, 160, 192, 224, 256], default=128)
    parser.add_argument('-p', '--passphrase', dest='passphrase', type=str, default='')
    parser.add_argument('-t', '--testnet', dest='testnet', action='store_true', default=False)  # store_true avoids the type=bool pitfall where any non-empty value parses as True
args = parser.parse_args()
m = Mnemonic(args.lang)
# input types
if args.typein == 'generate':
mnemonic = m.generate(args.strength)
elif args.typein == 'hex':
num = int(sys.stdin.readline().strip(),16)
mnemonic = m.from_int(num)
elif args.typein == 'entropy':
entropy = sys.stdin.buffer.read()
mnemonic = m.to_mnemonic(entropy)
elif args.typein == 'mnemonic':
mnemonic = sys.stdin.readline().strip()
if not m.check(mnemonic): raise ValueError(mnemonic)
elif args.typein=='stamp':
stamp = sys.stdin.readline().strip()
mnemonic = m.from_stamp(stamp)
# output types
if args.typeout=='entropy':
sys.stdout.buffer.write(m.to_entropy(mnemonic))
if args.typeout=='hex':
print(hex(m.to_int(mnemonic)))
elif args.typeout=='mnemonic':
print(mnemonic)
elif args.typeout=='stamp':
print(m.to_stamp(mnemonic))
elif args.typeout=='seed':
print(m.to_seed(mnemonic,args.passphrase))
elif args.typeout=='key':
print(m.to_hd_master_key(m.to_seed(mnemonic,args.passphrase),args.testnet))
if __name__ == "__main__":
main()
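# Hedged usage sketch: assuming the package is executed as a module (this file is mnemonic/__main__.py),
# the CLI reads from stdin and writes to stdout; the hex entropy below is illustrative only.
#   python -m mnemonic                                     # generate a 128-bit english mnemonic
#   echo deadbeefdeadbeefdeadbeefdeadbeef | python -m mnemonic -i hex -o mnemonic
#   python -m mnemonic -o seed -p "my passphrase"          # generate a mnemonic, print the derived seed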
``` |
{
"source": "jl2718-xview/data_utilities",
"score": 2
} |
#### File: jl2718-xview/data_utilities/dataset.py
```python
import tensorflow as tf
import tensorflow.data as tfd
import tensorflow.image as tfi
import tensorflow.random as tfr
import itertools as it
import operator as op
import diux.data_utilities.wv_util as wvu
import core.preprocessor as pp
def main():
D = fImPatches()
iD = D.make_one_shot_iterator()
d = next(iD)
print(d.shape)
def fImPatches(
img_dir='/home/jl/data/xview/train_images/tif/*.tif'
,labelfile = "/home/jl/data/xview/xView_train.geojson"
,stride:int=500
,patch_dim:int=1000
,zoom:float=0.25
,out_dim:int=300
)->tfd.Dataset:
def fPatches(img:tf.Tensor)-> tf.Tensor:
def fPatchAug(patch:tf.Tensor)->tf.Tensor:
patch = tf.cast(tfi.resize(patch, tf.cast(tf.cast(patch.shape[:2], tf.float32) * tfr.uniform([], 1 - zoom, 1 + zoom), tf.int32)),tf.uint8)
patch = tfi.random_crop(patch, (out_dim, out_dim,patch.shape[-1]))
patch = tfi.random_flip_left_right(patch)
patch = tfi.random_flip_up_down(patch)
patch = tfi.rot90(patch, tfr.uniform([], 0, 3, tf.int32))
return patch
img = tfi.random_crop(img,tf.concat([stride*(tf.cast(tf.shape(img)[:2],tf.int32) // stride),tf.shape(img)[2:]],0))
patches = tf.reshape(tfi.extract_image_patches(tf.expand_dims(img,0),(1,patch_dim,patch_dim,1),(1,stride,stride,1),(1,1,1,1),"VALID"),(-1,patch_dim,patch_dim,3))
patches = tf.map_fn(fPatchAug,patches)
return patches
#coords, chips, classes = wvu.get_labels(labelfile)
#CCC = it.groupby(sorted(zip(chips, coords, classes)), op.itemgetter(0))
D = tfd.Dataset.list_files(img_dir,shuffle=True).repeat()
D = D.map(tf.io.read_file).map(tf.io.decode_png)
D = D.filter(lambda x:tf.reduce_mean(tf.cast(tf.equal(x,0),tf.float32))<.9)
D = D.map(fPatches)
D = D.flat_map(lambda x: tfd.Dataset.from_tensor_slices(x)).shuffle(100)
return D
if __name__ == "__main__": main()
``` |
{
"source": "jl32587/Jie-Lu",
"score": 3
} |
#### File: jl32587/Jie-Lu/coord.py
```python
import re
def create_sra_matrix_from_file_by_len(filename,chr_num,sra_len):
# accession start end strand read_length read_count hit
# Chr1 1 23 - 23 1 315
infile = open(filename)
infile.readline()
len = 0
for line in infile:
s_line = line.strip()
b = s_line.split()
if b[0] == chr_num and b[4] == str(sra_len):
len += 1
sra_matrix = [[0]*2 for i in range(len)]
sra_read = [0]*len
infile.close()
infile = open(filename)
infile.readline() #if file has header line
i = 0
for line in infile:
s_line = line.strip()
a = s_line.split()
if a[0] == chr_num and a[4] == str(sra_len):
sra_matrix[i][0] = int(a[1]) # start
sra_matrix[i][1] = int(a[2]) # end
#sra_matrix[i][2] = a[3] # strand
sra_read[i] = int(a[5]) # reads
i += 1
infile.close()
return sra_matrix, sra_read
def create_rep_norm_sra_matrix_from_file(filename,chr_num):
infile = open(filename)
infile.readline()
len = 0
for line in infile:
s_line = line.strip()
b = s_line.split()
if b[0] == chr_num:
len += 1
sra_matrix = [[0]*2 for i in range(len)]
sra_read = [0]*len
infile.close()
infile = open(filename)
#infile.readline()
i = 0
for line in infile:
s_line = line.strip()
a = s_line.split()
if a[0] == chr_num:
sra_matrix[i][0] = int(a[1]) # start
sra_matrix[i][1] = int(a[2]) # end
sra_read[i] = float(a[5])/float(a[6]) # repeat normalized reads
i += 1
infile.close()
return sra_matrix, sra_read
def create_rep_norm_sra_matrix_from_file_by_len(filename,chr_num,sra_len):
infile = open(filename)
infile.readline()
len = 0
for line in infile:
s_line = line.strip()
b = s_line.split()
if b[0] == chr_num and b[4] == str(sra_len):
len += 1
sra_matrix = [[0]*2 for i in range(len)]
sra_read = [0]*len
infile.close()
infile = open(filename)
#infile.readline()
i = 0
for line in infile:
s_line = line.strip()
a = s_line.split()
if a[0] == chr_num and a[4] == str(sra_len):
sra_matrix[i][0] = int(a[1]) # start
sra_matrix[i][1] = int(a[2]) # end
sra_read[i] = float(a[5])/float(a[6]) # repeat normalized reads
i += 1
infile.close()
return sra_matrix, sra_read
GFF = "TAIR9_GFF3_genes.gff"
def get_coord_given_agi(agi,feature):
# gff format:
# accession source feature start end score strand frame attributes
# Chr1 TAIR9 gene 3631 5899 . + . ID=AT1G01010;Note=protein_coding_gene;Name=AT1G01010
# feature is a list of interested features
file_in = open(GFF, 'r')
file_in.readline()
coord = {}
for line in file_in:
line = line.strip()
s_line = line.split()
if s_line[2] in feature:
attr = s_line[8].split(";")
tair_agi = re.sub('ID=','',attr[0])
start = int(s_line[3])
end = int(s_line[4])
coord[tair_agi] = [start,end,s_line[0],s_line[6]] #start,end,chromosome,strand
file_in.close()
if agi in coord:
return coord[agi]
else:
        print(agi + ": no coordinate found")
def create_matrix_from_gff(filename,chr):
'give a matrix of [start,end,strand,agi_identifier] from gff file'
# accession source feature start end score strand frame attributes
# Chr1 TAIR9 transposable_element 11897 11976 . + . ID=AT1TE00010;Name=AT1TE00010;Alias=ATCOPIA24
file_in = open(filename)
file_in.readline()
gene_matrix = []
for line in file_in:
line = line.strip()
s_line = line.split()
if s_line[0].upper() == chr.upper():
start = int(s_line[3])
end = int(s_line[4])
attr = s_line[8].split(";")
tair_agi = re.sub('ID=','',attr[0])
gene_matrix.append([start,end,s_line[6],tair_agi])
file_in.close()
return gene_matrix
def create_matrix_from_kdef_gff(filename,chr):
    'give a matrix of [start,end,score] from a methylation gff file (Gehring et al 2009)'
# chrom source feature start end score strand frame attributes
# chr1 Solexa WT_embryo_kdef.txt 1201 1301 5.33223847616762e-08 + . .
file_in = open(filename)
file_in.readline()
gene_matrix = []
for line in file_in:
line = line.strip()
s_line = line.split()
if s_line[0] == chr.lower():
start = int(s_line[3])
end = int(s_line[4])
score = float(s_line[5])
gene_matrix.append([start,end,score])
file_in.close()
return gene_matrix
def create_matrix_from_dmr(filename,chr):
    'give a matrix of [start,end] from a dmr file'
#Chr Context Start End EM-EN p (Fisher's exact test)
#chr1 CHH 11699651 11699800 -0.6400 1.20E-07
#chr2 CHH 17249501 17249550 -0.5000 6.52E-10
file_in = open(filename)
file_in.readline() #skip header
dmr = []
for line in file_in:
line = line.strip()
s_line = line.split()
if s_line[0].upper() == chr.upper():
start = int(s_line[2])
end = int(s_line[3])
dmr.append([start,end])
dmr.sort()
file_in.close()
return dmr
```
#### File: jl32587/Jie-Lu/ol.py
```python
def overlap(a,b,percentage = False):
'''Determines whether two lists of coordinates overlap. a and b are lists of genome coordinates[[start1,end1],[start2,end2]...[startn,endn]] that have already been sorted by start and end.
The function returns a list of lists that has the same index as b.'''
# for example:
# a = [[19,38],[21,50],[200,300],[250,400]]
# b = [[18,39],[50,80],[200,300]]
# overlap(a,b) returns a list of lists ol:
# [[0, 1],[],[2, 3]]
total_len = sum([i[1]-i[0]+1 for i in a])
m = len(a)
n = len(b)
a.append([(),()])
b.append([(),()]) # add infinity to the end of each array so that the last element of either a and b has something to compare to.
i = 0
j = 0
ol = [[] for u in range(n+1)]
ol_len = 0
while i <= m and j <= n:
# if a[i] and b[j] do not overlap
if a[i][1] < b[j][0]: # if a[i] is on the left of b[j]
i += 1 # move to the next element of a
elif a[i][0] > b[j][1]: # if a[i] is on the right of b[j]
j += 1 # move to the next element of b
if b[j][0] < b[j-1][1]: # trace back all a if current b overlaps with the previous b
i = traceback(b[j][0],i,a)
# if a[i] and b[j] overlap
else:
ol[j].append(i) # append the index of the element in list a which overlaps with b[j] to the result list ol[j], which means that b[j] overlaps with a[i].
if percentage == True:
ol_len += overlap_len(a[i],b[j])
if a[i][1] > b[j][1]: # if the end of a[i] extend beyond the end of b[j], it could also overlap with the following elements of b.
for y in range(j+1, n): # loop through all the other elements in b to check whether a[i] overlaps with the rest elements of b.
if a[i][1] > b[y][0]:
ol[y].append(i)
if percentage == True:
ol_len += overlap_len(a[i],b[y])
else: # until a[i] does not overlap with any other elements of b.
break
i += 1 # move to the next element of a
del ol[n]
del a[m]
del b[n]
if percentage == False:
return ol
else:
return float(ol_len)/total_len
def traceback(b,i,a):
while i > 0:
if a[i][1] > b:
i -= 1
else:
break
return i
def overlap_dot(a,b):
    '''Determines which positions in a fall inside the coordinate intervals in b. a is a sorted list of positions,
    and b is a list of genome coordinates [[start1,end1],[start2,end2]...[startn,endn]] that has already been sorted by start and end.
    The function returns a list of lists that has the same index as b.'''
    # for example:
    # a = [19,21,50,200,300,400]
    # b = [[18,39],[50,80],[200,300]]
    # overlap_dot(a,b) returns a list of lists ol:
    # [[0, 1], [2], [3, 4]]
m = len(a)
n = len(b)
a.append([])
b.append([(),()]) # add infinity to the end of each array so that the last element of either a and b has something to compare to.
i = 0
j = 0
ol = [[] for u in range(n+1)]
while i <= m and j <= n:
# if a[i] and b[j] do not overlap
if a[i] < b[j][0]: # if a[i] is on the left of b[j]
i += 1 # move to the next element of a
elif a[i] > b[j][1]: # if a[i] is on the right of b[j]
j += 1 # move to the next element of b
if b[j][0] < b[j-1][1]:
i = traceback_dot(b[j][0],i,a)
# if a[i] and b[j] overlap
else:
ol[j].append(i) # append the index of the element in list a which overlaps with b[j] to the result list ol[j], which means that b[j] overlaps with a[i].
i += 1 # move to the next element of a
del ol[n]
del a[m]
del b[n]
return ol
def traceback_dot(b,i,a):
while i > 0:
if a[i] > b:
i -= 1
else:
break
return i
def overlap_len(a,b):
try:
if a[0] <= b[0]:
if a[1] <= b[1]:
return a[1] - b[0] + 1
else:
return b[1] - b[0] + 1
else:
if a[1] <= b[1]:
return a[1] - a[0] + 1
else:
return b[1] - a[0] + 1
except TypeError:
return 0
## a = [[19,38],[21,50],[200,300],[250,400]]
## b = [[18,39],[50,80],[200,300]]
## print overlap(a,b,percentage=True)
``` |
{
"source": "jl3953/cockroach2.0",
"score": 2
} |
#### File: cockroach2.0/experiments/driver.py
```python
import argparse
import configparser
import os
import exp_lib
import lib
import plotlib
FPATH = os.path.dirname(os.path.realpath(__file__))
CONFIG_LIST = [
# "new_zipfian_read95.ini",
# "new_zipfian_write.ini"
# "new_zipfian_overload.ini"
# "baseline.ini",
# "read100.ini",
"beep.ini",
]
EXP, SKEWS = exp_lib.create_experiment(FPATH, CONFIG_LIST[0])
DB_QUERY_NODE = "192.168.1.2"
def gather_statistics(exp, skews, collect_only=False):
""" Warms up cluster to a stable state and gathers statistics.
Args:
exp: experimental configuration
skews: self explanatory
collect_only: if set, do not warm up machines, assume
that logs are already generated, and only collect the
results from them.
Return:
None.
"""
exps = lib.vary_zipf_skew(exp, skews)
if not collect_only:
for e in exps:
lib.cleanup_previous_experiment(exp)
lib.init_experiment(exp)
lib.warmup_cluster(e)
lib.query_for_shards(DB_QUERY_NODE, e)
lib.grep_for_term(e, "jenndebug bumped")
lib.run_bench(e)
plotlib.plot_shards(exp, skews)
plotlib.plot_bumps(exp, skews)
def generate_skew_curve(exp, skews, view=False, collect=False, prepopulate=False):
""" Warms up cluster and generates curve over skew space.
Args:
exp: experimental configuration
skews: self-explanatory
view: if set, only run benchmarks, do not record logs.
Returns:
None.
"""
exps = lib.vary_zipf_skew(exp, skews)
for e in exps:
lib.cleanup_previous_experiment(exp)
lib.init_experiment(exp)
# insert writes, or just warm up
if prepopulate:
lib.prepopulate_cluster(e)
else:
lib.warmup_cluster(e)
if collect:
lib.query_for_shards(DB_QUERY_NODE, e)
lib.grep_for_term(e, "jenndebug bumped")
if not view:
lib.run_bench(e)
if exp["disable_cores"]:
hosts = [node["ip"] for node in exp["warm_nodes"] + exp["hot_nodes"]]
lib.enable_cores(exp["disable_cores"], hosts)
def plot(exp, skews, driver_node, csv_path, csv_file,
view=False, collect=False, take_over_time=False):
if take_over_time:
plotlib.gather_over_time(exp)
if len(skews) > 1:
print("over_time takes only one skew!!")
if collect:
plotlib.plot_shards(exp, skews)
plotlib.plot_bumps(exp, skews)
if not view:
plotlib.gnuplot(exp, skews, driver_node, csv_path, csv_file)
def create_trial_outdir(config_filename, i):
""" Appends number to log directory per trial.
Args:
config_filename: filename of .ini file.
i: nth trial
Returns:
Directory name with trial number appended.
"""
config = configparser.ConfigParser()
config.read(config_filename)
logs_dir = config["DEFAULT"]["LOGS_DIR"]
if i > 0:
logs_dir += "_" + str(i)
return exp_lib.create_out_dir(FPATH, logs_dir, config["DEFAULT"]["OUT_DIR"])
def main():
parser = argparse.ArgumentParser(description='Start script for cockroach.')
parser.add_argument('--start', action='store_true', help='starts, or restarts, the cluster.')
parser.add_argument('--obliterate', action='store_true', help='kills cluster and cleans up, if specified')
parser.add_argument('--benchmark', action='store_true',
help='runs specified benchmark, assumes db is already started')
parser.add_argument('--ini_files', nargs='*', help='.ini file to read from')
parser.add_argument('--driver_node')
parser.add_argument('--csv_path', help="where the generated csv file will go")
parser.add_argument('--csv_file', help="name of generated csv file")
    parser.add_argument('--override', help='overrides parameters according to the provided .ini,'
                                           ' only valid when running benchmark')
parser.add_argument('--view', action='store_true', help='only runs warmup for short testing')
# parser.add_argument('--logs', action='store_true', help='parses benchmark logs')
parser.add_argument('--stats', action='store_true',
help='gathers statistics on benchmark instead of generating curve')
parser.add_argument('--collect', action='store_true', help='collects statistics without running the benchmark')
parser.add_argument('--prepopulate', action='store_true')
parser.add_argument('--over_time', action='store_true', help='collects stats over time')
args = parser.parse_args()
if args.obliterate:
lib.cleanup_previous_experiment(EXP)
elif args.benchmark:
if not args.driver_node or not args.ini_files:
print("missing --driver_node and/or --ini_files when used with --benchmark option")
parser.print_help()
return -1
elif args.view is False and (not args.csv_path or not args.csv_file):
print("when --view is False / not specified, must include --csv_path and --csv_file")
parser.print_help()
return -1
for config_file in args.ini_files:
exp, skews = exp_lib.create_experiment(FPATH, config_file, args.override)
for i in range(exp["trials"]):
exp["out_dir"] = create_trial_outdir(config_file, i)
generate_skew_curve(exp, skews, args.view, args.collect, args.prepopulate)
plot(exp, skews, args.driver_node, args.csv_path, args.csv_file,
args.view, args.collect, args.over_time)
elif args.stats:
for config_file in args.ini_files:
exp, skews = exp_lib.create_experiment(FPATH, config_file, args.override)
for i in range(exp["trials"]):
exp["out_dir"] = create_trial_outdir(config_file, i)
gather_statistics(exp, skews, args.collect)
elif args.start:
lib.cleanup_previous_experiment(EXP)
lib.init_experiment(EXP)
else:
parser.print_help()
if __name__ == "__main__":
main()
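# Hedged usage sketch (the node address, paths, and file names below are placeholders):
#   python driver.py --start
#   python driver.py --benchmark --ini_files beep.ini --driver_node 192.168.1.19 \
#       --csv_path /tmp --csv_file results.csv
#   python driver.py --obliterate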
```
#### File: cockroach2.0/experiments/lib.py
```python
import copy
import json
import os
import shlex
import subprocess
import sys
import bash_imitation
import lib
COCKROACH_DIR = "/usr/local/temp/go/src/github.com/cockroachdb/cockroach"
EXE = os.path.join(COCKROACH_DIR, "cockroach")
STORE_DIR = "/data"
FPATH = os.path.dirname(os.path.realpath(__file__))
BASE_DIR = os.path.join(FPATH, '..')
LOGS_DIR = os.path.join(BASE_DIR, 'logs')
def call(cmd, err_msg):
print(cmd)
p = subprocess.run(cmd, universal_newlines=True, shell=True)
if p.returncode:
print(p.stderr)
print(err_msg)
sys.exit(1)
else:
return p.stdout
def call_remote(host, cmd, err_msg):
cmd = "sudo ssh {0} '{1}'".format(host, cmd)
return call(cmd, err_msg)
def call_remote_redirect_stdout(host, cmd, err_msg, path):
cmd = "sudo ssh {0} '{1}'".format(host, cmd)
print(cmd)
print(path)
with open(path, "w") as f:
return subprocess.Popen(shlex.split(cmd), stdout=f)
def init_store(node):
ip = node["ip"]
cmd = "if [[ ! -e {0} ]]; then mkdir {0}; fi".format(STORE_DIR)
call_remote(ip, cmd, "Failed to initialize store")
cmd = ("if [[ $(! mount -l | grep {0}) != *{0}* ]]; "
"then mount -t tmpfs -o size=32g tmpfs {0}; fi").format(STORE_DIR)
call_remote(ip, cmd, "Failed to initialize store")
def kill_cockroach_node(node):
ip = node["ip"]
if "store" in node:
store = node["store"]
else:
store = None
cmd = ("PID=$(! pgrep cockroach) "
"|| (sudo pkill -9 cockroach; while ps -p $PID;do sleep 1;done;)")
if store:
cmd = "({0}) && {1}".format(
cmd, "sudo rm -rf {0}".format(os.path.join(store, "*")))
cmd = "ssh {0} '{1}'".format(ip, cmd)
print(cmd)
return subprocess.Popen(shlex.split(cmd))
def start_cockroach_node(node, join=None):
ip = node["ip"]
store = node["store"]
region = node["region"]
cmd = ("{0} start --insecure "
"--advertise-addr={1} "
"--store={2} "
"--locality=region={3} "
"--cache=.25 "
"--max-sql-memory=.25 "
"--log-file-verbosity=2 "
"--background"
).format(EXE, ip, store, region)
if join:
cmd = "{0} --join={1}:26257".format(cmd, join)
cmd = "ssh -tt {0} '{1}' && stty sane".format(ip, cmd)
print(cmd)
return subprocess.Popen(cmd, shell=True)
def query_for_shards(ip, config):
cmd = '/usr/local/temp/go/src/github.com/cockroachdb/cockroach/cockroach sql --insecure --execute "show experimental_ranges from table kv.kv"'
# cmd = "sudo ssh {0} '{1}'".format(ip, cmd)
out_dir = config["out_dir"]
if not os.path.exists(out_dir):
os.makedirs(out_dir)
save_params(config, out_dir)
outfile = os.path.join(out_dir, "shards.csv")
p = call_remote_redirect_stdout(ip, cmd, "query_shard_err", outfile)
p.wait()
def set_cluster_settings(node):
ip = node["ip"]
cmd = ('echo "'
# 'set cluster setting kv.range_merge.queue_enabled = false;'
# 'set cluster setting kv.range_split.by_load_enabled = false;'
'set cluster setting kv.raft_log.disable_synchronization_unsafe = true;'
'alter range default configure zone using num_replicas = 1;'
'" | {0} sql --insecure '
'--url="postgresql://root@{1}?sslmode=disable"').format(EXE, ip)
call_remote(ip, cmd, "Failed to set cluster settings.")
def grep_for_term(config, term):
nodes = config["hot_nodes"] + config["warm_nodes"]
out_dir = config["out_dir"]
if not os.path.exists(out_dir):
os.makedirs(out_dir)
save_params(config, out_dir)
outfile = os.path.join(out_dir, "bumps.csv")
print(outfile)
ssh = "sudo ssh {0} '{1}'"
template = 'grep -ir "{0}" /data/logs/cockroach.node-* | wc -l'.format(term)
    with open(outfile, "w") as f:
for n in nodes:
cmd = ssh.format(n["ip"], template)
print(cmd)
p = subprocess.Popen(shlex.split(cmd), stdout=f)
p.wait()
def start_cluster(nodes):
"""
:returns None
"""
if len(nodes) == 0:
return None
first = nodes[0]
start_cockroach_node(first).wait()
ps = []
for n in nodes[1:]:
ps.append(start_cockroach_node(n, join=first["ip"]))
for p in ps:
p.wait()
set_cluster_settings(first)
def build_cockroach(node, commit):
cmd = ("ssh {0} 'export GOPATH=/usr/local/temp/go "
"&& set -x && cd {1} && git fetch origin {2} && git checkout {2} && git pull origin {2} && git submodule update --init "
"&& (export PATH=$PATH:/usr/local/go/bin && echo $PATH && make build || (make clean && make build)) && set +x'") \
.format(node["ip"], COCKROACH_DIR, commit)
return subprocess.Popen(shlex.split(cmd))
def build_cockroach_commit(nodes, commit):
ps = [build_cockroach(n, commit) for n in nodes]
for p in ps:
p.wait()
def cleanup_previous_experiment(config):
ps = []
for n in config["workload_nodes"]:
p = kill_cockroach_node(n)
ps.append(p)
for n in config["hot_nodes"] + config["warm_nodes"]:
p = kill_cockroach_node(n)
ps.append(p)
if config["disable_cores"]:
lib.enable_cores(config["disable_cores"], [n["ip"]])
for p in ps:
p.wait()
def set_hot_keys(nodes, keys):
if len(keys) == 0:
return
values = ', '.join(map(lambda k: "({})".format(k), keys))
for n in nodes:
ip = n["ip"]
cmd = ('echo "'
'alter table kv.kv hotkey at values {2};'
'" | {0} sql --insecure '
'--url="postgresql://root@{1}?sslmode=disable"').format(EXE, ip, values)
call_remote(ip, cmd, "Failed to set cluster settings.")
def modify_cores(cores, is_enable_cores, hosts):
for host in hosts:
for i in range(1, cores + 1):
if is_enable_cores:
bash_imitation.enable_core(i, host)
else:
bash_imitation.disable_core(i, host)
def disable_cores(cores, hosts):
modify_cores(cores, False, hosts)
def enable_cores(cores, hosts):
modify_cores(cores, True, hosts)
def init_experiment(config):
nodes = config["workload_nodes"] \
+ config["warm_nodes"] \
+ config["hot_nodes"]
build_cockroach_commit(nodes, config["cockroach_commit"])
# disable any cores
if config["disable_cores"]:
hosts = [node["ip"] for node in config["warm_nodes"] + config["hot_nodes"]]
disable_cores(config["disable_cores"], hosts)
# Start hot node separately from warm nodes
# start_cluster(config["hot_nodes"])
start_cluster(config["warm_nodes"] + config["hot_nodes"]) # no, start them together for now
def save_params(exp_params, out_dir):
params = {
"exp_params": exp_params
}
path = os.path.join(out_dir, "params.json")
with open(path, "w") as f:
json.dump(params, f, indent=4)
def read_params(out_dir):
path = os.path.join(out_dir, "params.json")
with open(path, "r") as f:
params = json.load(f)
return params["exp_params"]
def vary_zipf_skew(config, skews):
if ("benchmark" in config and
"run_args" in config["benchmark"] and
"distribution" in config["benchmark"]["run_args"] and
"type" in config["benchmark"]["run_args"]["distribution"] and
config["benchmark"]["run_args"]["distribution"]["type"] == "zipf"):
out_dir = config["out_dir"]
exps = []
for i in range(len(skews)):
s = skews[i]
c = config["benchmark"]["run_args"]["concurrency"][i]
e = copy.deepcopy(config)
if "params" not in e["benchmark"]["run_args"]["distribution"]:
e["benchmark"]["run_args"]["distribution"]["params"] = {}
if "skew" in e["benchmark"]["run_args"]["distribution"]["params"]:
print("WARNING: Overwriting skew param in experiment config!")
e["benchmark"]["run_args"]["distribution"]["params"]["skew"] = s
e["benchmark"]["run_args"]["concurrency"] = c
e["out_dir"] = os.path.join(out_dir, "skew-{0}".format(i))
exps.append(e)
return exps
else:
raise ValueError(
"Passed experiment that does not use Zipf distribution!")
def parse_bench_args(bench_config, is_warmup=False, hot_key=None):
args = []
if "duration" in bench_config:
if is_warmup:
args.append("--duration={}s".format(bench_config["warmup_duration"]))
else:
args.append("--duration={}s".format(bench_config["duration"]))
if "drop" in bench_config and bench_config["drop"] is True:
args.append("--drop")
if "concurrency" in bench_config:
args.append("--concurrency={}".format(bench_config["concurrency"]))
if "splits" in bench_config:
args.append("--splits={}".format(bench_config["splits"]))
if "read_percent" in bench_config:
args.append("--read-percent={}".format(bench_config["read_percent"]))
if "n_statements_per_txn" in bench_config:
args.append("--stmt-per-txn={}".format(bench_config["n_statements_per_txn"]))
if "n_keys_per_statement" in bench_config:
args.append("--batch={}".format(bench_config["n_keys_per_statement"]))
if "distribution" in bench_config:
d = bench_config["distribution"]
params = d["params"]
if d["type"] == "zipf":
args.append("--zipfian")
args.append("--s={1}".format(args, params["skew"]))
if "use_original_zipfian" in bench_config:
args.append("--useOriginal={}".format(bench_config["use_original_zipfian"]))
if hot_key:
args.append("--hotkey={}".format(hot_key))
if "keyspace" in bench_config:
args.append("--keyspace={}".format(bench_config["keyspace"]))
return " ".join(args)
def init_workload(b, name, urls, workload_nodes):
args = parse_bench_args(b["init_args"], is_warmup=True)
cmd = "{0} workload init {1} {2} {3}".format(EXE, name, urls, args)
ip = workload_nodes[0]["ip"]
call_remote(ip, cmd, "Failed to initialize benchmark")
def set_database_settings(nodes, create_partition, hot_key):
ip = nodes[0]["ip"]
cmd = ('echo "'
'alter range default configure zone using num_replicas = 1;')
if create_partition:
cmd += 'alter table kv partition by range(k) (partition hot values from (minvalue) to ({0}), partition warm values from ({0}) to (maxvalue));'.format(
hot_key)
cmd += "alter partition hot of table kv configure zone using constraints='\\''[+region=newyork]'\\'';"
cmd += "alter partition warm of table kv configure zone using constraints='\\''[-region=newyork]'\\'';"
cmd += ('" | {0} sql --insecure --database=kv '
'--url="postgresql://root@{1}?sslmode=disable"').format(EXE, ip)
call_remote(ip, cmd, "Failed to assign partition affinity")
def extract_config_params(config):
out_dir = config["out_dir"]
if not os.path.exists(out_dir):
os.makedirs(out_dir)
nodes = config["warm_nodes"]
if config["use_hot_nodes_as_gateways"]:
nodes += config["hot_nodes"]
b = config["benchmark"]
name = b["name"]
urls = ["postgresql://root@{0}:26257?sslmode=disable".format(n["ip"])
for n in nodes]
urls = " ".join(urls)
workload_nodes = config["workload_nodes"]
return out_dir, nodes, b, name, urls, workload_nodes
def run_workload(workload_nodes, b, name, urls, out_dir, is_warmup=False, hot_key=None):
i = 0
ps = []
for wn in workload_nodes:
args = parse_bench_args(b["run_args"], is_warmup=is_warmup, hot_key=hot_key)
cmd = "{0} workload run {1} {2} {3}".format(EXE, name, urls, args)
ip = wn["ip"]
if is_warmup:
# Call remote
cmd = "sudo ssh {0} '{1}'".format(ip, cmd)
print(cmd)
ps.append(subprocess.Popen(shlex.split(cmd)))
else:
path = os.path.join(out_dir, "bench_out_{0}.txt".format(i))
p = call_remote_redirect_stdout(ip, cmd, "run_workload_err", path)
ps.append(p)
i += 1
for p in ps:
p.wait()
def prepopulate_cluster(config):
out_dir, nodes, b, name, urls, workload_nodes = extract_config_params(config)
init_workload(b, name, urls, workload_nodes)
set_database_settings(nodes, config["should_create_partition"], config["hot_key"])
b_copy = copy.deepcopy(b)
b_copy["run_args"]["warmup_duration"] = 30
b_copy["run_args"]["read_percent"] = 0
# populate with writes
hot_key = None
if "hot_key" in config:
hot_key = config["hot_key"]
run_workload(workload_nodes, b_copy, name, urls, out_dir, is_warmup=True, hot_key=hot_key)
# real warmup
run_workload(workload_nodes, b, name, urls, out_dir, is_warmup=True, hot_key=hot_key)
def warmup_cluster(config):
out_dir, nodes, b, name, urls, workload_nodes = extract_config_params(config)
if len(workload_nodes) == 0:
print("No workload nodes!")
return
if len(nodes) == 0:
print("No cluster nodes!")
return
# initialize workload on database
init_workload(b, name, urls, workload_nodes)
# set database settings (hot key, replicas)
set_database_settings(nodes, config["should_create_partition"], config["hot_key"])
# run workload
hot_key = None
if "hot_key" in config:
hot_key = config["hot_key"]
run_workload(workload_nodes, b, name, urls, out_dir, is_warmup=True, hot_key=hot_key)
def run_bench(config):
out_dir, nodes, b, name, urls, workload_nodes = extract_config_params(config)
if not os.path.exists(out_dir):
os.makedirs(out_dir)
save_params(config, out_dir)
if len(workload_nodes) == 0:
print("No workload nodes!")
return
if len(nodes) == 0:
print("No cluster nodes!")
return
hot_key = None
if "hot_key" in config:
hot_key = config["hot_key"]
run_workload(workload_nodes, b, name, urls, out_dir, is_warmup=False, hot_key=hot_key)
```
#### File: cockroach2.0/experiments/plotlib.py
```python
import collections
import csv
import os
import re
import lib
import over_time
DRIVER_NODE = "192.168.1.19"
def extract_data(last_eight_lines):
def parse(header_line, data_line, suffix=""):
if "elapsed" not in header_line:
return {}
fields = data_line.strip().split()
if "read" in fields[-1]:
suffix = "-r"
elif "write" in fields[-1]:
suffix = "-w"
else:
suffix = ""
header = [w + suffix for w in re.split('_+', header_line.strip().strip('_'))]
data = dict(zip(header, fields))
return data
read_data = {}
try:
read_data = parse(last_eight_lines[0], last_eight_lines[1], "-r")
except BaseException:
print("write only")
write_data = {}
try:
write_data = parse(last_eight_lines[3], last_eight_lines[4], "-w")
read_data.update(write_data)
except BaseException:
print("read only")
data = parse(last_eight_lines[6], last_eight_lines[7])
read_data.update(data)
return read_data
def write_out_data_wrapper(data, out_dir, outfile_name="gnuplot.csv"):
filename = os.path.join(out_dir, outfile_name)
return write_out_data(data, filename)
def read_in_data(infile):
data = []
if os.path.exists(infile):
with open(infile, "r") as csvfile:
reader = csv.DictReader(csvfile, delimiter='\t')
for row in reader:
for key in row:
row[key] = float(row[key])
data.append(dict(row))
return data
def insert_lt_csv_data(data, filename):
if len(data) <= 0:
return None
existing_rows = read_in_data(filename)
all_data = existing_rows + data
print(all_data)
all_data = sorted(all_data, key=lambda i: i["concurrency"])
_ = write_out_data(all_data, filename)
return filename
def write_out_data(data, outfile, mode="w"):
if len(data) <= 0:
return ""
print(mode)
with open(outfile, mode) as csvfile:
writer = csv.DictWriter(csvfile, delimiter='\t', fieldnames=data[0].keys())
writer.writeheader()
for datum in data:
try:
writer.writerow(datum)
except BaseException:
print("failed on {0}".format(datum))
continue
return outfile
def aggregate(acc):
""" Aggregates data across workload nodes.
Args:
acc (list[dict])
Returns:
one final data point (dict).
"""
final_datum = collections.defaultdict(float)
for datum in acc:
for k, v in datum.items():
try:
final_datum[k] += float(v)
except BaseException:
print("could not add to csv file key:[{0}], value:[{1}]".format(k, v))
continue
for k in final_datum:
if "ops" not in k:
final_datum[k] /= len(acc)
return final_datum
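# Worked sketch of aggregate() with hypothetical field names: any key containing "ops" is summed
# across workload nodes, while every other numeric field is averaged.
#   aggregate([{"ops/sec(total)": 100, "p50(ms)": 2.0}, {"ops/sec(total)": 300, "p50(ms)": 4.0}])
#   == {"ops/sec(total)": 400.0, "p50(ms)": 3.0}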
def is_output_okay(tail):
try:
if not ("elapsed" in tail[3] and "elapsed" in tail[6]):
return False
return True
except BaseException:
return False
def accumulate_workloads_per_skew(config, dir_path):
""" Aggregating data for a single skew point across all workload nodes.
Returns:
extracted datum, success or not
"""
acc = []
for j in range(len(config["workload_nodes"])):
path = os.path.join(dir_path, "bench_out_{0}.txt".format(j))
with open(path, "r") as f:
# read the last eight lines of f
print(path)
tail = f.readlines()[-8:]
if not is_output_okay(tail):
print("{0} missing some data lines".format(path))
return None, False
try:
datum = extract_data(tail)
acc.append(datum)
except BaseException:
print("failed to extract data: {0}".format(path))
return None, False
final_datum = aggregate(acc)
return final_datum, True
def extract_shard_data(lines):
def to_int(key):
if key == "NULL":
return 0
else:
all_nums = key.split("/")
last = int(all_nums[-1])
if last == 0:
return int(all_nums[-2])
else:
return last
point = lines[0]
print(point)
point["start_key"] = to_int(point["start_key"])
point["end_key"] = to_int(point["end_key"])
point["range_id"] = int(point["range_id"])
point["range_size_mb"] = float(point["range_size_mb"])
return point
def accumulate_shard_per_skew(config, dir_path):
acc = []
path = os.path.join(dir_path, "shards.csv")
with open(path, "r") as f:
reader = csv.DictReader(f, delimiter='\t')
for row in reader:
acc.append(row)
datum = extract_shard_data(acc)
return datum, True
def accumulate_greps_per_skew(config, dir_path):
bumps = 0
path = os.path.join(dir_path, "bumps.csv")
with open(path, "r") as f:
for line in f:
bumps += int(line.strip())
return {"bumps": bumps}, True
def gather_over_time(config):
def write_out_stats(stats, out):
with open(out, "w") as f:
writer = csv.DictWriter(f, delimiter='\t', fieldnames=stats[0].keys())
writer.writeheader()
for stat in stats:
writer.writerow(stat)
return out
out_dir = os.path.join(config["out_dir"])
print("jenndebug", out_dir)
dir_path = os.path.join(out_dir, "skew-0")
for i in range(len(config["workload_nodes"])):
path = os.path.join(dir_path, "bench_out_{0}.txt".format(i))
stats = over_time.parse_file(path)
        stats.sort(key=lambda stat: stat["elapsed-r"])
filename = write_out_stats(stats, os.path.join(out_dir, "stats_over_time_{0}.csv".format(i)))
print(filename)
csv_file = os.path.basename(os.path.dirname(out_dir)) + "_stats_{0}.csv".format(i)
print(csv_file)
cmd = "mv {0} /usr/local/temp/go/src/github.com/cockroachdb/cockroach/gnuplot/{1}".format(filename, csv_file)
lib.call_remote(DRIVER_NODE, cmd, "gather_time_err")
def generate_csv_file(config, skews, accumulate_fn, suffix, driver_node=DRIVER_NODE,
csv_path="/usr/local/temp/go/src/github.com/cockroachdb/cockroach/gnuplot",
csv_file=None):
""" Generates csv file from skews.
Args:
config: experimental config
skews: take a guess buddy
Returns:
None.
"""
out_dir = os.path.join(config["out_dir"])
data = []
for i in range(len(skews)):
dir_path = os.path.join(out_dir, "skew-{0}".format(i))
datum, succeeded = accumulate_fn(config, dir_path)
if succeeded:
datum_with_skew = {"skew": skews[i]}
datum_with_skew.update(datum)
data.append(datum_with_skew)
else:
print("failed on skew[{0}]".format(skews[i]))
continue
filename = write_out_data_wrapper(data, out_dir, suffix + ".csv")
print(filename)
if csv_file is None:
csv_file = os.path.basename(os.path.dirname(out_dir)) + "_" + suffix + ".csv"
print(csv_file)
cmd = "cp {0} {1}/{2}".format(filename, csv_path, csv_file)
lib.call_remote(driver_node, cmd, "i like to move it move it")
def plot_bumps(config, skews):
generate_csv_file(config, skews, accumulate_greps_per_skew, "bumps")
def plot_shards(config, skews):
generate_csv_file(config, skews, accumulate_shard_per_skew, "shard")
def gnuplot(config, skews, driver_node,
csv_path="/usr/local/temp/go/src/github.com/cockroachdb/cockroach/gnuplot",
csv_file=None):
generate_csv_file(config, skews, accumulate_workloads_per_skew, "skew",
driver_node=driver_node, csv_path=csv_path, csv_file=csv_file)
``` |
{
"source": "jl3953/thermopylae_tests",
"score": 4
} |
#### File: thermopylae_tests/src/config_io.py
```python
import configparser
import json
def write_config_to_file(config_dict, ini_fpath):
""" Writes a configuration to an ini file.
:param config_dict: (Dict) config to write
:param ini_fpath: (str) fpath to ini file
:return: (str) ini_file written to
"""
config = configparser.ConfigParser()
config["DEFAULT"] = {key: json.dumps(value) for key, value in config_dict.items()}
with open(ini_fpath, "w") as ini:
config.write(ini)
return ini_fpath
def read_config_from_file(ini_fpath):
"""
Reads a config file
:param ini_fpath:
:return: a dictionary of config parameters
"""
config = configparser.ConfigParser()
config.read(ini_fpath)
result = {}
for key in config["DEFAULT"]:
result[key] = json.loads(config["DEFAULT"][key])
return result
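# Round-trip sketch (hypothetical file name): values are stored as JSON strings, so lists and
# numbers come back with their original types rather than as raw INI strings.
if __name__ == "__main__":
    path = write_config_to_file({"skews": [0.5, 0.9], "trials": 3}, "example.ini")
    assert read_config_from_file(path) == {"skews": [0.5, 0.9], "trials": 3}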
```
#### File: thermopylae_tests/src/plot_utils.py
```python
import system_utils
def gnuplot(exe, *args):
args_str = " ".join(args)
cmd = "gnuplot -c {0} {1}".format(exe, args_str)
system_utils.call(cmd)
```
#### File: thermopylae_tests/src/sqlite_helper_object.py
```python
import argparse
import sqlite3
import csv_utils
class SQLiteHelperObject:
def __init__(self, db_file):
self.db = db_file
self.conn = None
self.c = None
def connect(self):
self.conn = sqlite3.connect(self.db)
self.c = self.conn.cursor()
def create_table_if_not_exists(self, table_name, row_names_list):
self.c.execute("CREATE TABLE IF NOT EXISTS {0} ({1})"
.format(table_name, ", ".join(row_names_list)))
def insert_csv_data_into_sqlite_table(self, table_name, csv_fpath, *args, **kwargs):
# read in csv file data
header, data = csv_utils.read_in_data_as_tuples_float(csv_fpath)
# create table if not exists yet
column_names = SQLiteHelperObject.sanitize_column_names(header + list(kwargs.keys()))
data_rows = [tuple(data_row + list(kwargs.values())) for data_row in data]
question_marks = ",".join(["?"] * len(column_names))
self.create_table_if_not_exists(table_name, column_names)
# insert the rows
insert_cmd = "INSERT INTO {0} VALUES ({1})".format(table_name, question_marks)
self.c.executemany(insert_cmd, data_rows)
self.conn.commit()
def close(self):
self.conn.close()
@staticmethod
def sanitize_column_names(column_names):
def sanitize(col_name):
col_name = col_name.replace("-", "_")
col_name = col_name.replace("/", "_per_")
col_name = col_name.replace("(", "_")
col_name = col_name.replace(")", "")
return col_name
return [sanitize(cn) for cn in column_names]
def main():
parser = argparse.ArgumentParser()
parser.add_argument("db_file")
parser.add_argument("csv")
parser.add_argument("logs_dir")
parser.add_argument("table_name")
args = parser.parse_args()
db = SQLiteHelperObject(args.db_file)
db.connect()
    # logs_dir is passed as a keyword so it is picked up via **kwargs and stored as an extra column
    db.insert_csv_data_into_sqlite_table(args.table_name, args.csv,
                                         logs_dir=args.logs_dir)
if __name__ == "__main__":
main()
```
#### File: thermopylae_tests/src/system_utils.py
```python
import shlex
import subprocess
def call(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT):
"""
Calls a command in the shell.
:param cmd: (str)
:param stdout: set by default to subprocess.PIPE (which is standard stream)
:param stderr: set by default subprocess.STDOUT (combines with stdout)
:return: if successful, stdout stream of command.
"""
print(cmd)
p = subprocess.run(cmd, stdout=stdout, stderr=stderr, shell=True, check=True, universal_newlines=True)
return p.stdout
def call_remote(host, cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT):
"""
Makes a remote call of a command.
:param host: (str)
:param cmd: (str)
:param stdout: set by default to subprocess.PIPE (which is the standard stream)
:param stderr: set by default to subprocess.STDOUT (combines with stdout)
:return: if successful, stdout stream of command
"""
cmd = "sudo ssh {0} '{1}'".format(host, cmd)
return call(cmd, stdout, stderr)
def modify_core(node, core_num, is_enable=False):
if core_num >= 16:
raise AssertionError("Cannot specify core larger than 15")
elif core_num <= 0:
raise AssertionError("Cannot specify core 0 or less")
cmd = "echo {0} | tee /sys/devices/system/cpu/cpu{1}/online".format(1 if is_enable else 0, core_num)
cmd = "sudo ssh {0} '{1}'".format(node, cmd)
print(cmd)
return subprocess.Popen(shlex.split(cmd))
# call_remote(node, cmd)
``` |
{
"source": "jl45621/coach",
"score": 2
} |
#### File: rl_coach/agents/categorical_dqn_agent.py
```python
from typing import Union
import numpy as np
from rl_coach.agents.dqn_agent import DQNNetworkParameters, DQNAlgorithmParameters, DQNAgentParameters
from rl_coach.agents.value_optimization_agent import ValueOptimizationAgent
from rl_coach.architectures.head_parameters import CategoricalQHeadParameters
from rl_coach.core_types import StateType
from rl_coach.exploration_policies.e_greedy import EGreedyParameters
from rl_coach.memories.non_episodic.prioritized_experience_replay import PrioritizedExperienceReplay
from rl_coach.schedules import LinearSchedule
class CategoricalDQNNetworkParameters(DQNNetworkParameters):
def __init__(self):
super().__init__()
self.heads_parameters = [CategoricalQHeadParameters()]
class CategoricalDQNAlgorithmParameters(DQNAlgorithmParameters):
"""
:param v_min: (float)
The minimal value that will be represented in the network output for predicting the Q value.
Corresponds to :math:`v_{min}` in the paper.
:param v_max: (float)
The maximum value that will be represented in the network output for predicting the Q value.
Corresponds to :math:`v_{max}` in the paper.
:param atoms: (int)
The number of atoms that will be used to discretize the range between v_min and v_max.
For the C51 algorithm described in the paper, the number of atoms is 51.
"""
def __init__(self):
super().__init__()
self.v_min = -10.0
self.v_max = 10.0
self.atoms = 51
class CategoricalDQNExplorationParameters(EGreedyParameters):
def __init__(self):
super().__init__()
self.epsilon_schedule = LinearSchedule(1, 0.01, 1000000)
self.evaluation_epsilon = 0.001
class CategoricalDQNAgentParameters(DQNAgentParameters):
def __init__(self):
super().__init__()
self.algorithm = CategoricalDQNAlgorithmParameters()
self.exploration = CategoricalDQNExplorationParameters()
self.network_wrappers = {"main": CategoricalDQNNetworkParameters()}
@property
def path(self):
return 'rl_coach.agents.categorical_dqn_agent:CategoricalDQNAgent'
# Categorical Deep Q Network - https://arxiv.org/pdf/1707.06887.pdf
class CategoricalDQNAgent(ValueOptimizationAgent):
def __init__(self, agent_parameters, parent: Union['LevelManager', 'CompositeAgent']=None):
super().__init__(agent_parameters, parent)
self.z_values = np.linspace(self.ap.algorithm.v_min, self.ap.algorithm.v_max, self.ap.algorithm.atoms)
def distribution_prediction_to_q_values(self, prediction):
return np.dot(prediction, self.z_values)
# prediction's format is (batch,actions,atoms)
def get_all_q_values_for_states(self, states: StateType):
if self.exploration_policy.requires_action_values():
prediction = self.get_prediction(states)
q_values = self.distribution_prediction_to_q_values(prediction)
else:
q_values = None
return q_values
def learn_from_batch(self, batch):
network_keys = self.ap.network_wrappers['main'].input_embedders_parameters.keys()
# for the action we actually took, the error is calculated by the atoms distribution
# for all other actions, the error is 0
distributional_q_st_plus_1, TD_targets = self.networks['main'].parallel_prediction([
(self.networks['main'].target_network, batch.next_states(network_keys)),
(self.networks['main'].online_network, batch.states(network_keys))
])
# select the optimal actions for the next state
target_actions = np.argmax(self.distribution_prediction_to_q_values(distributional_q_st_plus_1), axis=1)
m = np.zeros((self.ap.network_wrappers['main'].batch_size, self.z_values.size))
batches = np.arange(self.ap.network_wrappers['main'].batch_size)
# an alternative to the for loop. 3.7x perf improvement vs. the same code done with for looping.
# only 10% speedup overall - leaving commented out as the code is not as clear.
# tzj_ = np.fmax(np.fmin(batch.rewards() + (1.0 - batch.game_overs()) * self.ap.algorithm.discount *
# np.transpose(np.repeat(self.z_values[np.newaxis, :], batch.size, axis=0), (1, 0)),
# self.z_values[-1]),
# self.z_values[0])
#
# bj_ = (tzj_ - self.z_values[0]) / (self.z_values[1] - self.z_values[0])
# u_ = (np.ceil(bj_)).astype(int)
# l_ = (np.floor(bj_)).astype(int)
# m_ = np.zeros((self.ap.network_wrappers['main'].batch_size, self.z_values.size))
# np.add.at(m_, [batches, l_],
# np.transpose(distributional_q_st_plus_1[batches, target_actions], (1, 0)) * (u_ - bj_))
# np.add.at(m_, [batches, u_],
# np.transpose(distributional_q_st_plus_1[batches, target_actions], (1, 0)) * (bj_ - l_))
for j in range(self.z_values.size):
tzj = np.fmax(np.fmin(batch.rewards() +
(1.0 - batch.game_overs()) * self.ap.algorithm.discount * self.z_values[j],
self.z_values[-1]),
self.z_values[0])
bj = (tzj - self.z_values[0])/(self.z_values[1] - self.z_values[0])
u = (np.ceil(bj)).astype(int)
l = (np.floor(bj)).astype(int)
m[batches, l] += (distributional_q_st_plus_1[batches, target_actions, j] * (u - bj))
m[batches, u] += (distributional_q_st_plus_1[batches, target_actions, j] * (bj - l))
# total_loss = cross entropy between actual result above and predicted result for the given action
# only update the action that we have actually done in this transition
TD_targets[batches, batch.actions()] = m
# update errors in prioritized replay buffer
importance_weights = batch.info('weight') if isinstance(self.memory, PrioritizedExperienceReplay) else None
result = self.networks['main'].train_and_sync_networks(batch.states(network_keys), TD_targets,
importance_weights=importance_weights)
total_loss, losses, unclipped_grads = result[:3]
# TODO: fix this spaghetti code
if isinstance(self.memory, PrioritizedExperienceReplay):
errors = losses[0][np.arange(batch.size), batch.actions()]
self.call_memory('update_priorities', (batch.info('idx'), errors))
return total_loss, losses, unclipped_grads
```
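The per-atom loop in `learn_from_batch` projects the Bellman-updated support `r + gamma * z_j` back onto the fixed atoms by splitting each atom's probability mass between its two nearest neighbours. Below is a minimal NumPy sketch of that projection for a single transition; the support bounds, reward, discount and next-state distribution are illustrative assumptions, and the exact-hit case (`l == u`) is handled explicitly here, unlike the loop above, so the toy distribution stays normalized.
```python
import numpy as np

# Hedged sketch of the C51 projection for one transition; all concrete values are illustrative.
v_min, v_max, atoms = -10.0, 10.0, 51
z = np.linspace(v_min, v_max, atoms)            # support of the return distribution
delta_z = z[1] - z[0]
reward, discount, done = 1.0, 0.99, False
next_dist = np.full(atoms, 1.0 / atoms)         # target-network distribution of the greedy action

m = np.zeros(atoms)                             # projected target distribution
for j in range(atoms):
    tzj = np.clip(reward + (1.0 - done) * discount * z[j], v_min, v_max)
    bj = (tzj - v_min) / delta_z                # fractional index on the support
    l, u = int(np.floor(bj)), int(np.ceil(bj))
    if l == u:                                  # atom lands exactly on a support point
        m[l] += next_dist[j]                    # (handling added here to keep the toy distribution normalized)
    else:
        m[l] += next_dist[j] * (u - bj)         # share of mass for the lower neighbour
        m[u] += next_dist[j] * (bj - l)         # share of mass for the upper neighbour

assert np.isclose(m.sum(), 1.0)                 # all probability mass is preserved
```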
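A cross-entropy loss between `m` and the online network's predicted distribution for the taken action then gives the per-transition error, which is what `TD_targets[batches, batch.actions()] = m` prepares above.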
#### File: rl_coach/agents/dfp_agent.py
```python
import copy
from enum import Enum
from typing import Union
import numpy as np
from rl_coach.agents.agent import Agent
from rl_coach.architectures.head_parameters import MeasurementsPredictionHeadParameters
from rl_coach.architectures.embedder_parameters import InputEmbedderParameters
from rl_coach.architectures.middleware_parameters import FCMiddlewareParameters
from rl_coach.architectures.tensorflow_components.layers import Conv2d, Dense
from rl_coach.base_parameters import AlgorithmParameters, AgentParameters, NetworkParameters, \
MiddlewareScheme
from rl_coach.core_types import ActionInfo, EnvironmentSteps, RunPhase
from rl_coach.exploration_policies.e_greedy import EGreedyParameters
from rl_coach.memories.episodic.episodic_experience_replay import EpisodicExperienceReplayParameters
from rl_coach.memories.memory import MemoryGranularity
from rl_coach.spaces import SpacesDefinition, VectorObservationSpace
class HandlingTargetsAfterEpisodeEnd(Enum):
LastStep = 0
NAN = 1
class DFPNetworkParameters(NetworkParameters):
def __init__(self):
super().__init__()
self.input_embedders_parameters = {'observation': InputEmbedderParameters(activation_function='leaky_relu'),
'measurements': InputEmbedderParameters(activation_function='leaky_relu'),
'goal': InputEmbedderParameters(activation_function='leaky_relu')}
self.input_embedders_parameters['observation'].scheme = [
Conv2d(32, 8, 4),
Conv2d(64, 4, 2),
Conv2d(64, 3, 1),
Dense(512),
]
self.input_embedders_parameters['measurements'].scheme = [
Dense(128),
Dense(128),
Dense(128),
]
self.input_embedders_parameters['goal'].scheme = [
Dense(128),
Dense(128),
Dense(128),
]
self.middleware_parameters = FCMiddlewareParameters(activation_function='leaky_relu',
scheme=MiddlewareScheme.Empty)
self.heads_parameters = [MeasurementsPredictionHeadParameters(activation_function='leaky_relu')]
self.async_training = False
self.batch_size = 64
self.adam_optimizer_beta1 = 0.95
class DFPMemoryParameters(EpisodicExperienceReplayParameters):
def __init__(self):
self.max_size = (MemoryGranularity.Transitions, 20000)
self.shared_memory = True
super().__init__()
class DFPAlgorithmParameters(AlgorithmParameters):
"""
:param num_predicted_steps_ahead: (int)
        Number of future steps to predict measurements for. The future steps won't be sequential, but rather
        increase in powers of 2. For example, if num_predicted_steps_ahead = 3, then the steps will be: t+1, t+2, t+4.
The predicted steps will be [t + 2**i for i in range(num_predicted_steps_ahead)]
:param goal_vector: (List[float])
The goal vector will weight each of the measurements to form an optimization goal. The vector should have
the same length as the number of measurements, and it will be vector multiplied by the measurements.
Positive values correspond to trying to maximize the particular measurement, and negative values
correspond to trying to minimize the particular measurement.
:param future_measurements_weights: (List[float])
The future_measurements_weights weight the contribution of each of the predicted timesteps to the optimization
goal. For example, if there are 6 steps predicted ahead, and a future_measurements_weights vector with 3 values,
then only the 3 last timesteps will be taken into account, according to the weights in the
future_measurements_weights vector.
:param use_accumulated_reward_as_measurement: (bool)
If set to True, the accumulated reward from the beginning of the episode will be added as a measurement to
        the measurements vector in the state. This can be useful in environments where the given measurements don't
include enough information for the particular goal the agent should achieve.
:param handling_targets_after_episode_end: (HandlingTargetsAfterEpisodeEnd)
Dictates how to handle measurements that are outside the episode length.
:param scale_measurements_targets: (Dict[str, float])
        Allows rescaling the values of each of the measurements available. This can be useful when the measurements
have a different scale and you want to normalize them to the same scale.
"""
def __init__(self):
super().__init__()
self.num_predicted_steps_ahead = 6
self.goal_vector = [1.0, 1.0]
self.future_measurements_weights = [0.5, 0.5, 1.0]
self.use_accumulated_reward_as_measurement = False
self.handling_targets_after_episode_end = HandlingTargetsAfterEpisodeEnd.NAN
self.scale_measurements_targets = {}
self.num_consecutive_playing_steps = EnvironmentSteps(8)
class DFPAgentParameters(AgentParameters):
def __init__(self):
super().__init__(algorithm=DFPAlgorithmParameters(),
exploration=EGreedyParameters(),
memory=DFPMemoryParameters(),
networks={"main": DFPNetworkParameters()})
@property
def path(self):
return 'rl_coach.agents.dfp_agent:DFPAgent'
# Direct Future Prediction Agent - http://vladlen.info/papers/learning-to-act.pdf
class DFPAgent(Agent):
def __init__(self, agent_parameters, parent: Union['LevelManager', 'CompositeAgent']=None):
super().__init__(agent_parameters, parent)
self.current_goal = self.ap.algorithm.goal_vector
self.target_measurements_scale_factors = None
def learn_from_batch(self, batch):
network_keys = self.ap.network_wrappers['main'].input_embedders_parameters.keys()
network_inputs = batch.states(network_keys)
network_inputs['goal'] = np.repeat(np.expand_dims(self.current_goal, 0),
self.ap.network_wrappers['main'].batch_size, axis=0)
# get the current outputs of the network
targets = self.networks['main'].online_network.predict(network_inputs)
# change the targets for the taken actions
for i in range(self.ap.network_wrappers['main'].batch_size):
targets[i, batch.actions()[i]] = batch[i].info['future_measurements'].flatten()
result = self.networks['main'].train_and_sync_networks(network_inputs, targets)
total_loss, losses, unclipped_grads = result[:3]
return total_loss, losses, unclipped_grads
def choose_action(self, curr_state):
if self.exploration_policy.requires_action_values():
# predict the future measurements
tf_input_state = self.prepare_batch_for_inference(curr_state, 'main')
tf_input_state['goal'] = np.expand_dims(self.current_goal, 0)
measurements_future_prediction = self.networks['main'].online_network.predict(tf_input_state)[0]
action_values = np.zeros(len(self.spaces.action.actions))
num_steps_used_for_objective = len(self.ap.algorithm.future_measurements_weights)
            # calculate the score of each action by multiplying its future measurements with the goal vector
for action_idx in range(len(self.spaces.action.actions)):
action_measurements = measurements_future_prediction[action_idx]
action_measurements = np.reshape(action_measurements,
(self.ap.algorithm.num_predicted_steps_ahead,
self.spaces.state['measurements'].shape[0]))
future_steps_values = np.dot(action_measurements, self.current_goal)
action_values[action_idx] = np.dot(future_steps_values[-num_steps_used_for_objective:],
self.ap.algorithm.future_measurements_weights)
else:
action_values = None
# choose action according to the exploration policy and the current phase (evaluating or training the agent)
action = self.exploration_policy.get_action(action_values)
if action_values is not None:
action_values = action_values.squeeze()
action_info = ActionInfo(action=action, action_value=action_values[action])
else:
action_info = ActionInfo(action=action)
return action_info
def set_environment_parameters(self, spaces: SpacesDefinition):
self.spaces = copy.deepcopy(spaces)
self.spaces.goal = VectorObservationSpace(shape=self.spaces.state['measurements'].shape,
measurements_names=
self.spaces.state['measurements'].measurements_names)
        # if the user has filled in some scale values, check that the measurement names are correct
if set(self.spaces.state['measurements'].measurements_names).intersection(
self.ap.algorithm.scale_measurements_targets.keys()) !=\
set(self.ap.algorithm.scale_measurements_targets.keys()):
raise ValueError("Some of the keys in parameter scale_measurements_targets ({}) are not defined in "
"the measurements space {}".format(self.ap.algorithm.scale_measurements_targets.keys(),
self.spaces.state['measurements'].measurements_names))
super().set_environment_parameters(self.spaces)
# the below is done after calling the base class method, as it might add accumulated reward as a measurement
# fill out the missing measurements scale factors
for measurement_name in self.spaces.state['measurements'].measurements_names:
if measurement_name not in self.ap.algorithm.scale_measurements_targets:
self.ap.algorithm.scale_measurements_targets[measurement_name] = 1
self.target_measurements_scale_factors = \
np.array([self.ap.algorithm.scale_measurements_targets[measurement_name] for measurement_name in
self.spaces.state['measurements'].measurements_names])
def handle_episode_ended(self):
last_episode = self.current_episode_buffer
if self.phase in [RunPhase.TRAIN, RunPhase.HEATUP] and last_episode:
self._update_measurements_targets(last_episode,
self.ap.algorithm.num_predicted_steps_ahead)
super().handle_episode_ended()
def _update_measurements_targets(self, episode, num_steps):
if 'measurements' not in episode.transitions[0].state or episode.transitions[0].state['measurements'] == []:
raise ValueError("Measurements are not present in the transitions of the last episode played. ")
measurements_size = self.spaces.state['measurements'].shape[0]
for transition_idx, transition in enumerate(episode.transitions):
transition.info['future_measurements'] = np.zeros((num_steps, measurements_size))
for step in range(num_steps):
offset_idx = transition_idx + 2 ** step
if offset_idx >= episode.length():
if self.ap.algorithm.handling_targets_after_episode_end == HandlingTargetsAfterEpisodeEnd.NAN:
# the special MSE loss will ignore those entries so that the gradient will be 0 for these
transition.info['future_measurements'][step] = np.nan
continue
elif self.ap.algorithm.handling_targets_after_episode_end == HandlingTargetsAfterEpisodeEnd.LastStep:
offset_idx = - 1
transition.info['future_measurements'][step] = \
self.target_measurements_scale_factors * \
(episode.transitions[offset_idx].state['measurements'] - transition.state['measurements'])
```
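In `choose_action`, each action is scored by reshaping its predicted measurement changes into `(num_predicted_steps_ahead, measurements_size)`, weighting the measurements by the goal vector, and then weighting the last predicted steps by `future_measurements_weights`. The following standalone NumPy sketch reproduces that reduction; the prediction values are random, `num_actions` and `measurements_size` are made-up, and the goal/weight vectors reuse the defaults from `DFPAlgorithmParameters`.
```python
import numpy as np

# Hedged sketch of DFP action scoring; concrete shapes and numbers are illustrative.
num_actions = 3
num_predicted_steps_ahead = 6                              # default in DFPAlgorithmParameters
measurements_size = 2
goal_vector = np.array([1.0, 1.0])                         # default goal_vector
future_measurements_weights = np.array([0.5, 0.5, 1.0])    # default weights, applied to the last 3 steps

rng = np.random.RandomState(0)
# predicted measurement deltas, flattened per action as the network head would return them
prediction = rng.randn(num_actions, num_predicted_steps_ahead * measurements_size)

action_values = np.zeros(num_actions)
for action_idx in range(num_actions):
    per_step = prediction[action_idx].reshape(num_predicted_steps_ahead, measurements_size)
    future_steps_values = per_step.dot(goal_vector)        # weight measurements by the goal
    # only the last len(weights) predicted steps contribute to the objective
    action_values[action_idx] = future_steps_values[-len(future_measurements_weights):].dot(
        future_measurements_weights)

best_action = int(np.argmax(action_values))                # greedy choice before the exploration policy
```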
#### File: rl_coach/architectures/architecture.py
```python
from typing import Any, Dict, List, Tuple
import numpy as np
from rl_coach.base_parameters import AgentParameters
from rl_coach.saver import SaverCollection
from rl_coach.spaces import SpacesDefinition
class Architecture(object):
@staticmethod
def construct(variable_scope: str, devices: List[str], *args, **kwargs) -> 'Architecture':
"""
Construct a network class using the provided variable scope and on requested devices
:param variable_scope: string specifying variable scope under which to create network variables
:param devices: list of devices (can be list of Device objects, or string for TF distributed)
:param args: all other arguments for class initializer
:param kwargs: all other keyword arguments for class initializer
:return: an object which is a child of Architecture
"""
raise NotImplementedError
def __init__(self, agent_parameters: AgentParameters, spaces: SpacesDefinition, name: str= ""):
"""
Creates a neural network 'architecture', that can be trained and used for inference.
:param agent_parameters: the agent parameters
:param spaces: the spaces (observation, action, etc.) definition of the agent
:param name: the name of the network
"""
self.spaces = spaces
self.name = name
self.network_wrapper_name = self.name.split('/')[0] # e.g. 'main/online' --> 'main'
self.full_name = "{}/{}".format(agent_parameters.full_name_id, name)
self.network_parameters = agent_parameters.network_wrappers[self.network_wrapper_name]
self.batch_size = self.network_parameters.batch_size
self.learning_rate = self.network_parameters.learning_rate
self.optimizer = None
self.ap = agent_parameters
def predict(self,
inputs: Dict[str, np.ndarray],
outputs: List[Any] = None,
squeeze_output: bool = True,
initial_feed_dict: Dict[Any, np.ndarray] = None) -> Tuple[np.ndarray, ...]:
"""
Given input observations, use the model to make predictions (e.g. action or value).
:param inputs: current state (i.e. observations, measurements, goals, etc.)
(e.g. `{'observation': numpy.ndarray}` of shape (batch_size, observation_space_size))
:param outputs: list of outputs to return. Return all outputs if unspecified. Type of the list elements
depends on the framework backend.
:param squeeze_output: call squeeze_list on output before returning if True
:param initial_feed_dict: a dictionary of extra inputs for forward pass.
        :return: predictions of action or value, e.g. of shape (batch_size, action_space_size) for action predictions
"""
raise NotImplementedError
@staticmethod
def parallel_predict(sess: Any,
network_input_tuples: List[Tuple['Architecture', Dict[str, np.ndarray]]]) -> \
Tuple[np.ndarray, ...]:
"""
:param sess: active session to use for prediction
:param network_input_tuples: tuple of network and corresponding input
:return: list or tuple of outputs from all networks
"""
raise NotImplementedError
def train_on_batch(self,
inputs: Dict[str, np.ndarray],
targets: List[np.ndarray],
scaler: float=1.,
additional_fetches: list=None,
importance_weights: np.ndarray=None) -> Tuple[float, List[float], float, list]:
"""
Given a batch of inputs (e.g. states) and targets (e.g. discounted rewards), takes a training step: i.e. runs a
forward pass and backward pass of the network, accumulates the gradients and applies an optimization step to
update the weights.
Calls `accumulate_gradients` followed by `apply_and_reset_gradients`.
Note: Currently an unused method.
:param inputs: typically the environment states (but can also contain other data necessary for loss).
(e.g. `{'observation': numpy.ndarray}` with `observation` of shape (batch_size, observation_space_size) or
(batch_size, observation_space_size, stack_size) or
`{'observation': numpy.ndarray, 'output_0_0': numpy.ndarray}` with `output_0_0` of shape (batch_size,))
:param targets: target values of shape (batch_size, ). For example discounted rewards for value network
for calculating the value-network loss would be a target. Length of list and order of arrays in
the list matches that of network losses which are defined by network parameters
:param scaler: value to scale gradients by before optimizing network weights
:param additional_fetches: list of additional values to fetch and return. The type of each list
element is framework dependent.
:param importance_weights: ndarray of shape (batch_size,) to multiply with batch loss.
:return: tuple of total_loss, losses, norm_unclipped_grads, fetched_tensors
total_loss (float): sum of all head losses
losses (list of float): list of all losses. The order is list of target losses followed by list
                of regularization losses. The specifics of the losses are dependent on the network parameters
                (number of heads, etc.)
            norm_unclipped_grads (float): global norm of all gradients before any gradient clipping is applied
fetched_tensors: all values for additional_fetches
"""
raise NotImplementedError
def get_weights(self) -> List[np.ndarray]:
"""
        Gets model weights as a list of ndarrays. It is used for synchronizing weights between two identical networks.
        :return: list of weights as ndarrays
"""
raise NotImplementedError
def set_weights(self, weights: List[np.ndarray], rate: float=1.0) -> None:
"""
Sets model weights for provided layer parameters.
:param weights: list of model weights in the same order as received in get_weights
:param rate: controls the mixture of given weight values versus old weight values.
i.e. new_weight = rate * given_weight + (1 - rate) * old_weight
:return: None
"""
raise NotImplementedError
def reset_accumulated_gradients(self) -> None:
"""
Sets gradient of all parameters to 0.
Once gradients are reset, they must be accessible by `accumulated_gradients` property of this class,
which must return a list of numpy ndarrays. Child class must ensure that `accumulated_gradients` is set.
"""
raise NotImplementedError
def accumulate_gradients(self,
inputs: Dict[str, np.ndarray],
targets: List[np.ndarray],
additional_fetches: list=None,
importance_weights: np.ndarray=None,
no_accumulation: bool=False) -> Tuple[float, List[float], float, list]:
"""
Given a batch of inputs (i.e. states) and targets (e.g. discounted rewards), computes and accumulates the
gradients for model parameters. Will run forward and backward pass to compute gradients, clip the gradient
values if required and then accumulate gradients from all learners. It does not update the model weights,
that's performed in `apply_and_reset_gradients` method.
        Once gradients are accumulated, they are accessed by `accumulated_gradients` property of this class.
:param inputs: typically the environment states (but can also contain other data for loss)
(e.g. `{'observation': numpy.ndarray}` with `observation` of shape (batch_size, observation_space_size) or
(batch_size, observation_space_size, stack_size) or
`{'observation': numpy.ndarray, 'output_0_0': numpy.ndarray}` with `output_0_0` of shape (batch_size,))
:param targets: targets for calculating loss. For example discounted rewards for value network
for calculating the value-network loss would be a target. Length of list and order of arrays in
the list matches that of network losses which are defined by network parameters
:param additional_fetches: list of additional values to fetch and return. The type of each list
element is framework dependent.
:param importance_weights: ndarray of shape (batch_size,) to multiply with batch loss.
:param no_accumulation: if True, set gradient values to the new gradients, otherwise sum with previously
calculated gradients
:return: tuple of total_loss, losses, norm_unclipped_grads, fetched_tensors
total_loss (float): sum of all head losses
losses (list of float): list of all losses. The order is list of target losses followed by list of
                regularization losses. The specifics of the losses are dependent on the network parameters
                (number of heads, etc.)
            norm_unclipped_grads (float): global norm of all gradients before any gradient clipping is applied
fetched_tensors: all values for additional_fetches
"""
raise NotImplementedError
def apply_and_reset_gradients(self, gradients: List[np.ndarray], scaler: float=1.) -> None:
"""
Applies the given gradients to the network weights and resets the gradient accumulations.
Has the same impact as calling `apply_gradients`, then `reset_accumulated_gradients`.
:param gradients: gradients for the parameter weights, taken from `accumulated_gradients` property
of an identical network (either self or another identical network)
:param scaler: A scaling factor that allows rescaling the gradients before applying them
"""
raise NotImplementedError
def apply_gradients(self, gradients: List[np.ndarray], scaler: float=1.) -> None:
"""
Applies the given gradients to the network weights.
Will be performed sync or async depending on `network_parameters.async_training`
:param gradients: gradients for the parameter weights, taken from `accumulated_gradients` property
of an identical network (either self or another identical network)
:param scaler: A scaling factor that allows rescaling the gradients before applying them
"""
raise NotImplementedError
def get_variable_value(self, variable: Any) -> np.ndarray:
"""
        Gets value of a specified variable. Type of variable is dependent on the framework.
Example of a variable is head.kl_coefficient, which could be a symbol for evaluation
or could be a string representing the value.
:param variable: variable of interest
:return: value of the specified variable
"""
raise NotImplementedError
def set_variable_value(self, assign_op: Any, value: np.ndarray, placeholder: Any):
"""
        Updates the value of a specified variable. Type of assign_op is dependent on the framework
and is a unique identifier for assigning value to a variable. For example an agent may use
head.assign_kl_coefficient. There is a one to one mapping between assign_op and placeholder
(in the example above, placeholder would be head.kl_coefficient_ph).
:param assign_op: a parameter representing the operation for assigning value to a specific variable
:param value: value of the specified variable used for update
:param placeholder: a placeholder for binding the value to assign_op.
"""
raise NotImplementedError
def collect_savers(self, parent_path_suffix: str) -> SaverCollection:
"""
Collection of all savers for the network (typically only one saver for network and one for ONNX export)
:param parent_path_suffix: path suffix of the parent of the network
(e.g. could be name of level manager plus name of agent)
:return: saver collection for the network
"""
raise NotImplementedError
```
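The `set_weights` contract above defines weight synchronization as a convex mix, `new = rate * given + (1 - rate) * old`, which covers both hard target-network copies (`rate=1.0`) and Polyak-style soft updates. A framework-agnostic NumPy sketch of that rule, with made-up layer shapes and rates, could look like this:
```python
import numpy as np
from typing import List

def mix_weights(old: List[np.ndarray], given: List[np.ndarray], rate: float = 1.0) -> List[np.ndarray]:
    """Apply new_weight = rate * given_weight + (1 - rate) * old_weight per layer."""
    return [rate * g + (1.0 - rate) * o for o, g in zip(old, given)]

# illustrative layer weights for a target network (old) and an online network (given)
old_weights = [np.zeros((4, 4)), np.zeros(4)]
online_weights = [np.ones((4, 4)), np.ones(4)]

hard_copy = mix_weights(old_weights, online_weights, rate=1.0)    # target <- online
soft_copy = mix_weights(old_weights, online_weights, rate=0.01)   # slow-moving target
assert np.allclose(soft_copy[0], 0.01)
```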
#### File: mxnet_components/heads/ppo_head.py
```python
from typing import List, Tuple, Union
from types import ModuleType
import math
import mxnet as mx
from mxnet.gluon import nn
from rl_coach.base_parameters import AgentParameters
from rl_coach.core_types import ActionProbabilities
from rl_coach.spaces import SpacesDefinition, BoxActionSpace, DiscreteActionSpace
from rl_coach.utils import eps
from rl_coach.architectures.mxnet_components.heads.head import Head, HeadLoss, LossInputSchema,\
NormalizedRSSInitializer
from rl_coach.architectures.mxnet_components.heads.head import LOSS_OUT_TYPE_LOSS, LOSS_OUT_TYPE_REGULARIZATION
from rl_coach.architectures.mxnet_components.utils import hybrid_clip, broadcast_like
LOSS_OUT_TYPE_KL = 'kl_divergence'
LOSS_OUT_TYPE_ENTROPY = 'entropy'
LOSS_OUT_TYPE_LIKELIHOOD_RATIO = 'likelihood_ratio'
LOSS_OUT_TYPE_CLIPPED_LIKELIHOOD_RATIO = 'clipped_likelihood_ratio'
nd_sym_type = Union[mx.nd.NDArray, mx.sym.Symbol]
class MultivariateNormalDist:
def __init__(self,
num_var: int,
mean: nd_sym_type,
sigma: nd_sym_type,
F: ModuleType=mx.nd) -> None:
"""
Distribution object for Multivariate Normal. Works with batches.
Optionally works with batches and time steps, but be consistent in usage: i.e. if using time_step,
mean, sigma and data for log_prob must all include a time_step dimension.
:param num_var: number of variables in distribution
:param mean: mean for each variable,
of shape (num_var) or
of shape (batch_size, num_var) or
of shape (batch_size, time_step, num_var).
:param sigma: covariance matrix,
of shape (num_var, num_var) or
of shape (batch_size, num_var, num_var) or
of shape (batch_size, time_step, num_var, num_var).
:param (mx.nd or mx.sym) F: backend api (mx.sym if block has been hybridized).
"""
self.num_var = num_var
self.mean = mean
self.sigma = sigma
self.F = F
def inverse_using_cholesky(self, matrix: nd_sym_type) -> nd_sym_type:
"""
Calculate inverses for a batch of matrices using Cholesky decomposition method.
:param matrix: matrix (or matrices) to invert,
of shape (num_var, num_var) or
of shape (batch_size, num_var, num_var) or
of shape (batch_size, time_step, num_var, num_var).
:return: inverted matrix (or matrices),
of shape (num_var, num_var) or
of shape (batch_size, num_var, num_var) or
of shape (batch_size, time_step, num_var, num_var).
"""
cholesky_factor = self.F.linalg.potrf(matrix)
return self.F.linalg.potri(cholesky_factor)
def log_det(self, matrix: nd_sym_type) -> nd_sym_type:
"""
Calculate log of the determinant for a batch of matrices using Cholesky decomposition method.
:param matrix: matrix (or matrices) to invert,
of shape (num_var, num_var) or
of shape (batch_size, num_var, num_var) or
of shape (batch_size, time_step, num_var, num_var).
        :return: log of the determinant (or determinants),
            of shape (1) or
            of shape (batch_size) or
            of shape (batch_size, time_step).
"""
cholesky_factor = self.F.linalg.potrf(matrix)
return 2 * self.F.linalg.sumlogdiag(cholesky_factor)
def log_prob(self, x: nd_sym_type) -> nd_sym_type:
"""
Calculate the log probability of data given the current distribution.
See http://www.notenoughthoughts.net/posts/normal-log-likelihood-gradient.html
and https://discuss.mxnet.io/t/multivariate-gaussian-log-density-operator/1169/7
:param x: input data,
of shape (num_var) or
of shape (batch_size, num_var) or
of shape (batch_size, time_step, num_var).
:return: log_probability,
of shape (1) or
of shape (batch_size) or
of shape (batch_size, time_step).
"""
a = (self.num_var / 2) * math.log(2 * math.pi)
log_det_sigma = self.log_det(self.sigma)
b = (1 / 2) * log_det_sigma
sigma_inv = self.inverse_using_cholesky(self.sigma)
# deviation from mean, and dev_t is equivalent to transpose on last two dims.
dev = (x - self.mean).expand_dims(-1)
dev_t = (x - self.mean).expand_dims(-2)
# since batch_dot only works with ndarrays with ndim of 3,
# and we could have ndarrays with ndim of 4,
# we flatten batch_size and time_step into single dim.
dev_flat = dev.reshape(shape=(-1, 0, 0), reverse=1)
sigma_inv_flat = sigma_inv.reshape(shape=(-1, 0, 0), reverse=1)
dev_t_flat = dev_t.reshape(shape=(-1, 0, 0), reverse=1)
c = (1 / 2) * self.F.batch_dot(self.F.batch_dot(dev_t_flat, sigma_inv_flat), dev_flat)
# and now reshape back to (batch_size, time_step) if required.
c = c.reshape_like(b)
log_likelihood = -a - b - c
return log_likelihood
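    # Worked example (illustrative): for a single standard normal variable (num_var=1, mean=0, sigma=[[1]]),
    # log_prob(0) reduces to -0.5 * log(2 * pi) ~= -0.919, since both b and c above are zero.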
def entropy(self) -> nd_sym_type:
"""
Calculate entropy of current distribution.
See http://www.nowozin.net/sebastian/blog/the-entropy-of-a-normal-distribution.html
:return: entropy,
of shape (1) or
of shape (batch_size) or
of shape (batch_size, time_step).
"""
# todo: check if differential entropy is correct
log_det_sigma = self.log_det(self.sigma)
return (self.num_var / 2) + ((self.num_var / 2) * math.log(2 * math.pi)) + ((1 / 2) * log_det_sigma)
def kl_div(self, alt_dist) -> nd_sym_type:
"""
        Calculate KL-Divergence with another MultivariateNormalDist distribution
See https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence
Specifically https://wikimedia.org/api/rest_v1/media/math/render/svg/a3bf3b4917bd1fcb8be48d6d6139e2e387bdc7d3
:param alt_dist: alternative distribution used for kl divergence calculation
:type alt_dist: MultivariateNormalDist
:return: KL-Divergence, of shape (1,)
"""
sigma_a_inv = self.F.linalg.potri(self.F.linalg.potrf(self.sigma))
sigma_b_inv = self.F.linalg.potri(self.F.linalg.potrf(alt_dist.sigma))
term1a = mx.nd.batch_dot(sigma_b_inv, self.sigma)
# sum of diagonal for batch of matrices
term1 = (broadcast_like(self.F, self.F.eye(self.num_var), term1a) * term1a).sum(axis=-1).sum(axis=-1)
mean_diff = (alt_dist.mean - self.mean).expand_dims(-1)
mean_diff_t = (alt_dist.mean - self.mean).expand_dims(-2)
term2 = self.F.batch_dot(self.F.batch_dot(mean_diff_t, sigma_b_inv), mean_diff).reshape_like(term1)
term3 = (2 * self.F.linalg.sumlogdiag(self.F.linalg.potrf(alt_dist.sigma))) -\
(2 * self.F.linalg.sumlogdiag(self.F.linalg.potrf(self.sigma)))
return 0.5 * (term1 + term2 - self.num_var + term3)
class CategoricalDist:
def __init__(self, n_classes: int, probs: nd_sym_type, F: ModuleType=mx.nd) -> None:
"""
Distribution object for Categorical data.
Optionally works with batches and time steps, but be consistent in usage: i.e. if using time_step,
        probs and data for log_prob must all include a time_step dimension.
:param n_classes: number of classes in distribution
:param probs: probabilities for each class,
of shape (n_classes),
of shape (batch_size, n_classes) or
of shape (batch_size, time_step, n_classes)
:param (mx.nd or mx.sym) F: backend api (mx.sym if block has been hybridized).
"""
self.n_classes = n_classes
self.probs = probs
self.F = F
def log_prob(self, actions: nd_sym_type) -> nd_sym_type:
"""
Calculate the log probability of data given the current distribution.
:param actions: actions, with int8 data type,
of shape (1) if probs was (n_classes),
of shape (batch_size) if probs was (batch_size, n_classes) and
of shape (batch_size, time_step) if probs was (batch_size, time_step, n_classes)
:return: log_probability,
of shape (1) if probs was (n_classes),
of shape (batch_size) if probs was (batch_size, n_classes) and
of shape (batch_size, time_step) if probs was (batch_size, time_step, n_classes)
"""
action_mask = actions.one_hot(depth=self.n_classes)
action_probs = (self.probs * action_mask).sum(axis=-1)
return action_probs.log()
def entropy(self) -> nd_sym_type:
"""
Calculate entropy of current distribution.
:return: entropy,
of shape (1) if probs was (n_classes),
of shape (batch_size) if probs was (batch_size, n_classes) and
of shape (batch_size, time_step) if probs was (batch_size, time_step, n_classes)
"""
# todo: look into numerical stability
return -(self.probs.log()*self.probs).sum(axis=-1)
def kl_div(self, alt_dist) -> nd_sym_type:
"""
        Calculate KL-Divergence with another Categorical distribution
:param alt_dist: alternative distribution used for kl divergence calculation
:type alt_dist: CategoricalDist
:return: KL-Divergence
"""
logits_a = self.probs.clip(a_min=eps, a_max=1 - eps).log()
logits_b = alt_dist.probs.clip(a_min=eps, a_max=1 - eps).log()
t = self.probs * (logits_a - logits_b)
t = self.F.where(condition=(alt_dist.probs == 0), x=self.F.ones_like(alt_dist.probs) * math.inf, y=t)
t = self.F.where(condition=(self.probs == 0), x=self.F.zeros_like(self.probs), y=t)
return t.sum(axis=-1)
class DiscretePPOHead(nn.HybridBlock):
def __init__(self, num_actions: int) -> None:
"""
Head block for Discrete Proximal Policy Optimization, to calculate probabilities for each action given
middleware representation of the environment state.
:param num_actions: number of actions in action space.
"""
super(DiscretePPOHead, self).__init__()
with self.name_scope():
self.dense = nn.Dense(units=num_actions, flatten=False,
weight_initializer=NormalizedRSSInitializer(0.01))
def hybrid_forward(self, F: ModuleType, x: nd_sym_type) -> nd_sym_type:
"""
Used for forward pass through head network.
:param (mx.nd or mx.sym) F: backend api (mx.sym if block has been hybridized).
:param x: middleware state representation,
of shape (batch_size, in_channels) or
of shape (batch_size, time_step, in_channels).
:return: batch of probabilities for each action,
of shape (batch_size, num_actions) or
of shape (batch_size, time_step, num_actions).
"""
policy_values = self.dense(x)
policy_probs = F.softmax(policy_values)
return policy_probs
class ContinuousPPOHead(nn.HybridBlock):
def __init__(self, num_actions: int) -> None:
"""
Head block for Continuous Proximal Policy Optimization, to calculate probabilities for each action given
middleware representation of the environment state.
:param num_actions: number of actions in action space.
"""
super(ContinuousPPOHead, self).__init__()
with self.name_scope():
self.dense = nn.Dense(units=num_actions, flatten=False,
weight_initializer=NormalizedRSSInitializer(0.01))
# all samples (across batch, and time step) share the same covariance, which is learnt,
# but since we assume the action probability variables are independent,
# only the diagonal entries of the covariance matrix are specified.
self.log_std = self.params.get('log_std',
shape=(num_actions,),
init=mx.init.Zero(),
allow_deferred_init=True)
# todo: is_local?
def hybrid_forward(self, F: ModuleType, x: nd_sym_type, log_std: nd_sym_type) -> Tuple[nd_sym_type, nd_sym_type]:
"""
Used for forward pass through head network.
:param (mx.nd or mx.sym) F: backend api (mx.sym if block has been hybridized).
:param x: middleware state representation,
of shape (batch_size, in_channels) or
of shape (batch_size, time_step, in_channels).
        :return: batch of action means and standard deviations,
                 of shape (batch_size, num_actions) or
                 of shape (batch_size, time_step, num_actions).
"""
policy_means = self.dense(x)
policy_std = broadcast_like(F, log_std.exp().expand_dims(0), policy_means)
return policy_means, policy_std
class ClippedPPOLossDiscrete(HeadLoss):
def __init__(self,
num_actions: int,
clip_likelihood_ratio_using_epsilon: float,
beta: float=0,
use_kl_regularization: bool=False,
initial_kl_coefficient: float=1,
kl_cutoff: float=0,
high_kl_penalty_coefficient: float=1,
weight: float=1,
batch_axis: int=0) -> None:
"""
Loss for discrete version of Clipped PPO.
:param num_actions: number of actions in action space.
:param clip_likelihood_ratio_using_epsilon: epsilon to use for likelihood ratio clipping.
:param beta: loss coefficient applied to entropy
:param use_kl_regularization: option to add kl divergence loss
        :param initial_kl_coefficient: loss coefficient applied to the kl divergence loss (also see high_kl_penalty_coefficient).
        :param kl_cutoff: threshold for using high_kl_penalty_coefficient
        :param high_kl_penalty_coefficient: loss coefficient applied to kl divergence above kl_cutoff
:param weight: scalar used to adjust relative weight of loss (if using this loss with others).
:param batch_axis: axis used for mini-batch (default is 0) and excluded from loss aggregation.
"""
super(ClippedPPOLossDiscrete, self).__init__(weight=weight, batch_axis=batch_axis)
self.weight = weight
self.num_actions = num_actions
self.clip_likelihood_ratio_using_epsilon = clip_likelihood_ratio_using_epsilon
self.beta = beta
self.use_kl_regularization = use_kl_regularization
self.initial_kl_coefficient = initial_kl_coefficient if self.use_kl_regularization else 0.0
self.kl_coefficient = self.params.get('kl_coefficient',
shape=(1,),
init=mx.init.Constant([initial_kl_coefficient,]),
differentiable=False)
self.kl_cutoff = kl_cutoff
self.high_kl_penalty_coefficient = high_kl_penalty_coefficient
@property
def input_schema(self) -> LossInputSchema:
return LossInputSchema(
head_outputs=['new_policy_probs'],
agent_inputs=['actions', 'old_policy_probs', 'clip_param_rescaler'],
targets=['advantages']
)
def loss_forward(self,
F: ModuleType,
new_policy_probs: nd_sym_type,
actions: nd_sym_type,
old_policy_probs: nd_sym_type,
clip_param_rescaler: nd_sym_type,
advantages: nd_sym_type,
kl_coefficient: nd_sym_type) -> List[Tuple[nd_sym_type, str]]:
"""
Used for forward pass through loss computations.
Works with batches of data, and optionally time_steps, but be consistent in usage: i.e. if using time_step,
new_policy_probs, old_policy_probs, actions and advantages all must include a time_step dimension.
NOTE: order of input arguments MUST NOT CHANGE because it matches the order
parameters are passed in ppo_agent:train_network()
:param (mx.nd or mx.sym) F: backend api (mx.sym if block has been hybridized).
:param new_policy_probs: action probabilities predicted by DiscretePPOHead network,
of shape (batch_size, num_actions) or
of shape (batch_size, time_step, num_actions).
:param old_policy_probs: action probabilities for previous policy,
of shape (batch_size, num_actions) or
of shape (batch_size, time_step, num_actions).
:param actions: true actions taken during rollout,
of shape (batch_size) or
of shape (batch_size, time_step).
:param clip_param_rescaler: scales epsilon to use for likelihood ratio clipping.
:param advantages: change in state value after taking action (a.k.a advantage)
of shape (batch_size) or
of shape (batch_size, time_step).
        :param kl_coefficient: loss coefficient applied to the kl divergence loss (also see high_kl_penalty_coefficient).
:return: loss, of shape (batch_size).
"""
old_policy_dist = CategoricalDist(self.num_actions, old_policy_probs, F=F)
action_probs_wrt_old_policy = old_policy_dist.log_prob(actions)
new_policy_dist = CategoricalDist(self.num_actions, new_policy_probs, F=F)
action_probs_wrt_new_policy = new_policy_dist.log_prob(actions)
entropy_loss = - self.beta * new_policy_dist.entropy().mean()
if self.use_kl_regularization:
kl_div = old_policy_dist.kl_div(new_policy_dist).mean()
weighted_kl_div = kl_coefficient * kl_div
high_kl_div = F.stack(F.zeros_like(kl_div), kl_div - self.kl_cutoff).max().square()
weighted_high_kl_div = self.high_kl_penalty_coefficient * high_kl_div
kl_div_loss = weighted_kl_div + weighted_high_kl_div
else:
kl_div_loss = F.zeros(shape=(1,))
        # working with log probs, so subtract first, then exponentiate (same as dividing the probabilities)
likelihood_ratio = (action_probs_wrt_new_policy - action_probs_wrt_old_policy).exp()
if self.clip_likelihood_ratio_using_epsilon is not None:
# clipping of likelihood ratio
min_value = 1 - self.clip_likelihood_ratio_using_epsilon * clip_param_rescaler
max_value = 1 + self.clip_likelihood_ratio_using_epsilon * clip_param_rescaler
# can't use F.clip (with variable clipping bounds), hence custom implementation
clipped_likelihood_ratio = hybrid_clip(F, likelihood_ratio, clip_lower=min_value, clip_upper=max_value)
            # lower bound of the original and clipped versions of each scaled advantage
# element-wise min between the two ndarrays
unclipped_scaled_advantages = likelihood_ratio * advantages
clipped_scaled_advantages = clipped_likelihood_ratio * advantages
scaled_advantages = F.stack(unclipped_scaled_advantages, clipped_scaled_advantages).min(axis=0)
else:
scaled_advantages = likelihood_ratio * advantages
clipped_likelihood_ratio = F.zeros_like(likelihood_ratio)
# for each batch, calculate expectation of scaled_advantages across time steps,
# but want code to work with data without time step too, so reshape to add timestep if doesn't exist.
scaled_advantages_w_time = scaled_advantages.reshape(shape=(0, -1))
expected_scaled_advantages = scaled_advantages_w_time.mean(axis=1)
# want to maximize expected_scaled_advantages, add minus so can minimize.
surrogate_loss = (-expected_scaled_advantages * self.weight).mean()
return [
(surrogate_loss, LOSS_OUT_TYPE_LOSS),
(entropy_loss + kl_div_loss, LOSS_OUT_TYPE_REGULARIZATION),
(kl_div_loss, LOSS_OUT_TYPE_KL),
(entropy_loss, LOSS_OUT_TYPE_ENTROPY),
(likelihood_ratio, LOSS_OUT_TYPE_LIKELIHOOD_RATIO),
(clipped_likelihood_ratio, LOSS_OUT_TYPE_CLIPPED_LIKELIHOOD_RATIO)
]
class ClippedPPOLossContinuous(HeadLoss):
def __init__(self,
num_actions: int,
clip_likelihood_ratio_using_epsilon: float,
beta: float=0,
use_kl_regularization: bool=False,
initial_kl_coefficient: float=1,
kl_cutoff: float=0,
high_kl_penalty_coefficient: float=1,
weight: float=1,
batch_axis: int=0):
"""
Loss for continuous version of Clipped PPO.
:param num_actions: number of actions in action space.
:param clip_likelihood_ratio_using_epsilon: epsilon to use for likelihood ratio clipping.
:param beta: loss coefficient applied to entropy
:param use_kl_regularization: option to add kl divergence loss
        :param initial_kl_coefficient: initial loss coefficient applied to the kl divergence loss (also see high_kl_penalty_coefficient).
        :param kl_cutoff: threshold for using high_kl_penalty_coefficient
        :param high_kl_penalty_coefficient: loss coefficient applied to kl divergence above kl_cutoff
:param weight: scalar used to adjust relative weight of loss (if using this loss with others).
:param batch_axis: axis used for mini-batch (default is 0) and excluded from loss aggregation.
"""
super(ClippedPPOLossContinuous, self).__init__(weight=weight, batch_axis=batch_axis)
self.weight = weight
self.num_actions = num_actions
self.clip_likelihood_ratio_using_epsilon = clip_likelihood_ratio_using_epsilon
self.beta = beta
self.use_kl_regularization = use_kl_regularization
self.initial_kl_coefficient = initial_kl_coefficient if self.use_kl_regularization else 0.0
self.kl_coefficient = self.params.get('kl_coefficient',
shape=(1,),
init=mx.init.Constant([initial_kl_coefficient,]),
differentiable=False)
self.kl_cutoff = kl_cutoff
self.high_kl_penalty_coefficient = high_kl_penalty_coefficient
@property
def input_schema(self) -> LossInputSchema:
return LossInputSchema(
head_outputs=['new_policy_means','new_policy_stds'],
agent_inputs=['actions', 'old_policy_means', 'old_policy_stds', 'clip_param_rescaler'],
targets=['advantages']
)
def loss_forward(self,
F: ModuleType,
new_policy_means: nd_sym_type,
new_policy_stds: nd_sym_type,
actions: nd_sym_type,
old_policy_means: nd_sym_type,
old_policy_stds: nd_sym_type,
clip_param_rescaler: nd_sym_type,
advantages: nd_sym_type,
kl_coefficient: nd_sym_type) -> List[Tuple[nd_sym_type, str]]:
"""
Used for forward pass through loss computations.
Works with batches of data, and optionally time_steps, but be consistent in usage: i.e. if using time_step,
new_policy_means, old_policy_means, actions and advantages all must include a time_step dimension.
:param (mx.nd or mx.sym) F: backend api (mx.sym if block has been hybridized).
        :param new_policy_means: action means predicted by the ContinuousPPOHead network,
of shape (batch_size, num_actions) or
of shape (batch_size, time_step, num_actions).
:param new_policy_stds: action standard deviation returned by head,
of shape (batch_size, num_actions) or
of shape (batch_size, time_step, num_actions).
:param actions: true actions taken during rollout,
of shape (batch_size, num_actions) or
of shape (batch_size, time_step, num_actions).
:param old_policy_means: action means for previous policy,
of shape (batch_size, num_actions) or
of shape (batch_size, time_step, num_actions).
:param old_policy_stds: action standard deviation returned by head previously,
of shape (batch_size, num_actions) or
of shape (batch_size, time_step, num_actions).
:param clip_param_rescaler: scales epsilon to use for likelihood ratio clipping.
:param advantages: change in state value after taking action (a.k.a advantage)
of shape (batch_size,) or
of shape (batch_size, time_step).
        :param kl_coefficient: loss coefficient applied to the kl divergence loss (also see high_kl_penalty_coefficient).
:return: loss, of shape (batch_size).
"""
def diagonal_covariance(stds, size):
vars = stds ** 2
# sets diagonal in (batch size and time step) covariance matrices
vars_tiled = vars.expand_dims(2).tile((1, 1, size))
covars = F.broadcast_mul(vars_tiled, F.eye(size))
return covars
old_covar = diagonal_covariance(stds=old_policy_stds, size=self.num_actions)
old_policy_dist = MultivariateNormalDist(self.num_actions, old_policy_means, old_covar, F=F)
action_probs_wrt_old_policy = old_policy_dist.log_prob(actions)
new_covar = diagonal_covariance(stds=new_policy_stds, size=self.num_actions)
new_policy_dist = MultivariateNormalDist(self.num_actions, new_policy_means, new_covar, F=F)
action_probs_wrt_new_policy = new_policy_dist.log_prob(actions)
entropy_loss = - self.beta * new_policy_dist.entropy().mean()
if self.use_kl_regularization:
kl_div = old_policy_dist.kl_div(new_policy_dist).mean()
weighted_kl_div = kl_coefficient * kl_div
high_kl_div = F.stack(F.zeros_like(kl_div), kl_div - self.kl_cutoff).max().square()
weighted_high_kl_div = self.high_kl_penalty_coefficient * high_kl_div
kl_div_loss = weighted_kl_div + weighted_high_kl_div
else:
kl_div_loss = F.zeros(shape=(1,))
        # working with log probs, so subtract first, then exponentiate (same as dividing the probabilities)
likelihood_ratio = (action_probs_wrt_new_policy - action_probs_wrt_old_policy).exp()
if self.clip_likelihood_ratio_using_epsilon is not None:
# clipping of likelihood ratio
min_value = 1 - self.clip_likelihood_ratio_using_epsilon * clip_param_rescaler
max_value = 1 + self.clip_likelihood_ratio_using_epsilon * clip_param_rescaler
# can't use F.clip (with variable clipping bounds), hence custom implementation
clipped_likelihood_ratio = hybrid_clip(F, likelihood_ratio, clip_lower=min_value, clip_upper=max_value)
            # lower bound of the original and clipped versions of each scaled advantage
# element-wise min between the two ndarrays
unclipped_scaled_advantages = likelihood_ratio * advantages
clipped_scaled_advantages = clipped_likelihood_ratio * advantages
scaled_advantages = F.stack(unclipped_scaled_advantages, clipped_scaled_advantages).min(axis=0)
else:
scaled_advantages = likelihood_ratio * advantages
clipped_likelihood_ratio = F.zeros_like(likelihood_ratio)
# for each batch, calculate expectation of scaled_advantages across time steps,
# but want code to work with data without time step too, so reshape to add timestep if doesn't exist.
scaled_advantages_w_time = scaled_advantages.reshape(shape=(0, -1))
expected_scaled_advantages = scaled_advantages_w_time.mean(axis=1)
# want to maximize expected_scaled_advantages, add minus so can minimize.
surrogate_loss = (-expected_scaled_advantages * self.weight).mean()
return [
(surrogate_loss, LOSS_OUT_TYPE_LOSS),
(entropy_loss + kl_div_loss, LOSS_OUT_TYPE_REGULARIZATION),
(kl_div_loss, LOSS_OUT_TYPE_KL),
(entropy_loss, LOSS_OUT_TYPE_ENTROPY),
(likelihood_ratio, LOSS_OUT_TYPE_LIKELIHOOD_RATIO),
(clipped_likelihood_ratio, LOSS_OUT_TYPE_CLIPPED_LIKELIHOOD_RATIO)
]
class PPOHead(Head):
def __init__(self,
agent_parameters: AgentParameters,
spaces: SpacesDefinition,
network_name: str,
head_type_idx: int=0,
loss_weight: float=1.,
is_local: bool=True,
activation_function: str='tanh',
dense_layer: None=None) -> None:
"""
Head block for Proximal Policy Optimization, to calculate probabilities for each action given middleware
representation of the environment state.
:param agent_parameters: containing algorithm parameters such as clip_likelihood_ratio_using_epsilon
and beta_entropy.
:param spaces: containing action spaces used for defining size of network output.
:param network_name: name of head network. currently unused.
:param head_type_idx: index of head network. currently unused.
:param loss_weight: scalar used to adjust relative weight of loss (if using this loss with others).
:param is_local: flag to denote if network is local. currently unused.
:param activation_function: activation function to use between layers. currently unused.
:param dense_layer: type of dense layer to use in network. currently unused.
"""
super().__init__(agent_parameters, spaces, network_name, head_type_idx, loss_weight, is_local, activation_function,
dense_layer=dense_layer)
self.return_type = ActionProbabilities
self.clip_likelihood_ratio_using_epsilon = agent_parameters.algorithm.clip_likelihood_ratio_using_epsilon
self.beta = agent_parameters.algorithm.beta_entropy
self.use_kl_regularization = agent_parameters.algorithm.use_kl_regularization
if self.use_kl_regularization:
self.initial_kl_coefficient = agent_parameters.algorithm.initial_kl_coefficient
self.kl_cutoff = 2 * agent_parameters.algorithm.target_kl_divergence
self.high_kl_penalty_coefficient = agent_parameters.algorithm.high_kl_penalty_coefficient
else:
self.initial_kl_coefficient, self.kl_cutoff, self.high_kl_penalty_coefficient = (None, None, None)
self._loss = []
if isinstance(self.spaces.action, DiscreteActionSpace):
self.net = DiscretePPOHead(num_actions=len(self.spaces.action.actions))
elif isinstance(self.spaces.action, BoxActionSpace):
self.net = ContinuousPPOHead(num_actions=self.spaces.action.shape[0])
else:
raise ValueError("Only discrete or continuous action spaces are supported for PPO.")
def hybrid_forward(self,
F: ModuleType,
x: nd_sym_type) -> nd_sym_type:
"""
:param (mx.nd or mx.sym) F: backend api (mx.sym if block has been hybridized).
:param x: middleware embedding
:return: policy parameters/probabilities
"""
return self.net(x)
def loss(self) -> mx.gluon.loss.Loss:
"""
Specifies loss block to be used for this policy head.
:return: loss block (can be called as function) for action probabilities returned by this policy network.
"""
if isinstance(self.spaces.action, DiscreteActionSpace):
loss = ClippedPPOLossDiscrete(len(self.spaces.action.actions),
self.clip_likelihood_ratio_using_epsilon,
self.beta,
self.use_kl_regularization, self.initial_kl_coefficient,
self.kl_cutoff, self.high_kl_penalty_coefficient,
self.loss_weight)
elif isinstance(self.spaces.action, BoxActionSpace):
loss = ClippedPPOLossContinuous(self.spaces.action.shape[0],
self.clip_likelihood_ratio_using_epsilon,
self.beta,
self.use_kl_regularization, self.initial_kl_coefficient,
self.kl_cutoff, self.high_kl_penalty_coefficient,
self.loss_weight)
else:
raise ValueError("Only discrete or continuous action spaces are supported for PPO.")
loss.initialize()
# set a property so can assign_kl_coefficient in future,
# make a list, otherwise it would be added as a child of Head Block (due to type check)
self._loss = [loss]
return loss
@property
def kl_divergence(self):
return self.head_type_idx, LOSS_OUT_TYPE_KL
@property
def entropy(self):
return self.head_type_idx, LOSS_OUT_TYPE_ENTROPY
@property
def likelihood_ratio(self):
return self.head_type_idx, LOSS_OUT_TYPE_LIKELIHOOD_RATIO
@property
def clipped_likelihood_ratio(self):
return self.head_type_idx, LOSS_OUT_TYPE_CLIPPED_LIKELIHOOD_RATIO
def assign_kl_coefficient(self, kl_coefficient: float) -> None:
self._loss[0].kl_coefficient.set_data(mx.nd.array((kl_coefficient,)))
```
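Both `ClippedPPOLossDiscrete` and `ClippedPPOLossContinuous` share the same surrogate objective: exponentiate the difference of log probabilities to get the likelihood ratio, clip it into `[1 - epsilon, 1 + epsilon]`, scale both versions by the advantage, and take the element-wise minimum before negating the mean. A framework-free NumPy sketch of that core computation follows; the batch values and epsilon are illustrative only, `clip_param_rescaler` is taken as 1, and the KL and entropy terms are omitted.
```python
import numpy as np

# Hedged sketch of the clipped PPO surrogate on log-probabilities; values are illustrative.
epsilon = 0.2                                   # plays the role of clip_likelihood_ratio_using_epsilon
log_probs_new = np.array([-0.9, -1.2, -0.4])    # log pi_new(a|s) for a small batch
log_probs_old = np.array([-1.0, -1.0, -1.0])    # log pi_old(a|s)
advantages = np.array([1.0, -0.5, 2.0])

# working with log probs, so subtract first, then exponentiate (same as dividing probabilities)
ratio = np.exp(log_probs_new - log_probs_old)
clipped_ratio = np.clip(ratio, 1.0 - epsilon, 1.0 + epsilon)

# element-wise minimum of the unclipped and clipped scaled advantages (pessimistic bound)
scaled_advantages = np.minimum(ratio * advantages, clipped_ratio * advantages)

# maximize the expected scaled advantage, so the loss is the negative mean
surrogate_loss = -scaled_advantages.mean()
```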
#### File: mxnet_components/middlewares/fc_middleware.py
```python
from rl_coach.architectures.mxnet_components.layers import Dense
from rl_coach.architectures.mxnet_components.middlewares.middleware import Middleware
from rl_coach.architectures.middleware_parameters import FCMiddlewareParameters
from rl_coach.base_parameters import MiddlewareScheme
class FCMiddleware(Middleware):
def __init__(self, params: FCMiddlewareParameters):
"""
        FCMiddleware or Fully-Connected Middleware can be used in the middle part of the network. It takes the
        embeddings from the input embedders, after they have been aggregated by some method (for example, concatenation),
        and passes them through a neural network which can be customized but is shared between the heads of the network.
:param params: parameters object containing batchnorm, activation_function and dropout properties.
"""
super(FCMiddleware, self).__init__(params)
@property
def schemes(self) -> dict:
"""
        Schemes are the pre-defined network architectures of various depths and complexities that can be used for the
        Middleware. They are used to create the middleware block when FCMiddleware is initialised.
:return: dictionary of schemes, with key of type MiddlewareScheme enum and value being list of mxnet.gluon.Block.
"""
return {
MiddlewareScheme.Empty:
[],
# Use for PPO
MiddlewareScheme.Shallow:
[
Dense(units=64)
],
# Use for DQN
MiddlewareScheme.Medium:
[
Dense(units=512)
],
MiddlewareScheme.Deep:
[
Dense(units=128),
Dense(units=128),
Dense(units=128)
]
}
```
#### File: architectures/mxnet_components/utils.py
```python
import inspect
from typing import Any, Dict, Generator, Iterable, List, Tuple, Union
from types import ModuleType
import mxnet as mx
from mxnet import gluon, nd
from mxnet.ndarray import NDArray
import numpy as np
from rl_coach.core_types import GradientClippingMethod
nd_sym_type = Union[mx.nd.NDArray, mx.sym.Symbol]
def to_mx_ndarray(data: Union[list, tuple, np.ndarray, NDArray, int, float], ctx: mx.Context=None) ->\
Union[List[NDArray], Tuple[NDArray], NDArray]:
"""
Convert data to mx.nd.NDArray. Data can be a list or tuple of np.ndarray, int, or float or
it can be np.ndarray, int, or float
:param data: input data to be converted
:param ctx: context of the data (CPU, GPU0, GPU1, etc.)
:return: converted output data
"""
if isinstance(data, list):
data = [to_mx_ndarray(d, ctx=ctx) for d in data]
elif isinstance(data, tuple):
data = tuple(to_mx_ndarray(d, ctx=ctx) for d in data)
elif isinstance(data, np.ndarray):
data = nd.array(data, ctx=ctx)
elif isinstance(data, NDArray):
assert data.context == ctx
pass
elif isinstance(data, int) or isinstance(data, float):
data = nd.array([data], ctx=ctx)
else:
raise TypeError('Unsupported data type: {}'.format(type(data)))
return data
def asnumpy_or_asscalar(data: Union[NDArray, list, tuple]) -> Union[np.ndarray, np.number, list, tuple]:
"""
Convert NDArray (or list or tuple of NDArray) to numpy. If shape is (1,), then convert to scalar instead.
NOTE: This behavior is consistent with tensorflow
:param data: NDArray or list or tuple of NDArray
:return: data converted to numpy ndarray or to numpy scalar
"""
if isinstance(data, list):
data = [asnumpy_or_asscalar(d) for d in data]
elif isinstance(data, tuple):
data = tuple(asnumpy_or_asscalar(d) for d in data)
elif isinstance(data, NDArray):
data = data.asscalar() if data.shape == (1,) else data.asnumpy()
else:
raise TypeError('Unsupported data type: {}'.format(type(data)))
return data
def global_norm(arrays: Union[Generator[NDArray, NDArray, NDArray], List[NDArray], Tuple[NDArray]]) -> NDArray:
"""
Calculate global norm on list or tuple of NDArrays using this formula:
`global_norm = sqrt(sum([l2norm(p)**2 for p in parameters]))`
:param arrays: list or tuple of parameters to calculate global norm on
:return: single-value NDArray
"""
def _norm(array):
if array.stype == 'default':
x = array.reshape((-1,))
return nd.dot(x, x)
return array.norm().square()
total_norm = nd.add_n(*[_norm(arr) for arr in arrays])
total_norm = nd.sqrt(total_norm)
return total_norm
def split_outputs_per_head(outputs: Tuple[NDArray], heads: list) -> List[List[NDArray]]:
"""
Split outputs into outputs per head
:param outputs: list of all outputs
:param heads: list of all heads
:return: list of outputs for each head
"""
head_outputs = []
for h in heads:
head_outputs.append(list(outputs[:h.num_outputs]))
outputs = outputs[h.num_outputs:]
assert len(outputs) == 0
return head_outputs
def split_targets_per_loss(targets: list, losses: list) -> List[list]:
"""
Splits targets into targets per loss
:param targets: list of all targets (typically numpy ndarray)
:param losses: list of all losses
:return: list of targets for each loss
"""
loss_targets = list()
for l in losses:
loss_data_len = len(l.input_schema.targets)
assert len(targets) >= loss_data_len, "Data length doesn't match schema"
loss_targets.append(targets[:loss_data_len])
targets = targets[loss_data_len:]
assert len(targets) == 0
return loss_targets
def get_loss_agent_inputs(inputs: Dict[str, np.ndarray], head_type_idx: int, loss: Any) -> List[np.ndarray]:
"""
Collects all inputs with prefix 'output_<head_idx>_' and matches them against agent_inputs in loss input schema.
:param inputs: list of all agent inputs
:param head_type_idx: head-type index of the corresponding head
:param loss: corresponding loss
:return: list of agent inputs for this loss. This list matches the length in loss input schema.
"""
loss_inputs = list()
for k in sorted(inputs.keys()):
if k.startswith('output_{}_'.format(head_type_idx)):
loss_inputs.append(inputs[k])
# Enforce that number of inputs for head_type are the same as agent_inputs specified by loss input_schema
assert len(loss_inputs) == len(loss.input_schema.agent_inputs), "agent_input length doesn't match schema"
return loss_inputs
def align_loss_args(
head_outputs: List[NDArray],
agent_inputs: List[np.ndarray],
targets: List[np.ndarray],
loss: Any) -> List[np.ndarray]:
"""
Creates a list of arguments from head_outputs, agent_inputs, and targets aligned with parameters of
loss.loss_forward() based on their name in loss input_schema
:param head_outputs: list of all head_outputs for this loss
:param agent_inputs: list of all agent_inputs for this loss
:param targets: list of all targets for this loss
:param loss: corresponding loss
:return: list of arguments in correct order to be passed to loss
"""
arg_list = list()
schema = loss.input_schema
assert len(schema.head_outputs) == len(head_outputs)
assert len(schema.agent_inputs) == len(agent_inputs)
assert len(schema.targets) == len(targets)
prev_found = True
for arg_name in inspect.getfullargspec(loss.loss_forward).args[2:]: # First two args are self and F
found = False
for schema_list, data in [(schema.head_outputs, head_outputs),
(schema.agent_inputs, agent_inputs),
(schema.targets, targets)]:
try:
arg_list.append(data[schema_list.index(arg_name)])
found = True
break
except ValueError:
continue
assert not found or prev_found, "missing arguments detected!"
prev_found = found
return arg_list
def to_tuple(data: Union[tuple, list, Any]):
"""
If input is list, it is converted to tuple. If it's tuple, it is returned untouched. Otherwise
returns a single-element tuple of the data.
:return: tuple-ified data
"""
if isinstance(data, tuple):
pass
elif isinstance(data, list):
data = tuple(data)
else:
data = (data,)
return data
def to_list(data: Union[tuple, list, Any]):
"""
If input is tuple, it is converted to list. If it's list, it is returned untouched. Otherwise
returns a single-element list of the data.
:return: list-ified data
"""
if isinstance(data, list):
pass
elif isinstance(data, tuple):
data = list(data)
else:
data = [data]
return data
def loss_output_dict(output: List[NDArray], schema: List[str]) -> Dict[str, List[NDArray]]:
"""
    Creates a dictionary for loss output based on the output schema. If two output values have the same
    type string in the schema, they are appended to the same dictionary entry.
:param output: list of output values
:param schema: list of type-strings for output values
:return: dictionary of keyword to list of NDArrays
"""
assert len(output) == len(schema)
output_dict = dict()
for name, val in zip(schema, output):
if name in output_dict:
output_dict[name].append(val)
else:
output_dict[name] = [val]
return output_dict
def clip_grad(
grads: Union[Generator[NDArray, NDArray, NDArray], List[NDArray], Tuple[NDArray]],
clip_method: GradientClippingMethod,
clip_val: float,
inplace=True) -> List[NDArray]:
"""
    Clip gradient values (in place by default)
:param grads: gradients to be clipped
:param clip_method: clipping method
:param clip_val: clipping value. Interpreted differently depending on clipping method.
:param inplace: modify grads if True, otherwise create NDArrays
:return: clipped gradients
"""
output = list(grads) if inplace else list(nd.empty(g.shape) for g in grads)
if clip_method == GradientClippingMethod.ClipByGlobalNorm:
norm_unclipped_grads = global_norm(grads)
scale = clip_val / (norm_unclipped_grads.asscalar() + 1e-8) # todo: use branching operators?
if scale < 1.0:
for g, o in zip(grads, output):
nd.broadcast_mul(g, nd.array([scale]), out=o)
elif clip_method == GradientClippingMethod.ClipByValue:
for g, o in zip(grads, output):
g.clip(-clip_val, clip_val, out=o)
elif clip_method == GradientClippingMethod.ClipByNorm:
for g, o in zip(grads, output):
nd.broadcast_mul(g, nd.minimum(1.0, clip_val / (g.norm() + 1e-8)), out=o)
else:
raise KeyError('Unsupported gradient clipping method')
return output
def hybrid_clip(F: ModuleType, x: nd_sym_type, clip_lower: nd_sym_type, clip_upper: nd_sym_type) -> nd_sym_type:
"""
Apply clipping to input x between clip_lower and clip_upper.
Added because F.clip doesn't support clipping bounds that are mx.nd.NDArray or mx.sym.Symbol.
:param F: backend api, either `mxnet.nd` or `mxnet.sym` (if block has been hybridized).
:param x: input data
:param clip_lower: lower bound used for clipping, should be of shape (1,)
:param clip_upper: upper bound used for clipping, should be of shape (1,)
:return: clipped data
"""
x_clip_lower = broadcast_like(F, clip_lower, x)
x_clip_upper = broadcast_like(F, clip_upper, x)
x_clipped = F.minimum(F.maximum(x, x_clip_lower), x_clip_upper)
return x_clipped
def broadcast_like(F: ModuleType, x: nd_sym_type, y: nd_sym_type) -> nd_sym_type:
"""
    Implementation of broadcast_like using broadcast_mul, because ONNX doesn't support broadcast_like.
:param F: backend api, either `mxnet.nd` or `mxnet.sym` (if block has been hybridized).
:param x: input to be broadcast
:param y: tensor to broadcast x like
:return: broadcast x
"""
return F.broadcast_mul(x, (y * 0) + 1)
def get_mxnet_activation_name(activation_name: str):
"""
Convert coach activation name to mxnet specific activation name
    :param activation_name: name of the activation in coach
:return: name of the activation in mxnet
"""
activation_functions = {
'relu': 'relu',
'tanh': 'tanh',
'sigmoid': 'sigmoid',
# FIXME Add other activations
# 'elu': tf.nn.elu,
'selu': 'softrelu',
# 'leaky_relu': tf.nn.leaky_relu,
'none': None
}
assert activation_name in activation_functions, \
"Activation function must be one of the following {}. instead it was: {}".format(
activation_functions.keys(), activation_name)
return activation_functions[activation_name]
class OnnxHandlerBlock(object):
"""
Helper base class for gluon blocks that must behave differently for ONNX export forward pass
"""
def __init__(self):
self._onnx = False
def enable_onnx(self):
self._onnx = True
def disable_onnx(self):
self._onnx = False
class ScopedOnnxEnable(object):
"""
Helper scoped ONNX enable class
"""
def __init__(self, net: gluon.HybridBlock):
self._onnx_handlers = self._get_onnx_handlers(net)
def __enter__(self):
for b in self._onnx_handlers:
b.enable_onnx()
def __exit__(self, exc_type, exc_val, exc_tb):
for b in self._onnx_handlers:
b.disable_onnx()
@staticmethod
def _get_onnx_handlers(block: gluon.HybridBlock) -> List[OnnxHandlerBlock]:
"""
        Iterates through all child blocks and returns those that are instances of OnnxHandlerBlock
:return: list of OnnxHandlerBlock child blocks
"""
handlers = list()
if isinstance(block, OnnxHandlerBlock):
handlers.append(block)
for child_block in block._children.values():
handlers += ScopedOnnxEnable._get_onnx_handlers(child_block)
return handlers
```
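The gradient clipping helper above is typically applied to a network's parameter gradients between the backward pass and the optimizer step. Below is a minimal usage sketch, not code from the repository; the import path of `GradientClippingMethod` is assumed to be `rl_coach.base_parameters` and may differ in your version.
```python
from mxnet import autograd, gluon, nd
from rl_coach.base_parameters import GradientClippingMethod  # assumed import path
from rl_coach.architectures.mxnet_components.utils import clip_grad

net = gluon.nn.Dense(1)
net.initialize()
trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.1})
loss_fn = gluon.loss.L2Loss()
x, y = nd.random.uniform(shape=(8, 4)), nd.random.uniform(shape=(8, 1))
with autograd.record():
    loss = loss_fn(net(x), y)
loss.backward()
# clip every parameter gradient in place by global norm before the parameter update
grads = [p.grad() for p in net.collect_params().values() if p.grad_req != 'null']
clip_grad(grads, GradientClippingMethod.ClipByGlobalNorm, clip_val=1.0, inplace=True)
trainer.step(batch_size=8)
```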
#### File: tensorflow_components/heads/naf_head.py
```python
import tensorflow as tf
from rl_coach.architectures.tensorflow_components.layers import Dense
from rl_coach.architectures.tensorflow_components.heads.head import Head
from rl_coach.base_parameters import AgentParameters
from rl_coach.core_types import QActionStateValue
from rl_coach.spaces import BoxActionSpace
from rl_coach.spaces import SpacesDefinition
class NAFHead(Head):
def __init__(self, agent_parameters: AgentParameters, spaces: SpacesDefinition, network_name: str,
                 head_idx: int = 0, loss_weight: float = 1., is_local: bool = True, activation_function: str = 'relu',
dense_layer=Dense):
super().__init__(agent_parameters, spaces, network_name, head_idx, loss_weight, is_local, activation_function,
dense_layer=dense_layer)
if not isinstance(self.spaces.action, BoxActionSpace):
raise ValueError("NAF works only for continuous action spaces (BoxActionSpace)")
self.name = 'naf_q_values_head'
self.num_actions = self.spaces.action.shape[0]
self.output_scale = self.spaces.action.max_abs_range
self.return_type = QActionStateValue
if agent_parameters.network_wrappers[self.network_name].replace_mse_with_huber_loss:
self.loss_type = tf.losses.huber_loss
else:
self.loss_type = tf.losses.mean_squared_error
def _build_module(self, input_layer):
# NAF
self.action = tf.placeholder(tf.float32, [None, self.num_actions], name="action")
self.input = self.action
# V Head
self.V = self.dense_layer(1)(input_layer, name='V')
# mu Head
mu_unscaled = self.dense_layer(self.num_actions)(input_layer, activation=self.activation_function, name='mu_unscaled')
self.mu = tf.multiply(mu_unscaled, self.output_scale, name='mu')
# A Head
# l_vector is a vector that includes a lower-triangular matrix values
        self.l_vector = self.dense_layer((self.num_actions * (self.num_actions + 1)) // 2)(input_layer, name='l_vector')
# Convert l to a lower triangular matrix and exponentiate its diagonal
i = 0
columns = []
for col in range(self.num_actions):
start_row = col
num_non_zero_elements = self.num_actions - start_row
zeros_column_part = tf.zeros_like(self.l_vector[:, 0:start_row])
diag_element = tf.expand_dims(tf.exp(self.l_vector[:, i]), 1)
non_zeros_non_diag_column_part = self.l_vector[:, (i + 1):(i + num_non_zero_elements)]
columns.append(tf.concat([zeros_column_part, diag_element, non_zeros_non_diag_column_part], axis=1))
i += num_non_zero_elements
self.L = tf.transpose(tf.stack(columns, axis=1), (0, 2, 1))
# P = L*L^T
self.P = tf.matmul(self.L, tf.transpose(self.L, (0, 2, 1)))
# A = -1/2 * (u - mu)^T * P * (u - mu)
action_diff = tf.expand_dims(self.action - self.mu, -1)
a_matrix_form = -0.5 * tf.matmul(tf.transpose(action_diff, (0, 2, 1)), tf.matmul(self.P, action_diff))
self.A = tf.reshape(a_matrix_form, [-1, 1])
# Q Head
self.Q = tf.add(self.V, self.A, name='Q')
self.output = self.Q
def __str__(self):
result = [
"State Value Stream - V",
"\tDense (num outputs = 1)",
"Action Advantage Stream - A",
"\tDense (num outputs = {})".format((self.num_actions * (self.num_actions + 1)) / 2),
"\tReshape to lower triangular matrix L (new size = {} x {})".format(self.num_actions, self.num_actions),
"\tP = L*L^T",
"\tA = -1/2 * (u - mu)^T * P * (u - mu)",
"Action Stream - mu",
"\tDense (num outputs = {})".format(self.num_actions),
"\tActivation (type = {})".format(self.activation_function.__name__),
"\tMultiply (factor = {})".format(self.output_scale),
"State-Action Value Stream - Q",
"\tAdd (V, A)"
]
return '\n'.join(result)
```
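The column-by-column construction of `L` in `_build_module` above is the core of NAF's quadratic advantage term. The following NumPy sketch (single sample, no batch dimension; an illustration, not code from the repository) mirrors the same fill order and shows why `P = L @ L.T` yields a non-positive advantage.
```python
import numpy as np

def build_lower_triangular(l_vector, num_actions):
    # Fill a lower-triangular matrix column by column, exponentiating the diagonal,
    # mirroring the TensorFlow construction above (single sample, no batch dimension).
    L = np.zeros((num_actions, num_actions))
    i = 0
    for col in range(num_actions):
        n = num_actions - col
        L[col, col] = np.exp(l_vector[i])         # exponentiated diagonal element
        L[col + 1:, col] = l_vector[i + 1:i + n]  # below-diagonal elements
        i += n
    return L

num_actions = 3
l_vector = np.random.randn(num_actions * (num_actions + 1) // 2)
L = build_lower_triangular(l_vector, num_actions)
P = L @ L.T                                       # positive semi-definite by construction
u, mu = np.random.randn(num_actions), np.random.randn(num_actions)
A = -0.5 * (u - mu) @ P @ (u - mu)                # advantage term, always <= 0
```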
#### File: rl_coach/exploration_policies/continuous_entropy.py
```python
from rl_coach.exploration_policies.additive_noise import AdditiveNoise, AdditiveNoiseParameters
class ContinuousEntropyParameters(AdditiveNoiseParameters):
@property
def path(self):
return 'rl_coach.exploration_policies.continuous_entropy:ContinuousEntropy'
class ContinuousEntropy(AdditiveNoise):
"""
Continuous entropy is an exploration policy that is actually implemented as part of the network.
The exploration policy class is only a placeholder for choosing this policy. The exploration policy is
implemented by adding a regularization factor to the network loss, which regularizes the entropy of the action.
This exploration policy is only intended for continuous action spaces, and assumes that the entire calculation
is implemented as part of the head.
.. warning::
This exploration policy expects the agent or the network to implement the exploration functionality.
There are only a few heads that actually are relevant and implement the entropy regularization factor.
"""
pass
```
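As a point of reference for the docstring above, entropy regularization for a continuous (diagonal Gaussian) policy usually means subtracting a scaled entropy term from the policy loss. The sketch below is a generic illustration of that idea, not the actual head implementation in rl_coach.
```python
import numpy as np

def gaussian_entropy(log_std):
    # entropy of a diagonal Gaussian policy: sum_i 0.5 * log(2 * pi * e * sigma_i^2)
    return np.sum(0.5 * np.log(2 * np.pi * np.e) + log_std, axis=-1)

def loss_with_entropy_regularization(policy_loss, log_std, beta=0.01):
    # subtracting the mean entropy (weighted by beta) keeps the policy stochastic
    return policy_loss - beta * gaussian_entropy(log_std).mean()
```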
#### File: rl_coach/graph_managers/hrl_graph_manager.py
```python
from typing import List, Union, Tuple
from rl_coach.base_parameters import AgentParameters, VisualizationParameters, TaskParameters, \
PresetValidationParameters
from rl_coach.core_types import EnvironmentSteps
from rl_coach.environments.environment import EnvironmentParameters, Environment
from rl_coach.graph_managers.graph_manager import GraphManager, ScheduleParameters
from rl_coach.level_manager import LevelManager
from rl_coach.utils import short_dynamic_import
class HRLGraphManager(GraphManager):
"""
A simple HRL graph manager creates a deep hierarchy with a single composite agent per hierarchy level, and a single
environment which is interacted with.
"""
def __init__(self, agents_params: List[AgentParameters], env_params: EnvironmentParameters,
schedule_params: ScheduleParameters, vis_params: VisualizationParameters,
consecutive_steps_to_run_each_level: Union[EnvironmentSteps, List[EnvironmentSteps]],
preset_validation_params: PresetValidationParameters = PresetValidationParameters()):
"""
:param agents_params: the parameters of all the agents in the hierarchy starting from the top level of the
hierarchy to the bottom level
:param env_params: the parameters of the environment
:param schedule_params: the parameters for scheduling the graph
:param vis_params: the visualization parameters
        :param consecutive_steps_to_run_each_level: the number of time steps that each level is run.
for example, when the top level gives the bottom level a goal, the bottom level can act for
consecutive_steps_to_run_each_level steps and try to reach that goal. This is expected to be either
an EnvironmentSteps which will be used for all levels, or an EnvironmentSteps for each level as a list.
"""
super().__init__('hrl_graph', schedule_params, vis_params)
self.agents_params = agents_params
self.env_params = env_params
self.preset_validation_params = preset_validation_params
        if isinstance(consecutive_steps_to_run_each_level, list):
            if len(consecutive_steps_to_run_each_level) != len(self.agents_params):
                raise ValueError("If the consecutive_steps_to_run_each_level is given as a list, it should match "
                                 "the number of levels in the hierarchy. Alternatively, it is possible to use a single "
                                 "value for all the levels, by passing an EnvironmentSteps")
            self.consecutive_steps_to_run_each_level = consecutive_steps_to_run_each_level
elif isinstance(consecutive_steps_to_run_each_level, EnvironmentSteps):
self.consecutive_steps_to_run_each_level = [consecutive_steps_to_run_each_level] * len(self.agents_params)
for agent_params in agents_params:
agent_params.visualization = self.visualization_parameters
if agent_params.input_filter is None:
agent_params.input_filter = self.env_params.default_input_filter()
if agent_params.output_filter is None:
agent_params.output_filter = self.env_params.default_output_filter()
if len(self.agents_params) < 2:
raise ValueError("The HRL graph manager must receive the agent parameters for at least two levels of the "
"hierarchy. Otherwise, use the basic RL graph manager.")
def _create_graph(self, task_parameters: TaskParameters) -> Tuple[List[LevelManager], List[Environment]]:
self.env_params.seed = task_parameters.seed
env = short_dynamic_import(self.env_params.path)(**self.env_params.__dict__,
visualization_parameters=self.visualization_parameters)
for agent_params in self.agents_params:
agent_params.task_parameters = task_parameters
# we need to build the hierarchy in reverse order (from the bottom up) in order for the spaces of each level
# to be known
level_managers = []
current_env = env
# out_action_space = env.action_space
for level_idx, agent_params in reversed(list(enumerate(self.agents_params))):
# TODO: the code below is specific for HRL on observation scale
# in action space
# if level_idx == 0:
# # top level agents do not get directives
# in_action_space = None
# else:
# pass
# attention_size = (env.state_space['observation'].shape - 1)//4
# in_action_space = AttentionActionSpace(shape=2, low=0, high=env.state_space['observation'].shape - 1,
# forced_attention_size=attention_size)
# agent_params.output_filter.action_filters['masking'].set_masking(0, attention_size)
agent_params.name = "agent_{}".format(level_idx)
agent_params.is_a_highest_level_agent = level_idx == 0
agent = short_dynamic_import(agent_params.path)(agent_params)
level_manager = LevelManager(
agents=agent,
environment=current_env,
real_environment=env,
steps_limit=self.consecutive_steps_to_run_each_level[level_idx],
should_reset_agent_state_after_time_limit_passes=level_idx > 0,
name="level_{}".format(level_idx)
)
current_env = level_manager
level_managers.insert(0, level_manager)
# out_action_space = in_action_space
return level_managers, [env]
```
#### File: memories/backend/memory.py
```python
class MemoryBackendParameters(object):
    def __init__(self, store_type, orchestrator_type, run_type, deployed: bool = False):
self.store_type = store_type
self.orchestrator_type = orchestrator_type
self.run_type = run_type
self.deployed = deployed
class MemoryBackend(object):
def __init__(self, params: MemoryBackendParameters):
pass
    def deploy(self):
        raise NotImplementedError("Not yet implemented")
    def get_endpoint(self):
        raise NotImplementedError("Not yet implemented")
    def undeploy(self):
        raise NotImplementedError("Not yet implemented")
    def sample(self, size: int):
        raise NotImplementedError("Not yet implemented")
    def store(self, obj):
        raise NotImplementedError("Not yet implemented")
    def store_episode(self, obj):
        raise NotImplementedError("Not yet implemented")
    def fetch(self, num_steps=0):
        raise NotImplementedError("Not yet implemented")
```
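`MemoryBackend` above is only an interface; concrete backends (for example a Redis-backed one) fill in these methods. A toy in-process sketch, purely for illustration and not part of rl_coach, could look like this:
```python
import random

class InProcessMemoryBackend(MemoryBackend):
    """Toy backend that keeps transitions in a local list (illustration only)."""
    def __init__(self, params: MemoryBackendParameters):
        super().__init__(params)
        self._transitions = []

    def deploy(self):
        return True  # nothing to deploy for an in-process store

    def store(self, obj):
        self._transitions.append(obj)

    def store_episode(self, obj):
        self._transitions.extend(obj)

    def sample(self, size: int):
        return random.sample(self._transitions, min(size, len(self._transitions)))

    def fetch(self, num_steps=0):
        yield from self._transitions
```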
#### File: memories/non_episodic/transition_collection.py
```python
from rl_coach.core_types import Transition
class TransitionCollection(object):
"""
    Simple Python implementation of the transition collection that non-episodic
    memories are constructed on top of.
"""
def __init__(self):
super(TransitionCollection, self).__init__()
def append(self, transition):
pass
def extend(self, transitions):
for transition in transitions:
self.append(transition)
def __len__(self):
pass
    def __delitem__(self, key: slice):
# NOTE: the only slice used is the form: slice(None, n)
# NOTE: if it is easier, what we really want here is the ability to
# constrain the size of the collection. as new transitions are added,
# old transitions can be removed to maintain a maximum collection size.
pass
def __getitem__(self, key: int):
# NOTE: we can switch to a method which fetches multiple items at a time
# if that would significantly improve performance
pass
def __iter__(self):
# this is not high priority
pass
```
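The notes in the stub above describe a size-capped collection that evicts its oldest transitions as new ones arrive. A `collections.deque` with `maxlen` gives exactly that behaviour; the sketch below illustrates the intended semantics and is not code from the repository.
```python
from collections import deque

class BoundedTransitionCollection:
    """Minimal size-bounded transition collection (illustration only)."""
    def __init__(self, max_size=10000):
        self._transitions = deque(maxlen=max_size)  # oldest entries are evicted automatically

    def append(self, transition):
        self._transitions.append(transition)

    def extend(self, transitions):
        self._transitions.extend(transitions)

    def __len__(self):
        return len(self._transitions)

    def __getitem__(self, key):
        return self._transitions[key]

    def __iter__(self):
        return iter(self._transitions)
```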
#### File: mxnet_components/heads/test_head.py
```python
import mxnet as mx
import numpy as np
import os
import pytest
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
from rl_coach.architectures.mxnet_components.heads.head import NormalizedRSSInitializer
@pytest.mark.unit_test
def test_normalized_rss_initializer():
target_rss = 0.5
units = 10
dense = mx.gluon.nn.Dense(units=units, weight_initializer=NormalizedRSSInitializer(target_rss))
dense.initialize()
input_data = mx.random.uniform(shape=(25, 5))
output_data = dense(input_data)
weights = dense.weight.data()
assert weights.shape == (10, 5)
rss = weights.square().sum(axis=1).sqrt()
np.testing.assert_almost_equal(rss.asnumpy(), np.tile(target_rss, units))
```
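The assertion in the test above pins down what `NormalizedRSSInitializer` is expected to produce: every output unit's weight row has a fixed root-sum-of-squares. A plain NumPy sketch of that normalization (an illustration, not the initializer's actual implementation):
```python
import numpy as np

def normalized_rss_init(shape, target_rss=0.5, rng=np.random):
    # draw random weights, then rescale each row to the target root-sum-of-squares
    w = rng.standard_normal(shape)
    rss = np.sqrt(np.square(w).sum(axis=1, keepdims=True))
    return w * (target_rss / rss)

w = normalized_rss_init((10, 5), target_rss=0.5)
np.testing.assert_allclose(np.sqrt(np.square(w).sum(axis=1)), 0.5)
```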
#### File: mxnet_components/heads/test_ppo_v_head.py
```python
import mxnet as mx
import os
import pytest
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
from rl_coach.architectures.mxnet_components.heads.ppo_v_head import PPOVHead, PPOVHeadLoss
from rl_coach.agents.clipped_ppo_agent import ClippedPPOAlgorithmParameters, ClippedPPOAgentParameters
from rl_coach.spaces import SpacesDefinition, DiscreteActionSpace
@pytest.mark.unit_test
def test_ppo_v_head_loss_batch():
loss_fn = PPOVHeadLoss(clip_likelihood_ratio_using_epsilon=0.1)
total_return = mx.nd.array((5, -3, 0))
old_policy_values = mx.nd.array((3, -1, -1))
new_policy_values_worse = mx.nd.array((2, 0, -1))
new_policy_values_better = mx.nd.array((4, -2, -1))
loss_worse = loss_fn(new_policy_values_worse, old_policy_values, total_return)
loss_better = loss_fn(new_policy_values_better, old_policy_values, total_return)
assert len(loss_worse) == 1 # (LOSS)
loss_worse_val = loss_worse[0]
assert loss_worse_val.ndim == 1
assert loss_worse_val.shape[0] == 1
assert len(loss_better) == 1 # (LOSS)
loss_better_val = loss_better[0]
assert loss_better_val.ndim == 1
assert loss_better_val.shape[0] == 1
assert loss_worse_val > loss_better_val
@pytest.mark.unit_test
def test_ppo_v_head_loss_batch_time():
loss_fn = PPOVHeadLoss(clip_likelihood_ratio_using_epsilon=0.1)
total_return = mx.nd.array(((3, 1, 1, 0),
(1, 0, 0, 1),
(3, 0, 1, 0)))
old_policy_values = mx.nd.array(((2, 1, 1, 0),
(1, 0, 0, 1),
(0, 0, 1, 0)))
new_policy_values_worse = mx.nd.array(((2, 1, 1, 0),
(1, 0, 0, 1),
(2, 0, 1, 0)))
new_policy_values_better = mx.nd.array(((3, 1, 1, 0),
(1, 0, 0, 1),
(2, 0, 1, 0)))
loss_worse = loss_fn(new_policy_values_worse, old_policy_values, total_return)
loss_better = loss_fn(new_policy_values_better, old_policy_values, total_return)
assert len(loss_worse) == 1 # (LOSS)
loss_worse_val = loss_worse[0]
assert loss_worse_val.ndim == 1
assert loss_worse_val.shape[0] == 1
assert len(loss_better) == 1 # (LOSS)
loss_better_val = loss_better[0]
assert loss_better_val.ndim == 1
assert loss_better_val.shape[0] == 1
assert loss_worse_val > loss_better_val
@pytest.mark.unit_test
def test_ppo_v_head_loss_weight():
total_return = mx.nd.array((5, -3, 0))
old_policy_values = mx.nd.array((3, -1, -1))
new_policy_values = mx.nd.array((4, -2, -1))
loss_fn = PPOVHeadLoss(clip_likelihood_ratio_using_epsilon=0.2, weight=1)
loss = loss_fn(new_policy_values, old_policy_values, total_return)
loss_fn_weighted = PPOVHeadLoss(clip_likelihood_ratio_using_epsilon=0.2, weight=0.5)
loss_weighted = loss_fn_weighted(new_policy_values, old_policy_values, total_return)
assert loss[0].sum() == loss_weighted[0].sum() * 2
@pytest.mark.unit_test
def test_ppo_v_head():
agent_parameters = ClippedPPOAgentParameters()
action_space = DiscreteActionSpace(num_actions=5)
spaces = SpacesDefinition(state=None, goal=None, action=action_space, reward=None)
value_net = PPOVHead(agent_parameters=agent_parameters,
spaces=spaces,
network_name="test_ppo_v_head")
value_net.initialize()
batch_size = 15
middleware_data = mx.nd.random.uniform(shape=(batch_size, 100))
values = value_net(middleware_data)
assert values.ndim == 1 # (batch_size)
assert values.shape[0] == batch_size
```
#### File: mxnet_components/heads/test_q_head.py
```python
import mxnet as mx
import os
import pytest
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
from rl_coach.architectures.mxnet_components.heads.q_head import QHead, QHeadLoss
from rl_coach.agents.clipped_ppo_agent import ClippedPPOAgentParameters
from rl_coach.spaces import SpacesDefinition, DiscreteActionSpace
@pytest.mark.unit_test
def test_q_head_loss():
loss_fn = QHeadLoss()
# example with batch_size of 3, and num_actions of 2
target_q_values = mx.nd.array(((3, 5), (-1, -2), (0, 2)))
pred_q_values_worse = mx.nd.array(((6, 5), (-1, -2), (0, 2)))
pred_q_values_better = mx.nd.array(((4, 5), (-2, -2), (1, 2)))
loss_worse = loss_fn(pred_q_values_worse, target_q_values)
loss_better = loss_fn(pred_q_values_better, target_q_values)
assert len(loss_worse) == 1 # (LOSS)
loss_worse_val = loss_worse[0]
assert loss_worse_val.ndim == 1
assert loss_worse_val.shape[0] == 1
assert len(loss_better) == 1 # (LOSS)
loss_better_val = loss_better[0]
assert loss_better_val.ndim == 1
assert loss_better_val.shape[0] == 1
assert loss_worse_val > loss_better_val
@pytest.mark.unit_test
def test_v_head_loss_weight():
target_q_values = mx.nd.array(((3, 5), (-1, -2), (0, 2)))
pred_q_values = mx.nd.array(((4, 5), (-2, -2), (1, 2)))
loss_fn = QHeadLoss()
loss = loss_fn(pred_q_values, target_q_values)
loss_fn_weighted = QHeadLoss(weight=0.5)
loss_weighted = loss_fn_weighted(pred_q_values, target_q_values)
assert loss[0] == loss_weighted[0]*2
@pytest.mark.unit_test
def test_ppo_v_head():
agent_parameters = ClippedPPOAgentParameters()
num_actions = 5
action_space = DiscreteActionSpace(num_actions=num_actions)
spaces = SpacesDefinition(state=None, goal=None, action=action_space, reward=None)
value_net = QHead(agent_parameters=agent_parameters,
spaces=spaces,
network_name="test_q_head")
value_net.initialize()
batch_size = 15
middleware_data = mx.nd.random.uniform(shape=(batch_size, 100))
values = value_net(middleware_data)
assert values.ndim == 2 # (batch_size, num_actions)
assert values.shape[0] == batch_size
assert values.shape[1] == num_actions
```
#### File: architectures/mxnet_components/test_utils.py
```python
import pytest
import mxnet as mx
from mxnet import nd
import numpy as np
from rl_coach.architectures.mxnet_components.utils import *
@pytest.mark.unit_test
def test_to_mx_ndarray():
# scalar
assert to_mx_ndarray(1.2) == nd.array([1.2])
# list of one scalar
assert to_mx_ndarray([1.2]) == [nd.array([1.2])]
# list of multiple scalars
assert to_mx_ndarray([1.2, 3.4]) == [nd.array([1.2]), nd.array([3.4])]
# list of lists of scalars
assert to_mx_ndarray([[1.2], [3.4]]) == [[nd.array([1.2])], [nd.array([3.4])]]
# numpy
assert np.array_equal(to_mx_ndarray(np.array([[1.2], [3.4]])).asnumpy(), nd.array([[1.2], [3.4]]).asnumpy())
# tuple
assert to_mx_ndarray(((1.2,), (3.4,))) == ((nd.array([1.2]),), (nd.array([3.4]),))
@pytest.mark.unit_test
def test_asnumpy_or_asscalar():
# scalar float32
assert asnumpy_or_asscalar(nd.array([1.2])) == np.float32(1.2)
# scalar int32
assert asnumpy_or_asscalar(nd.array([2], dtype=np.int32)) == np.int32(2)
# list of one scalar
assert asnumpy_or_asscalar([nd.array([1.2])]) == [np.float32(1.2)]
# list of multiple scalars
assert asnumpy_or_asscalar([nd.array([1.2]), nd.array([3.4])]) == [np.float32([1.2]), np.float32([3.4])]
# list of lists of scalars
assert asnumpy_or_asscalar([[nd.array([1.2])], [nd.array([3.4])]]) == [[np.float32([1.2])], [np.float32([3.4])]]
# tensor
assert np.array_equal(asnumpy_or_asscalar(nd.array([[1.2], [3.4]])), np.array([[1.2], [3.4]], dtype=np.float32))
# tuple
assert (asnumpy_or_asscalar(((nd.array([1.2]),), (nd.array([3.4]),))) ==
((np.array([1.2], dtype=np.float32),), (np.array([3.4], dtype=np.float32),)))
@pytest.mark.unit_test
def test_global_norm():
data = list()
for i in range(1, 6):
data.append(np.ones((i * 10, i * 10)) * i)
gnorm = np.asscalar(np.sqrt(sum([np.sum(np.square(d)) for d in data])))
assert np.isclose(gnorm, global_norm([nd.array(d) for d in data]).asscalar())
@pytest.mark.unit_test
def test_split_outputs_per_head():
class TestHead:
def __init__(self, num_outputs):
self.num_outputs = num_outputs
assert split_outputs_per_head((1, 2, 3, 4), [TestHead(2), TestHead(1), TestHead(1)]) == [[1, 2], [3], [4]]
class DummySchema:
def __init__(self, num_head_outputs, num_agent_inputs, num_targets):
self.head_outputs = ['head_output_{}'.format(i) for i in range(num_head_outputs)]
self.agent_inputs = ['agent_input_{}'.format(i) for i in range(num_agent_inputs)]
self.targets = ['target_{}'.format(i) for i in range(num_targets)]
class DummyLoss:
def __init__(self, num_head_outputs, num_agent_inputs, num_targets):
self.input_schema = DummySchema(num_head_outputs, num_agent_inputs, num_targets)
@pytest.mark.unit_test
def test_split_targets_per_loss():
assert split_targets_per_loss([1, 2, 3, 4],
[DummyLoss(10, 100, 2), DummyLoss(20, 200, 1), DummyLoss(30, 300, 1)]) == \
[[1, 2], [3], [4]]
@pytest.mark.unit_test
def test_get_loss_agent_inputs():
input_dict = {'output_0_0': [1, 2], 'output_0_1': [3, 4], 'output_1_0': [5]}
assert get_loss_agent_inputs(input_dict, 0, DummyLoss(10, 2, 100)) == [[1, 2], [3, 4]]
assert get_loss_agent_inputs(input_dict, 1, DummyLoss(20, 1, 200)) == [[5]]
@pytest.mark.unit_test
def test_align_loss_args():
class TestLossFwd(DummyLoss):
def __init__(self, num_targets, num_agent_inputs, num_head_outputs):
super(TestLossFwd, self).__init__(num_targets, num_agent_inputs, num_head_outputs)
def loss_forward(self, F, head_output_2, head_output_1, agent_input_2, target_0, agent_input_1, param1, param2):
pass
assert align_loss_args([1, 2, 3], [4, 5, 6, 7], [8, 9], TestLossFwd(3, 4, 2)) == [3, 2, 6, 8, 5]
@pytest.mark.unit_test
def test_to_tuple():
assert to_tuple(123) == (123,)
assert to_tuple((1, 2, 3)) == (1, 2, 3)
assert to_tuple([1, 2, 3]) == (1, 2, 3)
@pytest.mark.unit_test
def test_to_list():
assert to_list(123) == [123]
assert to_list((1, 2, 3)) == [1, 2, 3]
assert to_list([1, 2, 3]) == [1, 2, 3]
@pytest.mark.unit_test
def test_loss_output_dict():
assert loss_output_dict([1, 2, 3], ['loss', 'loss', 'reg']) == {'loss': [1, 2], 'reg': [3]}
@pytest.mark.unit_test
def test_clip_grad():
a = np.array([1, 2, -3])
b = np.array([4, 5, -6])
clip = 2
gscale = np.minimum(1.0, clip / np.sqrt(np.sum(np.square(a)) + np.sum(np.square(b))))
for lhs, rhs in zip(clip_grad([nd.array(a), nd.array(b)], GradientClippingMethod.ClipByGlobalNorm, clip_val=clip),
[a, b]):
assert np.allclose(lhs.asnumpy(), rhs * gscale)
for lhs, rhs in zip(clip_grad([nd.array(a), nd.array(b)], GradientClippingMethod.ClipByValue, clip_val=clip),
[a, b]):
assert np.allclose(lhs.asnumpy(), np.clip(rhs, -clip, clip))
for lhs, rhs in zip(clip_grad([nd.array(a), nd.array(b)], GradientClippingMethod.ClipByNorm, clip_val=clip),
[a, b]):
scale = np.minimum(1.0, clip / np.sqrt(np.sum(np.square(rhs))))
assert np.allclose(lhs.asnumpy(), rhs * scale)
@pytest.mark.unit_test
def test_hybrid_clip():
x = mx.nd.array((0.5, 1.5, 2.5))
a = mx.nd.array((1,))
b = mx.nd.array((2,))
clipped = hybrid_clip(F=mx.nd, x=x, clip_lower=a, clip_upper=b)
assert (np.isclose(a=clipped.asnumpy(), b=(1, 1.5, 2))).all()
@pytest.mark.unit_test
def test_broadcast_like():
x = nd.ones((1, 2)) * 10
y = nd.ones((100, 100, 2)) * 20
assert mx.test_utils.almost_equal(x.broadcast_like(y).asnumpy(), broadcast_like(nd, x, y).asnumpy())
@pytest.mark.unit_test
def test_scoped_onnx_enable():
class Counter(object):
def __init__(self):
self._count = 0
def increment(self):
self._count += 1
@property
def count(self):
return self._count
class TempBlock(gluon.HybridBlock, OnnxHandlerBlock):
def __init__(self, counter: Counter):
super(TempBlock, self).__init__()
OnnxHandlerBlock.__init__(self)
self._counter = counter
def hybrid_forward(self, F, x, *args, **kwargs):
if self._onnx:
self._counter.increment()
return x
counter = Counter()
net = gluon.nn.HybridSequential()
for _ in range(10):
net.add(TempBlock(counter))
# ONNX disabled
net(nd.zeros((1,)))
assert counter.count == 0
# ONNX enabled
with ScopedOnnxEnable(net):
net(nd.zeros((1,)))
assert counter.count == 10
```
#### File: filters/observation/test_observation_reduction_by_sub_parts_name_filter.py
```python
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
import pytest
import numpy as np
from rl_coach.filters.observation.observation_reduction_by_sub_parts_name_filter import ObservationReductionBySubPartsNameFilter
from rl_coach.spaces import VectorObservationSpace
from rl_coach.core_types import EnvResponse
from rl_coach.filters.filter import InputFilter
@pytest.mark.unit_test
def test_filter():
# Keep
observation_space = VectorObservationSpace(3, measurements_names=['a', 'b', 'c'])
env_response = EnvResponse(next_state={'observation': np.ones([3])}, reward=0, game_over=False)
reduction_filter = InputFilter()
reduction_filter.add_observation_filter('observation', 'reduce',
ObservationReductionBySubPartsNameFilter(
["a"],
ObservationReductionBySubPartsNameFilter.ReductionMethod.Keep
))
reduction_filter.get_filtered_observation_space('observation', observation_space)
result = reduction_filter.filter(env_response)[0]
unfiltered_observation = env_response.next_state['observation']
filtered_observation = result.next_state['observation']
# make sure the original observation is unchanged
assert unfiltered_observation.shape == (3,)
# validate the shape of the filtered observation
assert filtered_observation.shape == (1,)
# Discard
reduction_filter = InputFilter()
reduction_filter.add_observation_filter('observation', 'reduce',
ObservationReductionBySubPartsNameFilter(
["a"],
ObservationReductionBySubPartsNameFilter.ReductionMethod.Discard
))
reduction_filter.get_filtered_observation_space('observation', observation_space)
result = reduction_filter.filter(env_response)[0]
unfiltered_observation = env_response.next_state['observation']
filtered_observation = result.next_state['observation']
# make sure the original observation is unchanged
assert unfiltered_observation.shape == (3,)
# validate the shape of the filtered observation
assert filtered_observation.shape == (2,)
@pytest.mark.unit_test
def test_get_filtered_observation_space():
# Keep
observation_space = VectorObservationSpace(3, measurements_names=['a', 'b', 'c'])
env_response = EnvResponse(next_state={'observation': np.ones([3])}, reward=0, game_over=False)
reduction_filter = InputFilter()
reduction_filter.add_observation_filter('observation', 'reduce',
ObservationReductionBySubPartsNameFilter(
["a"],
ObservationReductionBySubPartsNameFilter.ReductionMethod.Keep
))
filtered_observation_space = reduction_filter.get_filtered_observation_space('observation', observation_space)
assert np.all(filtered_observation_space.shape == np.array([1]))
assert filtered_observation_space.measurements_names == ['a']
# Discard
observation_space = VectorObservationSpace(3, measurements_names=['a', 'b', 'c'])
env_response = EnvResponse(next_state={'observation': np.ones([3])}, reward=0, game_over=False)
reduction_filter = InputFilter()
reduction_filter.add_observation_filter('observation', 'reduce',
ObservationReductionBySubPartsNameFilter(
["a"],
ObservationReductionBySubPartsNameFilter.ReductionMethod.Discard
))
filtered_observation_space = reduction_filter.get_filtered_observation_space('observation', observation_space)
assert np.all(filtered_observation_space.shape == np.array([2]))
assert filtered_observation_space.measurements_names == ['b', 'c']
```
#### File: filters/observation/test_observation_rgb_to_y_filter.py
```python
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
import pytest
import numpy as np
from rl_coach.filters.observation.observation_rgb_to_y_filter import ObservationRGBToYFilter
from rl_coach.spaces import ObservationSpace
from rl_coach.core_types import EnvResponse
from rl_coach.filters.filter import InputFilter
@pytest.fixture
def rgb_to_y_filter():
rgb_to_y_filter = InputFilter()
rgb_to_y_filter.add_observation_filter('observation', 'rgb_to_y', ObservationRGBToYFilter())
return rgb_to_y_filter
@pytest.mark.unit_test
def test_filter(rgb_to_y_filter):
    # convert RGB observation to grayscale
observation = np.random.rand(20, 30, 3)*255.0
transition = EnvResponse(next_state={'observation': observation}, reward=0, game_over=False)
result = rgb_to_y_filter.filter(transition)[0]
unfiltered_observation = transition.next_state['observation']
filtered_observation = result.next_state['observation']
# make sure the original observation is unchanged
assert unfiltered_observation.shape == (20, 30, 3)
# make sure the filtering is done correctly
assert filtered_observation.shape == (20, 30)
@pytest.mark.unit_test
def test_get_filtered_observation_space(rgb_to_y_filter):
# error on observation space which are not RGB
observation_space = ObservationSpace(np.array([1, 2, 4]), 0, 100)
with pytest.raises(ValueError):
rgb_to_y_filter.get_filtered_observation_space('observation', observation_space)
observation_space = ObservationSpace(np.array([1, 2, 3]), 0, 100)
result = rgb_to_y_filter.get_filtered_observation_space('observation', observation_space)
assert np.all(result.shape == np.array([1, 2]))
```
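For reference, collapsing an RGB observation to a single Y (luma) channel, as tested above, is commonly done with the ITU-R BT.601 weights. Whether `ObservationRGBToYFilter` uses exactly these coefficients is an assumption here, but the shape change from (H, W, 3) to (H, W) is the same:
```python
import numpy as np

rgb = np.random.rand(20, 30, 3) * 255.0
y = rgb @ np.array([0.299, 0.587, 0.114])  # BT.601 luma weights (assumed coefficients)
assert y.shape == (20, 30)
```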
#### File: tests/graph_managers/test_basic_rl_graph_manager.py
```python
import gc
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
import tensorflow as tf
from rl_coach.base_parameters import TaskParameters, DistributedTaskParameters, Frameworks
from rl_coach.core_types import EnvironmentSteps
from rl_coach.utils import get_open_port
from multiprocessing import Process
from tensorflow import logging
import pytest
logging.set_verbosity(logging.INFO)
@pytest.mark.unit_test
def test_basic_rl_graph_manager_with_pong_a3c():
tf.reset_default_graph()
from rl_coach.presets.Atari_A3C import graph_manager
assert graph_manager
graph_manager.env_params.level = "PongDeterministic-v4"
graph_manager.create_graph(task_parameters=TaskParameters(framework_type=Frameworks.tensorflow,
experiment_path="./experiments/test"))
# graph_manager.improve()
@pytest.mark.unit_test
def test_basic_rl_graph_manager_with_pong_nec():
tf.reset_default_graph()
from rl_coach.presets.Atari_NEC import graph_manager
assert graph_manager
graph_manager.env_params.level = "PongDeterministic-v4"
graph_manager.create_graph(task_parameters=TaskParameters(framework_type=Frameworks.tensorflow,
experiment_path="./experiments/test"))
# graph_manager.improve()
@pytest.mark.unit_test
def test_basic_rl_graph_manager_with_cartpole_dqn():
tf.reset_default_graph()
from rl_coach.presets.CartPole_DQN import graph_manager
assert graph_manager
graph_manager.create_graph(task_parameters=TaskParameters(framework_type=Frameworks.tensorflow,
experiment_path="./experiments/test"))
# graph_manager.improve()
# Test for identifying memory leak in restore_checkpoint
@pytest.mark.unit_test
def test_basic_rl_graph_manager_with_cartpole_dqn_and_repeated_checkpoint_restore():
tf.reset_default_graph()
from rl_coach.presets.CartPole_DQN import graph_manager
assert graph_manager
graph_manager.create_graph(task_parameters=TaskParameters(framework_type=Frameworks.tensorflow,
experiment_path="./experiments/test",
apply_stop_condition=True))
# graph_manager.improve()
# graph_manager.evaluate(EnvironmentSteps(1000))
# graph_manager.save_checkpoint()
#
# graph_manager.task_parameters.checkpoint_restore_dir = "./experiments/test/checkpoint"
# while True:
# graph_manager.restore_checkpoint()
# graph_manager.evaluate(EnvironmentSteps(1000))
# gc.collect()
if __name__ == '__main__':
pass
# test_basic_rl_graph_manager_with_pong_a3c()
# test_basic_rl_graph_manager_with_ant_a3c()
# test_basic_rl_graph_manager_with_pong_nec()
# test_basic_rl_graph_manager_with_cartpole_dqn()
# test_basic_rl_graph_manager_with_cartpole_dqn_and_repeated_checkpoint_restore()
#test_basic_rl_graph_manager_multithreaded_with_pong_a3c()
#test_basic_rl_graph_manager_with_doom_basic_dqn()
```
#### File: rl_coach/tests/test_args.py
```python
import subprocess
import time
import rl_coach.tests.utils.args_utils as a_utils
import rl_coach.tests.utils.presets_utils as p_utils
from rl_coach.tests.utils.definitions import Definitions as Def
def test_preset_args(preset_args, flag, clres, start_time=time.time(),
time_limit=Def.TimeOuts.test_time_limit):
""" Test command arguments - the test will check all flags one-by-one."""
p_valid_params = p_utils.validation_params(preset_args)
run_cmd = [
'python3', 'rl_coach/coach.py',
'-p', '{}'.format(preset_args),
'-e', '{}'.format("ExpName_" + preset_args),
]
if p_valid_params.reward_test_level:
lvl = ['-lvl', '{}'.format(p_valid_params.reward_test_level)]
run_cmd.extend(lvl)
# add flags to run command
test_flag = a_utils.add_one_flag_value(flag=flag)
run_cmd.extend(test_flag)
print(str(run_cmd))
# run command
p = subprocess.Popen(run_cmd, stdout=clres.stdout, stderr=clres.stdout)
# validate results
a_utils.validate_args_results(test_flag, clres, p, start_time, time_limit)
# Close process
p.kill()
```
#### File: rl_coach/utilities/carla_dataset_to_replay_buffer.py
```python
import argparse
import os
import sys
import h5py
import numpy as np
from rl_coach.core_types import Transition
from rl_coach.memories.memory import MemoryGranularity
from rl_coach.memories.non_episodic.experience_replay import ExperienceReplay
from rl_coach.utils import ProgressBar, start_shell_command_and_wait
from rl_coach.logger import screen
def maybe_download(dataset_root):
if not dataset_root or not os.path.exists(os.path.join(dataset_root, "AgentHuman")):
screen.log_title("Downloading the CARLA dataset. This might take a while.")
google_drive_download_id = "1hloAeyamYn-H6MfV1dRtY1gJPhkR55sY"
filename_to_save = "datasets/CORL2017ImitationLearningData.tar.gz"
download_command = 'wget --load-cookies /tmp/cookies.txt "https://docs.google.com/uc?export=download&confirm=' \
'$(wget --quiet --save-cookies /tmp/cookies.txt --keep-session-cookies ' \
'--no-check-certificate \"https://docs.google.com/uc?export=download&id={}\" -O- | ' \
'sed -rn \'s/.*confirm=([0-9A-Za-z_]+).*/\\1\\n/p\')&id={}" -O {} && rm -rf /tmp/cookies.txt'\
.format(google_drive_download_id, google_drive_download_id, filename_to_save)
# start downloading and wait for it to finish
start_shell_command_and_wait(download_command)
screen.log_title("Unzipping the dataset")
unzip_command = 'tar -xzf {} --checkpoint=.10000'.format(filename_to_save)
if dataset_root is not None:
unzip_command += " -C {}".format(dataset_root)
if not os.path.exists(dataset_root):
os.makedirs(dataset_root)
start_shell_command_and_wait(unzip_command)
def create_dataset(dataset_root, output_path):
maybe_download(dataset_root)
dataset_root = os.path.join(dataset_root, 'AgentHuman')
train_set_root = os.path.join(dataset_root, 'SeqTrain')
validation_set_root = os.path.join(dataset_root, 'SeqVal')
# training set extraction
memory = ExperienceReplay(max_size=(MemoryGranularity.Transitions, sys.maxsize))
train_set_files = sorted(os.listdir(train_set_root))
print("found {} files".format(len(train_set_files)))
progress_bar = ProgressBar(len(train_set_files))
for file_idx, file in enumerate(train_set_files[:3000]):
progress_bar.update(file_idx, "extracting file {}".format(file))
train_set = h5py.File(os.path.join(train_set_root, file), 'r')
observations = train_set['rgb'][:] # forward camera
measurements = np.expand_dims(train_set['targets'][:, 10], -1) # forward speed
actions = train_set['targets'][:, :3] # steer, gas, brake
high_level_commands = train_set['targets'][:, 24].astype('int') - 2 # follow lane, left, right, straight
file_length = train_set['rgb'].len()
assert train_set['rgb'].len() == train_set['targets'].len()
for transition_idx in range(file_length):
transition = Transition(
state={
'CameraRGB': observations[transition_idx],
'measurements': measurements[transition_idx],
'high_level_command': high_level_commands[transition_idx]
},
action=actions[transition_idx],
reward=0
)
memory.store(transition)
progress_bar.close()
print("Saving pickle file to {}".format(output_path))
memory.save(output_path)
if __name__ == "__main__":
argparser = argparse.ArgumentParser(description=__doc__)
argparser.add_argument('-d', '--dataset_root', help='The path to the CARLA dataset root folder')
argparser.add_argument('-o', '--output_path', help='The path to save the resulting replay buffer',
default='carla_train_set_replay_buffer.p')
args = argparser.parse_args()
create_dataset(args.dataset_root, args.output_path)
``` |
{
"source": "jl45621/SLM-Lab",
"score": 2
} |
#### File: jl45621/SLM-Lab/run_lab.py
```python
import os
# NOTE increase if needed. Pytorch thread overusage https://github.com/pytorch/pytorch/issues/975
os.environ['OMP_NUM_THREADS'] = '1'
from slm_lab import EVAL_MODES, TRAIN_MODES
from slm_lab.experiment import analysis, retro_analysis
from slm_lab.experiment.control import Session, Trial, Experiment
from slm_lab.experiment.monitor import InfoSpace
from slm_lab.lib import logger, util
from slm_lab.spec import spec_util
from xvfbwrapper import Xvfb
import sys
import torch.multiprocessing as mp
debug_modules = [
# 'algorithm',
]
debug_level = 'DEBUG'
logger.toggle_debug(debug_modules, debug_level)
def run_new_mode(spec_file, spec_name, lab_mode):
'''Run to generate new data with `search, train, dev`'''
spec = spec_util.get(spec_file, spec_name)
info_space = InfoSpace()
analysis.save_spec(spec, info_space, unit='experiment') # first save the new spec
if lab_mode == 'search':
info_space.tick('experiment')
Experiment(spec, info_space).run()
elif lab_mode.startswith('train'):
info_space.tick('trial')
Trial(spec, info_space).run()
elif lab_mode == 'dev':
spec = spec_util.override_dev_spec(spec)
info_space.tick('trial')
Trial(spec, info_space).run()
else:
raise ValueError(f'Unrecognizable lab_mode not of {TRAIN_MODES}')
def run_old_mode(spec_file, spec_name, lab_mode):
    '''Run using existing data with `enjoy, eval`. The eval mode is also what train mode's online eval runs in a subprocess via a bash command'''
# reconstruct spec and info_space from existing data
lab_mode, prename = lab_mode.split('@')
predir, _, _, _, _, _ = util.prepath_split(spec_file)
prepath = f'{predir}/{prename}'
spec, info_space = util.prepath_to_spec_info_space(prepath)
# see InfoSpace def for more on these
info_space.ckpt = 'eval'
info_space.eval_model_prepath = prepath
# no info_space.tick() as they are reconstructed
if lab_mode == 'enjoy':
spec = spec_util.override_enjoy_spec(spec)
Session(spec, info_space).run()
elif lab_mode == 'eval':
# example eval command:
# python run_lab.py data/dqn_cartpole_2018_12_19_224811/dqn_cartpole_t0_spec.json dqn_cartpole eval@dqn_cartpole_t0_s1_ckpt-epi10-totalt1000
spec = spec_util.override_eval_spec(spec)
Session(spec, info_space).run()
util.clear_periodic_ckpt(prepath) # cleanup after itself
retro_analysis.analyze_eval_trial(spec, info_space, predir)
else:
raise ValueError(f'Unrecognizable lab_mode not of {EVAL_MODES}')
def run_by_mode(spec_file, spec_name, lab_mode):
'''The main run lab function for all lab_modes'''
logger.info(f'Running lab in mode: {lab_mode}')
# '@' is reserved for 'enjoy@{prename}'
os.environ['lab_mode'] = lab_mode.split('@')[0]
if lab_mode in TRAIN_MODES:
run_new_mode(spec_file, spec_name, lab_mode)
else:
run_old_mode(spec_file, spec_name, lab_mode)
def main():
if len(sys.argv) > 1:
args = sys.argv[1:]
assert len(args) == 3, f'To use sys args, specify spec_file, spec_name, lab_mode'
run_by_mode(*args)
return
experiments = util.read('config/experiments.json')
for spec_file in experiments:
for spec_name, lab_mode in experiments[spec_file].items():
run_by_mode(spec_file, spec_name, lab_mode)
if __name__ == '__main__':
mp.set_start_method('spawn') # for distributed pytorch to work
if sys.platform == 'darwin':
# avoid xvfb for MacOS: https://github.com/nipy/nipype/issues/1400
main()
else:
with Xvfb() as xvfb: # safety context for headless machines
main()
```
#### File: slm_lab/env/unity.py
```python
from gym import spaces
from slm_lab.env.base import BaseEnv, ENV_DATA_NAMES, set_gym_space_attr
from slm_lab.env.registration import get_env_path
from slm_lab.lib import logger, util
from slm_lab.lib.decorator import lab_api
from unityagents import brain, UnityEnvironment
import numpy as np
import os
import pydash as ps
logger = logger.get_logger(__name__)
class BrainExt:
'''Unity Brain class extension, where self = brain'''
def is_discrete(self):
return self.action_space_type == 'discrete'
def get_action_dim(self):
return self.action_space_size
def get_observable_types(self):
'''What channels are observable: state, image, sound, touch, etc.'''
observable = {
'state': self.state_space_size > 0,
'image': self.number_observations > 0,
}
return observable
def get_observable_dim(self):
'''Get observable dimensions'''
observable_dim = {
'state': self.state_space_size,
'image': 'some np array shape, as opposed to what Arthur called size',
}
return observable_dim
# Extend Unity BrainParameters class at runtime to add BrainExt methods
util.monkey_patch(brain.BrainParameters, BrainExt)
class UnityEnv(BaseEnv):
'''
Wrapper for Unity ML-Agents env to work with the Lab.
e.g. env_spec
"env": [{
"name": "gridworld",
"max_t": 20,
"max_tick": 3,
"unity": {
"gridSize": 6,
"numObstacles": 2,
"numGoals": 1
}
}],
'''
def __init__(self, spec, e=None, env_space=None):
super(UnityEnv, self).__init__(spec, e, env_space)
util.set_attr(self, self.env_spec, ['unity'])
worker_id = int(f'{os.getpid()}{self.e+int(ps.unique_id())}'[-4:])
self.u_env = UnityEnvironment(file_name=get_env_path(self.name), worker_id=worker_id)
self.patch_gym_spaces(self.u_env)
self._set_attr_from_u_env(self.u_env)
assert self.max_t is not None
if env_space is None: # singleton mode
pass
else:
self.space_init(env_space)
logger.info(util.self_desc(self))
def patch_gym_spaces(self, u_env):
'''
For standardization, use gym spaces to represent observation and action spaces for Unity.
This method iterates through the multiple brains (multiagent) then constructs and returns lists of observation_spaces and action_spaces
'''
observation_spaces = []
action_spaces = []
for a in range(len(u_env.brain_names)):
brain = self._get_brain(u_env, a)
observation_shape = (brain.get_observable_dim()['state'],)
if brain.is_discrete():
dtype = np.int32
action_space = spaces.Discrete(brain.get_action_dim())
else:
dtype = np.float32
action_space = spaces.Box(low=0.0, high=1.0, shape=(1,), dtype=dtype)
observation_space = spaces.Box(low=0, high=1, shape=observation_shape, dtype=dtype)
set_gym_space_attr(observation_space)
set_gym_space_attr(action_space)
observation_spaces.append(observation_space)
action_spaces.append(action_space)
# set for singleton
u_env.observation_space = observation_spaces[0]
u_env.action_space = action_spaces[0]
return observation_spaces, action_spaces
def _get_brain(self, u_env, a):
'''Get the unity-equivalent of agent, i.e. brain, to access its info'''
name_a = u_env.brain_names[a]
brain_a = u_env.brains[name_a]
return brain_a
def _check_u_brain_to_agent(self):
'''Check the size match between unity brain and agent'''
u_brain_num = self.u_env.number_brains
agent_num = len(self.body_e)
assert u_brain_num == agent_num, f'There must be a Unity brain for each agent. e:{self.e}, brain: {u_brain_num} != agent: {agent_num}.'
def _check_u_agent_to_body(self, env_info_a, a):
'''Check the size match between unity agent and body'''
u_agent_num = len(env_info_a.agents)
body_num = util.count_nonan(self.body_e[a])
assert u_agent_num == body_num, f'There must be a Unity agent for each body; a:{a}, e:{self.e}, agent_num: {u_agent_num} != body_num: {body_num}.'
def _get_env_info(self, env_info_dict, a):
'''Unity API returns a env_info_dict. Use this method to pull brain(env)-specific usable for lab API'''
name_a = self.u_env.brain_names[a]
env_info_a = env_info_dict[name_a]
return env_info_a
@lab_api
def reset(self):
_reward = np.nan
env_info_dict = self.u_env.reset(train_mode=(util.get_lab_mode() != 'dev'), config=self.env_spec.get('unity'))
a, b = 0, 0 # default singleton aeb
env_info_a = self._get_env_info(env_info_dict, a)
state = env_info_a.states[b]
self.done = done = False
logger.debug(f'Env {self.e} reset reward: {_reward}, state: {state}, done: {done}')
return _reward, state, done
@lab_api
def step(self, action):
env_info_dict = self.u_env.step(action)
a, b = 0, 0 # default singleton aeb
env_info_a = self._get_env_info(env_info_dict, a)
reward = env_info_a.rewards[b] * self.reward_scale
state = env_info_a.states[b]
done = env_info_a.local_done[b]
self.done = done = done or self.clock.t > self.max_t
logger.debug(f'Env {self.e} step reward: {reward}, state: {state}, done: {done}')
return reward, state, done
@lab_api
def close(self):
self.u_env.close()
# NOTE optional extension for multi-agent-env
@lab_api
def space_init(self, env_space):
'''Post init override for space env. Note that aeb is already correct from __init__'''
self.env_space = env_space
self.aeb_space = env_space.aeb_space
self.observation_spaces = [self.observation_space]
self.action_spaces = [self.action_space]
@lab_api
def space_reset(self):
self._check_u_brain_to_agent()
self.done = False
env_info_dict = self.u_env.reset(train_mode=(util.get_lab_mode() != 'dev'), config=self.env_spec.get('unity'))
_reward_e, state_e, done_e = self.env_space.aeb_space.init_data_s(ENV_DATA_NAMES, e=self.e)
for (a, b), body in util.ndenumerate_nonan(self.body_e):
env_info_a = self._get_env_info(env_info_dict, a)
self._check_u_agent_to_body(env_info_a, a)
state = env_info_a.states[b]
state_e[(a, b)] = state
done_e[(a, b)] = self.done
logger.debug(f'Env {self.e} reset reward_e: {_reward_e}, state_e: {state_e}, done_e: {done_e}')
return _reward_e, state_e, done_e
@lab_api
def space_step(self, action_e):
# TODO implement clock_speed: step only if self.clock.to_step()
if self.done:
return self.space_reset()
action_e = util.nanflatten(action_e)
env_info_dict = self.u_env.step(action_e)
reward_e, state_e, done_e = self.env_space.aeb_space.init_data_s(ENV_DATA_NAMES, e=self.e)
for (a, b), body in util.ndenumerate_nonan(self.body_e):
env_info_a = self._get_env_info(env_info_dict, a)
reward_e[(a, b)] = env_info_a.rewards[b] * self.reward_scale
state_e[(a, b)] = env_info_a.states[b]
done_e[(a, b)] = env_info_a.local_done[b]
self.done = (util.nonan_all(done_e) or self.clock.t > self.max_t)
logger.debug(f'Env {self.e} step reward_e: {reward_e}, state_e: {state_e}, done_e: {done_e}')
return reward_e, state_e, done_e
```
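`util.monkey_patch(brain.BrainParameters, BrainExt)` above attaches the `BrainExt` methods to Unity's `BrainParameters` class at runtime. A minimal sketch of what such a helper might do (an assumption about `util.monkey_patch`, not its actual implementation):
```python
def monkey_patch(target_cls, ext_cls):
    # copy the extension class' methods onto the target class at runtime
    for name, attr in vars(ext_cls).items():
        if callable(attr) and not name.startswith('__'):
            setattr(target_cls, name, attr)

class Base:
    pass

class Ext:
    def greet(self):
        return 'hello'

monkey_patch(Base, Ext)
assert Base().greet() == 'hello'
```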
#### File: slm_lab/experiment/control.py
```python
from copy import deepcopy
from importlib import reload
from slm_lab.agent import AgentSpace, Agent
from slm_lab.env import EnvSpace, make_env
from slm_lab.experiment import analysis, retro_analysis, search
from slm_lab.experiment.monitor import AEBSpace, Body, enable_aeb_space
from slm_lab.lib import logger, util
from slm_lab.spec import spec_util
import os
import torch.multiprocessing as mp
class Session:
'''
The base unit of instantiated RL system.
Given a spec,
session creates agent(s) and environment(s),
run the RL system and collect data, e.g. fitness metrics, till it ends,
then return the session data.
'''
def __init__(self, spec, info_space, global_nets=None):
self.spec = spec
self.info_space = info_space
self.index = self.info_space.get('session')
util.set_logger(self.spec, self.info_space, logger, 'session')
self.data = None
# init singleton agent and env
self.env = make_env(self.spec)
util.set_rand_seed(self.info_space.get_random_seed(), self.env)
with util.ctx_lab_mode('eval'): # env for eval
self.eval_env = make_env(self.spec)
util.set_rand_seed(self.info_space.get_random_seed(), self.eval_env)
util.try_set_cuda_id(self.spec, self.info_space)
body = Body(self.env, self.spec['agent'])
self.agent = Agent(self.spec, self.info_space, body=body, global_nets=global_nets)
enable_aeb_space(self) # to use lab's data analysis framework
logger.info(util.self_desc(self))
logger.info(f'Initialized session {self.index}')
def try_ckpt(self, agent, env):
'''Try to checkpoint agent at the start, save_freq, and the end'''
tick = env.clock.get(env.max_tick_unit)
to_ckpt = False
if not util.in_eval_lab_modes() and tick <= env.max_tick:
to_ckpt = (tick % env.eval_frequency == 0) or tick == env.max_tick
if env.max_tick_unit == 'epi': # extra condition for epi
to_ckpt = to_ckpt and env.done
if to_ckpt:
if self.spec['meta'].get('parallel_eval'):
retro_analysis.run_parallel_eval(self, agent, env)
else:
self.run_eval_episode()
if analysis.new_best(agent):
agent.save(ckpt='best')
if tick > 0: # nothing to analyze at start
analysis.analyze_session(self, eager_analyze_trial=True)
def run_eval_episode(self):
with util.ctx_lab_mode('eval'): # enter eval context
self.agent.algorithm.update() # set explore_var etc. to end_val under ctx
self.eval_env.clock.tick('epi')
logger.info(f'Running eval episode for trial {self.info_space.get("trial")} session {self.index}')
total_reward = 0
reward, state, done = self.eval_env.reset()
while not done:
self.eval_env.clock.tick('t')
action = self.agent.act(state)
reward, state, done = self.eval_env.step(action)
total_reward += reward
# exit eval context, restore variables simply by updating
self.agent.algorithm.update()
# update body.eval_df
self.agent.body.eval_update(self.eval_env, total_reward)
self.agent.body.log_summary(body_df_kind='eval')
def run_episode(self):
self.env.clock.tick('epi')
logger.info(f'Running trial {self.info_space.get("trial")} session {self.index} episode {self.env.clock.epi}')
reward, state, done = self.env.reset()
self.agent.reset(state)
while not done:
self.try_ckpt(self.agent, self.env)
self.env.clock.tick('t')
action = self.agent.act(state)
reward, state, done = self.env.step(action)
self.agent.update(action, reward, state, done)
self.try_ckpt(self.agent, self.env) # final timestep ckpt
self.agent.body.log_summary(body_df_kind='train')
def close(self):
'''
Close session and clean up.
Save agent, close env.
'''
self.agent.close()
self.env.close()
self.eval_env.close()
logger.info('Session done and closed.')
def run(self):
while self.env.clock.get(self.env.max_tick_unit) < self.env.max_tick:
self.run_episode()
retro_analysis.try_wait_parallel_eval(self)
self.data = analysis.analyze_session(self) # session fitness
self.close()
return self.data
class SpaceSession(Session):
'''Session for multi-agent/env setting'''
def __init__(self, spec, info_space, global_nets=None):
self.spec = spec
self.info_space = info_space
self.index = self.info_space.get('session')
util.set_logger(self.spec, self.info_space, logger, 'session')
self.data = None
self.aeb_space = AEBSpace(self.spec, self.info_space)
self.env_space = EnvSpace(self.spec, self.aeb_space)
self.aeb_space.init_body_space()
util.set_rand_seed(self.info_space.get_random_seed(), self.env_space)
util.try_set_cuda_id(self.spec, self.info_space)
self.agent_space = AgentSpace(self.spec, self.aeb_space, global_nets)
logger.info(util.self_desc(self))
logger.info(f'Initialized session {self.index}')
def try_ckpt(self, agent_space, env_space):
'''Try to checkpoint agent at the start, save_freq, and the end'''
# TODO ckpt and eval not implemented for SpaceSession
pass
# for agent in agent_space.agents:
# for body in agent.nanflat_body_a:
# env = body.env
# super(SpaceSession, self).try_ckpt(agent, env)
def run_all_episodes(self):
'''
Continually run all episodes, where each env can step and reset at its own clock_speed and timeline.
        Will terminate when all envs are done.
'''
all_done = self.aeb_space.tick('epi')
reward_space, state_space, done_space = self.env_space.reset()
self.agent_space.reset(state_space)
while not all_done:
self.try_ckpt(self.agent_space, self.env_space)
all_done = self.aeb_space.tick()
action_space = self.agent_space.act(state_space)
reward_space, state_space, done_space = self.env_space.step(action_space)
self.agent_space.update(action_space, reward_space, state_space, done_space)
self.try_ckpt(self.agent_space, self.env_space)
retro_analysis.try_wait_parallel_eval(self)
def close(self):
'''
Close session and clean up.
Save agent, close env.
'''
self.agent_space.close()
self.env_space.close()
logger.info('Session done and closed.')
def run(self):
self.run_all_episodes()
self.data = analysis.analyze_session(self, tmp_space_session_sub=True) # session fitness
self.close()
return self.data
def init_run_session(*args):
'''Runner for multiprocessing'''
session = Session(*args)
return session.run()
def init_run_space_session(*args):
'''Runner for multiprocessing'''
session = SpaceSession(*args)
return session.run()
class Trial:
'''
The base unit of an experiment.
Given a spec and number s,
trial creates and runs s sessions,
gather and aggregate data from sessions as trial data,
then return the trial data.
'''
def __init__(self, spec, info_space):
self.spec = spec
self.info_space = info_space
self.index = self.info_space.get('trial')
info_space.set('session', None) # Session starts anew for new trial
util.set_logger(self.spec, self.info_space, logger, 'trial')
self.session_data_dict = {}
self.data = None
analysis.save_spec(spec, info_space, unit='trial')
self.is_singleton = spec_util.is_singleton(spec) # singleton mode as opposed to multi-agent-env space
self.SessionClass = Session if self.is_singleton else SpaceSession
self.mp_runner = init_run_session if self.is_singleton else init_run_space_session
logger.info(f'Initialized trial {self.index}')
def parallelize_sessions(self, global_nets=None):
workers = []
for _s in range(self.spec['meta']['max_session']):
self.info_space.tick('session')
w = mp.Process(target=self.mp_runner, args=(deepcopy(self.spec), deepcopy(self.info_space), global_nets))
w.start()
workers.append(w)
for w in workers:
w.join()
session_datas = retro_analysis.session_data_dict_for_dist(self.spec, self.info_space)
return session_datas
def run_sessions(self):
logger.info('Running sessions')
if util.get_lab_mode() in ('train', 'eval') and self.spec['meta']['max_session'] > 1:
# when training a single spec over multiple sessions
session_datas = self.parallelize_sessions()
else:
session_datas = []
for _s in range(self.spec['meta']['max_session']):
self.info_space.tick('session')
session = self.SessionClass(deepcopy(self.spec), deepcopy(self.info_space))
session_data = session.run()
session_datas.append(session_data)
if analysis.is_unfit(session_data, session):
break
return session_datas
def make_global_nets(self, agent):
global_nets = {}
for net_name in agent.algorithm.net_names:
g_net = getattr(agent.algorithm, net_name)
g_net.share_memory() # make net global
# TODO also create shared optimizer here
global_nets[net_name] = g_net
return global_nets
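# Note on the mechanism above (minimal sketch, torch only, names hypothetical):
# share_memory() is the standard PyTorch call that moves a module's parameters
# and buffers into shared memory, so the worker processes spawned in
# parallelize_sessions() all read and update the same tensors.
#
#   import torch
#   net = torch.nn.Linear(4, 2)
#   net.share_memory()  # parameters/buffers now live in shared memory
#   # any torch.multiprocessing worker handed `net` shares its storage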
def init_global_nets(self):
session = self.SessionClass(deepcopy(self.spec), deepcopy(self.info_space))
if self.is_singleton:
session.env.close() # safety
global_nets = self.make_global_nets(session.agent)
else:
session.env_space.close() # safety
global_nets = [self.make_global_nets(agent) for agent in session.agent_space.agents]
return global_nets
def run_distributed_sessions(self):
logger.info('Running distributed sessions')
global_nets = self.init_global_nets()
session_datas = self.parallelize_sessions(global_nets)
return session_datas
def close(self):
logger.info('Trial done and closed.')
def run(self):
if self.spec['meta'].get('distributed'):
session_datas = self.run_distributed_sessions()
else:
session_datas = self.run_sessions()
self.session_data_dict = {data.index[0]: data for data in session_datas}
self.data = analysis.analyze_trial(self)
self.close()
return self.data
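# Illustrative sketch (hypothetical standalone usage; trials are normally
# driven by the lab runner). Assuming `spec` is a spec dict already loaded via
# slm_lab.spec.spec_util:
#
#   from slm_lab.experiment.monitor import InfoSpace
#   trial = Trial(spec, InfoSpace())
#   trial_data = trial.run()  # runs max_session sessions, then analyzes the trial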
class Experiment:
'''
The core high level unit of Lab.
Given a spec-space/generator of cardinality t,
a number s,
a hyper-optimization algorithm hopt(spec, fitness-metric) -> spec_next/null,
an experiment creates and runs up to t trials of s sessions each to optimize (maximize) the fitness metric,
gathers the trial data,
then returns the experiment data for analysis and use in the evolution graph.
Experiment data will include the trial data, notes on design, hypothesis, conclusion, and analysis data, e.g. the fitness metric and the evolution link from ancestors to potential descendants.
An experiment then forms a node in the evolution graph, containing its data together with the evolution link and suggestions for adjacent possible new experiments.
On the evolution-graph level, an experiment and its neighbors can be seen as the test/development of traits.
'''
def __init__(self, spec, info_space):
self.spec = spec
self.info_space = info_space
self.index = self.info_space.get('experiment')
util.set_logger(self.spec, self.info_space, logger, 'trial')
self.trial_data_dict = {}
self.data = None
analysis.save_spec(spec, info_space, unit='experiment')
SearchClass = getattr(search, spec['meta'].get('search'))
self.search = SearchClass(self)
logger.info(f'Initialized experiment {self.index}')
def init_trial_and_run(self, spec, info_space):
'''
Method to run trial with the properly updated info_space (trial_index) from experiment.search.lab_trial.
'''
trial = Trial(spec, info_space)
trial_data = trial.run()
return trial_data
def close(self):
reload(search) # fixes ray consecutive run crashing due to bad cleanup
logger.info('Experiment done and closed.')
def run(self):
self.trial_data_dict = self.search.run()
self.data = analysis.analyze_experiment(self)
self.close()
return self.data
```
#### File: slm_lab/experiment/retro_analysis.py
```python
from slm_lab.experiment import analysis
from slm_lab.lib import logger, util
from slm_lab.spec import spec_util
import numpy as np
import os
import pydash as ps
import regex as re
logger = logger.get_logger(__name__)
def session_data_from_file(predir, trial_index, session_index, ckpt=None, prefix=''):
'''Build session.session_data from file'''
ckpt_str = '' if ckpt is None else f'_ckpt-{ckpt}'
for filename in os.listdir(predir):
if filename.endswith(f'_t{trial_index}_s{session_index}{ckpt_str}_{prefix}session_df.csv'):
filepath = f'{predir}/{filename}'
session_df = util.read(filepath, header=[0, 1, 2, 3], index_col=0)
session_data = util.session_df_to_data(session_df)
return session_data
def session_datas_from_file(predir, trial_spec, trial_index, ckpt=None):
'''Return a dict of {session_index: session_data} for a trial'''
session_datas = {}
for s in range(trial_spec['meta']['max_session']):
session_data = session_data_from_file(predir, trial_index, s, ckpt)
if session_data is not None:
session_datas[s] = session_data
return session_datas
def session_data_dict_from_file(predir, trial_index, ckpt=None):
'''Build trial.session_data_dict from file'''
ckpt_str = '' if ckpt is None else f'_ckpt-{ckpt}'
session_data_dict = {}
for filename in os.listdir(predir):
if f'_t{trial_index}_' in filename and filename.endswith(f'{ckpt_str}_session_fitness_df.csv'):
filepath = f'{predir}/{filename}'
fitness_df = util.read(filepath, header=[0, 1, 2, 3], index_col=0, dtype=np.float32)
util.fix_multi_index_dtype(fitness_df)
session_index = fitness_df.index[0]
session_data_dict[session_index] = fitness_df
return session_data_dict
def session_data_dict_for_dist(spec, info_space):
'''Method to retrieve session_datas (fitness df, so the same as session_data_dict above) when a trial with distributed sessions is done, to avoid messy multiprocessing data communication'''
prepath = util.get_prepath(spec, info_space)
predir, _, _, _, _, _ = util.prepath_split(prepath)
session_datas = session_data_dict_from_file(predir, info_space.get('trial'), ps.get(info_space, 'ckpt'))
session_datas = [session_datas[k] for k in sorted(session_datas.keys())]
return session_datas
def trial_data_dict_from_file(predir):
'''Build experiment.trial_data_dict from file'''
trial_data_dict = {}
for filename in os.listdir(predir):
if filename.endswith('_trial_data.json'):
filepath = f'{predir}/{filename}'
exp_trial_data = util.read(filepath)
trial_index = exp_trial_data.pop('trial_index')
trial_data_dict[trial_index] = exp_trial_data
return trial_data_dict
'''
Interface retro methods
'''
def analyze_eval_trial(spec, info_space, predir):
'''Create a trial and run analysis to get the trial graph and other trial data'''
from slm_lab.experiment.control import Trial
trial = Trial(spec, info_space)
trial.session_data_dict = session_data_dict_from_file(predir, trial.index, ps.get(info_space, 'ckpt'))
# don't zip for eval analysis, slow otherwise
analysis.analyze_trial(trial, zip=False)
def parallel_eval(spec, info_space, ckpt):
'''
Calls a subprocess to run lab in eval mode with the constructed ckpt prepath, same as how one would manually run the bash cmd
@example
python run_lab.py data/dqn_cartpole_2018_12_19_224811/dqn_cartpole_t0_spec.json dqn_cartpole eval@dqn_cartpole_t0_s1_ckpt-epi10-totalt1000
'''
prepath_t = util.get_prepath(spec, info_space, unit='trial')
prepath_s = util.get_prepath(spec, info_space, unit='session')
predir, _, prename, spec_name, _, _ = util.prepath_split(prepath_s)
cmd = f'python run_lab.py {prepath_t}_spec.json {spec_name} eval@{prename}_ckpt-{ckpt}'
logger.info(f'Running parallel eval for ckpt-{ckpt}')
return util.run_cmd(cmd)
def run_parallel_eval(session, agent, env):
'''Plugin to session to run parallel eval for train mode'''
if util.get_lab_mode() == 'train':
ckpt = f'epi{env.clock.epi}-totalt{env.clock.total_t}'
agent.save(ckpt=ckpt)
# set reference to eval process for handling
session.eval_proc = parallel_eval(session.spec, session.info_space, ckpt)
def try_wait_parallel_eval(session):
'''Plugin to wait for session's final parallel eval if any'''
if hasattr(session, 'eval_proc') and session.eval_proc is not None: # wait for final eval before closing
util.run_cmd_wait(session.eval_proc)
session_retro_eval(session) # rerun failed eval
def run_parallel_eval_from_prepath(prepath):
'''Used by retro_eval'''
spec, info_space = util.prepath_to_spec_info_space(prepath)
ckpt = util.find_ckpt(prepath)
return parallel_eval(spec, info_space, ckpt)
def run_wait_eval(prepath):
'''Used by retro_eval'''
eval_proc = run_parallel_eval_from_prepath(prepath)
util.run_cmd_wait(eval_proc)
def retro_analyze_sessions(predir):
'''Retro-analyze all session-level data.'''
logger.info('Retro-analyzing sessions from file')
from slm_lab.experiment.control import Session, SpaceSession
for filename in os.listdir(predir):
# to account for both types of session_df
if filename.endswith('_session_df.csv'):
body_df_kind = 'eval' # from body.eval_df
prefix = ''
is_session_df = True
elif filename.endswith('_trainsession_df.csv'):
body_df_kind = 'train' # from body.train_df
prefix = 'train'
is_session_df = True
else:
is_session_df = False
if is_session_df:
prepath = f'{predir}/{filename}'.replace(f'_{prefix}session_df.csv', '')
spec, info_space = util.prepath_to_spec_info_space(prepath)
trial_index, session_index = util.prepath_to_idxs(prepath)
SessionClass = Session if spec_util.is_singleton(spec) else SpaceSession
session = SessionClass(spec, info_space)
session_data = session_data_from_file(predir, trial_index, session_index, ps.get(info_space, 'ckpt'), prefix)
analysis._analyze_session(session, session_data, body_df_kind)
def retro_analyze_trials(predir):
'''Retro-analyze all trial-level data.'''
logger.info('Retro-analyzing trials from file')
from slm_lab.experiment.control import Trial
filenames = ps.filter_(os.listdir(predir), lambda filename: filename.endswith('_trial_df.csv'))
for idx, filename in enumerate(filenames):
filepath = f'{predir}/{filename}'
prepath = filepath.replace('_trial_df.csv', '')
spec, info_space = util.prepath_to_spec_info_space(prepath)
trial_index, _ = util.prepath_to_idxs(prepath)
trial = Trial(spec, info_space)
trial.session_data_dict = session_data_dict_from_file(predir, trial_index, ps.get(info_space, 'ckpt'))
# zip only for the last one
zip = (idx == len(filenames) - 1)
trial_fitness_df = analysis.analyze_trial(trial, zip)
# update the trial_data that was written by the ray search
trial_data_filepath = filepath.replace('_trial_df.csv', '_trial_data.json')
if os.path.exists(trial_data_filepath):
fitness_vec = trial_fitness_df.iloc[0].to_dict()
fitness = analysis.calc_fitness(trial_fitness_df)
trial_data = util.read(trial_data_filepath)
trial_data.update({
**fitness_vec, 'fitness': fitness, 'trial_index': trial_index,
})
util.write(trial_data, trial_data_filepath)
def retro_analyze_experiment(predir):
'''Retro-analyze all experiment-level data.'''
logger.info('Retro-analyzing experiment from file')
from slm_lab.experiment.control import Experiment
_, _, _, spec_name, _, _ = util.prepath_split(predir)
prepath = f'{predir}/{spec_name}'
spec, info_space = util.prepath_to_spec_info_space(prepath)
if 'search' not in spec:
return
experiment = Experiment(spec, info_space)
experiment.trial_data_dict = trial_data_dict_from_file(predir)
if not ps.is_empty(experiment.trial_data_dict):
return analysis.analyze_experiment(experiment)
def retro_analyze(predir):
'''
Method to analyze experiment from file after it ran.
Read from files, constructs lab units, run retro analyses on all lab units.
This method has no side-effects, i.e. doesn't overwrite data it should not.
@example
yarn retro_analyze data/reinforce_cartpole_2018_01_22_211751
'''
os.environ['PREPATH'] = f'{predir}/retro_analyze' # to prevent overwriting log file
logger.info(f'Retro-analyzing {predir}')
retro_analyze_sessions(predir)
retro_analyze_trials(predir)
retro_analyze_experiment(predir)
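# Illustrative usage: the retro analysis invoked by the `yarn retro_analyze`
# example above can also be called directly, assuming the data directory from
# a finished run exists:
#
#   retro_analyze('data/reinforce_cartpole_2018_01_22_211751')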
def retro_eval(predir, session_index=None):
'''
Method to run eval sessions by scanning a predir for ckpt files. Used to rerun failed eval sessions.
@example
yarn retro_eval data/reinforce_cartpole_2018_01_22_211751
'''
logger.info(f'Retro-evaluate sessions from predir {predir}')
# collect all unique prepaths first
prepaths = []
s_filter = '' if session_index is None else f'_s{session_index}_'
for filename in os.listdir(predir):
if filename.endswith('model.pth') and s_filter in filename:
res = re.search(r'.+epi(\d+)-totalt(\d+)', filename)
if res is not None:
prepath = f'{predir}/{res[0]}'
if prepath not in prepaths:
prepaths.append(prepath)
if ps.is_empty(prepaths):
return
logger.info('Starting retro eval')
np.random.shuffle(prepaths) # so that CUDA_ID by trial/session index is spread out
rand_spec = util.prepath_to_spec(prepaths[0]) # get any prepath, read its max session
max_session = rand_spec['meta']['max_session']
util.parallelize_fn(run_wait_eval, prepaths, num_cpus=max_session)
def session_retro_eval(session):
'''retro_eval but for session at the end to rerun failed evals'''
prepath = util.get_prepath(session.spec, session.info_space, unit='session')
predir, _, _, _, _, _ = util.prepath_split(prepath)
retro_eval(predir, session.index)
```
#### File: SLM-Lab/test/conftest.py
```python
from slm_lab.agent import AgentSpace
from slm_lab.env import EnvSpace
from slm_lab.experiment.monitor import AEBSpace, InfoSpace
from slm_lab.lib import util
from slm_lab.spec import spec_util
from xvfbwrapper import Xvfb
import numpy as np
import pandas as pd
import pytest
spec = None
aeb_space = None
agent = None
env = None
@pytest.fixture(scope='session', autouse=True)
def test_xvfb():
'''provide xvfb in test environment'''
vdisplay = Xvfb()
try: # guard for multiprocessing dist test
vdisplay.start()
yield vdisplay
vdisplay.stop()
except Exception:
yield vdisplay
@pytest.fixture(scope='session')
def test_spec():
global spec
spec = spec_util.get('base.json', 'base_case_openai')
spec = spec_util.override_test_spec(spec)
return spec
@pytest.fixture(scope='session')
def test_info_space():
return InfoSpace()
@pytest.fixture(scope='session')
def test_aeb_space(test_spec):
global aeb_space
if aeb_space is None:
aeb_space = AEBSpace(test_spec, InfoSpace())
env_space = EnvSpace(test_spec, aeb_space)
aeb_space.init_body_space()
agent_space = AgentSpace(test_spec, aeb_space)
return aeb_space
@pytest.fixture(scope='session')
def test_agent(test_aeb_space):
agent = test_aeb_space.agent_space.agents[0]
return agent
@pytest.fixture(scope='session')
def test_env(test_aeb_space):
env = test_aeb_space.env_space.envs[0]
return env
@pytest.fixture
def test_df():
data = pd.DataFrame({
'integer': [1, 2, 3],
'square': [1, 4, 9],
'letter': ['a', 'b', 'c'],
})
assert isinstance(data, pd.DataFrame)
return data
@pytest.fixture
def test_dict():
data = {
'a': 1,
'b': 2,
'c': 3,
}
assert isinstance(data, dict)
return data
@pytest.fixture
def test_list():
data = [1, 2, 3]
assert isinstance(data, list)
return data
@pytest.fixture
def test_obj():
class Foo:
bar = 'bar'
return Foo()
@pytest.fixture
def test_str():
data = 'lorem ipsum dolor'
assert isinstance(data, str)
return data
@pytest.fixture(scope='session', params=[
(
2,
[
[np.asarray([1, 1, 1, 1]), 1, 1, np.asarray([2, 2, 2, 2]), 1],
[np.asarray([2, 2, 2, 2]), 1, 2, np.asarray([3, 3, 3, 3]), 2],
[np.asarray([3, 3, 3, 3]), 1, 3, np.asarray([4, 4, 4, 4]), 3],
[np.asarray([4, 4, 4, 4]), 1, 4, np.asarray([5, 5, 5, 5]), 4],
[np.asarray([5, 5, 5, 5]), 1, 5, np.asarray([6, 6, 6, 6]), 5],
[np.asarray([6, 6, 6, 6]), 1, 6, np.asarray([7, 7, 7, 7]), 6],
[np.asarray([7, 7, 7, 7]), 1, 7, np.asarray([8, 8, 8, 8]), 7],
[np.asarray([8, 8, 8, 8]), 1, 8, np.asarray([9, 9, 9, 9]), 8],
]
),
])
def test_memory(request):
memspec = spec_util.get('base.json', 'base_memory')
memspec = spec_util.override_test_spec(memspec)
aeb_mem_space = AEBSpace(memspec, InfoSpace())
env_space = EnvSpace(memspec, aeb_mem_space)
aeb_mem_space.init_body_space()
agent_space = AgentSpace(memspec, aeb_mem_space)
agent = agent_space.agents[0]
body = agent.nanflat_body_a[0]
res = (body.memory, ) + request.param
return res
@pytest.fixture(scope='session', params=[
(
2,
[
[np.asarray([1, 1, 1, 1]), 1, 1, np.asarray([2, 2, 2, 2]), 0],
[np.asarray([2, 2, 2, 2]), 1, 2, np.asarray([3, 3, 3, 3]), 0],
[np.asarray([3, 3, 3, 3]), 1, 3, np.asarray([4, 4, 4, 4]), 0],
[np.asarray([4, 4, 4, 4]), 1, 4, np.asarray([5, 5, 5, 5]), 0],
[np.asarray([5, 5, 5, 5]), 1, 5, np.asarray([6, 6, 6, 6]), 0],
[np.asarray([6, 6, 6, 6]), 1, 6, np.asarray([7, 7, 7, 7]), 0],
[np.asarray([7, 7, 7, 7]), 1, 7, np.asarray([8, 8, 8, 8]), 0],
[np.asarray([8, 8, 8, 8]), 1, 8, np.asarray([9, 9, 9, 9]), 1],
]
),
])
def test_on_policy_episodic_memory(request):
memspec = spec_util.get('base.json', 'base_on_policy_memory')
memspec = spec_util.override_test_spec(memspec)
aeb_mem_space = AEBSpace(memspec, InfoSpace())
env_space = EnvSpace(memspec, aeb_mem_space)
aeb_mem_space.init_body_space()
agent_space = AgentSpace(memspec, aeb_mem_space)
agent = agent_space.agents[0]
body = agent.nanflat_body_a[0]
res = (body.memory, ) + request.param
return res
@pytest.fixture(scope='session', params=[
(
4,
[
[np.asarray([1, 1, 1, 1]), 1, 1, np.asarray([2, 2, 2, 2]), 0],
[np.asarray([2, 2, 2, 2]), 1, 2, np.asarray([3, 3, 3, 3]), 0],
[np.asarray([3, 3, 3, 3]), 1, 3, np.asarray([4, 4, 4, 4]), 0],
[np.asarray([4, 4, 4, 4]), 1, 4, np.asarray([5, 5, 5, 5]), 0],
[np.asarray([5, 5, 5, 5]), 1, 5, np.asarray([6, 6, 6, 6]), 0],
[np.asarray([6, 6, 6, 6]), 1, 6, np.asarray([7, 7, 7, 7]), 0],
[np.asarray([7, 7, 7, 7]), 1, 7, np.asarray([8, 8, 8, 8]), 0],
[np.asarray([8, 8, 8, 8]), 1, 8, np.asarray([9, 9, 9, 9]), 1],
]
),
])
def test_on_policy_batch_memory(request):
memspec = spec_util.get('base.json', 'base_on_policy_batch_memory')
memspec = spec_util.override_test_spec(memspec)
aeb_mem_space = AEBSpace(memspec, InfoSpace())
env_space = EnvSpace(memspec, aeb_mem_space)
aeb_mem_space.init_body_space()
agent_space = AgentSpace(memspec, aeb_mem_space)
agent = agent_space.agents[0]
body = agent.nanflat_body_a[0]
res = (body.memory, ) + request.param
return res
@pytest.fixture(scope='session', params=[
(
4,
[
[np.asarray([1, 1, 1, 1]), 1, 1, np.asarray([2, 2, 2, 2]), 0, 1000],
[np.asarray([2, 2, 2, 2]), 1, 2, np.asarray([3, 3, 3, 3]), 0, 0],
[np.asarray([3, 3, 3, 3]), 1, 3, np.asarray([4, 4, 4, 4]), 0, 0],
[np.asarray([4, 4, 4, 4]), 1, 4, np.asarray([5, 5, 5, 5]), 0, 0],
[np.asarray([5, 5, 5, 5]), 1, 5, np.asarray([6, 6, 6, 6]), 0, 1000],
[np.asarray([6, 6, 6, 6]), 1, 6, np.asarray([7, 7, 7, 7]), 0, 0],
[np.asarray([7, 7, 7, 7]), 1, 7, np.asarray([8, 8, 8, 8]), 0, 0],
[np.asarray([8, 8, 8, 8]), 1, 8, np.asarray([9, 9, 9, 9]), 1, 1000],
]
),
])
def test_prioritized_replay_memory(request):
memspec = spec_util.get('base.json', 'base_prioritized_replay_memory')
memspec = spec_util.override_test_spec(memspec)
aeb_mem_space = AEBSpace(memspec, InfoSpace())
env_space = EnvSpace(memspec, aeb_mem_space)
aeb_mem_space.init_body_space()
agent_space = AgentSpace(memspec, aeb_mem_space)
agent = agent_space.agents[0]
body = agent.nanflat_body_a[0]
res = (body.memory, ) + request.param
return res
``` |
{
"source": "jlaasociados/Django-Api-Rest-Boilerplate",
"score": 3
} |
#### File: server/general/tests.py
```python
from django.test import TestCase
from general.models import SpaUser
# models test
class SpaUserTest(TestCase):
def create_SpaUser(self, email="<EMAIL>", password="<PASSWORD>"):
return SpaUser.objects.create(email=email, password=password)
def test_SpaUser_creation(self):
miusuario = self.create_SpaUser()
self.assertTrue(isinstance(miusuario, SpaUser))
self.assertEqual(miusuario.__str__(), miusuario.email)
``` |
{
"source": "jlabounty/wordle-solver",
"score": 3
} |
#### File: jlabounty/wordle-solver/interface.py
```python
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from solver import *
from selenium.webdriver.common.by import By
import time
class WordlePlayer():
def __init__(self, verbose=False) -> None:
self.verbose=verbose
def setUp(self):
# set up the web driver to open the wordle site
self.driver = webdriver.Firefox()
def play(self):
# open the site and click out of the how-to
driver = self.driver
driver.get("https://www.powerlanguage.co.uk/wordle/")
time.sleep(1)
Elem = driver.find_element_by_tag_name('html')
Elem.click()
time.sleep(1)
# get the relevant objects to be able to parse our guesses
game_app = driver.find_element_by_tag_name('game-app')
board = driver.execute_script("return arguments[0].shadowRoot.getElementById('board')", game_app)
game_rows = board.find_elements_by_tag_name('game-row')
rows = [driver.execute_script("return arguments[0].shadowRoot.children[1]", x) for x in game_rows]
# print('rows:', rows)
solver = WordleSolver(verbose=self.verbose)
for guess in range(6):
this_guess = solver.guess()
if(self.verbose):
print(f"Starting guess {guess} -> {this_guess}")
Elem.send_keys(this_guess)
Elem.send_keys(Keys.ENTER)
time.sleep(2)
result = self.parse_row(rows[guess])
if(self.verbose):
print(' -> parsed with result:', result)
if('.' not in result[0]):
print(f"We did it in {guess+1} guesses! The word is:", result[0])
break
# update the word list with this information and prepare the next guess
solver.parse_guess(*result)
# input("so?")
def parse_row(self, row):
'''Using the HTML from the tile objects, see how our guess did'''
html = row.get_attribute('innerHTML')
exact = ''
absent = ''
wrong_position = {}
for i,tile in enumerate(html.split("</game-tile>")[:-1]):
# print(tile)
letter = tile.split('letter="')[1].split('"')[0]
evaluation = tile.split('evaluation="')[1].split('"')[0]
# print(letter, evaluation)
if(evaluation=='absent'):
absent += letter
exact += '.'
elif(evaluation=='present'):
wrong_position[letter] = [i]
exact += '.'
elif(evaluation=='correct'):
exact += letter
for x in exact:
if(x in absent):
absent = absent.replace(x,'')
return exact, wrong_position, absent
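# Hypothetical example of the return value: for a guess like "crane" where 'c'
# is present but misplaced, 'r'/'a'/'n' are absent and 'e' is correct, this
# would return ('....e', {'c': [0]}, 'ran').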
def tearDown(self):
self.driver.close()
def main():
player = WordlePlayer(verbose=True)
player.setUp()
player.play()
input("What do you think?")
player.tearDown()
print("See you tomorrow!")
if __name__ == "__main__":
main()
``` |
{
"source": "jlab/qiita",
"score": 2
} |
#### File: qiita_db/handlers/prep_template.py
```python
from json import loads
from os.path import basename
from tornado.web import HTTPError
import pandas as pd
import qiita_db as qdb
from .oauth2 import OauthBaseHandler, authenticate_oauth
def _get_prep_template(pid):
"""Returns the prep template with the given `pid` if it exists
Parameters
----------
pid : str
The prep template id
Returns
-------
qiita_db.metadata_template.prep_template.PrepTemplate
The requested prep template
Raises
------
HTTPError
If the prep template does not exist, with error code 404
If there is a problem instantiating the template, with error code 500
"""
try:
pid = int(pid)
pt = qdb.metadata_template.prep_template.PrepTemplate(pid)
except qdb.exceptions.QiitaDBUnknownIDError:
raise HTTPError(404)
except Exception as e:
raise HTTPError(500, reason='Error instantiating prep template %s: %s'
% (pid, str(e)))
return pt
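# Illustrative usage (hypothetical id): the handlers below call this helper
# inside a qdb.sql_connection.TRN transaction, e.g.
#   pt = _get_prep_template('1')  # PrepTemplate(1), or raises HTTPError 404/500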
class PrepTemplateDBHandler(OauthBaseHandler):
@authenticate_oauth
def get(self, prep_id):
"""Retrieves the prep template information
Parameters
----------
prep_id: str
The id of the prep template whose information is being retrieved
Returns
-------
dict
The prep information:
'data_type': prep info data type
'artifact': artifact attached to the given prep
'investigation_type': prep info investigation type
'study': study that the prep info belongs to
'status': prep info status
'sample-file': the path to the sample information file
'prep-file': the path to the prep info file
"""
with qdb.sql_connection.TRN:
pt = _get_prep_template(prep_id)
prep_files = [fp for _, fp in pt.get_filepaths()
if 'qiime' not in basename(fp)]
artifact = pt.artifact.id if pt.artifact is not None else None
sid = pt.study_id
response = {
'data_type': pt.data_type(),
'artifact': artifact,
'investigation_type': pt.investigation_type,
'study': sid,
'status': pt.status,
# get_filepaths returns an ordered list of [filepath_id,
# filepath] pairs, newest first, and we want the most recent one
'sample-file': qdb.study.Study(
sid).sample_template.get_filepaths()[0][1],
# The first element in the prep_files is the newest
# prep information file - hence the correct one
'prep-file': prep_files[0]
}
self.write(response)
class PrepTemplateDataHandler(OauthBaseHandler):
@authenticate_oauth
def get(self, prep_id):
"""Retrieves the prep contents
Parameters
----------
prep_id : str
The id of the prep template whose information is being retrieved
Returns
-------
dict
The contents of the prep information keyed by sample id
"""
with qdb.sql_connection.TRN:
pt = _get_prep_template(prep_id)
response = {'data': pt.to_dataframe().to_dict(orient='index')}
self.write(response)
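# Hypothetical shape of the payload written above (column names are only
# illustrative):
#   {'data': {'1.SKB1.640202': {'center_name': 'ANL',
#                               'platform': 'Illumina', ...}}}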
class PrepTemplateAPItestHandler(OauthBaseHandler):
@authenticate_oauth
def post(self):
prep_info_dict = loads(self.get_argument('prep_info'))
study = self.get_argument('study')
data_type = self.get_argument('data_type')
metadata = pd.DataFrame.from_dict(prep_info_dict, orient='index')
pt = qdb.metadata_template.prep_template.PrepTemplate.create(
metadata, qdb.study.Study(study), data_type)
self.write({'prep': pt.id})
```
#### File: qiita_db/test/test_analysis.py
```python
from unittest import TestCase, main
from os import remove
from os.path import exists, join, basename
from shutil import move
from biom import load_table
from pandas.util.testing import assert_frame_equal
from functools import partial
from qiita_core.util import qiita_test_checker
from qiita_core.testing import wait_for_processing_job
from qiita_core.qiita_settings import qiita_config
import qiita_db as qdb
from json import dumps
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
@qiita_test_checker()
class TestAnalysis(TestCase):
def setUp(self):
self.analysis = qdb.analysis.Analysis(1)
self.portal = qiita_config.portal
_, self.fp = qdb.util.get_mountpoint("analysis")[0]
self.get_fp = partial(join, self.fp)
self.biom_fp = self.get_fp("1_analysis_dt-18S_r-1_c-3.biom")
self._old_portal = qiita_config.portal
self.table_fp = None
# fullpaths for testing
self.duplicated_samples_not_merged = self.get_fp(
"not_merged_samples.txt")
self.map_exp_fp = self.get_fp("1_analysis_mapping_exp.txt")
from glob import glob
conf_files = glob(join(qiita_config.plugin_dir, "BIOM*.conf"))
for i, fp in enumerate(conf_files):
qdb.software.Software.from_file(fp, update=True)
def tearDown(self):
self.analysis.artifacts[0].visibility = 'private'
qiita_config.portal = self.portal
with open(self.biom_fp, 'w') as f:
f.write("")
fp = self.get_fp('testfile.txt')
if exists(fp):
remove(fp)
if self.table_fp:
mp = qdb.util.get_mountpoint("processed_data")[0][1]
if exists(self.table_fp):
move(self.table_fp,
join(mp, "2_study_1001_closed_reference_otu_table.biom"))
qiita_config.portal = self._old_portal
def _wait_for_jobs(self, analysis):
for j in analysis.jobs:
wait_for_processing_job(j.id)
if j.status == 'error':
print(j.log.msg)
def _create_analyses_with_samples(self, user='<EMAIL>',
merge=False):
"""Aux function to create an analysis with samples
Parameters
----------
user : str, optional
The email of the user to attach to the analysis. Default: <EMAIL>
merge : bool, optional
Merge duplicated ids or not
Returns
-------
qiita_db.analysis.Analysis
Notes
-----
Replicates the samples contained in Analysis(1) at the moment of
creation of this function (September 15, 2016)
"""
user = qdb.user.User(user)
dflt_analysis = user.default_analysis
dflt_analysis.add_samples(
{4: ['1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196',
'1.SKM9.640192', '1.SKM4.640180']})
new = qdb.analysis.Analysis.create(
user, "newAnalysis", "A New Analysis", from_default=True,
merge_duplicated_sample_ids=merge)
self._wait_for_jobs(new)
return new
def test_lock_samples(self):
dflt = qdb.user.User('<EMAIL>').default_analysis
# The default analysis can have samples added/removed
dflt._lock_samples()
QE = qdb.exceptions
with self.assertRaises(QE.QiitaDBOperationNotPermittedError):
qdb.analysis.Analysis(1)._lock_samples()
def test_get_by_status(self):
qiita_config.portal = 'QIITA'
self.assertEqual(
qdb.analysis.Analysis.get_by_status('public'), set([]))
qiita_config.portal = 'EMP'
self.assertEqual(
qdb.analysis.Analysis.get_by_status('public'), set([]))
qiita_config.portal = 'QIITA'
self.analysis.artifacts[0].visibility = 'public'
self.assertEqual(qdb.analysis.Analysis.get_by_status('public'),
{self.analysis})
qiita_config.portal = 'EMP'
self.assertEqual(
qdb.analysis.Analysis.get_by_status('public'), set([]))
def test_can_be_publicized(self):
analysis = qdb.analysis.Analysis(1)
self.assertEqual(analysis.can_be_publicized, (False, [4, 5, 6]))
a4 = qdb.artifact.Artifact(4)
a4.visibility = 'public'
self.assertEqual(analysis.can_be_publicized, (True, []))
a4.visibility = 'private'
self.assertEqual(analysis.can_be_publicized, (False, [4, 5, 6]))
def test_add_artifact(self):
obs = self._create_analyses_with_samples()
exp = qdb.artifact.Artifact(4)
obs.add_artifact(exp)
self.assertIn(exp, obs.artifacts)
def test_has_access_public(self):
analysis = self._create_analyses_with_samples("<EMAIL>")
analysis.artifacts[0].visibility = 'public'
qiita_config.portal = 'QIITA'
self.assertTrue(
analysis.has_access(qdb.user.User("<EMAIL>")))
qiita_config.portal = 'EMP'
self.assertFalse(
analysis.has_access(qdb.user.User("<EMAIL>")))
def test_has_access_shared(self):
self.assertTrue(
self.analysis.has_access(qdb.user.User("<EMAIL>")))
def test_has_access_private(self):
self.assertTrue(
self.analysis.has_access(qdb.user.User("<EMAIL>")))
def test_has_access_admin(self):
qiita_config.portal = 'QIITA'
self.assertTrue(
self.analysis.has_access(qdb.user.User("<EMAIL>")))
qiita_config.portal = 'EMP'
with self.assertRaises(qdb.exceptions.QiitaDBError):
qdb.analysis.Analysis(1).has_access(qdb.user.User("<EMAIL>"))
def test_has_access_no_access(self):
self.assertFalse(
self.analysis.has_access(qdb.user.User("<EMAIL>")))
def test_can_edit(self):
a = qdb.analysis.Analysis(1)
self.assertTrue(a.can_edit(qdb.user.User('<EMAIL>')))
self.assertTrue(a.can_edit(qdb.user.User('<EMAIL>')))
self.assertTrue(a.can_edit(qdb.user.User('<EMAIL>')))
self.assertFalse(a.can_edit(qdb.user.User('<EMAIL>')))
def test_create_nonqiita_portal(self):
qiita_config.portal = "EMP"
obs = qdb.analysis.Analysis.create(
qdb.user.User("<EMAIL>"), "newAnalysis", "A New Analysis")
# make sure portal is associated
self.assertCountEqual(obs._portals, ["QIITA", "EMP"])
def test_create_from_default(self):
with qdb.sql_connection.TRN:
sql = "SELECT NOW()"
qdb.sql_connection.TRN.add(sql)
time1 = qdb.sql_connection.TRN.execute_fetchlast()
owner = qdb.user.User("<EMAIL>")
obs = qdb.analysis.Analysis.create(
owner, "newAnalysis", "A New Analysis", from_default=True)
self.assertEqual(obs.owner, owner)
self.assertEqual(obs.name, "newAnalysis")
self.assertEqual(obs._portals, ["QIITA"])
self.assertLess(time1, obs.timestamp)
self.assertEqual(obs.description, "A New Analysis")
self.assertCountEqual(obs.samples, [4])
self.assertCountEqual(
obs.samples[4], ['1.SKD8.640184', '1.SKB7.640196',
'1.SKM9.640192', '1.SKM4.640180'])
self.assertEqual(obs.data_types, ['18S'])
self.assertEqual(obs.shared_with, [])
self.assertEqual(obs.mapping_file, None)
self.assertEqual(obs.tgz, None)
self.assertNotEqual(obs.jobs, [])
self.assertEqual(obs.pmid, None)
def test_exists(self):
qiita_config.portal = 'QIITA'
self.assertTrue(qdb.analysis.Analysis.exists(1))
self.assertFalse(qdb.analysis.Analysis.exists(1000))
qiita_config.portal = 'EMP'
self.assertFalse(qdb.analysis.Analysis.exists(1))
self.assertFalse(qdb.analysis.Analysis.exists(1000))
def test_delete(self):
# successful delete
new = qdb.analysis.Analysis.create(
qdb.user.User('<EMAIL>'), "newAnalysis",
"A New Analysis")
self.assertTrue(qdb.analysis.Analysis.exists(new.id))
qdb.analysis.Analysis.delete(new.id)
self.assertFalse(qdb.analysis.Analysis.exists(new.id))
# not possible to delete
QE = qdb.exceptions
with self.assertRaises(QE.QiitaDBUnknownIDError):
qdb.analysis.Analysis.delete(new.id)
# Analysis with artifacts
with self.assertRaises(QE.QiitaDBOperationNotPermittedError):
qdb.analysis.Analysis.delete(1)
def test_retrieve_owner(self):
self.assertEqual(self.analysis.owner, qdb.user.User("<EMAIL>"))
def test_retrieve_name(self):
self.assertEqual(self.analysis.name, "SomeAnalysis")
def test_retrieve_description(self):
self.assertEqual(self.analysis.description, "A test analysis")
def test_set_description(self):
self.analysis.description = "New description"
self.assertEqual(self.analysis.description, "New description")
def test_retrieve_samples(self):
exp = {4: ['1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196',
'1.SKM9.640192', '1.SKM4.640180'],
5: ['1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196',
'1.SKM9.640192', '1.SKM4.640180'],
6: ['1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196',
'1.SKM9.640192', '1.SKM4.640180']}
self.assertCountEqual(self.analysis.samples, exp)
def test_retrieve_portal(self):
self.assertEqual(self.analysis._portals, ["QIITA"])
def test_retrieve_data_types(self):
exp = ['18S', '16S']
self.assertCountEqual(self.analysis.data_types, exp)
def test_retrieve_shared_with(self):
self.assertEqual(self.analysis.shared_with,
[qdb.user.User("<EMAIL>")])
def test_retrieve_jobs(self):
self.assertEqual(self.analysis.jobs, [])
def test_retrieve_pmid(self):
self.assertEqual(self.analysis.pmid, "121112")
def test_set_pmid(self):
new = self._create_analyses_with_samples("<EMAIL>")
self.assertIsNone(new.pmid)
new.pmid = "11211221212213"
self.assertEqual(new.pmid, "11211221212213")
def test_retrieve_mapping_file(self):
exp = join(self.fp, "1_analysis_mapping.txt")
obs = self.analysis.mapping_file
self.assertIsNotNone(obs)
self.assertEqual(
qdb.util.get_filepath_information(obs)['fullpath'], exp)
self.assertTrue(exists(exp))
def test_retrieve_tgz(self):
# generating here as the tgz is only generated once the analysis runs
# to completion (un)successfully
analysis = self._create_analyses_with_samples("<EMAIL>")
fp = self.get_fp('test.tgz')
with open(fp, 'w') as f:
f.write('')
analysis._add_file(fp, 'tgz')
self.assertEqual(analysis.tgz, fp)
def test_retrieve_tgz_none(self):
self.assertIsNone(self.analysis.tgz)
def test_summary_data(self):
obs = self.analysis.summary_data()
exp = {'studies': 1,
'artifacts': 3,
'samples': 5}
self.assertEqual(obs, exp)
def test_add_remove_samples(self):
analysis = qdb.user.User('<EMAIL>').default_analysis
exp = {4: ['1.SKD8.640184', '1.SKB7.640196', '1.SKM9.640192',
'1.SKM4.640180', '1.SKB8.640193'],
5: ['1.SKD8.640184', '1.SKB7.640196', '1.SKM9.640192',
'1.SKM4.640180', '1.SKB8.640193'],
6: ['1.SKD8.640184', '1.SKB7.640196', '1.SKM9.640192',
'1.SKM4.640180', '1.SKB8.640193']}
analysis.add_samples(exp)
obs = analysis.samples
self.assertCountEqual(list(obs.keys()), exp.keys())
for k in obs:
self.assertCountEqual(obs[k], exp[k])
analysis.remove_samples(artifacts=(qdb.artifact.Artifact(4), ),
samples=('1.SKB8.640193', ))
exp = {4: ['1.SKD8.640184', '1.SKB7.640196', '1.SKM9.640192',
'1.SKM4.640180'],
5: ['1.SKD8.640184', '1.SKB7.640196', '1.SKM9.640192',
'1.SKM4.640180', '1.SKB8.640193'],
6: ['1.SKD8.640184', '1.SKB7.640196', '1.SKM9.640192',
'1.SKM4.640180', '1.SKB8.640193']}
obs = analysis.samples
self.assertCountEqual(list(obs.keys()), exp.keys())
for k in obs:
self.assertCountEqual(obs[k], exp[k])
analysis.remove_samples(samples=('1.SKD8.640184', ))
exp = {4: ['1.SKB7.640196', '1.SKM9.640192', '1.SKM4.640180'],
5: ['1.SKB8.640193', '1.SKB7.640196', '1.SKM9.640192',
'1.SKM4.640180'],
6: ['1.SKB8.640193', '1.SKB7.640196', '1.SKM9.640192',
'1.SKM4.640180']}
self.assertCountEqual(analysis.samples, exp)
analysis.remove_samples(
artifacts=(qdb.artifact.Artifact(4), qdb.artifact.Artifact(5)))
exp = {6: {'1.SKB7.640196', '1.SKB8.640193',
'1.SKM4.640180', '1.SKM9.640192'}}
self.assertCountEqual(analysis.samples, exp)
def test_share_unshare(self):
analysis = self._create_analyses_with_samples()
user = qdb.user.User("<EMAIL>")
self.assertEqual(analysis.shared_with, [])
analysis.share(user)
exp = [user]
self.assertEqual(analysis.shared_with, exp)
analysis.unshare(user)
self.assertEqual(analysis.shared_with, [])
def test_build_mapping_file(self):
analysis = self._create_analyses_with_samples()
samples = {4: ['1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196']}
analysis._build_mapping_file(samples)
obs = qdb.util.get_filepath_information(
analysis.mapping_file)['fullpath']
exp = self.get_fp("%s_analysis_mapping.txt" % analysis.id)
self.assertEqual(obs, exp)
obs = qdb.metadata_template.util.load_template_to_dataframe(
obs, index='#SampleID')
exp = qdb.metadata_template.util.load_template_to_dataframe(
self.map_exp_fp, index='#SampleID')
# assert_frame_equal assumes same order on the rows, thus sorting
# frames by index
obs.sort_index(inplace=True)
exp.sort_index(inplace=True)
# then sorting columns
obs = obs.reindex(sorted(obs.columns), axis=1)
exp = exp.reindex(sorted(exp.columns), axis=1)
assert_frame_equal(obs, exp, check_like=True)
def test_build_mapping_file_duplicated_samples_no_merge(self):
analysis = self._create_analyses_with_samples()
samples = {4: ['1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196'],
3: ['1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196']}
analysis._build_mapping_file(samples, True)
mapping_fp = qdb.util.get_filepath_information(
analysis.mapping_file)['fullpath']
obs = qdb.metadata_template.util.load_template_to_dataframe(
mapping_fp, index='#SampleID')
exp = qdb.metadata_template.util.load_template_to_dataframe(
self.duplicated_samples_not_merged, index='#SampleID')
# assert_frame_equal assumes same order on the rows, thus sorting
# frames by index
obs.sort_index(inplace=True)
exp.sort_index(inplace=True)
# then sorting columns
obs = obs.reindex(sorted(obs.columns), axis=1)
exp = exp.reindex(sorted(exp.columns), axis=1)
assert_frame_equal(obs, exp, check_like=True)
def test_build_mapping_file_duplicated_samples_merge(self):
analysis = self._create_analyses_with_samples()
samples = {4: ['1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196'],
3: ['1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196']}
analysis._build_mapping_file(samples)
mapping_fp = qdb.util.get_filepath_information(
analysis.mapping_file)['fullpath']
obs = qdb.metadata_template.util.load_template_to_dataframe(
mapping_fp, index='#SampleID')
exp = qdb.metadata_template.util.load_template_to_dataframe(
self.map_exp_fp, index='#SampleID')
# assert_frame_equal assumes same order on the rows, thus sorting
# frames by index
obs.sort_index(inplace=True)
exp.sort_index(inplace=True)
# then sorting columns
obs = obs.reindex(sorted(obs.columns), axis=1)
exp = exp.reindex(sorted(exp.columns), axis=1)
assert_frame_equal(obs, exp, check_like=True)
def test_build_biom_tables(self):
analysis = self._create_analyses_with_samples()
grouped_samples = {
'18S || algorithm': [
(4, ['1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196'])]}
obs_bioms = analysis._build_biom_tables(grouped_samples)
biom_fp = self.get_fp(
"%s_analysis_18S_algorithm.biom" % analysis.id)
obs = [(a, basename(b)) for a, b, _ in obs_bioms]
self.assertEqual(obs, [('18S', basename(biom_fp))])
table = load_table(obs_bioms[0][1])
obs = set(table.ids(axis='sample'))
exp = {'1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196'}
self.assertEqual(obs, exp)
def test_build_biom_tables_with_references(self):
analysis = self._create_analyses_with_samples()
analysis_id = analysis.id
grouped_samples = {
('18S || Pick closed-reference OTUs (reference: 1) | '
'Split libraries FASTQ'): [
(4, ['1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196']),
(5, ['1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196'])],
('18S || Pick closed-reference OTUs (reference: 1) | '
'Trim (lenght: 150)'): [
(4, ['1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196']),
(5, ['1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196'])],
('16S || Pick closed-reference OTUs (reference: 2) | '
'Trim (lenght: 100)'): [
(4, ['1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196']),
(5, ['1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196'])]}
obs_bioms = analysis._build_biom_tables(grouped_samples)
obs = [(a, basename(b)) for a, b, _ in obs_bioms]
exp = [
('16S', '%s_analysis_16S_PickclosedreferenceOTUsreference2'
'Trimlenght100.biom' % analysis_id),
('18S', '%s_analysis_18S_PickclosedreferenceOTUsreference1'
'SplitlibrariesFASTQ.biom' % analysis_id),
('18S', '%s_analysis_18S_PickclosedreferenceOTUsreference1'
'Trimlenght150.biom' % analysis_id)]
self.assertCountEqual(obs, exp)
exp = {'1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196'}
for dt, fp, _ in obs_bioms:
table = load_table(fp)
obs = set(table.ids(axis='sample'))
self.assertEqual(obs, exp)
def test_build_biom_tables_duplicated_samples_not_merge(self):
analysis = self._create_analyses_with_samples()
grouped_samples = {
'18S || algorithm': [
(4, ['1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196']),
(5, ['1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196'])]}
obs_bioms = analysis._build_biom_tables(grouped_samples, True)
obs = [(a, basename(b)) for a, b, _ in obs_bioms]
biom_fp = (
"%s_analysis_18S_algorithm.biom" % analysis.id)
self.assertEqual(obs, [('18S', biom_fp)])
table = load_table(obs_bioms[0][1])
obs = set(table.ids(axis='sample'))
exp = {'4.1.SKD8.640184', '4.1.SKB7.640196', '4.1.SKB8.640193',
'5.1.SKB8.640193', '5.1.SKB7.640196', '5.1.SKD8.640184'}
self.assertCountEqual(obs, exp)
def test_build_biom_tables_raise_error_due_to_sample_selection(self):
grouped_samples = {
'18S || algorithm': [
(4, ['sample_name_1', 'sample_name_2', 'sample_name_3'])]}
with self.assertRaises(RuntimeError):
self.analysis._build_biom_tables(grouped_samples)
def test_build_files(self):
analysis = self._create_analyses_with_samples()
biom_tables = analysis.build_files(True)
# testing that the generated files have the same sample ids
biom_fp = biom_tables[0][1]
biom_ids = load_table(biom_fp).ids(axis='sample')
mapping_fp = qdb.util.get_filepath_information(
analysis.mapping_file)['fullpath']
mf_ids = qdb.metadata_template.util.load_template_to_dataframe(
mapping_fp, index='#SampleID').index
self.assertCountEqual(biom_ids, mf_ids)
# now that the samples have been prefixed
exp = ['1.SKM9.640192', '1.SKM4.640180', '1.SKD8.640184',
'1.SKB8.640193', '1.SKB7.640196']
self.assertCountEqual(biom_ids, exp)
def test_build_files_post_processing_cmd(self):
tmp = qdb.artifact.Artifact(4).processing_parameters.command
cmd_id = tmp.id
# set a known artifact's additional processing command
# to a known value. Then test for it.
# qiita_db/test/support_files/worker.py will work w/py2.7 & 3.6 envs.
results = {}
results['script_env'] = 'source deactivate; source activate qiita;'
results['script_path'] = 'qiita_db/test/support_files/worker.py'
# no additional parameters are needed for worker.py
# fp_biom and fp_archive will be generated by build_files()
results['script_params'] = {}
# convert to json representation and store in PostgreSQL
results = dumps(results)
sql = """UPDATE qiita.software_command
SET post_processing_cmd = %s
WHERE command_id = %s"""
qdb.sql_connection.perform_as_transaction(sql, [results, cmd_id])
# create a sample analysis and run build_files on it.
analysis = self._create_analyses_with_samples()
biom_files = analysis.build_files(False)
# if build_files used additional processing commands, it will
# return a couple of tuples, where the third element contains
# output archive-artifact data.
self.assertEqual(2, len(biom_files))
aid = analysis.id
exp = [('%d_analysis_18S_PickclosedreferenceOTUsSplitlibraries'
'FASTQ.biom' % aid, None),
('%d_analysis_18S_PickclosedreferenceOTUsSplitlibraries'
'FASTQ.biom' % aid, 'archive_%d.tre' % aid)]
obs = [(basename(fp1),
basename(fp2) if fp2 is not None else None)
for _, fp1, fp2 in biom_files]
self.assertEqual(obs, exp)
# cleanup (assume command was NULL previously)
sql = """UPDATE qiita.software_command
SET post_processing_cmd = NULL
WHERE command_id = %s"""
qdb.sql_connection.perform_as_transaction(sql, [cmd_id])
def test_build_files_merge_duplicated_sample_ids(self):
user = qdb.user.User("<EMAIL>")
dflt_analysis = user.default_analysis
dflt_analysis.add_samples(
{4: ['1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196',
'1.SKM9.640192', '1.SKM4.640180'],
5: ['1.SKB8.640193', '1.SKB7.640196', '1.SKM9.640192',
'1.SKM4.640180', '1.SKD8.640184'],
6: ['1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196',
'1.SKM9.640192', '1.SKM4.640180']})
new = qdb.analysis.Analysis.create(
user, "newAnalysis", "A New Analysis", from_default=True,
merge_duplicated_sample_ids=True)
self._wait_for_jobs(new)
biom_tables = new.build_files(False)
# testing that the generated files have the same sample ids
biom_ids = []
for _, fp, _ in biom_tables:
biom_ids.extend(load_table(fp).ids(axis='sample'))
mapping_fp = qdb.util.get_filepath_information(
new.mapping_file)['fullpath']
mf_ids = qdb.metadata_template.util.load_template_to_dataframe(
mapping_fp, index='#SampleID').index
self.assertCountEqual(biom_ids, mf_ids)
# now that the samples have been prefixed
exp = ['4.1.SKM9.640192', '4.1.SKM4.640180', '4.1.SKD8.640184',
'4.1.SKB8.640193', '4.1.SKB7.640196',
'5.1.SKM9.640192', '5.1.SKM4.640180', '5.1.SKD8.640184',
'5.1.SKB8.640193', '5.1.SKB7.640196',
'6.1.SKM9.640192', '6.1.SKM4.640180', '6.1.SKD8.640184',
'6.1.SKB8.640193', '6.1.SKB7.640196']
self.assertCountEqual(biom_ids, exp)
def test_add_file(self):
# Tested indirectly through build_files
pass
def test_is_public_make_public(self):
analysis = self._create_analyses_with_samples()
self.assertFalse(analysis.is_public)
# testing errors
with self.assertRaises(ValueError):
analysis.make_public()
# testing successfully making public
# 4 is the only artifact being used in _create_analyses_with_samples
qdb.artifact.Artifact(4).visibility = 'public'
analysis.make_public()
self.assertTrue(analysis.is_public)
if __name__ == "__main__":
main()
```
#### File: api_proxy/tests/test_processing.py
```python
from unittest import TestCase, main
from json import dumps
from qiita_core.util import qiita_test_checker
from qiita_db.processing_job import ProcessingWorkflow, ProcessingJob
from qiita_db.software import Command, Parameters
from qiita_db.user import User
from qiita_pet.handlers.api_proxy.processing import (
list_commands_handler_get_req, list_options_handler_get_req,
workflow_handler_post_req, workflow_handler_patch_req, job_ajax_get_req,
job_ajax_patch_req)
class TestProcessingAPIReadOnly(TestCase):
def test_list_commands_handler_get_req(self):
obs = list_commands_handler_get_req('FASTQ', True)
exp = {'status': 'success',
'message': '',
'commands': [{'id': 1, 'command': 'Split libraries FASTQ',
'output': [['demultiplexed', 'Demultiplexed']]}]}
self.assertEqual(obs, exp)
obs = list_commands_handler_get_req('Demultiplexed', True)
exp = {'status': 'success',
'message': '',
'commands': [{'id': 3, 'command': 'Pick closed-reference OTUs',
'output': [['OTU table', 'BIOM']]}]}
self.assertEqual(obs, exp)
obs = list_commands_handler_get_req('BIOM', False)
exp = {'status': 'success',
'message': '',
'commands': [
{'command': 'Summarize Taxa', 'id': 9,
'output': [['taxa_summary', 'taxa_summary']]},
{'command': 'Beta Diversity', 'id': 10,
'output': [['distance_matrix', 'beta_div_plots']]},
{'command': 'Alpha Rarefaction', 'id': 11,
'output': [['rarefaction_curves', 'rarefaction_curves']]},
{'command': 'Single Rarefaction', 'id': 12,
'output': [['rarefied_table', 'BIOM']]}]}
# since the order of the commands can change, test them separately
self.assertCountEqual(obs.pop('commands'), exp.pop('commands'))
self.assertEqual(obs, exp)
def test_list_options_handler_get_req(self):
obs = list_options_handler_get_req(3)
exp = {'status': 'success',
'message': '',
'options': [{'id': 10,
'name': 'Defaults',
'values': {'reference': 1,
'similarity': 0.97,
'sortmerna_coverage': 0.97,
'sortmerna_e_value': 1,
'sortmerna_max_pos': 10000,
'threads': 1}}],
'req_options': {'input_data': ('artifact', ['Demultiplexed'])},
'opt_options': {'reference': ['reference', '1'],
'similarity': ['float', '0.97'],
'sortmerna_coverage': ['float', '0.97'],
'sortmerna_e_value': ['float', '1'],
'sortmerna_max_pos': ['integer', '10000'],
'threads': ['integer', '1']}}
# First check that the keys are the same
self.assertCountEqual(obs, exp)
self.assertEqual(obs['status'], exp['status'])
self.assertEqual(obs['message'], exp['message'])
self.assertEqual(obs['options'], exp['options'])
self.assertEqual(obs['req_options'], exp['req_options'])
self.assertEqual(obs['opt_options'], exp['opt_options'])
def test_job_ajax_get_req(self):
obs = job_ajax_get_req("063e553b-327c-4818-ab4a-adfe58e49860")
exp = {
'status': 'success',
'message': '',
'job_id': "063e553b-327c-4818-ab4a-adfe58e49860",
'job_external_id': "Not Available",
'job_status': "queued",
'job_step': None,
'job_error': None,
'job_parameters': {'barcode_type': u'golay_12',
'input_data': 1,
'max_bad_run_length': 3,
'max_barcode_errors': 1.5,
'min_per_read_length_fraction': 0.75,
'phred_quality_threshold': 3,
'rev_comp': False,
'rev_comp_barcode': False,
'rev_comp_mapping_barcodes': False,
'sequence_max_n': 0,
'phred_offset': 'auto'},
'command': 'Split libraries FASTQ',
'command_description': 'Demultiplexes and applies quality '
'control to FASTQ data',
'software': 'QIIME',
'software_version': '1.9.1'}
self.assertEqual(obs, exp)
@qiita_test_checker()
class TestProcessingAPI(TestCase):
def test_workflow_handler_post_req(self):
params = ('{"max_barcode_errors": 1.5, "barcode_type": "golay_12", '
'"max_bad_run_length": 3, "phred_offset": "auto", '
'"rev_comp": false, "phred_quality_threshold": 3, '
'"input_data": 1, "rev_comp_barcode": false, '
'"rev_comp_mapping_barcodes": false, '
'"min_per_read_length_fraction": 0.75, "sequence_max_n": 0}')
obs = workflow_handler_post_req("<EMAIL>", 1, params)
self.assertRegex(
obs.pop('message'), 'Cannot create job because the parameters are '
'the same as jobs that are queued, running or already have '
'succeeded:\n')
exp = {'status': 'error', 'workflow_id': None, 'job': None}
self.assertEqual(obs, exp)
def test_workflow_handler_patch_req(self):
# Create a new workflow so it is in construction
exp_command = Command(1)
json_str = (
'{"input_data": 1, "max_barcode_errors": 1.5, '
'"barcode_type": "golay_12", "max_bad_run_length": 3, '
'"rev_comp": false, "phred_quality_threshold": 3, '
'"rev_comp_barcode": false, "rev_comp_mapping_barcodes": false, '
'"min_per_read_length_fraction": 0.75, "sequence_max_n": 0}')
exp_params = Parameters.load(exp_command, json_str=json_str)
exp_user = User('<EMAIL>')
name = "Test processing workflow"
# tests success
wf = ProcessingWorkflow.from_scratch(
exp_user, exp_params, name=name, force=True)
graph = wf.graph
nodes = list(graph.nodes())
job_id = nodes[0].id
value = {'dflt_params': 10,
'connections': {job_id: {'demultiplexed': 'input_data'}}}
obs = workflow_handler_patch_req(
'add', '/%s/' % wf.id, req_value=dumps(value))
new_jobs = set(wf.graph.nodes()) - set(nodes)
self.assertEqual(len(new_jobs), 1)
new_job = new_jobs.pop()
exp = {'status': 'success',
'message': '',
'job': {'id': new_job.id,
'inputs': [job_id],
'label': 'Pick closed-reference OTUs',
'outputs': [['OTU table', 'BIOM']]}}
self.assertEqual(obs, exp)
obs = workflow_handler_patch_req(
'remove', '/%s/%s/' % (wf.id, new_job.id))
exp = {'status': 'success', 'message': ''}
jobs = set(wf.graph.nodes()) - set(nodes)
self.assertEqual(jobs, set())
def test_workflow_handler_patch_req_error(self):
# Incorrect path parameter
obs = workflow_handler_patch_req('add', '/1/extra/')
exp = {'status': 'error',
'message': 'Incorrect path parameter'}
self.assertEqual(obs, exp)
# Workflow does not exist
obs = workflow_handler_patch_req('add', '/1000/')
exp = {'status': 'error',
'message': 'Workflow 1000 does not exist'}
self.assertEqual(obs, exp)
# Operation not supported
obs = workflow_handler_patch_req('replace', '/1/')
exp = {'status': 'error',
'message': 'Operation "replace" not supported. '
'Current supported operations: add'}
self.assertEqual(obs, exp)
# Incorrect path parameter (op = remove)
obs = workflow_handler_patch_req('remove', '/1/')
exp = {'status': 'error',
'message': 'Incorrect path parameter'}
self.assertEqual(obs, exp)
def test_job_ajax_patch_req(self):
# Create a new job - through a workflow since that is the only way
# of creating jobs in the interface
exp_command = Command(1)
json_str = (
'{"input_data": 1, "max_barcode_errors": 1.5, '
'"barcode_type": "golay_12", "max_bad_run_length": 3, '
'"rev_comp": false, "phred_quality_threshold": 3, '
'"rev_comp_barcode": false, "rev_comp_mapping_barcodes": false, '
'"min_per_read_length_fraction": 0.75, "sequence_max_n": 0}')
exp_params = Parameters.load(exp_command, json_str=json_str)
exp_user = User('<EMAIL>')
name = "Test processing workflow"
# tests success
wf = ProcessingWorkflow.from_scratch(
exp_user, exp_params, name=name, force=True)
graph = wf.graph
nodes = list(graph.nodes())
job_id = nodes[0].id
# Incorrect path parameter
obs = job_ajax_patch_req('remove', '/%s/somethingelse' % job_id)
exp = {'status': 'error',
'message': 'Incorrect path parameter: missing job id'}
self.assertEqual(obs, exp)
obs = job_ajax_patch_req('remove', '/')
exp = {'status': 'error',
'message': 'Incorrect path parameter: missing job id'}
self.assertEqual(obs, exp)
# Job id is not like a job id
obs = job_ajax_patch_req('remove', '/notAJobId')
exp = {'status': 'error',
'message': 'Incorrect path parameter: '
'notAJobId is not a recognized job id'}
self.assertEqual(obs, exp)
# Job doesn't exist
obs = job_ajax_patch_req('remove',
'/6d368e16-2242-4cf8-87b4-a5dc40bc890b')
exp = {'status': 'error',
'message': 'Incorrect path parameter: '
'6d368e16-2242-4cf8-87b4-a5dc40bc890b is not a '
'recognized job id'}
self.assertEqual(obs, exp)
# in_construction job
obs = job_ajax_patch_req('remove', '/%s' % job_id)
exp = {'status': 'error',
'message': "Can't delete job %s. It is 'in_construction' "
"status. Please use /study/process/workflow/"
% job_id}
self.assertEqual(obs, exp)
# job status != 'error'
job = ProcessingJob(job_id)
job._set_status('queued')
obs = job_ajax_patch_req('remove', '/%s' % job_id)
exp = {'status': 'error',
'message': 'Only jobs in "error" status can be deleted.'}
self.assertEqual(obs, exp)
# Operation not supported
job._set_status('queued')
obs = job_ajax_patch_req('add', '/%s' % job_id)
exp = {'status': 'error',
'message': 'Operation "add" not supported. Current supported '
'operations: remove'}
self.assertEqual(obs, exp)
# Test success
job._set_error('Killed for testing')
obs = job_ajax_patch_req('remove', '/%s' % job_id)
exp = {'status': 'success',
'message': ''}
self.assertEqual(obs, exp)
if __name__ == '__main__':
main()
```
#### File: api_proxy/tests/test_sample_template.py
```python
from unittest import TestCase, main
from os import remove, mkdir
from os.path import join, exists
from json import dumps
from qiita_core.util import qiita_test_checker
from qiita_core.qiita_settings import r_client
import qiita_db as qdb
from qiita_pet.handlers.api_proxy.sample_template import (
sample_template_filepaths_get_req, sample_template_get_req,
_check_sample_template_exists, sample_template_samples_get_req,
sample_template_category_get_req, sample_template_meta_cats_get_req,
get_sample_template_processing_status, analyses_associated_with_study,
SAMPLE_TEMPLATE_KEY_FORMAT)
@qiita_test_checker()
class TestSampleAPI(TestCase):
def setUp(self):
info = {
"timeseries_type_id": 1,
"metadata_complete": True,
"mixs_compliant": True,
"study_alias": "FCM",
"study_description": "DESC",
"study_abstract": "ABS",
"principal_investigator_id": qdb.study.StudyPerson(3),
"lab_person_id": qdb.study.StudyPerson(1)
}
self.new_study = qdb.study.Study.create(
qdb.user.User('<EMAIL>'), "Some New Study", info)
base_dir = join(qdb.util.get_mountpoint('uploads')[0][1],
str(self.new_study.id))
if not exists(base_dir):
mkdir(base_dir)
self.new_study_fp = join(base_dir, 'uploaded_file.txt')
if not exists(self.new_study_fp):
with open(self.new_study_fp, 'w') as f:
f.write('')
def tearDown(self):
base_dir = qdb.util.get_mountpoint('uploads')[0][1]
fp = join(base_dir, '1', 'uploaded_file.txt')
if not exists(fp):
with open(fp, 'w') as f:
f.write('')
if exists(self.new_study_fp):
remove(self.new_study_fp)
r_client.flushdb()
qdb.study.Study.delete(self.new_study.id)
def test_check_sample_template_exists(self):
obs = _check_sample_template_exists(1)
self.assertEqual(obs, {'status': 'success', 'message': ''})
def test_check_sample_template_exists_no_template(self):
obs = _check_sample_template_exists(self.new_study.id)
self.assertEqual(obs, {'status': 'error',
'message': 'Sample template %d does not '
'exist' % self.new_study.id})
def test_sample_template_get_req(self):
obs = sample_template_get_req(1, '<EMAIL>')
self.assertCountEqual(obs.keys(), ['status', 'message', 'template'])
self.assertEqual(obs['status'], 'success')
self.assertEqual(obs['message'], '')
self.assertEqual(len(obs['template']), 27)
self.assertEqual(str(
obs['template']['1.SKB2.640194']['collection_timestamp']),
'2011-11-11 13:00:00')
del obs['template']['1.SKB2.640194']['collection_timestamp']
self.assertEqual(obs['template']['1.SKB2.640194'], {
'physical_specimen_location': 'ANL',
'texture': '64.6 sand, 17.6 silt, 17.8 clay',
'common_name': 'soil metagenome',
'water_content_soil': '0.164',
'env_feature': 'ENVO:plant-associated habitat',
'assigned_from_geo': 'n',
'altitude': '0',
'tot_org_carb': '5',
'env_biome': 'ENVO:Temperate grasslands, savannas, and shrubland '
'biome',
'sample_type': 'ENVO:soil',
'scientific_name': '1118232',
'host_taxid': '3483',
'latitude': '35.2374368957',
'ph': '6.94',
'description_duplicate': 'Burmese bulk',
'elevation': '114',
'description': 'Cannabis Soil Microbiome',
'physical_specimen_remaining': 'true',
'dna_extracted': 'true',
'taxon_id': '410658',
'samp_salinity': '7.15',
'host_subject_id': '1001:B4',
'season_environment': 'winter',
'env_package': 'soil',
'temp': '15',
'qiita_study_id': '1',
'country': 'GAZ:United States of America',
'longitude': '68.5041623253',
'tot_nitro': '1.41',
'depth': '0.15',
'anonymized_name': 'SKB2'})
def test_sample_template_get_req_no_access(self):
obs = sample_template_get_req(1, '<EMAIL>')
exp = {'status': 'error',
'message': 'User does not have access to study'}
self.assertEqual(obs, exp)
def test_sample_template_get_req_no_template(self):
obs = sample_template_get_req(self.new_study.id, '<EMAIL>')
self.assertEqual(obs, {'status': 'error',
'message': 'Sample template %d does not '
'exist' % self.new_study.id})
def test_analyses_associated_with_study(self):
obs = analyses_associated_with_study(self.new_study.id, '<EMAIL>')
exp = {'status': 'success', 'message': '', 'values': []}
self.assertEqual(obs, exp)
obs = analyses_associated_with_study(1, '<EMAIL>')
exp = {'status': 'success', 'message': '', 'values': [
{'analysis_id': 1, 'name': 'SomeAnalysis', 'email': '<EMAIL>',
'dflt': False, 'artifact_ids': [8, 9], 'prep_ids': [1],
'visibility': ['sandbox']},
{'analysis_id': 2, 'name': 'SomeSecondAnalysis',
'email': '<EMAIL>', 'dflt': False, 'artifact_ids': None,
'prep_ids': [1], 'visibility': None},
{'analysis_id': 3, 'name': '<EMAIL>-dflt-1',
'email': '<EMAIL>', 'dflt': True, 'artifact_ids': None,
'prep_ids': [1], 'visibility': None}]}
self.assertEqual(obs, exp)
obs = analyses_associated_with_study(
self.new_study.id, '<EMAIL>')
exp = {'status': 'error',
'message': 'User does not have access to study'}
self.assertEqual(obs, exp)
def test_get_sample_template_processing_status(self):
key = SAMPLE_TEMPLATE_KEY_FORMAT % 1
obs_proc, obs_at, obs_am = get_sample_template_processing_status(1)
self.assertFalse(obs_proc)
self.assertEqual(obs_at, "")
self.assertEqual(obs_am, "")
# With job id and processing
qiita_plugin = qdb.software.Software.from_name_and_version('Qiita',
'alpha')
cmd = qiita_plugin.get_command('update_sample_template')
params = qdb.software.Parameters.load(
cmd, values_dict={'study': 1, 'template_fp': 'ignored'})
job = qdb.processing_job.ProcessingJob.create(
qdb.user.User('<EMAIL>'), params, True)
job._set_status('running')
r_client.set(key, dumps({'job_id': job.id}))
obs_proc, obs_at, obs_am = get_sample_template_processing_status(1)
self.assertTrue(obs_proc)
self.assertEqual(obs_at, "info")
self.assertEqual(
obs_am, "This sample template is currently being processed")
# With job id and success
job._set_status('success')
r_client.set(key, dumps({'job_id': job.id, 'alert_type': 'warning',
'alert_msg': 'Some\nwarning'}))
obs_proc, obs_at, obs_am = get_sample_template_processing_status(1)
self.assertFalse(obs_proc)
self.assertEqual(obs_at, "warning")
self.assertEqual(obs_am, "Some</br>warning")
# With job and not success
job = qdb.processing_job.ProcessingJob.create(
qdb.user.User('<EMAIL>'), params, True)
job._set_status('running')
job._set_error('Some\nerror')
r_client.set(key, dumps({'job_id': job.id}))
obs_proc, obs_at, obs_am = get_sample_template_processing_status(1)
self.assertFalse(obs_proc)
self.assertEqual(obs_at, "danger")
self.assertEqual(obs_am, "Some</br>error")
def test_sample_template_columns_get_req_no_template(self):
# Test sample template not existing
obs = sample_template_get_req(self.new_study.id, '<EMAIL>')
exp = {'status': 'error',
'message': 'Sample template %d does not exist' %
self.new_study.id}
self.assertEqual(obs, exp)
def test_sample_template_samples_get_req(self):
obs = sample_template_samples_get_req(1, '<EMAIL>')
exp = {'status': 'success',
'message': '',
'samples': ['1.SKB1.640202', '1.SKB2.640194', '1.SKB3.640195',
'1.SKB4.640189', '1.SKB5.640181', '1.SKB6.640176',
'1.SKB7.640196', '1.SKB8.640193', '1.SKB9.640200',
'1.SKD1.640179', '1.SKD2.640178', '1.SKD3.640198',
'1.SKD4.640185', '1.SKD5.640186', '1.SKD6.640190',
'1.SKD7.640191', '1.SKD8.640184', '1.SKD9.640182',
'1.SKM1.640183', '1.SKM2.640199', '1.SKM3.640197',
'1.SKM4.640180', '1.SKM5.640177', '1.SKM6.640187',
'1.SKM7.640188', '1.SKM8.640201', '1.SKM9.640192']}
self.assertEqual(obs, exp)
def test_sample_template_samples_get_req_no_access(self):
obs = sample_template_samples_get_req(1, '<EMAIL>')
exp = {'status': 'error',
'message': 'User does not have access to study'}
self.assertEqual(obs, exp)
def test_sample_template_sample_get_req_no_template(self):
obs = sample_template_samples_get_req(self.new_study.id,
'<EMAIL>')
self.assertEqual(obs, {'status': 'error',
'message': 'Sample template %d does not '
'exist' % self.new_study.id})
def test_sample_template_category_get_req(self):
obs = sample_template_category_get_req('latitude', 1, '<EMAIL>')
exp = {'status': 'success',
'message': '',
'values': {'1.SKB2.640194': '35.2374368957',
'1.SKM4.640180': 'Not applicable',
'1.SKB3.640195': '95.2060749748',
'1.SKB6.640176': '78.3634273709',
'1.SKD6.640190': '29.1499460692',
'1.SKM6.640187': '0.291867635913',
'1.SKD9.640182': '23.1218032799',
'1.SKM8.640201': '3.21190859967',
'1.SKM2.640199': '82.8302905615',
'1.SKD2.640178': '53.5050692395',
'1.SKB7.640196': '13.089194595',
'1.SKD4.640185': '40.8623799474',
'1.SKB8.640193': '74.0894932572',
'1.SKM3.640197': 'Not applicable',
'1.SKD5.640186': '85.4121476399',
'1.SKB1.640202': '4.59216095574',
'1.SKM1.640183': '38.2627021402',
'1.SKD1.640179': '68.0991287718',
'1.SKD3.640198': '84.0030227585',
'1.SKB5.640181': '10.6655599093',
'1.SKB4.640189': '43.9614715197',
'1.SKB9.640200': '12.6245524972',
'1.SKM9.640192': '12.7065957714',
'1.SKD8.640184': '57.571893782',
'1.SKM5.640177': '44.9725384282',
'1.SKM7.640188': '60.1102854322',
'1.SKD7.640191': '68.51099627'}}
self.assertEqual(obs, exp)
def test_sample_template_category_get_req_no_access(self):
obs = sample_template_category_get_req('latitude', 1,
'<EMAIL>')
exp = {'status': 'error',
'message': 'User does not have access to study'}
self.assertEqual(obs, exp)
def test_sample_template_category_get_req_no_template(self):
obs = sample_template_category_get_req('latitiude', self.new_study.id,
'<EMAIL>')
self.assertEqual(obs, {'status': 'error',
'message': 'Sample template %d does not '
'exist' % self.new_study.id})
def test_sample_template_filepaths_get_req(self):
obs = sample_template_filepaths_get_req(1, '<EMAIL>')
# have to check each key individually as the filepaths will change
self.assertEqual(obs['status'], 'success')
self.assertEqual(obs['message'], '')
# [0] the fp_id is the first element, that should change
fp_ids = [fp[0] for fp in obs['filepaths']]
self.assertCountEqual(fp_ids, [17, 23])
def test_sample_template_filepaths_get_req_no_access(self):
obs = sample_template_filepaths_get_req(1, '<EMAIL>')
exp = {'status': 'error',
'message': 'User does not have access to study'}
self.assertEqual(obs, exp)
def test_sample_template_filepaths_get_req_no_template(self):
obs = sample_template_filepaths_get_req(self.new_study.id,
'<EMAIL>')
self.assertEqual(obs, {'status': 'error',
'message': 'Sample template %d does not '
'exist' % self.new_study.id})
def test_sample_template_meta_cats_get_req(self):
obs = sample_template_meta_cats_get_req(1, '<EMAIL>')
exp = {'status': 'success',
'message': '',
'categories': [
'altitude', 'anonymized_name', 'assigned_from_geo',
'collection_timestamp', 'common_name', 'country', 'depth',
'description', 'description_duplicate', 'dna_extracted',
'elevation', 'env_biome', 'env_feature', 'env_package',
'host_subject_id', 'host_taxid', 'latitude', 'longitude',
'ph', 'physical_specimen_location',
'physical_specimen_remaining', 'samp_salinity',
'sample_type', 'scientific_name', 'season_environment',
'taxon_id', 'temp', 'texture', 'tot_nitro', 'tot_org_carb',
'water_content_soil']}
self.assertEqual(obs, exp)
def test_sample_template_meta_cats_get_req_no_access(self):
obs = sample_template_meta_cats_get_req(1, '<EMAIL>')
exp = {'status': 'error',
'message': 'User does not have access to study'}
self.assertEqual(obs, exp)
def test_sample_template_meta_cats_get_req_no_template(self):
obs = sample_template_meta_cats_get_req(self.new_study.id,
'<EMAIL>')
self.assertEqual(obs, {'status': 'error',
'message': 'Sample template %d does not '
'exist' % self.new_study.id})
if __name__ == '__main__':
main()
```
#### File: handlers/study_handlers/artifact.py
```python
from tornado.web import authenticated
from qiita_pet.handlers.util import to_int
from qiita_pet.handlers.base_handlers import BaseHandler
from qiita_pet.handlers.api_proxy import (
artifact_graph_get_req, artifact_types_get_req, artifact_post_req,
artifact_status_put_req, artifact_get_req, artifact_get_prep_req,
artifact_get_info)
from qiita_core.util import execute_as_transaction
from qiita_core.qiita_settings import qiita_config
class ArtifactGraphAJAX(BaseHandler):
@authenticated
def get(self):
direction = self.get_argument('direction')
artifact = to_int(self.get_argument('artifact_id'))
self.write(artifact_graph_get_req(artifact, direction,
self.current_user.id))
class NewArtifactHandler(BaseHandler):
@authenticated
def get(self):
study_id = self.get_argument("study_id")
prep_id = self.get_argument("prep_template_id")
artifact_types = [(at, desc) for at, desc, _, _, is_user_uploadable in
artifact_types_get_req()['types']
if is_user_uploadable]
self.render("study_ajax/add_artifact.html",
study_id=study_id, prep_id=prep_id,
artifact_types=artifact_types)
@authenticated
@execute_as_transaction
def post(self):
artifact_type = self.get_argument('artifact-type')
name = self.get_argument('name')
prep_id = self.get_argument('prep-template-id')
artifact_id = self.get_argument('import-artifact')
# Request the rest of the arguments, which will be the files
files = dict()
for arg in self.request.arguments:
if arg not in ['name', 'prep-template-id', 'artifact-type',
'import-artifact']:
arg_name = arg
# removing ending [], in case they exist, necessary for JS
# array transformation
if arg_name.endswith('[]'):
arg_name = arg_name[:-2]
files[arg_name] = self.get_argument(arg)
artifact = artifact_post_req(
self.current_user.id, files, artifact_type, name, prep_id,
artifact_id)
self.write(artifact)
class ArtifactGetSamples(BaseHandler):
@authenticated
def get(self):
aids = map(int, self.request.arguments.get('ids[]', []))
response = artifact_get_prep_req(self.current_user.id, aids)
self.write(response)
class ArtifactGetInfo(BaseHandler):
@authenticated
def post(self):
aids = map(int, self.request.arguments.get('ids[]', []))
only_biom = self.get_argument('only_biom', 'True') == 'True'
response = artifact_get_info(self.current_user.id, aids, only_biom)
self.write(response)
class ArtifactAdminAJAX(BaseHandler):
@authenticated
def get(self):
artifact_id = to_int(self.get_argument('artifact_id'))
info = artifact_get_req(self.current_user.id, artifact_id)
status = info['visibility']
buttons = []
btn_base = ('<button onclick="set_admin_visibility(\'%s\', {0})" '
'class="btn btn-primary">%s</button>').format(artifact_id)
if qiita_config.require_approval:
if status == 'sandbox':
# The request approval button only appears if the processed
                # data is sandboxed and the qiita_config specifies that the
# approval should be requested
buttons.append(
btn_base % ('awaiting_approval', 'Request approval'))
elif self.current_user.level == 'admin' and \
status == 'awaiting_approval':
# The approve processed data button only appears if the user is
# an admin, the processed data is waiting to be approved and
# the qiita config requires processed data approval
buttons.append(btn_base % ('private', 'Approve artifact'))
if status == 'private':
# The make public button only appears if the status is private
buttons.append(btn_base % ('public', 'Make public'))
# The revert to sandbox button only appears if the processed data is
# not sandboxed or public
if status not in {'sandbox', 'public'}:
buttons.append(btn_base % ('sandbox', 'Revert to sandbox'))
# Add EBI and VAMPS submission buttons if allowed
if not info['ebi_run_accessions'] and info['can_submit_ebi']:
buttons.append('<a class="btn btn-primary glyphicon '
'glyphicon-export" href="/ebi_submission/{{ppd_id}}'
'" style="word-spacing: -10px;"> Submit to EBI</a>')
if not info['is_submitted_vamps'] and \
info['can_submit_vamps']:
buttons.append('<a class="btn btn-primary glyphicon '
'glyphicon-export" href="/vamps/{{ppd_id}}" '
'style="word-spacing: -10px;"> Submit to VAMPS</a>')
# Add delete button if in sandbox status
if status == 'sandbox':
buttons = ['<button class="btn btn-danger" '
'onclick="delete_artifact(%d)">Delete Artifact</button>'
% (artifact_id)]
self.write(' '.join(buttons))
@authenticated
def post(self):
visibility = self.get_argument('visibility')
artifact_id = int(self.get_argument('artifact_id'))
response = artifact_status_put_req(artifact_id, self.current_user.id,
visibility)
self.write(response)
``` |
{
"source": "jlachowski/crispy-forms-foundation",
"score": 2
} |
#### File: crispy_forms_foundation/layout/containers.py
```python
from random import randint
from django.template.loader import render_to_string
from crispy_forms import layout as crispy_forms_layout
from crispy_forms.utils import render_field, TEMPLATE_PACK
from crispy_forms import bootstrap as crispy_forms_bootstrap
__all__ = [
'Fieldset', 'Container', 'ContainerHolder', 'TabHolder',
'VerticalTabHolder', 'TabItem', 'AccordionHolder', 'AccordionItem',
]
class Fieldset(crispy_forms_layout.Fieldset):
"""
It wraps fields in a ``<fieldset>``:
.. sourcecode:: python
Fieldset("Text for the legend",
'form_field_1',
'form_field_2'
)
The first parameter is the text for the fieldset legend. This text is
context aware, so you can do things like :
.. sourcecode:: python
Fieldset("Data for {{ user.username }}",
'form_field_1',
'form_field_2'
)
"""
template = "%s/layout/fieldset.html"
class Container(crispy_forms_bootstrap.Container):
"""
Overrides original Container element to get the "active" classname from
Class attribute ``active_css_class`` so it's compatible with Foundation
5 and 6.
"""
css_class = ""
active_css_class = "active"
def get_active_css_class(self, template_pack):
# Foundation-6 addon only which use unusual class name
if template_pack != 'foundation-5':
return "is-active"
return self.active_css_class
def render(self, form, form_style, context, template_pack=TEMPLATE_PACK,
**kwargs):
active_classname = self.get_active_css_class(template_pack)
if self.active:
if active_classname and active_classname not in self.css_class:
self.css_class += ' ' + active_classname
else:
self.css_class = self.css_class.replace(active_classname, '')
return super(Container, self).render(form, form_style, context,
template_pack)
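    # Hedged usage sketch (not part of the original module): with the
    # foundation-5 template pack an active Container keeps the plain
    # "active" class, while any other pack (e.g. the foundation-6 addon)
    # switches to "is-active" via get_active_css_class().
    #
    #   item = Container('My item', 'field_1', css_id='item-1')
    #   item.get_active_css_class('foundation-5')   # -> 'active'
    #   item.get_active_css_class('foundation-6')   # -> 'is-active'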
class ContainerHolder(crispy_forms_bootstrap.ContainerHolder):
pass
class TabHolder(crispy_forms_bootstrap.TabHolder):
"""
Tabs holder object to wrap Tab item objects in a container:
.. sourcecode:: python
TabHolder(
TabItem('My tab 1', 'form_field_1', 'form_field_2'),
TabItem('My tab 2', 'form_field_3')
)
    ``TabHolder`` direct children should always be a ``TabItem`` layout item.
    A random id is built for the tab holder if you don't define it using
``css_id`` argument.
The first ``TabItem`` containing a field error will be marked as
*active* if any, else this will be just the first ``TabItem``.
"""
template = "%s/layout/tab-holder.html"
default_active_tab = None
def render(self, form, form_style, context, template_pack=TEMPLATE_PACK):
"""
Re-implement almost the same code from crispy_forms but passing
``form`` instance to item ``render_link`` method.
"""
links, content = '', ''
# accordion group needs the parent div id to set `data-parent` (I don't
# know why). This needs to be a unique id
if not self.css_id:
self.css_id = "-".join(["tabsholder", str(randint(1000, 9999))])
for tab in self.fields:
tab.active = False
# Activate item
self.open_target_group_for_form(form)
for tab in self.fields:
content += render_field(
tab, form, form_style, context, template_pack=template_pack
)
links += tab.render_link(form, template_pack)
context.update({
'tabs': self,
'links': links,
'content': content
})
template = self.get_template_name(template_pack)
return render_to_string(template, context.flatten())
class VerticalTabHolder(TabHolder):
"""
VerticalTabHolder appends vertical class to TabHolder container
"""
css_class = 'vertical'
class TabItem(Container):
"""
Tab item object. It wraps fields in a div whose default class is "tabs" and
takes a name as first argument.
    Tab item is also responsible for building its associated tab link with its
``render_link`` using the ``link_template`` attribute.
Example:
.. sourcecode:: python
TabItem('My tab', 'form_field_1', 'form_field_2', 'form_field_3')
``TabItem`` layout item has no real utility out of a ``TabHolder``.
"""
template = "%s/layout/tab-item.html"
link_template = "%s/layout/tab-link.html"
def has_errors(self, form):
"""
Find tab fields listed as invalid
"""
return any([fieldname_error for fieldname_error in form.errors.keys()
if fieldname_error in self])
def render_link(self, form, template_pack=TEMPLATE_PACK, **kwargs):
"""
Render the link for the tab-pane. It must be called after render so
``css_class`` is updated with ``active`` class name if needed.
"""
link_template = self.link_template % template_pack
return render_to_string(link_template,
{
'link': self,
'item_has_errors': self.has_errors(form)
})
class AccordionHolder(crispy_forms_bootstrap.Accordion):
"""
Accordion items holder object to wrap Accordion item objects in a
container:
.. sourcecode:: python
AccordionHolder(
AccordionItem("group name", "form_field_1", "form_field_2"),
AccordionItem("another group name", "form_field"),
)
    ``AccordionHolder`` direct children should always be an ``AccordionItem``
layout item.
    A random id is built for the accordion holder if you don't define it
using ``css_id`` argument.
The first ``AccordionItem`` containing a field error will be marked as
*active* if any, else this will be just the first ``AccordionItem``.
"""
template = "%s/layout/accordion-holder.html"
def render(self, form, form_style, context, template_pack=TEMPLATE_PACK,
**kwargs):
"""
Re-implement almost the same code from crispy_forms but using
``form`` instance to catch field errors.
"""
content = ''
# accordion group needs the parent div id to set `data-parent` (I don't
# know why). This needs to be a unique id
if not self.css_id:
self.css_id = "-".join(["accordion",
str(randint(1000, 9999))])
# Active first 'AccordionItem' containing a field error if any, else
# active first holder item
self.open_target_group_for_form(form)
for group in self.fields:
group.data_parent = self.css_id
group.item_has_errors = any([fieldname_error for fieldname_error in
form.errors.keys()
if fieldname_error in group])
content += render_field(
group, form, form_style, context, template_pack=template_pack,
**kwargs
)
template = self.get_template_name(template_pack)
context.update({'accordion': self, 'content': content})
return render_to_string(template, context.flatten())
class AccordionItem(crispy_forms_bootstrap.AccordionGroup):
"""
Accordion item object. It wraps given fields inside an accordion
tab. It takes accordion tab name as first argument.
The item name is also slugified to build an id for the tab if you don't
define it using ``css_id`` argument.
Example:
.. sourcecode:: python
AccordionItem("group name", "form_field_1", "form_field_2")
"""
template = "%s/layout/accordion-item.html"
``` |
{
"source": "jlachowski/django-termsandconditions",
"score": 2
} |
#### File: django-termsandconditions/termsandconditions/views.py
```python
from django import VERSION as DJANGO_VERSION
from django.contrib.auth.models import User
from django.db import IntegrityError
from .forms import UserTermsAndConditionsModelForm, EmailTermsForm
from .models import TermsAndConditions, UserTermsAndConditions
from django.conf import settings
from django.contrib import messages
from django.utils.translation import gettext as _
from django.http import HttpResponseRedirect
from django.views.generic import DetailView, CreateView, FormView
from django.template.loader import get_template
from django.core.mail import send_mail
from django.core.cache import cache
import logging
from smtplib import SMTPException
LOGGER = logging.getLogger(name='termsandconditions')
DEFAULT_TERMS_BASE_TEMPLATE = 'base.html'
DEFAULT_TERMS_IP_HEADER_NAME = 'REMOTE_ADDR'
class GetTermsViewMixin(object):
"""Checks URL parameters for slug and/or version to pull the right TermsAndConditions object"""
def get_terms(self, kwargs):
"""Checks URL parameters for slug and/or version to pull the right TermsAndConditions object"""
slug = kwargs.get("slug")
version = kwargs.get("version")
if slug and version:
terms = [TermsAndConditions.objects.filter(slug=slug, version_number=version).latest('date_active')]
elif slug:
terms = [TermsAndConditions.get_active(slug)]
else:
# Return a list of not agreed to terms for the current user for the list view
terms = TermsAndConditions.get_active_terms_not_agreed_to(self.request.user)
return terms
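    # Hedged illustration (not in the original module) of how the kwargs are
    # resolved: with both slug and version, get_terms returns a one-element
    # list holding that exact version; with only a slug it returns the active
    # version for that slug; with neither it falls back to every active terms
    # document the current user has not yet agreed to. The slug/version values
    # below are hypothetical.
    #
    #   self.get_terms({'slug': 'site-terms', 'version': '1.5'})
    #   self.get_terms({'slug': 'site-terms'})
    #   self.get_terms({})  # -> TermsAndConditions.get_active_terms_not_agreed_to(request.user)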
class TermsView(DetailView, GetTermsViewMixin):
"""
View Terms and Conditions View
url: /terms/view
"""
template_name = "termsandconditions/tc_view_terms.html"
context_object_name = 'terms_list'
def get_context_data(self, **kwargs):
"""Pass additional context data"""
context = super(TermsView, self).get_context_data(**kwargs)
context['terms_base_template'] = getattr(settings, 'TERMS_BASE_TEMPLATE', DEFAULT_TERMS_BASE_TEMPLATE)
return context
def get_object(self, queryset=None):
"""Override of DetailView method, queries for which T&C to return"""
LOGGER.debug('termsandconditions.views.TermsView.get_object')
return self.get_terms(self.kwargs)
class AcceptTermsView(CreateView, GetTermsViewMixin):
"""
Terms and Conditions Acceptance view
url: /terms/accept
"""
model = UserTermsAndConditions
form_class = UserTermsAndConditionsModelForm
template_name = "termsandconditions/tc_accept_terms.html"
def get_context_data(self, **kwargs):
"""Pass additional context data"""
context = super(AcceptTermsView, self).get_context_data(**kwargs)
context['terms_base_template'] = getattr(settings, 'TERMS_BASE_TEMPLATE', DEFAULT_TERMS_BASE_TEMPLATE)
return context
def get_initial(self):
"""Override of CreateView method, queries for which T&C to accept and catches returnTo from URL"""
LOGGER.debug('termsandconditions.views.AcceptTermsView.get_initial')
terms = self.get_terms(self.kwargs)
return_to = self.request.GET.get('returnTo', '/')
return {'terms': terms, 'returnTo': return_to}
def post(self, request, *args, **kwargs):
"""
Handles POST request.
"""
return_url = request.POST.get('returnTo', '/')
terms_ids = request.POST.getlist('terms')
if not terms_ids: # pragma: nocover
return HttpResponseRedirect(return_url)
if DJANGO_VERSION <= (2, 0, 0):
user_authenticated = request.user.is_authenticated()
else:
user_authenticated = request.user.is_authenticated
if user_authenticated:
user = request.user
else:
# Get user out of saved pipeline from django-socialauth
if 'partial_pipeline' in request.session:
user_pk = request.session['partial_pipeline']['kwargs']['user']['pk']
user = User.objects.get(id=user_pk)
else:
return HttpResponseRedirect('/')
store_ip_address = getattr(settings, 'TERMS_STORE_IP_ADDRESS', True)
if store_ip_address:
ip_address = request.META.get(getattr(settings, 'TERMS_IP_HEADER_NAME', DEFAULT_TERMS_IP_HEADER_NAME))
else:
ip_address = ""
for terms_id in terms_ids:
try:
new_user_terms = UserTermsAndConditions(
user=user,
terms=TermsAndConditions.objects.get(pk=int(terms_id)),
ip_address=ip_address
)
new_user_terms.save()
except IntegrityError: # pragma: nocover
pass
cache.set('tandc.not_agreed_terms_' + user.get_username(), None, getattr(settings, 'TERMS_CACHE_SECONDS', 30))
return HttpResponseRedirect(return_url)
class EmailTermsView(FormView, GetTermsViewMixin):
"""
Email Terms and Conditions View
url: /terms/email
"""
template_name = "termsandconditions/tc_email_terms_form.html"
form_class = EmailTermsForm
def get_context_data(self, **kwargs):
"""Pass additional context data"""
context = super(EmailTermsView, self).get_context_data(**kwargs)
context['terms_base_template'] = getattr(settings, 'TERMS_BASE_TEMPLATE', DEFAULT_TERMS_BASE_TEMPLATE)
return context
def get_initial(self):
"""Override of CreateView method, queries for which T&C send, catches returnTo from URL"""
LOGGER.debug('termsandconditions.views.EmailTermsView.get_initial')
terms = self.get_terms(self.kwargs)
return_to = self.request.GET.get('returnTo', '/')
return {'terms': terms, 'returnTo': return_to}
def form_valid(self, form):
"""Override of CreateView method, sends the email."""
LOGGER.debug('termsandconditions.views.EmailTermsView.form_valid')
template = get_template("termsandconditions/tc_email_terms.html")
template_rendered = template.render({"terms": form.cleaned_data.get('terms')})
LOGGER.debug("Email Terms Body:")
LOGGER.debug(template_rendered)
try:
send_mail(form.cleaned_data.get('email_subject', _('Terms')),
template_rendered,
settings.DEFAULT_FROM_EMAIL,
[form.cleaned_data.get('email_address')],
fail_silently=False)
messages.add_message(self.request, messages.INFO, _("Terms and Conditions Sent."))
except SMTPException: # pragma: no cover
messages.add_message(self.request, messages.ERROR, _("An Error Occurred Sending Your Message."))
self.success_url = form.cleaned_data.get('returnTo', '/') or '/'
return super(EmailTermsView, self).form_valid(form)
def form_invalid(self, form):
"""Override of CreateView method, logs invalid email form submissions."""
LOGGER.debug("Invalid Email Form Submitted")
messages.add_message(self.request, messages.ERROR, _("Invalid Email Address."))
return super(EmailTermsView, self).form_invalid(form)
``` |
{
"source": "jlack1987/3D-Pendulum",
"score": 3
} |
#### File: jlack1987/3D-Pendulum/mathtomat.py
```python
import re
import os
import glob
# Function that will do regex substitutions defined in dict on the string text
def multiSub(dict,text):
# Function for performing multiple substitutions using regex
# Create a regular expression from the dictionary keys
regex = re.compile("(%s)" % "|".join(map(re.escape, dict.keys())))
# For each match, look-up corresponding value in dictionary
return regex.sub(lambda mo: dict[mo.string[mo.start():mo.end()]], text)
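# A small worked example (added for illustration, not part of the original
# script): multiSub replaces every dictionary key found in the text with its
# value in a single regex pass, so earlier replacements cannot feed into
# later ones.
#
#   multiSub({"[": "(", "]": ")"}, "x[0] = y[1]")   # -> "x(0) = y(1)"
#   multiSub({"{": "[", "}": "]"}, "{1, 2}")        # -> "[1, 2]"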
# Get the path to this file
baseDir = os.getcwd()
# This assumes your Mathematica expression files are in a subdirectory called "build"
buildDir = baseDir + '/build'
# Get the filepaths to all files in the build directory with a .mat extension
inputPaths = []
for files in glob.glob(buildDir+"/*.mat"):
inputPaths.append(files)
# Create paths for output files corresponding to each input file
outputPaths = []
for files in inputPaths:
tmpFile = files.split('/')[-1].split('.mat')[0]
outputPaths.append(buildDir + '/' + tmpFile + '.m')
# Create dictionary of expressions to be substituted and what is to be put in their place
subDict = {"[":"(", "]":")", "{":"[", "}":"]"}
# loop through the filepaths
for indx,files in enumerate(inputPaths):
# open the files as writable
newFile = open(outputPaths[indx], 'w+')
# create the function definition text. Note that if you want to add more
# arguments, the (x) here is the argument, you could make this a list or
# generalize the way this is done to make it more flexible/general
newFile.write("function" + " ret = " + files.split('/')[-1].split('.mat')[0] + "(x)\n\n")
# write the Mathematica expression as well as make it all lowercase, make newlines
# compatible with Matlab/Octave and perform a somewhat hacky way of detecting the
    # matrix rows and putting in the semicolons.
contents = open(files).read().lower().replace('},','};').replace('\n','...\n')
# Use the multiSub function to perform regex substitutions.
newFile.write("ret = " + multiSub(subDict,contents).replace(" ","") + ";" + "\n\n" + "end")
``` |
{
"source": "jladan/toc-logger",
"score": 3
} |
#### File: toc-logger/toclogger/tictoc.py
```python
import time
_timer = time.time
if 'perf_counter' in dir(time):
_timer = time.perf_counter
def tic():
global tictoc_time
tictoc_time = _timer()
def toc():
return _timer() - tictoc_time
class TocLogger:
def __init__(self):
self._tictoc_time = _timer()
self._log = []
def log(self, message=''):
self._log.append((message, _timer() - self._tictoc_time))
def get_times(self):
dts = []
for m, t in self._log:
dts.append(t)
return [t for m,t in self._log]
def get_diffs(self):
dts = []
prev_t = 0
for m, t in self._log:
dts.append(t-prev_t)
prev_t = t
return dts
def print_log(self):
prev_t = 0
for message, t in self._log:
print("{}\t{}".format(t-prev_t, message))
prev_t = t
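# Minimal usage sketch (added for illustration; the sleep durations and
# messages are arbitrary). TocLogger records the elapsed time at each log()
# call, so get_diffs() returns the time spent between consecutive checkpoints.
if __name__ == '__main__':
    demo = TocLogger()
    time.sleep(0.1)
    demo.log('loaded data')
    time.sleep(0.2)
    demo.log('processed data')
    demo.print_log()          # prints each step's duration and message
    print(demo.get_diffs())   # approximately [0.1, 0.2]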
``` |
{
"source": "jladdjr/ansible-builder",
"score": 2
} |
#### File: ansible-builder/ansible_builder/cli.py
```python
import argparse
import logging
import sys
import yaml
from . import __version__
from .colors import MessageColors
from .exceptions import DefinitionError
from .main import AnsibleBuilder
from . import constants
from .introspect import add_introspect_options, process, simple_combine
from .requirements import sanitize_requirements
from .utils import configure_logger
logger = logging.getLogger(__name__)
def run():
args = parse_args()
configure_logger(args.verbosity)
if args.action in ['build']:
logger.debug(f'Ansible Builder is building your execution environment image, "{args.tag}".')
ab = AnsibleBuilder(**vars(args))
action = getattr(ab, ab.action)
try:
if action():
print(MessageColors.OKGREEN + "Complete! The build context can be found at: {0}".format(ab.build_context) + MessageColors.ENDC)
sys.exit(0)
except DefinitionError as e:
logger.error(e.args[0])
sys.exit(1)
elif args.action == 'introspect':
data = process(args.folder)
if args.sanitize:
data['python'] = sanitize_requirements(data['python'])
data['system'] = simple_combine(data['system'])
logger.info('# Sanitized dependencies for {0}'.format(args.folder))
else:
print('# Dependency data for {0}'.format(args.folder))
print('---')
print(yaml.dump(data, default_flow_style=False))
sys.exit(0)
logger.error("An error has occured.")
sys.exit(1)
def parse_args(args=sys.argv[1:]):
parser = argparse.ArgumentParser(
prog='ansible-builder',
description=(
'Tooling to help build container images for running Ansible content. '
'Get started by looking at the help text for one of the subcommands.'
)
)
parser.add_argument(
'--version', action='version', version=__version__,
help='Print ansible-builder version and exit.'
)
subparsers = parser.add_subparsers(help='The command to invoke.', dest='action')
subparsers.required = True # This can be a kwarg in python 3.7+
build_command_parser = subparsers.add_parser(
'build',
help='Builds a container image.',
description=(
'Creates a build context (including a Containerfile) from an execution environment spec. '
'The build context will be populated from the execution environment spec. '
'After that, the specified container runtime podman/docker will be invoked to '
'build an image from that definition. '
'After building the image, it can be used locally or published using the supplied tag.'
)
)
build_command_parser.add_argument('-t', '--tag',
default=constants.default_tag,
help='The name for the container image being built.')
for p in [build_command_parser]:
p.add_argument('-f', '--file',
default=constants.default_file,
dest='filename',
help='The definition of the execution environment.')
p.add_argument('-b', '--base-image',
default=None,
help='The parent image for the execution environment.')
p.add_argument('-c', '--context',
default=constants.default_build_context,
dest='build_context',
help='The directory to use for the build context. Defaults to $PWD/context.')
p.add_argument('--container-runtime',
choices=list(constants.runtime_files.keys()),
default=constants.default_container_runtime,
help='Specifies which container runtime to use. Defaults to podman.')
p.add_argument('-v', '--verbosity',
dest='verbosity',
type=int,
choices=[0, 1, 2, 3],
default=2,
help='Increase the output verbosity, for up to three levels of verbosity '
'(invoked via "--verbosity" or "-v" followed by an integer ranging '
'in value from 0 to 3)')
introspect_parser = subparsers.add_parser(
'introspect',
help='Introspects collections in folder.',
description=(
'Loops over collections in folder and returns data about dependencies. '
'This is used internally and exposed here for verification. '
'This is targeted toward collection authors and maintainers.'
)
)
add_introspect_options(introspect_parser)
introspect_parser.add_argument(
'--sanitize', help=(
'Sanitize and de-duplicate requirements. '
'This is normally done separately from the introspect script, but this '
'option is given to more accurately test collection content.'
), action='store_true')
introspect_parser.add_argument(
'-v', '--verbosity', dest='verbosity', action='count', default=0, help=(
'Increase the output verbosity, for up to three levels of verbosity '
'(invoked via "--verbosity" or "-v" followed by an integer ranging '
'in value from 0 to 3)'))
args = parser.parse_args(args)
return args
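# A hedged example of the resulting namespace (not part of the original
# module); the definition file name and tag below are made up.
#
#   args = parse_args(['build', '-f', 'execution-environment.yml', '-t', 'my_ee'])
#   args.action             # -> 'build'
#   args.filename           # -> 'execution-environment.yml'
#   args.tag                # -> 'my_ee'
#   args.container_runtime  # -> constants.default_container_runtime
#   args.verbosity          # -> 2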
```
#### File: ansible-builder/ansible_builder/main.py
```python
import logging
import os
import textwrap
import yaml
from . import constants
from .exceptions import DefinitionError
from .steps import AdditionalBuildSteps, GalaxySteps, PipSteps, BindepSteps
from .utils import run_command, write_file, copy_file
from .requirements import sanitize_requirements
import ansible_builder.introspect
logger = logging.getLogger(__name__)
# Files that need to be moved into the build context, and their naming inside the context
CONTEXT_FILES = {
'galaxy': 'requirements.yml'
}
BINDEP_COMBINED = 'bindep_combined.txt'
BINDEP_OUTPUT = 'bindep_output.txt'
PIP_COMBINED = 'requirements_combined.txt'
class AnsibleBuilder:
def __init__(self, action=None,
filename=constants.default_file,
base_image=None,
build_context=constants.default_build_context,
tag=constants.default_tag,
container_runtime=constants.default_container_runtime,
verbosity=2):
self.action = action
self.definition = UserDefinition(filename=filename)
# Handle precedence of the base image
if base_image is not None:
self.base_image = base_image
if base_image is None:
if self.definition.raw.get('base_image'):
self.base_image = self.definition.raw.get('base_image')
else:
self.base_image = constants.default_base_image
self.tag = tag
self.build_context = build_context
self.container_runtime = container_runtime
self.containerfile = Containerfile(
definition=self.definition,
base_image=self.base_image,
build_context=self.build_context,
container_runtime=self.container_runtime,
tag=self.tag)
self.verbosity = verbosity
@property
def version(self):
return self.definition.version
@property
def build_command(self):
return [
self.container_runtime, "build",
"-f", self.containerfile.path,
"-t", self.tag,
self.build_context
]
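    # Hedged illustration (not in the original source): with podman as the
    # runtime, a build context of "context" and a tag of "my_ee", this
    # property evaluates to something like
    #   ['podman', 'build', '-f', 'context/Containerfile', '-t', 'my_ee', 'context']
    # (the tag and paths here are made up for the example).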
def run_in_container(self, command, **kwargs):
wrapped_command = [self.container_runtime, 'run','--rm']
wrapped_command.extend(['-v', f"{os.path.abspath(self.build_context)}:/context:Z"])
wrapped_command.extend([self.tag] + command)
return run_command(wrapped_command, **kwargs)
def run_intermission(self):
run_command(self.build_command, capture_output=True)
rc, introspect_output = self.run_in_container(
['python3', '/context/introspect.py'], capture_output=True
)
collection_data = yaml.safe_load('\n'.join(introspect_output))
# Add data from user definition, go from dicts to list
collection_data['system']['user'] = self.definition.user_system
collection_data['python']['user'] = self.definition.user_python
system_lines = ansible_builder.introspect.simple_combine(collection_data['system'])
python_lines = sanitize_requirements(collection_data['python'])
bindep_output = []
if system_lines:
write_file(os.path.join(self.build_context, BINDEP_COMBINED), system_lines + [''])
rc, bindep_output = self.run_in_container(
['bindep', '-b', '-f', '/context/{0}'.format(BINDEP_COMBINED)],
allow_error=True, capture_output=True
)
return (bindep_output, python_lines)
def build(self):
# Phase 1 of Containerfile
self.containerfile.create_folder_copy_files()
self.containerfile.prepare_prepended_steps()
self.containerfile.prepare_galaxy_steps()
logger.debug('Writing partial Containerfile without collection requirements')
self.containerfile.write()
system_lines, pip_lines = self.run_intermission()
# Phase 2 of Containerfile
self.containerfile.prepare_system_steps(bindep_output=system_lines)
self.containerfile.prepare_pip_steps(pip_lines=pip_lines)
self.containerfile.prepare_appended_steps()
logger.debug('Rewriting Containerfile to capture collection requirements')
self.containerfile.write()
run_command(self.build_command)
return True
class BaseDefinition:
"""Subclasses should populate these properties in the __init__ method
    self.raw - a dict holding the parsed definition
    self.reference_path - the folder that dependency paths are resolved relative to
"""
@property
def version(self):
version = self.raw.get('version')
if not version:
raise ValueError("Expected top-level 'version' key to be present.")
return str(version)
class UserDefinition(BaseDefinition):
def __init__(self, filename):
self.filename = filename
self.reference_path = os.path.dirname(filename)
try:
with open(filename, 'r') as f:
y = yaml.safe_load(f)
self.raw = y if y else {}
except FileNotFoundError:
raise DefinitionError(textwrap.dedent("""
Could not detect '{0}' file in this directory.
Use -f to specify a different location.
""").format(filename))
except (yaml.parser.ParserError, yaml.scanner.ScannerError) as e:
raise DefinitionError(textwrap.dedent("""
                    An error occurred while parsing the definition file:
{0}
""").format(str(e)))
if not isinstance(self.raw, dict):
raise DefinitionError("Definition must be a dictionary, not {0}".format(type(self.raw).__name__))
self.user_python = self.read_dependency('python')
self.user_system = self.read_dependency('system')
def get_additional_commands(self):
"""Gets additional commands from the exec env file, if any are specified.
"""
commands = self.raw.get('additional_build_steps')
return commands
def get_dep_abs_path(self, entry):
"""Unique to the user EE definition, files can be referenced by either
an absolute path or a path relative to the EE definition folder
This method will return the absolute path.
"""
req_file = self.raw.get('dependencies', {}).get(entry)
if not req_file:
return None
if os.path.isabs(req_file):
return req_file
return os.path.join(self.reference_path, req_file)
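    # Hedged example (not in the original source), assuming a definition file
    # at ee/execution-environment.yml whose dependencies section lists
    # python: requirements.txt and system: /abs/bindep.txt:
    #
    #   get_dep_abs_path('python')  # -> 'ee/requirements.txt' (joined with reference_path)
    #   get_dep_abs_path('system')  # -> '/abs/bindep.txt' (absolute paths pass through)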
def read_dependency(self, entry):
requirement_path = self.get_dep_abs_path(entry)
if not requirement_path:
return []
try:
with open(requirement_path, 'r') as f:
return f.read().split('\n')
except FileNotFoundError:
raise DefinitionError("Dependency file {0} does not exist.".format(requirement_path))
def validate(self):
for item in CONTEXT_FILES:
requirement_path = self.get_dep_abs_path(item)
if requirement_path:
if not os.path.exists(requirement_path):
raise DefinitionError("Dependency file {0} does not exist.".format(requirement_path))
ee_base_image = self.raw.get('base_image')
if ee_base_image:
if not isinstance(ee_base_image, str):
raise DefinitionError(textwrap.dedent(
f"""
Error: Unknown type {type(ee_base_image)} found for base_image; must be a string.
""")
)
additional_cmds = self.get_additional_commands()
if additional_cmds:
if not isinstance(additional_cmds, dict):
raise DefinitionError(textwrap.dedent("""
Expected 'additional_build_steps' in the provided definition file to be a dictionary
with keys 'prepend' and/or 'append'; found a {0} instead.
""").format(type(additional_cmds).__name__))
expected_keys = frozenset(('append', 'prepend'))
unexpected_keys = set(additional_cmds.keys()) - expected_keys
if unexpected_keys:
raise DefinitionError(
f"Keys {*unexpected_keys,} are not allowed in 'additional_build_steps'."
)
class Containerfile:
newline_char = '\n'
def __init__(self, definition,
build_context=constants.default_build_context,
base_image=constants.default_base_image,
container_runtime=constants.default_container_runtime,
tag=constants.default_tag):
self.build_context = build_context
self.definition = definition
filename = constants.runtime_files[container_runtime]
self.path = os.path.join(self.build_context, filename)
self.base_image = base_image or self.definition.raw.get('base_image')
self.container_runtime = container_runtime
self.tag = tag
self.steps = [
"FROM {0}".format(self.base_image),
""
]
def create_folder_copy_files(self):
"""Creates the build context file for this Containerfile
moves files from the definition into the folder
"""
# courteously validate items before starting to write files
self.definition.validate()
os.makedirs(self.build_context, exist_ok=True)
for item, new_name in CONTEXT_FILES.items():
requirement_path = self.definition.get_dep_abs_path(item)
if requirement_path is None:
continue
dest = os.path.join(self.build_context, new_name)
copy_file(requirement_path, dest)
# copy introspect.py file from source into build context
copy_file(
ansible_builder.introspect.__file__,
os.path.join(self.build_context, 'introspect.py')
)
def prepare_prepended_steps(self):
additional_prepend_steps = self.definition.get_additional_commands()
if additional_prepend_steps:
prepended_steps = additional_prepend_steps.get('prepend')
if prepended_steps:
return self.steps.extend(AdditionalBuildSteps(prepended_steps))
return False
def prepare_appended_steps(self):
additional_append_steps = self.definition.get_additional_commands()
if additional_append_steps:
appended_steps = additional_append_steps.get('append')
if appended_steps:
return self.steps.extend(AdditionalBuildSteps(appended_steps))
return False
def prepare_galaxy_steps(self):
if self.definition.get_dep_abs_path('galaxy'):
self.steps.extend(GalaxySteps(CONTEXT_FILES['galaxy']))
return self.steps
def prepare_pip_steps(self, pip_lines):
if ''.join(pip_lines).strip(): # only use file if it is non-blank
pip_file = os.path.join(self.build_context, PIP_COMBINED)
write_file(pip_file, pip_lines)
self.steps.extend(PipSteps(PIP_COMBINED))
return self.steps
def prepare_system_steps(self, bindep_output):
if ''.join(bindep_output).strip():
system_file = os.path.join(self.build_context, BINDEP_OUTPUT)
write_file(system_file, bindep_output)
self.steps.extend(BindepSteps(BINDEP_OUTPUT))
return self.steps
def write(self):
with open(self.path, 'w') as f:
for step in self.steps:
f.write(step + self.newline_char)
return True
```
#### File: ansible-builder/ansible_builder/utils.py
```python
import filecmp
import logging
import logging.config
import os
import shutil
import subprocess
import sys
from .colors import MessageColors
logger = logging.getLogger(__name__)
logging_levels = {
'0': 'ERROR',
'1': 'WARNING',
'2': 'INFO',
'3': 'DEBUG',
}
class ColorFilter(logging.Filter):
color_map = {
'ERROR': MessageColors.FAIL,
'WARNING': MessageColors.WARNING,
'INFO': MessageColors.HEADER,
'DEBUG': MessageColors.OK
}
def filter(self, record):
if sys.stdout.isatty():
record.msg = self.color_map[record.levelname] + record.msg + MessageColors.ENDC
return record
LOGGING = {
'version': 1,
'filters': {
'colorize': {
'()': ColorFilter
}
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'filters': ['colorize'],
'stream': 'ext://sys.stdout'
}
},
'loggers': {
'ansible_builder': {
'handlers': ['console'],
}
}
}
def configure_logger(verbosity):
LOGGING['loggers']['ansible_builder']['level'] = logging_levels[str(verbosity)]
logging.config.dictConfig(LOGGING)
def run_command(command, capture_output=False, allow_error=False):
logger.info('Running command:')
logger.info(' {0}'.format(' '.join(command)))
try:
process = subprocess.Popen(command,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
except FileNotFoundError:
logger.error(f"You do not have {command[0]} installed, please specify a different container runtime for this command.")
sys.exit(1)
output = []
for line in iter(process.stdout.readline, b''):
line = line.decode(sys.stdout.encoding)
if capture_output:
output.append(line.rstrip())
logger.debug(line)
rc = process.poll()
if rc is not None and rc != 0 and (not allow_error):
for line in output:
logger.error(line)
logger.error(f"An error occured (rc={rc}), see output line(s) above for details.")
sys.exit(1)
return (rc, output)
def write_file(filename: str, lines: list) -> bool:
new_text = '\n'.join(lines)
if os.path.exists(filename):
with open(filename, 'r') as f:
if f.read() == new_text:
logger.debug("File {0} is already up-to-date.".format(filename))
return False
else:
logger.warning('File {0} had modifications and will be rewritten'.format(filename))
with open(filename, 'w') as f:
f.write(new_text)
return True
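# Hedged usage sketch (not part of the original module; the file name and
# contents are made up). write_file only rewrites the file when the rendered
# text differs from what is already on disk, which is what keeps unchanged
# build-context files from invalidating container layer caches:
#
#   write_file('context/requirements_combined.txt', ['pytz', 'requests'])  # -> True (written)
#   write_file('context/requirements_combined.txt', ['pytz', 'requests'])  # -> False (already up-to-date)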
def copy_file(source: str, dest: str) -> bool:
should_copy = False
if not os.path.exists(dest):
logger.debug("File {0} will be created.".format(dest))
should_copy = True
elif not filecmp.cmp(source, dest, shallow=False):
logger.warning('File {0} had modifications and will be rewritten'.format(dest))
should_copy = True
elif os.path.getmtime(source) > os.path.getmtime(dest):
logger.warning('File {0} updated time increased and will be rewritten'.format(dest))
should_copy = True
if should_copy:
shutil.copy(source, dest)
else:
logger.debug("File {0} is already up-to-date.".format(dest))
return should_copy
```
#### File: test/integration/test_build.py
```python
import pytest
import os
def test_definition_syntax_error(cli, data_dir):
ee_def = os.path.join(data_dir, 'definition_files', 'invalid.yml')
r = cli(
f'ansible-builder build -f {ee_def} --container-runtime podman',
allow_error=True
)
assert r.rc != 0
    assert 'An error occurred while parsing the definition file' in (r.stdout + r.stderr), (r.stdout + r.stderr)
def test_build_fail_exitcode(cli, container_runtime, ee_tag, tmpdir, data_dir):
"""Test that when a build fails, the ansible-builder exits with non-zero exit code.
Example: https://github.com/ansible/ansible-builder/issues/51
"""
bc = str(tmpdir)
ee_def = os.path.join(data_dir, 'build_fail', 'execution-environment.yml')
r = cli(
f"ansible-builder build -c {bc} -f {ee_def} -t {ee_tag} --container-runtime {container_runtime}",
allow_error=True
)
assert r.rc != 0
assert 'RUN thisisnotacommand' in (r.stdout + r.stderr), (r.stdout + r.stderr)
assert 'thisisnotacommand: command not found' in (r.stdout + r.stderr), (r.stdout + r.stderr)
def test_missing_python_requirements_file():
"""If a user specifies a python requirements file, but we can't find it, fail sanely."""
pytest.skip("Not implemented")
def test_missing_galaxy_requirements_file():
"""If a user specifies a galaxy requirements file, but we can't find it, fail sanely."""
pytest.skip("Not implemented")
def test_build_streams_output_with_verbosity_on(cli, container_runtime, build_dir_and_ee_yml, ee_tag):
"""Test that 'ansible-builder build' streams build output."""
tmpdir, eeyml = build_dir_and_ee_yml("")
result = cli(f"ansible-builder build -c {tmpdir} -f {eeyml} -t {ee_tag} --container-runtime {container_runtime} -v 3")
assert f'{container_runtime} build -f {tmpdir}' in result.stdout
assert f'Ansible Builder is building your execution environment image, "{ee_tag}".' in result.stdout
assert f'The build context can be found at: {tmpdir}' in result.stdout
def test_build_streams_output_with_verbosity_off(cli, container_runtime, build_dir_and_ee_yml, ee_tag):
"""
Like the test_build_streams_output_with_verbosity_on test but making sure less output is shown with default verbosity level of 2.
"""
tmpdir, eeyml = build_dir_and_ee_yml("")
result = cli(f"ansible-builder build -c {tmpdir} -f {eeyml} -t {ee_tag} --container-runtime {container_runtime}")
assert f'Ansible Builder is building your execution environment image, "{ee_tag}".' not in result.stdout
assert f'The build context can be found at: {tmpdir}' in result.stdout
def test_build_streams_output_with_invalid_verbosity(cli, container_runtime, build_dir_and_ee_yml, ee_tag):
"""
Like the test_build_streams_output_with_verbosity_off test but making sure it errors out correctly with invalid verbosity level.
"""
tmpdir, eeyml = build_dir_and_ee_yml("")
result = cli(f"ansible-builder build -c {tmpdir} -f {eeyml} -t {ee_tag} --container-runtime {container_runtime} -v 6", allow_error=True)
assert result.rc != 0
assert 'invalid choice: 6 (choose from 0, 1, 2, 3)' in (result.stdout + result.stderr)
def test_blank_execution_environment(cli, container_runtime, ee_tag, tmpdir, data_dir):
"""Just makes sure that the buld process does not require any particular input"""
bc = str(tmpdir)
ee_def = os.path.join(data_dir, 'blank', 'execution-environment.yml')
cli(
f'ansible-builder build -c {bc} -f {ee_def} -t {ee_tag} --container-runtime {container_runtime}'
)
result = cli(f'{container_runtime} run --rm {ee_tag} echo "This is a simple test"')
assert 'This is a simple test' in result.stdout, result.stdout
def test_user_system_requirement(cli, container_runtime, ee_tag, tmpdir, data_dir):
bc = str(tmpdir)
ee_def = os.path.join(data_dir, 'subversion', 'execution-environment.yml')
cli(
f'ansible-builder build -c {bc} -f {ee_def} -t {ee_tag} --container-runtime {container_runtime}'
)
result = cli(
f'{container_runtime} run --rm {ee_tag} svn --help'
)
assert 'Subversion is a tool for version control' in result.stdout, result.stdout
def test_collection_system_requirement(cli, container_runtime, ee_tag, tmpdir, data_dir):
bc = str(tmpdir)
ee_def = os.path.join(data_dir, 'ansible.posix.at', 'execution-environment.yml')
cli(
f'ansible-builder build -c {bc} -f {ee_def} -t {ee_tag} --container-runtime {container_runtime}'
)
result = cli(
f'{container_runtime} run --rm {ee_tag} at -V'
)
assert 'at version' in result.stderr, result.stderr
def test_user_python_requirement(cli, container_runtime, ee_tag, tmpdir, data_dir):
bc = str(tmpdir)
ee_def = os.path.join(data_dir, 'pip', 'execution-environment.yml')
cli(
f'ansible-builder build -c {bc} -f {ee_def} -t {ee_tag} --container-runtime {container_runtime}'
)
result = cli(
f'{container_runtime} run --rm {ee_tag} pip3 show awxkit'
)
assert 'The official command line interface for Ansible AWX' in result.stdout, result.stdout
class TestPytz:
@pytest.fixture(scope='class')
def pytz(self, cli_class, container_runtime, ee_tag_class, data_dir, tmpdir_factory):
bc_folder = str(tmpdir_factory.mktemp('bc'))
ee_def = os.path.join(data_dir, 'pytz', 'execution-environment.yml')
r = cli_class(
f'ansible-builder build -c {bc_folder} -f {ee_def} -t {ee_tag_class} --container-runtime {container_runtime} -v 3'
)
assert 'Collecting pytz' in r.stdout, r.stdout
return (ee_tag_class, bc_folder)
def test_has_pytz(self, cli, container_runtime, pytz):
ee_tag, bc_folder = pytz
r = cli(f'{container_runtime} run --rm {ee_tag} pip3 show pytz')
assert 'World timezone definitions, modern and historical' in r.stdout, r.stdout
def test_build_layer_reuse(self, cli, container_runtime, data_dir, pytz):
ee_tag, bc_folder = pytz
ee_def = os.path.join(data_dir, 'pytz', 'execution-environment.yml')
r = cli(
f'ansible-builder build -c {bc_folder} -f {ee_def} -t {ee_tag} --container-runtime {container_runtime} -v 3'
)
assert 'Collecting pytz (from -r /build/' not in r.stdout, r.stdout
assert 'requirements_combined.txt is already up-to-date' in r.stdout, r.stdout
        stdout_no_whitespace = r.stdout.replace('--->', '-->').replace('\n', ' ').replace('  ', ' ').replace('  ', ' ')
assert 'ADD requirements_combined.txt /build/ --> Using cache' in stdout_no_whitespace, r.stdout
``` |
{
"source": "jladdjr/pytest-ansible",
"score": 2
} |
#### File: pytest_ansible/host_manager/v28.py
```python
from ansible.parsing.dataloader import DataLoader
from pytest_ansible.host_manager import BaseHostManager
from pytest_ansible.module_dispatcher.v28 import ModuleDispatcherV28
from ansible.vars.manager import VariableManager
from ansible.inventory.manager import InventoryManager
class HostManagerV28(BaseHostManager):
"""Fixme."""
def __init__(self, *args, **kwargs):
"""Fixme."""
super(HostManagerV28, self).__init__(*args, **kwargs)
self._dispatcher = ModuleDispatcherV28
def initialize_inventory(self):
self.options['loader'] = DataLoader()
self.options['inventory_manager'] = InventoryManager(loader=self.options['loader'],
sources=self.options['inventory'])
self.options['variable_manager'] = VariableManager(loader=self.options['loader'],
inventory=self.options['inventory_manager'])
# self.options['inventory_manager'].clear_caches()
``` |
{
"source": "jladdjr/pytest-github",
"score": 3
} |
#### File: pytest-github/tests/__init__.py
```python
def assert_outcome(result, passed=0, failed=0, skipped=0, xpassed=0, xfailed=0):
'''This method works around a limitation where pytester assertoutcome()
doesn't support xpassed and xfailed.
'''
actual_count = dict(passed=0, failed=0, skipped=0, xpassed=0, xfailed=0)
reports = filter(lambda x: hasattr(x, 'when'), result.getreports())
for report in reports:
if report.when == 'setup':
if report.skipped:
actual_count['skipped'] += 1
elif report.when == 'call':
if hasattr(report, 'wasxfail'):
if report.passed:
actual_count['xpassed'] += 1
elif report.skipped:
actual_count['xfailed'] += 1
else:
actual_count[report.outcome] += 1
else:
continue
assert passed == actual_count['passed'], "Unexpected value for 'passed' (%s), %s" % (passed, actual_count)
assert failed == actual_count['failed'], "Unexpected value for 'failed' (%s), %s" % (failed, actual_count)
assert skipped == actual_count['skipped'], "Unexpected value for 'skipped' (%s), %s" % (skipped, actual_count)
assert xfailed == actual_count['xfailed'], "Unexpected value for 'xfailed' (%s), %s" % (xfailed, actual_count)
assert xpassed == actual_count['xpassed'], "Unexpected value for 'xpassed' (%s), %s" % (xpassed, actual_count)
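# Hedged usage sketch (added for illustration): inside a pytester-based test,
# after something like result = testdir.runpytest(...), the helper can assert
# on xfail/xpass counts that assertoutcome() itself cannot express, e.g.
#
#   assert_outcome(result, passed=2, xfailed=1)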
```
#### File: pytest-github/tests/test_misc.py
```python
import pytest # NOQA
from _pytest.main import EXIT_OK, EXIT_TESTSFAILED, EXIT_NOTESTSCOLLECTED, EXIT_INTERRUPTED # NOQA
from . import assert_outcome # NOQA
def test_version():
'''Verifies the module has a '__version__' attribue.'''
import pytest_github
assert hasattr(pytest_github, '__version__')
assert isinstance(pytest_github.__version__, str)
``` |
{
"source": "jlafuenteribera/advanced-formation",
"score": 3
} |
#### File: Contenedores/python/controller.py
```python
from flask import Flask
from flask import make_response
import json
import os
app = Flask(__name__)
@app.route('/')
def hello():
print(os.environ)
return nice_json(dict(**os.environ))
def nice_json(arg):
response = make_response(json.dumps(arg, sort_keys = True, indent=4))
response.headers['Content-type'] = "application/json"
return response
if __name__ == '__main__':
app.run(host='0.0.0.0', port=8080)
``` |
{
"source": "j-lag/aioice",
"score": 3
} |
#### File: aioice/aioice/candidate.py
```python
import hashlib
import ipaddress
def candidate_foundation(candidate_type, candidate_transport, base_address):
"""
See RFC 5245 - 4.1.1.3. Computing Foundations
"""
key = '%s|%s|%s' % (candidate_type, candidate_transport, base_address)
return hashlib.md5(key.encode('ascii')).hexdigest()
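# Hedged illustration (not part of the original module): two candidates with
# the same type, transport and base address hash to the same foundation, while
# changing any one of the three inputs yields a different one.
#
#   candidate_foundation('host', 'udp', '192.168.1.2') == \
#       candidate_foundation('host', 'udp', '192.168.1.2')    # True
#   candidate_foundation('host', 'udp', '192.168.1.2') == \
#       candidate_foundation('srflx', 'udp', '192.168.1.2')   # False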
def candidate_priority(candidate_component, candidate_type, local_pref=65535):
"""
See RFC 5245 - 4.1.2.1. Recommended Formula
"""
if candidate_type == 'host':
type_pref = 126
elif candidate_type == 'prflx':
type_pref = 110
elif candidate_type == 'srflx':
type_pref = 100
else:
type_pref = 0
return (1 << 24) * type_pref + \
(1 << 8) * local_pref + \
(256 - candidate_component)
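# Worked example (added for illustration) of the formula above for a host
# candidate on component 1 with the default local preference:
#
#   candidate_priority(1, 'host')
#   = (1 << 24) * 126 + (1 << 8) * 65535 + (256 - 1)
#   = 2113929216 + 16776960 + 255
#   = 2130706431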
class Candidate:
"""
An ICE candidate.
"""
def __init__(self, foundation, component, transport, priority, host, port, type,
related_address=None, related_port=None, tcptype=None, generation=None):
self.foundation = foundation
self.component = component
self.transport = transport
self.priority = priority
self.host = host
self.port = port
self.type = type
self.related_address = related_address
self.related_port = related_port
self.tcptype = tcptype
self.generation = generation
@classmethod
def from_sdp(cls, sdp):
"""
Parse a :class:`Candidate` from SDP.
.. code-block:: python
Candidate.from_sdp(
'6815297761 1 udp 659136 1.2.3.4 31102 typ host generation 0')
"""
bits = sdp.split()
if len(bits) < 8:
raise ValueError('SDP does not have enough properties')
kwargs = {
'foundation': bits[0],
'component': int(bits[1]),
'transport': bits[2],
'priority': int(bits[3]),
'host': bits[4],
'port': int(bits[5]),
'type': bits[7],
}
for i in range(8, len(bits) - 1, 2):
if bits[i] == 'raddr':
kwargs['related_address'] = bits[i + 1]
elif bits[i] == 'rport':
kwargs['related_port'] = int(bits[i + 1])
elif bits[i] == 'tcptype':
kwargs['tcptype'] = bits[i + 1]
elif bits[i] == 'generation':
kwargs['generation'] = int(bits[i + 1])
return Candidate(**kwargs)
def to_sdp(self):
"""
Return a string representation suitable for SDP.
"""
sdp = '%s %d %s %d %s %d typ %s' % (
self.foundation,
self.component,
self.transport,
self.priority,
self.host,
self.port,
self.type)
if self.related_address is not None:
sdp += ' raddr %s' % self.related_address
if self.related_port is not None:
sdp += ' rport %s' % self.related_port
if self.tcptype is not None:
sdp += ' tcptype %s' % self.tcptype
if self.generation is not None:
sdp += ' generation %d' % self.generation
return sdp
def can_pair_with(self, other):
"""
        A local candidate is paired with a remote candidate if and only if
        the two candidates have the same component ID and have the same IP
        address version. This implementation additionally requires matching
        transports.
"""
a = ipaddress.ip_address(self.host)
b = ipaddress.ip_address(other.host)
return (
self.component == other.component and
self.transport.lower() == other.transport.lower() and
a.version == b.version
)
def __repr__(self):
return 'Candidate(%s)' % self.to_sdp()
``` |
{
"source": "j-la-haye/write_field_grid",
"score": 2
} |
#### File: j-la-haye/write_field_grid/visualizeGrid_v1.py
```python
from shapely.geometry.polygon import Polygon
from shapely.ops import polygonize,polygonize_full
from shapely.geometry import mapping, LineString, MultiLineString, MultiPolygon,CAP_STYLE,JOIN_STYLE,shape
from shapely.affinity import rotate,affine_transform,translate
import rasterio
from rasterio.plot import show
from rasterio.plot import show_hist as sh
import numpy as np
import geopandas
from descartes import PolygonPatch
import matplotlib as mpl
from matplotlib import pyplot as plt
from matplotlib import transforms
from matplotlib.collections import PatchCollection
import math
import csv
import mapField
from mapField import plot_buffer, plot_grid, makeGrid, grid_dim
import fiona
from fiona.crs import from_epsg
def read_bands(red,nir,outfile):
with rasterio.open(red) as red:
RED = red.read()
with rasterio.open(nir) as nir:
NIR = nir.read()
#compute the ndvi band and write to tif
np.seterr(divide='ignore', invalid='ignore')
ndvi = (NIR.astype(float)-RED.astype(float))/(NIR+RED)
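    # NDVI = (NIR - RED) / (NIR + RED); values fall in [-1, 1], with dense green
    # vegetation towards 1 and bare soil or water near or below 0.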
profile = red.meta
profile.update(driver='GTiff')
profile.update(dtype=rasterio.float32)
profile.update(nodata = 0)
with rasterio.open(outfile, 'w', **profile) as dst:
dst.write(ndvi.astype(rasterio.float32))
return ndvi
def plot_bands(band):
with rasterio.open(band) as src:
ndvi = src.read()
bounds = src.bounds
transform = src.transform
crs = src.crs
affine = src.affine
ndvi[ ndvi == 0] = np.nan
ndvi[ndvi < 0] = 0
fig,ax = plt.subplots(figsize=(10,10))
ax.set_xticks(np.around(np.arange(0,7000,500),0))
ax.set_yticks(np.around(np.arange(0,7000,500),0))
ax.set_aspect(1)
ax.set_axis_on()
ax.xaxis.label.set_color('grey')
ax.yaxis.label.set_color('grey')
ax.tick_params(axis='x', colors='white')
ax.tick_params(axis='y', colors='white')
plt.title("NDVI Band",color='#FFFFFF')
ax.xaxis.label.set_color('grey')
ax.yaxis.label.set_color('grey')
show(ndvi,ax,cmap='RdYlGn')
plt.savefig('Results/ndvi_full.png',alpha=True,dpi=300)
return affine
def aoi_zoom(minx,maxx,miny,maxy,img):
vert_ax = (miny,maxy)
hor_ax = (minx,maxx)
window =(vert_ax, hor_ax)
img[ img == 0] = np.nan
img[img < 0] = 0
fig,ax = plt.subplots(figsize=(12,12))
w, h = maxx - minx, maxy - miny
ax.set_ylim(maxy + 0.01 * h, miny - 0.01 * h)
ax.set_xlim(minx - 0.01* w, maxx + 0.01 * w)
ax.set_xticks(np.around(np.arange(minx - 0.01 * w, maxx + 0.01 * w, 30),0))
ax.set_yticks(np.around(np.arange(miny - 0.01 * h, maxy + 0.01 * h, 30),0))
ax.set_aspect(1)
plt.xticks(rotation=45,horizontalalignment='right')
ax.grid(b=True, which='major', color='w', linewidth=0.8)
#ax.grid(b=True, which='minor', color='w', linewidth=0.6)
ax.set_axis_on()
ax.xaxis.label.set_color('white')
ax.yaxis.label.set_color('white')
#ax.get_xaxis().set_minor_locator(mpl.ticker.AutoMinorLocator())
#ax.get_yaxis().set_minor_locator(mpl.ticker.AutoMinorLocator())
ax.tick_params(axis='x', colors='white')
ax.tick_params(axis='y', colors='white')
plt.title("NDVI Band",color='#FFFFFF')
show(img,ax,cmap='RdYlGn')
#plt.savefig('imgzoom.png',alpha=True,dpi=300)
def plot_poly(multi_polygon,img):
    mp = MultiPolygon([feature for feature in multi_polygon])
fig,ax = plt.subplots(figsize=(10,10))
cm = plt.get_cmap('RdBu')
num_colours = len(mp)
minx, miny, maxx, maxy = mp.bounds
print(mp.bounds)
w, h = maxx - minx, maxy - miny
ax.set_ylim(maxy + 0.1 * h, miny - 0.1 * h)
ax.set_xlim(minx - 0.1 * w, maxx + 0.1 * w)
ax.set_aspect(1)
patches = []
for idx, p in enumerate(mp):
colour = cm(1. * idx / num_colours)
patches.append(PolygonPatch(p, fc=colour, ec='red', alpha=0.65, zorder=1))
ax.add_collection(PatchCollection(patches, match_original=True))
ax.set_axis_on()
ax.set_xticks(np.around(np.arange(minx - 0.1 * w, maxx + 0.1 * w, 30),0))
ax.set_yticks(np.around(np.arange( miny - 0.1 * h ,maxy + 0.1 * h, 30),0))
ax.tick_params('both',pad=15)
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.xaxis.label.set_color('white')
ax.yaxis.label.set_color('white')
plt.xticks(rotation=45,horizontalalignment='right')
ax.grid(b=True, which='major', color='w', linewidth=0.8)
#ax.grid(b=True, which='minor', color='w', linewidth=0.5)
#ax.get_xaxis().set_minor_locator(mpl.ticker.AutoMinorLocator())
#ax.get_yaxis().set_minor_locator(mpl.ticker.AutoMinorLocator())
ax.tick_params(axis='x', colors='white')
ax.tick_params(axis='y', colors='white')
plt.title("Shapefile",color='#FFFFFF')
show(img,ax, cmap='RdYlGn')
#plt.savefig('Field_Plot.png', alpha=True, dpi=300)
plt.show()
``` |
{
"source": "jlaine/auditwheel",
"score": 2
} |
#### File: jlaine/auditwheel/noxfile.py
```python
import os
import sys
from pathlib import Path
from typing import List
import nox
nox.options.sessions = ["lint", "test-dist"]
PYTHON_ALL_VERSIONS = ["3.6", "3.7", "3.8", "3.9", "3.10"]
RUNNING_CI = "TRAVIS" in os.environ or "GITHUB_ACTIONS" in os.environ
@nox.session(python=["3.6"], reuse_venv=True)
def lint(session: nox.Session) -> None:
"""
Run linters on the codebase.
"""
session.install("pre-commit")
session.run("pre-commit", "run", "--all-files")
@nox.session()
def coverage(session: nox.Session) -> None:
"""
Run coverage using unit tests.
"""
session.install(".[coverage]")
session.run(
"python",
"-m",
"pytest",
"tests/unit",
"--cov=auditwheel",
"--cov-report=term-missing",
)
def _docker_images(session: nox.Session) -> List[str]:
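    # Resolve the manylinux image list by running a small helper script with the
    # session's interpreter (so the integration-test module can be imported), then
    # read the result back from a temporary file.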
tmp_dir = Path(session.create_tmp())
script = tmp_dir / "list_images.py"
images_file = tmp_dir / "images.lst"
script.write_text(
fr"""
import sys
from pathlib import Path
sys.path.append("./tests/integration")
from test_manylinux import MANYLINUX_IMAGES
images = "\n".join(MANYLINUX_IMAGES.values())
Path(r"{images_file}").write_text(images)
"""
)
session.run("python", str(script), silent=True)
return images_file.read_text().splitlines()
@nox.session(python=PYTHON_ALL_VERSIONS)
def tests(session: nox.Session) -> None:
"""
Run tests.
"""
posargs = session.posargs
extras = "coverage" if RUNNING_CI else "test"
session.install("-e", f".[{extras}]")
if RUNNING_CI:
session.install("codecov")
posargs.extend(["--cov", "auditwheel", "--cov-branch"])
# pull manylinux images that will be used.
# this helps passing tests which would otherwise timeout.
for image in _docker_images(session):
session.run("docker", "pull", image, external=True)
session.run("pytest", "-s", *posargs)
if RUNNING_CI:
session.run("auditwheel", "lddtree", sys.executable)
try:
session.run("codecov")
except nox.command.CommandFailed:
pass # Ignore failures from codecov tool
def _build(session: nox.Session, dist: Path) -> None:
session.install("build")
tmp_dir = Path(session.create_tmp()) / "build-output"
session.run("python", "-m", "build", "--outdir", str(tmp_dir))
(wheel_path,) = tmp_dir.glob("*.whl")
(sdist_path,) = tmp_dir.glob("*.tar.gz")
dist.mkdir(exist_ok=True)
wheel_path.rename(dist / wheel_path.name)
sdist_path.rename(dist / sdist_path.name)
@nox.session(name="test-dist")
def test_dist(session: nox.Session) -> None:
"""
Builds SDist & Wheels then run unit tests on those.
"""
tmp_dir = Path(session.create_tmp())
dist = tmp_dir / "dist"
_build(session, dist)
python_versions = session.posargs or PYTHON_ALL_VERSIONS
for version in python_versions:
session.notify(f"_test_sdist-{version}", [str(dist)])
session.notify(f"_test_wheel-{version}", [str(dist)])
def _test_dist(session: nox.Session, path: str, pattern: str) -> None:
(dist_path,) = Path(path).glob(pattern)
session.install(f"{str(dist_path)}[test]")
session.run("pytest", "tests/unit")
@nox.session(python=PYTHON_ALL_VERSIONS)
def _test_sdist(session: nox.Session) -> None:
"""
Do not run explicitly.
"""
_test_dist(session, session.posargs[0], "*.tar.gz")
@nox.session(python=PYTHON_ALL_VERSIONS)
def _test_wheel(session: nox.Session) -> None:
"""
Do not run explicitly.
"""
_test_dist(session, session.posargs[0], "*.whl")
@nox.session
def build(session: nox.Session) -> None:
"""
Make an SDist and a wheel.
"""
_build(session, Path("dist"))
@nox.session(python=PYTHON_ALL_VERSIONS, reuse_venv=True)
def develop(session: nox.Session) -> None:
session.run("python", "-m", "pip", "install", "--upgrade", "pip", "setuptools")
session.install("-e", ".[develop]")
```
#### File: tests/unit/test_tools.py
```python
import argparse
import lzma
from pathlib import Path
import pytest
from auditwheel.tools import EnvironmentDefault, dir2zip, zip2dir
@pytest.mark.parametrize(
("environ", "passed", "expected"),
[
(None, None, "manylinux1"),
(None, "manylinux2010", "manylinux2010"),
("manylinux2010", None, "manylinux2010"),
("manylinux2010", "linux", "linux"),
],
)
def test_environment_action(monkeypatch, environ, passed, expected):
choices = ["linux", "manylinux1", "manylinux2010"]
argv = []
if passed:
argv = ["--plat", passed]
if environ:
monkeypatch.setenv("AUDITWHEEL_PLAT", environ)
p = argparse.ArgumentParser()
p.add_argument(
"--plat",
action=EnvironmentDefault,
env="AUDITWHEEL_PLAT",
dest="PLAT",
choices=choices,
default="manylinux1",
)
args = p.parse_args(argv)
assert args.PLAT == expected
def test_environment_action_invalid_env(monkeypatch):
choices = ["linux", "manylinux1", "manylinux2010"]
monkeypatch.setenv("AUDITWHEEL_PLAT", "foo")
with pytest.raises(argparse.ArgumentError):
p = argparse.ArgumentParser()
p.add_argument(
"--plat",
action=EnvironmentDefault,
env="AUDITWHEEL_PLAT",
dest="PLAT",
choices=choices,
default="manylinux1",
)
def _write_test_permissions_zip(path):
source_zip_xz = Path(__file__).parent / "test-permissions.zip.xz"
with lzma.open(source_zip_xz) as f:
path.write_bytes(f.read())
def _check_permissions(path):
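    # The fixture archive is assumed to contain entries named '<uog>.f' (files) and
    # '<uog>.d' (directories), where the three octal digits encode the permissions
    # they were archived with. Owner read/write is expected to be forced for files,
    # and directories should come back as 0o755 regardless of their archived mode,
    # which is what the masks below verify.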
for i in range(8):
for j in range(8):
for k in range(8):
mode = (path / f"{i}{j}{k}.f").stat().st_mode
assert ((mode >> 6) & 7) == (i | 6) # always read/write
assert ((mode >> 3) & 7) == j
assert ((mode >> 0) & 7) == k
mode = (path / f"{i}{j}{k}.d").stat().st_mode
assert ((mode >> 6) & 7) == 7 # always read/write/execute
assert ((mode >> 3) & 7) == 5 # always read/execute
assert ((mode >> 0) & 7) == 5 # always read/execute
def test_zip2dir_permissions(tmp_path):
source_zip = tmp_path / "test-permissions.zip"
_write_test_permissions_zip(source_zip)
extract_path = tmp_path / "unzip"
zip2dir(str(source_zip), str(extract_path))
_check_permissions(extract_path)
def test_zip2dir_round_trip_permissions(tmp_path):
source_zip = tmp_path / "test-permissions.zip"
_write_test_permissions_zip(source_zip)
extract_path = tmp_path / "unzip2"
zip2dir(str(source_zip), str(tmp_path / "unzip1"))
dir2zip(str(tmp_path / "unzip1"), str(tmp_path / "tmp.zip"))
zip2dir(str(tmp_path / "tmp.zip"), str(extract_path))
_check_permissions(extract_path)
def test_dir2zip_deflate(tmp_path):
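    # A megabyte of zero bytes compresses dramatically; requiring the archive to be
    # under a quarter of the input size is a loose check that dir2zip writes
    # deflate-compressed (not stored) entries.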
buffer = b"\0" * 1024 * 1024
input_dir = tmp_path / "input_dir"
input_dir.mkdir()
input_file = input_dir / "zeros.bin"
input_file.write_bytes(buffer)
    output_file = tmp_path / "output.zip"
dir2zip(str(input_dir), str(output_file))
assert output_file.stat().st_size < len(buffer) / 4
``` |
{
"source": "jlaine/django-rest-search",
"score": 2
} |
#### File: django-rest-search/tests/views.py
```python
from rest_framework.generics import CreateAPIView
from rest_search.views import SearchAPIView
from tests.forms import BookSearchForm
from tests.indexers import BookIndexer
from tests.serializers import BookSerializer
class BookCreate(CreateAPIView):
serializer_class = BookSerializer
class BookSearch(SearchAPIView):
form_class = BookSearchForm
indexer_class = BookIndexer
class BookSearchSorted(SearchAPIView):
form_class = BookSearchForm
indexer_class = BookIndexer
def get_sort(self):
return [{"id": {"order": "desc"}}]
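        # Presumably passed through as an Elasticsearch-style sort clause, so search
        # results are returned in descending order of "id".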
``` |