{
"source": "jlainema/co49kb",
"score": 2
}
#### File: jlainema/co49kb/easyeda-merge.py
```python
import json,sys,re
if len(sys.argv) < 4:
print ("Usage: <easy eda json to merge to> [optional merge from] DELTA_X DELTA_Y [optional NET=NEW_NAME,NET=... renames]")
print ("For renames you can also use K=REGEXP:REPLACE where if K is in net/name, re.sub(REGEXP,REPLACE,name) is ran")
sys.exit()
def relocs(s,da,loop=True,nidx=-1):
    nums = filter(None, re.split(r'([-]*\d+[.]*\d*)', s))
nout = ""
oa = da
for ns in nums:
try:
n = float(ns)
if da:
nidx += 1
if (nidx >= 0 and nidx < len(da)):
n += da[nidx % len(da)]
if (nidx == len(da)-1) and loop:
nidx = -1
if int(n) == n:
n = str(int(n))
else:
n = "%.4f"%n
except Exception as e:
c = ns.strip()
if c == "M" or c == "L":
loop = c == "L"
nidx = -1
if c == "A":
loop = False
nidx = -6
n = ns
pass
nout += n
return nout
guid = 100
def merges(sout,s,c="~"):
global guid
for ns in s:
if len(sout) != 0:
sout += c
if ns[:3] == "gge":
ns = "gge"+str(guid)
guid += 1
sout += ns
return sout
def do_net(c,name):
# print (c,name)
if name in renames:
return renames[name]
for l in renames["$$reg"]:
if l[0] in name:
return l[1].sub(l[2],name)
return name
def do_nop(sout,arr,l):
return do_arr(merges(sout, arr[0:1]), arr[1:], l)
def do_track(sout,arr,l):
# print ("TRACK", len(arr))
arr[3] = do_net(arr[0],arr[3])
arr[4] = relocs(arr[4], delta)
return merges(sout, arr)
def do_arc(sout,arr,l):
# print ("ARC", len(arr))
arr[3] = do_net(arr[0],arr[3])
arr[4] = relocs(arr[4], delta)
# print l
return merges(sout,arr)
def do_srg(sout,arr,l):
# print ("SOLIDREGION", len(arr))
arr[2] = do_net(arr[0],arr[2])
arr[3] = relocs(arr[3], delta)
# print l
return merges(sout,arr)
def do_text(sout,arr,l):
# print ("TEXT", len(arr))
arr[2] = relocs(arr[2], delta)
arr[3] = relocs(arr[3], delta, False, 0)
arr[10] = do_net(arr[0],arr[10])
arr[11] = relocs(arr[11], delta)
# print l
return merges(sout,arr)
def do_rect(sout,arr,l):
# print ("RECT", len(arr))
arr[1] = relocs(arr[1], delta)
arr[2] = relocs(arr[2], delta, False, 0)
arr[6] = do_net(arr[0],arr[6])
arr[9] = relocs(arr[9], delta)
if len(arr)>17:
arr[18] = relocs(arr[18], delta)
# print l
return merges(sout,arr)
def do_via(sout,arr,l):
# print ("VIA", len(arr))
arr[1] = relocs(arr[1], delta)
arr[2] = relocs(arr[2], delta, False, 0)
arr[4] = do_net(arr[0],arr[4])
# print l
return merges(sout,arr)
def do_hole(sout,arr,l):
# print ("HOLE", len(arr))
arr[1] = relocs(arr[1], delta)
arr[2] = relocs(arr[2], delta, False, 0)
# print l
return merges(sout,arr)
def do_ellipse(sout,arr,l):
# print ("ELLIPSE", len(arr))
arr[1] = relocs(arr[1], delta)
arr[2] = relocs(arr[2], delta, False, 0)
arr[6] = do_net(arr[0],arr[6])
arr[18] = relocs(arr[18], delta)
return merges(sout,arr)
def do_lib(sout,arr,l):
# print ("LIB", len(arr))
arr[1] = relocs(arr[1], delta)
arr[2] = relocs(arr[2], delta, False, 0)
return merges(sout,arr)
def do_arr(sout,arr,l):
if not arr[0] in found:
print ("!!!", arr[0], str(arr))
sys.exit()
return found[arr[0]](sout,arr,l)
found = {"TRACK":do_track, "ARC":do_arc, "TEXT":do_text, "SOLIDREGION":do_srg, "RECT":do_rect, "VIA":do_via, "LIB":do_lib, "HOLE":do_hole, "CIRCLE":do_hole, "PAD":do_nop, "ELLIPSE":do_ellipse}
mto = json.loads(open(sys.argv[1]).read())
mfr = False
base = 3
try:
mfr = json.loads(open(sys.argv[2]).read())
except Exception as e:
base = 2
if not "shape" in mto:
print ("need to have shapes to merge!")
sys.exit()
delta = (float(sys.argv[base]),float(sys.argv[base+1]))
# print json.dumps(mto)
renames = {}
renames["$$reg"] = []
if len(sys.argv)>base+2:
# get net renames, too
ren = sys.argv[base+2].split(",")
for v in ren:
kv = v.split("=")
if ":" in kv[1]:
rv = kv[1].split(":")
renames["$$reg"].append((rv[0],re.compile(rv[0]), rv[1]))
else:
renames[kv[0]] = kv[1]
if mfr:
ns = mto["shape"]
fr = mfr["shape"]
else:
ns = []
fr = mto["shape"]
for l in fr:
marr = l.split("#@$")
nl = ""
was_object = False
for s in marr:
spl = s.split("~")
if mfr and spl[0] == "TRACK" and spl[2]=="10":
# do not propagate board outline when merging multiples TODO: merge outline poly
# sys.stderr.write("SKIPPING BOARD OUTLINE\n")
# sys.stderr.flush()
continue
was_object = True
nl += do_arr("",spl,s)
nl += "#@$"
if was_object:
ns.append(nl[:-3]) # without the trailing #@$
mto["shape"] = ns
print (json.dumps(mto,indent=1))
```
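The `do_net` helper above resolves a net name either through an exact `NET=NEW_NAME` mapping or through the regex rules stored under the `"$$reg"` key. A minimal standalone sketch of that lookup (the net names and rules below are illustrative, not taken from any real board file):
```python
import re

# simplified rename table: exact renames as plain keys, plus regex rules stored
# as (containment key, compiled pattern, replacement) tuples like do_net expects
renames = {
    "GND": "AGND",
    "$$reg": [("VCC_", re.compile(r"VCC_(.*)"), r"VDD_\1")],
}

def resolve_net(name):
    if name in renames:                      # exact match wins
        return renames[name]
    for key, pattern, repl in renames["$$reg"]:
        if key in name:                      # regex rule applies if its key occurs in the name
            return pattern.sub(repl, name)
    return name

print(resolve_net("GND"))      # -> AGND
print(resolve_net("VCC_3V3"))  # -> VDD_3V3
print(resolve_net("SDA"))      # -> SDA (unchanged)
```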
{
"source": "jlakin2/python-jenkins",
"score": 2
}
#### File: python-jenkins/tests/base.py
```python
import sys
from six.moves.urllib.request import build_opener
from testscenarios import TestWithScenarios
import jenkins
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
class JenkinsTestBase(TestWithScenarios, unittest.TestCase):
crumb_data = {
"crumb": "dab177f483b3dd93483ef6716d8e792d",
"crumbRequestField": ".crumb",
}
scenarios = [
('base_url1', dict(base_url='http://example.com')),
('base_url2', dict(base_url='http://example.com/jenkins'))
]
def setUp(self):
super(JenkinsTestBase, self).setUp()
self.opener = build_opener()
self.j = jenkins.Jenkins(self.base_url, 'test', 'test')
def make_url(self, path):
return u'{0}/{1}'.format(self.base_url, path)
def _check_requests(self, requests):
for req in requests:
self._check_request(req[0][0])
def _check_request(self, request):
# taken from opener.open() in request
# attribute request.type is only set automatically for python 3
# requests, must use request.get_type() for python 2.7
protocol = request.type or request.get_type()
# check that building the request doesn't throw any exception
meth_name = protocol + "_request"
for processor in self.opener.process_request.get(protocol, []):
meth = getattr(processor, meth_name)
request = meth(request)
```
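A hypothetical subclass (not part of the original suite) showing how this base class is meant to be used: each test method runs once per `scenarios` entry, `self.j` is the client under test, and `make_url` builds the expected request URL. The endpoint asserted below is an assumption for illustration.
```python
from mock import patch

import jenkins
from tests.base import JenkinsTestBase


class JenkinsExampleTest(JenkinsTestBase):

    @patch.object(jenkins.Jenkins, 'jenkins_open')
    def test_request_url(self, jenkins_mock):
        # pretend the server returned an empty JSON document
        jenkins_mock.return_value = '{}'
        self.j.get_info()
        self.assertEqual(
            jenkins_mock.call_args[0][0].get_full_url(),
            self.make_url('api/json'))
        self._check_requests(jenkins_mock.call_args_list)
```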
#### File: python-jenkins/tests/helper.py
```python
import functools
from multiprocessing import Process
from multiprocessing import Queue
import traceback
from six.moves import socketserver
class TestsTimeoutException(Exception):
pass
def time_limit(seconds, fp, func, *args, **kwargs):
if fp:
if not hasattr(fp, 'write'):
raise TypeError("Expected 'file-like' object, got '%s'" % fp)
else:
def record(msg):
fp.write(msg)
else:
def record(msg):
return
def capture_results(msg_queue, func, *args, **kwargs):
try:
result = func(*args, **kwargs)
except Exception as e:
msg_queue.put(
"Running function '%s' resulted in exception '%s' with "
"message: '%s'\n" % (func.__name__, e.__class__.__name__, e))
# no point re-raising an exception from the subprocess, instead
# return False
return False
else:
msg_queue.put(
"Running function '%s' finished with result '%s', and"
"stack:\n%s\n" % (func.__name__, result,
traceback.format_stack()))
return result
messages = Queue()
    # although creating a separate process is expensive, it's the only
    # cross-platform way to ensure we can cleanly terminate after a timeout
p = Process(target=functools.partial(capture_results, messages, func),
args=args, kwargs=kwargs)
p.start()
p.join(seconds)
if p.is_alive():
p.terminate()
while not messages.empty():
record(messages.get())
record("Running function '%s' did not finish\n" % func.__name__)
raise TestsTimeoutException
else:
while not messages.empty():
record(messages.get())
record("Running function '%s' finished with exit code '%s'\n"
% (func.__name__, p.exitcode))
class NullServer(socketserver.TCPServer):
request_queue_size = 1
def __init__(self, server_address, *args, **kwargs):
# TCPServer is old style in python 2.x so cannot use
# super() correctly, explicitly call __init__.
# simply init'ing is sufficient to open the port, which
# with the server not started creates a black hole server
socketserver.TCPServer.__init__(
self, server_address, socketserver.BaseRequestHandler,
*args, **kwargs)
```
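A minimal usage sketch for `time_limit` (illustrative only, assuming a fork-based platform so the nested worker function can be handed to `multiprocessing`): run a slow callable under a wall-clock limit and capture the recorded messages in an in-memory buffer.
```python
import time
from io import StringIO

from tests.helper import TestsTimeoutException, time_limit

buf = StringIO()
try:
    # the sleep outlasts the 0.5 second limit, so the subprocess is terminated
    time_limit(0.5, buf, time.sleep, 5)
except TestsTimeoutException:
    print("timed out as expected")
print(buf.getvalue())
```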
#### File: python-jenkins/tests/test_jenkins_sockets.py
```python
from six.moves import StringIO
import testtools
from testtools.content import text_content
import jenkins
from tests.helper import NullServer
from tests.helper import TestsTimeoutException
from tests.helper import time_limit
class JenkinsRequestTimeoutTests(testtools.TestCase):
def setUp(self):
super(JenkinsRequestTimeoutTests, self).setUp()
self.server = NullServer(("127.0.0.1", 0))
self.messages = StringIO()
self.addOnException(self._get_messages)
def _get_messages(self, exc_info):
self.addDetail('timeout-tests-messages',
text_content(self.messages.getvalue()))
def test_jenkins_open_timeout(self):
j = jenkins.Jenkins("http://%s:%s" % self.server.server_address,
None, None, timeout=0.1)
request = jenkins.Request('http://%s:%s/job/TestJob' %
self.server.server_address)
# assert our request times out when no response
with testtools.ExpectedException(jenkins.TimeoutException):
j.jenkins_open(request, add_crumb=False)
def test_jenkins_open_no_timeout(self):
j = jenkins.Jenkins("http://%s:%s" % self.server.server_address,
None, None)
request = jenkins.Request('http://%s:%s/job/TestJob' %
self.server.server_address)
# assert we don't timeout quickly like previous test when
# no timeout defined.
with testtools.ExpectedException(TestsTimeoutException):
time_limit(0.5, self.messages,
j.jenkins_open, request, add_crumb=False)
```
#### File: python-jenkins/tests/test_whoami.py
```python
import json
from mock import patch
import jenkins
from tests.base import JenkinsTestBase
class JenkinsWhoamiTest(JenkinsTestBase):
@patch.object(jenkins.Jenkins, 'jenkins_open')
def test_simple(self, jenkins_mock):
user_to_return = \
{u'absoluteUrl': u'https://example.com/jenkins/user/jsmith',
u'description': None,
u'fullName': u'<NAME>',
u'id': u'jsmith',
u'property': [{},
{},
{},
{u'address': u'<EMAIL>'},
{},
{},
{u'insensitiveSearch': False},
{}]}
jenkins_mock.return_value = json.dumps(user_to_return)
user = self.j.get_whoami()
self.assertEqual(user, user_to_return)
self.assertEqual(
jenkins_mock.call_args[0][0].get_full_url(),
self.make_url('me/api/json'))
self._check_requests(jenkins_mock.call_args_list)
@patch.object(jenkins.Jenkins, 'jenkins_open')
def test_raise_HTTPError(self, jenkins_mock):
jenkins_mock.side_effect = jenkins.HTTPError(
self.make_url('me/api/json'),
code=401,
msg='basic auth failed',
hdrs=[],
fp=None)
with self.assertRaises(jenkins.JenkinsException):
self.j.get_whoami()
self.assertEqual(
jenkins_mock.call_args[0][0].get_full_url(),
self.make_url('me/api/json'))
self._check_requests(jenkins_mock.call_args_list)
```
{
"source": "jlakkis/CarDEC",
"score": 3
}
#### File: CarDEC/CarDEC/CarDEC_dataloaders.py
```python
from tensorflow import convert_to_tensor as tensor
from numpy import setdiff1d
from numpy.random import choice, seed
class batch_sampler(object):
def __init__(self, array, val_frac, batch_size, splitseed):
seed(splitseed)
self.val_indices = choice(range(len(array)), round(val_frac * len(array)), False)
self.train_indices = setdiff1d(range(len(array)), self.val_indices)
self.batch_size = batch_size
def __iter__(self):
batch = []
if self.val:
for idx in self.val_indices:
batch.append(idx)
if len(batch) == self.batch_size:
yield batch
batch = []
else:
train_idx = choice(self.train_indices, len(self.train_indices), False)
for idx in train_idx:
batch.append(idx)
if len(batch) == self.batch_size:
yield batch
batch = []
if batch:
yield batch
def __call__(self, val):
self.val = val
return self
class simpleloader(object):
def __init__(self, array, batch_size):
self.array = array
self.batch_size = batch_size
def __iter__(self):
batch = []
for idx in range(len(self.array)):
batch.append(idx)
if len(batch) == self.batch_size:
yield tensor(self.array[batch].copy())
batch = []
if batch:
yield self.array[batch].copy()
class tupleloader(object):
def __init__(self, *arrays, batch_size):
self.arrays = arrays
self.batch_size = batch_size
def __iter__(self):
batch = []
for idx in range(len(self.arrays[0])):
batch.append(idx)
if len(batch) == self.batch_size:
yield [tensor(arr[batch].copy()) for arr in self.arrays]
batch = []
if batch:
yield [tensor(arr[batch].copy()) for arr in self.arrays]
class aeloader(object):
def __init__(self, *arrays, val_frac, batch_size, splitseed):
self.arrays = arrays
self.batch_size = batch_size
self.sampler = batch_sampler(arrays[0], val_frac, batch_size, splitseed)
def __iter__(self):
for idxs in self.sampler(self.val):
yield [tensor(arr[idxs].copy()) for arr in self.arrays]
def __call__(self, val):
self.val = val
return self
class countloader(object):
def __init__(self, embedding, target, sizefactor, val_frac, batch_size, splitseed):
self.sampler = batch_sampler(embedding, val_frac, batch_size, splitseed)
self.embedding = embedding
self.target = target
self.sizefactor = sizefactor
def __iter__(self):
for idxs in self.sampler(self.val):
yield (tensor(self.embedding[idxs].copy()), tensor(self.sizefactor[idxs].copy())), tensor(self.target[idxs].copy())
def __call__(self, val):
self.val = val
return self
class dataloader(object):
def __init__(self, hvg_input, hvg_target, lvg_input = None, lvg_target = None, val_frac = 0.1, batch_size = 128, splitseed = 0):
self.sampler = batch_sampler(hvg_input, val_frac, batch_size, splitseed)
self.hvg_input = hvg_input
self.hvg_target = hvg_target
self.lvg_input = lvg_input
self.lvg_target = lvg_target
def __iter__(self):
for idxs in self.sampler(self.val):
hvg_input = tensor(self.hvg_input[idxs].copy())
hvg_target = tensor(self.hvg_target[idxs].copy())
p_target = tensor(self.p_target[idxs].copy())
if (self.lvg_input is not None) and (self.lvg_target is not None):
lvg_input = tensor(self.lvg_input[idxs].copy())
lvg_target = tensor(self.lvg_target[idxs].copy())
else:
lvg_input = None
lvg_target = None
yield [hvg_input, lvg_input], hvg_target, lvg_target, p_target
def __call__(self, val):
self.val = val
return self
def update_p(self, new_p_target):
self.p_target = new_p_target
```
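An illustrative sketch (toy arrays, assuming the classes above are importable) of how the samplers split cells into training and validation minibatches: calling a sampler with `val=False`/`True` selects which index set the next iteration walks over.
```python
import numpy as np

X = np.random.rand(1000, 32).astype('float32')   # e.g. normalized HVG expression

sampler = batch_sampler(X, val_frac=0.1, batch_size=128, splitseed=0)
train_batches = list(sampler(False))             # minibatches of training cell indices
val_batches = list(sampler(True))                # minibatches of held-out cell indices
print(len(sampler.train_indices), len(sampler.val_indices), len(train_batches), len(val_batches))

loader = simpleloader(X, batch_size=128)
for batch in loader:                             # yields TensorFlow tensors of rows of X
    print(batch.shape)
    break
```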
#### File: CarDEC/CarDEC/CarDEC_utils.py
```python
import numpy as np
import os
from scipy.sparse import issparse
import scanpy as sc
from anndata import AnnData
def normalize_scanpy(adata, batch_key = None, n_high_var = 1000, LVG = True,
normalize_samples = True, log_normalize = True,
normalize_features = True):
""" This function preprocesses the raw count data.
Arguments:
------------------------------------------------------------------
- adata: `anndata.AnnData`, the annotated data matrix of shape (n_obs, n_vars). Rows correspond to cells and columns to genes.
- batch_key: `str`, string specifying the name of the column in the observation dataframe which identifies the batch of each cell. If this is left as None, then all cells are assumed to be from one batch.
    - n_high_var: `int`, integer specifying the number of genes to be identified as highly variable. E.g. if n_high_var = 2000, then the 2000 genes with the highest variance are designated as highly variable.
- LVG: `bool`, Whether to retain and preprocess LVGs.
- normalize_samples: `bool`, If True, normalize expression of each gene in each cell by the sum of expression counts in that cell.
- log_normalize: `bool`, If True, log transform expression. I.e., compute log(expression + 1) for each gene, cell expression count.
- normalize_features: `bool`, If True, z-score normalize each gene's expression.
Returns:
------------------------------------------------------------------
- adata: `anndata.AnnData`, the annotated data matrix of shape (n_obs, n_vars). Contains preprocessed data.
"""
n, p = adata.shape
sparsemode = issparse(adata.X)
if batch_key is not None:
batch = list(adata.obs[batch_key])
batch = convert_vector_to_encoding(batch)
batch = np.asarray(batch)
batch = batch.astype('float32')
else:
batch = np.ones((n,), dtype = 'float32')
norm_by_batch = False
sc.pp.filter_genes(adata, min_counts=1)
sc.pp.filter_cells(adata, min_counts=1)
count = adata.X.copy()
if normalize_samples:
out = sc.pp.normalize_total(adata, inplace = False)
obs_ = adata.obs
var_ = adata.var
adata = None
adata = AnnData(out['X'])
adata.obs = obs_
adata.var = var_
size_factors = out['norm_factor'] / np.median(out['norm_factor'])
out = None
else:
size_factors = np.ones((adata.shape[0], ))
if not log_normalize:
adata_ = adata.copy()
sc.pp.log1p(adata)
if n_high_var is not None:
sc.pp.highly_variable_genes(adata, inplace = True, min_mean = 0.0125, max_mean = 3, min_disp = 0.5,
n_bins = 20, n_top_genes = n_high_var, batch_key = batch_key)
hvg = adata.var['highly_variable'].values
if not log_normalize:
adata = adata_.copy()
else:
hvg = [True] * adata.shape[1]
if normalize_features:
batch_list = np.unique(batch)
if sparsemode:
adata.X = adata.X.toarray()
for batch_ in batch_list:
indices = [x == batch_ for x in batch]
sub_adata = adata[indices]
sc.pp.scale(sub_adata)
adata[indices] = sub_adata.X
adata.layers["normalized input"] = adata.X
adata.X = count
adata.var['Variance Type'] = [['LVG', 'HVG'][int(x)] for x in hvg]
else:
if sparsemode:
adata.layers["normalized input"] = adata.X.toarray()
else:
adata.layers["normalized input"] = adata.X
adata.var['Variance Type'] = [['LVG', 'HVG'][int(x)] for x in hvg]
if n_high_var is not None:
del_keys = ['dispersions', 'dispersions_norm', 'highly_variable', 'highly_variable_intersection', 'highly_variable_nbatches', 'means']
del_keys = [x for x in del_keys if x in adata.var.keys()]
adata.var = adata.var.drop(del_keys, axis = 1)
y = np.unique(batch)
num_batch = len(y)
adata.obs['size factors'] = size_factors.astype('float32')
adata.obs['batch'] = batch
adata.uns['num_batch'] = num_batch
if sparsemode:
adata.X = adata.X.toarray()
if not LVG:
adata = adata[:, adata.var['Variance Type'] == 'HVG']
return adata
def build_dir(dir_path):
""" This function builds a directory if it does not exist.
Arguments:
------------------------------------------------------------------
    - dir_path: `str`, The directory to build. E.g. if dir_path = 'folder1/folder2/folder3', then this function creates folder1 if it does not already exist. Then it creates folder1/folder2 if folder2 does not exist in folder1. Then it creates folder1/folder2/folder3 if folder3 does not exist in folder2.
"""
subdirs = [dir_path]
substring = dir_path
while substring != '':
splt_dir = os.path.split(substring)
substring = splt_dir[0]
subdirs.append(substring)
subdirs.pop()
subdirs = [x for x in subdirs if os.path.basename(x) != '..']
n = len(subdirs)
subdirs = [subdirs[n - 1 - x] for x in range(n)]
for dir_ in subdirs:
if not os.path.isdir(dir_):
os.mkdir(dir_)
def convert_string_to_encoding(string, vector_key):
"""A function to convert a string to a numeric encoding.
Arguments:
------------------------------------------------------------------
- string: `str`, The specific string to convert to a numeric encoding.
- vector_key: `np.ndarray`, Array of all possible values of string.
Returns:
------------------------------------------------------------------
- encoding: `int`, The integer encoding of string.
"""
return np.argwhere(vector_key == string)[0][0]
def convert_vector_to_encoding(vector):
"""A function to convert a vector of strings to a dense numeric encoding.
Arguments:
------------------------------------------------------------------
- vector: `array_like`, The vector of strings to encode.
Returns:
------------------------------------------------------------------
- vector_num: `list`, A list containing the dense numeric encoding.
"""
vector_key = np.unique(vector)
vector_strings = list(vector)
vector_num = [convert_string_to_encoding(string, vector_key) for string in vector_strings]
return vector_num
def find_resolution(adata_, n_clusters, random):
"""A function to find the louvain resolution tjat corresponds to a prespecified number of clusters, if it exists.
Arguments:
------------------------------------------------------------------
- adata_: `anndata.AnnData`, the annotated data matrix of shape (n_obs, n_vars). Rows correspond to cells and columns to low dimension features.
- n_clusters: `int`, Number of clusters.
- random: `int`, The random seed.
Returns:
------------------------------------------------------------------
- resolution: `float`, The resolution that gives n_clusters after running louvain's clustering algorithm.
"""
obtained_clusters = -1
iteration = 0
resolutions = [0., 1000.]
while obtained_clusters != n_clusters and iteration < 50:
current_res = sum(resolutions)/2
adata = sc.tl.louvain(adata_, resolution = current_res, random_state = random, copy = True)
labels = adata.obs['louvain']
obtained_clusters = len(np.unique(labels))
if obtained_clusters < n_clusters:
resolutions[0] = current_res
else:
resolutions[1] = current_res
iteration = iteration + 1
return current_res
```
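An illustrative call to `normalize_scanpy` on a small synthetic dataset (the counts and batch labels below are made up): it returns an AnnData whose `"normalized input"` layer feeds the model and whose `var['Variance Type']` column records the HVG/LVG split.
```python
import numpy as np
from anndata import AnnData

counts = np.random.poisson(1.0, size=(500, 2000)).astype('float32')
adata = AnnData(counts)
adata.obs['sample'] = ['batch1'] * 250 + ['batch2'] * 250

adata = normalize_scanpy(adata, batch_key='sample', n_high_var=1000)

print(adata.layers['normalized input'].shape)       # z-scored expression used as model input
print(adata.var['Variance Type'].value_counts())    # HVG vs LVG assignment per gene
print(adata.obs['size factors'][:5])                # per-cell library-size factors
```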
{
"source": "jlakkis/sciPENN",
"score": 3
}
#### File: sciPENN/Data_Infrastructure/Samplers.py
```python
from numpy import arange, setdiff1d
from numpy.random import choice
class batchSampler:
def __init__(self, indices, train_keys, bsize, shuffle = False):
self.indices = indices
self.train_keys = train_keys
self.bsize = bsize
self.shuffle = shuffle
def __iter__(self):
if self.shuffle:
indices = choice(self.indices, size = len(self.indices), replace = False)
else:
indices = self.indices
minibatch_idx = []
bool_idx = []
for idx in indices:
minibatch_idx.append(idx)
bool_idx.append(sum([int(idx >= x) for x in self.train_keys]))
if len(minibatch_idx) >= self.bsize:
yield minibatch_idx, bool_idx
minibatch_idx, bool_idx = [], []
if minibatch_idx:
yield minibatch_idx, bool_idx
def __len__(self):
return len(self.indices)
def build_trainSamplers(adata, n_train, bsize = 128, val_frac = 0.1):
num_val = round(val_frac * len(adata))
assert num_val >= 1
idx = arange(len(adata))
val_idx = choice(idx, num_val, replace = False)
train_indices, val_indices = setdiff1d(idx, val_idx).tolist(), val_idx.tolist()
train_sampler = batchSampler(train_indices, n_train, bsize, shuffle = True)
val_sampler = batchSampler(val_indices, n_train, bsize)
return train_sampler, val_sampler
def build_testSampler(adata, train_keys, bsize = 128):
indices = list(range(len(adata)))
return batchSampler(indices, train_keys, bsize)
```
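An illustrative sketch of `build_trainSamplers` with a toy AnnData (the `train_keys` boundaries are made-up values): each minibatch pairs cell indices with a per-cell count of how many dataset boundaries the index has passed, which is how `bool_idx` tags the dataset of origin.
```python
import numpy as np
from anndata import AnnData

adata = AnnData(np.random.rand(1000, 50).astype('float32'))
train_keys = [600, 1000]   # hypothetical end offsets of two training datasets

train_sampler, val_sampler = build_trainSamplers(adata, train_keys, bsize=128)

minibatch_idx, bool_idx = next(iter(train_sampler))
print(len(minibatch_idx), bool_idx[:5])
```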
#### File: sciPENN/Network/Model.py
```python
from math import log, exp
from numpy import inf, zeros, zeros_like as np_zeros_like, arange, asarray, empty
from pandas import concat
from anndata import AnnData
from torch import cat, no_grad, randn, zeros_like, zeros as torch_zeros, ones, argmax
from torch.nn import Module, Linear, Sequential, RNNCell, Softplus, Parameter, Softmax
from torch.optim import Adam
from torch.optim.lr_scheduler import StepLR
from .Layers import Input_Block, FF_Block, LambdaLayer, Dual_Forward
class sciPENN_Model(Module):
def __init__(self, p_mod1, p_mod2, loss1, loss2, quantiles, categories):
super(sciPENN_Model, self).__init__()
h_size, drop_rate = 512, 0.25
self.RNNCell = RNNCell(h_size, h_size)
self.input_block = Input_Block(p_mod1, h_size, drop_rate, drop_rate)
self.skip_1 = FF_Block(h_size, drop_rate)
self.skip_2 = FF_Block(h_size, drop_rate)
self.skip_3 = FF_Block(h_size, drop_rate)
MSE_output = Linear(h_size, p_mod2)
if len(quantiles) > 0:
quantile_layer = []
quantile_layer.append(Linear(h_size, p_mod2 * len(quantiles)))
quantile_layer.append(LambdaLayer(lambda x: x.view(-1, p_mod2, len(quantiles))))
quantile_layer = Sequential(*quantile_layer)
self.mod2_out = Dual_Forward(MSE_output, quantile_layer)
else:
self.mod2_out = MSE_output
if categories is not None:
self.celltype_out = Sequential(Linear(h_size, len(categories)), Softmax(1))
self.forward = self.forward_transfer
self.categories_arr = empty((len(categories), ), dtype = 'object')
for cat in categories:
self.categories_arr[categories[cat]] = cat
else:
self.forward = self.forward_simple
self.categories_arr = None
self.quantiles = quantiles
self.loss1, self.loss2 = loss1, loss2
def forward_transfer(self, x):
x = self.input_block(x)
h = self.RNNCell(x, zeros_like(x))
x = self.skip_1(x)
h = self.RNNCell(x, h)
x = self.skip_2(x)
h = self.RNNCell(x, h)
x = self.skip_3(x)
h = self.RNNCell(x, h)
return {'celltypes': self.celltype_out(h.detach()), 'modality 2': self.mod2_out(h), 'embedding': h}
def forward_simple(self, x):
x = self.input_block(x)
h = self.RNNCell(x, zeros_like(x))
x = self.skip_1(x)
h = self.RNNCell(x, h)
x = self.skip_2(x)
h = self.RNNCell(x, h)
x = self.skip_3(x)
h = self.RNNCell(x, h)
return {'celltypes': None, 'modality 2': self.mod2_out(h), 'embedding': h}
def train_backprop(self, train_loader, val_loader,
n_epoch = 10000, ES_max = 30, decay_max = 10, decay_step = 0.1, lr = 10**(-3)):
optimizer = Adam(self.parameters(), lr = lr)
scheduler = StepLR(optimizer, step_size = 1, gamma = decay_step)
patience = 0
bestloss = inf
if self.categories_arr is None:
get_correct = lambda x: 0
else:
get_correct = lambda outputs: (argmax(outputs['celltypes'], axis = 1) == celltypes).sum()
for epoch in range(n_epoch):
with no_grad():
running_loss, rtype_acc = 0., 0.
self.eval()
for batch, inputs in enumerate(val_loader):
mod1, mod2, protein_bools, celltypes = inputs
outputs = self(mod1)
n_correct = get_correct(outputs)
mod2_loss = self.loss2(outputs['modality 2'], mod2, protein_bools)
rtype_acc += n_correct
running_loss += mod2_loss.item() * len(mod2)
if self.categories_arr is None:
print(f"Epoch {epoch} prediction loss = {running_loss/len(val_loader):.3f}")
else:
print(f"Epoch {epoch} prediction loss = {running_loss/len(val_loader):.3f}, validation accuracy = {rtype_acc/len(val_loader):.3f}")
patience += 1
if bestloss/1.005 > running_loss:
bestloss, patience = running_loss, 0
if (patience + 1) % decay_max == 0:
scheduler.step()
print(f"Decaying loss to {optimizer.param_groups[0]['lr']}")
if (patience + 1) > ES_max:
break
self.train()
for batch, inputs in enumerate(train_loader):
optimizer.zero_grad()
mod1, mod2, protein_bools, celltypes = inputs
outputs = self(mod1)
mod1_loss = self.loss1(outputs['celltypes'], celltypes)
mod2_loss = self.loss2(outputs['modality 2'], mod2, protein_bools)
loss = mod1_loss + mod2_loss
loss.backward()
optimizer.step()
def impute(self, impute_loader, requested_quantiles, denoise_genes, proteins):
imputed_test = proteins.copy()
for quantile in requested_quantiles:
imputed_test.layers['q' + str(round(100 * quantile))] = np_zeros_like(imputed_test.X)
self.eval()
start = 0
for mod1, bools, celltypes in impute_loader:
end = start + mod1.shape[0]
with no_grad():
outputs = self(mod1)
if len(self.quantiles) > 0:
mod2_impute, mod2_quantile = outputs['modality 2']
else:
mod2_impute = outputs['modality 2']
imputed_test.X[start:end] = self.fill_predicted(imputed_test.X[start:end], mod2_impute, bools)
for quantile in requested_quantiles:
index = [i for i, q in enumerate(self.quantiles) if quantile == q][0]
q_name = 'q' + str(round(100 * quantile))
imputed_test.layers[q_name][start:end] = mod2_quantile[:, : , index].cpu().numpy()
start = end
return imputed_test
def embed(self, impute_loader, test_loader, cells_train, cells_test):
if cells_test is not None:
embedding = AnnData(zeros(shape = (len(cells_train) + len(cells_test), 512)))
embedding.obs = concat((cells_train, cells_test), join = 'inner')
else:
embedding = AnnData(zeros(shape = (len(cells_train), 512)))
embedding.obs = cells_train
self.eval()
start = 0
for mod1, bools, celltypes in impute_loader:
end = start + mod1.shape[0]
outputs = self(mod1)
embedding[start:end] = outputs['embedding'].detach().cpu().numpy()
start = end
if cells_test is not None:
for mod1 in test_loader:
end = start + mod1.shape[0]
outputs = self(mod1)
embedding[start:end] = outputs['embedding'].detach().cpu().numpy()
start = end
return embedding
def fill_predicted(self, array, predicted, bools):
bools = bools.cpu().numpy()
return (1. - bools) * predicted.cpu().numpy() + array
def predict(self, test_loader, requested_quantiles, denoise_genes, proteins, cells):
imputed_test = AnnData(zeros(shape = (len(cells), len(proteins.var))))
imputed_test.obs = cells
imputed_test.var.index = proteins.var.index
if self.categories_arr is not None:
celltypes = ['None'] * len(cells)
for quantile in requested_quantiles:
imputed_test.layers['q' + str(round(100 * quantile))] = np_zeros_like(imputed_test.X)
self.eval()
start = 0
for mod1 in test_loader:
end = start + mod1.shape[0]
with no_grad():
outputs = self(mod1)
if self.categories_arr is not None:
predicted_types = argmax(outputs['celltypes'], axis = 1).cpu().numpy()
celltypes[start:end] = self.categories_arr[predicted_types].tolist()
if len(self.quantiles) > 0:
mod2_impute, mod2_quantile = outputs['modality 2']
else:
mod2_impute = outputs['modality 2']
imputed_test.X[start:end] = mod2_impute.cpu().numpy()
for quantile in requested_quantiles:
index = [i for i, q in enumerate(self.quantiles) if quantile == q][0]
q_name = 'q' + str(round(100 * quantile))
imputed_test.layers[q_name][start:end] = mod2_quantile[:, : , index].cpu().numpy()
start = end
if self.categories_arr is not None:
imputed_test.obs['transfered cell labels'] = celltypes
return imputed_test
```
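A standalone sketch (toy tensors, not the trained model) of how the quantile head above is organised: a single `Linear` layer emits `p_mod2 * len(quantiles)` values per cell, the `LambdaLayer` views them as `(batch, p_mod2, n_quantiles)`, and `impute`/`predict` slice out the column for a requested quantile.
```python
import torch

batch, p_mod2 = 4, 6
quantiles = [0.1, 0.5, 0.9]

flat = torch.randn(batch, p_mod2 * len(quantiles))      # output of the quantile Linear layer
per_quantile = flat.view(-1, p_mod2, len(quantiles))    # same reshape as the LambdaLayer

requested = 0.9
index = [i for i, q in enumerate(quantiles) if requested == q][0]
print(per_quantile[:, :, index].shape)                  # torch.Size([4, 6]) -> q90 predictions
```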
{
"source": "jlalleve-EA/NodeGraphQt",
"score": 2
}
#### File: NodeGraphQt/widgets/viewer.py
```python
import math
from .dialogs import BaseDialog, FileDialog
from .scene import NodeScene
from .tab_search import TabSearchMenuWidget
from .. import QtGui, QtCore, QtWidgets, QtOpenGL
from ..base.menu import BaseMenu
from ..constants import (IN_PORT, OUT_PORT,
PIPE_LAYOUT_CURVED)
from ..qgraphics.node_abstract import AbstractNodeItem
from ..qgraphics.node_backdrop import BackdropNodeItem
from ..qgraphics.pipe import Pipe, LivePipe
from ..qgraphics.port import PortItem
from ..qgraphics.slicer import SlicerPipe
ZOOM_MIN = -0.95
ZOOM_MAX = 2.0
class NodeViewer(QtWidgets.QGraphicsView):
"""
The widget interface used for displaying the scene and nodes.
    Functions in this class should mainly be called by the
    :class:`NodeGraphQt.NodeGraph` class.
"""
# node viewer signals.
# (some of these signals are called by port & node items and connected
# to the node graph slot functions)
moved_nodes = QtCore.Signal(dict)
search_triggered = QtCore.Signal(str, tuple)
connection_sliced = QtCore.Signal(list)
connection_changed = QtCore.Signal(list, list)
insert_node = QtCore.Signal(object, str, dict)
need_show_tab_search = QtCore.Signal()
node_name_changed = QtCore.Signal(str, str)
# pass through signals that are translated into "NodeGraph()" signals.
node_selected = QtCore.Signal(str)
node_selection_changed = QtCore.Signal(list, list)
node_double_clicked = QtCore.Signal(str)
data_dropped = QtCore.Signal(QtCore.QMimeData, QtCore.QPoint)
def __init__(self, parent=None):
super(NodeViewer, self).__init__(parent)
self.setScene(NodeScene(self))
self.setRenderHint(QtGui.QPainter.Antialiasing, True)
self.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.setViewportUpdateMode(QtWidgets.QGraphicsView.FullViewportUpdate)
self.setCacheMode(QtWidgets.QGraphicsView.CacheBackground)
self.setOptimizationFlag(QtWidgets.QGraphicsView.DontAdjustForAntialiasing)
self.setAcceptDrops(True)
self.resize(850, 800)
self._scene_range = QtCore.QRectF(
0, 0, self.size().width(), self.size().height())
self._update_scene()
self._last_size = self.size()
self.editable = True
self._pipe_layout = PIPE_LAYOUT_CURVED
self._detached_port = None
self._start_port = None
self._origin_pos = None
self._previous_pos = QtCore.QPoint(self.width(), self.height())
self._prev_selection_nodes = []
self._prev_selection_pipes = []
self._node_positions = {}
self._rubber_band = QtWidgets.QRubberBand(
QtWidgets.QRubberBand.Rectangle, self
)
self._rubber_band.isActive = False
self._LIVE_PIPE = LivePipe()
self._LIVE_PIPE.setVisible(False)
self.scene().addItem(self._LIVE_PIPE)
self._SLICER_PIPE = SlicerPipe()
self._SLICER_PIPE.setVisible(False)
self.scene().addItem(self._SLICER_PIPE)
self._undo_stack = QtWidgets.QUndoStack(self)
self._search_widget = TabSearchMenuWidget()
self._search_widget.search_submitted.connect(self._on_search_submitted)
        # workaround fix: shortcuts from the non-native menu actions
        # don't seem to trigger, so we create a hidden menu bar.
menu_bar = QtWidgets.QMenuBar(self)
menu_bar.setNativeMenuBar(False)
        # shortcuts don't work with "setVisible(False)".
menu_bar.setMaximumWidth(0)
self._ctx_menu = BaseMenu('NodeGraph', self)
self._ctx_node_menu = BaseMenu('Nodes', self)
menu_bar.addMenu(self._ctx_menu)
menu_bar.addMenu(self._ctx_node_menu)
self._ctx_node_menu.setDisabled(True)
self.acyclic = True
self.LMB_state = False
self.RMB_state = False
self.MMB_state = False
self.ALT_state = False
self.CTRL_state = False
self.SHIFT_state = False
self.COLLIDING_state = False
def __repr__(self):
return '<{}() object at {}>'.format(
self.__class__.__name__, hex(id(self)))
# --- private ---
def _set_viewer_zoom(self, value, sensitivity=None, pos=None):
if pos:
pos = self.mapToScene(pos)
if sensitivity is None:
scale = 1.001 ** value
self.scale(scale, scale, pos)
return
if value == 0.0:
return
scale = (0.9 + sensitivity) if value < 0.0 else (1.1 - sensitivity)
zoom = self.get_zoom()
if ZOOM_MIN >= zoom:
if scale == 0.9:
return
if ZOOM_MAX <= zoom:
if scale == 1.1:
return
self.scale(scale, scale, pos)
def _set_viewer_pan(self, pos_x, pos_y):
speed = self._scene_range.width() * 0.0015
x = -pos_x * speed
y = -pos_y * speed
self._scene_range.adjust(x, y, x, y)
self._update_scene()
def scale(self, sx, sy, pos=None):
        scale = [sx, sy]
center = pos or self._scene_range.center()
w = self._scene_range.width() / scale[0]
h = self._scene_range.height() / scale[1]
self._scene_range = QtCore.QRectF(
center.x() - (center.x() - self._scene_range.left()) / scale[0],
center.y() - (center.y() - self._scene_range.top()) / scale[1],
w, h
)
self._update_scene()
def _update_scene(self):
"""
Redraw the scene.
"""
self.setSceneRect(self._scene_range)
self.fitInView(self._scene_range, QtCore.Qt.KeepAspectRatio)
def _combined_rect(self, nodes):
group = self.scene().createItemGroup(nodes)
rect = group.boundingRect()
self.scene().destroyItemGroup(group)
return rect
def _items_near(self, pos, item_type=None, width=20, height=20):
x, y = pos.x() - width, pos.y() - height
rect = QtCore.QRectF(x, y, width, height)
items = []
excl = [self._LIVE_PIPE, self._SLICER_PIPE]
for item in self.scene().items(rect):
if item in excl:
continue
if not item_type or isinstance(item, item_type):
items.append(item)
return items
def _on_search_submitted(self, node_type):
pos = self.mapToScene(self._previous_pos)
self.search_triggered.emit(node_type, (pos.x(), pos.y()))
def _on_pipes_sliced(self, path):
ports = []
for i in self.scene().items(path):
if isinstance(i, Pipe) and i != self._LIVE_PIPE:
if any([i.input_port.locked, i.output_port.locked]):
continue
ports.append([i.input_port, i.output_port])
self.connection_sliced.emit(ports)
# --- reimplemented events ---
def resizeEvent(self, event):
delta = max(self.size().width() / self._last_size.width(),
self.size().height() / self._last_size.height())
self._set_viewer_zoom(delta)
self._last_size = self.size()
super(NodeViewer, self).resizeEvent(event)
def contextMenuEvent(self, event):
self.RMB_state = False
ctx_menu = None
if self._ctx_node_menu.isEnabled():
pos = self.mapToScene(self._previous_pos)
items = self._items_near(pos)
nodes = [i for i in items if isinstance(i, AbstractNodeItem)]
if nodes:
node = nodes[0]
ctx_menu = self._ctx_node_menu.get_menu(node.type_, node.id)
if ctx_menu:
for action in ctx_menu.actions():
if not action.menu():
action.node_id = node.id
ctx_menu = ctx_menu or self._ctx_menu
if len(ctx_menu.actions()) > 0:
if ctx_menu.isEnabled():
ctx_menu.exec_(event.globalPos())
else:
return super(NodeViewer, self).contextMenuEvent(event)
else:
self.need_show_tab_search.emit()
def mousePressEvent(self, event):
if event.button() == QtCore.Qt.LeftButton:
self.LMB_state = True
elif event.button() == QtCore.Qt.RightButton:
self.RMB_state = True
elif event.button() == QtCore.Qt.MiddleButton:
self.MMB_state = True
self._origin_pos = event.pos()
self._previous_pos = event.pos()
self._prev_selection_nodes, self._prev_selection_pipes = \
self.selected_items()
# close tab search
if self._search_widget.isVisible():
self.tab_search_toggle()
# cursor pos.
map_pos = self.mapToScene(event.pos())
# pipe slicer enabled.
if self.ALT_state and self.SHIFT_state and self.LMB_state:
self._SLICER_PIPE.draw_path(map_pos, map_pos)
self._SLICER_PIPE.setVisible(True)
return
# pan mode.
if self.ALT_state:
return
items = self._items_near(map_pos, None, 20, 20)
nodes = [i for i in items if isinstance(i, AbstractNodeItem)]
if nodes:
self.MMB_state = False
# toggle extend node selection.
if self.LMB_state:
if self.SHIFT_state:
for node in nodes:
node.selected = not node.selected
elif self.CTRL_state:
for node in nodes:
node.selected = False
# update the recorded node positions.
self._node_positions.update(
{n: n.xy_pos for n in self.selected_nodes()}
)
        # show the selection marquee.
if self.LMB_state and not items:
rect = QtCore.QRect(self._previous_pos, QtCore.QSize())
rect = rect.normalized()
map_rect = self.mapToScene(rect).boundingRect()
self.scene().update(map_rect)
self._rubber_band.setGeometry(rect)
self._rubber_band.isActive = True
if self.LMB_state and (self.SHIFT_state or self.CTRL_state):
return
if not self._LIVE_PIPE.isVisible():
super(NodeViewer, self).mousePressEvent(event)
def mouseReleaseEvent(self, event):
if event.button() == QtCore.Qt.LeftButton:
self.LMB_state = False
elif event.button() == QtCore.Qt.RightButton:
self.RMB_state = False
elif event.button() == QtCore.Qt.MiddleButton:
self.MMB_state = False
# hide pipe slicer.
if self._SLICER_PIPE.isVisible():
self._on_pipes_sliced(self._SLICER_PIPE.path())
p = QtCore.QPointF(0.0, 0.0)
self._SLICER_PIPE.draw_path(p, p)
self._SLICER_PIPE.setVisible(False)
# hide selection marquee
if self._rubber_band.isActive:
self._rubber_band.isActive = False
if self._rubber_band.isVisible():
rect = self._rubber_band.rect()
map_rect = self.mapToScene(rect).boundingRect()
self._rubber_band.hide()
rect = QtCore.QRect(self._origin_pos, event.pos()).normalized()
rect_items = self.scene().items(
self.mapToScene(rect).boundingRect()
)
node_ids = []
for item in rect_items:
if isinstance(item, AbstractNodeItem):
node_ids.append(item.id)
# emit the node selection signals.
if node_ids:
prev_ids = [
n.id for n in self._prev_selection_nodes
if not n.selected
]
self.node_selected.emit(node_ids[0])
self.node_selection_changed.emit(node_ids, prev_ids)
self.scene().update(map_rect)
return
# find position changed nodes and emit signal.
moved_nodes = {
n: xy_pos for n, xy_pos in self._node_positions.items()
if n.xy_pos != xy_pos
}
        # only emit if the node is not colliding with a pipe.
if moved_nodes and not self.COLLIDING_state:
self.moved_nodes.emit(moved_nodes)
# reset recorded positions.
self._node_positions = {}
# emit signal if selected node collides with pipe.
# Note: if collide state is true then only 1 node is selected.
nodes, pipes = self.selected_items()
if self.COLLIDING_state:
if nodes and pipes:
self.insert_node.emit(pipes[0], nodes[0].id, moved_nodes)
# emit node selection changed signal.
prev_ids = [n.id for n in self._prev_selection_nodes if not n.selected]
node_ids = [n.id for n in nodes if n not in self._prev_selection_nodes]
self.node_selection_changed.emit(node_ids, prev_ids)
super(NodeViewer, self).mouseReleaseEvent(event)
def mouseMoveEvent(self, event):
if self.ALT_state and self.SHIFT_state:
if self.LMB_state and self._SLICER_PIPE.isVisible():
p1 = self._SLICER_PIPE.path().pointAtPercent(0)
p2 = self.mapToScene(self._previous_pos)
self._SLICER_PIPE.draw_path(p1, p2)
self._SLICER_PIPE.show()
self._previous_pos = event.pos()
super(NodeViewer, self).mouseMoveEvent(event)
return
if self.MMB_state and self.ALT_state:
pos_x = (event.x() - self._previous_pos.x())
zoom = 0.1 if pos_x > 0 else -0.1
self._set_viewer_zoom(zoom, 0.05, pos=event.pos())
elif self.MMB_state or (self.LMB_state and self.ALT_state):
pos_x = (event.x() - self._previous_pos.x())
pos_y = (event.y() - self._previous_pos.y())
self._set_viewer_pan(pos_x, pos_y)
if self.LMB_state and self._rubber_band.isActive:
rect = QtCore.QRect(self._origin_pos, event.pos()).normalized()
# if the rubber band is too small, do not show it.
if max(rect.width(), rect.height()) > 5:
if not self._rubber_band.isVisible():
self._rubber_band.show()
map_rect = self.mapToScene(rect).boundingRect()
path = QtGui.QPainterPath()
path.addRect(map_rect)
self._rubber_band.setGeometry(rect)
self.scene().setSelectionArea(path, QtCore.Qt.IntersectsItemShape)
self.scene().update(map_rect)
if self.SHIFT_state or self.CTRL_state:
nodes, pipes = self.selected_items()
for pipe in self._prev_selection_pipes:
pipe.setSelected(True)
for node in self._prev_selection_nodes:
node.selected = True
if self.CTRL_state:
for pipe in pipes:
pipe.setSelected(False)
for node in nodes:
node.selected = False
elif self.LMB_state:
self.COLLIDING_state = False
nodes = self.selected_nodes()
if len(nodes) == 1:
node = nodes[0]
for pipe in self.selected_pipes():
pipe.setSelected(False)
for item in node.collidingItems():
if isinstance(item, Pipe) and item.isVisible():
if not item.input_port:
continue
if not item.input_port.node is node and \
not item.output_port.node is node:
item.setSelected(True)
self.COLLIDING_state = True
break
self._previous_pos = event.pos()
super(NodeViewer, self).mouseMoveEvent(event)
def wheelEvent(self, event):
try:
delta = event.delta()
except AttributeError:
# For PyQt5
delta = event.angleDelta().y()
if delta == 0:
delta = event.angleDelta().x()
self._set_viewer_zoom(delta, pos=event.pos())
def dropEvent(self, event):
pos = self.mapToScene(event.pos())
event.setDropAction(QtCore.Qt.MoveAction)
self.data_dropped.emit(
event.mimeData(), QtCore.QPoint(pos.x(), pos.y()))
def dragEnterEvent(self, event):
if event.mimeData().hasFormat('text/plain'):
event.accept()
else:
event.ignore()
def dragMoveEvent(self, event):
if event.mimeData().hasFormat('text/plain'):
event.accept()
else:
event.ignore()
def dragLeaveEvent(self, event):
event.ignore()
def keyPressEvent(self, event):
"""
Key press event re-implemented to update the states for attributes:
- ALT_state
- CTRL_state
- SHIFT_state
Args:
event (QtGui.QKeyEvent): key event.
"""
self.ALT_state = event.modifiers() == QtCore.Qt.AltModifier
self.CTRL_state = event.modifiers() == QtCore.Qt.ControlModifier
self.SHIFT_state = event.modifiers() == QtCore.Qt.ShiftModifier
# Todo: find a better solution to catch modifier keys.
if event.modifiers() == (QtCore.Qt.AltModifier | QtCore.Qt.ShiftModifier):
self.ALT_state = True
self.SHIFT_state = True
super(NodeViewer, self).keyPressEvent(event)
def keyReleaseEvent(self, event):
"""
Key release event re-implemented to update the states for attributes:
- ALT_state
- CTRL_state
- SHIFT_state
Args:
event (QtGui.QKeyEvent): key event.
"""
self.ALT_state = event.modifiers() == QtCore.Qt.AltModifier
self.CTRL_state = event.modifiers() == QtCore.Qt.ControlModifier
self.SHIFT_state = event.modifiers() == QtCore.Qt.ShiftModifier
super(NodeViewer, self).keyReleaseEvent(event)
# --- scene events ---
def sceneMouseMoveEvent(self, event):
"""
triggered mouse move event for the scene.
- redraw the connection pipe.
Args:
event (QtWidgets.QGraphicsSceneMouseEvent):
The event handler from the QtWidgets.QGraphicsScene
"""
if not self._LIVE_PIPE.isVisible():
return
if not self._start_port:
return
pos = event.scenePos()
items = self.scene().items(pos)
if items and isinstance(items[0], PortItem):
x = items[0].boundingRect().width() / 2
y = items[0].boundingRect().height() / 2
pos = items[0].scenePos()
pos.setX(pos.x() + x)
pos.setY(pos.y() + y)
self._LIVE_PIPE.draw_path(self._start_port, cursor_pos=pos)
def sceneMousePressEvent(self, event):
"""
triggered mouse press event for the scene (takes priority over viewer event).
- detect selected pipe and start connection.
- remap Shift and Ctrl modifier.
Args:
event (QtWidgets.QGraphicsScenePressEvent):
The event handler from the QtWidgets.QGraphicsScene
"""
# pipe slicer enabled.
if self.ALT_state and self.SHIFT_state:
return
# viewer pan mode.
if self.ALT_state:
return
if self._LIVE_PIPE.isVisible():
self.apply_live_connection(event)
return
pos = event.scenePos()
port_items = self._items_near(pos, PortItem, 5, 5)
if port_items and self.editable:
port = port_items[0]
if port.locked:
return
if not port.multi_connection and port.connected_ports:
self._detached_port = port.connected_ports[0]
self.start_live_connection(port)
if not port.multi_connection:
[p.delete() for p in port.connected_pipes]
return
node_items = self._items_near(pos, AbstractNodeItem, 3, 3)
if node_items:
node = node_items[0]
# record the node positions at selection time.
for n in node_items:
self._node_positions[n] = n.xy_pos
# emit selected node id with LMB.
if event.button() == QtCore.Qt.LeftButton:
self.node_selected.emit(node.id)
if not isinstance(node, BackdropNodeItem):
return
pipe_items = self._items_near(pos, Pipe, 3, 3)
if pipe_items and self.editable:
if not self.LMB_state:
return
pipe = pipe_items[0]
from_port = pipe.port_from_pos(pos, True)
if from_port.locked:
return
from_port.hovered = True
attr = {IN_PORT: 'output_port', OUT_PORT: 'input_port'}
self._detached_port = getattr(pipe, attr[from_port.port_type])
self.start_live_connection(from_port)
self._LIVE_PIPE.draw_path(self._start_port, cursor_pos=pos)
if self.SHIFT_state:
self._LIVE_PIPE.shift_selected = True
return
pipe.delete()
def sceneMouseReleaseEvent(self, event):
"""
triggered mouse release event for the scene.
Args:
event (QtWidgets.QGraphicsSceneMouseEvent):
The event handler from the QtWidgets.QGraphicsScene
"""
if event.button() != QtCore.Qt.MiddleButton:
self.apply_live_connection(event)
# --- port connections ---
def apply_live_connection(self, event):
"""
triggered mouse press/release event for the scene.
- verifies the live connection pipe.
- makes a connection pipe if valid.
- emits the "connection changed" signal.
Args:
event (QtWidgets.QGraphicsSceneMouseEvent):
The event handler from the QtWidgets.QGraphicsScene
"""
if not self._LIVE_PIPE.isVisible():
return
self._start_port.hovered = False
# find the end port.
end_port = None
for item in self.scene().items(event.scenePos()):
if isinstance(item, PortItem):
end_port = item
break
connected = []
disconnected = []
# if port disconnected from existing pipe.
if end_port is None:
if self._detached_port and not self._LIVE_PIPE.shift_selected:
dist = math.hypot(self._previous_pos.x() - self._origin_pos.x(),
self._previous_pos.y() - self._origin_pos.y())
if dist <= 2.0: # cursor pos threshold.
self.establish_connection(self._start_port,
self._detached_port)
self._detached_port = None
else:
disconnected.append((self._start_port, self._detached_port))
self.connection_changed.emit(disconnected, connected)
self._detached_port = None
self.end_live_connection()
return
else:
if self._start_port is end_port:
return
# restore connection check.
restore_connection = any([
# if the end port is locked.
end_port.locked,
# if same port type.
end_port.port_type == self._start_port.port_type,
# if connection to itself.
end_port.node == self._start_port.node,
# if end port is the start port.
end_port == self._start_port,
# if detached port is the end port.
self._detached_port == end_port
])
if restore_connection:
if self._detached_port:
to_port = self._detached_port or end_port
self.establish_connection(self._start_port, to_port)
self._detached_port = None
self.end_live_connection()
return
# end connection if starting port is already connected.
if self._start_port.multi_connection and \
self._start_port in end_port.connected_ports:
self._detached_port = None
self.end_live_connection()
return
# register as disconnected if not acyclic.
if self.acyclic and not self.acyclic_check(self._start_port, end_port):
if self._detached_port:
disconnected.append((self._start_port, self._detached_port))
self.connection_changed.emit(disconnected, connected)
self._detached_port = None
self.end_live_connection()
return
# make connection.
if not end_port.multi_connection and end_port.connected_ports:
dettached_end = end_port.connected_ports[0]
disconnected.append((end_port, dettached_end))
if self._detached_port:
disconnected.append((self._start_port, self._detached_port))
connected.append((self._start_port, end_port))
self.connection_changed.emit(disconnected, connected)
self._detached_port = None
self.end_live_connection()
def start_live_connection(self, selected_port):
"""
create new pipe for the connection.
(show the live pipe visibility from the port following the cursor position)
"""
if not selected_port:
return
self._start_port = selected_port
if self._start_port.type == IN_PORT:
self._LIVE_PIPE.input_port = self._start_port
        elif self._start_port.type == OUT_PORT:
self._LIVE_PIPE.output_port = self._start_port
self._LIVE_PIPE.setVisible(True)
def end_live_connection(self):
"""
delete live connection pipe and reset start port.
(hides the pipe item used for drawing the live connection)
"""
self._LIVE_PIPE.reset_path()
self._LIVE_PIPE.setVisible(False)
self._LIVE_PIPE.shift_selected = False
self._start_port = None
def establish_connection(self, start_port, end_port):
"""
establish a new pipe connection.
(adds a new pipe item to draw between 2 ports)
"""
if not self.editable:
return
pipe = Pipe()
self.scene().addItem(pipe)
pipe.set_connections(start_port, end_port)
pipe.draw_path(pipe.input_port, pipe.output_port)
if start_port.node.selected or end_port.node.selected:
pipe.highlight()
if not start_port.node.visible or not end_port.node.visible:
pipe.hide()
@staticmethod
def acyclic_check(start_port, end_port):
"""
Validate the node connections so it doesn't loop itself.
Args:
start_port (PortItem): port item.
end_port (PortItem): port item.
Returns:
bool: True if port connection is valid.
"""
start_node = start_port.node
check_nodes = [end_port.node]
io_types = {IN_PORT: 'outputs', OUT_PORT: 'inputs'}
while check_nodes:
check_node = check_nodes.pop(0)
for check_port in getattr(check_node, io_types[end_port.port_type]):
if check_port.connected_ports:
for port in check_port.connected_ports:
if port.node != start_node:
check_nodes.append(port.node)
else:
return False
return True
# --- viewer ---
def tab_search_set_nodes(self, nodes):
self._search_widget.set_nodes(nodes)
def tab_search_toggle(self):
if isinstance(self._search_widget, TabSearchMenuWidget):
return
pos = self._previous_pos
state = not self._search_widget.isVisible()
if state:
rect = self._search_widget.rect()
new_pos = QtCore.QPoint(int(pos.x() - rect.width() / 2),
int(pos.y() - rect.height() / 2))
self._search_widget.move(new_pos)
self._search_widget.setVisible(state)
rect = self.mapToScene(rect).boundingRect()
self.scene().update(rect)
else:
self._search_widget.setVisible(state)
self.clearFocus()
def rebuild_tab_search(self):
if isinstance(self._search_widget, TabSearchMenuWidget):
self._search_widget.rebuild = True
def context_menus(self):
return {'graph': self._ctx_menu,
'nodes': self._ctx_node_menu}
@staticmethod
def question_dialog(text, title='Node Graph'):
"""
Prompt node viewer question dialog widget with "yes", "no" buttons.
Args:
text (str): dialog text.
title (str): dialog window title.
Returns:
            bool: True if the user clicks yes.
"""
return BaseDialog.question_dialog(text, title)
@staticmethod
def message_dialog(text, title='Node Graph'):
"""
Prompt node viewer message dialog widget with "ok" button.
Args:
text (str): dialog text.
title (str): dialog window title.
"""
BaseDialog.message_dialog(text, title)
def load_dialog(self, current_dir=None, ext=None):
"""
Prompt node viewer file load dialog widget.
Args:
current_dir (str): directory path starting point. (optional)
ext (str): custom file extension filter type. (optional)
Returns:
str: selected file path.
"""
ext = '*{} '.format(ext) if ext else ''
ext_filter = ';;'.join([
'Node Graph ({}*json)'.format(ext), 'All Files (*)'
])
file_dlg = FileDialog.getOpenFileName(
self, 'Open File', current_dir, ext_filter)
file = file_dlg[0] or None
return file
def save_dialog(self, current_dir=None, ext=None):
"""
Prompt node viewer file save dialog widget.
Args:
current_dir (str): directory path starting point. (optional)
ext (str): custom file extension filter type. (optional)
Returns:
str: selected file path.
"""
ext_label = '*{} '.format(ext) if ext else ''
ext_type = '.{}'.format(ext) if ext else '.json'
ext_map = {'Node Graph ({}*json)'.format(ext_label): ext_type,
'All Files (*)': ''}
file_dlg = FileDialog.getSaveFileName(
self, 'Save Session', current_dir, ';;'.join(ext_map.keys()))
file_path = file_dlg[0]
if not file_path:
return
ext = ext_map[file_dlg[1]]
if ext and not file_path.endswith(ext):
file_path += ext
return file_path
def all_pipes(self):
"""
Returns all pipe qgraphic items.
Returns:
list[Pipe]: instances of pipe items.
"""
excl = [self._LIVE_PIPE, self._SLICER_PIPE]
return [i for i in self.scene().items()
if isinstance(i, Pipe) and i not in excl]
def all_nodes(self):
"""
Returns all node qgraphic items.
Returns:
list[AbstractNodeItem]: instances of node items.
"""
return [i for i in self.scene().items()
if isinstance(i, AbstractNodeItem)]
def selected_nodes(self):
"""
Returns selected node qgraphic items.
Returns:
list[AbstractNodeItem]: instances of node items.
"""
nodes = [item for item in self.scene().selectedItems()
if isinstance(item, AbstractNodeItem)]
return nodes
def selected_pipes(self):
"""
Returns selected pipe qgraphic items.
Returns:
list[Pipe]: pipe items.
"""
pipes = [item for item in self.scene().selectedItems()
if isinstance(item, Pipe)]
return pipes
def selected_items(self):
"""
Return selected graphic items in the scene.
Returns:
tuple(list[AbstractNodeItem], list[Pipe]):
selected (node items, pipe items).
"""
nodes = []
pipes = []
for item in self.scene().selectedItems():
if isinstance(item, AbstractNodeItem):
nodes.append(item)
elif isinstance(item, Pipe):
pipes.append(item)
return nodes, pipes
def add_node(self, node, pos=None):
"""
Add node item into the scene.
Args:
node (AbstractNodeItem): node item instance.
pos (tuple or list): node scene position.
"""
pos = pos or (self._previous_pos.x(), self._previous_pos.y())
node.pre_init(self, pos)
self.scene().addItem(node)
node.post_init(self, pos)
@staticmethod
def remove_node(node):
"""
Remove node item from the scene.
Args:
node (AbstractNodeItem): node item instance.
"""
if isinstance(node, AbstractNodeItem):
node.delete()
def move_nodes(self, nodes, pos=None, offset=None):
"""
Globally move specified nodes.
Args:
nodes (list[AbstractNodeItem]): node items.
pos (tuple or list): custom x, y position.
offset (tuple or list): x, y position offset.
"""
group = self.scene().createItemGroup(nodes)
group_rect = group.boundingRect()
if pos:
x, y = pos
else:
pos = self.mapToScene(self._previous_pos)
x = pos.x() - group_rect.center().x()
y = pos.y() - group_rect.center().y()
if offset:
x += offset[0]
y += offset[1]
group.setPos(x, y)
self.scene().destroyItemGroup(group)
def get_pipes_from_nodes(self, nodes=None):
nodes = nodes or self.selected_nodes()
if not nodes:
return
pipes = []
for node in nodes:
n_inputs = node.inputs if hasattr(node, 'inputs') else []
n_outputs = node.outputs if hasattr(node, 'outputs') else []
for port in n_inputs:
for pipe in port.connected_pipes:
connected_node = pipe.output_port.node
if connected_node in nodes:
pipes.append(pipe)
for port in n_outputs:
for pipe in port.connected_pipes:
connected_node = pipe.input_port.node
if connected_node in nodes:
pipes.append(pipe)
return pipes
def center_selection(self, nodes=None):
if not nodes:
if self.selected_nodes():
nodes = self.selected_nodes()
elif self.all_nodes():
nodes = self.all_nodes()
if len(nodes) == 1:
self.centerOn(nodes[0])
else:
rect = self._combined_rect(nodes)
self.centerOn(rect.center().x(), rect.center().y())
def get_pipe_layout(self):
"""
Returns the pipe layout mode.
Returns:
int: pipe layout mode.
"""
return self._pipe_layout
def set_pipe_layout(self, layout):
"""
Sets the pipe layout mode and redraw all pipe items in the scene.
Args:
            layout (int): pipe layout mode. (see the constants module)
"""
self._pipe_layout = layout
for pipe in self.all_pipes():
pipe.draw_path(pipe.input_port, pipe.output_port)
def reset_zoom(self, cent=None):
"""
Reset the viewer zoom level.
Args:
cent (QtCore.QPoint): specified center.
"""
self._scene_range = QtCore.QRectF(0, 0,
self.size().width(),
self.size().height())
if cent:
self._scene_range.translate(cent - self._scene_range.center())
self._update_scene()
def get_zoom(self):
"""
Returns the viewer zoom level.
Returns:
float: zoom level.
"""
transform = self.transform()
cur_scale = (transform.m11(), transform.m22())
return float('{:0.2f}'.format(cur_scale[0] - 1.0))
def set_zoom(self, value=0.0):
"""
Set the viewer zoom level.
Args:
value (float): zoom level
"""
if value == 0.0:
self.reset_zoom()
return
zoom = self.get_zoom()
if zoom < 0.0:
if not (ZOOM_MIN <= zoom <= ZOOM_MAX):
return
else:
if not (ZOOM_MIN <= value <= ZOOM_MAX):
return
value = value - zoom
self._set_viewer_zoom(value, 0.0)
def zoom_to_nodes(self, nodes):
self._scene_range = self._combined_rect(nodes)
self._update_scene()
if self.get_zoom() > 0.1:
self.reset_zoom(self._scene_range.center())
def force_update(self):
self._update_scene()
def scene_rect(self):
return [self._scene_range.x(), self._scene_range.y(),
self._scene_range.width(), self._scene_range.height()]
def scene_center(self):
"""
Get the center x,y pos from the scene.
Returns:
list[float]: x, y position.
"""
cent = self._scene_range.center()
return [cent.x(), cent.y()]
def nodes_rect_center(self, nodes):
"""
Get the center x,y pos from the specified nodes.
Args:
            nodes (list[AbstractNodeItem]): list of node qgraphics items.
Returns:
list[float]: x, y position.
"""
cent = self._combined_rect(nodes).center()
return [cent.x(), cent.y()]
def set_scene_rect(self, rect):
self._scene_range = QtCore.QRectF(*rect)
self._update_scene()
def clear_key_state(self):
self.CTRL_state = False
self.SHIFT_state = False
self.ALT_state = False
def use_OpenGL(self):
format = QtOpenGL.QGLFormat(QtOpenGL.QGL.SampleBuffers)
format.setSamples(4)
self.setViewport(QtOpenGL.QGLWidget(format))
``` |
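A standalone illustration (not from the repository) of the zoom bookkeeping used by `get_zoom()`/`set_zoom()` above: the reported zoom level is simply the view's horizontal scale factor minus 1.0, rounded to two decimals, so 0.0 corresponds to the unscaled view that `reset_zoom()` restores.
```python
# Hedged sketch: only the arithmetic of get_zoom() is reproduced; the Qt
# transform is replaced by a plain float so the snippet runs without a
# QGraphicsView instance.
def zoom_from_scale(m11):
    return float('{:0.2f}'.format(m11 - 1.0))

print(zoom_from_scale(1.5))  # 0.5 -> what get_zoom() reports for a 1.5x view
print(zoom_from_scale(1.0))  # 0.0 -> the level reset_zoom() returns to
```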
{
"source": "jlalvis/VAE_SGD",
"score": 2
} |
#### File: VAE_SGD/SGAN/generator.py
```python
import torch
import torch.nn as nn
class Generator(nn.Module):
def __init__(self, gpath, cuda=True):
super(Generator, self).__init__()
nc = 1
nz = 1
ngf = 64
gfs = 5
self.main = nn.Sequential(
nn.ConvTranspose2d( nz, ngf * 8, gfs, 2, gfs//2, bias=False),
nn.ReLU(True),
nn.InstanceNorm2d(ngf * 8),
nn.ConvTranspose2d(ngf * 8, ngf * 4, gfs, 2, gfs//2, bias=False),
nn.ReLU(True),
nn.InstanceNorm2d(ngf * 4),
nn.ConvTranspose2d(ngf * 4, ngf * 2, gfs, 2, gfs//2, bias=False),
nn.ReLU(True),
nn.InstanceNorm2d(ngf * 2),
nn.ConvTranspose2d(ngf * 2, ngf, gfs, 2, gfs//2, bias=False),
nn.ReLU(True),
nn.InstanceNorm2d(ngf),
nn.ConvTranspose2d( ngf, nc, gfs, 2, 2, bias=False),
nn.ReLU(True),
# Do some dilations #
nn.ConvTranspose2d( nc, ngf, gfs, 1, 6, output_padding=0,bias=False,dilation=3),
nn.ReLU(True),
nn.InstanceNorm2d(ngf),
nn.ConvTranspose2d( ngf, nc, gfs, 1, 10, output_padding=0, bias=False,dilation=5),
nn.Tanh()
)
if cuda:
self.load_state_dict(torch.load(gpath))
else:
self.load_state_dict(torch.load(gpath,
map_location=lambda storage,
loc: storage))
def forward(self, input):
output = self.main(input)
return output
``` |
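A hedged usage sketch (not part of the repository): instantiate the generator on the CPU and push a random latent field through it. The checkpoint path `netG.pth` and the 1x1x5x5 latent size are assumptions; with the strides above a 5x5 input grows to roughly 129x129.
```python
# Assumed checkpoint path; cuda=False makes __init__ load the weights onto the CPU.
import torch
from generator import Generator  # module name taken from the file header above

netG = Generator("netG.pth", cuda=False)
netG.eval()
z = torch.rand(1, 1, 5, 5) * 2 - 1      # nz=1 latent channel, values in [-1, 1]
with torch.no_grad():
    fake = netG(z)                      # final Tanh keeps the output in [-1, 1]
print(fake.shape)
```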
{
"source": "jlam609/spindrift",
"score": 2
} |
#### File: lib/spindrift/wsgi.py
```python
import io
import sys
import urllib
from werkzeug.wrappers import Response
from werkzeug.wsgi import ClosingIterator
def handler(app, event, context):
print(event)
print(context)
environ = create_wsgi_environ(event)
# override some lambda specifics
environ["HTTPS"] = "on"
environ["wsgi.url_scheme"] = "https"
environ["lambda.event"] = event
environ["lambda.context"] = context
# create a response
response = Response.from_app(app, environ)
# create the object we're going to send back to api gateway
ret = {}
# populate the body
ret["body"] = response.get_data(as_text=True) # XXX: binary support...
ret["isBase64Encoded"] = False
# add in a status code
ret["statusCode"] = response.status_code
# add in headers
ret["headers"] = {}
for header, value in response.headers:
ret["headers"][header] = value
# boom.
return ret
def create_wsgi_environ(event):
# see https://www.python.org/dev/peps/pep-0333/
# determine GET, POST, etc.
method = event["httpMethod"]
# determine the script name
script_name = "" # XXX: this shouldn't always be root
# decode the path being request
path = event["path"]
path = urllib.parse.unquote_plus(path)
# format the query string
query = event["queryStringParameters"]
query_string = ""
if query:
query_string = urllib.parse.urlencode(query)
# server name should be configurable?
server_name = "spindrift"
# fixup headers
headers = event["headers"] or {}
for header in list(headers.keys()):
canonical = header.title()
if header != canonical:
headers[canonical] = headers.pop(header)
# XXX: do we trust this?
server_port = headers.get("X-Forwarded-Port", "80")
# determine the remote address
x_forwarded_for = headers.get("X-Forwarded-For", "")
remote_addr = "127.0.0.1"
if "," in x_forwarded_for:
remotes = x_forwarded_for.split(",")
remotes = [r.strip() for r in remotes]
# last address is the load balancer, second from last is the actual
# address
if len(remotes) >= 2:
remote_addr = remotes[-2]
# XXX: do we trust this? isn't it always https?
    wsgi_url_scheme = headers.get("X-Forwarded-Proto", "http")
# retrieve the body and encode it
body = event["body"]
if isinstance(body, str):
body = body.encode("utf-8")
# setup initial environ dict
environ = {
"REQUEST_METHOD": method,
"SCRIPT_NAME": script_name,
"PATH_INFO": path,
"QUERY_STRING": query_string,
"SERVER_NAME": server_name,
"SERVER_PORT": server_port,
"SERVER_PROTOCOL": "HTTP/1.1",
"REMOTE_ADDR": remote_addr,
"wsgi.version": (1, 0),
"wsgi.url_scheme": wsgi_url_scheme,
"wsgi.input": io.BytesIO(body),
"wsgi.errors": sys.stderr, # XXX: this should be a logger.
"wsgi.multithread": False,
"wsgi.multiprocess": False,
"wsgi.run_once": False,
}
# get content-type from headers
content_type = headers.get("Content-Type")
if content_type is not None:
environ["CONTENT_TYPE"] = content_type
# determine content-length from the body of request
environ["CONTENT_LENGTH"] = 0
if body:
environ["CONTENT_LENGTH"] = len(body)
# apply all HTTP_* headers into the environ
for header, value in headers.items():
key_name = header.replace("-", "_")
key_name = key_name.upper()
key_name = "HTTP_" + key_name
environ[key_name] = value
# send back our completed environ
return environ
class SpindriftMiddleware(object):
def __init__(self, application):
self.application = application
def __call__(self, environ, start_response):
return ClosingIterator(self.application(environ, start_response))
``` |
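A hedged example (not from the project) of driving `handler()` with a minimal API Gateway proxy-style event and a trivial Flask app. The event keys are the ones the module reads; the import path and all values are assumptions.
```python
from flask import Flask
from spindrift.wsgi import handler  # import path assumed from the file layout above

app = Flask(__name__)

@app.route("/hello")
def hello():
    return "hi"

# minimal API Gateway proxy-style event (invented values)
event = {
    "httpMethod": "GET",
    "path": "/hello",
    "queryStringParameters": None,
    "headers": {"X-Forwarded-Port": "443", "X-Forwarded-Proto": "https"},
    "body": None,
}
print(handler(app, event, context=None))
# -> dict with 'body': 'hi', 'statusCode': 200, 'isBase64Encoded': False, 'headers': {...}
```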
{
"source": "jlambert121/beaver",
"score": 2
} |
#### File: beaver/transports/rabbitmq_transport.py
```python
from Queue import Queue
import pika
import ssl
from threading import Thread
import time
from beaver.transports.base_transport import BaseTransport
from beaver.transports.exception import TransportException
class RabbitmqTransport(BaseTransport):
def __init__(self, beaver_config, logger=None):
super(RabbitmqTransport, self).__init__(beaver_config, logger=logger)
self._rabbitmq_config = {}
config_to_store = [
'key', 'exchange', 'username', 'password', 'host', 'port', 'vhost',
'queue', 'queue_durable', 'ha_queue', 'exchange_type', 'exchange_durable',
'ssl', 'ssl_key', 'ssl_cert', 'ssl_cacert', 'timeout', 'delivery_mode'
]
for key in config_to_store:
self._rabbitmq_config[key] = beaver_config.get('rabbitmq_' + key)
self._connection = None
self._channel = None
self._count = 0
self._lines = Queue()
self._connect()
def _on_connection_open(self,connection):
self._logger.debug("connection created")
self._channel = connection.channel(self._on_channel_open)
def _on_channel_open(self,unused):
self._logger.debug("Channel Created")
self._channel.exchange_declare(self._on_exchange_declareok,
exchange=self._rabbitmq_config['exchange'],
exchange_type=self._rabbitmq_config['exchange_type'],
durable=self._rabbitmq_config['exchange_durable'])
def _on_exchange_declareok(self,unused):
self._logger.debug("Exchange Declared")
self._channel.queue_declare(self._on_queue_declareok,
queue=self._rabbitmq_config['queue'],
durable=self._rabbitmq_config['queue_durable'],
arguments={'x-ha-policy': 'all'} if self._rabbitmq_config['ha_queue'] else {})
def _on_queue_declareok(self,unused):
self._logger.debug("Queue Declared")
self._channel.queue_bind(self._on_bindok,
exchange=self._rabbitmq_config['exchange'],
queue=self._rabbitmq_config['queue'],
routing_key=self._rabbitmq_config['key'])
def _on_bindok(self,unused):
self._logger.debug("Exchange to Queue Bind OK")
        self._is_valid = True
self._logger.debug("Scheduling next message for %0.1f seconds",1)
self._connection.add_timeout(1,self._publish_message)
def _publish_message(self):
while True:
            if not self._lines.empty():
line = self._lines.get()
if self._count == 10000:
self._logger.debug("RabbitMQ transport queue size: %s" % (self._lines.qsize(), ))
self._count = 0
else:
self._count += 1
self._channel.basic_publish(
exchange=self._rabbitmq_config['exchange'],
routing_key=self._rabbitmq_config['key'],
body=line,
properties=pika.BasicProperties(
content_type='text/json',
delivery_mode=self._rabbitmq_config['delivery_mode']
))
else:
self._logger.debug("RabbitMQ transport queue is empty, sleeping for 1 second.")
time.sleep(1)
def _on_connection_open_error(self,non_used_connection=None,error=None):
self._logger.debug("connection open error")
        if error is not None:
self._logger.error(error)
def _on_connection_closed(self, connection, reply_code, reply_text):
self._channel = None
if self._connection._closing:
try:
self._connection.ioloop.stop()
except:
pass
else:
self._logger.warning('RabbitMQ Connection closed, reopening in 1 seconds: (%s) %s',
reply_code, reply_text)
self._connection.add_timeout(1, self.reconnect)
def reconnect(self):
try:
self._connection.ioloop.stop()
except:
pass
self._connection_start()
def _connection_start(self):
self._logger.debug("Creating Connection")
try:
self._connection = pika.adapters.SelectConnection(parameters=self._parameters,on_open_callback=self._on_connection_open,on_open_error_callback=self._on_connection_open_error,on_close_callback=self._on_connection_closed,stop_ioloop_on_close=False)
        except Exception as e:
self._logger.error("Failed Creating RabbitMQ connection")
self._logger.error(e)
self._logger.debug("Starting ioloop")
self._connection.ioloop.start()
def _connect(self):
# Setup RabbitMQ connection
credentials = pika.PlainCredentials(
self._rabbitmq_config['username'],
self._rabbitmq_config['password']
)
ssl_options = {
'keyfile': self._rabbitmq_config['ssl_key'],
'certfile': self._rabbitmq_config['ssl_cert'],
'ca_certs': self._rabbitmq_config['ssl_cacert'],
'ssl_version': ssl.PROTOCOL_TLSv1
}
self._parameters = pika.connection.ConnectionParameters(
credentials=credentials,
host=self._rabbitmq_config['host'],
port=self._rabbitmq_config['port'],
ssl=self._rabbitmq_config['ssl'],
ssl_options=ssl_options,
virtual_host=self._rabbitmq_config['vhost'],
socket_timeout=self._rabbitmq_config['timeout']
)
Thread(target=self._connection_start).start()
def callback(self, filename, lines, **kwargs):
timestamp = self.get_timestamp(**kwargs)
if kwargs.get('timestamp', False):
del kwargs['timestamp']
for line in lines:
try:
import warnings
with warnings.catch_warnings():
warnings.simplefilter('error')
body = self.format(filename, line, timestamp, **kwargs)
self._lines.put(body)
except UserWarning:
self._is_valid = False
raise TransportException('Connection appears to have been lost')
except Exception as e:
self._is_valid = False
try:
raise TransportException(e.strerror)
except AttributeError:
raise TransportException('Unspecified exception encountered') # TRAP ALL THE THINGS!
def interrupt(self):
if self._connection:
self._connection.close()
def unhandled(self):
return True
```
#### File: beaver/transports/sqs_transport.py
```python
import boto.sqs
import uuid
from boto.sqs.message import Message
from beaver.transports.base_transport import BaseTransport
from beaver.transports.exception import TransportException
class SqsTransport(BaseTransport):
def __init__(self, beaver_config, logger=None):
super(SqsTransport, self).__init__(beaver_config, logger=logger)
self._access_key = beaver_config.get('sqs_aws_access_key')
self._secret_key = beaver_config.get('sqs_aws_secret_key')
self._profile = beaver_config.get('sqs_aws_profile_name')
self._region = beaver_config.get('sqs_aws_region')
self._queue_owner_acct_id = beaver_config.get('sqs_aws_queue_owner_acct_id')
self._queues = beaver_config.get('sqs_aws_queue').split(',')
try:
if self._profile:
self._connection = boto.sqs.connect_to_region(self._region,
profile_name=self._profile)
elif self._access_key is None and self._secret_key is None:
self._connection = boto.sqs.connect_to_region(self._region)
else:
self._connection = boto.sqs.connect_to_region(self._region,
aws_access_key_id=self._access_key,
aws_secret_access_key=self._secret_key)
if self._connection is None:
self._logger.warn('Unable to connect to AWS - check your AWS credentials')
raise TransportException('Unable to connect to AWS - check your AWS credentials')
self._queue = {}
for queue in self._queues:
self._logger.debug('Attempting to load SQS queue: {}'.format(queue))
if self._queue_owner_acct_id is None:
self._queue[queue] = self._connection.get_queue(queue)
else:
self._queue[queue] = self._connection.get_queue(queue,
owner_acct_id=self._queue_owner_acct_id)
if self._queue[queue] is None:
raise TransportException('Unable to access queue with name {0}'.format(queue))
self._logger.debug('Successfully loaded SQS queue: {}'.format(queue))
        except Exception as e:
raise TransportException(e.message)
def callback(self, filename, lines, **kwargs):
timestamp = self.get_timestamp(**kwargs)
if kwargs.get('timestamp', False):
del kwargs['timestamp']
message_batch = []
message_batch_size = 0
message_batch_size_max = 250000 # Max 256KiB but leave some headroom
for line in lines:
m = Message()
m.set_body(self.format(filename, line, timestamp, **kwargs))
message_size = len(m)
if (message_size > message_batch_size_max):
self._logger.debug('Dropping the message as it is too large to send ({0} bytes)'.format(message_size))
continue
# SQS can only handle up to 10 messages in batch send and it can not exceed 256KiB (see above)
# Check the new total size before adding a new message and don't try to send an empty batch
if (len(message_batch) > 0) and (((message_batch_size + message_size) >= message_batch_size_max) or (len(message_batch) == 10)):
self._logger.debug('Flushing {0} messages to SQS queue {1} bytes'.format(len(message_batch), message_batch_size))
self._send_message_batch(message_batch)
message_batch = []
message_batch_size = 0
message_batch_size = message_batch_size + message_size
message_batch.append((uuid.uuid4(), self.format(filename, line, timestamp, **kwargs), 0))
if len(message_batch) > 0:
self._logger.debug('Flushing the last {0} messages to SQS queue {1} bytes'.format(len(message_batch), message_batch_size))
self._send_message_batch(message_batch)
return True
def _send_message_batch(self, message_batch):
for queue in self._queue:
try:
self._logger.debug('Attempting to push batch message to SQS queue: {}'.format(queue))
result = self._queue[queue].write_batch(message_batch)
if not result:
self._logger.error('Error occurred sending messages to SQS queue {0}. result: {1}'.format(
queue, result))
raise TransportException('Error occurred sending message to queue {0}'.format(queue))
self._logger.debug('Successfully pushed batch message to SQS queue: {}'.format(queue))
            except Exception as e:
self._logger.exception('Exception occurred sending batch to SQS queue')
raise TransportException(e.message)
def interrupt(self):
return True
def unhandled(self):
return True
``` |
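The `callback` above batches messages under two SQS limits: at most 10 messages per `write_batch` call and roughly 256 KiB per request. A simplified standalone sketch of that rule (message contents and sizes invented):
```python
def batch_messages(messages, max_batch=10, max_bytes=250000):
    """Yield batches that respect the SQS count and size ceilings."""
    batch, size = [], 0
    for msg in messages:
        msg_size = len(msg)
        if msg_size > max_bytes:
            continue  # a single oversized message can never be sent
        if batch and (size + msg_size >= max_bytes or len(batch) == max_batch):
            yield batch
            batch, size = [], 0
        batch.append(msg)
        size += msg_size
    if batch:
        yield batch

print([len(b) for b in batch_messages(["x" * 100] * 25)])  # [10, 10, 5]
```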
{
"source": "jlambert23/COP4331",
"score": 3
} |
#### File: public_html/API/newuser.py
```python
import util
print("Content-Type: application/json\n\n")
def newuser(jsonPayload):
import pymysql
try:
firstname = jsonPayload['firstname']
lastname = jsonPayload['lastname']
username = jsonPayload['username']
password = jsonPayload['password']
email = jsonPayload['email']
except:
util.throwErr("JSON incorrectly configured.\n" + str(jsonPayload))
return
try:
# Import connection settings.
from dbsettings import connection_properties
conn = pymysql.connect( **connection_properties )
cursor = conn.cursor()
except:
util.throwErr("Server was unable to be reached.")
return
try:
try:
# Throw before accessing database if non-alphanumeric characters are used.
import re
            if re.match(r'^[\w-]+$', username) is None:
raise Exception
# Insert new user into the database.
sql = "INSERT INTO user (firstname,lastname,username,password,email) VALUES ('%s','%s','%s','%s', '%s');" % (firstname, lastname, username, password, email)
cursor.execute(sql)
conn.commit()
except:
util.throwErr("User name already in use.")
return
# Retrieve user information.
sql2 = "SELECT * FROM user WHERE username='%s' AND password='%s';" % (username, password)
cursor.execute(sql2)
columns = cursor.description
result = [{columns[index][0]:column for index, column in enumerate(value)} for value in cursor.fetchall()]
util.sendjson(result)
conn.close()
except:
util.throwErr("Unable to create new user.")
return
try:
#import json
#parsed_json = json.loads('{"username":"babby", "password":"<PASSWORD>", "firstname":"How is", "lastname":"babby formed?", "email":"<EMAIL>"}')
parsed_json = util.getjson()
newuser(parsed_json)
except:
util.throwErr("Failed to add new user.")
``` |
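A hedged alternative sketch (not part of the project): the same INSERT written with pymysql parameter binding instead of the `%` string formatting above, which removes the SQL injection exposure. Table and column names come from the original query; the example values are invented.
```python
import pymysql
from dbsettings import connection_properties  # same settings module used above

conn = pymysql.connect(**connection_properties)
with conn.cursor() as cursor:
    # placeholders are bound by the driver, so user input is never spliced into SQL
    sql = ("INSERT INTO user (firstname, lastname, username, password, email) "
           "VALUES (%s, %s, %s, %s, %s)")
    cursor.execute(sql, ("Jane", "Doe", "jdoe", "example-hash", "jdoe@example.com"))
conn.commit()
conn.close()
```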
{
"source": "JLambertazzo/SeeFood",
"score": 2
} |
#### File: JLambertazzo/SeeFood/app.py
```python
from flask import Flask, render_template, redirect, session, Response, send_file
from flask_restful import Api, Resource, reqparse, fields, marshal_with, abort
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import create_engine, Column, String
from sqlalchemy.ext.declarative import declarative_base
from werkzeug.utils import secure_filename
import werkzeug
import uuid
import hashlib
import os
import qrcode
import PIL
import io
import requests
app = Flask(__name__)
api = Api(app)
app.config['SECRET_KEY'] = 'TEMPORARYSECRET'
local = True
Base = None
dbpass = os.environ.get('DBPASS')
if local:
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///app.db'
db = SQLAlchemy(app)
Base = db.Model
Column = db.Column
String = db.String
else:
Base = declarative_base()
engine = create_engine(f"cockroachdb://julien:{dbpass}@free-tier.gcp-us-central1.cockroachlabs.cloud:26257/defaultdb?sslmode=verify-full&sslrootcert=cert/cc-ca.crt&options=--cluster=good-bat-867", echo=True)
class RestaurantModel(Base):
__tablename__ = 'Restaurants'
id = Column(String(100), primary_key=True)
name = Column(String(100), nullable=False, unique=True)
password = Column(String(100), nullable=False)
description = Column(String(500))
def __repr__(self):
return f"Restaurant(name={self.name}, description={self.description})"
class ItemModel(Base):
__tablename__ = 'Items'
id = Column(String(100), primary_key=True)
restaurant = Column(String(100), nullable=False)
name = Column(String(100), nullable=False)
description = Column(String(500))
ingredients = Column(String(500))
image = Column(String(500))
qr = Column(String(500))
def __repr__(self):
return f"Item(restaurant={self.restaurant}, name={self.name}, description={self.description}, ingredients={self.ingredients}, image={self.image}, qr={self.qr})"
class ImageModel(Base):
__tablename__ = 'Images'
id = Column(String(100), primary_key=True)
image = Column(String(500), unique=True)
mimetype = Column(String(500), nullable=False)
name = Column(String(500), nullable=False)
def __repr__(self):
return self.name
if local:
db.create_all()
else:
Base.metadata.create_all(engine)
restaurant_post_args = reqparse.RequestParser()
restaurant_post_args.add_argument("name", required=True, type=str, help="Restaurant name was not included")
restaurant_post_args.add_argument("password", required=True, type=str, help="Password was not included")
restaurant_post_args.add_argument("description", type=str)
restaurant_patch_args = reqparse.RequestParser()
restaurant_patch_args.add_argument("name", type=str)
restaurant_patch_args.add_argument("password", type=str)
restaurant_patch_args.add_argument("description", type=str)
item_post_args = reqparse.RequestParser()
item_post_args.add_argument("restaurant", required=True, type=str, help='Restaurant id was not included')
item_post_args.add_argument("name", required=True, type=str, help='Item name was not included')
item_post_args.add_argument("description", type=str)
item_post_args.add_argument("ingredients", type=str)
item_post_args.add_argument("image", type=werkzeug.datastructures.FileStorage, location='files')
item_post_args.add_argument("qr", type=str)
item_patch_args = reqparse.RequestParser()
item_patch_args.add_argument("name", type=str)
item_patch_args.add_argument("description", type=str)
item_patch_args.add_argument("ingredients", type=str)
item_patch_args.add_argument("image", type=werkzeug.datastructures.FileStorage, location='files')
item_patch_args.add_argument("qr", type=str)
restaurant_fields = {
'id': fields.String,
'name': fields.String,
'password': fields.String,
'description': fields.String
}
item_fields = {
'id': fields.String,
'restaurant': fields.String,
'name': fields.String,
'description': fields.String,
'ingredients': fields.String,
'image': fields.String,
'qr': fields.String
}
class Restaurants(Resource):
@marshal_with(restaurant_fields)
def get(self, id):
result = RestaurantModel.query.filter_by(id=id).first()
if not result:
abort(404, message="restaurant not found")
return result
@marshal_with(restaurant_fields)
def patch(self, id):
args = restaurant_patch_args.parse_args()
result = RestaurantModel.query.filter_by(id=id).first()
if not result:
abort(404, message="restaurant not found")
if args['name']:
result.name = args['name']
if args['password']:
salt = os.urandom(32)
key = hashlib.pbkdf2_hmac('sha256', args['password'].encode('utf-8'), salt, 100000)
result.password = <PASSWORD> + key
if args['description']:
result.description = args['description']
db.session.commit()
return result
@marshal_with(restaurant_fields)
def delete(self, id):
result = RestaurantModel.query.filter_by(id=id).first()
if not result:
abort(404, message="restaurant not found")
db.session.delete(result)
db.session.commit()
return result
class Items(Resource):
@marshal_with(item_fields)
def get(self, id):
result = ItemModel.query.filter_by(id=id).first()
if not result:
abort(404, message="item not found")
return result
@marshal_with(item_fields)
def patch(self, id):
args = item_patch_args.parse_args()
result = ItemModel.query.filter_by(id=id).first()
if not result:
abort(404, message="item not found")
        iid = None
        if args['image']:
            filename = secure_filename(args['image'].filename)
            mimetype = args['image'].mimetype
            iid = str(uuid.uuid1())
            # store the upload as an ImageModel row and reference it by id,
            # mirroring NewItem.post below
            newimage = ImageModel(id=iid, image=args['image'].read(), mimetype=mimetype, name=filename)
            db.session.add(newimage)
        if args['name']:
            result.name = args['name']
        if args['description']:
            result.description = args['description']
        if args['ingredients']:
            result.ingredients = args['ingredients']
        if iid:
            result.image = iid
        db.session.commit()
return result
@marshal_with(item_fields)
def delete(self, id):
result = ItemModel.query.filter_by(id=id).first()
if not result:
abort(404, message="item not found")
db.session.delete(result)
db.session.commit()
return result
class NewRestaurant(Resource):
@marshal_with(restaurant_fields)
def post(self):
args = restaurant_post_args.parse_args()
found = RestaurantModel.query.filter_by(name=args['name']).first()
if found:
            abort(409, message="Restaurant with this name already exists")
salt = os.urandom(32)
key = hashlib.pbkdf2_hmac('sha256', args['password'].encode('utf-8'), salt, 100000)
restaurant = RestaurantModel(id=str(uuid.uuid1()), name=args['name'], password=<PASSWORD> + key, description=args['description'])
db.session.add(restaurant)
db.session.commit()
session['name'] = args['name']
return restaurant, 201
# handle login
@marshal_with(restaurant_fields)
def get(self):
args = restaurant_post_args.parse_args()
found = RestaurantModel.query.filter_by(name=args['name']).first()
if not found:
abort(404, message="restaurant not found")
found_salt = found.password[:32]
found_key = found.password[32:]
given_hash = hashlib.pbkdf2_hmac('sha256', args['password'].encode('utf-8'), found_salt, 100000)
if given_hash == found_key:
session['name'] = args['name']
return found
else:
abort(403, message="Login Failed")
class NewItem(Resource):
@marshal_with(item_fields)
def post(self):
args = item_post_args.parse_args()
found = ItemModel.query.filter_by(restaurant=args['restaurant'], name=args['name']).first()
if found:
abort(409, message="You already have an item with this name")
print('args:' + str(args))
iid = None
if args['image']:
filename = secure_filename(args['image'].filename)
mimetype = args['image'].mimetype
iid = str(uuid.uuid1())
newimage = ImageModel(id=iid, image=args['image'].read(), mimetype=mimetype, name=filename)
db.session.add(newimage)
else:
print('no image given')
item = ItemModel(id=str(uuid.uuid1()), restaurant=args['restaurant'], name=args['name'], description=args['description'],
ingredients=args['ingredients'], image=iid, qr=args['qr'])
print('new item:' + item.id)
db.session.add(item)
db.session.commit()
return item, 201
class Search(Resource):
@marshal_with(item_fields)
def get(self, query):
results = []
name_matches = ItemModel.query.filter_by(name=query).first()
desc_matches = ItemModel.query.filter_by(description=query).first()
ingr_matches = ItemModel.query.filter_by(ingredients=query).first()
if name_matches:
return name_matches
elif desc_matches:
return desc_matches
elif ingr_matches:
return ingr_matches
else:
return abort(404, message="no resource found")
class Image(Resource):
def get(self, id):
found = ImageModel.query.filter_by(id=id).first()
if not found:
abort(404, message="image could not be found")
return Response(found.image, mimetype=found.mimetype)
class QR(Resource):
def get(self, id):
img = qrcode.make(f"http://localhost:5000/viewitem/{id}")
print(type(img))
image = img.get_image()
output = io.BytesIO()
image.convert('RGBA').save(output, format='PNG')
output.seek(0, 0)
return Response(output, mimetype="image/png")
api.add_resource(NewRestaurant, '/api/restaurants')
api.add_resource(NewItem, '/api/item')
api.add_resource(Restaurants, '/api/restaurants/<string:id>')
api.add_resource(Items, '/api/item/<string:id>')
api.add_resource(Search, '/api/search/<string:query>')
api.add_resource(Image, '/api/image/<string:id>')
api.add_resource(QR, '/api/qr/<string:id>')
@app.route('/')
def index():
return render_template('index.html')
@app.route('/dashboard')
def dashboard():
    if not session.get('name'):
return redirect('/', 301)
found = RestaurantModel.query.filter_by(name=session['name']).first()
items = ItemModel.query.filter_by(restaurant=found.id).all()
return render_template('dashboard.html', name=found.name, description=found.description, items=items)
@app.route('/newitem')
def newitempage():
    if not session.get('name'):
return redirect('/', 301)
found = RestaurantModel.query.filter_by(name=session['name']).first()
return render_template('newitem.html', rest_id=found.id)
@app.route('/viewitem/<string:id>')
def viewitempage(id):
found = ItemModel.query.filter_by(id=id).first()
    if not session.get('name'):
        return redirect('/', 301)
    if not found:
        abort(404, message="Item was not found")
    rest = RestaurantModel.query.filter_by(name=session['name']).first()
    if rest.id != found.restaurant:
        abort(409, message="This item belongs to another restaurant")
image = ImageModel.query.filter_by(id=found.image).first()
if not image:
abort(404, message="Error loading image")
return render_template('viewitem.html', mimetype=image.mimetype, image=f"/api/image/{image.id}", qr=f"/api/qr/{id}", name=found.name, description=found.description, ingredients=found.ingredients)
@app.route('/edititem/<string:id>')
def edititempage(id):
found = ItemModel.query.filter_by(id=id).first()
    if not session.get('name'):
        return redirect('/', 301)
    if not found:
        abort(404, message="Item was not found")
    rest = RestaurantModel.query.filter_by(name=session['name']).first()
    if rest.id != found.restaurant:
        abort(409, message="This item belongs to another restaurant")
return render_template('edititem.html', id=found.id, name=found.name, description=found.description, ingredients=found.ingredients)
@app.route('/search')
def searchpage():
return render_template('search.html')
if __name__ == '__main__':
app.run(debug=False)
``` |
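An illustrative client sketch (assumptions: the app is running locally on port 5000 and `soup.png` exists; the field names mirror the reqparse arguments above):
```python
import requests

base = "http://localhost:5000"

# register a restaurant (NewRestaurant.post)
rest = requests.post(base + "/api/restaurants",
                     data={"name": "Testaurant", "password": "secret",
                           "description": "demo"}).json()

# add a menu item with an image upload (NewItem.post)
with open("soup.png", "rb") as f:
    item = requests.post(base + "/api/item",
                         data={"restaurant": rest["id"], "name": "Soup",
                               "ingredients": "water, salt"},
                         files={"image": f}).json()
print(item["id"])
```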
{
"source": "jlamendo/hoymiles",
"score": 2
} |
#### File: jlamendo/hoymiles/hoymiles.py
```python
__author__ = 'dmslabs'
from os.path import realpath
import requests
from requests.models import HTTPBasicAuth, Response, StreamConsumedError
from requests import Request, Session
import hashlib
from string import Template
import json
import configparser
import logging
import dmslibs as dl
import comum
from dmslibs import Color, IN_HASSIO, mostraErro, log, pega_url, pega_url2, printC
from datetime import datetime, timedelta
import paho.mqtt.client as mqtt
from paho.mqtt import client
import uuid
import time
import os
import sys
from flask import Flask
from flask import render_template
import webserver
import multiprocessing
from multiprocessing import Process, Pipe
# CONFIG Secrets
HOYMILES_USER = "user"
HOYMILES_PASSWORD = "<PASSWORD>"
HOYMILES_PLANT_ID = 00000
MQTT_HOST = "mqtt.eclipse.org"
MQTT_USERNAME = "MQTT_USERNAME"
MQTT_PASSWORD = "<PASSWORD>"
INTERVALO_MQTT = 240 # How often to send data to the MQTT server?
INTERVALO_HASS = 1200 # How often to send device information in a format compatible with Home Asssistant MQTT discovery?
INTERVALO_GETDATA = 480 # How often do I read site data
SECRETS = 'secrets.ini'
WEB_SERVER = True
# Contants
VERSAO = '0.17'
DEVELOPERS_MODE = False
MANUFACTURER = 'dmslabs'
APP_NAME = 'Hoymiles Gateway'
SHORT_NAME = 'solarH'
SOLAR_MODEL = "DTU-W100" # change this to fetch it dynamically
TOKEN = ''
COOKIE_UID = "'uid=fff9c382-389f-4a47-8dc9-c5486fc3d9f5"
COOKIE_EGG_SESS = "EGG_SESS=XHfAhiHWwU__OUVeKh0IiITBnmwA-IIXEzTCHgHgww6ZYYddOPntPSwVz4Gx7ISbfU0WrvzOLungThcL-9D2KxavrtyPk8Mr2YXLFzJwvM0usPvhzYdt2Y2S9Akt5sjP"
URL1 = "https://global.hoymiles.com/platform/api/gateway/iam/auth_login"
URL2 = "https://global.hoymiles.com/platform/api/gateway/pvm-data/data_count_station_real_data"
URL3 = 'https://global.hoymiles.com/platform/api/gateway/iam/user_me'
URL4 = 'https://global.hoymiles.com/platform/api/gateway/pvm/statistics_count_station_state'
URL5 = 'https://global.hoymiles.com/platform/api/gateway/pvm/station_select_by_page'
URL6 = 'https://global.hoymiles.com/platform/api/gateway/pvm/station_find'
UUID = str(uuid.uuid1())
MQTT_PUB = "home/solar"
SID = 'solar'
MQTT_HASS = "homeassistant"
DEFAULT_MQTT_PASS = "<PASSWORD>"
INTERVALO_EXPIRE = int(INTERVALO_GETDATA) * 1.5
NODE_ID = 'dmslabs'
PAYLOAD_T1= '''
{
"ERROR_BACK":true,
"LOAD":{
"loading":true
},
"body":{
"password":"<PASSWORD>",
"user_name":"$user"
},
"WAITING_PROMISE":true
}
'''
PAYLOAD_T2 = '''
{
"body": {
"sid": $sid
},
"WAITING_PROMISE": true
}
'''
headers_h1 = {
'Content-Type': 'application/json',
'Cookie': '' # 'uid=fff9c382-389f-4a47-8dc9-c5486fc3d9f5; EGG_SESS=XHfAhiHWwU__OUVeKh0IiITBnmwA-IIXEzTCHgHgww6ZYYddOPntPSwVz4Gx7ISbfU0WrvzOLungThcL-9D2KxavrtyPk8Mr2YXLFzJwvM0usPvhzYdt2Y2S9Akt5sjP'
}
headers_h2 = {
'Content-Type': 'application/json;charset=UTF-8',
'Cache-Control': 'no-cache',
'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:60.0) Gecko/20100101 Firefox/60.0',
'Host': 'global.hoymiles.com',
'Connection': 'keep-alive',
'Accept': 'application/json, text/plain, */*',
'Accept-Encoding': 'gzip, deflate, br',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 11_2_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36',
'Accept-Language': 'pt-BR,pt;q=0.9,it-IT;q=0.8,it;q=0.7,es-ES;q=0.6,es;q=0.5,en-US;q=0.4,en;q=0.3',
'Cookie': 'hm_token_language=en_us; ' # 'uid=fff9c382-389f-4a47-8dc9-c5486fc3d9f5; EGG_SESS=XHfAhiHWwU__OUVeKh0IiITBnmwA-IIXEzTCHgHgww6ZYYddOPntPSwVz4Gx7ISbfU0WrvzOLungThcL-9D2KxavrtyPk8Mr2YXLFzJwvM0usPvhzYdt2Y2S9Akt5sjP'
}
json_hass = {"sensor": '''
{
"stat_t": "home/$sid/json",
"name": "$name",
"uniq_id": "$uniq_id",
"val_tpl": "{{ value_json.$val_tpl }}",
"icon": "$icon",
"device_class": "$device_class",
"expire_after": "$expire_after",
"device": { $device_dict }
}'''}
device_dict = ''' "name": "$device_name",
"manufacturer": "$manufacturer",
"model": "$model",
"sw_version": "$sw_version",
"via_device": "$via_device",
"identifiers": [ "$identifiers" ] '''
# # GLOBAL VARS
token = ""
status = {"ip":"?",
"mqtt": False}
gDevices_enviados = { 'b': False, 't':datetime.now() } # Global - controls when to resend the autodiscovery header
gMqttEnviado = { 'b': False, 't':datetime.now() } # Global - controls when to publish again
statusLast = status.copy()
gDadosSolar = dict()
gConnected = False #global variable for the state of the connection
sensor_dic = dict() # {}
def pega_url_jsonDic(url, payload, headers, qualPega):
    # fetch the dict returned by the url
if qualPega ==1 :
resposta, sCode = pega_url(url, payload, headers, DEVELOPERS_MODE)
else:
resposta, sCode = pega_url2(url, payload, headers, DEVELOPERS_MODE)
ret = dict()
if sCode == 200:
json_res = json.loads(resposta)
ret = json_res
return ret
def pega_token():
    # get the access token
global token
global TOKEN
global HOYMILES_PASSWORD
global HOYMILES_USER
pass_hash = hashlib.md5(HOYMILES_PASSWORD.encode()) # b'senh<PASSWORD>'
pass_hex = pass_hash.hexdigest()
# print(pass_hex)
ret = False
T1 = Template(PAYLOAD_T1)
payload_T1 = T1.substitute(user = HOYMILES_USER, password = <PASSWORD>hex)
#print(payload_T1)
header = headers_h1
header['Cookie'] = "'" + COOKIE_UID + "; " + COOKIE_EGG_SESS + "'"
login, sCode = pega_url(URL1, payload_T1, header)
if sCode == 200:
json_res = json.loads(login)
if json_res['status'] == '0':
data_body = json_res['data']
token = json_res['data']['token']
TOKEN = token
ret = True
printC(Color.F_Blue, 'I got the token!! :-)')
if token == "":
print ('erro na resposta')
ret = False
elif json_res['status'] == '1':
TOKEN = ''
token = ''
print (Color.F_Red + "Wrong user/password" + Color.F_Default)
else:
TOKEN = ''
token = ''
print (Color.F_Red + "HTTP Error: " + str(sCode) + Color.F_Default + " " + dl.httpStatusCode(sCode))
return ret
def pega_solar(uid):
    # get the plant data
ret = False
T2 = Template(PAYLOAD_T2)
payload_t2 = T2.substitute(sid = uid)
header = headers_h2
# header['Cookie'] = COOKIE_UID + "; " + COOKIE_EGG_SESS + "; hm_token=" + token + "; Path=/; Domain=.global.hoymiles.com; Expires=Sat, 19 Mar 2022 22:11:48 GMT;" + "'"
header['Cookie'] = COOKIE_UID + "; hm_token=" + token + "; Path=/; Domain=.global.hoymiles.com; Expires=Sat, 19 Mar 2022 22:11:48 GMT;" + "'"
solar = pega_url_jsonDic(URL2, payload_t2, header, 2)
if 'status' in solar.keys():
solar_status = solar['status']
if solar_status == "0":
ret = solar.copy()
if solar_status != "0":
ret = solar_status
if DEVELOPERS_MODE:
printC(Color.B_Red, 'Solar Status Error: ' + str(solar_status) )
if solar_status == "100":
            # token error
            # request a new one
            if (pega_token()):
                # call pega_solar again
ret = pega_solar(uid)
else:
print(Color.B_Red + "I can't connect!" + Color.B_Default)
return ret
def get_secrets():
''' GET configuration data '''
global HOYMILES_USER
global HOYMILES_PASSWORD
global HOYMILES_PLANT_ID
global MQTT_HOST
global MQTT_PASSWORD
global MQTT_USERNAME
global DEVELOPERS_MODE
global WEB_SERVER
config = dl.getConfigParser(SECRETS)
printC (Color.F_LightGreen, "Reading secrets.ini")
    # read the values
HOYMILES_USER = dl.get_config(config, 'secrets', 'HOYMILES_USER', HOYMILES_USER)
HOYMILES_PASSWORD = dl.get_config(config, 'secrets', 'HOYMILES_PASSWORD', HOYMILES_PASSWORD)
HOYMILES_PLANT_ID = dl.get_config(config, 'secrets','HOYMILES_PLANT_ID', HOYMILES_PLANT_ID, getInt=True)
MQTT_PASSWORD = dl.get_config(config, 'secrets', 'MQTT_PASS', MQTT_PASSWORD)
MQTT_USERNAME = dl.get_config(config, 'secrets', 'MQTT_USER', MQTT_USERNAME)
MQTT_HOST = dl.get_config(config, 'secrets', 'MQTT_HOST', MQTT_HOST)
dev_mode = dl.get_config(config, 'developers', 'DEVELOPERS_MODE', "")
if bool(dev_mode) == True:
DEVELOPERS_MODE = True
else:
DEVELOPERS_MODE = False
WEB_SERVER = dl.get_config(config, 'secrets', 'WEB_SERVER', WEB_SERVER)
def substitui_secrets():
"No HASS.IO ADD-ON substitui os dados do secrets.ini pelos do options.json"
global HOYMILES_USER
global HOYMILES_PASSWORD
global HOYMILES_PLANT_ID
global MQTT_HOST
global MQTT_PASSWORD
global MQTT_USERNAME
global DEVELOPERS_MODE
global FILE_COMM
global WEB_SERVER
log().debug ("Loading env data....")
HOYMILES_USER = dl.pegaEnv("HOYMILES_USER")
HOYMILES_PASSWORD = dl.pegaEnv("HOYMILES_PASSWORD")
HOYMILES_PLANT_ID = dl.pegaEnv("HOYMILES_PLANT_ID")
MQTT_HOST = dl.pegaEnv("MQTT_HOST")
MQTT_PASSWORD = dl.pegaEnv("MQTT_PASSWORD")
MQTT_USERNAME = dl.pegaEnv("MQTT_USER")
DEVELOPERS_MODE = dl.pegaEnv("DEVELOPERS_MODE")
DEVELOPERS_MODE = dl.onOff(DEVELOPERS_MODE, True, False)
if dl.IN_HASSIO():
WEB_SERVER = True
FILE_COMM = '/data/' + comum.FILE_COMM
log().debug ("Env data loaded.")
def mqttStart():
''' Start MQTT '''
global client
global clientOk
# MQTT Start
client = mqtt.Client()
log().info("Starting MQTT " + MQTT_HOST)
print (Color.B_LightYellow + "Starting MQTT " + MQTT_HOST + Color.B_Default)
if DEVELOPERS_MODE:
log().debug("mqttStart MQTT_USERNAME: " + str(MQTT_USERNAME))
log().debug("mqttStart MQTT_PASSWORD: " + str(MQTT_PASSWORD))
client.username_pw_set(username=MQTT_USERNAME, password=MQTT_PASSWORD)
client.on_connect = on_connect
# client.on_message = on_message
client.on_disconnect = on_disconnect
client.on_publish = on_publish
try:
clientOk = True
rc = client.connect(MQTT_HOST, 1883, 60)
except Exception as e: # OSError
if e.__class__.__name__ == 'OSError':
clientOk = False
log().warning("Can't start MQTT")
print (Color.F_Red + "Can't start MQTT" + Color.F_Default) # e.errno = 51 - 'Network is unreachable'
mostraErro(e,20, "MQTT Start")
else:
clientOk = False
mostraErro(e,30, "MQTT Start")
if clientOk: client.loop_start() # start the loop
# The callback for when the client receives a CONNACK response from the server.
def on_connect(client, userdata, flags, rc):
global gConnected
global status
if rc == 0:
cor = Color.F_Blue
else:
cor = Color.F_Red
print(cor + "MQTT connected with result code " + str(rc) + Color.F_Default)
log().debug("MQTT connected with result code " + str(rc))
if rc == 0:
print ("Connected to " + MQTT_HOST)
gConnected = True
status['mqtt'] = "on"
client.connected_flag = True
        # Report connected clients
status['publish_time'] = datetime.today().strftime('%Y-%m-%d %H:%M:%S')
send_clients_status()
else:
tp_c = {0: "Connection successful",
1: "Connection refused – incorrect protocol version",
2: "Connection refused – invalid client identifier",
3: "Connection refused – server unavailable",
4: "Connection refused – bad username or password",
5: "Connection refused – not authorised",
100: "Connection refused - other things"
}
gConnected = False
status['mqtt'] = "off"
if rc>5: rc=100
#print (str(rc) + str(tp_c[rc]))
print (str(rc) + dl.MQTT_STATUS_CODE(rc))
log().error(str(rc) + str(dl.MQTT_STATUS_CODE(rc)))
        # handle code 3 and the other return codes as well
        if rc == 4 or rc == 5:
            # wrong password
print(Color.F_Magenta + "APP EXIT" + str(rc) + Color.F_Default)
time.sleep(60000)
#raise SystemExit(0)
#sys.exit()
#quit()
def on_publish(client, userdata, mid):
    # what should be done here?
    # keep a stack to check whether each message was actually published
    # apparently this callback only fires when the message was published.
if 1==2:
print("Published mid: " + str(mid), "last: " + str(gLastMidMqtt))
if gLastMidMqtt-1 != mid:
print ("Erro mid:" + str(mid) + " não publicado.")
def on_disconnect(client, userdata, rc):
global gConnected
global gDevices_enviados
global status
gConnected = False
log().info("disconnecting reason " +str(rc))
print("disconnecting reason " +str(rc))
client.connected_flag=False
client.disconnect_flag=True
gDevices_enviados['b'] = False # Force sending again
status['mqtt'] = "off"
    # report the disconnected client
try:
send_clients_status()
except Exception as e:
mostraErro(e, 30, "on_disconnect")
def send_clients_status():
''' send connected clients status '''
global status
dadosEnviar = status.copy()
mqtt_topic = MQTT_PUB + "/clients/" + status['ip']
dadosEnviar.pop('ip')
dadosEnviar['UUID'] = UUID
dadosEnviar['version'] = VERSAO
dadosEnviar['plant_id'] = HOYMILES_PLANT_ID
dadosEnviar['inHass'] = dl.IN_HASSIO()
jsonStatus = json.dumps(dadosEnviar)
(rc, mid) = publicaMqtt(mqtt_topic, jsonStatus)
return rc
def publicaMqtt(topic, payload):
"Publica no MQTT atual"
global gLastMidMqtt
(rc, mid) = client.publish(topic, payload)
# if DEVELOPERS_MODE:
# print (Color.F_Cyan, topic, Color.F_Default)
# print (Color.F_Blue, payload, Color.F_Default)
gLastMidMqtt = mid
if rc == mqtt.MQTT_ERR_NO_CONN:
print ("mqtt.MQTT_ERR_NO_CONN")
if rc == mqtt.MQTT_ERR_SUCCESS:
        # ok, no error.
#print ("mqtt.MQTT_ERR_SUCCESS")
gLastMidMqtt = mid
if rc == mqtt.MQTT_ERR_QUEUE_SIZE:
print ("mqtt.MQTT_ERR_QUEUE_SIZE")
return rc, mid
def send_hass():
    ''' Send the parameters that register the device in hass.io '''
global sensor_dic
global gDevices_enviados
    # common variables
varComuns = {'sw_version': VERSAO,
'model': SOLAR_MODEL,
'manufacturer': MANUFACTURER,
'device_name': APP_NAME,
'identifiers': SHORT_NAME + "_" + str(HOYMILES_PLANT_ID),
'via_device': SOLAR_MODEL,
'sid': SID,
'uniq_id': UUID } #"UPS_ID"
if DEVELOPERS_MODE:
log().debug('Sensor_dic: ' + str(len(sensor_dic)))
if len(sensor_dic) == 0:
for k in json_hass.items():
json_file_path = k[0] + '.json'
if dl.IN_HASSIO():
json_file_path = '/' + json_file_path # to run on HASS.IO
if not os.path.isfile(json_file_path):
log().error(json_file_path + " not found!")
printC(Color.F_Cyan, json_file_path)
json_file = open(json_file_path)
if not json_file.readable():
printC(Color.B_Red,"I can't read file")
json_str = json_file.read()
sensor_dic[k[0]] = json.loads(json_str)
if len(sensor_dic) == 0:
printC(Color.B_Red, "Sensor_dic error")
rc = 0
for k in sensor_dic.items():
# print('Componente:' + k[0])
rc = monta_publica_topico(k[0], sensor_dic[k[0]], varComuns)
if not rc == 0:
printC(Color.B_LightRed, 'Hass publish error: ' + str(rc) )
if rc == 0:
gDevices_enviados['b'] = True
gDevices_enviados['t'] = datetime.now()
log().debug('Hass Sended')
def publicaDados(solarData):
    # publish the data via MQTT
global status
global gMqttEnviado
jsonUPS = json.dumps(solarData)
(rc, mid) = publicaMqtt(MQTT_PUB + "/json", jsonUPS)
gMqttEnviado['b'] = True
gMqttEnviado['t'] = datetime.now()
print (Color.F_Blue + "Dados Solares Publicados..." + Color.F_Default + str(datetime.now()))
if status['mqtt'] == 'on':
status[APP_NAME] = "on"
else:
status[APP_NAME] = "off"
send_clients_status()
return jsonUPS
def monta_publica_topico(component, sDict, varComuns):
    ''' build and publish the topic '''
ret_rc = 0
key_todos = sDict['todos']
newDict = sDict.copy()
newDict.pop('todos')
for key,dic in newDict.items():
# print(key,dic)
if key[:1] != '#':
varComuns['uniq_id']=varComuns['identifiers'] + "_" + key
if not('val_tpl' in dic):
dic['val_tpl'] = dic['name']
dic['name'] = varComuns['uniq_id']
dic['device_dict'] = device_dict
dic['publish_time'] = datetime.today().strftime('%Y-%m-%d %H:%M:%S')
            dic['expire_after'] = int(INTERVALO_EXPIRE) # when the value should expire
dados = Template(json_hass[component]) # sensor
dados = Template(dados.safe_substitute(dic))
            dados = Template(dados.safe_substitute(varComuns)) # apply the final substitutions
            dados = dados.safe_substitute(key_todos) # drop the placeholders that were not substituted.
topico = MQTT_HASS + "/" + component + "/" + NODE_ID + "/" + varComuns['uniq_id'] + "/config"
# print(topico)
# print(dados)
dados = dl.json_remove_vazio(dados)
(rc, mid) = publicaMqtt(topico, dados)
if rc == 0:
if DEVELOPERS_MODE:
topicoResumo = topico.replace(MQTT_HASS + "/" + component + "/" + NODE_ID, '...')
topicoResumo = topicoResumo.replace("/config", '')
printC (Color.F_Cyan, topicoResumo)
else:
                # the publish failed
printC (Color.B_Red, "Erro monta_publica_topico")
printC (Color.F_Red, topico)
ret_rc = ret_rc + rc
# print ("rc: ", rc)
return rc
def ajustaDadosSolar():
    ''' tidy up the solar data '''
global gDadosSolar
realPower = dl.float2number(gDadosSolar['real_power'],0)
capacidade = dl.float2number(gDadosSolar['capacitor'])
plant_tree = dl.float2number(gDadosSolar['plant_tree'], 0)
month_eq = dl.float2number(gDadosSolar['month_eq']) / 1000
month_eq = round(month_eq, 2)
total_eq = dl.float2number(gDadosSolar['total_eq']) / 1000000
total_eq = round(total_eq, 2)
co2 = dl.float2number(gDadosSolar['co2_emission_reduction']) / 1000000
co2 = round(co2,2)
    # fix scale and number of digits
if capacidade > 0 and capacidade < 100:
capacidade = capacidade * 1000
capacidade = round(capacidade)
power = (realPower / capacidade) * 100
power = round(power,1)
if realPower == 0:
printC (Color.F_Magenta, "realPower = 0")
printC (Color.B_LightMagenta, dl.hoje() )
if DEVELOPERS_MODE:
#printC ('parada 1/0', str(1/0))
printC(Color.B_Red,'parada')
gDadosSolar['real_power'] = int( realPower )
gDadosSolar['power_ratio'] = float( power )
gDadosSolar['capacitor'] = int( capacidade )
gDadosSolar['co2_emission_reduction'] = float( co2 )
gDadosSolar['plant_tree'] = int( plant_tree )
gDadosSolar['total_eq'] = float( total_eq )
gDadosSolar['month_eq'] = float( month_eq )
def pegaDadosSolar():
global gDadosSolar
    ''' get the solar data '''
dados_solar = pega_solar(HOYMILES_PLANT_ID)
if DEVELOPERS_MODE:
print ("dados_solar: " + str(dados_solar))
gDadosSolar = dados_solar['data']
capacidade = dl.float2number(gDadosSolar['capacitor'])
real_power = dl.float2number(gDadosSolar['real_power'])
if real_power == 0:
        # the value is 0
        printC(Color.B_Red, "REAL_POWER = 0")
        time.sleep(60) # wait 60 seconds
printC(Color.F_Blue, "Getting data again")
dados_solar = pega_solar(HOYMILES_PLANT_ID)
gDadosSolar = dados_solar['data']
capacidade = dl.float2number(gDadosSolar['capacitor'])
real_power = dl.float2number(gDadosSolar['real_power'])
if capacidade == 0:
        # this is an error
print (Color.B_Red + "Erro capacitor: " + str(capacidade) + Color.B_Default)
else:
ajustaDadosSolar()
return gDadosSolar
# RUN THE WEB APP
def iniciaWebServerB(Conf):
    ''' start the webserver '''
webserver.app.run(debug=True, host="0.0.0.0", threaded=True)
#app.run(debug=True, host="0.0.0.0", threaded=False)
def iniciaWebServer():
    ''' start the webserver '''
printC (Color.B_LightMagenta, "WEB SERVER Starting ...")
path_index = comum.PATH_TEMPLATE
if IN_HASSIO():
path_index = comum.PATH_TEMPLATE_HAS
bl_existe_index = os.path.isfile(path_index + '/index.html')
if not bl_existe_index:
        ''' index does not exist '''
printC (Color.B_Red, "Index not found. I can't start webserver. ")
arr = os.listdir(path_index)
printC(Color.F_Magenta, path_index)
print(arr)
else:
        # index exists
p = multiprocessing.Process(target=iniciaWebServerB, args=({"Something":"SomethingElese"},))
p.start()
# START
print (Color.B_Blue + "********** " + MANUFACTURER + " " + APP_NAME + " v." + VERSAO + Color.B_Default)
print (Color.B_Green + "Starting up... " + datetime.today().strftime('%Y-%m-%d %H:%M:%S') + ' ' + Color.B_Default)
dl.inicia_log(logFile='/var/tmp/hass.hoymiles.log', logName='hass.hoymiles', stdOut=True)
# info
dl.dadosOS()
status['ip'] = dl.get_ip()
print (Color.B_Cyan + "IP: " + Color.B_Default + Color.F_Magenta + status['ip'] + Color.F_Default)
if DEVELOPERS_MODE:
print (Color.B_Red, "DEVELOPERS_MODE", Color.B_Default)
get_secrets()
if dl.IN_HASSIO():
print (Color.B_Blue, "IN HASS.IO", Color.B_Default)
if not DEVELOPERS_MODE or 1==1: # teste
substitui_secrets()
if DEVELOPERS_MODE:
print (Color.B_Red, "DEVELOPERS_MODE", Color.B_Default)
if DEFAULT_MQTT_PASS == MQTT_PASSWORD:
    log().warning ("YOU SHOULD CHANGE THE DEFAULT MQTT PASSWORD!")
    print (Color.F_Red + "YOU SHOULD CHANGE THE DEFAULT MQTT PASSWORD!" + Color.F_Default)
if DEVELOPERS_MODE or MQTT_HOST == '192.168.50.20':
print (Color.F_Green + "HOYMILES_USER: " + Color.F_Default + str(HOYMILES_USER))
print (Color.F_Green + "HOYMILES_PASSWORD: " + Color.F_Default + str(HOYMILES_PASSWORD))
print (Color.F_Green + "HOYMILES_PLANT_ID: " + Color.F_Default + str(HOYMILES_PLANT_ID))
print (Color.F_Green + "MQTT_HOST: " + Color.F_Default + str(MQTT_HOST))
print (Color.F_Green + "MQTT_PASSWORD: " + Color.F_Default + str(MQTT_PASSWORD))
print (Color.F_Green + "MQTT_USERNAME: " + Color.F_Default + str(MQTT_USERNAME))
print (Color.F_Blue + "INTERVALO_MQTT: " + Color.F_Default + str(INTERVALO_MQTT))
print (Color.F_Blue + "INTERVALO_HASS: " + Color.F_Default + str(INTERVALO_HASS))
print (Color.F_Blue + "INTERVALO_GETDATA: " + Color.F_Default + str(INTERVALO_GETDATA))
print (Color.F_Blue + "WEB_SERVER: " + Color.F_Default + str(WEB_SERVER))
if dl.float2number(HOYMILES_PLANT_ID) < 100:
print (Color.F_Green + "HOYMILES_PLANT_ID: " + Color.F_Default + str(HOYMILES_PLANT_ID))
print (Color.B_Magenta + "Wrong plant ID" + Color.B_Default )
cnt = 0
while token == '':
pega_token()
cnt = cnt + 1
if token == '':
print (Color.B_Red + "I can't get access token" + Color.B_Default)
if cnt >= 5:
exit()
time.sleep(60000)
if token != '':
    # get the solar data
#dados_solar = pega_solar(HOYMILES_PLANT_ID)
#print (str(dados_solar))
#gDadosSolar = dados_solar['data']
pegaDadosSolar()
else:
log().error("I can't get access token")
print (Color.B_Red + "I can't get access token" + Color.B_Default)
quit()
# force the connection
while not gConnected:
mqttStart()
time.sleep(1) # wait for connection
if not clientOk:
time.sleep(240)
send_hass()
# first publish
jsonx = publicaDados(gDadosSolar)
if dl.float2number(gDadosSolar['total_eq'], 0) == 0:
log().warning('All data is 0. Maybe your Plant_ID is wrong.')
status['response'] = "Plant_ID could be wrong!"
send_clients_status()
if WEB_SERVER: # if the webserver is enabled, start it
iniciaWebServer()
dl.writeJsonFile(FILE_COMM, jsonx)
printC(Color.B_LightCyan, 'Loop start!')
# loop start
while True:
if gConnected:
time_dif = dl.date_diff_in_Seconds(datetime.now(), \
gDevices_enviados['t'])
if time_dif > INTERVALO_HASS:
gDevices_enviados['b'] = False
send_hass()
time_dif = dl.date_diff_in_Seconds(datetime.now(), \
gMqttEnviado['t'])
if time_dif > INTERVALO_GETDATA:
pegaDadosSolar()
jsonx = publicaDados(gDadosSolar)
if WEB_SERVER:
dl.writeJsonFile(FILE_COMM, jsonx)
        if not clientOk: mqttStart() # try the mqtt client again.
    time.sleep(10) # give it 10 seconds
``` |
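A hedged sketch (not from the add-on) of the shape of a single Home Assistant MQTT discovery message as assembled by `monta_publica_topico()`. The sensor key `real_power` and the plant id 12345 are examples; the expire value of 720 follows from the default INTERVALO_GETDATA * 1.5.
```python
import json

MQTT_HASS = "homeassistant"
NODE_ID = "dmslabs"
uniq_id = "solarH_12345_real_power"

topic = "{}/sensor/{}/{}/config".format(MQTT_HASS, NODE_ID, uniq_id)
payload = {
    "stat_t": "home/solar/json",
    "name": uniq_id,
    "uniq_id": uniq_id,
    "val_tpl": "{{ value_json.real_power }}",
    "expire_after": "720",
    "device": {"name": "Hoymiles Gateway", "identifiers": ["solarH_12345"]},
}
print(topic)
print(json.dumps(payload))  # what would be handed to client.publish(topic, ...)
```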
{
"source": "jlamontagne/prawler",
"score": 3
} |
#### File: prawler/spidermiddleware/phantomjs.py
```python
from scrapy.http import Request
class PhantomJSMiddleware(object):
def _rewrite_url(self, r):
if isinstance(r, Request):
url = 'phantomjs-' + r.url
r = r.replace(url=url)
return r
def process_start_requests(self, start_requests, spider):
if not hasattr(spider, 'use_phantomjs') or not spider.use_phantomjs:
return start_requests
return (self._rewrite_url(r) for r in start_requests or ())
def process_spider_output(self, response, result, spider):
if not hasattr(spider, 'use_phantomjs') or not spider.use_phantomjs:
return result
return (self._rewrite_url(r) for r in result or ())
```
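A minimal illustration (assumes Scrapy is installed and a spider with `use_phantomjs = True`) of what the middleware does to outgoing request URLs:
```python
from scrapy.http import Request
from prawler.spidermiddleware.phantomjs import PhantomJSMiddleware  # path from the header above

class DummySpider(object):
    use_phantomjs = True

mw = PhantomJSMiddleware()
out = list(mw.process_start_requests([Request("http://example.com/")], DummySpider()))
print(out[0].url)  # phantomjs-http://example.com/
```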
#### File: prawler/utils/misc.py
```python
import re
from bs4 import NavigableString
def decompose_all(s):
for tag in s:
if isinstance(tag, NavigableString):
tag.replace_with('')
else:
tag.decompose()
def text(x):
if isinstance(x, NavigableString):
x = unicode(x)
elif not isinstance(x, basestring):
x = ' '.join(x.stripped_strings)
x = re.sub('(\r\n)|\r|\n', '', x).replace('\t', ' ')
return x.strip()
``` |
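A usage sketch (not from the repository; needs BeautifulSoup 4 and, because of `unicode`/`basestring`, Python 2) showing the two helpers together:
```python
from bs4 import BeautifulSoup
from prawler.utils.misc import decompose_all, text  # import path assumed from the header above

soup = BeautifulSoup("<div>  Hello\n <b>world</b> <script>x()</script></div>", "html.parser")
decompose_all(soup.find_all("script"))   # strip the script tag in place
print(text(soup.div))                    # "Hello world"
```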
{
"source": "jlamoso/petisco",
"score": 3
} |
#### File: petisco/application/repository.py
```python
from abc import ABCMeta, abstractmethod
from typing import Dict
class Repository:
__metaclass__ = ABCMeta
@abstractmethod
def info(self) -> Dict:
return {"Repository": "Not Implemented"}
```
#### File: events/redis/redis_based_event_handler.py
```python
from petisco.events.event import Event
def redis_based_event_handler(func):
def wrapper(message):
json_data = message["data"].decode("utf-8")
event = Event.from_json(json_data)
return func(event)
return wrapper
```
#### File: integration/controller/fixtures.py
```python
import os
import pytest
from petisco import FlaskApplication
SWAGGER_DIR = os.path.dirname(os.path.abspath(__file__)) + "/application/"
app = FlaskApplication(application_name="petisco", swagger_dir=SWAGGER_DIR).get_app()
@pytest.fixture
def client():
with app.app.test_client() as c:
yield c
@pytest.fixture
def given_any_apikey():
apikey = "apikey"
return apikey
```
#### File: unit/mocks/fake_logger.py
```python
from petisco.logger.logger import Logger
class FakeLogger(Logger):
def __init__(self):
self.logging_messages = []
def log(self, logging_level, message):
self.logging_messages.append((logging_level, message))
def get_logging_messages(self):
return self.logging_messages
``` |
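A minimal sketch of how the fake is meant to be used in a test; the import path and the logging level string are assumptions:
```python
from fake_logger import FakeLogger  # assumed import path for the module above

logger = FakeLogger()
logger.log("INFO", "user created")
assert logger.get_logging_messages() == [("INFO", "user created")]
```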
{
"source": "jlampar/Evaluation",
"score": 3
} |
#### File: jlampar/Evaluation/evaluation.py
```python
import csv
def csv_dict_loader(dict_file):
csv_dict = {}
for row in csv.DictReader(open(dict_file)):
for col, value in row.iteritems():
csv_dict.setdefault(col, []).append(value)
return csv_dict
def compute_total_price(products,currence):
total = []
for elements_product in products['id']:
nr = int(elements_product) - 1
currency_index = currence['code'].index(products['currency'][nr])
ratio = (float(currence['ratio'][currency_index]))
total.append((int(products['price'][nr]) / ratio) * int(products['quantity'][nr]))
return total
def sort_groups(products,total,match):
zip_list = [list(x) for x in zip(products['currency'], products['quantity'], products['corresponding_id'], total)]
match_list = []
sorted_list = []
for m_id in match['corresponding_id']:
match_list.append([])
for ele in zip_list:
match_list[int(ele[2]) - 1].append(ele[3])
for every_sub_list in match_list:
sorted_list.append(sorted(every_sub_list, reverse=True))
return sorted_list
def generate_output(sorted_group,match,test):
output_list = []
for every_element in sorted_group:
index = sorted_group.index(every_element)
count = int(match['limit'][index])
ignored = len(every_element) - count
total = 0
for elements_element in every_element[0:count]:
total += elements_element
average = total / count
t = '%.2f' % total
av = '%.2f' % average
output_list.append([index + 1, t, av, 'PLN', ignored])
file_name = 'finest' + test + '.csv'
with open(file_name, 'wb') as write_file:
writer = csv.writer(write_file)
writer.writerow(['id', 'in_total', 'average', 'currency', 'ignored'])
writer.writerows(output_list)
data = csv_dict_loader('products.csv')
currencies = csv_dict_loader('currency_ratio.csv')
correspond = csv_dict_loader('correspondence.csv')
total_price = compute_total_price(data,currencies)
sorted_group = sort_groups(data,total_price,correspond)
generate_output(sorted_group,correspond,'')
``` |
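A small worked example (file contents invented) of the intermediate shapes, using the functions above: `csv_dict_loader` returns column -> list-of-strings, and `compute_total_price` converts each price to PLN via the ratio and multiplies by the quantity.
```python
products = {'id': ['1'], 'price': ['100'], 'currency': ['USD'],
            'quantity': ['2'], 'corresponding_id': ['1']}
currencies = {'code': ['USD'], 'ratio': ['4.0']}
print(compute_total_price(products, currencies))  # [50.0] -> 100 / 4.0 * 2
```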
{
"source": "jlampar/RINEX-to-geocentric-XYZ",
"score": 3
} |
#### File: jlampar/RINEX-to-geocentric-XYZ/RINEX_to_Geocentric_XYZ.py
```python
import datetime
from math import sqrt
from math import sin
from math import cos
from math import atan2
class SateliteTime(object):
def __init__(self,date_day,date_time,prop):
self.date_day = date_day
self.date_time = date_time
self.prop = prop
def time(self):
date_day_array = self.date_day.split("/")
date_time_array = self.date_time.split(":")
week = datetime.date(int(date_day_array[2]), int(date_day_array[1]), int(date_day_array[0])).weekday()
if week >= 0 and week < 6:
week += 1
elif week == 6:
week = 0
return float(week * 24 * 60 * 60) + float(int(date_time_array[0]) * 60 * 60) + float(int(date_time_array[1]) * 60) + float(date_time_array[2])
class RINEX(object):
mi = 3.986005*(10**14)
oe = 7.2921151467*(10**(-5))
def __init__(self,file):
self.file = file
def extract(self):
open_file = open(self.file, 'r').read().split('\n')
first_iter = []
for every_line in open_file:
if every_line[0] == "-":
first_iter.append(every_line)
else:
first_iter.append(" " + str(every_line))
second_iter = []
for every_verse in first_iter:
second_iter.append(list(every_verse[0 + i:19 + i] for i in range(0, len(every_verse), 19)))
variables = []
for items in second_iter[1:6]:
items = [item.replace('D', 'E') for item in items]
for every_element in items:
variables.append(float(every_element))
variables.append(float(first_iter[0][23:42].replace('D', 'E')))
variables.append(float(first_iter[0][42:61].replace('D', 'E')))
variables.append(float(first_iter[0][61:80].replace('D', 'E')))
return variables
# [33.0(IODE), 56.1875(Crs), 4.54483216765e-09(DELTAn), -2.88842646276(M0), 2.83680856228e-06(Cuc), 0.0169634080958(e), 5.29177486897e-06(Cus), 5153.68485069(sqrtA), 64800.0(Toe), 2.14204192162e-07(Cic), -1.79229306516(OMEGA0), -1.26659870148e-07(Cis), 0.973313017457(I0), 282.21875(Crc), -1.89637567079(omega), -7.93497338063e-09(OMEGADOT), 5.64666377764e-10(Idot), 1.0(nd), 1628.0(nd), 0.0(nd), 2.63666734099e-05(a0), 2.27373675443e-12(a1), 0.0(a2)]
# [IODE(0), Crs(1), DELTAn(2), M0(3), Cuc(4), e(5), Cus(6), sqrtA(7), Toe(8), Cic(9), OMEGA0(10), Cis(11), I0(12), Crc(13), omega(14), OMEGADOT(15), Idot(16), nd(17), nd(18), nd(19), a0(20), a1(21), a2(22)]
def dt(self,arr,t):
return (arr[20] + arr[21]*(t-arr[8]) + arr[22]*((t-arr[8])**2))
def get_coordinates(self,arr,t,dt):
        tk = t - arr[8] - dt # time from the ephemeris reference epoch
        a = arr[7] ** 2 # semi-major axis of the satellite orbit
        n0 = sqrt(self.mi / (a ** 3)) # satellite mean motion
        n = n0 + arr[2] # corrected mean motion
        Mk = arr[3] + n * tk # mean anomaly at epoch tk
        E = 0
        Ek = 1
        epsilon = 1 * 10 ** (-15)
        while abs(Ek - E) > epsilon:
            E = Ek
            Ek = Mk + arr[5] * sin(E) # eccentric anomaly
        vk = atan2((sqrt(1 - arr[5] ** 2) * sin(Ek)), (cos(Ek) - arr[5])) # true anomaly (w = vk)
        u = arr[14] + vk # argument of latitude (u = Fik)
        duk = arr[6] * sin(2 * u) + arr[4] * cos(2 * u) # argument of latitude correction
        drk = arr[1] * sin(2 * u) + arr[13] * cos(2 * u) # orbit radius correction
        dik = arr[11] * sin(2 * u) + arr[9] * cos(2 * u) + arr[16] * tk # inclination correction
        uk = u + duk # corrected argument of latitude
        rk = a * (1 - arr[5] * cos(Ek)) + drk # corrected orbit radius
        ik = arr[12] + dik # corrected inclination
        OMEGAk = arr[10] + (arr[15] - self.oe) * tk - self.oe * arr[8] # corrected longitude of the ascending node
        s = rk * cos(uk)
        ni = rk * sin(uk) # satellite coordinates in the orbital plane (x', y' = s, ni)
        # Geocentric coordinates:
XG = s * cos(OMEGAk) - ni * cos(ik) * sin(OMEGAk)
YG = s * sin(OMEGAk) + ni * cos(ik) * cos(OMEGAk)
ZG = ni * sin(ik)
return (XG, YG, ZG)
#Sample data for the RINEX_d28 file
#Observation date: 20/03/2011, 18:20:00
#Propagation time: 0.0738237203352194
path = raw_input("Enter the path to the RINEX file: ")
print
print "Enter the observation date."
ddmmyy = raw_input("dd/mm/yy: ")
hminsec = raw_input("h:min:sec: ")
print
propag = float(raw_input("Enter the propagation time: "))
print
time = SateliteTime(ddmmyy,hminsec,propag).time()
print "Satellite clock seconds: ", time
rinex_file = RINEX(path)
matrix = rinex_file.extract()
delta = rinex_file.dt(matrix,time)
coordinates = rinex_file.get_coordinates(matrix,time,delta)
print "Satellite clock correction: ", delta
print "Geocentric XYZ coordinates: ", coordinates
``` |
{
"source": "jlamperez/Vitis-Tutorials",
"score": 2
} |
#### File: HLS/design/create_fir_coefs.py
```python
from __future__ import print_function
import sys
import copy
def create_fir_coefs(n_taps, area):
coefs = []
midpoint = (n_taps + 1.0) / 2.0
for ix in range (0, (n_taps + 1) // 2) :
tap = area * (ix + 1) / midpoint**2
coefs.append(int(tap))
return coefs
if ((len(sys.argv) != 5) and (len(sys.argv) != 6)) :
print("Syntax is: ", sys.argv[0], "[-aie | -hls] <number_of_fir_filters> <number_of_fir_taps> <impulse area> [<window_size>]")
sys.exit(1)
window_size = 1024
mode = sys.argv[1]
n_firs = int(sys.argv[2])
n_taps = int(sys.argv[3])
area = float(sys.argv[4])
coefs = []
coefs = create_fir_coefs(n_taps, area)
if (mode == "-hls") :
coefs2 = copy.copy(coefs)
coefs2.reverse()
if ((n_taps % 2) == 1) :
del coefs2[0]
coefs.extend(coefs2)
elif (mode == "-aie") :
if (len(sys.argv) == 6) :
window_size = int(sys.argv[5])
else :
print("**ERROR: switch must be either '-aie' or '-hls'")
sys.exit(2)
print("// FIR Coefficients Taps: ", n_taps, "Impulse Response Area: ", area)
print("//")
print("#define N_FIR_FILTERS ", n_firs);
print("#define N_FIR_TAPS ", n_taps);
if (mode == "-aie") :
print("#define FIR_WINDOW_SIZE ", window_size);
print("#define FIR_COEF_VALUES { ", end="")
print(*coefs, sep=", ", end="")
print(" }")
```
#### File: 07-AI-Engine-Floating-Point/Utils/GenerationLib.py
```python
import numpy as np
from math import *
import random
def GenerateTestVector(dtval,pliow,NPhases_s,NStreams_s,NSamples_s,NFrames_s,SeqType_s,Basename_s):
print('DtVal : ',dtval.get())
print('PLIO width : ',pliow.get())
print('NPhases : ',NPhases_s.get())
print('NStreams : ',NStreams_s.get())
print('NSamples : ',NSamples_s.get())
print('NFrames : ',NFrames_s.get())
print('Type of Sequence : ',SeqType_s.get())
print('Base filename : ',Basename_s.get())
NPhases = int(NPhases_s.get())
NStreams = int(NStreams_s.get())
LFrame = int(NSamples_s.get())
NFrames = int(NFrames_s.get())
SequenceType = SeqType_s.get()
Basename = Basename_s.get()
#parameters that should be in the GUI
# SequenceType ='Linear' # 'SinCos' 'Linear' 'Random' 'Dirac'
# Basename = 'PhaseIn'
NSamples = NPhases*NStreams*LFrame*NFrames
NSamples1 = NPhases*NStreams*LFrame*(NFrames+1) # A little longer to allow for delay in streams
NBitsData = 32
if( dtval.get() == 'int16'):
NBitsData = 16
HasImag = 0
if (dtval.get() == 'cint16'):
HasImag = 1
if(SequenceType != 'SinCos' and SequenceType != 'Linear' and SequenceType != 'Random' and SequenceType != 'Dirac'):
print ('Unknown Sequence Type')
return
# Create the overall signal that will be distributed over all streams
# it is already separated in phases
S = np.zeros((NPhases,int(NSamples1/NPhases),1+HasImag))
for i in range(int(NSamples1/NPhases)):
for p in range (NPhases):
k = i*NPhases+p
if (SequenceType == 'SinCos'):
vr = int(5000*cos(6.28*5/(NPhases*NStreams*LFrame)*k))
vi = int(5000*sin(6.28*5/(NPhases*NStreams*LFrame)*k))
elif (SequenceType == 'Linear'):
vr = k
vi = -k
elif (SequenceType == 'Random'):
vr = random.randint(-5000,5000)
vi = random.randint(-5000,5000)
elif (SequenceType == 'Dirac'):
vr = 0
vi = 0
if(k%151 == 1):
vr = 1
elif(k%151 == 40):
vi = 1
elif(k%151 == 81):
vr = 2
elif(k%151 == 115):
vi = -2
# if(k%311 == 50):
# vr = 1
# S[p,i,0] =
# if(HasImag==1):
# S[p,i,1] = int(5000*sin(6.28*5/(NPhases*NStreams*LFrame)*k))
S[p,i,0] = vr
if (HasImag == 1 ):
S[p,i,1] = vi
PLIOwidth = int(pliow.get())
NSamplesPerLine = int(PLIOwidth/NBitsData) # Data are read in blocks of 128 bits (4 data in cint16)
# Create an Input test Vector in TestInputS.txt
FileNames = [];
# Easiest case: 1 stream per AI Engine
if (NStreams == 1):
#Creates list of filenames
for Phi in range(NPhases):
FileNames.append(Basename+'_'+str(Phi)+'.txt')
#Open all files
fds = [open(path, 'w') for path in FileNames]
#Fill all files with the right data
for p in range(NPhases):
fd = fds[p]
for s in range(int(NSamples1/NPhases/NSamplesPerLine)):
for d in range(NSamplesPerLine):
index = s*NSamplesPerLine + d
fd.write(str(int(S[p,index,0]))+' ')
if(HasImag):
fd.write(str(int(S[p,index,1]))+' ')
fd.write('\n')
for fd in fds:
fd.close()
if (NStreams == 2):
#Creates list of filenames
for Phi in range(NPhases):
for Stream in range(NStreams):
FileNames.append('PhaseIn_'+str(Phi)+'_'+str(Stream)+'.txt')
# Hash table to associate data to streams
NSamplesIn128bits = int(128/NBitsData )
H = np.zeros((int(NSamples1/NPhases/2),2))
H = H.astype('int32')
index = np.zeros(2)
index = index.astype('int32')
for s in range(int(NSamples1/NPhases)):
k = int(s/NSamplesIn128bits) # Block order
i = k%2 # Which streams
H[index[i],i] = s
index[i] = index[i]+1
#Open all files
fds = [open(path, 'w') for path in FileNames]
#Fill all files with the right data
for p in range(NPhases):
for stream in range(2):
fd = fds[2*p+stream]
for s in range(int(NSamples1/NPhases/NSamplesPerLine/NStreams)):
for d in range(NSamplesPerLine):
index = s*NSamplesPerLine + d
fd.write(str(int(S[p,H[index,stream],0]))+' ')
if(HasImag):
fd.write(str(int(S[p,H[index,stream],1]))+' ')
fd.write('\n')
for fd in fds:
fd.close()
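# Editor's note (hedged worked example, not in the original): for cint16 data NBitsData = 16,
# so NSamplesIn128bits = 8 and the two-stream hash table above alternates blocks of eight
# samples per phase: samples 0-7 go to stream 0, 8-15 to stream 1, 16-23 back to stream 0, etc.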
```
#### File: sw/pysrc/run_traffic_gen_noplot.py
```python
import os
os.environ['OPENBLAS_NUM_THREADS'] = '1'
from xilinx_xtlm import ipc_axis_master_util
from xilinx_xtlm import ipc_axis_slave_util
from xilinx_xtlm import xtlm_ipc
import struct
import multiprocessing as mp
import numpy as np
import copy as copy
#import matplotlib.pyplot as plt
class IQData():
def __init__(self, numpy_cplx_data,aietype='cint16',plio_width=32):
""" Initialization """
self.input_cplx_data = numpy_cplx_data
self.parent_conn0, self.child_conn0 = mp.Pipe()
self.aietype = aietype
self.plio_width = plio_width
self.rx_payload_len = -1
self.tx_payload_len = -1
def self_test(self):
print("inside self_test")
#iqdata =MakeInputStim(NSamps)
self.input_cplx_data = MakeCountingPattern(16)
#self.plot_results(self.input_cplx_data,iqdata)
#this is a test plot
t1 = self.convert_numpy_to_bytes()
self.convert_bytes_to_numpy(t1)
#self.tx_to_aie(self.convert_numpy_to_bytes(),True)
def rx_from_aie(self):
payload = self.out0_util.sample_transaction()
print("Rx Payload1(pasteable)= ")
#this magic makes sure the first byte isn't interpreted
#as an ASCII encoding character and just prints the value
#print(''.join(r'\x'+hex(letter)[2:] for letter in payload.data))
print(payload.data)
#print(payload.tkeep)
print("Rx payload len=")
print(len(payload.data))
#formatString = "<"+str(len(payload.data)//2)+"h"
#rxData = struct.unpack(formatString, payload.data)
#print("Rx payload finished")
cvec = self.convert_bytes_to_numpy(payload.data)
self.child_conn0.send(cvec)
print("Rx payload tx to parent")
def tx_to_aie(self,iq_data_as_bytes,test):
NumBytesToSend = len(iq_data_as_bytes)
print("xmit: len Bytes = %d" % NumBytesToSend)
NumBytesPerBeat = self.plio_width//8
NumTransports = NumBytesToSend//NumBytesPerBeat
print("NumBytesToSend=%d,NumBytesPerBeat=%d,NumTransports=%d" % (NumBytesToSend,NumBytesPerBeat,NumTransports))
for i in range(NumTransports):
data2send = iq_data_as_bytes[(i*NumBytesPerBeat):(i*NumBytesPerBeat)+NumBytesPerBeat]
if(test == False):
payload = xtlm_ipc.axi_stream_packet()
payload.data_length = NumBytesPerBeat
if(i == NumTransports-1):
payload.tlast = True
print("Tlast sent!")
else:
payload.tlast = False
payload.data =data2send
if(self.plio_width == 32):
payload.tkeep = bytes(bytearray(struct.pack("B", 0xF)))
elif(self.plio_width == 64):
payload.tkeep = bytes(bytearray(struct.pack("H", 0xFF)))
elif(self.plio_width == 128):
payload.tkeep = bytes(bytearray(struct.pack("I", 0xFFFF)))
print("Payload Num: %d" % i)
print(data2send)
#print(''.join(r'\x'+hex(letter)[2:] for letter in data2send))
if(test == False):
self.in0_util.b_transport(payload)
print("Finished sending")
def run_test(self, ipc=False):
if ipc:
self.in0_util = ipc_axis_master_util("sim_ipc_axis_master")
self.out0_util = ipc_axis_slave_util("sim_ipc_axis_slave")
self.tx_to_aie(self.convert_numpy_to_bytes(),False)
print("Data sent to AIE. Waiting for results...")
if ipc:
p= mp.Process(target=self.rx_from_aie())
p.start()
aie_output = self.parent_conn0.recv()
print("Data received from AIE ")
#print("z aie: ", z_aie)
p.join()
print(aie_output)
self.plot_results(self.input_cplx_data,aie_output)
input("Enter any key to end simulation")
self.out0_util.disconnect()
self.in0_util.end_of_simulation()
print("Disconnected all IPC handles.. done!")
def convert_numpy_to_bytes(self):
#print(self.input_cplx_data)
L = len(self.input_cplx_data)
data = self.input_cplx_data
print("Cplx Samples Len = %d" % L)
if(self.aietype == "cint16"):
rVec = np.real(data).astype(np.int16)
iVec = np.imag(data).astype(np.int16)
out2column = np.zeros((L,2)).astype(np.int16)
elif(self.aietype =='cfloat'):
print("cfloat!")
rVec = np.real(data)
iVec = np.imag(data)
out2column = np.zeros((L,2)).astype(np.single)
# elif(self.aietype =='cint16_plio128'):
# print("cint16_PLIO128!")
# #its unclear to me if i have to pack this differently...
# rVec = np.real(data)
# iVec = np.imag(data)
# out2column = np.zeros((L,2)).astype(np.int16)
else:
print("Not supported type!")
out2column[:,0] = rVec
out2column[:,1] = iVec
print("Byte array to send")
print(''.join(r'\x'+hex(letter)[2:] for letter in out2column.tobytes()))
print("outlen=")
#print(len(out2column.tobytes() ) )
return out2column.tobytes()
def convert_bytes_to_numpy(self,byte_array):
#print("convert back byte_array =")
#print(byte_array)
#print(''.join(r'\x'+hex(letter)[2:] for letter in byte_array))
if(self.aietype == "cint16"):
print("AIE TYPE = cint16")
formatString = "<"+str(len(byte_array)//2)+"h"
upack = struct.unpack(formatString, byte_array)
ivec = upack[0:len(upack):2]
rvec = upack[1:len(upack):2]
elif(self.aietype =='cfloat'):
print("AIE TYPE = cfloat")
formatString = "<"+str(len(byte_array)//4)+"f"
upack = struct.unpack(formatString, byte_array)
print("Len Rx Array=")
print(len(byte_array))
#allgoodvalues = upack[0:len(upack):2]
ivec = upack[0:len(upack):2]
rvec = upack[1:len(upack):2]
#Shouldn't need this
# if(len(ivec) != len(rvec)):
# shorterlen = np.min((len(ivec),len(rvec)))
# ivec = ivec[:shorterlen]
# rvec = rvec[:shorterlen]
# elif(self.aietype =='cint16_plio128'):
# formatString = "<"+str(len(byte_array)//2)+"h"
# upack = struct.unpack(formatString, byte_array)
# #print(upack)
# rvec = upack[0:len(upack):2]
# ivec = upack[1:len(upack):2]
else:
print("Not supported type!")
cVec = np.array(rvec) + 1j*np.array(ivec)
print("back to numpy")
#print(cVec)
return cVec
def plot_results(self,aie_in,aie_out,style='t'):
print("In Plot")
##AIE IN
#plt.plot( list(range(0,len(aie_in))) ,np.real(aie_in),label ="aie_in R")
#plt.plot( list(range(0,len(aie_in))) ,np.imag(aie_in),label ="aie_in I")
### SKIPPING FFT FOR CFLOAT EXAMPLE
##Perform Golden Operation on AIE IN to generate Golden/reference output
#golden_iq_out = np.fft.fftshift(np.fft.fft(aie_in))
#golden_iq_out = golden_iq_out/4 #DSPLIB FFT HAS OUTPUT = MATLAB*4. Compensate for this.
#aie_out_shft = np.fft.fftshift(aie_out)
#plt.plot( list(range(0,len(golden_iq_out))),np.abs(golden_iq_out),label ="Golden FFT - MAG",marker="+")
#plt.plot( list(range(0,len(aie_out))) ,np.abs(aie_out_shft),label ="AIE OUT - MAG")
#AIE OUT
#plt.plot( list(range(0,len(aie_out))),np.real(aie_out),label ="aie_out R")
#plt.plot( list(range(0,len(aie_out))),np.imag(aie_out),label ="aie_out I ")
#plt.legend()
#plt.show()
# TriplePlot(aie_in,"Aie IN",file_or_plot="plot",logplot=True)
# TriplePlot(aie_out,"Aie out",file_or_plot="plot",logplot=True)
# def TriplePlot(iqdata,name,file_or_plot="file",FsMHz=245.76,logplot=False):
# n = len(iqdata)
# k = np.arange(n)
# T = n/FsMHz
# frq = (k-k.size/2)/T
# fig, ax = plt.subplots(2, 1,figsize=(10, 5))
# ax[0].plot(np.real(iqdata), 'r')
# if(iqdata.dtype == 'complex'):
# ax[0].plot(np.imag(iqdata),'b')
# ax[0].set_xlabel('Time Domain - Sample count')
# fdom = np.fft.fftshift(np.fft.fft(iqdata))
# if(logplot == True):
# logfdom = 20*np.log10(np.abs(fdom)+1e-9)
# NBits = 10
# auto_scale_pk = np.max(logfdom)
# auto_scale_range = 20*np.log10(2**(NBits-1))
# #print(auto_scale_range)
# if(auto_scale_pk-auto_scale_range > 6):
# #dont use this autoscale if the data doesnt have at least 6db range
# ax[1].set_ylim([(auto_scale_pk*1.1)-auto_scale_range,auto_scale_pk*1.1])
# ax[1].set_ylabel('db (20log)')
# else:
# logfdom= np.abs(fdom)
# ax[1].set_ylabel('linear')
# ax[1].plot(frq,logfdom,'r')
# ax[1].grid(True)
# ax[1].set_xlabel('Frequency Domain ')
# ax[0].set_title(name)
# if(file_or_plot == "file"):
# figfilename = name + ".png"
# plt.savefig(figfilename,bbox_inches='tight')
# else:
# plt.show()
def MakeInputStim(Nsamps=1024):
n = np.arange(0,Nsamps)
Fs = 245.76e6
Ftone = 30.72e6/16
cplxTone = 1.0* np.exp(-2*1j*np.pi*Ftone/Fs*n)
#cplxTone += 1.0* np.exp(-2*1j*np.pi*15.36e6/Fs*n)
Nbits = 16-4
Nbits = Nbits -2 #removed additional bits to help with FFT processing gain
mx = np.max(np.abs(cplxTone))
cscaled = np.round(cplxTone/mx * 2**(Nbits-1)-1)
return cscaled
def MakeCountingPattern(Nsamps=16):
#print("inside counting")
#print(Nsamps)
n = np.arange(0,Nsamps)
nneg = -1*np.arange(0,Nsamps)
cscale = n + 1j*nneg
return cscale
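# Editor's note (hedged example, not in the original): MakeCountingPattern(4) yields the
# complex ramp 0+0j, 1-1j, 2-2j, 3-3j, which makes byte ordering on the AXI stream easy to
# inspect in the debug prints above.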
if __name__ == "__main__":
NSamps=32
#iqdata =MakeInputStim(NSamps)
iqdata = MakeCountingPattern(NSamps)
obj = IQData(iqdata,aietype="cint16",plio_width=64)
#obj.self_test()
print("Starting Test...")
obj.run_test(ipc=True)
print("Test complete")
```
#### File: sw/pysrc/run_traffic_gen.py
```python
import os
os.environ['OPENBLAS_NUM_THREADS'] = '1'
from xilinx_xtlm import ipc_axis_master_util
from xilinx_xtlm import ipc_axis_slave_util
from xilinx_xtlm import xtlm_ipc
import struct
import multiprocessing as mp
import numpy as np
import copy as copy
import matplotlib.pyplot as plt
import sys
class IQData():
def __init__(self, numpy_cplx_data,aietype='cint16',plio_width=32,supressplots=0):
""" Initialization """
self.input_cplx_data = numpy_cplx_data
self.parent_conn0, self.child_conn0 = mp.Pipe()
self.aietype = aietype
self.plio_width = plio_width
self.rx_payload_len = -1
self.tx_payload_len = -1
self.supressplots = supressplots
def self_test(self):
print("inside self_test")
self.input_cplx_data = MakeCountingPattern(16)
#self.plot_results(self.input_cplx_data,iqdata)
t1 = self.convert_numpy_to_bytes()
self.convert_bytes_to_numpy(t1)
def rx_from_aie(self):
payload = self.rx_axis.sample_transaction()
#This call blocks until the AIE passes some data to the AXI SIM IPC SLAVE
cvec = self.convert_bytes_to_numpy(payload.data)
self.child_conn0.send(cvec)
print("Received AIE Output. Sending to parent thread for processing")
def tx_to_aie(self,iq_data_as_bytes,test):
NumBytesToSend = len(iq_data_as_bytes)
#print("xmit: len Bytes = %d" % NumBytesToSend)
NumBytesPerBeat = self.plio_width//8
NumTrans = NumBytesToSend//NumBytesPerBeat
print("NumBytesToSend=%d,NumBytesPerTransaction=%d,TotalTransactions=%d" % (NumBytesToSend,NumBytesPerBeat,NumTrans))
for i in range(NumTrans):
data2send = iq_data_as_bytes[(i*NumBytesPerBeat):(i*NumBytesPerBeat)+NumBytesPerBeat]
#Stride through byte array in steps of BytesPerBeat
payload = xtlm_ipc.axi_stream_packet()
#Create a axi stream packet object
payload.data_length = NumBytesPerBeat
#Tell the object how much data will be sent in bytes
if(i == NumTrans-1):
payload.tlast = True
print("Tlast sent!")
else:
payload.tlast = False
payload.data =data2send
self.tx_axis.b_transport(payload)
#Send the data to the ipc master
print("Finished sending")
def run_test(self, ipc=False):
if ipc:
self.tx_axis = ipc_axis_master_util("tx_iqdata")
self.rx_axis = ipc_axis_slave_util("rx_iqdata")
#Create both Master and Slave ipc utils.
#The argument strings must match the names in system.cfg
self.tx_to_aie(self.convert_numpy_to_bytes(),False)
print("Data sent to AIE. Waiting for results...this may take a few minutes")
if ipc:
p= mp.Process(target=self.rx_from_aie())
p.start()
aie_output = self.parent_conn0.recv()
print("Data received from AIE ")
p.join()
if (not self.supressplots):
self.plot_results(self.input_cplx_data,aie_output)
input("Enter any key to end simulation")
self.rx_axis.disconnect()
self.tx_axis.end_of_simulation()
print("Disconnected all IPC handles.. done!")
def convert_numpy_to_bytes(self):
L = len(self.input_cplx_data)
data = self.input_cplx_data
if(self.aietype == "cint16"):
rVec = np.real(data).astype(np.int16)
iVec = np.imag(data).astype(np.int16)
out2column = np.zeros((L,2)).astype(np.int16)
elif(self.aietype =='cfloat'):
print("cfloat!")
rVec = np.real(data)
iVec = np.imag(data)
out2column = np.zeros((L,2)).astype(np.single)
else:
print("Not supported type!")
out2column[:,0] = rVec
out2column[:,1] = iVec
#print("Byte array to send")
#print(''.join(r'\x'+hex(letter)[2:] for letter in out2column.tobytes()))
#print("outlen=")
return out2column.tobytes()
def convert_bytes_to_numpy(self,byte_arry):
if(self.aietype == "cint16"):
formatString = "<"+str(len(byte_arry)//2)+"h"
upack = struct.unpack(formatString, byte_arry)
ivec = upack[0:len(upack):2]
rvec = upack[1:len(upack):2]
elif(self.aietype =='cfloat'):
formatString = "<"+str(len(byte_arry)//4)+"f"
upack = struct.unpack(formatString, byte_arry)
print("Len Rx Array=")
print(len(byte_arry))
ivec = upack[0:len(upack):2]
rvec = upack[1:len(upack):2]
else:
print("Not supported type!")
cVec = np.array(rvec) + 1j*np.array(ivec)
return cVec
def plot_results(self,aie_in,aie_out,style='t'):
##AIE IN
# plt.plot( list(range(0,len(aie_in))) ,np.real(aie_in),label ="aie_in R")
# plt.plot( list(range(0,len(aie_in))) ,np.imag(aie_in),label ="aie_in I")
#Perform Golden Operation on AIE IN to generate Golden/reference output
golden_iq_out = np.fft.fftshift(np.fft.fft(aie_in))
golden_iq_out = golden_iq_out/4 #DSPLIB FFT HAS OUTPUT = MATLAB*4. Compensate for this.
aie_out_shft = np.fft.fftshift(aie_out)
plt.plot( list(range(0,len(golden_iq_out))),np.abs(golden_iq_out),label ="Golden FFT - MAG",marker="+")
plt.plot( list(range(0,len(aie_out))) ,np.abs(aie_out_shft),label ="AIE OUT - MAG")
plt.legend()
plt.show()
def MakeInputStim(Nsamps=1024):
n = np.arange(0,Nsamps)
Fs = 245.76e6
Ftone = 30.72e6/16
cplxTone = 1.0* np.exp(-2*1j*np.pi*Ftone/Fs*n)
Nbits = 16-4
Nbits = Nbits -2 #removed additional bits to help with FFT processing gain
mx = np.max(np.abs(cplxTone))
cscaled = np.round(cplxTone/mx * 2**(Nbits-1)-1)
return cscaled
def MakeCountingPattern(Nsamps=16):
n = np.arange(0,Nsamps)
nneg = -1*np.arange(0,Nsamps)
cscale = n + 1j*nneg
return cscale
if __name__ == "__main__":
for i, arg in enumerate(sys.argv):
if( i == 1):
cmd_line_pliowidth = int(arg)
if( i == 2):
skipplots=int(arg)
print(skipplots)
NSamps=128
iqdata =MakeInputStim(NSamps)
#iqdata = MakeCountingPattern(NSamps)
obj = IQData(iqdata,aietype="cint16",plio_width=cmd_line_pliowidth,supressplots=skipplots)
obj.run_test(ipc=True)
print("TEST PASSED")
``` |
{
"source": "jlandais/recvis",
"score": 2
} |
#### File: recvis/ravens_torch/demos.py
```python
import os
import numpy as np
from absl import app, flags
from ravens_torch import tasks
from ravens_torch.constants import EXPERIMENTS_DIR, ENV_ASSETS_DIR
from ravens_torch.dataset import Dataset
from ravens_torch.environments.environment import Environment
flags.DEFINE_string('assets_root', ENV_ASSETS_DIR, '')
flags.DEFINE_string('data_dir', EXPERIMENTS_DIR, '')
flags.DEFINE_bool('disp', False, '')
flags.DEFINE_bool('shared_memory', False, '')
flags.DEFINE_string('task', 'block-insertion', '')
flags.DEFINE_string('mode', 'test', '')
flags.DEFINE_integer('n', 1000, '')
FLAGS = flags.FLAGS
def main(unused_argv):
# Initialize environment and task.
env = Environment(
FLAGS.assets_root,
disp=FLAGS.disp,
shared_memory=FLAGS.shared_memory,
hz=480)
task = tasks.names[FLAGS.task]()
task.mode = FLAGS.mode
# Initialize scripted oracle agent and dataset.
agent = task.oracle(env)
dataset = Dataset(os.path.join(
FLAGS.data_dir, f'{FLAGS.task}-{task.mode}'))
# Train seeds are even and test seeds are odd.
seed = dataset.max_seed
if seed < 0:
seed = -1 if (task.mode == 'test') else -2
# Collect training data from oracle demonstrations.
while dataset.n_episodes < FLAGS.n:
print(f'Oracle demonstration: {dataset.n_episodes + 1}/{FLAGS.n}')
episode, total_reward = [], 0
seed += 2
np.random.seed(seed)
env.set_task(task)
obs = env.reset()
info = None
reward = 0
for _ in range(task.max_steps):
act = agent.act(obs, info)
# print('Acting...', act)
episode.append((obs, act, reward, info))
obs, reward, done, info = env.step(act)
total_reward += reward
print(f'Total Reward: {total_reward} Done: {done}')
if done:
break
episode.append((obs, None, reward, info))
# Only save completed demonstrations.
if total_reward > 0.99:
dataset.add(seed, episode)
if __name__ == '__main__':
app.run(main)
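# Editor's note (hedged usage sketch, not in the original): a typical invocation mirrors the
# absl flags defined above, e.g.
#   python demos.py --task=block-insertion --mode=train --n=10
# Train runs use even seeds and test runs odd seeds (see the seed initialisation above), so the
# two splits draw from disjoint seed sets.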
```
#### File: ravens_torch/models/gt_state.py
```python
import torch
import torch.nn as nn
ACTIVATIONS = {
'relu': nn.ReLU,
}
def init_normal_weights_bias(m):
if type(m) == nn.Linear:
torch.nn.init.normal_(m.weight)
torch.nn.init.normal_(m.bias)
def DenseBlock(in_channels, out_channels, activation=None):
if activation is not None:
fc = nn.Sequential(
nn.Linear(in_channels, out_channels),
activation(),
)
else:
fc = nn.Linear(in_channels, out_channels)
fc.apply(init_normal_weights_bias)
return fc
class MlpModel(nn.Module):
"""MLP ground-truth state module."""
def __init__(self,
d_obs,
d_action,
activation="relu",
mdn=False,
dropout=0.2,
use_sinusoid=True):
super(MlpModel, self).__init__()
self.normalize_input = True
self.use_sinusoid = use_sinusoid
if self.use_sinusoid:
k = 3
else:
k = 1
activation = ACTIVATIONS[activation]
dim_concat = 128 + d_obs * k
# CHECK INPUT DIMENSION
self.fc1 = DenseBlock(d_obs * k, 128, activation)
self.drop1 = nn.Dropout(p=dropout)
self.fc2 = DenseBlock(dim_concat, 128, activation)
self.drop2 = nn.Dropout(p=dropout)
self.fc3 = DenseBlock(dim_concat, d_action, activation)
self.mdn = mdn
if self.mdn:
k = 26
self.mu = DenseBlock(dim_concat, d_action * k)
# Variance should be non-negative, so exp()
self.logvar = DenseBlock(dim_concat, k)
# mixing coefficient should sum to 1.0, so apply softmax
self.pi = DenseBlock(dim_concat, k)
self.softmax = nn.Softmax()
self.temperature = 2.5
def reset_states(self):
pass
def set_normalization_parameters(self, obs_train_parameters):
"""Set normalization parameters.
Args:
obs_train_parameters: dict with key, values:
- 'mean', numpy.ndarray of shape (obs_dimension)
- 'std', numpy.ndarray of shape (obs_dimension)
"""
self.obs_train_mean = obs_train_parameters["mean"]
self.obs_train_std = obs_train_parameters["std"]
def call(self, x):
"""FPROP through module.
Args:
x: shape: (batch_size, obs_dimension)
Returns:
shape: (batch_size, action_dimension) (if MDN)
shape of pi: (batch_size, num_gaussians)
shape of mu: (batch_size, num_gaussians*action_dimension)
shape of var: (batch_size, num_gaussians)
"""
obs = x * 1.0
# if self.normalize_input:
# x = (x - self.obs_train_mean) / (self.obs_train_std + 1e-7)
def cs(x):
if self.use_sinusoid:
sin = torch.sin(x)
cos = torch.cos(x)
return torch.cat((x, cos, sin), dim=1)
else:
return x
cs_obs = cs(obs)
x = self.drop1(self.fc1(cs_obs))
x = self.drop2(self.fc2(torch.cat((x, cs_obs), dim=1)))
x = torch.cat((x, cs_obs), dim=1)
if not self.mdn:
x = self.fc3(x)
return x
else:
pi = self.pi(x)
pi = pi / self.temperature
pi = self.softmax(pi)
mu = self.mu(x)
var = torch.exp(self.logvar(x))
return (pi, mu, var)
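# Editor's note: a hedged smoke test of MlpModel, not part of the original file. With
# use_sinusoid=True the observation is concatenated with its sin/cos features, so the first
# dense layer sees d_obs * 3 inputs; with mdn=False the forward pass maps
# (batch, d_obs) -> (batch, d_action).
if __name__ == "__main__":
    model = MlpModel(d_obs=6, d_action=3, mdn=False)
    out = model.call(torch.randn(4, 6))
    assert out.shape == (4, 3)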
```
#### File: ravens_torch/models/regression.py
```python
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from ravens_torch.models import mdn_utils
from ravens_torch.models.conv_mlp import ConvMLP, DeepConvMLP
from ravens_torch.utils import utils, MeanMetrics, to_device
def Regression(in_channels, verbose=False):
"""Regression module."""
model = nn.Sequential(
nn.Linear(in_channels, 32),
nn.ReLU(),
nn.Linear(32, 32),
nn.ReLU(),
nn.Linear(32, 1),
)
_ = to_device([model], "Regression", verbose=verbose)
return model
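# Editor's note (hedged usage sketch, not in the original): the factory above returns a plain
# nn.Sequential, so it behaves like any torch module, e.g.
#   head = Regression(in_channels=64)
#   y = head(torch.randn(8, 64))   # -> shape (8, 1), possibly on GPU depending on to_device()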
# class Regression:
# """Regression module."""
# def __init__(self, in_channels, preprocess, use_mdn, verbose=False):
# self.preprocess = preprocess
# resnet = False
# if resnet:
# self.model = DeepConvMLP(in_channels, d_action=6, use_mdn=use_mdn)
# else:
# self.model = ConvMLP(d_action=6, use_mdn=use_mdn)
# self.device = to_device([self.model], "Regression", verbose=verbose)
# self.optim = optim.Adam(self.model.parameters(), lr=2e-4)
# self.metric = MeanMetrics()
# self.val_metric = MeanMetrics()
# self.loss_criterion = nn.MSELoss() if not use_mdn else mdn_utils.mdn_loss
# def set_batch_size(self, batch_size):
# self.model.set_batch_size(batch_size)
# def forward(self, in_img):
# """Forward pass.
# Args:
# in_img: [B, C, H, W]
# Returns:
# output tensor.
# """
# input_data = self.preprocess(in_img)
# in_tensor = torch.tensor(
# input_data, dtype=torch.float32).to(self.device)
# output = self.model(in_tensor)
# return output
# def train_pick(self, batch_obs, batch_act, train_step, validate=False):
# """Train pick."""
# self.metric.reset()
# self.val_metric.reset()
# input_data = self.preprocess(batch_obs)
# in_tensor = torch.tensor(
# input_data, dtype=torch.float32).to(self.device)
# loss = train_step(self.model, self.optim, in_tensor, batch_act,
# self.loss_criterion)
# if not validate:
# self.metric(loss)
# else:
# self.val_metric(loss)
# return np.float32(loss)
# def save(self, fname):
# torch.save(self.model.state_dict(), fname)
# def load(self, fname):
# self.model.load_state_dict(torch.load(fname))
```
#### File: ravens_torch/models/transport_6dof.py
```python
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops.layers.torch import Rearrange
from ravens_torch.models.regression import Regression
from ravens_torch.models.transport import Transport
from ravens_torch.utils import MeanMetrics
class TransportHybrid6DoF(Transport):
"""Transport + 6DoF regression hybrid."""
def __init__(self, in_channels, n_rotations, crop_size, preprocess, verbose=False):
self.output_dim = 24
self.kernel_dim = 24
super().__init__(in_channels, n_rotations, crop_size,
preprocess, verbose, name="Transport 6DoF")
self.regress_loss = nn.HuberLoss()
self.z_regressor = Regression(in_channels, verbose=verbose)
self.roll_regressor = Regression(in_channels, verbose=verbose)
self.pitch_regressor = Regression(in_channels, verbose=verbose)
self.z_metric = MeanMetrics()
self.roll_metric = MeanMetrics()
self.pitch_metric = MeanMetrics()
def correlate(self, in0, in1, softmax):
in0 = Rearrange('b h w c -> b c h w')(in0)
in1 = Rearrange('b h w c -> b c h w')(in1)
output = F.conv2d(in0[Ellipsis, :3], in1[:, :, :3, :])
z_tensor = F.conv2d(in0[Ellipsis, :8], in1[:, :, :8, :])
roll_tensor = F.conv2d(in0[Ellipsis, 8:16], in1[:, :, 8:16, :])
pitch_tensor = F.conv2d(in0[Ellipsis, 16:24], in1[:, :, 16:24, :])
if softmax:
output_shape = output.shape
output = Rearrange('b c h w -> b (c h w)')(output)
output = self.softmax(output)
output = Rearrange(
'b (c h w) -> b c h w',
c=output_shape[1],
h=output_shape[2],
w=output_shape[3])(output)
output = output.detach().cpu().numpy()
return output, z_tensor, roll_tensor, pitch_tensor
def train_block(self, in_img, p, q, theta, z, roll, pitch):
output = self.forward(in_img, p, softmax=False)
output, z_tensor, roll_tensor, pitch_tensor = output
# Get one-hot pixel label map and 6DoF labels.
itheta = theta / (2 * np.pi / self.n_rotations)
itheta = np.int32(np.round(itheta)) % self.n_rotations
label_size = in_img.shape[:2] + (self.n_rotations,)
label = np.zeros(label_size)
label[q[0], q[1], itheta] = 1
z_label, roll_label, pitch_label = z, roll, pitch
# Use a window for regression rather than only exact.
u_window = 7
v_window = 7
theta_window = 1
u_min = max(q[0] - u_window, 0)
u_max = min(q[0] + u_window + 1, z_tensor.shape[1])
v_min = max(q[1] - v_window, 0)
v_max = min(q[1] + v_window + 1, z_tensor.shape[2])
theta_min = max(itheta - theta_window, 0)
theta_max = min(itheta + theta_window + 1, z_tensor.shape[3])
z_est_at_xytheta = z_tensor[0, u_min:u_max, v_min:v_max,
theta_min:theta_max]
roll_est_at_xytheta = roll_tensor[0, u_min:u_max, v_min:v_max,
theta_min:theta_max]
pitch_est_at_xytheta = pitch_tensor[0, u_min:u_max, v_min:v_max,
theta_min:theta_max]
z_est_at_xytheta = Rearrange('b c h w -> b (h w c)')(z_est_at_xytheta)
roll_est_at_xytheta = Rearrange(
'b c h w -> b (h w c)')(roll_est_at_xytheta)
pitch_est_at_xytheta = Rearrange(
'b c h w -> b (h w c)')(pitch_est_at_xytheta)
z_est_at_xytheta = self.z_regressor(z_est_at_xytheta)
roll_est_at_xytheta = self.roll_regressor(roll_est_at_xytheta)
pitch_est_at_xytheta = self.pitch_regressor(pitch_est_at_xytheta)
z_weight = 10.0
roll_weight = 10.0
pitch_weight = 10.0
z_label = torch.tensor(z_label[np.newaxis, ...]).to(self.device)
roll_label = torch.tensor(roll_label[np.newaxis, ...]).to(self.device)
pitch_label = torch.tensor(
pitch_label[np.newaxis, ...]).to(self.device)
z_loss = z_weight * self.regress_loss(z_label, z_est_at_xytheta)
roll_loss = roll_weight * \
self.regress_loss(roll_label, roll_est_at_xytheta)
pitch_loss = pitch_weight * \
self.regress_loss(pitch_label, pitch_est_at_xytheta)
return z_loss, roll_loss, pitch_loss
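# Editor's note (hedged summary, not in the original): rather than supervising only the exact
# pixel/rotation bin (q[0], q[1], itheta), train_block() slices a small neighbourhood
# (u_window = v_window = 7 pixels, theta_window = 1 bin) out of each regression tensor,
# flattens it, and feeds it through the z/roll/pitch Regression heads before applying the
# weighted Huber losses.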
def train(self, in_img, p, q, theta, z, roll, pitch):
self.metric.reset()
self.z_metric.reset()
self.roll_metric.reset()
self.pitch_metric.reset()
self.model_query.train()
self.model_key.train()
self.z_regressor.model.train()
self.roll_regressor.model.train()
self.pitch_regressor.model.train()
self.optimizer_query.zero_grad()
self.optimizer_key.zero_grad()
self.z_regressor.optimizer.zero_grad()
self.roll_regressor.optimizer.zero_grad()
self.pitch_regressor.optimizer.zero_grad()
z_loss, roll_loss, pitch_loss = self.train_block(
in_img, p, q, theta, z, roll, pitch)
loss = z_loss + roll_loss + pitch_loss
loss.backward()
# MAYBE NOT FOLLOWING TWO LINES
self.optimizer_query.step()
self.optimizer_key.step()
self.z_regressor.optimizer.step()
self.roll_regressor.optimizer.step()
self.pitch_regressor.optimizer.step()
self.z_metric(z_loss)
self.roll_metric(roll_loss)
self.pitch_metric(pitch_loss)
self.iters += 1
return np.float32(loss.detach().cpu().numpy())
def test(self, in_img, p, q, theta, z, roll, pitch):
self.model_query.eval()
self.model_key.eval()
self.z_regressor.model.eval()
self.roll_regressor.model.eval()
self.pitch_regressor.model.eval()
with torch.no_grad():
z_loss, roll_loss, pitch_loss = self.train_block(
in_img, p, q, theta, z, roll, pitch)
loss = z_loss + roll_loss + pitch_loss
self.z_metric(z_loss)
self.roll_metric(roll_loss)
self.pitch_metric(pitch_loss)
self.iters += 1
return np.float32(loss.detach().cpu().numpy())
# ---------------------------------------------------------------------------
# Visualization methods.
# ---------------------------------------------------------------------------
# # For visualization
# Need to pass in theta to use this visualization.
# self.feature_visualize = False
# if self.feature_visualize:
# self.fig, self.ax = plt.subplots(5, 1)
# self.write_visualize = False
# self.plot_interval = 20
# visualize_input = False
# if visualize_input and self.six_dof: # only supported for six dof model
# self.visualize_train_input(in_img, p, q, theta, z, roll, pitch)
# if theta is not None and self.feature_visualize and
# self.iters % self.plot_interval == 0:
# self.visualize_introspection(img_unprocessed, p, rvecs, in_shape,
# theta, logits, kernel_raw, output)
# def visualize_introspection(self, img_unprocessed, p, rvecs, in_shape,
# theta, logits, kernel, output):
# """Utils for visualizing features at
# the end of the Background and Foreground networks."""
# # Do this again, for visualization
# crop_rgb = tf.convert_to_tensor(
# img_unprocessed.copy().reshape(in_shape), dtype=tf.float32)
# crop_rgb = tf.repeat(crop_rgb, repeats=self.n_rotations, axis=0)
# crop_rgb = tfa.image.transform(crop_rgb, rvecs, interpolation="NEAREST")
# crop_rgb = crop_rgb[:, p[0]:(p[0] + self.crop_size),
# p[1]:(p[1] + self.crop_size), :]
# crop_rgb = crop_rgb.numpy()
# self.ax[0].cla()
# self.ax[1].cla()
# self.ax[2].cla()
# self.ax[3].cla()
# itheta = theta / (2 * np.pi / self.n_rotations)
# itheta = np.int32(np.round(itheta)) % self.n_rotations
# self.ax[0].imshow(crop_rgb[itheta, :, :, :3].transpose(1, 0, 2) / 255.)
# if self.write_visualize:
# # delete first:
# try:
# shutil.rmtree("vis/crop_rgb")
# shutil.rmtree("vis/crop_kernel")
# except: # pylint: disable=bare-except
# print("Warning: couldn't delete folder for visualization.")
# os.system("mkdir -p vis/crop_rgb")
# os.system("mkdir -p vis/crop_kernel")
# for theta_idx in range(self.n_rotations):
# filename = "itheta_" + str(theta_idx).zfill(4) + ".png"
# if itheta == theta_idx:
# filename = "label-" + filename
# imageio.imwrite(
# os.path.join("vis/crop_rgb/", filename),
# crop_rgb[theta_idx, :, :, :3].transpose(1, 0, 2))
# self.ax[1].imshow(img_unprocessed[:, :, :3].transpose(1, 0, 2) / 255.)
# if self.write_visualize:
# filename = "img_rgb.png"
# imageio.imwrite(
# os.path.join("vis/", filename),
# img_unprocessed[:, :, :3].transpose(1, 0, 2))
# logits_numpy = logits.numpy()
# kernel_numpy = kernel.numpy()
# for c in range(3):
# channel_mean = np.mean(logits_numpy[:, :, :, c])
# channel_std = np.std(logits_numpy[:, :, :, c])
# channel_1std_max = channel_mean + channel_std
# # channel_1std_max = np.max(logits_numpy[:, :, :, c])
# channel_1std_min = channel_mean - channel_std
# # channel_1std_min = np.min(logits_numpy[:, :, :, c])
# logits_numpy[:, :, :, c] -= channel_1std_min
# logits_numpy[:, :, :, c] /= (channel_1std_max - channel_1std_min)
# for theta_idx in range(self.n_rotations):
# channel_mean = np.mean(kernel_numpy[theta_idx, :, :, c])
# channel_std = np.std(kernel_numpy[theta_idx, :, :, c])
# channel_1std_max = channel_mean + channel_std
# # channel_1std_max = np.max(kernel_numpy[itheta, :, :, c])
# channel_1std_min = channel_mean - channel_std
# # channel_1std_min = np.min(kernel_numpy[itheta, :, :, c])
# kernel_numpy[theta_idx, :, :, c] -= channel_1std_min
# kernel_numpy[theta_idx, :, :, c] /= (
# channel_1std_max - channel_1std_min)
# self.ax[2].imshow(logits_numpy[0, :, :, :3].transpose(1, 0, 2))
# self.ax[3].imshow(kernel_numpy[itheta, :, :, :3].transpose(1, 0, 2))
# if self.write_visualize:
# imageio.imwrite(
# os.path.join("vis", "img_features.png"),
# logits_numpy[0, :, :, :3].transpose(1, 0, 2))
# for theta_idx in range(self.n_rotations):
# filename = "itheta_" + str(theta_idx).zfill(4) + ".png"
# if itheta == theta_idx:
# filename = "label-" + filename
# imageio.imwrite(
# os.path.join("vis/crop_kernel/", filename),
# kernel_numpy[theta_idx, :, :, :3].transpose(1, 0, 2))
# heatmap = output[0, :, :, itheta].numpy().transpose()
# # variance = 0.1
# heatmap = -np.exp(-heatmap / 0.1)
# cmap = plt.cm.jet
# norm = plt.Normalize(vmin=heatmap.min(), vmax=heatmap.max())
# heatmap = cmap(norm(heatmap))
# self.ax[4].imshow(heatmap)
# if self.write_visualize:
# imageio.imwrite("vis/heatmap.png", heatmap)
# # non-blocking
# plt.draw()
# plt.pause(0.001)
# # blocking
# # plt.show()
# def visualize_train_input(self, in_img, p, q, theta, z, roll, pitch):
# """Visualize the training input."""
# points = []
# colors = []
# height = in_img[:, :, 3]
# for i in range(in_img.shape[0]):
# for j in range(in_img.shape[1]):
# pixel = (i, j)
# position = utils.pix_to_xyz(pixel, height, self.bounds,
# self.pixel_size)
# points.append(position)
# colors.append(in_img[i, j, :3])
# points = np.array(points).T # shape (3, N)
# colors = np.array(colors).T / 255.0 # shape (3, N)
# self.vis["pointclouds/scene"].set_object(
# g.PointCloud(position=points, color=colors))
# pick_position = utils.pix_to_xyz(p, height, self.bounds,
# self.pixel_size)
# label = "pick"
# utils.make_frame(self.vis, label, h=0.05, radius=0.0012, o=0.1)
# pick_transform = np.eye(4)
# pick_transform[0:3, 3] = pick_position
# self.vis[label].set_transform(pick_transform)
# place_position = utils.pix_to_xyz(q, height, self.bounds,
# self.pixel_size)
# label = "place"
# utils.make_frame(self.vis, label, h=0.05, radius=0.0012, o=0.1)
# place_transform = np.eye(4)
# place_transform[0:3, 3] = place_position
# place_transform[2, 3] = z
# rotation = utils.eulerXYZ_to_quatXYZW((roll, pitch, -theta))
# quaternion_wxyz = np.asarray(
# [rotation[3], rotation[0], rotation[1], rotation[2]])
# place_transform[0:3, 0:3] =
# mtf.quaternion_matrix(quaternion_wxyz)[0:3, 0:3]
# self.vis[label].set_transform(place_transform)
# _, ax = plt.subplots(2, 1)
# ax[0].imshow(in_img.transpose(1, 0, 2)[:, :, :3] / 255.0)
# ax[0].scatter(p[0], p[1])
# ax[0].scatter(q[0], q[1])
# ax[1].imshow(in_img.transpose(1, 0, 2)[:, :, 3])
# ax[1].scatter(p[0], p[1])
# ax[1].scatter(q[0], q[1])
# plt.show()
```
#### File: ravens_torch/tasks/align_box_corner.py
```python
import os
import numpy as np
from ravens_torch.tasks.task import Task
from ravens_torch.utils import utils
class AlignBoxCorner(Task):
"""Aligning task."""
def __init__(self):
super().__init__()
self.max_steps = 3
def reset(self, env):
super().reset(env)
self._add_instance(env)
def _add_instance(self, env):
# Generate randomly shaped box.
box_size = self.get_random_size(0.05, 0.15, 0.05, 0.15, 0.01, 0.06)
# Add corner.
dimx = (box_size[0] / 2 - 0.025 + 0.0025, box_size[0] / 2 + 0.0025)
dimy = (box_size[1] / 2 + 0.0025, box_size[1] / 2 - 0.025 + 0.0025)
corner_template = 'corner/corner-template.urdf'
replace = {'DIMX': dimx, 'DIMY': dimy}
corner_urdf = self.fill_template(corner_template, replace)
corner_size = (box_size[0], box_size[1], 0)
corner_pose = self.get_random_pose(env, corner_size)
env.add_object(corner_urdf, corner_pose, 'fixed')
os.remove(corner_urdf)
# Add possible placing poses.
theta = utils.quatXYZW_to_eulerXYZ(corner_pose[1])[2]
fip_rot = utils.eulerXYZ_to_quatXYZW((0, 0, theta + np.pi))
pose1 = (corner_pose[0], fip_rot)
alt_x = (box_size[0] / 2) - (box_size[1] / 2)
alt_y = (box_size[1] / 2) - (box_size[0] / 2)
alt_pos = (alt_x, alt_y, 0)
alt_rot0 = utils.eulerXYZ_to_quatXYZW((0, 0, np.pi / 2))
alt_rot1 = utils.eulerXYZ_to_quatXYZW((0, 0, 3 * np.pi / 2))
pose2 = utils.multiply(corner_pose, (alt_pos, alt_rot0))
pose3 = utils.multiply(corner_pose, (alt_pos, alt_rot1))
# Add box.
box_template = 'box/box-template.urdf'
box_urdf = self.fill_template(box_template, {'DIM': box_size})
box_pose = self.get_random_pose(env, box_size)
box_id = env.add_object(box_urdf, box_pose)
os.remove(box_urdf)
self.color_random_brown(box_id)
# Goal: box is aligned with corner (1 of 4 possible poses).
self.goals.append(([(box_id, (2 * np.pi, None))], np.int32([[1, 1, 1, 1]]),
[corner_pose, pose1, pose2, pose3],
False, True, 'pose', None, 1))
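# Editor's note (hedged summary, not in the original): the goal tuple accepts any one of the
# four placing poses built above: the corner pose itself, the same position rotated by pi, and
# the two poses offset by alt_pos and rotated by +/- pi/2. Each of them aligns the brown box
# with the fixed corner marker.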
```
#### File: ravens_torch/utils/text.py
```python
class STYLE:
BOLD = '\033[1m'
END = '\033[0m'
def bold(text):
return STYLE.BOLD + text + STYLE.END
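# Editor's note (hedged example, not in the original): bold("hello") wraps the string in the
# ANSI escape sequences defined above, so print(bold("hello")) renders in bold on
# ANSI-capable terminals.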
```
#### File: ravens_torch/utils/video_recorder.py
```python
import os
import numpy as np
import skvideo.io
import pybullet as p
from ravens_torch.tasks import cameras
from ravens_torch.utils.text import bold
CONFIG = cameras.RealSenseD415.CONFIG
class CameraImageGetter:
def __init__(self, camera_config=None, width=1280):
self.camera_idx = 0
if camera_config is not None:
self.config = camera_config
else:
self.config = CONFIG[self.camera_idx]
self.width = width
self._compute_view_matrix()
self._compute_projection_matrix_fov()
def _compute_view_matrix(self):
lookdir = np.float32([0, 0, 1]).reshape(3, 1)
updir = np.float32([0, -1, 0]).reshape(3, 1)
rotation = p.getMatrixFromQuaternion(self.config['rotation'])
rotm = np.float32(rotation).reshape(3, 3)
lookdir = (rotm @ lookdir).reshape(-1)
camera_position = self.config['position']
camera_target_position = self.config['position'] + lookdir
camera_up_vector = (rotm @ updir).reshape(-1)
self.view_matrix = p.computeViewMatrix(
cameraEyePosition=camera_position,
cameraTargetPosition=camera_target_position,
cameraUpVector=camera_up_vector)
def _compute_projection_matrix_fov(self):
im_w, im_h = self.config['image_size']
focal_len = self.config['intrinsics'][0]
znear, zfar = self.config['zrange']
fovh = (im_w / 2) / focal_len
fovh = 180 * np.arctan(fovh) * 2 / np.pi
aspect_ratio = im_h / im_w
self.projection_matrix = p.computeProjectionMatrixFOV(
fov=fovh,
aspect=aspect_ratio,
nearVal=znear,
farVal=zfar)
def __call__(self):
height = int(self.width * 3 / 4)
_, _, rgbImg, _, _ = p.getCameraImage(
width=self.width,
height=height,
viewMatrix=self.view_matrix,
projectionMatrix=self.projection_matrix)
return rgbImg
class VideoRecorder:
"""
Video Recorder for the PyBullet environment
Use VideoRecorder as a context manager:
`with VideoRecorder(...) as vid_rec:`
Call `record_frame` wherever you need to save a frame in your code
(for instance: after `p.stepSimulation()`, after `p.multiplyTransforms`...).
"""
def __init__(self, save_dir, episode_idx=0, record_mp4=True, display=True, verbose=False, camera_config=None):
self.record_mp4 = record_mp4
self.display = display
self.verbose = verbose
self.record_every = 5
self.n_steps = 0
self.video_name = f"{save_dir}/episode-{episode_idx}.mp4"
if record_mp4:
if not os.path.exists(save_dir):
os.makedirs(save_dir)
if verbose and record_mp4:
print(
f"{bold('Video Recorder')} active, will save videos at {bold(self.video_name)}")
width = 1280
self.camera_image_getter = CameraImageGetter(
camera_config=camera_config, width=width)
def __enter__(self):
if self.record_mp4:
self.frames = []
return self
def record_frame(self):
if self.record_mp4 and self.n_steps % self.record_every == 0:
rgbImg = self.camera_image_getter()[..., :3]
self.frames.append(rgbImg)
self.n_steps += 1
def __exit__(self, exc_type, exc_val, exc_tb):
if self.record_mp4:
if self.verbose:
print(f"Saving video at {self.video_name}")
skvideo.io.vwrite(
self.video_name,
np.array(self.frames),
inputdict={'-r': "120/1"})
``` |
{
"source": "jlanday/home-watch",
"score": 2
} |
#### File: home-watch/app/utils.py
```python
import base64
import datetime
import logging
import json
import time
import typing
import os
import cv2
from google.cloud import storage
import numpy as np
from sendgrid import SendGridAPIClient
from sendgrid.helpers.mail import Mail
from sendgrid.helpers.mail import (
Mail,
Attachment,
FileContent,
FileName,
FileType,
Disposition,
ContentId,
)
import tensorflow as tf
import tensorflow_hub as hub
from plotters import *
class spy_cam:
def __init__(
self,
bucket_name:str="justin-source-data",
# See https://github.com/tensorflow/hub/blob/master/examples/colab/tf2_object_detection.ipynb for other
# compatible models.
model_url:str="https://tfhub.dev/tensorflow/ssd_mobilenet_v2/2",
seconds_between_alerts:str="60",
camera_name:str="dev",
project_name:str="home-watch",
gcp_creds_path:str="gcp_creds.json",
sg_creds_path:str="sg_api_key.json",
min_score_thresh:str="0.5",
rotate_angle:str="180",
adjust_color:str="False",
sg_from_email:str="<EMAIL>",
sg_to_email:str="<EMAIL>",
resolution:str="800,800",
ignore_list:str="car=1.0",
cache_size:str="5",
):
self.bucket_name = bucket_name
self.model_url = model_url
self.seconds_between_alerts = int(seconds_between_alerts)
self.camera_name = camera_name
self.project_name = project_name
self.gcp_creds_path = gcp_creds_path
self.sg_creds_path = sg_creds_path
self.min_score_thresh = float(min_score_thresh)
self.rotate_angle = int(rotate_angle)
self.adjust_color = adjust_color
self.sg_from_email = sg_from_email
self.sg_to_email = sg_to_email
self.resolution = resolution.split(",")
assert len(self.resolution) == 2, "resolution needs to be shape (dim1,dim2)"
self.ignore_dict = {
item.split("=")[0]: float(item.split("=")[1])
for item in ignore_list.split(",")
}
self.logger = logging.getLogger()
self.logger.setLevel(logging.INFO)
self.cache_size = int(cache_size)
try:
ch = self.logger.handlers[0]
ch.setFormatter(
logging.Formatter(
"[%(asctime)s][%(name)s][%(levelname)s] %(message)s'",
"%Y-%m-%d %H:%M:%S",
)
)
self.logger.handlers[0] = ch
except:
ch = logging.StreamHandler()
ch.setFormatter(
logging.Formatter(
"[%(asctime)s][%(name)s][%(levelname)s] %(message)s'",
"%Y-%m-%d %H:%M:%S",
)
)
self.logger.addHandler(ch)
if self.gcp_creds_path:
self.gcp_client = storage.Client.from_service_account_json(self.gcp_creds_path)
self.gcp_bucket = self.gcp_client.bucket(bucket_name)
if not os.path.exists("images"):
os.mkdir("images")
self.logger.info("made images")
self.cam = cv2.VideoCapture(0)
self.cam.set(3, int(self.resolution[0]))
self.cam.set(4, int(self.resolution[1]))
self.logger.info("opened camera")
self.model = hub.load(self.model_url)
self.logger.info("loaded model")
with open("category_index.json", "r") as f:
self.category_index = json.loads(f.read())
if self.sg_creds_path:
with open(self.sg_creds_path, "r") as f:
self.sg_api_key = json.loads(f.read())["sg_api_key"]
self.mail_client = SendGridAPIClient(self.sg_api_key)
self.cache = []
def add_image_to_cache(self, image_path: str) -> None:
"""Appends an image_path to a cache for emailing"""
self.cache.append(image_path)
def clear_cache(self) -> None:
"""Clears the cache"""
self.cache = []
def send_email(self, subject: str = "Stranger Danger?", content: str = "<strong>:-)</strong>") -> None:
"""Emails all of the images in the cache"""
# -- Initialize a template
message = Mail(
from_email=self.sg_from_email,
to_emails=self.sg_to_email,
subject=subject,
html_content=content,
)
attachments = []
# -- Reverse the cache so the most recently captured images come first,
# -- then keep at most cache_size of them as attachments.
for index, image_path in enumerate(self.cache[::-1][: self.cache_size]):
try:
with open(image_path, "rb") as f:
image_bytes = f.read()
attachment = Attachment()
attachment.file_content = FileContent(base64.b64encode(image_bytes).decode())
attachment.file_type = FileType("image/jpg")
attachment.file_name = FileName(image_path)
attachment.disposition = Disposition("attachment")
attachment.content_id = ContentId("ID{}".format(index))
self.logger.info("added {}".format(image_path))
message.add_attachment(attachment)
except:
self.logger.error("{} attatchment failure".format(image_path))
response = self.mail_client.send(message)
if response.status_code == 202:
self.logger.info("email sent!")
else:
self.logger.error("email failure {}".format(email_response.body))
def capture_image(self) -> (str, np.ndarray):
"""Uses a webcam to take an image and provides transformations"""
# -- Construct image path
now = datetime.datetime.now()
dt = now.strftime("%Y-%m-%d")
hms = now.strftime("%H:%M:%S:%f")
if not os.path.exists("images/dt={dt}".format(dt=dt)):
os.mkdir("images/dt={dt}".format(dt=dt))
path = "images/dt={dt}/{batch}.jpg".format(dt=dt, batch=hms)
# -- Take the photo
_, image = self.cam.read()
# -- Recolor (convert BGR to RGB)
if self.adjust_color == "True":
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# -- Rotate
if self.rotate_angle:
assert isinstance(self.rotate_angle, int), "rotate_angle needs to be an integer or None or False"
image_center = tuple(np.array(image.shape[1::-1]) / 2)
rot_mat = cv2.getRotationMatrix2D(image_center, self.rotate_angle, 1.0)
image = cv2.warpAffine(image, rot_mat, image.shape[1::-1], flags=cv2.INTER_LINEAR)
return path, image
def batch_numpy_image(self, image: np.ndarray) -> np.ndarray:
"""Reshape to have batch size = 1"""
return image.reshape((1, *image.shape)).astype(np.uint8)
def make_predictions(self, image_np: np.ndarray) -> dict:
"""Runs image through arbitrary tensorhub model and returns results"""
results = self.model(image_np)
result = {key: value.numpy() for key, value in results.items()}
return result
def draw_boxes_on_predictions(self, image_np: np.ndarray, result: dict) -> np.ndarray:
"""Draws boundary boxes and labels on image"""
image_np_with_detections = image_np.copy()
# -- A helper function from google :)
visualize_boxes_and_labels_on_image_array(
image_np_with_detections[0],
result["detection_boxes"][0],
(result["detection_classes"][0]).astype(int),
result["detection_scores"][0],
self.category_index,
use_normalized_coordinates=True,
max_boxes_to_draw=200,
min_score_thresh=self.min_score_thresh,
agnostic_mode=False,
)
return image_np_with_detections[0]
def disk_to_cloud(self, image_path: str):
"""Takes an image saved to disk and uploads it to all provided cloud storage vendors"""
if self.gcp_client:
try:
gcp_path = self.disk_to_gcp(image_path)
self.logger.info("image w boxes saved to cloud {}".format(gcp_path))
except:
self.logger.error("problem saving to gcp {}".format(gcp_path))
# -- TODO: Implement other cloud cloud providers
def disk_to_gcp(self, image_path: str) -> str:
"""Uploads a jpg to google cloud storage"""
blob_path = "{project_name}/{camera_name}/{path}".format(
project_name=self.project_name,
camera_name=self.camera_name,
path=image_path,
)
blob = self.gcp_bucket.blob(blob_path)
blob.upload_from_filename(image_path, content_type="image/jpg")
return blob_path
def validate_results(self, results: dict) -> bool:
"""Checks your results against exclusions to determine notifications"""
send_message = False
class_indicies = np.where(results["detection_scores"][0] >= self.min_score_thresh)[0]
class_labels = results["detection_classes"][0][class_indicies]
class_labels = [
self.category_index[str(int(index))]["name"] for index in class_labels
]
class_scores = results["detection_scores"][0][class_indicies]
for index, label in enumerate(class_labels):
class_score = class_scores[index]
message = "{}={:.2f}".format(label, class_score)
self.logger.info(message)
if label in self.ignore_dict.keys():
if class_score >= self.ignore_dict[label]:
send_message = True
elif class_score >= self.min_score_thresh:
send_message = True
return send_message
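# Editor's note (hedged usage sketch, not in the original): a minimal capture-and-alert loop
# built from the methods above could look like
#   cam = spy_cam()
#   path, image = cam.capture_image()
#   batched = cam.batch_numpy_image(image)
#   results = cam.make_predictions(batched)
#   if cam.validate_results(results):
#       cv2.imwrite(path, cam.draw_boxes_on_predictions(batched, results))
#       cam.disk_to_cloud(path)
#       cam.add_image_to_cache(path)
#       cam.send_email()
# with the alert cadence throttled by seconds_between_alerts in the calling loop.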
``` |
{
"source": "jlandercy/newproject",
"score": 2
} |
#### File: newproject/newproject/_new.py
```python
import sys
import newproject
from newproject.settings import settings
def main():
"""
Module entrypoint
"""
settings.logger.info("New package {}".format(newproject.__version__))
sys.exit(0)
if __name__ == "__main__":
main()
```
#### File: newproject/newproject/service.py
```python
import sys
from newproject.settings import settings
def main():
"""
Service entrypoint
"""
import argparse
# CLI Arguments:
cli_parser = argparse.ArgumentParser(
description="Service Command Line",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
cli_parser.add_argument(
"--verbose", type=int, default=40, help="Logger Verbose Level"
)
cli_parser.add_argument(
"--config", type=str, default=str(settings.file), help="Configuration path"
)
cli_parameters = cli_parser.parse_args()
# Set Logger Level
settings.logger.setLevel(cli_parameters.verbose)
sys.exit(0)
if __name__ == "__main__":
main()
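# Editor's note (hedged usage sketch, not in the original): invoked directly, the parser above
# accepts e.g.
#   python -m newproject.service --verbose 20 --config /path/to/settings.json
# where --verbose follows the standard logging levels (10=DEBUG, 20=INFO, 40=ERROR).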
``` |
{
"source": "jlandercy/odapi",
"score": 3
} |
#### File: odapi/odapi/errors.py
```python
import sys
from odapi.settings import settings
class GenericError(Exception):
"""Generic Package Exception must be subclassed not raised"""
def __init__(self, message, **kwargs):
super().__init__(message)
self.__dict__.update(kwargs)
settings.logger.error("[{}] {}: {}".format(type(self).__name__, message, kwargs))
class BadParameter(GenericError):
"""This exception stands for Bad Parameter error in method calls"""
class MissingFrequency(GenericError):
"""This exception stands when a frequency is needed but not found"""
def main():
raise BadParameter("There is nothing about foo or bar", foo="bar")
sys.exit(0)
if __name__ == "__main__":
main()
```
#### File: tests/interfaces/test_interfaces_geomatic.py
```python
import sys
import unittest
import pandas as pd
from odapi.settings import settings
class GeomaticTest:
pass
def main():
unittest.main()
sys.exit(0)
if __name__ == "__main__":
main()
```
#### File: tests/package/test_package_settings.py
```python
import sys
import unittest
from types import SimpleNamespace
from odapi.settings import settings
class SettingsTest(unittest.TestCase):
def test_NameSpace(self):
self.assertIsInstance(settings, SimpleNamespace)
def test_RequiredSettings(self):
self.assertTrue({'package', 'resources', 'uuid4'}.issubset(settings.__dict__))
def main():
unittest.main()
sys.exit(0)
if __name__ == "__main__":
main()
```
#### File: odapi/toolbox/generic.py
```python
import sys
import pathlib
import json
from functools import wraps
import numpy as np
from odapi.settings import settings
class SettingsFile:
"""
Settings files
"""
@staticmethod
def load(path_or_object):
"""
Load file or dict settings
:param path_or_object: Valid path pointing to a valid JSON resource or dict
:param path_or_object: str, pathlib.Path, dict
:return: Dictionary of Settings
:rtype: dict
"""
if isinstance(path_or_object, str):
path_or_object = pathlib.Path(path_or_object)
if isinstance(path_or_object, dict) or path_or_object is None:
path_or_object = path_or_object
else:
with path_or_object.open() as fh:
settings.logger.debug("Load Settings {}".format(path_or_object))
path_or_object = json.load(fh)
return path_or_object
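# Editor's note (hedged example, not in the original): both call forms below are accepted,
#   SettingsFile.load({"key": "value"})   returns the dict unchanged
#   SettingsFile.load("settings.json")    opens the file and returns the parsed JSON dict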
def main():
sys.exit(0)
if __name__ == "__main__":
main()
```
#### File: odapi/toolbox/psychro.py
```python
import numpy as np
from scipy import optimize
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon
from matplotlib.collections import PatchCollection
class Constants:
"""
Physical Constants
"""
R = 8.314472 # [J/mol.K]
T0K = 273.15 # [K]
@staticmethod
def TK(TC):
"""
Temperature Conversion from Celcius to Kelvin
"""
return TC + Constants.T0K
@staticmethod
def TC(TK):
"""
Temperature Conversion from Kelvin to Celcius
"""
return TK - Constants.T0K
class Conditions:
"""
Standard Conditions
"""
p0 = 1.01325e5 # [Pa]
T0 = 0 + Constants.T0K # [K]
class Water:
"""
Water Physical/Chemical Description
"""
M = 18.0153e-3 # [Kg/mol]
Tvap = 99.94 + Constants.T0K # [K]
cp = 1.826e3 # [J/kg.K]
Hv = 40.662e3 # [J/mol]
lv = Hv/M # [J/Kg]
class Air:
"""
Dry Air Physical/Chemical Description
"""
M = 28.6953e-3 # [Kg/mol]
cp = 1.006e3 # [J/Kg.K]
class Mix:
"""
Mix of Gas and Liquid
All quantities are expressed in Standard Units System
"""
C = Constants
CSTP = Conditions
gas = Air
liquid = Water
Mr = liquid.M/gas.M
@staticmethod
def psat(T):
"""
Saturation Pressure p_sat(T) [Pa]
as a Temperature T [K] function
"""
return Mix.CSTP.p0*np.exp(-Mix.liquid.Hv/Mix.C.R*(1/T - 1/Mix.liquid.Tvap))
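# Editor's note (hedged): psat() above is the constant-Hv Clausius-Clapeyron estimate
# p_sat(T) = p0 * exp(-Hv/R * (1/T - 1/Tvap)); e.g. psat(293.15) gives roughly 2.8e3 Pa,
# somewhat above the tabulated ~2.34e3 Pa at 20 degC because Hv is taken at the boiling point.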
@staticmethod
def xpw(pw):
"""
Vapour Mass Ratio x(p_w) [Kg Liquid/Kg Gas]
as a function of Liquid Partial Pressure p_w [Pa]
"""
return Mix.Mr*pw/(Mix.CSTP.p0-pw)
@staticmethod
def xw(T, phi):
"""
Vapour Mass Ratio x(p_w) [Kg Liquid/Kg Gas]
as a function of Liquid Partial Pressure p_w [Pa]
"""
return Mix.pisow(T, phi=phi)
@staticmethod
def pwx(x):
"""
Liquid Partial Pressure p_w(x) [Pa]
as a function of Vapour Mass Ratio x [Kg Liquid/Kg Gas]
"""
return Mix.CSTP.p0*x/(x + Mix.Mr)
@staticmethod
def pisow(T, phi=1.):
"""
Isopleth: Iso Relative Humidity (phi) Curve w(T)=k [-]
as a function of Temperature T [K]
Relative Humidity is defined as the ratio of Liquid Partial Pressure p_w [Pa]
and Saturation Pressure p_sat(T) [Pa]: w = p_w/p_sat(T)
"""
return phi*Mix.psat(T)
@staticmethod
def pisov(T, v):
"""
Isopleth (Isochoric): Iso Specific Volume Curve v(T)=k [m^3 Mix/Kg Gas]
as a function of Temperature T [K]
"""
return Mix.CSTP.p0 - (Mix.C.R*T)/(Mix.gas.M*v)
@staticmethod
def pisoh(T, h):
"""
Isopleth (Isenthalpic): Iso Specific Enthalpy Curve h(T)=k [J/Kg Gas]
as a function of Temperature T [K]
"""
dT = (T - Mix.CSTP.T0)
return Mix.CSTP.p0*(h - Mix.gas.cp*dT)/((h + Mix.Mr*Mix.liquid.lv) + (Mix.Mr*Mix.liquid.cp - Mix.gas.cp)*dT)
@staticmethod
def Tmin_score(f, k):
"""
        Score function for the intersection of the k-isopleth of kind f and the Saturation Curve p_sat(T)
as a function of Temperature T [K]
Score function is designed to determine Tmin [K] for Psychrometric Chart Display
"""
def inner(T):
return Mix.psat(T) - f(T, k)
return inner
@staticmethod
def Tmin(f, k, tol=5e-3):
"""
Solve score function to determine Tmin [K] for Psychrometric Chart Display
"""
return optimize.root(Mix.Tmin_score(f, k), 0.1, tol=tol)
@staticmethod
def Tmax(f, k, tol=5e-3):
"""
Find root of the k-isopleth of kind f to get Tmax [K] for Psychrometric Chart Display
"""
return optimize.root(lambda T: f(T, k), 0.1, tol=tol)
@staticmethod
def get_limits(f, konsts, Tmin, Tmax):
"""
Compute Temperature Boundaries for a given isopleth of a kind f of level k
"""
n = konsts.size
Ts = np.full((n, 2), np.nan)
for i, k in enumerate(konsts):
rmin = Mix.Tmin(f, k)
if rmin.success:
Ts[i, 0] = max(rmin.x[0], Tmin)
rmax = Mix.Tmax(f, k)
if rmax.success:
Ts[i, 1] = min(rmax.x[0], Tmax)
return Ts
@staticmethod
def domestic_ranges():
"""
Basic Ranges for Domestic Use
"""
return {
'Tmin': +0. + Constants.T0K, # [K]
'Tmax': +35. + Constants.T0K, # [K]
'isow': np.arange(0.1, 0.91, 0.1), # [-]
'isov': np.arange(0.76, 0.95, 0.01), # [m^3/kg]
'isoh': np.arange(-1.e4, 13.1e4, 1e4), # [J/Kg]
'kOy': 1000., # [KJ/J]
'ylim': [0., 5.e3], # [Pa]
'Tmode': 'kelvin',
'ymode': 'partial'
}
@staticmethod
def compute(f, konsts, Tmin, Tmax, ns=101):
"""
Compute k-isopleths of kind f for the given Temperature Range (Tmin, Tmax) [K]
        Temperature Range is refined to the real Temperature Boundaries (keeps the resolution consistent for a nicer display)
"""
nk = konsts.size
T = np.full((ns, nk), np.nan)
xT = np.full((ns, nk), np.nan)
Ts = Mix.get_limits(f, konsts, Tmin, Tmax)
for i, k in enumerate(konsts):
T[:, i] = np.linspace(*Ts[i, :], ns)
xT[:, i] = f(T[:, i], k)
return T, xT
_requiredKeys = ('Tmin', 'Tmax', 'isow', 'isov', 'isoh', 'ylim')
@staticmethod
def plot(Tmin=None, Tmax=None, ns=101, rtype='domestic', area=True,
isow=None, isov=None, isoh=None, kOy=None, ylim=None, Tmode=None, ymode=None):
"""
Plot Psychrometric Chart for the given Temperature Range (Tmin, Tmax) [K]
Including k-isopleths of each kind (iso-w, iso-v, iso-h)
Also perform Units Conversion for readability sake
"""
# Parameters:
ranges = getattr(Mix, '{}_ranges'.format(rtype))()
Tmin = Tmin or ranges['Tmin']
Tmax = Tmax or ranges['Tmax']
        isow = isow or ranges['isow']
        isov = isov or ranges['isov']
        isoh = isoh or ranges['isoh']
ylim = ylim or ranges['ylim']
kOy = kOy or ranges['kOy']
Tmode = Tmode or ranges['Tmode']
ymode = ymode or ranges['ymode']
# Temperature:
T = np.linspace(Tmin, Tmax, ns)
# Curves:
psat = Mix.psat(T)/kOy
pphi = np.array([Mix.pisow(T, phi=phi)/kOy for phi in isow]).T
Tv, pv = Mix.compute(Mix.pisov, isov, Tmin, Tmax, ns)
Th, ph = Mix.compute(Mix.pisoh, isoh, Tmin, Tmax, ns)
# Polygons:
if area:
T1 = Constants.TK(np.linspace(15, 27, 10))
X1 = np.concatenate((Mix.pisow(T1, phi=0.4), np.array([0., 0.])))/kOy
T1 = np.concatenate((T1, Constants.TK(np.array([27, 15]))), axis=0)
P1 = Polygon(np.array([T1, X1]).T, True, color='blue', alpha=0.4)
T2 = Constants.TK(np.linspace(15, 23, 10))
X2 = np.concatenate((Mix.pisow(T2, phi=0.7), Mix.pisow(np.flip(T2), phi=1.)), axis=0)/kOy
T2 = np.concatenate((T2, np.flip(T2)), axis=0)
P2 = Polygon(np.array([T2, X2]).T, True, color='red', alpha=0.4)
T3 = Constants.TK(np.linspace(23, 27, 10))
X3 = np.concatenate((Mix.pisow(T3, phi=0.7), Mix.pisow(np.flip(T3), phi=1.)), axis=0)/kOy
T3 = np.concatenate((T3, np.flip(T3)), axis=0)
P3 = Polygon(np.array([T3, X3]).T, True, color='orange', alpha=0.4)
T4 = Constants.TK(np.array([17, 17, 26, 26]))
X4 = Mix.pisow(T4, phi=np.array([0.45, 0.8, 0.5, 0.35]))/kOy
P4 = Polygon(np.array([T4, X4]).T, True, color='green', alpha=0.4)
# Figure:
fig, axe = plt.subplots()
l1 = axe.plot(T, psat, color='black', linewidth=2.0)
l2 = axe.plot(T, pphi, color='black', linewidth=0.75)
l3 = axe.plot(Tv, pv/kOy, color='blue', linewidth=0.75)
l4 = axe.plot(Th, ph/kOy, color='violet', linewidth=0.75)
ls = [l1[0], l2[0], l3[0], l4[0]]
ll = ['Saturation', 'Isohydric', 'Isochoric', 'Isenthalpic']
if area:
l5 = axe.add_line(P1)
l6 = axe.add_line(P2)
l7 = axe.add_line(P3)
l8 = axe.add_line(P4)
ls.extend([l5, l6, l7, l8])
ll.extend(["Dry Zone", "Mold Zone", "Mite Zone", "Comfort Zone"])
axe.set_title(r"{} Psychrometric Chart: $p_0 = {:.3f}$ $[\mathrm{{kPa}}]$".format(
Mix.liquid.__name__, Mix.CSTP.p0/kOy))
axe.set_xlabel(r"Temperature, $T$ $[\mathrm{K}]$")
axe.set_ylabel(r"Partial Pressure, $p_w$ $[\mathrm{kPa}]$")
axe.set_xlim(T[[0, -1]])
axe.set_ylim(np.array(ylim)/kOy)
lgd = axe.legend(ls, ll, bbox_to_anchor=(0, 1), loc='upper left', ncol=2)
#axe.get_figure().tight_layout()
axe.grid()
return axe
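# Illustrative usage sketch (not part of the original module). Values are approximate:
# psat() uses an integrated Clausius-Clapeyron relation rather than tabulated steam data.
def _example_mix_usage():
    T = Constants.TK(20.)                  # 20 degC expressed in Kelvin
    pw = Mix.pisow(T, phi=0.5)             # partial pressure at 50% relative humidity [Pa]
    x = Mix.xpw(pw)                        # vapour mass ratio [kg water / kg dry air]
    print("p_w = {:.0f} Pa, x = {:.4f} kg/kg".format(pw, x))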
def main():
axe = Mix.plot()
plt.show(axe.get_figure())
#axe.get_figure().savefig("psychro.png")
if __name__ == "__main__":
main()
``` |
{
"source": "jlandercy/pysgrs",
"score": 3
} |
#### File: pysgrs/alphabets/specific.py
```python
import sys
from pysgrs.settings import settings
from pysgrs import alphabets
from pysgrs import errors
class PolybeAlphabet(alphabets.IntegerAlphabet):
def __init__(self):
super().__init__(
"ABCDEFGHIKLMNOPQRSTUVWXYZ",
indices=[
11, 12, 13, 14, 15,
21, 22, 23, 24, 25,
31, 32, 33, 34, 35,
41, 42, 43, 44, 45,
51, 52, 53, 54, 55,
]
)
class MorseAlphabet(alphabets.StringAlphabet):
def __init__(self):
super().__init__({
"A": "*-", "B": "-***", "C": "-*-*",
"D": "-**", "E": "*", "F": "**-*",
"G": "--*", "H": "****", "I": "**",
"J": "*---", "K": "-*-", "L": "*-**",
"M": "--", "N": "-*", "O": "---",
"P": "*--*", "Q": "--*-", "R": "*-*",
"S": "***", "T": "-", "U": "**-",
"V": "***-", "W": "*--", "X": "-**-",
"Y": "-*--", "Z": "--**",
"0": "-----", "1": "*----", "2": "**---",
"3": "***--", "4": "****-", "5": "*****",
"6": "-****", "7": "--***", "8": "---**",
"9": "----*"
})
class BaconAlphabet(alphabets.StringAlphabet):
def __init__(self):
super().__init__({
"A": "aaaaa", "B": "aaaab", "C": "aaaba",
"D": "aaabb", "E": "aabaa", "F": "aabab",
"G": "aabba", "H": "aabbb", "I": "abaaa",
"K": "abaab", "L": "ababa", "M": "ababb",
"N": "abbaa", "O": "abbab", "P": "abbba",
"Q": "abbbb", "R": "baaaa", "S": "baaab",
"T": "baaba", "U": "baabb", "W": "babaa",
"X": "babab", "Y": "babba", "Z": "babbb"
})
def main():
sys.exit(0)
if __name__ == "__main__":
main()
```
#### File: pysgrs/ciphers/pipeline.py
```python
import sys
import copy
from pysgrs import ciphers
from pysgrs import errors
from pysgrs.settings import settings
class PipelineCipher(ciphers.GenericCipher):
def __init__(self, pipeline, **kwargs):
if not all([isinstance(c, ciphers.GenericCipher) for c in pipeline]):
raise errors.IllegalCipherParameter(
"Pipeline must be a sequence of ciphers, received {} instead.".format(pipeline))
self._pipeline = tuple(pipeline)
self._kwargs = kwargs
def __str__(self):
return "<{} pipeline=({}) kwargs={}>".format(self.__class__.__name__,
", ".join([str(x) for x in self.pipeline]), self.kwargs)
@property
def pipeline(self):
return self._pipeline
@property
def kwargs(self):
return self._kwargs
def configuration(self):
return {
"pipeline": self.pipeline,
"kwargs": self.kwargs
}
def encipher(self, s, **kwargs):
        kw = copy.deepcopy(self.kwargs)
kw.update(kwargs)
r = copy.copy(s)
for cipher in self.pipeline:
r = cipher.encipher(r, **kw)
settings.logger.debug("{}.{}('{}') -> '{}'".format(self, "encipher", s, r))
return r
def decipher(self, s, **kwargs):
        kw = copy.deepcopy(self.kwargs)
kw.update(kwargs)
r = copy.copy(s)
for cipher in reversed(self.pipeline):
r = cipher.decipher(r, **kw)
settings.logger.debug("{}.{}('{}') -> '{}'".format(self, "decipher", s, r))
return r
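# Illustrative usage sketch (not part of the original module). CaesarCipher and
# RotationCipher are assumed from the package's test suite; any sequence of
# GenericCipher instances composes the same way.
def _example_pipeline_usage():
    pipeline = PipelineCipher([ciphers.CaesarCipher(), ciphers.RotationCipher(offset=7)])
    secret = pipeline.encipher("HELLO")
    plain = pipeline.decipher(secret)      # ciphers are undone in reverse order
    return secret, plain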
def main():
sys.exit(0)
if __name__ == "__main__":
main()
```
#### File: pysgrs/pysgrs/errors.py
```python
import sys
from pysgrs.settings import settings
class GenericException(Exception):
pass
class IllegalParameter(GenericException):
pass
class AlphabetException(GenericException):
pass
class CipherException(GenericException):
pass
class IllegalAlphabetParameter(IllegalParameter):
pass
class IllegalAlphabetOperation(GenericException):
pass
class IllegalAlphabetIndex(AlphabetException):
pass
class AmbiguousAlphabetIndex(AlphabetException):
pass
class IllegalCipherParameter(IllegalParameter):
pass
class IllegalCipherOperation(GenericException):
pass
def main():
sys.exit(0)
if __name__ == "__main__":
main()
```
#### File: pysgrs/interfaces/score.py
```python
import abc
import sys
import collections
import pathlib
import json
import numpy as np
from pysgrs.settings import settings
from pysgrs import errors
from pysgrs.toolbox.cleaner import AsciiCleaner
class GenericScore(abc.ABC):
@abc.abstractmethod
def score(self, text, **kwargs):
pass
def main():
sys.exit(0)
if __name__ == "__main__":
main()
```
#### File: pysgrs/scores/ngrams.py
```python
import sys
import collections
import pathlib
import json
import numpy as np
from pysgrs.interfaces.score import GenericScore
from pysgrs.settings import settings
from pysgrs import errors
from pysgrs.toolbox.cleaner import AsciiCleaner
class NGramScore(GenericScore):
def __init__(self, source=None, order=1, language="fr", floor=0.01, scaler=np.log10):
self._language = language
if source is None:
source = settings.resources / 'ngrams/ngrams_{}.json'.format(self.language)
if isinstance(source, (str, pathlib.Path)):
source = pathlib.Path(source)
with source.open() as fh:
source = json.load(fh)
values = source["%d-grams" % order]
source = {k: v for k, v in zip(values["ngram"], values["count"])}
if isinstance(source, collections.Counter):
source = dict(source)
if not isinstance(source, dict):
raise errors.IllegalParameter("Requires a dict or a path, received {} instead".format(type(source)))
if not all(isinstance(c, str) for c in source.keys()):
raise errors.IllegalParameter("All keys must be string, received instead")
if not all(isinstance(c, int) and (c > 0) for c in source.values()):
raise errors.IllegalParameter("All values must be positive integer, received instead")
self._order = len(tuple(source.keys())[0])
if not all(len(ngram) == self.order for ngram in source):
raise errors.IllegalParameter("All keys must have the same length")
self._counts = source
self._total = sum(self.counts.values())
self._scaler = scaler
self._floor = self.scaler(floor/self.total)
self._frequency = {k: float(v) / self.total for k, v in self.counts.items()}
if not np.isclose(np.sum(tuple(self.frequency.values())), 1.):
raise errors.IllegalParameter("Probability sum must converge to unit")
self._likelihood = {k: self.scaler(v) for k, v in self.frequency.items()}
if not all(x > self.floor for x in self.likelihood.values()):
raise errors.IllegalParameter("Floor must be lower than all existing likelihood")
def __str__(self):
return "<NGramScore language='{}' order={} size={} floor={:.3f} scaler={}>".format(
self.language, self.order, self.size, self.floor, self.scaler.__name__)
def __repr__(self):
return self.__str__()
@property
def language(self):
return self._language
@property
def order(self):
return self._order
@property
def floor(self):
return self._floor
@property
def scaler(self):
return self._scaler
@property
def counts(self):
return self._counts
@property
def total(self):
return self._total
@property
def frequency(self):
return self._frequency
@property
def likelihood(self):
return self._likelihood
@property
def size(self):
return len(self.counts)
def __len__(self):
return self.size
def contains(self, item):
return item in self.counts
def __contains__(self, item):
return self.contains(item)
def score(self, text, normalize=True):
if normalize:
text = AsciiCleaner.normalize(text)
n = len(text)
score = 0.
for k in range(n - self.order + 1):
ngram = text[k:(k+self.order)]
score += self.likelihood.get(ngram, self.floor)
settings.logger.debug("Score: {:.3f} for '{}' with {}".format(score, text[:16], self))
return score
class MultiNGramScore(GenericScore):
def __init__(self, source=None, language="fr", min_order=1, max_order=5):
if source is None:
source = settings.resources / 'ngrams/ngrams_{}.json'.format(language)
if isinstance(source, (str, pathlib.Path)):
source = pathlib.Path(source)
with source.open() as fh:
source = json.load(fh)
self._ngrams = dict()
for order in range(min_order, max_order+1):
ngram = NGramScore(order=order, language=language)
self._ngrams[ngram.order] = ngram
def __str__(self):
return "<MultiNGramScore ngrams={}>".format(self.ngrams)
@property
def ngrams(self):
return self._ngrams
def score(self, text):
return {"score-%d" % ngram.order: ngram.score(text) for ngram in self.ngrams.values()}
def main():
x = MultiNGramScore()
#x = NGramScore()
print(x)
sys.exit(0)
if __name__ == "__main__":
main()
```
#### File: pysgrs/tests/test_alphabet_specific.py
```python
import sys
import unittest
from pysgrs import alphabets
from pysgrs.tests.test_alphabet import TestAlphabet
from pysgrs import errors
class TestPolybeAlphabet(TestAlphabet, unittest.TestCase):
alphabet = alphabets.PolybeAlphabet()
class TestStringAlphabet(TestAlphabet):
def test_parser_with_size_hint(self):
for message in self.messages:
for parsed in self.alphabet.parse(message[0], max_length=len(message[1])):
if message[1] == parsed:
break
else:
raise errors.IllegalAlphabetOperation("Cannot decode message '{}' from '{}'".format(
message[1], message[0]
))
def test_parser_without_size_hint(self):
for message in self.messages:
for parsed in self.alphabet.parse(message[0]):
if message[1] == parsed:
break
else:
raise errors.IllegalAlphabetOperation("Cannot decode message '{}' from '{}'".format(
message[1], message[0]
))
class TestMorseAlphabet(TestStringAlphabet, unittest.TestCase):
alphabet = alphabets.MorseAlphabet()
messages = [
('******-***-**---', "HELLO"),
('*--*-*--*', "ACME"),
('*---**--*', "JEAN"),
]
def test_is_monotonic(self):
self.assertFalse(self.alphabet.is_monotonic)
def test_is_index_size_constant(self):
self.assertFalse(self.alphabet.is_index_size_constant)
def test_index_size(self):
self.assertEqual(1, self.alphabet.index_min_size)
self.assertEqual(5, self.alphabet.index_max_size)
def test_index_symbols(self):
self.assertEqual({"*", "-"}, self.alphabet.index_symbols)
class TestBaconAlphabet(TestStringAlphabet, unittest.TestCase):
alphabet = alphabets.BaconAlphabet()
messages = [
('aabbbaabaaababaababaabbab', "HELLO"),
('aaaaaaaabaababbaabaa', "ACME"),
('abaaaaabaaaaaaaabbaa', "IEAN"),
]
def test_index_size(self):
self.assertEqual(5, self.alphabet.index_min_size)
self.assertEqual(5, self.alphabet.index_max_size)
def test_index_symbols(self):
self.assertEqual({"a", "b"}, self.alphabet.index_symbols)
def main():
unittest.main()
sys.exit(0)
if __name__ == "__main__":
main()
```
#### File: pysgrs/tests/test_cipher_substitution.py
```python
import sys
import unittest
from pysgrs.tests.test_cipher import TestStreamCipher
from pysgrs import alphabets
from pysgrs import ciphers
class TestIdentityStreamCipher(TestStreamCipher, unittest.TestCase):
cipher = ciphers.RotationCipher(offset=0)
ciphertexts = TestStreamCipher.plaintexts
class TestRotationStreamCipher(TestStreamCipher, unittest.TestCase):
cipher = ciphers.RotationCipher(offset=7)
ciphertexts = [
"HIJKLMNOPQRSTUVWXYZABCDEFG",
"GFEDCBAZYXWVUTSRQPONMLKJIH",
"AOLXBPJRIYVDUMVEQBTWZVCLYAOLSHGFKVN",
"DHSAGIHKUFTWOMVYXBPJRQPNZCLE",
"QPCLKMVEUFTWONYHIZXBPJRDHSAG",
"NSPIQVJRZXBPGUFTWOAVCLEKDHYM",
"ZW<KEY>",
"OVDCLEPUNSFXBPJRKHMAGLIYHZQBTW",
"<KEY>",
"<KEY>",
"<KEY>",
"Spcl hz pm fvb dlyl av kpl avtvyyvd. Slhyu hz pm fvb dlyl av spcl mvylcly.",
"Il dov fvb hyl huk zhf doha fvb mlls, iljhbzl aovzl dov tpuk kvu’a thaaly huk aovzl dov thaaly kvu’a tpuk.",
"Pm fvb jhuuva kv nylha aopunz, kv zthss aopunz pu h nylha dhf.",
"Dpzl tlu zwlhr iljhbzl aolf ohcl zvtlaopun av zhf; mvvsz iljhbzl aolf ohcl av zhf zvtlaopun.",
]
class TestNegativeRotationStreamCipher(TestStreamCipher, unittest.TestCase):
cipher = ciphers.RotationCipher(offset=-7)
ciphertexts = [
"TUVWXYZABCDEFGHIJKLMNOPQRS"
]
class TestCaesarStreamCipher(TestStreamCipher, unittest.TestCase):
cipher = ciphers.CaesarCipher()
ciphertexts = [
"DEFGHIJKLMNOPQRSTUVWXYZABC",
"CBAZYXWVUTSRQPONMLKJIHGFED",
"WKHTXLFNEURZQIRAMXPSVRYHUWKHODCBGRJ",
"ZDOWCEDGQBPSKIRUTXLFNMLJVYHA",
"MLYHGIRAQBPSKJUDEVTXLFNZDOWC",
"JOLEMRFNVTXLCQBPSKWRYHAGZDUI",
"VSKLQARIEODFNTXDUWCMXGJHPBYRZ",
"KR<KEY>LFNGDIWCHEUDVMXPS",
"<KEY>",
"M<KEY>BELJVSKLQARITXDUWC",
"SDFNPBERAZLWKILYHGRCHQOLTXRUMXJV",
"Olyh dv li brx zhuh wr glh wrpruurz. Ohduq dv li brx zhuh wr olyh iruhyhu.",
"Eh zkr brx duh dqg vdb zkdw brx ihho, ehfdxvh wkrvh zkr plqg grq’w pdwwhu dqg wkrvh zkr pdwwhu grq’w plqg.",
"Li brx fdqqrw gr juhdw wklqjv, gr vpdoo wklqjv lq d juhdw zdb.",
"Zlvh phq vshdn ehfdxvh wkhb kdyh vrphwklqj wr vdb; irrov ehfdxvh wkhb kdyh wr vdb vrphwklqj.",
"Jdjd Jrxjrx Jrxjrx Gdgd",
"Txdwuh mrxuqdxa krvwlohv vrqw soxv d fudlqguh txh plooh edlrqqhwwhv.", # This one is a bit ironic!
]
class TestReversedStreamCipher(TestStreamCipher, unittest.TestCase):
cipher = ciphers.ReversedCipher()
ciphertexts = [
"ZYXWVUTSRQPONMLKJIHGFEDCBA"
]
class TestAlphabetStreamCipher(TestStreamCipher, unittest.TestCase):
cipher = ciphers.AlphabetCipher(
alphabet=alphabets.StringAlphabet("ABCDEFGHIJKLMNOPQRSTUVWXYZ",
indices="DEFGHIJKLMNOPQRSTUVWXYZABC")
)
ciphertexts = TestCaesarStreamCipher.ciphertexts
class TestPermutationIdentityStreamCipher(TestStreamCipher, unittest.TestCase):
cipher = ciphers.PermutationCipher()
ciphertexts = TestStreamCipher.plaintexts
class TestPermutationStreamCipher(TestStreamCipher, unittest.TestCase):
cipher = ciphers.PermutationCipher(
[
10, 24, 8, 18, 15, 13, 1, 25, 9,
22, 20, 6, 2, 0, 5, 3, 12, 21,
19, 14, 16, 11, 7, 4, 23, 17
]
)
ciphertexts = [
"KYISPNBZJWUGCAFDMVTOQLHEXR"
]
class TestPermutationStreamCipherRandom(TestStreamCipher, unittest.TestCase):
cipher = ciphers.PermutationCipher(auto=True)
ciphertexts = []
class TestPermutationStreamCipherIdentity(TestStreamCipher, unittest.TestCase):
cipher = ciphers.PermutationCipher()
ciphertexts = TestStreamCipher.plaintexts
class TestAffineStreamCipher(TestStreamCipher, unittest.TestCase):
cipher = ciphers.AffineCipher()
ciphertexts = [
"INSXCHMRWBGLQVAFKPUZEJOTYD"
]
def main():
unittest.main()
sys.exit(0)
if __name__ == "__main__":
main()
```
#### File: pysgrs/tests/test_cipher_vigenere.py
```python
import sys
import unittest
from pysgrs.tests.test_cipher import TestStreamCipher
from pysgrs import ciphers
class TestVigenereStreamCipherSmallKey(TestStreamCipher, unittest.TestCase):
cipher = ciphers.VigenereCipher(key="ABC")
ciphertexts = [
"ACEDFHGIKJLNMOQPRTSUWVXZYA",
"ZZZWWWTTTQQQNNNKKKHHHEEEBB",
"TIGQVKCLDRPYNGQXKWMQUOWGRUJEMCZZFOH",
"WBNTADAEPYNRHGQRRWIDMJJISWGX",
"JJXEEHOYPYNRHHTACUQVKCLYAMVZ",
"GMKBKQCLUQVKZOAMQJTPXEYFWBTF",
"SQJIOZOGDLBEKRWASVZKWDHGMZXOX",
"HPYVFZIOILZSUJEKECFUBECTATLUNR",
"<KEY>",
"J<KEY>HUPIKNYQFRWASVZ",
"PBEKNABPZWJVHGKVFFOAGNMKQVQRKWGT",
"Ljxe bu ig aov yesg tp fif vonqrsqw. Mgasp at kf zqu xgrf vo mkvf hosgvft.",
"Bf yhp aov crf cne uaz yhbv ypw ffgl, cgcbwsf vhpue xjo nkne foo’v mbvtft aof tiqsf yhp oauves foo’v mjpd.",
"Ig aov eaopou fo htebv tiknhu, dp umbnl ujiois jp a htebv wba.",
"Wjue ngn trebm bfeavue ujez jawg spoeujioi tp uaz; hopns cgcbwsf vhfa hbxe uq sba spoeujioi.",
"Gbia Hquhqu Hquhqu Ecdb"
]
class TestVigenereStreamCipherMediumKey(TestStreamCipher, unittest.TestCase):
cipher = ciphers.VigenereCipher(key="NATURELLEMENT")
ciphertexts = [
"NBVXVJRSMVOYFAOIKIWEFZIBLS",
"MYQQMYEDVCTBGZLDDZLRQIPGOT",
"GHXKLMNVFDSJGSOQDLQADSHIEMUEEUQCOZK",
"JAENQFLORKQCASOKKLMNVNUKFORX",
"WIOYUJZIRKQCATRTVJUFTGWANEGZ",
"TLBVASNVWCYVSAYFJYXZGIJHJTEF",
"FPACEBZQFXEPDDUTLKDUFHSIZRIOP",
"UOPPVBTYKXCDNVCDXRJEKINVNLWUFJ",
"GHXZZZPMSJMAZJISUIHDUYYTDNVCDFP",
"WAVEUEHDPAZRFLBBAJTSTRJSSJHAKNQ",
"CAVEDCMZBIMGASIOYUSKPRXMDNBRCOXW",
"Yioy rw tq cay jxee mi ump esysekbw. Eyrvy lw uj lhh wxlv xz wmhi sheeoyi.",
"Oe pbf czf edi ngq sts nlle cay sxrl, uytefdi flblr wai dmyo har’g fntmyi eyo xtsfx jhh grxepv psa’m zigx.",
"Vf ril glyrax qh trxuk xstrsw, qh fmtfc xstrsw vg n gkyrx hlc.",
"Jily diy dtqex urctoji esik lnor shgvxstrs xb lny; yifpd mioehlr tayp llgi fs ftl shgvxstrs."
]
def main():
unittest.main()
sys.exit(0)
if __name__ == "__main__":
main()
```
#### File: pysgrs/toolbox/arithmetic.py
```python
import sys
import math
from pysgrs import errors
from pysgrs.settings import settings
class Arithmetic:
pass
class ModularArithmetic(Arithmetic):
@staticmethod
def gcd(a, b):
# https://en.wikipedia.org/wiki/Greatest_common_divisor
return math.gcd(a, b)
@staticmethod
def lcm(a, b):
# https://en.wikipedia.org/wiki/Least_common_multiple
return abs(a * b) // math.gcd(a, b)
@staticmethod
def egcd(a, b):
# https://en.wikipedia.org/wiki/Extended_Euclidean_algorithm
if a == 0:
return b, 0, 1
else:
g, y, x = ModularArithmetic.egcd(b % a, a)
return g, x - (b // a) * y, y
@staticmethod
def modinv(a, m):
# https://en.wikipedia.org/wiki/Modular_multiplicative_inverse
try:
# Only Python 3.8+
return pow(a, -1, m)
except ValueError:
g, x, y = ModularArithmetic.egcd(a, m)
if g != 1:
raise errors.IllegalParameter('Modular inverse does not exist for {} mod {}'.format(a, m))
else:
return x % m
@staticmethod
def factor(a):
import sympy
return sympy.factorint(a)
@staticmethod
def is_prime(a):
import sympy
return sympy.isprime(a)
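# Worked example (not part of the original module): 9 is the modular inverse of 3
# modulo 26 because 3 * 9 = 27 = 1 (mod 26) -- the arithmetic used to invert the
# multiplicative key of an affine cipher over a 26-letter alphabet.
def _example_modinv():
    inv = ModularArithmetic.modinv(3, 26)
    assert (3 * inv) % 26 == 1
    return inv                             # -> 9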
def main():
sys.exit(0)
if __name__ == "__main__":
main()
```
#### File: pysgrs/toolbox/shaper.py
```python
import sys
from collections.abc import Iterable
import numpy as np
import pandas as pd
from pysgrs import errors
from pysgrs.settings import settings
class Shaper:
@staticmethod
def get_shapes(n, shape=None, score=None):
def _score(x):
return (((1/2 + x["padding"])/n)**3)*(1 + x["shape_diff"]**4)
score = score or _score
# Explore:
m = np.sqrt(n)
mmin = int(np.floor(m))
mmax = int(np.ceil(m))
shapes = [
{"id": "min-square", "shape": (mmin, mmin)},
{"id": "opt-rect-1", "shape": (mmin, mmax)},
{"id": "opt-rect-2", "shape": (mmax, mmin)},
{"id": "max-square", "shape": (mmax, mmax)},
]
if shape:
modekey = "user"
shapes.append({"id": modekey, "shape": shape})
else:
modekey = "auto"
for i in range(2, n):
shapes.append({"id": "rect-{:d}".format(i), "shape": (i, int(np.ceil(n/i)))})
df = pd.DataFrame(shapes)
# Arrange:
df["size"] = df["shape"].apply(np.prod)
df["padding"] = df["size"] - n
df["shape_diff"] = df["shape"].apply(lambda x: np.abs(x[0] - x[1]))
df["score"] = df.apply(score, axis=1)
df = df.set_index("id")
df = df.sort_values(["score", "padding", "shape_diff"])
df.loc["auto", :] = df.loc[(df["score"] > 0) & (df.index.str.contains("-square|-rect")), :].iloc[0, :]
df = df.sort_values(["score", "padding", "shape_diff"])
settings.logger.debug("Shaper: {}={}".format(modekey, df.loc[modekey].to_dict()))
return df
@staticmethod
def pad(s, n, padding=" "):
m = n - len(s)
if m >= 0:
settings.logger.debug("Shaper: {}-pad string of length {} with '{}'".format(m, len(s), padding))
return s + padding*m
else:
raise errors.IllegalParameter(
"Final size (max_ngram={}) must be greater or equal to string length ({})".format(n, len(s)))
@staticmethod
def to_matrix(s, shape=None, mode="auto", padding=" ", row_separator="\n"):
if isinstance(s, str):
if shape:
s = s.replace(row_separator, "")
if row_separator in s:
x = s.split(row_separator)
x[-1] = Shaper.pad(x[-1], len(x[0]), padding=padding)
x = [list(t) for t in x]
if not all([len(s) == len(x[0]) for s in x]):
raise errors.IllegalParameter(
"All rows must have the same length unless the last which may be padded")
else:
shape = shape or Shaper.get_shapes(len(s), shape=shape).loc[mode, "shape"]
n = np.prod(shape)
s = Shaper.pad(s, n, padding=padding)
x = list(s)
x = np.array(x)
elif isinstance(s, Iterable):
x = np.array(s)
else:
raise errors.IllegalParameter("String or array expected, received {} instead".format(type(s)))
if len(x.shape) < 2 or shape:
x = x.reshape(shape)
settings.logger.debug("Shaper: {} of size {} shaped to {}-matrix.".format(type(s), len(s), x.shape))
return x
@staticmethod
def to_vector(x):
return np.array(x).flatten()
@staticmethod
def to_str(x):
return "".join(Shaper.to_vector(x))
def main():
sys.exit(0)
if __name__ == "__main__":
main()
``` |
{
"source": "jlandgre/debug",
"score": 3
} |
#### File: jlandgre/debug/debug.py
```python
import pandas as pd
import numpy as np
import datetime as dt
#Debug library functions
def init():
data = {'Desc':[], 'colname':[], 'size':[], 'dtype_string':[], 'dtype_int':[],
'dtype_float':[], 'isnull':[], 'notnull':[], 'Desc2':[], 'Val2':[], 'time':[]}
dfDebug = pd.DataFrame(data=data)
#Trick Pandas into dtyping count columns as integer
dfDebug.loc[0,:] = ['Dummy_val','',0,0,0,0,0,0,'','',0]
lst_count_cols = ['size','dtype_string', 'dtype_int', 'dtype_float', 'isnull', 'notnull']
dfDebug[lst_count_cols] = dfDebug[lst_count_cols].astype('int')
return dfDebug
def CountDTypeString(df, col):
return df.loc[df[col].apply(lambda x: isinstance(x, str)), col].size
def CountDTypeInt(df, col):
return int(df.loc[df[col].apply(lambda x: isinstance(x, int)), col].size)
def CountDTypeFloat(df, col):
return int(df.loc[df[col].apply(lambda x: isinstance(x, float)), col].size)
def CountNull(df, col):
return int(df.loc[df[col].isnull(), col].size)
def CountNotNull(df, col):
return int(df.loc[~df[col].isnull(), col].size)
#Add a new row to dfDebug
def loginfo(dfDebug, logtype, desc, df=None, col='', desc2='', val2=''):
#Construct row as a list of values and append row to dfDebug
if logtype == 'colinfo':
lst = [desc, col, df[col].size, CountDTypeString(df, col), CountDTypeInt(df, col),
CountDTypeFloat(df, col), CountNull(df, col), CountNotNull(df, col), desc2, val2, '']
elif logtype == 'indexsize':
lst = [desc,'',df.index.size, '', '', '', '', '', desc2, val2, '']
elif logtype == 'time':
lst = [desc, '', '', '', '', '', '', '', desc2, val2, dt.datetime.now().strftime('%H:%M:%S.%f')]
elif logtype == 'info':
lst = [desc, '','', '','', '','', '', desc2, val2, '']
dfDebug.loc[dfDebug.index.size] = lst
#Control dtype of count columns for nicer display
if dfDebug.loc[0,'Desc'] == 'Dummy_val':
dfDebug.drop(0, axis=0, inplace=True)
dfDebug.reset_index(drop=True, inplace=True)
lst_count_cols = ['size','dtype_string', 'dtype_int', 'dtype_float', 'isnull', 'notnull']
dfDebug[lst_count_cols] = dfDebug[lst_count_cols].astype('str')
return dfDebug
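# Illustrative usage sketch (not part of the original module; df_raw and 'price' are
# hypothetical). Each call appends one row to dfDebug describing a column's dtype/null
# counts, an index size, a timestamp, or a free-form note, depending on logtype:
#   dfDebug = init()
#   dfDebug = loginfo(dfDebug, 'colinfo', 'after import', df=df_raw, col='price')
#   dfDebug = loginfo(dfDebug, 'time', 'finished cleaning')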
``` |
{
"source": "jlandgre/graphlet_plots",
"score": 3
} |
#### File: graphlet_plots/libs/graphlet_plot.py
```python
import pandas as pd
import numpy as np
#Import JDL utility modules
import sys
sys.path.append(sys.path[0])
import pd_util
import util
#graphlet_plot.py
class Graphlet():
"""
Set attributes common to Categorical and Continuous plots. Parent of
GraphletCategorical and GraphletContinuous
Attributes:
t_range [tuple - datetime format] - the datetime range of plot data
ylims [tuple - either numeric or None] - (ymin, ymax) for the Graphlet
in y-axis units. Specify either ymin or ymax to anchor Graphlet
location on y-axis of plot
spacing [tuple - numeric] - (spacing between categories, buffer between
graphlets)
data [tuple - Pandas Series] (x-data, unscaled/unmapped y-data)
IsHLine [boolean] toggles horizontal line below the graphlet on the plot
heading - [string] - optional heading to label graphlet at its top left
dot_format [tuple - mixed types] - (string Matplotlib color, integer dot
size, integer dot transparency)
Version: 8/6/20 JDL Data Delve LLC
"""
def __init__(self, t_range, spacing, ylims, data, IsHLine, heading=None, dot_format=None):
self.xdata = data[0]
self.ydata_unscaled = data[1]
self.heading = ''
if not heading is None: self.heading = heading
        if dot_format is not None:
            if dot_format[0] is not None: self.dotcolor = dot_format[0]
            if dot_format[1] is not None: self.dotsize = dot_format[1]
            if dot_format[2] is not None: self.dot_transparency = dot_format[2]
self.heading_coords = (t_range[0], (self.ymax + 1 * spacing[1])) #Label's x-y position
self.hline = None
self.ypos_hline = None
if IsHLine: self.ypos_hline = self.ymin - spacing[1]
class GraphletCategorical():
"""
Set attributes for categorical time-series plots. Map y-values to
specified categories
Child of Graphlet, which initializes common attributes between
GraphletCategorical and GraphletContinuous
Attributes:
See Parent Class docstring for its attributes
ticklabels - dictionary of categorical keys (in terms of unmapped y-data)
and values that are labels to use on the plot for data series
Methods:
CreateCombinedFlagColSeries
CalculateYLimitsCategorical
SeriesFromDFCols
Version: 8/7/20 JDL Data Delve LLC
"""
def __init__(self, t_range, spacing, ylims, data, IsHLine, ticklabels=None, heading=None, dot_format=None):
#Generate y-limits, ticklist and labels for categorical plot
lst_cats = list(set(data[1]))
ncats = len(lst_cats)
self.ymin, self.ymax = GraphletCategorical.CalculateYLimitsCategorical(ylims, ncats, spacing)
Graphlet.__init__(self, t_range, spacing, ylims, data, IsHLine, heading, dot_format)
self.ticklist, self.labels = [], []
for i, cat in zip(range(0,ncats), lst_cats):
self.ticklist.append(self.ymin+ (i * spacing[0]))
if ticklabels is not None:
self.labels.append(ticklabels[cat])
else:
self.labels.append(cat)
#Generate y-data that maps categories to ticklist values
self.ydata = pd_util.MapSerToAltVals(self.ydata_unscaled, lst_cats, self.ticklist)
def CreateCombinedFlagColSeries(lst_flags):
"""
Create Concatenated Series with mapped flag values and create dictionary of original
flag column names or labels by value.
Designed to work on columns that use 1/blank to flag time series discrete
events. This function maps multiple such columns to alternate integers and
concatenates the result into a single series. The returned dictionary decodes
how integers and original column names (or integers and user-specified labels)
match up.
Args:
lst_flags (list of 3-item or, optionally, 4-item tuples) describing one or more
series to be concatenated:
df name - name of Pandas DataFrame containing valuecol and indexcol
valuecol - column in df to serve as returned series values
indexcol - column in df to serve as returned series index
label string [optional] - Description for plot labeling
Returns:
Concatenated Series with original values mapped to integers
Dictionary of integer (keys) to original column names (values)
"""
lst_flag_series, ticklabels = [], {}
for flag, i in zip(lst_flags, range(0,len(lst_flags))):
#Default to None label then check whether it's specified
label = None
if len(flag) == 3: (df, valuecol, indexcol) = flag
if len(flag) == 4: (df, valuecol, indexcol, label) = flag
#Create a series from flag and append it to list of such series
ser = pd_util.SeriesFromDFCols(df, valuecol, indexcol, 'int64')
ser = pd_util.MapSerToAltVals(ser, [1], [i+1])
lst_flag_series.append(ser)
#Add a label to dict --either column name or user-specified string
ticklabels[i+1] = ser.name
if not label is None: ticklabels[i+1] = label
return pd.concat(lst_flag_series), ticklabels
def CalculateYLimitsCategorical(ylims, ncats, spacing):
"""
Populate y-limits for Categorical plots from either upper or lower limit
Args:
ylims [tuple; number] - (lower, upper) y-limits; one may be None
ncats [integer] - number of categories
spacing [tuple; number] - (spacing between categories, spacing between graphlets) in y-axis limits
Returns:
individual ymin and ymax (calculation overrides specified values if ylims input has both specified)
"""
if ylims[0] is not None:
ymin = ylims[0]
ymax = ymin + (ncats - 1) * spacing[0]
elif ylims[1] is not None:
ymax = ylims[1]
ymin = ymax - (ncats - 1) * spacing[0]
else:
return None, None
return ymin, ymax
def SeriesFromDFCols(df, valcol, indexcol, dtype=None):
"""
Convert two DataFrame columns into a Series as index and values
args:
df (Pandas DataFrame) - DataFrame containing valcol and indexcol
valcol (String) - name of DataFrame column to return as Series values (and name of Series)
indexcol (String) - name of DataFrame column to return as Series index
dtype (Python dtype - typically int) - optional returned dtype of Series
"""
ser = pd.Series(df[valcol].values, index=df[indexcol]).dropna()
ser.name = valcol
if not dtype is None: ser = ser.astype(dtype)
return ser
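# Illustrative usage sketch (not part of the original module). df_door/df_light and their
# columns are hypothetical 1/blank event-flag logs; the helper maps them to the values
# 1, 2, ... so they can share one categorical axis, and returns matching tick labels:
#   lst_flags = [(df_door, "door_open", "timestamp", "Door"),
#                (df_light, "light_on", "timestamp", "Light")]
#   ser, ticklabels = GraphletCategorical.CreateCombinedFlagColSeries(lst_flags)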
class GraphletContinuous():
"""
Set attributes for continuous variable graphlet
Optionally, rescale y-values for graphing
Child of Graphlet, which initializes common attributes between
GraphletCategorical and GraphletContinuous
Attributes:
See Parent Class docstring for its attributes
ticklabels - Optional dictionary of values (unscaled y units) as keys
string labels as values
scale_orig - [tuple - numeric]: Upper and lower values in unscaled y-units
for linear rescaling to plot y-axis units)
scale_scaled [tuple - numeric]: Upper and lower values in scaled y-units;
used with scale_orig for linear rescaling of y-data
Methods: None
Version: 8/6/20 JDL Data Delve LLC
"""
def __init__(self, t_range, spacing, ylims, data, IsHLine, ticklabels=None, heading=None, scale_orig=None, scale_scaled=None, dot_format=None):
self.ymin = ylims[0]
self.ymax = ylims[1]
Graphlet.__init__(self, t_range, spacing, ylims, data, IsHLine, heading, dot_format)
IsScaled = False
if (scale_orig is not None) and (scale_scaled is not None): IsScaled=True
#If specified, scale the y-values and the ticklist values
self.ydata = self.ydata_unscaled
if IsScaled:
self.ydata = pd_util.RescaleSerValues(self.ydata_unscaled, scale_orig, scale_scaled)
#Build ticklist and tick label list
if not ticklabels is None:
self.ticklist, self.labels = [], []
for k, v in ticklabels.items():
#Scale the tick value if needed
k_scaled = k
if IsScaled: k_scaled = pd_util.RescaleValue(k, scale_orig, scale_scaled)
#Add tick and label to lists; If no specified label, tick value is the label
self.ticklist.append(k_scaled)
if v is not None:
self.labels.append(v)
else:
self.labels.append(k_scaled)
``` |
{
"source": "jlandman71/cvnd-image-captioning",
"score": 3
} |
#### File: jlandman71/cvnd-image-captioning/model.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models
import numpy as np
class EncoderCNN(nn.Module):
def __init__(self, embed_size):
super(EncoderCNN, self).__init__()
resnet = models.resnet50(pretrained=True)
for param in resnet.parameters():
param.requires_grad_(False)
modules = list(resnet.children())[:-1]
self.resnet = nn.Sequential(*modules)
self.embed = nn.Linear(resnet.fc.in_features, embed_size)
def forward(self, images):
features = self.resnet(images)
features = features.view(features.size(0), -1)
features = self.embed(features)
return features
class DecoderRNN(nn.Module):
def __init__(self, embed_size, hidden_size, vocab_size, num_layers=1):
#super(DecoderRNN, self).__init__()
super().__init__()
# set class variables
self.num_layers = num_layers
self.hidden_size = hidden_size
self.embed_size = embed_size
self.vocab_size = vocab_size
# define model layers
# Embedding layer
self.embed = nn.Embedding(vocab_size, embed_size)
# LSTM layer
self.lstm = nn.LSTM(input_size=embed_size, hidden_size=hidden_size, num_layers=num_layers, batch_first=True)
# Linear layer maps hidden_size to scores of vocab_size
self.hidden2scores = nn.Linear(hidden_size, vocab_size)
def forward(self, features, captions):
# get batch size
batch_size = features.size(0)
# get embeddings for captions except last one
capt_embeds = self.embed(captions[:,:-1])
# concatenate features and embedded captions
inputs = torch.cat((features.unsqueeze(1), capt_embeds),1)
# clean out hidden state and cell state
if (torch.cuda.is_available()):
hidden = (torch.zeros(self.num_layers, batch_size, self.hidden_size).cuda(),
torch.zeros(self.num_layers, batch_size, self.hidden_size).cuda())
else:
hidden = (torch.zeros(self.num_layers, batch_size, self.hidden_size),
torch.zeros(self.num_layers, batch_size, self.hidden_size))
lstm_out, hidden = self.lstm(inputs, hidden)
# score outputs
out = self.hidden2scores(lstm_out)
# return output word scores
return out
def sample(self, inputs, states=None, max_len=20):
" accepts pre-processed image tensor (inputs) and returns predicted sentence (list of tensor ids of length max_len) "
predicted_word_ids = []
input = inputs
# clean out hidden state and cell state
if (torch.cuda.is_available()):
hidden = (torch.zeros(self.num_layers, 1, self.hidden_size).cuda(),
torch.zeros(self.num_layers, 1, self.hidden_size).cuda())
else:
hidden = (torch.zeros(self.num_layers, 1, self.hidden_size),
torch.zeros(self.num_layers, 1, self.hidden_size))
for _ in range(max_len):
lstm_out, hidden = self.lstm(input, hidden)
# score outputs
out = self.hidden2scores(lstm_out)
# get word id with max probability
_, word_id = out.max(dim=2)
word_id_int = word_id.item()
# append word id to list of predictions
predicted_word_ids.append(word_id_int)
# if predicted word is 1 (<end>) then stop
if word_id_int == 1:
break
# embedding of last word id becomes next input
input = self.embed(word_id)
return predicted_word_ids
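# Illustrative inference sketch (not part of the original file; all sizes are assumptions).
# EncoderCNN returns features of shape (batch, embed_size), while sample() expects a single
# image as (1, 1, embed_size), hence the extra unsqueeze:
#   encoder = EncoderCNN(embed_size=256)
#   decoder = DecoderRNN(embed_size=256, hidden_size=512, vocab_size=9955)
#   features = encoder(image.unsqueeze(0))              # (1, 256)
#   word_ids = decoder.sample(features.unsqueeze(1))    # greedy decoding, stops at <end> (id 1)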
``` |
{
"source": "jlandmann/glaciersat",
"score": 3
} |
#### File: glaciersat/core/gis.py
```python
import xarray as xr
import salem
import os
import pandas as pd
import numpy as np
from glaciersat.core import imagery
import logging
log = logging.getLogger(__file__)
def crop_sat_image_to_glacier(ds: xr.Dataset or imagery.SatelliteImage,
gdir_candidates: list = None,
out_dirs: list = None, grids: list = None,
min_overlap: float = 100.,
shapes: list or None = None) -> None:
"""
Crop a satellite image into glacier domains and append to existing images.
Parameters
----------
ds : xr.Dataset or glaciersat.core.imagery.SatelliteImage
Object containing the satellite image.
gdir_candidates: list of GlacierDirectories, optional
List with potential GlacierDirectories that might be included in the
scene. Mutually exclusive with `grids`. Default: None.
out_dirs: list of str, optional
List with according output directories for the case that `grids` is
given.
grids: list of salem.Grid, optional
List with salem.Grids defining a glacier region to which the data shall
be clipped. Mutually exclusive with `gdir_candidates`. Default: None.
min_overlap: float
Minimum overlap percentage of satellite image and glacier. Default:
100. (glacier must be contained fully in satellite image footprint).
shapes: list or None, optional
List of paths to shapes of glaciers. Must be in the same order like
`gdir_candidates` or `grids`, respectively. If `None` and
`gdir_candidates` is given, shapes will be retrieved from the outlines
in the glacier directory. Default: None.
Returns
-------
None.
"""
if (gdir_candidates is not None) and (grids is not None):
raise ValueError('The keywords "gdir_candidates" and "grids" are '
'mutually exclusive.')
elif (gdir_candidates is None) and (grids is None):
raise ValueError('Either of the keywords "gdir_candidates" or "grids" '
'must be given.')
elif (gdir_candidates is not None) and (grids is None):
grids = [salem.Grid.from_json(g.get_filepath('glacier_grid')) for g in
gdir_candidates]
else:
pass
if (shapes is None) and (gdir_candidates is not None):
shapes = [gdir_candidates[i].get_filepath('outlines') for i in
range(len(gdir_candidates))]
# cheap way to pre-assess whether given glacier is in the image at all
if callable(getattr(ds, 'overlaps_shape', None)) and (shapes is not None):
cand_in_img = [ds.overlaps_shape(i, percentage=min_overlap) for i in
shapes]
        if not np.array(cand_in_img).any():  # no intersection at all
log.info(
'No intersection of the satellite image and the supplied grids'
' at the given level ({}%) at all.'.format(min_overlap))
return
else: # postpone to next step (more expensive)
cand_in_img = None
# we need to load for this operation
if not isinstance(ds, (xr.Dataset, xr.DataArray)):
ds = ds.data
ds.load()
if not isinstance(ds, xr.Dataset):
ds = ds.to_dataset(name='albedo', promote_attrs=True)
for i, grid in enumerate(grids):
if (cand_in_img is not None) and (cand_in_img[i] is False):
continue
grid_ds = grid.to_dataset()
ds_glacier = grid_ds.salem.transform(ds, interp='linear')
# can be that grid is outside satellite image
# todo: what about half coverage (edge) and clouds? check with outline?
if pd.isnull(ds_glacier.to_array()).all():
# todo: log something here?
continue
if gdir_candidates is not None:
gi = gdir_candidates[i]
if gi.has_file('sat_images'):
with xr.open_dataset(gi.get_filepath('sat_images')) as exist:
exist.load()
ds_total = xr.merge([ds_glacier, exist],
combine_attrs='no_conflicts',
compat='override')
ds_total.to_netcdf(gi.get_filepath('sat_images'))
else:
ds_glacier.to_netcdf(gi.get_filepath('sat_images'))
elif out_dirs is not None:
gi = out_dirs[i]
fp = os.path.join(gi, 'sat_images.nc')
if os.path.exists(fp):
with xr.open_dataset(fp) as exist:
exist.load()
ds_total = xr.merge([ds_glacier, exist],
combine_attrs='no_conflicts',
compat='override')
ds_total.to_netcdf(fp)
else:
ds_glacier.to_netcdf(fp)
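# Illustrative usage sketch (not part of the original module; object names are assumptions):
#   img = imagery.SatelliteImage(...)       # or an xr.Dataset holding an 'albedo' variable
#   crop_sat_image_to_glacier(img, gdir_candidates=gdirs, min_overlap=80.)
# Every glacier whose outline overlaps the scene by at least 80% gets the clipped image
# merged into its existing 'sat_images' file (or a new one is written).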
```
#### File: glaciersat/tests/test_utils.py
```python
import pytest
from glaciersat.tests import requires_credentials
from glaciersat.utils import *
import configobj
@requires_credentials
def test_get_credentials():
cred = get_credentials(credfile=None)
assert isinstance(cred, configobj.ConfigObj)
cred = get_credentials(credfile='.\\.credentials')
assert isinstance(cred, configobj.ConfigObj)
def test_declutter():
input = np.zeros((7, 7), dtype=int)
input[1:6, 1:6] = 1
# erode a lot, then leave as is
res = declutter(input, 3, 1).astype(int)
desired = np.zeros((7, 7), dtype=int)
desired[2:5, 2:5] = 1
np.testing.assert_array_equal(res, desired)
# do not erode, but dilate
res = declutter(input, 1, 3).astype(int)
desired = np.ones((7, 7), dtype=int)
np.testing.assert_array_equal(res, desired)
# erode and dilate (the offset is unfortunate though)
res = declutter(input, 3, 2).astype(int)
desired = np.zeros((7, 7), dtype=int)
desired[1:5, 1:5] = 1
np.testing.assert_array_equal(res, desired)
res = declutter(input, 2, 3).astype(int)
desired = np.zeros((7, 7), dtype=int)
desired[1:, 1:] = 1
np.testing.assert_array_equal(res, desired)
``` |
{
"source": "jlandmann/xdem",
"score": 2
} |
#### File: docs/source/conf.py
```python
import os
import sys
#import xdem.version
# Allow conf.py to find the xdem module
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "../../"))
# -- Project information -----------------------------------------------------
project = 'xdem'
copyright = '2021, xdem contributors'
author = 'xdem contributors'
# The full version, including alpha/beta/rc tags
release = "0.0.1"
os.environ["PYTHON"] = sys.executable
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc", # Create the API documentation automatically
"sphinx.ext.viewcode", # Create the "[source]" button in the API to show the source code.
'matplotlib.sphinxext.plot_directive', # Render matplotlib figures from code.
"sphinx.ext.autosummary", # Create API doc summary texts from the docstrings.
"sphinx.ext.inheritance_diagram", # For class inheritance diagrams (see coregistration.rst).
"sphinx_autodoc_typehints", # Include type hints in the API documentation.
"sphinxcontrib.programoutput"
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = [
"api/modules.rst" # This is not really needed, but is created automatically by autodoc
]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static'] # Commented out as we have no custom static data
def run_apidoc(_):
"""
Make sure readthedocs finds the module.
Maybe this is not needed?
"""
from sphinx.ext.apidoc import main
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
cur_dir = os.path.abspath(os.path.dirname(__file__))
module = os.path.join(cur_dir, "../../", "xdem")
output_path = os.path.join(cur_dir, 'api/')
main(['-e', '-o', output_path, module, os.path.join(module, "version.py"), "--force"])
def setup(app):
app.connect('builder-inited', run_apidoc)
``` |
{
"source": "jlandowner/k8s-workload-convertor",
"score": 2
} |
#### File: jlandowner/k8s-workload-convertor/k8s-workload-convertor.py
```python
import ruamel
import ruamel.yaml
import fire
from functools import reduce
available_kinds = ["Deployment", "DaemonSet"]
class WorkloadConvertorCmd(object):
def validate_available_kind(self, kind=""):
if kind in available_kinds:
return True
return False
def convert(self, from_file="", from_kind="", to_kind="", to_file=""):
if not self.validate_available_kind(from_kind) or not self.validate_available_kind(to_kind):
print(f"Error: from_kind and to_kind can be in {available_kinds}")
exit(9)
if from_file == "" or to_file == "":
print("Error: from_file or to_file is not defined")
exit(9)
w = WorkloadConvertor()
w.load(from_file)
if from_kind == "Deployment" and to_kind == "DaemonSet":
w.convert_deployment_to_daemonset()
else:
print(f"Error: Convert from {from_kind} to {to_kind} is not available")
exit(1)
w.save(to_file)
class WorkloadConvertor(object):
inputData = {}
outputData = {}
yaml = ruamel.yaml.YAML()
def load(self, filename):
with open(filename) as stream:
self.inputData = self.yaml.load(stream)
def save(self, filename):
with open(filename, 'w') as stream:
self.yaml.dump(self.outputData, stream=stream)
    def convert_metadata(self):
self.outputData["metadata"] = {}
self.outputData["metadata"]["annotations"] = self.inputData["metadata"]["annotations"]
self.outputData["metadata"]["labels"] = self.inputData["metadata"]["labels"]
self.outputData["metadata"]["name"] = self.inputData["metadata"]["name"]
self.outputData["metadata"]["namespace"] = self.inputData["metadata"]["namespace"]
def convert_podSpec(self):
self.outputData["spec"] = {}
self.outputData["spec"]["selector"] = self.inputData["spec"]["selector"]
self.outputData["spec"]["template"] = self.inputData["spec"]["template"]
def remove_creationTimestamp(self):
if dict_get(self.outputData, "metadata.creationTimestamp"):
del self.outputData["metadata"]["creationTimestamp"]
if dict_get(self.outputData, "spec.template.metadata.creationTimestamp"):
del self.outputData["spec"]["template"]["metadata"]["creationTimestamp"]
def convert_deployment_to_daemonset(self):
deploy = self.inputData
ds = self.outputData
# DaemonSet Kind
ds["apiVersion"] = "apps/v1"
ds["kind"] = "DaemonSet"
# metadata & podSpec
        self.convert_metadata()
self.convert_podSpec()
# DaemonSet Strategy
if dict_get(deploy, "spec.updateStrategy.type") == "RollingUpdate":
ds["spec"]["updateStrategy"] = {}
ds["spec"]["updateStrategy"]["type"] = deploy["spec"]["strategy"]["type"]
ds["spec"]["updateStrategy"]["rollingUpdate"] = {}
ds["spec"]["updateStrategy"]["rollingUpdate"]["maxUnavailable"] = deploy["spec"]["strategy"]["rollingUpdate"]["maxUnavailable"]
if dict_get(deploy, "spec.minReadySeconds"):
ds["spec"]["minReadySeconds"] = deploy["spec"]["minReadySeconds"]
# fixup
self.remove_creationTimestamp()
def dict_get(dictionary, keys, default=None):
return reduce(lambda d, key: d.get(key, default) if isinstance(d, dict) else default, keys.split("."), dictionary)
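# Worked example (not part of the original script): dict_get walks nested keys and
# returns the default when any level is missing, e.g.
#   dict_get({"spec": {"strategy": {"type": "RollingUpdate"}}}, "spec.strategy.type") -> "RollingUpdate"
#   dict_get({"spec": {}}, "spec.minReadySeconds") -> None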
if __name__ == '__main__':
fire.Fire(WorkloadConvertorCmd)
``` |
{
"source": "jlandrum/theheck",
"score": 2
} |
#### File: tests/rules/test_docker_image_being_used_by_container.py
```python
from theheck.rules.docker_image_being_used_by_container import match, get_new_command
from theheck.types import Command
def test_match():
err_response = """Error response from daemon: conflict: unable to delete cd809b04b6ff (cannot be forced) - image is being used by running container e5e2591040d1"""
assert match(Command('docker image rm -f cd809b04b6ff', err_response))
def test_not_match():
err_response = 'bash: docker: command not found'
assert not match(Command('docker image rm -f cd809b04b6ff', err_response))
def test_not_docker_command():
err_response = """Error response from daemon: conflict: unable to delete cd809b04b6ff (cannot be forced) - image is being used by running container e5e2591040d1"""
assert not match(Command('git image rm -f cd809b04b6ff', err_response))
def test_get_new_command():
err_response = """
Error response from daemon: conflict: unable to delete cd809b04b6ff (cannot be forced) - image
is being used by running container e5e2591040d1
"""
result = get_new_command(Command('docker image rm -f cd809b04b6ff', err_response))
expected = 'docker container rm -f e5e2591040d1 && docker image rm -f cd809b04b6ff'
assert result == expected
```
#### File: tests/rules/test_git_rm_local_modifications.py
```python
import pytest
from theheck.rules.git_rm_local_modifications import match, get_new_command
from theheck.types import Command
@pytest.fixture
def output(target):
return ('error: the following file has local modifications:\n {}\n(use '
'--cached to keep the file, or -f to force removal)').format(target)
@pytest.mark.parametrize('script, target', [
('git rm foo', 'foo'),
('git rm foo bar', 'bar')])
def test_match(output, script, target):
assert match(Command(script, output))
@pytest.mark.parametrize('script', ['git rm foo', 'git rm foo bar', 'git rm'])
def test_not_match(script):
assert not match(Command(script, ''))
@pytest.mark.parametrize('script, target, new_command', [
('git rm foo', 'foo', ['git rm --cached foo', 'git rm -f foo']),
('git rm foo bar', 'bar', ['git rm --cached foo bar', 'git rm -f foo bar'])])
def test_get_new_command(output, script, target, new_command):
assert get_new_command(Command(script, output)) == new_command
```
#### File: tests/rules/test_workon_doesnt_exists.py
```python
import pytest
from theheck.rules.workon_doesnt_exists import match, get_new_command
from theheck.types import Command
@pytest.fixture(autouse=True)
def envs(mocker):
return mocker.patch(
'theheck.rules.workon_doesnt_exists._get_all_environments',
return_value=['theheck', 'code_view'])
@pytest.mark.parametrize('script', [
'workon tehheck', 'workon code-view', 'workon new-env'])
def test_match(script):
assert match(Command(script, ''))
@pytest.mark.parametrize('script', [
'workon theheck', 'workon code_view', 'work on tehheck'])
def test_not_match(script):
assert not match(Command(script, ''))
@pytest.mark.parametrize('script, result', [
('workon tehheck', 'workon theheck'),
('workon code-view', 'workon code_view'),
('workon zzzz', 'mkvirtualenv zzzz')])
def test_get_new_command(script, result):
assert get_new_command(Command(script, ''))[0] == result
```
#### File: theheck/tests/test_ui.py
```python
import pytest
from itertools import islice
from theheck import ui
from theheck.types import CorrectedCommand
from theheck import const
@pytest.fixture
def patch_get_key(monkeypatch):
def patch(vals):
vals = iter(vals)
monkeypatch.setattr('theheck.ui.get_key', lambda: next(vals))
return patch
def test_read_actions(patch_get_key):
patch_get_key([
# Enter:
'\n',
# Enter:
'\r',
# Ignored:
'x', 'y',
# Up:
const.KEY_UP, 'k',
# Down:
const.KEY_DOWN, 'j',
# Ctrl+C:
const.KEY_CTRL_C, 'q'])
assert (list(islice(ui.read_actions(), 8))
== [const.ACTION_SELECT, const.ACTION_SELECT,
const.ACTION_PREVIOUS, const.ACTION_PREVIOUS,
const.ACTION_NEXT, const.ACTION_NEXT,
const.ACTION_ABORT, const.ACTION_ABORT])
def test_command_selector():
selector = ui.CommandSelector(iter([1, 2, 3]))
assert selector.value == 1
selector.next()
assert selector.value == 2
selector.next()
assert selector.value == 3
selector.next()
assert selector.value == 1
selector.previous()
assert selector.value == 3
@pytest.mark.usefixtures('no_colors')
class TestSelectCommand(object):
@pytest.fixture
def commands_with_side_effect(self):
return [CorrectedCommand('ls', lambda *_: None, 100),
CorrectedCommand('cd', lambda *_: None, 100)]
@pytest.fixture
def commands(self):
return [CorrectedCommand('ls', None, 100),
CorrectedCommand('cd', None, 100)]
def test_without_commands(self, capsys):
assert ui.select_command(iter([])) is None
assert capsys.readouterr() == ('', 'No hecks given\n')
def test_without_confirmation(self, capsys, commands, settings):
settings.require_confirmation = False
assert ui.select_command(iter(commands)) == commands[0]
assert capsys.readouterr() == ('', const.USER_COMMAND_MARK + 'ls\n')
def test_without_confirmation_with_side_effects(
self, capsys, commands_with_side_effect, settings):
settings.require_confirmation = False
assert (ui.select_command(iter(commands_with_side_effect))
== commands_with_side_effect[0])
assert capsys.readouterr() == ('', const.USER_COMMAND_MARK + 'ls (+side effect)\n')
def test_with_confirmation(self, capsys, patch_get_key, commands):
patch_get_key(['\n'])
assert ui.select_command(iter(commands)) == commands[0]
assert capsys.readouterr() == (
'', const.USER_COMMAND_MARK + u'\x1b[1K\rls [enter/↑/↓/ctrl+c]\n')
def test_with_confirmation_abort(self, capsys, patch_get_key, commands):
patch_get_key([const.KEY_CTRL_C])
assert ui.select_command(iter(commands)) is None
assert capsys.readouterr() == (
'', const.USER_COMMAND_MARK + u'\x1b[1K\rls [enter/↑/↓/ctrl+c]\nAborted\n')
def test_with_confirmation_with_side_effct(self, capsys, patch_get_key,
commands_with_side_effect):
patch_get_key(['\n'])
assert (ui.select_command(iter(commands_with_side_effect))
== commands_with_side_effect[0])
assert capsys.readouterr() == (
'', const.USER_COMMAND_MARK + u'\x1b[1K\rls (+side effect) [enter/↑/↓/ctrl+c]\n')
def test_with_confirmation_select_second(self, capsys, patch_get_key, commands):
patch_get_key([const.KEY_DOWN, '\n'])
assert ui.select_command(iter(commands)) == commands[1]
stderr = (
u'{mark}\x1b[1K\rls [enter/↑/↓/ctrl+c]'
u'{mark}\x1b[1K\rcd [enter/↑/↓/ctrl+c]\n'
).format(mark=const.USER_COMMAND_MARK)
assert capsys.readouterr() == ('', stderr)
```
#### File: theheck/rules/cd_cs.py
```python
def match(command):
if command.script_parts[0] == 'cs':
return True
def get_new_command(command):
    return 'cd' + command.script[2:]
priority = 900
```
#### File: theheck/rules/composer_not_command.py
```python
import re
from theheck.utils import replace_argument, for_app
@for_app('composer')
def match(command):
return (('did you mean this?' in command.output.lower()
or 'did you mean one of these?' in command.output.lower())) or (
"install" in command.script_parts and "composer require" in command.output.lower()
)
def get_new_command(command):
if "install" in command.script_parts and "composer require" in command.output.lower():
broken_cmd, new_cmd = "install", "require"
else:
broken_cmd = re.findall(r"Command \"([^']*)\" is not defined", command.output)[0]
new_cmd = re.findall(r'Did you mean this\?[^\n]*\n\s*([^\n]*)', command.output)
if not new_cmd:
new_cmd = re.findall(r'Did you mean one of these\?[^\n]*\n\s*([^\n]*)', command.output)
new_cmd = new_cmd[0].strip()
return replace_argument(command.script, broken_cmd, new_cmd)
```
#### File: theheck/rules/gulp_not_task.py
```python
import re
import subprocess
from theheck.utils import replace_command, for_app, cache
@for_app('gulp')
def match(command):
return 'is not in your gulpfile' in command.output
@cache('gulpfile.js')
def get_gulp_tasks():
proc = subprocess.Popen(['gulp', '--tasks-simple'],
stdout=subprocess.PIPE)
return [line.decode('utf-8')[:-1]
for line in proc.stdout.readlines()]
def get_new_command(command):
wrong_task = re.findall(r"Task '(\w+)' is not in your gulpfile",
command.output)[0]
return replace_command(command, wrong_task, get_gulp_tasks())
```
#### File: theheck/rules/has_exists_script.py
```python
import os
from theheck.specific.sudo import sudo_support
@sudo_support
def match(command):
return command.script_parts and os.path.exists(command.script_parts[0]) \
and 'command not found' in command.output
@sudo_support
def get_new_command(command):
return u'./{}'.format(command.script)
```
#### File: theheck/rules/python_module_error.py
```python
import re
from theheck.shells import shell
MISSING_MODULE = r"ModuleNotFoundError: No module named '([^']+)'"
def match(command):
return "ModuleNotFoundError: No module named '" in command.output
def get_new_command(command):
missing_module = re.findall(MISSING_MODULE, command.output)[0]
return shell.and_("pip install {}".format(missing_module), command.script)
```
#### File: theheck/rules/remove_shell_prompt_literal.py
```python
import re
def match(command):
return (
"$: command not found" in command.output
and re.search(r"^[\s]*\$ [\S]+", command.script) is not None
)
def get_new_command(command):
return command.script.lstrip("$ ")
```
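Worth noting: `str.lstrip("$ ")` removes any run of leading `$` and space characters rather than a literal prefix, which is exactly what this rule wants for a command pasted together with its prompt. A tiny illustration with made-up input:
```python
# Illustration only: a command pasted together with its shell prompt.
pasted = "$ git status"
print(pasted.lstrip("$ "))  # git status
```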
#### File: theheck/shells/powershell.py
```python
from subprocess import Popen, PIPE
from ..utils import DEVNULL
from .generic import Generic, ShellConfiguration
class Powershell(Generic):
friendly_name = 'PowerShell'
def app_alias(self, alias_name):
return 'function ' + alias_name + ' {\n' \
' $history = (Get-History -Count 1).CommandLine;\n' \
' if (-not [string]::IsNullOrWhiteSpace($history)) {\n' \
' $heck = $(theheck $args $history);\n' \
' if (-not [string]::IsNullOrWhiteSpace($heck)) {\n' \
' if ($heck.StartsWith("echo")) { $heck = $heck.Substring(5); }\n' \
' else { iex "$heck"; }\n' \
' }\n' \
' }\n' \
' [Console]::ResetColor() \n' \
'}\n'
def and_(self, *commands):
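        # e.g. (illustrative, not from the original source):
        #   and_('cd /tmp', 'ls') -> '(cd /tmp) -and (ls)'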
return u' -and '.join('({0})'.format(c) for c in commands)
def how_to_configure(self):
return ShellConfiguration(
content=u'iex "$(theheck --alias)"',
path='$profile',
reload='. $profile',
can_configure_automatically=False)
def _get_version(self):
"""Returns the version of the current shell"""
try:
proc = Popen(
['powershell.exe', '$PSVersionTable.PSVersion'],
stdout=PIPE,
stderr=DEVNULL)
version = proc.stdout.read().decode('utf-8').rstrip().split('\n')
return '.'.join(version[-1].split())
except IOError:
proc = Popen(['pwsh', '--version'], stdout=PIPE, stderr=DEVNULL)
return proc.stdout.read().decode('utf-8').split()[-1]
``` |
{
"source": "jlane9/eve-dynamodb",
"score": 2
} |
#### File: eve-dynamodb/eve_dynamodb/dynamodb.py
```python
import decimal
import itertools
from typing import Union
import boto3
from botocore.exceptions import ClientError as BotoCoreClientError
from bson import decimal128, ObjectId
from bson.dbref import DBRef
from eve.io.base import DataLayer
from eve.utils import ParsedRequest, config, debug_error_message, str_to_date, validate_filters
from flask import Flask, abort
import simplejson as json
from eve_dynamodb.expression import build_attr_expression, build_key_expression
"""
String/Set
Boolean
Null
Number/Set
Binary/Set
Map
List
"""
class DynamoDBResult:
"""DynamoDB search result
"""
def __init__(self, result: dict, **_kwargs):
"""Initialize DynamoDB result
:param dict result: DynamoDB response
:param dict _kwargs: Extra arguments
"""
self._result = result
def __iter__(self):
"""Return next item from result
:return:
"""
if 'Items' not in self._result:
return
for item in self._result['Items']:
yield item
def count(self, **_kwargs) -> int:
"""Return a count of all items
:param dict _kwargs: Extra arguments
:return: Count of all items
:rtype: int
"""
        return self._result['Count'] if 'Count' in self._result else 0
class DynamoDB(DataLayer):
"""DynamoDB data layer access for Eve REST API
"""
serializers = {
'boolean': lambda v: {"1": True, "true": True, "0": False, "false": False}[str(v).lower()],
'datetime': str_to_date,
'dbref': lambda value: DBRef(
value['$col'] if '$col' in value else value['$ref'],
value['$id'],
value['$db'] if '$db' in value else None
) if value is not None else None,
'decimal': lambda value: decimal128.Decimal128(decimal.Decimal(str(value))) if value is not None else None,
'float': lambda value: float(value) if value is not None else None,
'integer': lambda value: int(value) if value is not None else None,
'number': lambda val: json.loads(val) if val is not None else None,
'objectid': lambda value: ObjectId(value) if value else None
}
def init_app(self, app: Flask):
"""Initialize DynamoDB
:param Flask app: Flask application
"""
self.driver = boto3.resource('dynamodb')
def find(self, resource: str, req: ParsedRequest = None, sub_resource_lookup: dict = None,
perform_count: bool = True) -> tuple:
"""Retrieves a set of documents matching a given request
:param str resource: Resource being accessed
:param ParsedRequest req: Contains all the constraints that must be fulfilled in order to satisfy the request
:param dict sub_resource_lookup: Sub-resource lookup from the endpoint url
:param bool perform_count: Whether a document count should be performed and returned to the client
:return: Result from DynamoDB search and count
:rtype: tuple
"""
args = dict()
spec = self._convert_where_request_to_dict(req)
bad_filter = validate_filters(spec, resource)
is_soft_delete = config.DOMAIN[resource]["soft_delete"]
if req and req.max_results:
args["limit"] = req.max_results
if req and req.page > 1:
args["skip"] = (req.page - 1) * req.max_results
if bad_filter:
abort(400, bad_filter)
if sub_resource_lookup:
spec = self.combine_queries(spec, sub_resource_lookup)
if is_soft_delete and not (req and req.show_deleted and self.query_contains_field(spec, config.DELETED)):
spec = self.combine_queries(spec, {config.DELETED: {"$ne": True}})
client_projection = self._client_projection(req)
data_source, spec, projection, sort = self._datasource_ex(resource, spec, client_projection, None)
if req and req.if_modified_since:
spec[config.LAST_UPDATED] = {"$gt": req.if_modified_since}
if len(spec) > 0:
args["filter"] = spec
if projection:
args["projection"] = projection
try:
table = self.driver.Table(data_source)
result = DynamoDBResult(table.scan(**args)) # TODO: Finish this
return result, result.count() if perform_count else None
except BotoCoreClientError as e:
abort(500, description=debug_error_message(e.response['Error']['Message']))
def aggregate(self, resource: str, pipeline: dict, options: dict):
"""Perform an aggregation on the resource data source and returns the result
:param str resource: Resource being accessed
:param dict pipeline: Aggregation pipeline to be executed
:param dict options: Aggregation options to be considered
"""
        # TODO: Finish this; aggregation may not be supported by DynamoDB. Also finish the docstrings
raise NotImplementedError
def find_one(self, resource: str, req: ParsedRequest, check_auth_value: bool = True,
force_auth_field_projection: bool = False, **lookup) -> dict:
"""Retrieves a single document
:param str resource: Resource being accessed
:param ParsedRequest req: Contains all the constraints that must be fulfilled in order to satisfy the request
:param bool check_auth_value: Boolean flag indicating if the find operation should consider user-restricted
resource access. Defaults to ``True``
:param bool force_auth_field_projection: Boolean flag indicating if the find operation should always include
the user-restricted resource access field (if configured). Defaults to ``False``
:param dict lookup: Lookup query
:return: A single document
:rtype: dict
"""
client_projection = self._client_projection(req)
is_soft_delete = config.DOMAIN[resource]["soft_delete"]
show_deleted = req and req.show_deleted
query_contains_deleted = self.query_contains_field(lookup, config.DELETED)
data_source, filter_, projection, _ = self._datasource_ex(
resource,
lookup,
client_projection,
check_auth_value=check_auth_value,
force_auth_field_projection=force_auth_field_projection,
)
if is_soft_delete and not show_deleted and not query_contains_deleted:
filter_ = self.combine_queries(filter_, {config.DELETED: {"$ne": True}})
try:
table = self.driver.Table(data_source)
result = table.get_item(Key=lookup) # TODO: Add projection and pass filter, change to scan or query?
return result['Item'] if 'Item' in result else None
except BotoCoreClientError as e:
abort(500, description=debug_error_message(e.response['Error']['Message']))
"""
# Here, we feed pymongo with `None` if projection is empty.
return (
self.pymongo(resource).db[datasource].find_one(filter_, projection or None)
)
"""
def find_one_raw(self, resource: str, **lookup) -> dict:
"""Retrieves a single raw document
:param str resource: Resource name
:param dict lookup: Lookup query
:return: A single document
:rtype: dict
"""
id_field = config.DOMAIN[resource]["id_field"]
_id = lookup.get(id_field)
data_source, filter_, _, _ = self._datasource_ex(resource, {id_field: _id}, None)
try:
table = self.driver.Table(data_source)
result = table.get_item(Key=filter_)
return result['Item'] if 'Item' in result else None
except BotoCoreClientError as e:
abort(500, description=debug_error_message(e.response['Error']['Message']))
def find_list_of_ids(self, resource: str, ids: list, client_projection=None) -> DynamoDBResult:
"""Retrieves a list of documents from the collection given by `resource`, matching the given list of ids
:param str resource: Resource name
:param list ids: A list of ids corresponding to the documents to retrieve
:param client_projection: A specific projection to use
:return: A list of documents matching the ids in `ids` from the collection specified in `resource`
:rtype: DynamoDBResult
"""
id_field = config.DOMAIN[resource]["id_field"]
query = {"$or": [{id_field: id_} for id_ in ids]}
data_source, filter_, projection, _ = self._datasource_ex(
resource, query=query, client_projection=client_projection
)
try:
table = self.driver.Table(data_source)
return DynamoDBResult(table.scan()) # TODO finish this
except BotoCoreClientError as e:
abort(500, description=debug_error_message(e.response['Error']['Message']))
def insert(self, resource: str, doc_or_docs: Union[dict, list]) -> list:
"""Inserts a document into a resource collection/table
:param str resource: Resource being accessed
:param (Union[dict, list]) doc_or_docs: JSON document or list of JSON documents to be added to the database
:return: A list of ids
:rtype: list
"""
id_field = config.DOMAIN[resource]["id_field"]
data_source, _, _, _ = self._datasource_ex(resource)
if isinstance(doc_or_docs, dict):
doc_or_docs = [doc_or_docs]
try:
table = self.driver.Table(data_source)
with table.batch_writer() as batch:
for doc in doc_or_docs:
# Note: Existing documents are overwritten https://github.com/boto/boto/issues/3273
                    # TODO: Maybe we could do a search first?
batch.put_item(Item=doc)
return [doc[id_field] for doc in doc_or_docs]
except BotoCoreClientError as e:
abort(500, description=debug_error_message(e.response['Error']['Message']))
def update(self, resource: str, id_: str, updates: dict, original: dict):
"""Updates a collection/table document/row
:param str resource: Resource being accessed
:param str id_: The unique id of the document
:param dict updates: JSON updates to be performed on the database document (or row)
:param dict original: Definition of the json document that should be updated
:raise OriginalChangedError: Raised if the database layer notices a change from the supplied original parameter
"""
# TODO: Finish this
raise NotImplementedError
def replace(self, resource: str, id_: str, document: dict, original: dict):
"""Replaces a collection/table document/row
:param str resource: Resource being accessed
:param str id_: The unique id of the document
:param dict document: The new JSON document
:param original: Definition of the json document that should be updated
:raise OriginalChangedError: Raised if the database layer notices a change from the supplied original parameter
"""
# TODO: Finish this
raise NotImplementedError
def remove(self, resource: str, lookup: dict):
"""Removes a document/row or an entire set of documents/rows from a database collection/table
:param str resource: Resource being accessed
:param dict lookup: A query that documents must match in order to qualify for deletion
:return:
"""
id_field = config.DOMAIN[resource]["id_field"]
data_source, filter_, _, _ = self._datasource_ex(resource, lookup)
try:
table = self.driver.Table(data_source)
with table.batch_writer() as batch:
for item in self.find(resource, sub_resource_lookup=lookup)[0]:
batch.delete_item(Key={id_field: item[id_field]})
except BotoCoreClientError as e:
abort(500, description=debug_error_message(e.response['Error']['Message']))
def combine_queries(self, query_a: dict, query_b: dict) -> dict:
"""Takes two db queries and applies db-specific syntax to produce the intersection
:param dict query_a: Left query
:param dict query_b: Right query
:return: Combined query
:rtype: dict
"""
return {
"$and": [
{k: v} for k, v in itertools.chain(query_a.items(), query_b.items())
]
}
def get_value_from_query(self, query: dict, field_name: str) -> str:
"""For the specified field name, parses the query and returns the value being assigned in the query
For example,
get_value_from_query({'_id': 123}, '_id')
123
This mainly exists to deal with more complicated compound queries
get_value_from_query(
                {'$and': [{'_id': 123}, {'firstname': 'mike'}]},
'_id'
)
123
:param dict query: Lookup query
:param str field_name: Field name to get value for
:return: Value for field name within query
:rtype: str
"""
if field_name in query:
return query[field_name]
elif "$and" in query:
for condition in query["$and"]:
if field_name in condition:
return condition[field_name]
raise KeyError
def query_contains_field(self, query: dict, field_name: str) -> bool:
"""For the specified field name, does the query contain it?
:param dict query: Filter query
:param str field_name: Field name
:return: True, if the query contains the field. False otherwise
:rtype: bool
"""
try:
self.get_value_from_query(query, field_name)
except KeyError:
return False
return True
def is_empty(self, resource: str) -> bool:
"""Returns whether the resource is empty
:param str resource: Resource being accessed
:return: True, if the collection is empty. False otherwise
:rtype: bool
"""
data_source, filter_, _, _ = self.datasource(resource)
try:
table = self.driver.Table(data_source)
if not filter_:
return table.scan()['Count'] == 0
else:
if config.LAST_UPDATED in filter_:
del filter_[config.LAST_UPDATED]
return self.find(resource, None, filter_, perform_count=True)[1] == 0
except BotoCoreClientError as e:
abort(400, description=debug_error_message(e.response['Error']['Message']))
@staticmethod
def _convert_where_request_to_dict(req: ParsedRequest) -> dict:
"""Converts the contents of a `ParsedRequest`'s `where` property to a dict
:param ParsedRequest req: Contains all the constraints that must be fulfilled in order to satisfy the request
:return: Where clause from request
:rtype: dict
"""
if not req or not req.where:
return {}
try:
return json.loads(req.where)
except json.decoder.JSONDecodeError:
abort(400, description=debug_error_message("Unable to parse `where` clause"),)
``` |
{
"source": "jlane9/Flask-MonitoringDashboard",
"score": 3
} |
#### File: core/forms/daterange.py
```python
import datetime
from flask import request
from flask_wtf import FlaskForm
from wtforms import validators, SubmitField
from wtforms.fields.html5 import DateField
from flask_monitoringdashboard.core.timezone import to_local_datetime
DATE_FORMAT = '%Y-%m-%d'
class SelectDateRangeForm(FlaskForm):
""" Used for selecting two dates, which together specify a range. """
start_date = DateField('Start date', format=DATE_FORMAT, validators=[validators.data_required()])
end_date = DateField('End date', format=DATE_FORMAT, validators=[validators.data_required()])
submit = SubmitField('Update')
title = 'Select the time interval'
def get_days(self):
"""
        :return: A list of datetime.date objects from form.start_date to (and including) form.end_date
"""
delta = self.end_date.data - self.start_date.data
return [self.start_date.data + datetime.timedelta(days=i) for i in range(delta.days + 1)]
def content(self):
return '''
<div class="row">
<div class="col-sm-4"><i class="fa fa-calendar"></i> {} </div>
<div class="col-sm-4"><i class="fa fa-calendar"></i> {} </div>
</div>
<div class="row">
<div class="col-sm-4"> {} </div>
<div class="col-sm-4"> {} </div>
<div class="col-sm-4"> {} </div>
</div>'''.format(self.start_date.label, self.end_date.label,
self.start_date(class_="form-control", required=True),
self.end_date(class_="form-control", required=True),
self.submit(class_="btn btn-primary btn-block"))
def get_daterange_form(num_days=20):
"""
Returns a SelectDateRangeForm with two dates:
- end_date is today
    - start_date is today minus num_days
    :param num_days: the number of days before today used for the start_date
:return: A SelectDateRangeForm object with the required logic
"""
form = SelectDateRangeForm(request.form)
if form.validate():
if form.start_date.data > form.end_date.data:
form.start_date.data, form.end_date.data = form.end_date.data, form.start_date.data
else:
form.end_date.data = to_local_datetime(datetime.datetime.utcnow()).date()
form.start_date.data = form.end_date.data - datetime.timedelta(days=num_days)
return form
```
#### File: flask_monitoringdashboard/core/info_box.py
```python
from flask_monitoringdashboard.core.forms import MONITOR_CHOICES
GRAPH_INFO = '''You can hover the graph with your mouse to see the actual values. You can also use
the buttons at the top of the graph to select a subset of graph, scale it accordingly or save the graph
as a PNG image.'''
def b(s):
return '<b>{}</b>'.format(s)
def p(s):
return '<p>{}</p>'.format(s)
def get_plot_info(axes='', content=''):
"""
:param axes: If specified, information about the axis
:param content: If specified, information about the content
:return: a String with information in HTML
"""
information = b('Graph') + p(GRAPH_INFO)
if axes:
information += b('Axes') + p(axes)
if content:
information += b('Content') + p(content)
return information
def get_rules_info():
""" :return: a string with information in HTML """
info = b(MONITOR_CHOICES[0][1]) + \
p('When the monitoring-level is set to 0, you don\'t monitor anything about the performance of this '
'endpoint. The only data that is stored is when the ' + b('endpoint is last requested.'))
info += b(MONITOR_CHOICES[1][1]) + \
p('When the monitoring-level is set to 1, you collect data when the endpoint is last requested, plus '
'data about the ' + b('performance and utilization') + ' of this endpoint (as a black-box).')
info += b(MONITOR_CHOICES[2][1]) + \
p('When the monitoring-level is set to 2, you get all the functionality from 1, plus data about the ' +
b('performance per line of code') + ' from all requests.')
info += b(MONITOR_CHOICES[3][1]) + \
p('When the monitoring-level is set to 3, you get all the functionality from 2, including ' + b('more data'
' if a request is an outlier.'))
return info
```
#### File: core/plot/__init__.py
```python
import plotly
import plotly.graph_objs as go
from flask_monitoringdashboard.core.plot.util import add_default_value
from flask_monitoringdashboard.core.plot.plots import heatmap, boxplot, barplot, scatter, get_average_bubble_size
def get_layout(**kwargs):
"""
:param kwargs: additional arguments for the layout
:return: a Plotly Layout object with the required values
"""
kwargs = add_default_value('showlegend', False, **kwargs)
kwargs = add_default_value('autosize', True, **kwargs)
kwargs = add_default_value('plot_bgcolor', 'rgba(249,249,249,1)', **kwargs)
kwargs = add_default_value('height', 700, **kwargs)
kwargs = add_default_value('hovermode', 'closest', **kwargs)
return go.Layout(**kwargs)
def get_margin(**kwargs):
"""
:param kwargs: additional arguments for the Margin object
:return: a Plotly Margin instance
"""
kwargs = add_default_value('l', 200, **kwargs)
return go.Margin(**kwargs)
def get_figure(layout, data, **kwargs):
"""
:param layout: must be a Plotly Layout instance
    :param data: the data (a list of traces) for the plot
:param kwargs: additional arguments for the plot
:return: A plotly generated plot with the required data
"""
if not data:
return None
kwargs = add_default_value('output_type', 'div', **kwargs)
kwargs = add_default_value('show_link', False, **kwargs)
return plotly.offline.plot(go.Figure(data=data, layout=layout), **kwargs)
```
#### File: profiler/util/stringHash.py
```python
class StringHash(object):
def __init__(self):
self._h = {}
def hash(self, string):
"""
Performs the following reduction:
hash('abc') ==> 0
hash('def') ==> 1
hash('abc') ==> 0
:param string: the string to be hashed
:return: a unique int for every string.
"""
if string in self._h:
return self._h[string]
self._h[string] = len(self._h)
return self._h[string]
def unhash(self, hash):
""" Opposite of hash.
        unhash(hash('abc')) == 'abc'
        :param hash: the int hash to be unhashed
:return: the value that corresponds to the given hash
"""
for k, v in self._h.items():
if v == hash:
return k
raise ValueError('Value not possible to unhash: {}'.format(hash))
```
#### File: flask_monitoringdashboard/core/utils.py
```python
import ast
import numpy as np
from flask import url_for
from werkzeug.routing import BuildError
from flask_monitoringdashboard import config
from flask_monitoringdashboard.core.rules import get_rules
from flask_monitoringdashboard.core.timezone import to_local_datetime
from flask_monitoringdashboard.database.count import count_requests, count_total_requests
from flask_monitoringdashboard.database.endpoint import get_endpoint_by_id
from flask_monitoringdashboard.database.request import get_date_of_first_request, get_date_of_first_request_version
def get_endpoint_details(db_session, endpoint_id):
"""
Returns details about an endpoint.
:param db_session: session for the database
:param endpoint_id: id of the endpoint
:return dictionary
"""
endpoint = get_endpoint_by_id(db_session, endpoint_id)
endpoint.time_added = to_local_datetime(endpoint.time_added)
return {
'id': endpoint_id,
'endpoint': endpoint.name,
'rules': ', '.join([r.rule for r in get_rules(endpoint.name)]),
'rule': endpoint,
'url': get_url(endpoint.name),
'total_hits': count_requests(db_session, endpoint.id)
}
def get_details(db_session):
"""
Returns details about the deployment.
:param db_session: session for the database
:return dictionary
"""
import json
from flask_monitoringdashboard import loc
with open(loc() + 'constants.json', 'r') as f:
constants = json.load(f)
return {
'link': config.link,
'dashboard-version': constants['version'],
'config-version': config.version,
'first-request': get_date_of_first_request(db_session),
'first-request-version': get_date_of_first_request_version(db_session, config.version),
'total-requests': count_total_requests(db_session)
}
def get_url(end):
"""
Returns the URL if possible.
    URLs that require additional arguments, like /static/<file>, cannot be retrieved.
    :param end: the endpoint for the url.
    :return: the url_for(end), or None if the URL cannot be built
"""
try:
return url_for(end)
except BuildError:
return None
def simplify(values, n=5):
"""
    Simplify a list of values. It returns a list that is representative of the input
:param values: list of values
:param n: length of the returned list
:return: list with n values: min, q1, median, q3, max
"""
return [np.percentile(values, i * 100 // (n - 1)) for i in range(n)]
```
#### File: profiler/util/test_util.py
```python
import unittest
from flask_monitoringdashboard.core.profiler.util import order_histogram
class TestProfilerUtil(unittest.TestCase):
def test_order_histogram(self):
histogram = {
('0:42->1:12', 'c'): 610,
('0:42', 'a'): 1234,
('0:42->1:13', 'b'): 614
}
self.assertEqual(order_histogram(histogram.items()),
[(('0:42', 'a'), 1234), (('0:42->1:13', 'b'), 614), (('0:42->1:12', 'c'), 610)])
```
#### File: test/db/test_codeline.py
```python
import unittest
from flask_monitoringdashboard.database import session_scope
from flask_monitoringdashboard.database.code_line import get_code_line
FN = 'filename'
LN = 42
FUN = 'fun'
CODE = 'code'
class TestCodeLine(unittest.TestCase):
def test_get_code_line(self):
with session_scope() as db_session:
code_line1 = get_code_line(db_session, FN, LN, FUN, CODE)
code_line2 = get_code_line(db_session, FN, LN, FUN, CODE)
self.assertEqual(code_line1.id, code_line2.id)
self.assertEqual(code_line1.function_name, code_line2.function_name)
self.assertEqual(code_line1.filename, code_line2.filename)
self.assertEqual(code_line1.line_number, code_line2.line_number)
self.assertEqual(code_line1.code, code_line2.code)
```
#### File: test/db/test_data_grouped.py
```python
import unittest
from flask_monitoringdashboard.database import session_scope
from flask_monitoringdashboard.database.data_grouped import get_endpoint_data_grouped, get_test_data_grouped, \
get_version_data_grouped
from flask_monitoringdashboard.test.db.test_count_group import TestCountGroup
from flask_monitoringdashboard.test.utils import set_test_environment, clear_db, add_fake_data, NAME, REQUESTS
class TestDataGrouped(unittest.TestCase):
def setUp(self):
set_test_environment()
clear_db()
add_fake_data()
@staticmethod
def median(values):
return sum(values) / len(values)
def test_get_endpoint_data_grouped(self):
with session_scope() as db_session:
self.assertEqual(get_endpoint_data_grouped(db_session, self.median), {1: 12000}.items())
def test_get_test_data_grouped(self):
with session_scope() as db_session:
TestCountGroup.add_test_data(db_session)
self.assertEqual(get_test_data_grouped(db_session, self.median), {NAME: 1789.5}.items())
def test_get_version_data_grouped(self):
with session_scope() as db_session:
self.assertEqual(get_version_data_grouped(db_session, lambda x: x), {'1.0': REQUESTS}.items())
```
#### File: test/db/test_tested_endpoints.py
```python
import unittest
from flask_monitoringdashboard.database import session_scope
from flask_monitoringdashboard.database.tested_endpoints import get_tested_endpoint_names
from flask_monitoringdashboard.test.db.test_count_group import TestCountGroup
from flask_monitoringdashboard.test.utils import set_test_environment, clear_db, add_fake_data, NAME
class TestTestedEndpoints(unittest.TestCase):
def setUp(self):
set_test_environment()
clear_db()
add_fake_data()
def test_get_tested_endpoint_names(self):
with session_scope() as db_session:
TestCountGroup.add_test_data(db_session)
self.assertEqual(get_tested_endpoint_names(db_session), [NAME])
```
#### File: test/db/test_tests.py
```python
import unittest
import datetime
from flask_monitoringdashboard.database import session_scope, TestEndpoint
from flask_monitoringdashboard.database.tests import get_endpoint_measurements, get_last_tested_times
from flask_monitoringdashboard.test.utils import set_test_environment, clear_db, add_fake_data, add_fake_test_runs, \
REQUESTS, NAME, TEST_NAMES, ENDPOINT_ID
NAME2 = 'main2'
SUITE = 3
class TestDBTests(unittest.TestCase):
def setUp(self):
set_test_environment()
clear_db()
add_fake_data()
add_fake_test_runs()
def test_add_test_result(self):
"""
Test whether the function returns the right values.
"""
from flask_monitoringdashboard.database.tests import add_test_result, get_suite_measurements, add_or_update_test
from flask_monitoringdashboard.database.tested_endpoints import add_endpoint_hit
from flask_monitoringdashboard import config
import datetime
with session_scope() as db_session:
self.assertEqual(get_suite_measurements(db_session, SUITE), [0])
for exec_time in REQUESTS:
for test in TEST_NAMES:
add_or_update_test(db_session, test, True, datetime.datetime.utcnow(), config.version)
add_test_result(db_session, test, exec_time, datetime.datetime.utcnow(), config.version, SUITE, 0)
add_endpoint_hit(db_session, NAME, exec_time, test, config.version, SUITE)
result = get_suite_measurements(db_session, SUITE)
self.assertEqual(len(result), len(REQUESTS) * len(TEST_NAMES))
def test_get_results(self):
"""
Test whether the function returns the right values.
"""
self.test_add_test_result() # can be replaced by test_add_test_result, since this function covers two tests
def test_get_suites(self):
"""
Test whether the function returns the right values.
"""
from flask_monitoringdashboard.database.tests import get_test_suites
self.test_add_test_result()
with session_scope() as db_session:
self.assertEqual(2, len(get_test_suites(db_session)))
def test_get_measurements(self):
"""
Test whether the function returns the right values.
"""
from flask_monitoringdashboard.database.tests import get_suite_measurements
with session_scope() as db_session:
self.assertEqual(get_suite_measurements(db_session, SUITE), [0])
self.test_add_test_result()
result = get_suite_measurements(db_session, SUITE)
self.assertEqual(len(REQUESTS) * 2, len(result))
def test_get_test_measurements(self):
"""
Test whether the function returns the right values.
"""
from flask_monitoringdashboard.database.tests import get_endpoint_measurements_job
with session_scope() as db_session:
self.assertEqual(1, len(get_endpoint_measurements_job(db_session, NAME, SUITE)))
self.test_add_test_result()
result = get_endpoint_measurements_job(db_session, NAME, SUITE)
self.assertEqual(len(TEST_NAMES) * len(REQUESTS), len(result))
def test_get_endpoint_measurements(self):
with session_scope() as db_session:
self.assertEqual(get_endpoint_measurements(db_session, "1"), [0])
db_session.add(TestEndpoint(endpoint_id=ENDPOINT_ID, test_id=1, duration=1234, app_version="1.0",
travis_job_id="1", time_added=datetime.datetime.utcnow()))
db_session.add(TestEndpoint(endpoint_id=ENDPOINT_ID, test_id=1, duration=2345, app_version="1.0",
travis_job_id="1", time_added=datetime.datetime.utcnow()))
self.assertEqual(get_endpoint_measurements(db_session, "1"), [1234, 2345])
def test_get_last_tested_times(self):
with session_scope() as db_session:
self.assertEqual(get_last_tested_times(db_session), [])
db_session.add(TestEndpoint(endpoint_id=ENDPOINT_ID, test_id=1, duration=1234, app_version="1.0",
travis_job_id="1", time_added=datetime.datetime.utcnow()))
self.assertNotEqual(get_last_tested_times(db_session), [])
```
#### File: test/views/test_auth.py
```python
import unittest
from flask_monitoringdashboard.test.utils import set_test_environment, clear_db, add_fake_data, login, get_test_app
class TestLogin(unittest.TestCase):
def setUp(self):
set_test_environment()
clear_db()
add_fake_data()
self.app = get_test_app()
def test_get_login(self):
"""
Just retrieve the content and check if nothing breaks
"""
with self.app.test_client() as c:
self.assertEqual(200, c.get('dashboard/login').status_code)
login(c)
self.assertEqual(302, c.get('dashboard/login').status_code)
def test_incorrect_login(self):
"""
        Check whether logging in with incorrect credentials returns the login page
"""
args = {'name': 'admin', 'password': '<PASSWORD>'}
with self.app.test_client() as c:
self.assertIn('formLogin', c.post('dashboard/login', data=args).data.decode())
def test_correct_login(self):
"""
        Check whether logging in with correct credentials does not return the login page
"""
args = {'name': 'admin', 'password': '<PASSWORD>'}
with self.app.test_client() as c:
self.assertNotIn('formLogin', c.post('dashboard/login', data=args).data.decode())
def test_logout(self):
"""
Just retrieve the content and check if nothing breaks
"""
with self.app.test_client() as c:
self.assertEqual(302, c.get('dashboard/logout').status_code)
```
#### File: views/dashboard/heatmap.py
```python
import datetime
import numpy
import plotly.graph_objs as go
from flask import render_template
from flask_monitoringdashboard import blueprint
from flask_monitoringdashboard.core.auth import secure
from flask_monitoringdashboard.core.forms import get_daterange_form
from flask_monitoringdashboard.core.plot import get_layout, get_figure, heatmap as plot_heatmap
from flask_monitoringdashboard.core.info_box import get_plot_info
from flask_monitoringdashboard.core.timezone import to_utc_datetime, to_local_datetime
from flask_monitoringdashboard.database import session_scope
from flask_monitoringdashboard.database.endpoint import get_num_requests
TITLE = 'Hourly API Utilization'
AXES_INFO = '''The X-axis presents a number of days. The Y-axis presents every hour of
the day.'''
CONTENT_INFO = '''The color of the cell presents the number of requests that the application received
in a single hour. The darker the cell, the more requests it has processed. This information can be used
to determine at which moments of the day the Flask application processes the most requests.'''
@blueprint.route('/hourly_load', methods=['GET', 'POST'])
@secure
def hourly_load():
form = get_daterange_form()
return render_template('fmd_dashboard/graph.html', form=form, graph=hourly_load_graph(form), title=TITLE,
information=get_plot_info(AXES_INFO, CONTENT_INFO))
def hourly_load_graph(form, endpoint_id=None):
"""
Return HTML string for generating a Heatmap.
:param form: A SelectDateRangeForm, which is used to filter the selection
:param endpoint_id: optionally, filter the data on a specific endpoint
:return: HTML code with the graph
"""
# list of hours: 0:00 - 23:00
hours = ['0{}:00'.format(h) for h in range(0, 10)] + ['{}:00'.format(h) for h in range(10, 24)]
days = form.get_days()
# create empty 2D-list: [hour][day]
heatmap_data = numpy.zeros((len(hours), len(days)))
# add data from database to heatmap_data
start_datetime = to_utc_datetime(datetime.datetime.combine(form.start_date.data, datetime.time(0, 0, 0, 0)))
end_datetime = to_utc_datetime(datetime.datetime.combine(form.end_date.data, datetime.time(23, 59, 59)))
with session_scope() as db_session:
for time, count in get_num_requests(db_session, endpoint_id, start_datetime, end_datetime):
parsed_time = datetime.datetime.strptime(time, '%Y-%m-%d %H:%M:%S')
day_index = (parsed_time - start_datetime).days
hour_index = int(to_local_datetime(parsed_time).strftime('%H'))
heatmap_data[hour_index][day_index] = count
start_datetime = to_local_datetime(start_datetime - datetime.timedelta(days=1)).strftime('%Y-%m-%d 12:00:00')
end_datetime = to_local_datetime(form.end_date.data).strftime('%Y-%m-%d 12:00:00')
layout = get_layout(
xaxis=go.XAxis(range=[start_datetime, end_datetime])
)
return get_figure(layout, [plot_heatmap(x=days, y=hours, z=heatmap_data)])
```
#### File: views/dashboard/version_usage.py
```python
from flask import render_template
from flask_monitoringdashboard import blueprint
from flask_monitoringdashboard.core.auth import secure
from flask_monitoringdashboard.core.forms import get_slider_form
from flask_monitoringdashboard.core.plot import get_layout, get_figure, get_margin, heatmap
from flask_monitoringdashboard.core.info_box import get_plot_info
from flask_monitoringdashboard.database import Request, session_scope
from flask_monitoringdashboard.database.count import count_versions
from flask_monitoringdashboard.database.count_group import count_requests_group, get_value
from flask_monitoringdashboard.database.endpoint import get_endpoints
from flask_monitoringdashboard.database.versions import get_versions
TITLE = 'Multi Version API Utilization'
AXES_INFO = '''The X-axis presents the versions that are used. The Y-axis presents the
endpoints that are found in the Flask application.'''
CONTENT_INFO = '''The color of the cell presents the distribution of the number of requests that the
application received in a single version for a single endpoint. The darker the cell, the more requests
a certain endpoint has processed in that version. Since it displays the distribution of the load, each
column sums up to 100%. This information can be used to determine which endpoints process the most
requests.'''
@blueprint.route('/version_usage', methods=['GET', 'POST'])
@secure
def version_usage():
with session_scope() as db_session:
form = get_slider_form(count_versions(db_session), 'Select the number of versions')
graph = version_usage_graph(db_session, form)
return render_template('fmd_dashboard/graph.html', graph=graph, title=TITLE,
information=get_plot_info(AXES_INFO, CONTENT_INFO), form=form)
def version_usage_graph(db_session, form):
"""
Used for getting a Heatmap with an overview of which endpoints are used in which versions
:param db_session: session for the database
:param form: instance of SliderForm
:return:
"""
endpoints = get_endpoints(db_session)
versions = get_versions(db_session, limit=form.get_slider_value())
requests = [count_requests_group(db_session, Request.version_requested == v) for v in versions]
total_hits = []
hits = [[]] * len(endpoints)
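    # Note (added comment): each row of ``hits`` is reassigned with a fresh list in the
    # loop below, so the shared-reference pitfall of ``[[]] * n`` does not apply here.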
for hits_version in requests:
total_hits.append(max(1, sum([value for key, value in hits_version])))
for j in range(len(endpoints)):
hits[j] = [0] * len(versions)
for i in range(len(versions)):
hits[j][i] = get_value(requests[i], endpoints[j].id) * 100 / total_hits[i]
layout = get_layout(
xaxis={'title': 'Versions', 'type': 'category'},
yaxis={'type': 'category', 'autorange': 'reversed'},
margin=get_margin()
)
trace = heatmap(
z=hits,
x=versions,
y=['{} '.format(e.name) for e in endpoints],
colorbar={
'titleside': 'top',
'tickmode': 'array',
}
)
return get_figure(layout=layout, data=[trace])
```
#### File: views/details/time_version.py
```python
from flask import render_template
from flask_monitoringdashboard import blueprint
from flask_monitoringdashboard.core.auth import secure
from flask_monitoringdashboard.core.colors import get_color
from flask_monitoringdashboard.core.forms import get_slider_form
from flask_monitoringdashboard.core.plot import boxplot, get_figure, get_layout, get_margin
from flask_monitoringdashboard.core.info_box import get_plot_info
from flask_monitoringdashboard.core.utils import get_endpoint_details, simplify
from flask_monitoringdashboard.database import Request, session_scope
from flask_monitoringdashboard.database.count import count_versions_endpoint
from flask_monitoringdashboard.database.count_group import get_value
from flask_monitoringdashboard.database.data_grouped import get_version_data_grouped
from flask_monitoringdashboard.database.endpoint import to_local_datetime
from flask_monitoringdashboard.database.versions import get_first_requests
TITLE = 'Per-Version Performance'
AXES_INFO = '''The X-axis presents the execution time in ms. The Y-axis presents the versions
that are used.'''
CONTENT_INFO = '''This graph shows a horizontal boxplot for the versions that are used. With this
graph you can find out whether the performance changes across different versions.'''
@blueprint.route('/endpoint/<endpoint_id>/versions', methods=['GET', 'POST'])
@secure
def versions(endpoint_id):
with session_scope() as db_session:
details = get_endpoint_details(db_session, endpoint_id)
form = get_slider_form(count_versions_endpoint(db_session, endpoint_id), title='Select the number of versions')
graph = versions_graph(db_session, endpoint_id, form)
return render_template('fmd_dashboard/graph-details.html', details=details, graph=graph,
title='{} for {}'.format(TITLE, details['endpoint']), form=form,
information=get_plot_info(AXES_INFO, CONTENT_INFO))
def format_version(version, first_used):
"""
:param version: name of the version
:param first_used: datetime object when the version was first used.
:return: string that represents the version
"""
if not first_used:
return version
return '{}<br>{}'.format(version, to_local_datetime(first_used).strftime('%Y-%m-%d %H:%M'))
def versions_graph(db_session, endpoint_id, form):
times = get_version_data_grouped(db_session, lambda x: simplify(x, 10), Request.endpoint_id == endpoint_id)
first_requests = get_first_requests(db_session, endpoint_id, form.get_slider_value())
data = [boxplot(
name=format_version(request.version_requested, get_value(first_requests, request.version_requested)),
values=get_value(times, request.version_requested),
marker={'color': get_color(request.version_requested)})
for request in first_requests]
layout = get_layout(
height=350 + 40 * len(first_requests),
xaxis={'title': 'Execution time (ms)'},
yaxis={'type': 'category', 'title': 'Version', 'autorange': 'reversed'},
margin=get_margin()
)
return get_figure(layout=layout, data=data)
```
#### File: views/export/csv.py
```python
import datetime
from flask import make_response
from flask_monitoringdashboard import blueprint
from flask_monitoringdashboard.core.auth import admin_secure
from flask_monitoringdashboard.database import session_scope
from flask_monitoringdashboard.database.outlier import get_all_outliers
from flask_monitoringdashboard.database.request import get_data
REQUESTS_COLUMNS = ['id', 'endpoint_id', 'duration', 'time_requested',
'version_requested', 'group_by', 'ip']
OUTLIER_COLUMNS = ['id', 'request_id', 'request_header', 'request_environment',
'request_url', 'cpu_percent', 'memory', 'stacktrace']
@blueprint.route('/download-requests')
@admin_secure
def download_requests():
csv = ','.join(REQUESTS_COLUMNS) + '\n'
with session_scope() as db_session:
for entry in get_data(db_session):
csv += ','.join([str(entry.__getattribute__(c)) for c in REQUESTS_COLUMNS]) + '\n'
response = make_response(csv)
response.headers["Content-Disposition"] = "attachment; filename=requests_{0}.csv".format(
str(datetime.datetime.utcnow()).replace(" ", "_").replace(":", "-")[:19])
return response
@blueprint.route('/download-outliers')
@admin_secure
def download_outliers():
csv = ','.join(OUTLIER_COLUMNS) + '\n'
with session_scope() as db_session:
for entry in get_all_outliers(db_session):
data = ','.join([str(entry.__getattribute__(c)) for c in OUTLIER_COLUMNS])
data = ' '.join(data.split()) # remove newlines
csv += data + '\n'
response = make_response(csv)
response.headers["Content-Disposition"] = "attachment; filename=outliers_{0}.csv".format(
str(datetime.datetime.utcnow()).replace(" ", "_").replace(":", "-")[:19])
return response
``` |
{
"source": "jlane9/mockerena",
"score": 2
} |
#### File: mockerena/mockerena/app.py
```python
import inspect
import json
import logging
import os
import re
from bson.objectid import ObjectId
from cerberus import Validator
from eve import Eve
from faker.providers import BaseProvider
from flasgger import Swagger, swag_from
from flask import abort, jsonify, request, render_template
from healthcheck import HealthCheck, EnvironmentDump
from pymongo.errors import ServerSelectionTimeoutError
from mockerena import __author__, __email__, __version__
from mockerena.errors import ERROR_404, ERROR_422
from mockerena.format import format_output
from mockerena.generate import fake, generate_data, make_safe
from mockerena.models.schema import CUSTOM_SCHEMA
from mockerena.settings import DEBUG, DEFAULT_FILE_FORMAT, DEFAULT_INCLUDE_HEAD, DEFAULT_SIZE, \
DEFAULT_QUOTE_CHARACTER, DEFAULT_EXCLUDE_NULL, DEFAULT_DELIMITER, DEFAULT_KEY_SEPARATOR, \
DEFAULT_IS_NESTED, DEFAULT_RESPONSES, ENV, HOST, PORT, SECRET_KEY
from mockerena.swagger import TEMPLATE
app = Eve(__name__, settings=os.path.join(os.path.dirname(os.path.abspath(__file__)), 'settings.py'))
envdump = EnvironmentDump(include_python=False, include_process=False)
health = HealthCheck()
swagger = Swagger(app, template=TEMPLATE)
app.config.update(ENV=ENV, DEBUG=DEBUG, SECRET_KEY=SECRET_KEY)
def application_data() -> dict:
"""Returns information about the application
:return: A map of application information
:rtype: dict
"""
return {
"version": __version__,
"maintainer": __author__,
"maintainer_email": __email__,
"git_repo": "https://github.com/FanThreeSixty/mockerena"
}
def application_settings() -> dict:
"""Returns application settings
:return: A map of application settings
:rtype: dict
"""
return {
"DEFAULT_FILE_FORMAT": DEFAULT_FILE_FORMAT,
"DEFAULT_INCLUDE_HEAD": DEFAULT_INCLUDE_HEAD,
"DEFAULT_SIZE": DEFAULT_SIZE,
"DEFAULT_QUOTE_CHARACTER": DEFAULT_QUOTE_CHARACTER,
"DEFAULT_EXCLUDE_NULL": DEFAULT_EXCLUDE_NULL,
"DEFAULT_DELIMITER": DEFAULT_DELIMITER,
"DEFAULT_KEY_SEPARATOR": DEFAULT_KEY_SEPARATOR,
"DEFAULT_IS_NESTED": DEFAULT_IS_NESTED,
"DEFAULT_RESPONSES": DEFAULT_RESPONSES
}
def mongo_available() -> tuple:
"""Return status of mongo connection
:return: Tuple with boolean and text status
:rtype: tuple
"""
try:
app.data.driver.db.client.server_info()
return True, "mongo up"
except ServerSelectionTimeoutError:
return False, "mongo down"
def get_provider_types() -> dict:
"""Returns all available generator types
:return: Mapping of all generator types
:rtype: dict
"""
def is_generator(method) -> bool:
return inspect.ismethod(method) and issubclass(type(method.__self__), BaseProvider)
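    # The re.sub below trims provider module paths for display, e.g. (illustrative)
    # 'faker.providers.address.en_US' -> 'address'.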
return {
gen[0]: {
'module': re.sub(
r'((?:(?:faker|mockerena)\.providers\.?)|\.?en_US)', '', inspect.getmodule(gen[1]).__name__
),
'method': gen[0],
'display': gen[0].replace('_', ' ').title(),
'doc': inspect.getdoc(gen[1]),
'examples': make_safe([gen[1]() for _ in range(2)]) if gen[0] != 'binary' else None,
'args': {
str(key): {
"name": str(param.name),
"default": make_safe(param.default) if not param.empty else None
}
for (key, param) in inspect.signature(gen[1]).parameters.items()
}
} for gen in inspect.getmembers(fake, predicate=is_generator)
}
def generate_and_format(schema: dict) -> tuple:
"""Generate and return formatted data
    :param dict schema: Schema definition used to generate the data
:return: A http response
:rtype: tuple
"""
if not isinstance(schema, dict):
error = {
"_status": "ERR",
"_issues": {
"validation exception": f"'{str(schema)}' is not a document, must be a dict"
},
"_error": ERROR_422
}
return json.dumps(error), 422, {'Content-Type': 'application/json'}
num_rows = request.args.get('num_rows', schema.get('num_rows', DEFAULT_SIZE))
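    # str.isnumeric() is False for strings such as "-10" or "a", so negative or
    # non-numeric overrides fall back to DEFAULT_SIZE below.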
size = int(num_rows if str(num_rows).isnumeric() else DEFAULT_SIZE)
try:
return format_output(generate_data(schema, size), schema, size)
except (AttributeError, SyntaxError, TypeError, ValueError, ZeroDivisionError) as err:
abort(400, description=str(err))
@app.before_request
def seed():
"""Seed Faker random generator
"""
fake.seed(request.args.get('seed'))
@app.route("/")
def index() -> tuple:
"""Test route to make sure everything is running
:return: A http response
:rtype: tuple
"""
return render_template('index.html')
@swag_from('swagger/generate.yml')
@app.route("/api/schema/<schema_id>/generate")
def generate(schema_id: str) -> tuple:
"""Generates sample data from a schema
:param str schema_id: Schema id
:return: A http response
:rtype: tuple
"""
search = [{'schema': schema_id}]
if ObjectId.is_valid(schema_id):
search.append({'_id': ObjectId(schema_id)})
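    # The lookup matches either the schema's human-readable name or, when the id is a
    # valid ObjectId, the document's _id (explanatory comment added; not in the original source).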
schema = app.data.driver.db['schema'].find_one({"$or": search})
if not schema:
return json.dumps({"_status": "ERR", "_error": ERROR_404}), 404, {'Content-Type': 'application/json'}
return generate_and_format(schema)
@swag_from('swagger/custom_schema.yml')
@app.route("/api/schema/generate", methods=['POST'])
def custom_schema() -> tuple:
"""Generates sample data for the provided schema
:return: A http response
:rtype: tuple
"""
validator = Validator(CUSTOM_SCHEMA)
data = request.get_json()
if not isinstance(data, dict) or not validator.validate(data):
data_error = {"validation exception": f"'{str(data)}' is not a document, must be a dict"}
error = {
"_status": "ERR",
"_issues": data_error if not isinstance(data, dict) else validator.errors,
"_error": ERROR_422
}
return json.dumps(error), 422, {'Content-Type': 'application/json'}
return generate_and_format(data)
@swag_from('swagger/types.yml')
@app.route("/api/types")
def get_types() -> tuple:
"""Returns all available generator types
:return: A http response
:rtype: tuple
"""
return json.dumps(get_provider_types()), 200, {'Content-Type': 'application/json'}
@app.errorhandler(400)
def bad_request(error: Exception) -> tuple:
"""Handle bad requests
:param Exception error: Exception thrown
:return: A http response
:rtype: tuple
"""
return jsonify(_status="ERR", _error={"code": 400, "message": str(error)}), 400
# Add environment and health check routes
envdump.add_section("application", application_data)
envdump.add_section("settings", application_settings)
health.add_check(mongo_available)
health.add_section("version", __version__)
app.add_url_rule("/healthcheck", "healthcheck", view_func=health.run)
app.add_url_rule("/environment", "environment", view_func=envdump.run)
if __name__ != '__main__': # pragma: no cover
gunicorn_logger = logging.getLogger('gunicorn.error')
app.logger.handlers = gunicorn_logger.handlers
logging.basicConfig(level=gunicorn_logger.level)
if __name__ == "__main__": # pragma: no cover
app.run(host=HOST, debug=DEBUG, port=PORT)
```
#### File: mockerena/tests/conftest.py
```python
from copy import deepcopy
from flask import url_for
from eve import Eve
import pytest
from mockerena.app import app as server
MOCK_SCHEMA = {
"schema": "mock_example",
"num_rows": 10,
"file_format": "csv",
"file_name": "mock_{}_example",
"columns": [
{
"name": "foo",
"type": "random_element",
"args": {
"elements": ["this"]
}
},
{
"name": "bar",
"type": "random_element",
"args": {
"elements": ["that"]
}
}
]
}
@pytest.fixture(scope="session")
def app() -> Eve:
"""Returns mockerena app instance as a test fixture
:return: An Eve application
:rtype: Eve
"""
return server
@pytest.fixture()
def sample_schema() -> dict:
"""Returns sample schema for mockerena
:return: An example schema
:rtype: dict
"""
return deepcopy(MOCK_SCHEMA)
@pytest.fixture(autouse=True)
def setup_data(client):
"""Setup example schema for testing
:param Flask client: Mockerena app instance
"""
data = deepcopy(MOCK_SCHEMA)
# Setup
if not client.get(url_for('schema|item_lookup', _id='mock_example')).status_code == 200:
client.post(url_for('schema|resource'), json=data, headers={'Content-Type': "application/json"})
yield
# Teardown
if client.get(url_for('schema|item_lookup', _id='mock_example')).status_code == 200:
client.delete(url_for('schema|item_lookup', _id='mock_example'))
```
#### File: mockerena/tests/test_environment.py
```python
from flask import url_for
from eve import Eve
import pytest
@pytest.mark.environment
def test_environment(client: Eve):
"""Test environment route
:param Eve client: Mockerena app instance
:raises: AssertionError
"""
res = client.get(url_for('environment'))
assert res.status_code == 200
assert res.mimetype == 'application/json'
data = res.json
assert data['os']
assert data['application']
assert data['settings']
```
#### File: mockerena/tests/test_example.py
```python
from flask import url_for
from eve import Eve
import pytest
@pytest.mark.example
def test_example(client: Eve):
"""Example test for reference
:param Eve client: Mockerena app instance
:raises: AssertionError
"""
res = client.get(url_for('generate', schema_id='mock_example'))
assert res.status_code == 200
assert res.mimetype == 'text/csv'
```
#### File: mockerena/tests/test_get_types.py
```python
from flask import url_for
from eve import Eve
import pytest
@pytest.mark.params
@pytest.mark.get_types
def test_get_types(client: Eve):
"""Test to ensure that types can be retrieved
:param Eve client: Mockerena app instance
:raises: AssertionError
"""
res = client.get(url_for('get_types'))
assert res.status_code == 200
assert res.mimetype == 'application/json'
```
#### File: mockerena/tests/test_index.py
```python
from flask import url_for
from eve import Eve
import pytest
@pytest.mark.index
def test_index(client: Eve):
"""To to ensure index page successfully returns
:param Eve client: Mockerena app instance
:raises: AssertionError
"""
res = client.get(url_for('index'))
assert res.status_code == 200
```
#### File: mockerena/tests/test_params.py
```python
from eve import Eve
from flask import url_for
import pytest
from mockerena.settings import DEFAULT_SIZE
@pytest.mark.params
@pytest.mark.num_rows
def test_num_rows(client: Eve):
"""Test to ensure number of rows can be overridden
:param Eve client: Mockerena app instance
:raises: AssertionError
"""
res = client.get(url_for('generate', schema_id='mock_example'), query_string={'num_rows': 50})
assert res.status_code == 200
assert res.mimetype == 'text/csv'
assert res.get_data().decode('utf-8').count('\n') == 51 # Includes header (+1)
@pytest.mark.params
@pytest.mark.num_rows
def test_negative_num_rows(client: Eve):
"""Test to ensure negative num_rows defaults to DEFAULT_SIZE
:param Eve client: Mockerena app instance
:raises: AssertionError
"""
res = client.get(url_for('generate', schema_id='mock_example'), query_string={'num_rows': -10})
assert res.status_code == 200
assert res.mimetype == 'text/csv'
assert res.get_data().decode('utf-8').count('\n') == DEFAULT_SIZE + 1 # Includes header (+1)
@pytest.mark.params
@pytest.mark.num_rows
def test_invalid_num_rows(client: Eve):
"""Test to ensure invalid num_rows defaults to DEFAULT_SIZE
:param Eve client: Mockerena app instance
:raises: AssertionError
"""
res = client.get(url_for('generate', schema_id='mock_example'), query_string={'num_rows': 'a'})
assert res.status_code == 200
assert res.mimetype == 'text/csv'
assert res.get_data().decode('utf-8').count('\n') == DEFAULT_SIZE + 1 # Includes header (+1)
@pytest.mark.params
@pytest.mark.include_header
@pytest.mark.parametrize('value,count', (
(True, 11),
('true', 11),
('t', 11),
('yes', 11),
('y', 11),
('1', 11),
(False, 10),
('false', 10),
('f', 10),
('no', 10),
('n', 10),
('0', 10)
))
def test_include_header(client: Eve, value: str, count: int):
"""Test to ensure include header can be overridden
:param Eve client: Mockerena app instance
:param str value: Include header value
:param int count: Row count
:raises: AssertionError
"""
url = url_for('generate', schema_id='mock_example')
res = client.get(url, query_string={'include_header': value})
assert res.status_code == 200
assert res.mimetype == 'text/csv'
assert res.get_data().decode('utf-8').count('\n') == count
@pytest.mark.params
@pytest.mark.include_header
def test_invalid_include_header(client: Eve):
"""Test to ensure include_header defaults to false
:param Eve client: Mockerena app instance
:raises: AssertionError
"""
url = url_for('generate', schema_id='mock_example')
res = client.get(url, query_string={'include_header': 'foo'}) # Omits header
assert res.status_code == 200
assert res.mimetype == 'text/csv'
assert res.get_data().decode('utf-8').count('\n') == 10
@pytest.mark.params
@pytest.mark.file_format
def test_file_format(client: Eve):
"""Test to ensure file format can be overridden
:param Eve client: Mockerena app instance
:raises: AssertionError
"""
url = url_for('generate', schema_id='mock_example')
res = client.get(url, query_string={'file_format': 'json'})
assert res.status_code == 200
assert res.mimetype == 'application/json'
assert res.json
@pytest.mark.params
@pytest.mark.file_format
def test_invalid_file_format(client: Eve):
"""Test to ensure invalid file format return an error response
:param Eve client: Mockerena app instance
:raises: AssertionError
"""
url = url_for('generate', schema_id='mock_example')
res = client.get(url, query_string={'file_format': 'foo'})
assert res.status_code == 422
@pytest.mark.params
@pytest.mark.exclude_null
@pytest.mark.parametrize('value,is_removed', (
(True, True),
('true', True),
('t', True),
('yes', True),
('y', True),
('1', True),
(False, False),
('false', False),
('f', False),
('no', False),
('n', False),
('0', False)
))
def test_exclude_null(client: Eve, sample_schema: dict, value: str, is_removed: bool):
"""Test to ensure exclude_null can be overridden
:param Eve client: Mockerena app instance
:param dict sample_schema: Sample schema data
:param str value: Include header value
:param bool is_removed: Row count
:raises: AssertionError
"""
sample_schema["file_format"] = "json"
sample_schema['columns'][0]['percent_empty'] = 1
res = client.post(url_for('custom_schema'), json=sample_schema, query_string={'exclude_null': value})
assert res.status_code == 200
assert res.mimetype == 'application/json'
assert 'foo' not in res.json[0] if is_removed else 'foo' in res.json[0]
@pytest.mark.params
@pytest.mark.include_header
def test_invalid_exclude_null(client: Eve, sample_schema: dict):
"""Test to ensure exclude_null defaults to false
:param Eve client: Mockerena app instance
:param dict sample_schema: Sample schema data
:raises: AssertionError
"""
sample_schema["file_format"] = "json"
sample_schema['columns'][0]['percent_empty'] = 1
res = client.post(url_for('custom_schema'), json=sample_schema, query_string={'exclude_null': 'foo'})
assert res.status_code == 200
assert res.mimetype == 'application/json'
assert 'foo' in res.json[0]
```
#### File: mockerena/tests/test_responses.py
```python
import datetime
from eve import Eve
from flask import url_for
import pytest
@pytest.mark.responses
def test_response_override_status_code(client: Eve, sample_schema: dict):
"""Test to ensure status code can be overridden on a response
:param Eve client: Mockerena app instance
:param dict sample_schema: Sample schema data
:raises: AssertionError
"""
sample_schema["file_format"] = "json"
sample_schema["responses"] = [{"status_code": 201}]
res = client.post(url_for('custom_schema'), json=sample_schema, headers={'Content-Type': "application/json"})
assert res.status_code == 201
@pytest.mark.responses
def test_response_override_data(client: Eve, sample_schema: dict):
"""Test to ensure data can be overridden on a response
:param Eve client: Mockerena app instance
:param dict sample_schema: Sample schema data
:raises: AssertionError
"""
sample_schema["file_format"] = "json"
sample_schema["responses"] = [{"data": '{"status": "ok"}', "content_type": "application/json"}]
res = client.post(url_for('custom_schema'), json=sample_schema, headers={'Content-Type': "application/json"})
assert res.status_code == 200
assert res.mimetype == 'application/json'
assert res.json == {"status": "ok"}
@pytest.mark.responses
def test_response_override_content_type(client: Eve, sample_schema: dict):
"""Test to ensure content type can be overridden on a response
:param Eve client: Mockerena app instance
:param dict sample_schema: Sample schema data
:raises: AssertionError
"""
sample_schema["file_format"] = "json"
sample_schema["responses"] = [{"content_type": "application/xml"}]
res = client.post(url_for('custom_schema'), json=sample_schema, headers={'Content-Type': "application/json"})
assert res.status_code == 200
assert res.mimetype == 'application/xml'
@pytest.mark.responses
def test_response_custom_header(client: Eve, sample_schema: dict):
"""Test to ensure headers can be overridden on a response
:param Eve client: Mockerena app instance
:param dict sample_schema: Sample schema data
:raises: AssertionError
"""
sample_schema["responses"] = [{"headers": {"Last-Modified": "Thur, 19 Sep 2019 19:25:10 GMT"}}]
res = client.post(url_for('custom_schema'), json=sample_schema, headers={'Content-Type': "application/json"})
assert res.status_code == 200
assert res.last_modified == datetime.datetime(2019, 9, 19, 19, 25, 10)
``` |
{
"source": "jlane9/pytest-statsd",
"score": 2
} |
#### File: pytest-statsd/pytest_statsd/plugin.py
```python
from __future__ import absolute_import
import time
import statsd
def pytest_addoption(parser):
"""
:param parser:
:return:
"""
group = parser.getgroup('terminal reporting')
group.addoption('--stats-d', action='store_true',
help='send test results to graphite')
group.addoption('--stats-host', action='store', dest='stats_host',
metavar='host', default='localhost',
help='statsd host. default is \'localhost\'')
group.addoption('--stats-port', action='store', dest='stats_port',
metavar='port', default=8125,
help='statsd port. default is 8125')
group.addoption('--stats-prefix', action='store', dest='stats_prefix',
metavar='prefix', default=None,
help='prefix to give all stats')
def pytest_configure(config):
"""
:param config:
:return:
"""
stats_d = config.getoption('stats_d')
# prevent opening statsd on slave nodes (xdist)
if stats_d and not hasattr(config, 'slaveinput'):
config._graphite = GraphiteReport(config)
config.pluginmanager.register(config._graphite)
def pytest_unconfigure(config):
"""
:param config:
:return:
"""
graphite = getattr(config, '_graphite', None)
if graphite:
del config._graphite
config.pluginmanager.unregister(graphite)
class GraphiteReport(object):
"""Graphite report implementation
"""
def __init__(self, config):
self.errors = self.failed = 0
self.passed = self.skipped = 0
self.xfailed = self.xpassed = 0
self.total = 0
self.config = config
self.host = config.getoption('stats_host')
self.port = config.getoption('stats_port')
self.prefix = config.getoption('stats_prefix')
self.suite_start_time = 0
def pytest_runtest_logreport(self, report):
"""Add report metrics
:param pytest.Report report: Test case report
:return:
"""
if report.passed:
self.append_passed(report)
elif report.failed:
self.append_failed(report)
elif report.skipped:
self.append_skipped(report)
def append_passed(self, report):
"""Add passed test metric
:param pytest.Report report: Test case report
:return:
"""
if report.when == 'call':
if hasattr(report, "wasxfail"):
self.xpassed += 1
else:
self.passed += 1
def append_failed(self, report):
"""Add failed test metric
:param pytest.Report report: Test case report
:return:
"""
if report.when == "call":
if hasattr(report, "wasxfail"):
self.xpassed += 1
else:
self.failed += 1
else:
self.errors += 1
def append_skipped(self, report):
"""Add skipped test metric
:param pytest.Report report: Test case report
:return:
"""
if hasattr(report, "wasxfail"):
self.xfailed += 1
else:
self.skipped += 1
def pytest_sessionstart(self, session):
"""before test run begins
:param pytest.Session session:
:return:
"""
self.suite_start_time = time.time()
def pytest_sessionfinish(self, session):
"""whole test run finishes
:param pytest.Session session:
:return:
"""
stats = statsd.StatsClient(self.host, self.port, prefix=self.prefix)
stats.gauge('passed', self.passed)
stats.gauge('skipped', self.skipped)
stats.gauge('failed', self.failed)
stats.gauge('errors', self.errors)
stats.gauge('xfailed', self.xfailed)
stats.gauge('xpassed', self.xpassed)
stats.gauge('total', sum([self.passed, self.skipped, self.failed,
self.errors, self.xfailed, self.xpassed]))
stats.gauge('aggregate_runs', 1, delta=True)
if sum([self.errors, self.failed]) == 0:
stats.gauge('aggregate_passing', 1, delta=True)
else:
stats.gauge('aggregate_failing', 1, delta=True)
duration = int((time.time() - self.suite_start_time) * 1000)
stats.timing('duration', duration)
def pytest_terminal_summary(self, terminalreporter):
"""add additional section in terminal summary reporting
:param terminalreporter: pytest terminal reporter
:return:
"""
terminalreporter.write_sep('-', 'sent results to http://{}:{}'.format(self.host, self.port))
``` |
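A quick way to see what this plugin emits is to call the same statsd client directly. The host, port, prefix and numbers below are illustrative only; statsd is fire-and-forget UDP, so the snippet runs even without a server listening.

```python
# Sketch of the gauges/timing sent by pytest_sessionfinish (values made up).
import statsd

stats = statsd.StatsClient('localhost', 8125, prefix='ci.myproject')
stats.gauge('passed', 40)
stats.gauge('failed', 2)
stats.gauge('total', 42)
stats.gauge('aggregate_runs', 1, delta=True)  # accumulates across sessions
stats.timing('duration', 1250)                # milliseconds, like the plugin
```

In a real run the equivalent data is produced by invoking `pytest --stats-d --stats-host localhost --stats-prefix ci.myproject`.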
{
"source": "jlane9/selenium-data-attributes",
"score": 2
} |
#### File: jlane9/selenium-data-attributes/fabfile.py
```python
from contextlib import contextmanager as _contextmanager
import os
from fabric.api import local, prefix, task
from fabric.context_managers import lcd
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
@_contextmanager
def virtualenv(bin_folder):
"""Activate virtual environment
:param basestring bin_folder: Location of virtualenv /bin folder
:return:
"""
with prefix('source {}/bin/activate'.format(bin_folder)):
yield
def get_virtual_env():
"""Return location of virtual environment
:return: Path to the virtualenv root, or None if it is not found
"""
for root, dir_names, file_names in os.walk(BASE_DIR):
if all([i in dir_names for i in ['bin', 'include', 'lib']]):
return os.path.join(BASE_DIR, root)
def get_os():
"""Return System platform
:return:
"""
return local('python -c "import platform; print platform.system()"', capture=True)
@task()
def update_requirements():
"""Update project python requirements
:return:
"""
virtual_env = get_virtual_env()
if virtual_env:
with virtualenv(virtual_env):
with lcd(BASE_DIR):
local('pip install -r requirements.txt')
print 'Requirements installed'
else:
print 'Virtual environment needs to be installed first'
print 'Please run `fab install_virtualenv` first'
def run_setup(command):
"""Run setup.py file
:return:
"""
virtual_env = get_virtual_env()
if virtual_env:
with virtualenv(virtual_env):
with lcd(BASE_DIR):
local('python setup.py {}'.format(str(command)))
else:
print 'Virtual environment needs to be installed first'
print 'Please run `fab install_virtualenv` first'
@task()
def install():
"""Install project
:return:
"""
run_setup('install')
@task()
def develop():
"""Install project (develop mode)
:return:
"""
run_setup('develop')
@task()
def install_virtualenv():
"""Install python virtual environment
:return:
"""
virtual_env = get_virtual_env()
if not virtual_env:
with lcd(BASE_DIR):
local('virtualenv -p /usr/bin/python2.7 venv')
print "Virtual environment installed..."
print "Usage: source {}/venv/bin/activate".format(BASE_DIR)
else:
print "Virtual environment already installed..."
print "Usage: source {}/bin/activate".format(virtual_env)
@task()
def publish(version=None):
"""Publish project to PyPi
:param basestring version: Version number
:return:
"""
if isinstance(version, str):
with lcd(BASE_DIR):
local("sed -i '' -E -- \"s/__version__ = '.*'/__version__ = '{}'/g\" sda/__init__.py".format(version))
local('git add sda/__init__.py')
local('git commit -m "PyPi release {}"'.format(version))
local('git tag {0} -m "PyPi release {0}"'.format(version))
local('git push --tags origin master')
release = local('python setup.py sdist bdist_wheel upload -r "https://pypi.python.org/pypi"', capture=True)
if '200' not in release:
print "{} release failed...".format(version)
else:
print "{} released...".format(version)
else:
print 'Please include a version'
```
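The fabfile leans on Fabric's `prefix`/`lcd` context managers. Below is a hedged, Python 3 stand-in built only on the standard library that shows the same nesting pattern; the echoed commands are placeholders, not the real activate/pip calls.

```python
import subprocess
from contextlib import contextmanager

@contextmanager
def shell_prefix(prefix_cmd):
    """Yield a runner that prepends `prefix_cmd &&` to every command."""
    def run(cmd, cwd=None):
        return subprocess.run("{} && {}".format(prefix_cmd, cmd),
                              shell=True, cwd=cwd, check=True)
    yield run

# Mirrors: with virtualenv(...): with lcd(BASE_DIR): local('pip install -r requirements.txt')
with shell_prefix("echo 'source venv/bin/activate'") as run:
    run("echo 'pip install -r requirements.txt'", cwd=".")
```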
#### File: selenium-data-attributes/sda/site.py
```python
from sda.element import SeleniumObject
try:
from urlparse import urljoin, urlparse
except (ImportError, ModuleNotFoundError):
from urllib.parse import urljoin, urlparse
__all__ = ['Site']
class Site(SeleniumObject):
"""The Site Implementation
The intention for the Site object is to contain all website pages. An example usage of this might be:
Let's say we have the following file structure
my_project
- __init__.py
- main.py
- page_1
- __init__.py
- fixtures.py
- locators.py
- page.py
- page_2
- __init__.py
- fixtures.py
- locators.py
- page.py
- site
- __init__.py
- site.py
- settings.py
site/site.py
.. code-block:: python
from sda.site import Site
from page_1.page import Page1
from page_2.page import Page2
class ExampleSite(Site):
def __init__(self, web_driver):
super(ExampleSite, self).__init__(web_driver)
self.page_1 = Page1(web_driver)
self.page_2 = Page2(web_driver)
"""
@property
def domain(self):
"""Returns the domain for a website
:return: domain
:rtype: str
"""
return urlparse(self.url).netloc
@property
def path(self):
"""Returns the website path
:return: path
:rtype: str
"""
return urlparse(self.url).path
@property
def url(self):
"""Current page URL
:return: Page URL
:rtype: str
"""
return self.driver.current_url
``` |
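For reference, the `domain` and `path` properties are thin wrappers over `urlparse`; the URL below is an arbitrary example (Python 3 import shown, the try/except above covers Python 2).

```python
from urllib.parse import urlparse

url = "https://example.com/accounts/login?next=/dashboard"
print(urlparse(url).netloc)  # 'example.com'     -> what Site.domain returns
print(urlparse(url).path)    # '/accounts/login' -> what Site.path returns
```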
{
"source": "jlaneve/astro",
"score": 2
} |
#### File: astro/example_dags/example_amazon_s3_postgres_load_and_save.py
```python
from airflow.decorators import dag
from airflow.utils import timezone
# Import Operator
import astro.sql as aql
from astro.sql.table import Table
default_args = {
"owner": "airflow",
"retries": 1,
"retry_delay": 0,
}
@dag(
default_args=default_args,
schedule_interval=None,
start_date=timezone.utcnow(),
tags=["demo"],
)
def example_amazon_s3_postgres_load_and_save():
t1 = aql.load_file(
path="s3://tmp9/homes.csv",
file_conn_id="",
output_table=Table(
"expected_table_from_s3", conn_id="postgres_conn", database="postgres"
),
)
aql.save_file(
input=t1,
output_file_path="s3://tmp9/homes.csv",
overwrite=True,
)
example_amazon_s3_postgres_load_and_save_dag = (
example_amazon_s3_postgres_load_and_save()
)
```
#### File: sql/operators/agnostic_sql_append.py
```python
import importlib
from typing import Dict, List
from sqlalchemy import MetaData, cast, column, insert, select
from sqlalchemy.sql.schema import Table as SqlaTable
from astro.constants import Database
from astro.sql.operators.sql_decorator import SqlDecoratedOperator
from astro.sql.table import Table
from astro.utils.database import get_database_name
from astro.utils.schema_util import (
get_error_string_for_multiple_dbs,
tables_from_same_db,
)
from astro.utils.table_handler import TableHandler
from astro.utils.task_id_helper import get_unique_task_id
class SqlAppendOperator(SqlDecoratedOperator, TableHandler):
template_fields = ("main_table", "append_table")
def __init__(
self,
append_table: Table,
main_table: Table,
columns: List[str] = [],
casted_columns: dict = {},
**kwargs,
):
self.append_table = append_table
self.main_table = main_table
self.sql = ""
self.columns = columns
self.casted_columns = casted_columns
task_id = get_unique_task_id("append_table")
def null_function():
pass
super().__init__(
raw_sql=True,
parameters={},
task_id=kwargs.get("task_id") or task_id,
op_args=(),
python_callable=null_function,
handler=lambda x: None,
**kwargs,
)
def execute(self, context: Dict):
if not tables_from_same_db([self.append_table, self.main_table]):
raise ValueError(
get_error_string_for_multiple_dbs([self.append_table, self.main_table])
)
self.main_table.conn_id = self.main_table.conn_id or self.append_table.conn_id
self.conn_id = self.main_table.conn_id or self.append_table.conn_id
self.database = self.main_table.database or self.append_table.database
self.warehouse = self.main_table.warehouse or self.append_table.warehouse
self.schema = self.main_table.schema or self.append_table.schema
self.sql = self.append(
main_table=self.main_table,
append_table=self.append_table,
columns=self.columns,
casted_columns=self.casted_columns,
conn_id=self.main_table.conn_id,
)
super().execute(context)
return self.main_table
def append(
self, main_table: Table, columns, casted_columns, append_table: Table, conn_id
):
engine = self.get_sql_alchemy_engine()
if self.schema and get_database_name(engine) != Database.SQLITE:
metadata = MetaData(schema=self.schema)
else:
metadata = MetaData()
# TODO: fix BigQuery and Postgres table reflection issue.
main_table_sqla = SqlaTable(
main_table.table_name, metadata, autoload_with=engine
)
append_table_sqla = SqlaTable(
append_table.table_name, metadata, autoload_with=engine
)
column_names = [column(c) for c in columns]
sqlalchemy = importlib.import_module("sqlalchemy")
casted_fields = [
cast(column(k), getattr(sqlalchemy, v)) for k, v in casted_columns.items()
]
main_columns = [k for k, v in casted_columns.items()]
main_columns.extend(list(columns))
if len(column_names) + len(casted_fields) == 0:
column_names = [column(c) for c in append_table_sqla.c.keys()]
main_columns = column_names
column_names.extend(casted_fields)
sel = select(column_names).select_from(append_table_sqla)
return insert(main_table_sqla).from_select(main_columns, sel)
```
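The core of `append` is an `INSERT ... SELECT` with optional casts. A minimal, self-contained sketch of that statement against an in-memory SQLite database follows; the table and column names are invented, and it assumes the SQLAlchemy 1.x list-style `select()` that the operator itself uses.

```python
from sqlalchemy import (Column, Integer, MetaData, String, Table, cast,
                        column, create_engine, insert, select)

engine = create_engine("sqlite://")
metadata = MetaData()
main = Table("main_table", metadata, Column("id", Integer), Column("value", String))
extra = Table("append_table", metadata, Column("id", Integer), Column("value", Integer))
metadata.create_all(engine)

with engine.begin() as conn:
    conn.execute(insert(extra), [{"id": 1, "value": 10}, {"id": 2, "value": 20}])
    # Plain columns plus casted columns, as in SqlAppendOperator.append
    cols = [column("id"), cast(column("value"), String)]
    sel = select(cols).select_from(extra)
    conn.execute(insert(main).from_select(["id", "value"], sel))
    print(conn.execute(select([main])).fetchall())  # [(1, '10'), (2, '20')]
```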
#### File: sql/operators/sql_dataframe.py
```python
import inspect
from typing import Dict, Optional
import pandas as pd
from airflow.decorators.base import DecoratedOperator
from airflow.providers.sqlite.hooks.sqlite import SqliteHook
from astro.constants import Database
from astro.settings import SCHEMA
from astro.sql.table import Table, TempTable, create_table_name
from astro.utils import get_hook
from astro.utils.database import get_database_from_conn_id
from astro.utils.dependencies import (
BigQueryHook,
PostgresHook,
SnowflakeHook,
postgres_sql,
)
from astro.utils.load import load_dataframe_into_sql_table
from astro.utils.table_handler import TableHandler
class SqlDataframeOperator(DecoratedOperator, TableHandler):
def __init__(
self,
conn_id: Optional[str] = None,
database: Optional[str] = None,
schema: Optional[str] = None,
warehouse: Optional[str] = None,
role: Optional[str] = None,
identifiers_as_lower: Optional[bool] = True,
**kwargs,
):
"""
Converts a SQL table into a dataframe. Users can then pass a Python function that takes a dataframe as
one of its inputs; once that function has completed, the result is accessible
via the TaskFlow API.
:param conn_id: Connection to the DB that you will pull the table from
:param database: Database for input table
:param schema: schema for input table
:param warehouse: (Snowflake) Which warehouse to use for the input table
:param kwargs:
"""
self.conn_id = conn_id
self.database = database
self.schema = schema
self.warehouse = warehouse
self.role = role
self.parameters = None
self.kwargs = kwargs or {}
self.op_kwargs: Dict = self.kwargs.get("op_kwargs") or {}
if self.op_kwargs.get("output_table"):
self.output_table: Optional[Table] = self.op_kwargs.pop("output_table")
else:
self.output_table = None
self.op_args = self.kwargs.get("op_args") # type: ignore
self.identifiers_as_lower = identifiers_as_lower
super().__init__(
**kwargs,
)
def handle_op_args(self):
full_spec = inspect.getfullargspec(self.python_callable)
op_args = list(self.op_args)
ret_args = []
for arg in op_args:
current_arg = full_spec.args.pop(0)
if (
full_spec.annotations[current_arg] == pd.DataFrame
and type(arg) == Table
):
ret_args.append(self._get_dataframe(arg))
else:
ret_args.append(arg)
self.op_args = tuple(ret_args)
def handle_op_kwargs(self):
param_types = inspect.signature(self.python_callable).parameters
self.op_kwargs = {
k: self._get_dataframe(v)
if param_types.get(k).annotation == pd.DataFrame and type(v) == Table
else v
for k, v in self.op_kwargs.items()
}
def execute(self, context: Dict):
self._set_variables_from_first_table()
self.handle_op_args()
self.handle_op_kwargs()
pandas_dataframe = self.python_callable(*self.op_args, **self.op_kwargs)
if self.output_table:
self.populate_output_table()
if type(self.output_table) == TempTable:
self.output_table = self.output_table.to_table(
table_name=create_table_name(context=context), schema=SCHEMA
)
self.output_table.schema = self.output_table.schema or SCHEMA
hook = get_hook(
conn_id=self.output_table.conn_id,
database=self.output_table.database,
schema=self.output_table.schema,
warehouse=self.output_table.warehouse,
)
load_dataframe_into_sql_table(pandas_dataframe, self.output_table, hook)
return self.output_table
else:
return pandas_dataframe
def get_snow_hook(self, table: Table) -> SnowflakeHook:
"""
Create and return SnowflakeHook.
:return: a SnowflakeHook instance.
:rtype: SnowflakeHook
"""
return SnowflakeHook(
snowflake_conn_id=table.conn_id,
warehouse=table.warehouse,
database=table.database,
role=self.role,
schema=table.schema,
authenticator=None,
session_parameters=None,
)
def _get_dataframe(self, table: Table):
database = get_database_from_conn_id(table.conn_id)
self.log.info(f"Getting dataframe for {table}")
if database in (Database.POSTGRES, Database.POSTGRESQL):
self.hook = PostgresHook(
postgres_conn_id=table.conn_id, schema=table.database
)
schema = table.schema or SCHEMA
query = (
postgres_sql.SQL("SELECT * FROM {schema}.{input_table}")
.format(
schema=postgres_sql.Identifier(schema),
input_table=postgres_sql.Identifier(table.table_name),
)
.as_string(self.hook.get_conn())
)
df = self.hook.get_pandas_df(query)
elif database == Database.SNOWFLAKE:
hook = self.get_snow_hook(table)
df = hook.get_pandas_df(
"SELECT * FROM IDENTIFIER(%(input_table)s)",
parameters={"input_table": table.table_name},
)
elif database == Database.SQLITE:
hook = SqliteHook(sqlite_conn_id=table.conn_id, database=table.database)
engine = hook.get_sqlalchemy_engine()
df = pd.read_sql_table(table.table_name, engine)
elif database == Database.BIGQUERY:
hook = BigQueryHook(gcp_conn_id=table.conn_id)
engine = hook.get_sqlalchemy_engine()
df = pd.read_sql_table(table.qualified_name(), engine)
if self.identifiers_as_lower:
df.columns = [col_label.lower() for col_label in df.columns]
return df
```
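The interesting part of this operator is the annotation-driven dispatch in `handle_op_args`/`handle_op_kwargs`: arguments annotated as `pd.DataFrame` are swapped for the loaded table, everything else is passed through. A stripped-down, runnable sketch of that idea, with a hypothetical `FakeTable` standing in for astro's `Table` and a stub loader instead of `_get_dataframe`:

```python
import inspect
import pandas as pd

class FakeTable:                      # stand-in for astro.sql.table.Table
    def __init__(self, name):
        self.table_name = name

def to_dataframe(table):              # stand-in for _get_dataframe
    return pd.DataFrame({"source": [table.table_name]})

def my_task(df: pd.DataFrame, threshold: int):
    return len(df), threshold

def resolve_args(func, *args):
    spec = inspect.getfullargspec(func)
    resolved = []
    for name, arg in zip(spec.args, args):
        if spec.annotations.get(name) == pd.DataFrame and isinstance(arg, FakeTable):
            resolved.append(to_dataframe(arg))   # Table annotated as DataFrame -> load it
        else:
            resolved.append(arg)                 # anything else passes through
    return tuple(resolved)

print(my_task(*resolve_args(my_task, FakeTable("homes"), 5)))  # (1, 5)
```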
#### File: astro/utils/database.py
```python
from typing import Union
from airflow.hooks.base import BaseHook
from airflow.providers.sqlite.hooks.sqlite import SqliteHook
from sqlalchemy import create_engine, text
from sqlalchemy.engine import Engine
from sqlalchemy.engine.result import ResultProxy
from astro.constants import Database
from astro.utils.dependencies import BigQueryHook, PostgresHook, SnowflakeHook
def get_database_from_conn_id(conn_id: str) -> Database:
"""
Given a conn_id, identify the database name.
:param conn_id: Airflow connection ID
:type conn_id: str
:return: the database this interface relates to (e.g. Database.SQLITE)
:rtype: astro.constants.Database enum item
"""
conn_type = BaseHook.get_connection(conn_id).conn_type
type_to_db = {
"postgres": Database.POSTGRES,
"postgresql": Database.POSTGRES,
"sqlite": Database.SQLITE,
"bigquery": Database.BIGQUERY,
"snowflake": Database.SNOWFLAKE,
"google_cloud_platform": Database.BIGQUERY,
}
try:
database_name = type_to_db[conn_type]
except KeyError:
raise ValueError(f"Unsupported database <{conn_type}>")
return database_name
def get_database_name(interface: Union[Engine, BaseHook, SqliteHook]) -> Database:
"""
Given a hook or a SQL engine, identify the database name.
:param interface: interface to the database
:type interface: SQLAlchemy engine or Airflow Hook (BigQueryHook, PostgresHook, SnowflakeHook, SqliteHook)
:return: the database this interface relates to (e.g. Database.SQLITE)
:rtype: astro.constants.Database enum item
"""
if isinstance(interface, BaseHook):
hook_to_database = {
BigQueryHook: Database.BIGQUERY,
PostgresHook: Database.POSTGRES,
SnowflakeHook: Database.SNOWFLAKE,
SqliteHook: Database.SQLITE,
}
try:
database_name = hook_to_database[type(interface)]
except KeyError:
raise ValueError(f"Unsupported database {type(interface)}")
else: # SqlAlchemy engine
database_name = getattr(Database, interface.name.upper())
return database_name
def get_sqlalchemy_engine(hook: Union[BaseHook, SqliteHook]) -> Engine:
"""
Given a hook, return a SQLAlchemy engine for the target database.
:param hook: Airflow Hook used to access a SQL-like database
:type hook: (BigQueryHook, PostgresHook, SnowflakeHook, SqliteHook)
:return: SQLAlchemy engine
:rtype: sqlalchemy.Engine
"""
database = get_database_name(hook)
engine = None
if database == Database.SQLITE:
uri = hook.get_uri()
if "////" not in uri:
uri = hook.get_uri().replace("///", "////")
engine = create_engine(uri)
if engine is None:
engine = hook.get_sqlalchemy_engine()
return engine
def run_sql(
engine: Engine,
sql_statement: Union[str, text],
parameters: Union[None, dict] = None,
) -> ResultProxy:
"""
Run a SQL statement using the given engine.
:param engine: SQLAlchemy engine
:type engine: sqlalchemy.Engine
:param sql_statement: SQL statement to be run on the engine
:type sql_statement: (sqlalchemy.text or str)
:param parameters: (optional) Parameters to be passed to the SQL statement
:type parameters: dict
:return: Result of running the statement
:rtype: sqlalchemy.engine.result.ResultProxy
"""
if parameters is None:
parameters = {}
connection = engine.connect()
if isinstance(sql_statement, str):
return connection.execute(text(sql_statement), parameters)
else:
return connection.execute(sql_statement, parameters)
```
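A small usage sketch for `run_sql` with a plain engine; it uses an in-memory SQLite database so it runs anywhere, the table name and rows are made up, and it assumes the SQLAlchemy 1.x behaviour the module targets.

```python
from sqlalchemy import create_engine

from astro.utils.database import run_sql

engine = create_engine("sqlite://")
run_sql(engine, "CREATE TABLE homes (sell INTEGER, rooms INTEGER)")
run_sql(engine, "INSERT INTO homes VALUES (:sell, :rooms)", {"sell": 142, "rooms": 3})
result = run_sql(engine, "SELECT * FROM homes WHERE rooms >= :rooms", {"rooms": 3})
print(result.fetchall())  # [(142, 3)]
```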
#### File: astro/utils/file.py
```python
import os
import pathlib
from typing import Union
from astro.constants import LOAD_DATAFRAME_BYTES_LIMIT, FileType
def get_size(filepath: str) -> int:
"""
Return the size (bytes) of the given file.
:param filepath: Path to a file in the filesystem
:type filepath: str
:return: File size in bytes
:rtype: int
"""
path = pathlib.Path(filepath)
return os.path.getsize(path)
def get_filetype(filepath: Union[str, pathlib.PosixPath]) -> FileType:
"""
Return a FileType given the filepath. Uses a naive strategy, using the file extension.
:param filepath: URI or Path to a file
:type filepath: str or pathlib.PosixPath
:return: The filetype (e.g. csv, ndjson, json, parquet)
:rtype: astro.constants.FileType
"""
if isinstance(filepath, pathlib.PosixPath):
extension = filepath.suffix[1:]
else:
extension = filepath.split(".")[-1]
try:
filetype = getattr(FileType, extension.upper())
except AttributeError:
raise ValueError(f"Unsupported filetype '{extension}' from file '{filepath}'.")
return filetype
def is_binary(filetype: FileType) -> bool:
"""
Return whether the given filetype corresponds to a binary format (currently only parquet).
:param filetype: File type
:type filetype: astro.constants.FileType
:return: True or False
:rtype: bool
"""
if filetype == FileType.PARQUET:
return True
return False
def is_small(filepath: str) -> bool:
"""
Checks if a file is small enough to be loaded into a Pandas dataframe in memory efficiently.
This value was obtained through performance tests.
:param filepath: Path to a file in the filesystem
:type filepath: str
:return: If the file is small enough
:rtype: boolean
"""
size_in_bytes = get_size(filepath)
return size_in_bytes <= LOAD_DATAFRAME_BYTES_LIMIT
```
#### File: tests/benchmark/analyse.py
```python
import argparse
import json
import sys
import pandas as pd
SUMMARY_FIELDS = [
"database",
"dataset",
"total_time",
"memory_rss",
"cpu_time_user",
"cpu_time_system",
]
if sys.platform == "linux":
SUMMARY_FIELDS.append("memory_pss")
SUMMARY_FIELDS.append("memory_shared")
def format_bytes(bytes_):
if abs(bytes_) < 1000:
return str(bytes_) + "B"
elif abs(bytes_) < 1e6:
return str(round(bytes_ / 1e3, 2)) + "kB"
elif abs(bytes_) < 1e9:
return str(round(bytes_ / 1e6, 2)) + "MB"
else:
return str(round(bytes_ / 1e9, 2)) + "GB"
def format_time(time):
if time < 1:
return str(round(time * 1000, 2)) + "ms"
if time < 60:
return str(round(time, 2)) + "s"
if time < 3600:
return str(round(time / 60, 2)) + "min"
else:
return str(round(time / 3600, 2)) + "hrs"
def analyse_results(results_filepath):
data = []
with open(results_filepath) as fp:
for line in fp.readlines():
data.append(json.loads(line.strip()))
df = pd.json_normalize(data, sep="_")
# calculate total CPU from process & children
mean_by_dag = df.groupby("dag_id", as_index=False).mean()
# format data
mean_by_dag["database"] = mean_by_dag.dag_id.apply(
lambda text: text.split("into_")[-1]
)
mean_by_dag["dataset"] = mean_by_dag.dag_id.apply(
lambda text: text.split("load_file_")[-1].split("_into")[0]
)
mean_by_dag["memory_rss"] = mean_by_dag.memory_full_info_rss.apply(
lambda value: format_bytes(value)
)
if sys.platform == "linux":
mean_by_dag["memory_pss"] = mean_by_dag.memory_full_info_pss.apply(
lambda value: format_bytes(value)
)
mean_by_dag["memory_shared"] = mean_by_dag.memory_full_info_shared.apply(
lambda value: format_bytes(value)
)
mean_by_dag["total_time"] = mean_by_dag["duration"].apply(
lambda ms_time: format_time(ms_time)
)
mean_by_dag["cpu_time_system"] = (
mean_by_dag["cpu_time_system"] + mean_by_dag["cpu_time_children_system"]
).apply(lambda ms_time: format_time(ms_time))
mean_by_dag["cpu_time_user"] = (
mean_by_dag["cpu_time_user"] + mean_by_dag["cpu_time_children_user"]
).apply(lambda ms_time: format_time(ms_time))
summary = mean_by_dag[SUMMARY_FIELDS]
# print Markdown tables per database
for database_name in summary["database"].unique().tolist():
print(f"\n### Database: {database_name}\n")
print(summary[summary["database"] == database_name].to_markdown(index=False))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Trigger benchmark DAG")
parser.add_argument(
"--results-filepath",
"-r",
type=str,
help="NDJSON containing the results for a benchmark run",
)
args = parser.parse_args()
print(f"Running the analysis on {args.results_filepath}...")
analyse_results(args.results_filepath)
```
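A toy run of the helpers above on two fake benchmark records; the field names mirror the NDJSON structure `analyse_results` expects, the numbers are invented, and `format_time`/`format_bytes` from this file are assumed to be in scope.

```python
import pandas as pd

records = [
    {"dag_id": "load_file_ten_kb_into_postgres", "duration": 1.2,
     "memory_full_info": {"rss": 150_000_000}},
    {"dag_id": "load_file_ten_kb_into_postgres", "duration": 1.4,
     "memory_full_info": {"rss": 170_000_000}},
]
df = pd.json_normalize(records, sep="_")
mean_by_dag = df.groupby("dag_id", as_index=False).mean()
print(format_time(float(mean_by_dag.duration[0])))               # '1.3s'
print(format_bytes(float(mean_by_dag.memory_full_info_rss[0])))  # '160.0MB'
```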
#### File: tests/operators/test_agnostic_aggregate_check.py
```python
import logging
import os
import pathlib
import pytest
from airflow.exceptions import BackfillUnfinished
from airflow.utils import timezone
import astro.sql as aql
from astro.constants import SUPPORTED_DATABASES, Database
from astro.settings import SCHEMA
from astro.sql.table import Table
from tests.operators.utils import get_table_name, run_dag
log = logging.getLogger(__name__)
DEFAULT_DATE = timezone.datetime(2016, 1, 1)
CWD = pathlib.Path(__file__).parent
@pytest.fixture(scope="module")
def table(request):
aggregate_table = Table(
"aggregate_check_test",
database="pagila",
conn_id="postgres_conn",
schema="airflow_test_dag",
)
aggregate_table_bigquery = Table(
"aggregate_check_test",
conn_id="bigquery",
schema=SCHEMA,
)
aggregate_table_sqlite = Table("aggregate_check_test", conn_id="sqlite_conn")
aggregate_table_snowflake = Table(
table_name=get_table_name("aggregate_check_test"),
database=os.getenv("SNOWFLAKE_DATABASE"), # type: ignore
schema=os.getenv("SNOWFLAKE_SCHEMA"), # type: ignore
warehouse=os.getenv("SNOWFLAKE_WAREHOUSE"), # type: ignore
conn_id="snowflake_conn",
)
path = str(CWD) + "/../data/homes_merge_1.csv"
tables = {
"postgres": aggregate_table,
"bigquery": aggregate_table_bigquery,
"sqlite": aggregate_table_sqlite,
"snowflake": aggregate_table_snowflake,
}
aql.load_file(
path=path,
output_table=tables[request.param],
).operator.execute({"run_id": "foo"})
yield tables[request.param]
tables[request.param].drop()
@pytest.mark.parametrize("table", SUPPORTED_DATABASES, indirect=True)
def test_range_values(sample_dag, table):
@aql.transform
def get_table(input_table: Table):
return "SELECT * FROM {{input_table}}"
with sample_dag:
aggregate_table = get_table(table)
aql.aggregate_check(
table=aggregate_table,
check="select count(*) FROM {{table}}",
greater_than=4,
less_than=4,
)
run_dag(sample_dag)
@pytest.mark.parametrize("table", SUPPORTED_DATABASES, indirect=True)
def test_out_of_range_value(sample_dag, table):
@aql.transform
def get_table(input_table: Table):
return "SELECT * FROM {{input_table}}"
with pytest.raises(BackfillUnfinished):
with sample_dag:
aggregate_table = get_table(table)
aql.aggregate_check(
table=aggregate_table,
check="select count(*) FROM {{table}}",
greater_than=10,
less_than=20,
)
run_dag(sample_dag)
@pytest.mark.parametrize("table", SUPPORTED_DATABASES, indirect=True)
def test_equal_to_param(sample_dag, table):
@aql.transform
def get_table(input_table: Table):
return "SELECT * FROM {{input_table}}"
with sample_dag:
aggregate_table = get_table(table)
aql.aggregate_check(
table=aggregate_table,
check="select count(*) FROM {{table}}",
equal_to=4,
)
run_dag(sample_dag)
@pytest.mark.parametrize("table", SUPPORTED_DATABASES, indirect=True)
def test_only_less_than_param(sample_dag, table):
@aql.transform
def get_table(input_table: Table):
return "SELECT * FROM {{input_table}}"
with pytest.raises(BackfillUnfinished):
with sample_dag:
aggregate_table = get_table(table)
aql.aggregate_check(
table=aggregate_table,
check="select count(*) FROM {{table}}",
less_than=3,
)
run_dag(sample_dag)
@pytest.mark.parametrize("table", SUPPORTED_DATABASES, indirect=True)
def test_only_greater_than_param(sample_dag, table):
@aql.transform
def get_table(input_table: Table):
return "SELECT * FROM {{input_table}}"
with sample_dag:
aggregate_table = get_table(table)
aql.aggregate_check(
table=aggregate_table,
check="select count(*) FROM {{table}}",
greater_than=3,
)
run_dag(sample_dag)
@pytest.mark.parametrize("table", SUPPORTED_DATABASES, indirect=True)
def test_all_three_params_provided_priority_given_to_equal_to_param(sample_dag, table):
"""greater_than should be less than or equal to less_than"""
@aql.transform
def get_table(input_table: Table):
return "SELECT * FROM {{input_table}}"
with pytest.raises(ValueError):
with sample_dag:
aggregate_table = get_table(table)
aql.aggregate_check(
table=aggregate_table,
check="select count(*) FROM {{table}}",
greater_than=20,
less_than=10,
equal_to=4,
)
run_dag(sample_dag)
@pytest.mark.parametrize("table", [Database.SQLITE])
def test_invalid_params_no_test_values(sample_dag, table):
@aql.transform
def get_table(input_table: Table):
return "SELECT * FROM {{input_table}}"
with pytest.raises(ValueError):
with sample_dag:
aggregate_table = get_table(table)
aql.aggregate_check(
table=aggregate_table, check="select count(*) FROM {{table}}"
)
run_dag(sample_dag)
@pytest.mark.parametrize("table", [Database.SQLITE])
def test_invalid_values(sample_dag, table):
"""greater_than should be less than or equal to less_than"""
@aql.transform
def get_table(input_table: Table):
return "SELECT * FROM {{input_table}}"
with pytest.raises(ValueError):
with sample_dag:
aggregate_table = get_table(table)
aql.aggregate_check(
table=aggregate_table,
check="select count(*) FROM {{table}}",
greater_than=20,
less_than=10,
)
run_dag(sample_dag)
```
#### File: tests/operators/test_agnostic_boolean_check.py
```python
import logging
import os
import pathlib
import pytest
from airflow.exceptions import BackfillUnfinished
from airflow.providers.snowflake.hooks.snowflake import SnowflakeHook
from airflow.utils import timezone
# Import Operator
import astro.sql as aql
from astro.constants import SUPPORTED_DATABASES
from astro.settings import SCHEMA
from astro.sql.operators.agnostic_boolean_check import Check
from astro.sql.table import Table
from tests.operators.utils import get_table_name, run_dag
log = logging.getLogger(__name__)
DEFAULT_DATE = timezone.datetime(2016, 1, 1)
CWD = pathlib.Path(__file__).parent
def drop_table_snowflake(
table_name: str,
conn_id: str = "snowflake_conn",
schema: str = os.environ["SNOWFLAKE_SCHEMA"],
database: str = os.environ["SNOWFLAKE_DATABASE"],
warehouse: str = os.environ["SNOWFLAKE_WAREHOUSE"],
):
hook = SnowflakeHook(
snowflake_conn_id=conn_id,
schema=schema,
database=database,
warehouse=warehouse,
)
snowflake_conn = hook.get_conn()
cursor = snowflake_conn.cursor()
cursor.execute(f"DROP TABLE IF EXISTS {table_name} CASCADE;")
snowflake_conn.commit()
cursor.close()
snowflake_conn.close()
@pytest.fixture(scope="module")
def table(request):
boolean_check_table = Table(
get_table_name("boolean_check_test"),
database="pagila",
conn_id="postgres_conn",
schema="airflow_test_dag",
)
boolean_check_table_bigquery = Table(
get_table_name("boolean_check_test"),
conn_id="bigquery",
schema=SCHEMA,
)
boolean_check_table_sqlite = Table(
get_table_name("boolean_check_test"), conn_id="sqlite_conn"
)
boolean_check_table_snowflake = Table(
table_name=get_table_name("boolean_check_test"),
database=os.getenv("SNOWFLAKE_DATABASE"), # type: ignore
schema=os.getenv("SNOWFLAKE_SCHEMA"), # type: ignore
warehouse=os.getenv("SNOWFLAKE_WAREHOUSE"), # type: ignore
conn_id="snowflake_conn",
)
path = str(CWD) + "/../data/homes_append.csv"
tables = {
"postgres": boolean_check_table,
"bigquery": boolean_check_table_bigquery,
"sqlite": boolean_check_table_sqlite,
"snowflake": boolean_check_table_snowflake,
}
aql.load_file(
path=path,
output_table=tables[request.param],
).operator.execute({"run_id": "foo"})
yield tables[request.param]
tables[request.param].drop()
@pytest.mark.parametrize("table", SUPPORTED_DATABASES, indirect=True)
def test_happyflow_success(sample_dag, table):
@aql.transform
def get_table(input_table: Table):
return "SELECT * FROM {{input_table}}"
with sample_dag:
temp_table = get_table(table)
aql.boolean_check(
table=temp_table,
checks=[Check("test_1", "rooms > 3")],
max_rows_returned=10,
)
run_dag(sample_dag)
@pytest.mark.parametrize("table", SUPPORTED_DATABASES, indirect=True)
def test_happyflow_fail(sample_dag, table, caplog):
@aql.transform
def get_table(input_table: Table):
return "SELECT * FROM {{input_table}}"
with pytest.raises(BackfillUnfinished):
with sample_dag:
temp_table = get_table(table)
aql.boolean_check(
table=temp_table,
checks=[
Check("test_1", "rooms > 7"),
Check("test_2", "beds >= 3"),
],
max_rows_returned=10,
)
run_dag(sample_dag)
expected_error = "Some of the check(s) have failed"
assert expected_error in caplog.text
@pytest.mark.parametrize(
"table",
[
"postgres",
pytest.param(
"bigquery",
marks=pytest.mark.xfail(
reason="bigquery don't expect table name before cols."
),
),
pytest.param(
"snowflake",
marks=pytest.mark.xfail(
reason="Binding data in type (table) is not supported."
),
),
"sqlite",
],
indirect=True,
)
def test_happyflow_success_with_templated_query(sample_dag, table):
@aql.transform
def get_table(input_table: Table):
return "SELECT * FROM {{input_table}}"
with sample_dag:
temp_table = get_table(table)
aql.boolean_check(
table=temp_table,
checks=[Check("test_1", "{{table}}.rooms > 3")],
max_rows_returned=10,
)
run_dag(sample_dag)
```
#### File: astro/tests/test_example_dags.py
```python
import os
import pytest
from airflow.executors.debug_executor import DebugExecutor
from airflow.models.dagbag import DagBag
from airflow.utils import timezone
from airflow.utils.db import create_default_connections
from airflow.utils.session import provide_session
from airflow.utils.state import State
DEFAULT_DATE = timezone.datetime(2016, 1, 1)
@provide_session
def get_session(session=None):
create_default_connections(session)
return session
@pytest.fixture()
def session():
return get_session()
@pytest.mark.parametrize(
"dag_id",
[
"example_amazon_s3_postgres",
"example_amazon_s3_postgres_load_and_save",
"example_amazon_s3_snowflake_transform",
"example_postgres_render",
"example_snowflake_partial_table_with_append",
"example_snowflake_render",
"example_sqlite_load_transform",
],
)
def test_example_dag(session, dag_id):
dir_path = os.path.dirname(os.path.realpath(__file__))
db = DagBag(dir_path + "/../example_dags")
dag = db.get_dag(dag_id)
if dag is None:
raise NameError(f"The DAG with dag_id: {dag_id} was not found")
dag.clear(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, dag_run_state=State.NONE)
dag.run(
executor=DebugExecutor(),
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE,
run_at_least_once=True,
)
``` |
{
"source": "jlanga/exfi",
"score": 3
} |
#### File: exfi/exfi/build_baited_bloom_filter.py
```python
import logging
import shutil
import os
from typing import List
def _get_biobloommaker_command(args: dict, output_dir: str) -> List[str]:
"""Helper function to compose the command to execute
:param dict args: Dict of arguments.
:param str: output_dir: Output folder.
:ivar dict args: Dict of arguments. The ones used are {"threads": int, "kmer": int}, where
- threads is the number of threads to be used
- kmer is the kmer size
:ivar str output_dir: Output directory. Must exist and won't be created.
.. seealso: Where it is used:
:py:meth:`build_baited_bloom_filter`
"""
biobloommaker_path = shutil.which('biobloommaker')
build_transcriptome_bf = [
biobloommaker_path,
'--file_prefix', "transcriptome",
'--output_dir', output_dir,
'--threads', str(args["threads"]),
'--kmer_size', str(args["kmer"]),
args["fasta"]
]
return build_transcriptome_bf
def _get_categorize_command(args: dict, output_dir: str) -> List[str]:
"""Helper function to compose the categorize command
:arg dict args: Dict of arguments
:arg str output_dir: The output directory
:ivar dict args: Dict of arguments. The ones used are:
- "threads": int: number of threads to be used
- "kmer": int: the kmer size,
- "reads": list: list of str of paths to the reads to be categorized
:ivar str output_dir: Must already exist. Won't be created.
.. seealso: Where it is used:
:py:meth: `build_baited_bloom_filter`
:param dict args: Dict of arguments.
:param str output_dir: Output directory.
"""
categorize_path = shutil.which('biobloomcategorizer')
categorize = [
categorize_path,
'--prefix', output_dir + '/categories',
'--filter_files', output_dir + '/transcriptome.bf',
'--threads', str(args["threads"]),
'--score', str(args["kmer"]),
'--fa',
'--stdout_filter', 'transcriptome',
] + args["reads"]
return categorize
def _get_build_bf_command(args: dict, in_fn: List[str]) -> List[str]:
"""Helper function to compose command to get the final Bloom Filter
:arg dict args: Dict of arguments.
:arg str in_fn: Path to file where the reads will be read
:ivar dict args: Dict of arguments. The ones used are:
- "kmer" (int): size of the kmers.
- "threads" (int): number of threads to be used.
- "bloom_size" (str): size of the Bloom filter in bytes. K/M/G units can be used.
- "levels" (int): number of Bloom filters used.
- "output_bloom" (str): path where to store the Bloom Filter
:ivar str in_fn: Path to file where the reads will be read. In later methods /dev/stdin is used.
:param dict args: Dict of arguments.
:param str in_fn: Input filename.
..seealso: Where it is used:
:py:meth: `build_baited_bloom_filter`
"""
abyss_bloom_path = shutil.which('abyss-bloom')
build_bf = [
abyss_bloom_path, 'build',
'--verbose',
'--kmer', str(args["kmer"]),
'--bloom-size', args["bloom_size"],
'--levels', str(args["levels"]),
'--threads', str(args["threads"]),
args["bloom"]
] + in_fn
return build_bf
def _create_links(output_dir: str) -> None:
"""Create soft links from output_dir/categries-* to /dev/null
:arg str output_dir: path where data will be stored
:ivar str output_dir: path where data will be stored. Must exist, won't be created.
..seealso: Where it is used:
:py:meth: `build_baited_bloom_filter`
:param str output_dir: Output directory.
"""
os.symlink(os.devnull, output_dir + "/categories_transcriptome.fa")
os.symlink(os.devnull, output_dir + "/categories_noMatch.fa")
os.symlink(os.devnull, output_dir + "/categories_multiMatch.fa")
os.symlink(os.devnull, output_dir + "/categories_summary.tsv")
def _destroy_links(output_dir: str) -> None:
"""Destroy the links created by _create_links
:arg str output_dir: path where data is being stored.
:ivar str output_dir: path where data will be stored. Must exist, won't be created.
..seealso: Where it is used:
:py:meth: `build_baited_bloom_filter`
:param str output_dir: Output directory.
"""
os.remove(output_dir + "/categories_transcriptome.fa")
os.remove(output_dir + "/categories_noMatch.fa")
os.remove(output_dir + "/categories_multiMatch.fa")
os.remove(output_dir + "/categories_summary.tsv")
def build_baited_bloom_filter(args: dict) -> None:
"""Run the build_baited_bloom_filter pipeline.
The pipeline works as follows:
- Build a secondary Bloom filter of the transcriptome with `biobloommaker`.
- Categorize reads with `biobloomcategorizer` and pipe to `abyss-bloom build` to build the
primary Bloom filter
:param dict args: Dict of arguments.
.. note:: Parameter `args` must have the following keys:
- fasta (str): path to transcriptome in FASTA format.
- kmer (int): size of the kmers.
- bloom_size (str): total size of the Bloom filter(s) in bytes. K/M/G units can be used.
- levels (int): number of Bloom filters used to count.
- bloom (str): path where to store the final Bloom filter.
- threads (int): number of threads to be used.
- reads (list of str): paths to the reads in FASTA/Q format, gzipped or not.
.. seealso: Functions used:
:py:meth: `_get_biobloommaker_command`, `_get_categorize_command`, `_get_build_bf_command`,
`_create_links`, `_destroy_links`.
"""
# Imports
from subprocess import Popen, PIPE
from os.path import dirname, abspath
output_dir = dirname(abspath(args["bloom"]))
# Convert single read library to list
if isinstance(args["reads"], str):
args["reads"] = [args["reads"]]
# Prepare the commands
build_transcriptome_bf = _get_biobloommaker_command(args, output_dir)
categorize = _get_categorize_command(args, output_dir)
build_bf = _get_build_bf_command(args, ["/dev/stdin"])
# Run the pipeline
logging.info("\n\nRunning command: %s\n", " ".join(build_transcriptome_bf))
# Create links to /dev/null for categories_{match,nomatch,multi}.fa and summary
_create_links(output_dir)
# Put the processes to work together
p_build_transcriptome_bf = Popen(build_transcriptome_bf, shell=False)
p_build_transcriptome_bf.wait()
logging.info("\n\nRunning command: %s | %s\n",
" ".join(categorize), " ".join(build_bf))
p_categorize = Popen(categorize, stdout=PIPE, shell=False)
p_build_bf = Popen(build_bf, stdin=p_categorize.stdout, shell=False)
p_categorize.stdout.close()
p_categorize.wait()
p_build_bf.wait()
# Clean up files from biobloommaker
if os.path.isfile(output_dir + "/transcriptome.bf"):
os.remove(output_dir + "/transcriptome.bf")
if os.path.isfile(output_dir + "/transcriptome.txt"):
os.remove(output_dir + "/transcriptome.txt")
# Clean up files from biobloomcategorizer
_destroy_links(output_dir)
```
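The pipeline above wires `biobloomcategorizer` into `abyss-bloom build` through a pipe. A generic, runnable sketch of that two-process pattern, with Python one-liners standing in for the external tools:

```python
import sys
from subprocess import PIPE, Popen

producer = Popen([sys.executable, "-c", "print('>read1'); print('ACGT')"],
                 stdout=PIPE)
consumer = Popen([sys.executable, "-c", "import sys; print(len(sys.stdin.read()))"],
                 stdin=producer.stdout)
producer.stdout.close()  # so the producer gets SIGPIPE if the consumer exits early
producer.wait()
consumer.wait()
```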
#### File: exfi/exfi/compare.py
```python
import sys
import logging
from subprocess import Popen, PIPE
from os import remove
from tempfile import mkstemp
import pandas as pd
import numpy as np
from exfi.io.bed import \
BED3_COLS, BED3_DTYPES
TP_DF_COLS = [
'chrom_pred', 'chrom_start_pred', 'chrom_end_pred',
'chrom_true', 'chrom_start_true', 'chrom_end_true'
]
TP_DF_DTYPES = {
'chrom_pred': object, 'chrom_start_pred': np.int, 'chrom_end_pred': np.int,
'chrom_true': object, 'chrom_start_true': np.int, 'chrom_end_true': np.int
}
STATS_COLS = [
'true', 'predicted', 'true_positives', 'false_positives', 'false_negatives',
'precision', 'recall', 'f_1'
]
STATS_DTYPES = {
'true': np.float, 'predicted': np.float, 'true_positives': np.float,
'false_positives': np.float, 'false_negatives': np.float,
'precision': np.float, 'recall': np.float, 'f_1': np.float
}
DISTANCES_COLS = [
'pred_chrom', 'pred_chrom_start', 'pred_chrom_end',
'true_chrom', 'true_chrom_start', 'true_chrom_end',
'distance'
]
DISTANCES_DTYPES = {
'pred_chrom': object, 'pred_chrom_start': np.int, 'pred_chrom_end': np.int,
'true_chrom': object, 'true_chrom_start': np.int, 'true_chrom_end': np.int,
'distance': np.int
}
def bedtools_intersect(bed1_fn, bed2_fn, additional_flags=None):
"""Bedtools intersect wrapper
bed1_fn: path to BED file
bed2_fn: path to BED file
additional_flags: list with additional flags to pass to bedtools in the shape of a list:
['-r', '-v', '-f', '0.95']
returns a list of lists ()
"""
if additional_flags is None:
additional_flags = []
command = ["bedtools", "intersect", "-a", bed1_fn, "-b", bed2_fn] + \
additional_flags
process = Popen(command, stdout=PIPE, stderr=PIPE)
data = [
line.decode().strip().split()
for line in process.stdout.readlines()
]
process.stdout.close()
status_code = process.wait()
if status_code != 0:
sys.exit(
"ERROR: something went wrong:\n" + \
",".join([x.decode() for x in process.stderr.readlines()])
)
return data
def classify(bed3_true, bed3_pred, fraction=0.95):
"""Compute the True Posivites, False Positives and False Negatives
between two BED3 dataframes (true and predicted) using bedtools intersect
Fraction is a real number between 0 and 1 that stands for
minimum similarity.
Result is a dict where values are dataframes
"""
logging.info('Classifying')
bed3_pred_fn = mkstemp()[1]
bed3_true_fn = mkstemp()[1]
# Dump to disk
logging.info("Dumping predictions to disk")
bed3_true\
.sort_values(by=['chrom', 'chrom_start', 'chrom_end'])\
.to_csv(path_or_buf=bed3_true_fn, sep='\t', index=False, header=False)
logging.info('Dumping true values to disk')
bed3_pred\
.sort_values(by=['chrom', 'chrom_start', 'chrom_end'])\
.to_csv(path_or_buf=bed3_pred_fn, sep='\t', index=False, header=False)
logging.info('Computing true positives')
true_positives_df = pd.DataFrame(
data=bedtools_intersect(
bed1_fn=bed3_pred_fn,
bed2_fn=bed3_true_fn,
additional_flags=['-f', f'{fraction}', '-r', '-wo']
),
columns=TP_DF_COLS + [6]
).astype(dtype=TP_DF_DTYPES)\
.drop(columns=6)
logging.info('Computing false positives')
false_positives_df = pd.DataFrame(
data=bedtools_intersect(
bed1_fn=bed3_pred_fn,
bed2_fn=bed3_true_fn,
additional_flags=['-f', f'{fraction}', '-r', '-v'],
),
columns=BED3_COLS
).astype(BED3_DTYPES)
logging.info('Computing false negatives')
false_negatives_df = pd.DataFrame(
data=bedtools_intersect(
bed1_fn=bed3_true_fn,
bed2_fn=bed3_pred_fn,
additional_flags=['-f', f'{fraction}', '-r', '-v']
),
columns=BED3_COLS
).astype(BED3_DTYPES)
remove(bed3_pred_fn)
remove(bed3_true_fn)
return {
'true_positives': true_positives_df,
'false_positives': false_positives_df,
'false_negatives': false_negatives_df
}
def compute_precision(true_positives, false_positives):
"""Compute precision
>>> compute_precision(0, 10)
0.0
>>> round(compute_precision(446579, 13932), 6)
0.969747
"""
return true_positives / (true_positives + false_positives)
def compute_recall(true_positives, false_negatives):
"""Compute recall
>>> compute_recall(0, 10)
0.0
>>> round(compute_recall(446579, 48621), 6)
0.901815
"""
return true_positives / (true_positives + false_negatives)
def compute_f_1(true_positives, false_positives, false_negatives):
"""Compute F_1
>>> compute_f_1(1, 1, 1)
0.5
>>> round(compute_f_1(446579, 13932, 48621), 6)
0.934548
"""
precision = compute_precision(true_positives, false_positives)
recall = compute_recall(true_positives, false_negatives)
return 2 * precision * recall / (precision + recall)
def compute_stats_per_exon(classification):
"""Compute the classification stats per exon
Input should be the one from exfi.compare.classify
"""
logging.info('Computing the stats per exon')
tp_exons = classification['true_positives'].shape[0]
fp_exons = classification['false_positives'].shape[0]
fn_exons = classification['false_negatives'].shape[0]
true_exons = tp_exons + fn_exons
pred_exons = tp_exons + fp_exons
stats = pd.DataFrame(
data=[[
true_exons, pred_exons,
tp_exons, fp_exons, fn_exons,
compute_precision(tp_exons, fp_exons),
compute_recall(tp_exons, fn_exons),
compute_f_1(tp_exons, fp_exons, fn_exons)
]],
columns=STATS_COLS
)\
.astype(STATS_DTYPES)
return stats
def compute_true_bases(classification):
"""Compute the total number of bases in the truth splice graph"""
true_positives, _, false_negatives = classification.values()
return \
np.sum(true_positives.chrom_end_true - true_positives.chrom_start_true) + \
np.sum(false_negatives.chrom_end - false_negatives.chrom_start)
def compute_pred_bases(classification):
"""Compute the total number of bases in the predicted splice graph"""
true_positives, false_positives, _ = classification.values()
return \
np.sum(true_positives.chrom_end_pred - true_positives.chrom_start_pred) + \
np.sum(false_positives.chrom_end - false_positives.chrom_start)
def compute_true_positive_bases(classification):
"""Compute the total number of true positive bases in the predicted splice
graph
TP bases are the common part between the two exons
"""
true_positives = classification['true_positives']
return np.sum(
true_positives[['chrom_end_true', 'chrom_end_pred']].min(axis=1) -
true_positives[['chrom_start_true', 'chrom_start_pred']].max(axis=1)
)
def compute_false_positive_bases(classification):
"""Compute the total number of true positive bases in the predicted splice
graph
FP bases are:
- all bases in false positive exons
- all bases over predicted in the start
- all bases over predicted in the end
"""
tp_df, fp_df, _ = classification.values()
starts = tp_df[tp_df.chrom_start_pred < tp_df.chrom_start_true]
ends = tp_df[tp_df.chrom_end_pred > tp_df.chrom_end_true]
fp_bases = \
np.sum(fp_df.chrom_end - fp_df.chrom_start) + \
np.sum(starts.chrom_start_true - starts.chrom_start_pred) + \
np.sum(ends.chrom_end_pred - ends.chrom_end_true)
return fp_bases
def compute_false_negative_bases(classification):
"""Compute the total number of true positive bases in the predicted splice
graph
FN bases are:
- all bases in false negative exons
- all bases underpredicted in the start
- all bases underpredicted in the end
"""
tp_df, _, fn_df = classification.values()
starts = tp_df[tp_df.chrom_start_pred > tp_df.chrom_start_true]
ends = tp_df[tp_df.chrom_end_pred < tp_df.chrom_end_true]
fn_bases = \
np.sum(fn_df.chrom_end - fn_df.chrom_start) + \
np.sum(starts.chrom_start_pred - starts.chrom_start_true) + \
np.sum(ends.chrom_end_pred - ends.chrom_end_true)
return fn_bases
def compute_stats_per_base(classification):
"""Compute the classification stats per base pair
Input should be the one from exfi.compare.classify
"""
logging.info('Computing the stats per base')
true_bases = compute_true_bases(classification)
pred_bases = compute_pred_bases(classification)
tp_bases = compute_true_positive_bases(classification)
fp_bases = compute_false_positive_bases(classification)
fn_bases = compute_false_negative_bases(classification)
stats = pd.DataFrame(
data=[[
true_bases, pred_bases,
tp_bases, fp_bases, fn_bases,
compute_precision(tp_bases, fp_bases),
compute_recall(tp_bases, fn_bases),
compute_f_1(tp_bases, fp_bases, fn_bases)
]],
columns=STATS_COLS
)\
.astype(STATS_DTYPES)
return stats
def get_starts(bed3):
"""Get the BED coordinates of the first nucleotide of each record."""
starts = bed3.copy()
starts['chrom_end'] = starts['chrom_start'] + 1
return starts.sort_values(by=['chrom', 'chrom_start'])
def get_ends(bed3):
"""Get the BED coordinates of the last nucleotide of each record."""
ends = bed3.copy()
ends['chrom_start'] = ends['chrom_end'] - 1
return ends.sort_values(by=['chrom', 'chrom_start'])
def get_distances(bed3_true, bed3_pred):
"""For every record in bed3_pred, get the closest record and distance to
bed3_true."""
bed3_true_fn = mkstemp()[1]
bed3_pred_fn = mkstemp()[1]
distances_fn = mkstemp()[1]
bed3_true.to_csv(bed3_true_fn, sep='\t', index=False, header=False)
bed3_pred.to_csv(bed3_pred_fn, sep='\t', index=False, header=False)
command = [
'bedtools', 'closest',
'-a', bed3_pred_fn, '-b', bed3_true_fn,
'-d', '-k', '1', '-t', 'first'
]
with open(distances_fn, 'w') as out:
process = Popen(command, stdout=out)
process.wait()
distances = pd.read_csv(
distances_fn,
sep='\t',
names=DISTANCES_COLS
)\
.astype(DISTANCES_DTYPES)
remove(bed3_true_fn)
remove(bed3_pred_fn)
remove(distances_fn)
return distances
def compute_stats_per_ieb(true_bed3, pred_bed3, max_distance=10):
"""Compute the classification stats per IEB"""
true_starts = get_starts(true_bed3)
true_ends = get_ends(true_bed3)
pred_starts = get_starts(pred_bed3)
pred_ends = get_ends(pred_bed3)
dist_start = get_distances(true_starts, pred_starts)
dist_end = get_distances(true_ends, pred_ends)
true_ieb = true_starts.shape[0] + true_ends.shape[0]
pred_ieb = pred_starts.shape[0] + pred_ends.shape[0]
tp_ieb = sum(
(dist_start.distance <= max_distance) & (dist_start.distance >= 0)
) + sum(
(dist_end.distance <= max_distance) & (dist_end.distance >= 0)
)
fp_ieb = pred_ieb - tp_ieb
fn_ieb = true_ieb - tp_ieb
stats = pd.DataFrame(
data=[[
true_ieb, pred_ieb,
tp_ieb, fp_ieb, fn_ieb,
compute_precision(tp_ieb, fp_ieb),
compute_recall(tp_ieb, fn_ieb),
compute_f_1(tp_ieb, fp_ieb, fn_ieb)
]],
columns=STATS_COLS
)\
.astype(STATS_DTYPES)
return stats
```
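A toy example of the IEB helpers on a two-exon transcript; column names follow `BED3_COLS`, the coordinates are invented, and `get_starts`/`get_ends` from this module are assumed to be in scope.

```python
import pandas as pd

bed3 = pd.DataFrame(
    data=[["tr1", 0, 120], ["tr1", 120, 250]],
    columns=["chrom", "chrom_start", "chrom_end"],
)
print(get_starts(bed3))  # 1-bp intervals [0, 1) and [120, 121)
print(get_ends(bed3))    # 1-bp intervals [119, 120) and [249, 250)
```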
#### File: exfi/io/gfa1_to_bed.py
```python
import logging
import numpy as np
from exfi.io.bed import BED4_COLS, BED4_DTYPES
from exfi.io.read_gfa import read_gfa1
def gfa1_to_bed4(filename):
"""Read a GFA1 file and convert it to BED4"""
logging.info('Converting GFA1 to BED4')
containments = read_gfa1(filename)['containments']
logging.info('Renaming columns')
containments = containments.rename(columns={
"container": "chrom",
"contained": "name"
})
logging.info('Overlap to int')
containments["overlap"] = containments\
.overlap.map(lambda x: np.int(x[:-1]))
logging.info('Computing coordinates')
containments["chrom_start"] = containments["pos"]
containments["chrom_end"] = containments["pos"] + containments["overlap"]
containments = containments[BED4_COLS]
containments = containments.astype(BED4_DTYPES)
logging.info('Done')
return containments
```
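A toy version of the coordinate arithmetic in `gfa1_to_bed4`: one containment record saying an exon sits at position 10 of transcript tr1 with a 60M overlap (names and numbers are invented).

```python
import pandas as pd

containments = pd.DataFrame({
    "chrom": ["tr1"], "name": ["EXON1"], "pos": [10], "overlap": ["60M"],
})
containments["overlap"] = containments.overlap.map(lambda x: int(x[:-1]))
containments["chrom_start"] = containments["pos"]
containments["chrom_end"] = containments["pos"] + containments["overlap"]
print(containments[["chrom", "chrom_start", "chrom_end", "name"]])  # tr1 10 70 EXON1
```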
#### File: exfi/io/gff3_to_bed.py
```python
import logging
import pandas as pd
from exfi.io.bed import BED3_COLS, BED3_DTYPES
GFF3_COLS = [
"seqid", "source", "type", "start", "end", "score", "strand", "phase",
"attributes"
]
def gff3_to_bed3(gff3_in, mode="ensembl"):
"""Read a GFF3 file and convert it to BED3, where coordinates are with
respect to the transcriptome
Modes available:
- "ensembl": for files downloaded from Ensembl,
- "gmap": for GFF3 files generated from GMAP,
- "ncbi": for GFF3 files downloaded from NCBI Genomes
"""
logging.info("Reading GFF3 file")
raw = pd.read_csv(
sep='\t',
na_values=".",
usecols=["type", "start", "end", "strand", "attributes"],
filepath_or_buffer=gff3_in,
comment="#",
header=None,
names=GFF3_COLS,
low_memory=False # Convert types at the end. Seqid is char, not int
)
if raw.shape[0] == 0:
exons = pd.DataFrame(columns=BED3_COLS)
exons = exons.astype(BED3_DTYPES)
return exons
logging.info('Extracting the transcript ids')
if mode == "gmap":
logging.info("gff3 comes from gmap")
exons = raw[raw['type'] == 'cDNA_match'].drop(columns='type')
exons['transcript_id'] = exons['attributes']\
.str.split(";").str[1]\
.str.extract(r'Name=([\w\d.-_]+)')
elif mode == "ncbi":
logging.info("gff3 comes from NCBI Genomes")
exons = raw[raw['type'] == 'exon'].drop(columns='type')
exons['transcript_id'] = exons.attributes\
.str.extract(r"transcript_id=([A-Za-z0-9_.]+)")
exons = exons.dropna()
else:
logging.info('gff3 comes from ensembl')
exons = raw[raw['type'] == 'exon'].drop(columns='type')
exons["transcript_id"] = exons["attributes"]\
.str.split(";", 1, ).str[0]\
.str.extract(r'Parent=transcript:([\w\d.-_]+)')
exons = exons[['transcript_id', 'strand', 'start', 'end']]
logging.info('Reordering exons by strand')
positive = (
exons
[exons['strand'] == '+']
.drop(columns='strand')
.sort_values(by=['transcript_id', 'start', 'end'])
)
negative = (
exons
[exons['strand'] == '-']
.drop(columns='strand')
.sort_values(
by=['transcript_id', 'start', 'end'],
ascending=[True, False, False]
)
)
merged = pd.concat([positive, negative])
logging.info('Computing lengths')
merged['length'] = merged['end'] - merged['start'] + 1
logging.info('Computing ends')
merged['transcript_end'] = (
merged
.groupby('transcript_id')
['transcript_id', 'length']
.cumsum()
)
logging.info('Computing starts')
merged['transcript_start'] = merged['transcript_end'] - merged['length']
logging.info('Tidying up')
merged = merged[['transcript_id', 'transcript_start', 'transcript_end']]
merged = merged.rename(columns={
'transcript_id': 'chrom',
'transcript_start': 'chrom_start',
'transcript_end': 'chrom_end'
})
merged = merged.astype(BED3_DTYPES)
merged = merged.reset_index(drop=True)
logging.info('Done')
return merged
```
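The coordinate conversion hinges on a per-transcript cumulative sum of exon lengths. A minimal illustration of that trick on invented data, using column selection rather than the legacy tuple indexing above:

```python
import pandas as pd

exons = pd.DataFrame({
    "transcript_id": ["t1", "t1", "t2"],
    "length": [100, 50, 80],
})
exons["chrom_end"] = exons.groupby("transcript_id")["length"].cumsum()
exons["chrom_start"] = exons["chrom_end"] - exons["length"]
print(exons[["transcript_id", "chrom_start", "chrom_end"]])
# t1 spans [0, 100) and [100, 150); t2 spans [0, 80)
```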
#### File: exfi/io/masking.py
```python
import logging
import numpy as np
def cigar_to_int(cigar):
"""Convert a simple CIGAR string to overlap int
>>> cigar_to_int('71N')
-71
>>> cigar_to_int('3M')
3
"""
if cigar[-1] == 'N':
return -int(cigar[:-1])
return int(cigar[:-1])
def soft_mask(sequence, left, right):
"""Lowercase the first left bases and last right bases of sequence
>>> soft_mask('ACCGATCGATCGTAG', 2, 1)
'acCGATCGATCGTAg'
>>> soft_mask('ACCGATCGATCGTAG', 0, 2)
'ACCGATCGATCGTag'
>>> soft_mask('ACCGATCGATCGTAG', 2, 0)
'acCGATCGATCGTAG'
>>> soft_mask('ACCGATCGATCGTAG', 0, 0)
'ACCGATCGATCGTAG'
"""
if left == 0 and right == 0:
return sequence
if left == 0 and right > 0:
return sequence[:-right] + sequence[-right:].lower()
if left > 0 and right == 0:
return sequence[:left].lower() + sequence[left:]
return sequence[:left].lower() + sequence[left:-right] + sequence[-right:].lower()
def hard_mask(sequence, left, right):
"""Mask with N the first left bases and last right bases of sequence
>>> hard_mask('ACCGATCGATCGTAG', 2, 1)
'NNCGATCGATCGTAN'
>>> hard_mask('ACCGATCGATCGTAG', 0, 2)
'ACCGATCGATCGTNN'
>>> hard_mask('ACCGATCGATCGTAG', 2, 0)
'NNCGATCGATCGTAG'
>>> hard_mask('ACCGATCGATCGTAG', 0, 0)
'ACCGATCGATCGTAG'
"""
if left == 0 and right == 0:
return sequence
if left == 0 and right > 0:
return sequence[:-right] + 'N' * right
if left > 0 and right == 0:
return 'N' * left + sequence[left:]
return 'N' * left + sequence[left:-right] + 'N' * right
def mask(node2sequence, edge2overlap, masking: str = "none"):
"""If any of the soft mask or hard mask are activated, mask
:param dict exon_dict: Dict of the shape exon_id: sequence.
:param dict overlap_dict: Dict of the shape (exon1, exon2): overlap between them.
:param str masking: Type of masking to apply. Options: hard, soft, none
(Default value = "None") .
"""
logging.info('Masking sequences')
if masking == 'none':
return node2sequence
# Compose a dataframe of name, sequence, bases to trim to the left
# and bases to trim to the right
logging.info('Computing bases to trim to the right and to the left')
complete = node2sequence.merge(
edge2overlap[['u', 'overlap']]\
.rename(columns={'u': 'name', 'overlap': 'mask_right'}),
on=['name'],
how='outer'
).merge(
edge2overlap[['v', 'overlap']]\
.rename(columns={'v': 'name', 'overlap': 'mask_left'}),
on=['name'],
how='outer'
)\
.fillna(0)\
.astype({'mask_right': np.int64, 'mask_left':np.int64})
logging.info('Removing negative masking')
complete['mask_right'] = complete.mask_right\
.map(lambda x: x if x > 0 else 0)
complete['mask_left'] = complete.mask_left\
.map(lambda x: x if x > 0 else 0)
if masking == "hard":
logging.info("Hard masking sequences")
complete['sequence'] = complete.apply(
lambda x: hard_mask(x.sequence, x.mask_left, x.mask_right),
axis=1
)
elif masking == "soft":
logging.info("Soft masking sequences")
complete['sequence'] = complete.apply(
lambda x: soft_mask(x.sequence, x.mask_left, x.mask_right),
axis=1
)
logging.info('Tidying up')
node2sequence_masked = complete\
[['name', 'sequence']]\
.reset_index(drop=True)
logging.info('Done')
return node2sequence_masked
```
#### File: exfi/io/read_bed.py
```python
import logging
import pandas as pd
from exfi.io.bed import BED3_COLS, BED3_DTYPES
def read_bed3(filename):
"""Read a BED file and return the BED3 dataframe."""
logging.info('Reading BED3 from disk')
bed3 = pd.read_csv(
filepath_or_buffer=filename,
header=None,
sep='\t',
usecols=[0, 1, 2],
names=BED3_COLS,
engine='c'
).astype(BED3_DTYPES)
logging.info('Done')
return bed3
```
#### File: exfi/tests/auxiliary_functions.py
```python
import tempfile
import shutil
from subprocess import \
Popen, PIPE
import numpy as np
import pandas as pd
from Bio.SeqIO.FastaIO import SimpleFastaParser
from exfi.find_exons import \
process_output, \
get_fasta, \
find_exons
from exfi.build_baited_bloom_filter import \
_get_build_bf_command
def _command_to_list(command):
"""Execute command and return output as list of strings"""
process = Popen(command, stdout=PIPE, shell=False)
results = process_output(process)
return results
def _fasta_to_list(filename):
"""fasta to list with SimpleFastaParser"""
with open(filename, "r") as handle:
return [record for record in SimpleFastaParser(handle)]
def _getfasta_to_list(transcriptome_dict, iterable_of_bed):
"""Convert to a list the generator from getfasta"""
return list(get_fasta(transcriptome_dict, iterable_of_bed))
def _silent_popen(command):
"""Create a Popen with no stderr and stdout"""
return Popen(
command,
stdout=open("/dev/null", 'w'),
stderr=open("/dev/null", 'w'),
shell=False
)
def _bf_and_process(reads_fns, transcriptome_fn):
"""(list of str, str) -> list
Build the BF and process the reads
"""
tmp_dir = tempfile.mkdtemp()
tmp_bf = tmp_dir + "/transcriptome_noreads.bf"
args = {
"kmer": 30,
"bloom_size": "100M",
"levels": 1,
"threads": 1,
"bloom": tmp_bf,
"reads": reads_fns,
"fasta": transcriptome_fn,
"max_fp_bases": 5,
"max_overlap": 10
}
command = _get_build_bf_command(args, reads_fns)
process = _silent_popen(command)
process.wait()
results = find_exons(args)
shutil.rmtree(tmp_dir)
bed3 = pd.DataFrame(
data=results,
columns=["chrom", "chromStart", "chromEnd"]
)
    bed3.chromStart = bed3.chromStart.astype(np.int64)
    bed3.chromEnd = bed3.chromEnd.astype(np.int64)
return bed3
```
#### File: exfi/tests/test_compare.py
```python
from unittest import main, TestCase
from exfi.compare import \
bedtools_intersect, \
classify, \
compute_stats_per_exon, \
compute_true_bases, \
compute_pred_bases, \
compute_true_positive_bases, \
compute_false_positive_bases, \
compute_false_negative_bases, \
compute_stats_per_base, \
compute_stats_per_ieb
from tests.compare import \
BED3_EMPTY_FN, BED3_TRUE_FN, BED3_PRED_FN, \
BED3_EMPTY, BED3_TRUE, BED3_PRED, \
TP_DF, FP_DF, FN_DF, \
CLASSIFICATION, \
STATS_PER_EXON, STATS_PER_BASE, STATS_PER_IEB
class TestBedtoolsIntersect(TestCase):
"""Tests for exfi.compare.bedtools_intersect"""
def test_empty(self):
"""exfi.compare.bedtools_intersect: empty case"""
observed = bedtools_intersect(BED3_EMPTY_FN, BED3_EMPTY_FN, [])
print('Observed:', observed, 'Expected:', BED3_EMPTY.values, sep='\n')
self.assertEqual(observed, [])
def test_tp(self):
"""exfi.compare.bedtools_intersect: true positive case"""
observed = bedtools_intersect(
bed1_fn=BED3_PRED_FN,
bed2_fn=BED3_TRUE_FN,
additional_flags=['-f', '0.99', '-r', '-wo']
)
print('Observed:', observed, 'Expected:', TP_DF, sep='\n')
self.assertEqual(observed, TP_DF.values.tolist())
def test_fp(self):
"""exfi.compare.bedtools_intersect: false positive case"""
observed = bedtools_intersect(
bed1_fn=BED3_PRED_FN,
bed2_fn=BED3_TRUE_FN,
additional_flags=['-f', '0.99', '-r', '-v']
)
print('Observed:', observed, 'Expected:', FP_DF, sep='\n')
self.assertEqual(observed, FP_DF.values.tolist())
def test_fn(self):
"""exfi.compare.bedtools_intersect: false negative case"""
observed = bedtools_intersect(
bed1_fn=BED3_TRUE_FN,
bed2_fn=BED3_PRED_FN,
additional_flags=['-f', '0.99', '-r', '-v']
)
print('Observed:', observed, 'Expected:', FN_DF, sep='\n')
self.assertEqual(observed, FN_DF.values.tolist())
class TestClassify(TestCase):
"""Tests for exfi.compare.classify"""
def test_complex(self):
"""exfi.compare.classify: Test some exons"""
observed = classify(
bed3_true=BED3_TRUE, bed3_pred=BED3_PRED, fraction=0.99
)
print(
'Observed:', observed['true_positives'],
'Expected:', CLASSIFICATION['true_positives'],
sep='\n'
)
self.assertTrue(
observed['true_positives'].equals(CLASSIFICATION['true_positives'])
)
print(
'Observed:', observed['false_positives'],
'Expected:', CLASSIFICATION['false_positives'],
sep='\n'
)
self.assertTrue(
observed['false_positives'].equals(CLASSIFICATION['false_positives'])
)
print(
'Observed:', observed['false_negatives'],
'Expected:', CLASSIFICATION['false_negatives'],
sep='\n'
)
self.assertTrue(
observed['false_negatives'].equals(CLASSIFICATION['false_negatives'])
)
class TestComputeStatsPerExon(TestCase):
"""Tests for exfi.compare.compute_stats_per_exon"""
def test_complex(self):
"""exfi.compare.compute_stats_per_exon: some exons"""
observed = compute_stats_per_exon(CLASSIFICATION)
print(observed.values.tolist())
print(STATS_PER_EXON.values.tolist())
self.assertTrue(observed.equals(STATS_PER_EXON))
class TestComputeTrueBases(TestCase):
"""Tests for exfi.compare.compute_true_bases"""
def test_complex(self):
"""exfi.compare.compute_true_bases: some exons"""
observed = compute_true_bases(CLASSIFICATION)
print(observed)
self.assertEqual(observed, 5080.0)
class TestComputePredBases(TestCase):
"""Tests for exfi.compare.compute_pred_bases"""
def test_complex(self):
"""exfi.compare.compute_pred_bases: some exons"""
observed = compute_pred_bases(CLASSIFICATION)
print(observed)
self.assertEqual(observed, 5090.0)
class TestComputeTruePositiveBases(TestCase):
"""Tests for exfi.compare.compute_true_positive_bases"""
def test_complex(self):
"""exfi.compare.compute_true_positive_bases: some exons"""
observed = compute_true_positive_bases(CLASSIFICATION)
self.assertEqual(observed, 4159.0)
class TestComputeFalsePositiveBases(TestCase):
"""Tests for exfi.compare.compute_false_positive_bases"""
def test_complex(self):
"""exfi.compare.compute_false_positive_bases: some exons"""
observed = compute_false_positive_bases(CLASSIFICATION)
self.assertEqual(observed, 931)
class TestComputeFalseNegativeBases(TestCase):
"""Tests for exfi.compare.compute_false_negative_bases"""
def test_complex(self):
"""exfi.compare.compute_false_negative_bases: some exons"""
observed = compute_false_negative_bases(CLASSIFICATION)
self.assertEqual(observed, 911)
class TestComputeStatsPerBase(TestCase):
"""Tests for exfi.compare.compute_stats_per_base"""
def test_complex(self):
"""exfi.compare.compute_stats_per_base: some exons"""
observed = compute_stats_per_base(CLASSIFICATION)
print(observed.values.tolist())
print(STATS_PER_BASE.values.tolist())
self.assertTrue(observed.equals(STATS_PER_BASE))
class TestComputeStatsPerIEB(TestCase):
"""Tests for exfi.compare.compute_stats_per_ieb"""
def test_complex(self):
"""exfi.compare.compute_stats_per_ieb: some exons"""
observed = compute_stats_per_ieb(BED3_TRUE, BED3_PRED, 2)
print(observed.values.tolist())
print(STATS_PER_IEB.values.tolist())
self.assertTrue(observed.equals(STATS_PER_IEB))
if __name__ == '__main__':
main()
```
#### File: tests/test_io/test_bed4_to_gfa1.py
```python
from unittest import TestCase, main
from tempfile import mkstemp
import os
import filecmp
from exfi.io.bed4_to_gfa1 import \
compute_header, \
compute_segments, \
compute_links, \
compute_containments, \
compute_paths, \
bed4_to_gfa1
from tests.io.bed import \
BED4_EMPTY, BED4_SIMPLE, BED4_COMPLEX
from tests.io.transcriptome_dicts import \
TRANSCRIPTOME_EMPTY_DICT, TRANSCRIPTOME_SIMPLE_DICT, \
TRANSCRIPTOME_COMPLEX_DICT
from tests.io.gfa1 import \
HEADER, \
SEGMENTS_EMPTY, SEGMENTS_SIMPLE, SEGMENTS_COMPLEX, \
SEGMENTS_COMPLEX_SOFT, SEGMENTS_COMPLEX_HARD, \
LINKS_EMPTY, LINKS_SIMPLE, LINKS_COMPLEX, \
CONTAINMENTS_EMPTY, CONTAINMENTS_SIMPLE, CONTAINMENTS_COMPLEX, \
PATHS_EMPTY, PATHS_SIMPLE, PATHS_COMPLEX, \
GFA1_EMPTY_FN, GFA1_SIMPLE_FN, GFA1_COMPLEX_FN, \
GFA1_COMPLEX_SOFT_FN, GFA1_COMPLEX_HARD_FN, \
GFA1_COMPLEX_COLLAPSED_FN, \
GFA1_COMPLEX_COLLAPSED_SOFT_FN, GFA1_COMPLEX_COLLAPSED_HARD_FN
class TestComputeHeader(TestCase):
"""Tests for exfi.io.bed4_to_gfa1.compute_header"""
def test_header(self):
"""exfi.io.bed4_to_gfa1.compute_header: single test"""
observed = compute_header()
self.assertTrue(observed.equals(HEADER))
class TestComputeSegments(TestCase):
"""Tests for exfi.io.bed4_to_gfa1.compute_segments"""
def test_empty(self):
"""exfi.io.bed4_to_gfa1.compute_segments: empty case"""
observed = compute_segments(BED4_EMPTY, TRANSCRIPTOME_EMPTY_DICT)
print("Observed:", observed, "Expected:", SEGMENTS_EMPTY, sep="\n")
self.assertTrue(observed.equals(SEGMENTS_EMPTY))
def test_simple(self):
"""exfi.io.bed4_to_gfa1.compute_segments: simple case"""
observed = compute_segments(BED4_SIMPLE, TRANSCRIPTOME_SIMPLE_DICT)
print("Observed:", observed, "Expected:", SEGMENTS_SIMPLE, sep="\n")
self.assertTrue(observed.equals(SEGMENTS_SIMPLE))
def test_complex(self):
"""exfi.io.bed4_to_gfa1.compute_segments: complex case"""
observed = compute_segments(BED4_COMPLEX, TRANSCRIPTOME_COMPLEX_DICT)
print("Observed:", observed, "Expected:", SEGMENTS_COMPLEX, sep="\n")
self.assertTrue(observed.equals(SEGMENTS_COMPLEX))
def test_complex_simple(self):
"""exfi.io.bed4_to_gfa1.compute_segments: complex case"""
observed = compute_segments(
BED4_COMPLEX, TRANSCRIPTOME_COMPLEX_DICT, 'soft'
)
print("Observed:", observed, "Expected:", SEGMENTS_COMPLEX_SOFT,
sep="\n")
self.assertTrue(observed.equals(SEGMENTS_COMPLEX_SOFT))
def test_complex_hard(self):
"""exfi.io.bed4_to_gfa1.compute_segments: complex case"""
observed = compute_segments(
BED4_COMPLEX, TRANSCRIPTOME_COMPLEX_DICT, 'hard'
)
print("Observed:", observed, "Expected:", SEGMENTS_COMPLEX_HARD,
sep="\n")
self.assertTrue(observed.equals(SEGMENTS_COMPLEX_HARD))
class TestComputeLinks(TestCase):
"""Tests for exfi.io.bed4_to_gfa1.compute_links"""
def test_empty(self):
"""exfi.io.bed4_to_gfa1.compute_links: empty case"""
observed = compute_links(BED4_EMPTY)
self.assertTrue(observed.equals(LINKS_EMPTY))
def test_simple(self):
"""exfi.io.bed4_to_gfa1.compute_links: simple case"""
observed = compute_links(BED4_SIMPLE)
self.assertTrue(observed.equals(LINKS_SIMPLE))
def test_complex(self):
"""exfi.io.bed4_to_gfa1.compute_links: complex case"""
observed = compute_links(BED4_COMPLEX)
# print("Observed", observed, observed.dtypes, sep="\n")
# print("Expected", LINKS_COMPLEX, LINKS_COMPLEX.dtypes, sep="\n")
self.assertTrue(observed.equals(LINKS_COMPLEX))
class TestComputeContainments(TestCase):
"""Tests for exfi.io.bed4_to_gfa1.compute_containments"""
def test_empty(self):
"""exfi.io.bed4_to_gfa1.compute_containments: empty case"""
observed = compute_containments(BED4_EMPTY)
self.assertTrue(observed.equals(CONTAINMENTS_EMPTY))
def test_simple(self):
"""exfi.io.bed4_to_gfa1.compute_containments: simple case"""
observed = compute_containments(BED4_SIMPLE)
# print("Observed", observed, observed.dtypes, sep="\n")
# print("Expected", CONTAINMENTS_SIMPLE, CONTAINMENTS_SIMPLE.dtypes, sep="\n")
self.assertTrue(observed.equals(CONTAINMENTS_SIMPLE))
def test_complex(self):
"""exfi.io.bed4_to_gfa1.compute_containments: complex case"""
observed = compute_containments(BED4_COMPLEX)
# print("Observed", observed, observed.dtypes, sep="\n")
# print("Expected", CONTAINMENTS_COMPLEX, CONTAINMENTS_COMPLEX.dtypes, sep="\n")
self.assertTrue(observed.equals(CONTAINMENTS_COMPLEX))
class TestComputePaths(TestCase):
"""Tests for exfi.io.bed4_to_gfa1.compute_paths"""
def test_empty(self):
"""exfi.io.bed4_to_gfa1.compute_paths: empty case"""
observed = compute_paths(BED4_EMPTY)
print("Observed", observed, observed.dtypes, sep="\n")
print("Expected", PATHS_EMPTY, PATHS_EMPTY.dtypes, sep="\n")
self.assertTrue(observed.equals(PATHS_EMPTY))
def test_simple(self):
"""exfi.io.bed4_to_gfa1.compute_paths: simple case"""
observed = compute_paths(BED4_SIMPLE)
print("Observed", observed, observed.dtypes, sep="\n")
print("Expected", PATHS_SIMPLE, PATHS_SIMPLE.dtypes, sep="\n")
self.assertTrue(observed.equals(PATHS_SIMPLE))
def test_complex(self):
"""exfi.io.bed4_to_gfa1.compute_paths: complex case"""
observed = compute_paths(BED4_COMPLEX)
print("Observed", observed, observed.dtypes, sep="\n")
print("Expected", PATHS_COMPLEX, PATHS_COMPLEX.dtypes, sep="\n")
self.assertTrue(observed.equals(PATHS_COMPLEX))
class TestBED4TOGFA1(TestCase):
"""Tests for exfi.io.bed4_to_gfa1.bed4_to_gfa1"""
def test_empty(self):
"""exfi.io.bed4_to_gfa1.bed4_to_gfa1: empty case"""
tmp_file = mkstemp()[1]
print(tmp_file)
bed4_to_gfa1(
gfa1_fn=tmp_file,
bed4=BED4_EMPTY,
transcriptome_dict=TRANSCRIPTOME_EMPTY_DICT,
collapse=False
)
self.assertTrue(filecmp.cmp(tmp_file, GFA1_EMPTY_FN))
os.remove(tmp_file)
def test_simple(self):
"""exfi.io.bed4_to_gfa1.bed4_to_gfa1: simple case"""
tmp_file = mkstemp()[1]
print(tmp_file)
bed4_to_gfa1(
gfa1_fn=tmp_file,
bed4=BED4_SIMPLE,
transcriptome_dict=TRANSCRIPTOME_SIMPLE_DICT,
collapse=False
)
self.assertTrue(filecmp.cmp(tmp_file, GFA1_SIMPLE_FN))
os.remove(tmp_file)
def test_complex(self):
"""exfi.io.bed4_to_gfa1.bed4_to_gfa1: complex case"""
tmp_file = mkstemp()[1]
print(tmp_file)
bed4_to_gfa1(
gfa1_fn=tmp_file,
bed4=BED4_COMPLEX,
transcriptome_dict=TRANSCRIPTOME_COMPLEX_DICT,
collapse=False
)
self.assertTrue(filecmp.cmp(tmp_file, GFA1_COMPLEX_FN))
os.remove(tmp_file)
def test_complex_soft(self):
"""exfi.io.bed4_to_gfa1.bed4_to_gfa1: complex soft masked case"""
tmp_file = mkstemp()[1]
print(tmp_file)
bed4_to_gfa1(
gfa1_fn=tmp_file,
bed4=BED4_COMPLEX,
transcriptome_dict=TRANSCRIPTOME_COMPLEX_DICT,
masking='soft'
)
self.assertTrue(filecmp.cmp(tmp_file, GFA1_COMPLEX_SOFT_FN))
os.remove(tmp_file)
def test_complex_hard(self):
"""exfi.io.bed4_to_gfa1.bed4_to_gfa1: complex hard masked case"""
tmp_file = mkstemp()[1]
print(tmp_file)
bed4_to_gfa1(
gfa1_fn=tmp_file,
bed4=BED4_COMPLEX,
transcriptome_dict=TRANSCRIPTOME_COMPLEX_DICT,
masking='hard',
collapse=False
)
self.assertTrue(filecmp.cmp(tmp_file, GFA1_COMPLEX_HARD_FN))
os.remove(tmp_file)
def test_complex_collapse(self):
"""exfi.io.bed4_to_gfa1.bed4_to_gfa1: complex collapsed case"""
tmp_file = mkstemp()[1]
print(tmp_file)
bed4_to_gfa1(
gfa1_fn=tmp_file,
bed4=BED4_COMPLEX,
transcriptome_dict=TRANSCRIPTOME_COMPLEX_DICT,
collapse=True
)
self.assertTrue(filecmp.cmp(tmp_file, GFA1_COMPLEX_COLLAPSED_FN))
def test_complex_soft_collapse(self):
"""exfi.io.bed4_to_gfa1.bed4_to_gfa1: complex collapsed and soft masked
case"""
tmp_file = mkstemp()[1]
print(tmp_file)
bed4_to_gfa1(
gfa1_fn=tmp_file,
bed4=BED4_COMPLEX,
transcriptome_dict=TRANSCRIPTOME_COMPLEX_DICT,
masking='soft',
collapse=True
)
self.assertTrue(filecmp.cmp(tmp_file, GFA1_COMPLEX_COLLAPSED_SOFT_FN))
os.remove(tmp_file)
def test_complex_hard_collapse(self):
"""exfi.io.bed4_to_gfa1.bed4_to_gfa1: complex collapsed and hard masked
case"""
tmp_file = mkstemp()[1]
print(tmp_file)
bed4_to_gfa1(
gfa1_fn=tmp_file,
bed4=BED4_COMPLEX,
transcriptome_dict=TRANSCRIPTOME_COMPLEX_DICT,
masking='hard',
collapse=True
)
self.assertTrue(filecmp.cmp(tmp_file, GFA1_COMPLEX_COLLAPSED_HARD_FN))
os.remove(tmp_file)
if __name__ == '__main__':
main()
```
#### File: tests/test_io/test_bed.py
```python
import unittest
from exfi.io.bed import \
bed3_to_bed4, \
bed4_to_node2coordinates, \
bed4_to_path2nodes, \
bed4_to_node2sequence, \
bed4_to_edge2overlap
from tests.io.bed import \
BED3_EMPTY, BED3_SIMPLE, BED3_COMPLEX, \
BED4_EMPTY, BED4_SIMPLE, BED4_COMPLEX, \
NODE2COORDINATES_EMPTY, NODE2COORDINATES_SIMPLE, NODE2COORDINATES_COMPLEX, \
PATH2NODES_EMPTY, PATH2NODES_SIMPLE, PATH2NODES_COMPLEX, \
NODE2SEQUENCE_EMPTY, NODE2SEQUENCE_SIMPLE, NODE2SEQUENCE_COMPLEX, \
EDGE2OVERLAP_EMPTY, EDGE2OVERLAP_SIMPLE, EDGE2OVERLAP_COMPLEX
from tests.io.transcriptome_dicts import \
TRANSCRIPTOME_EMPTY_DICT, TRANSCRIPTOME_SIMPLE_DICT, \
TRANSCRIPTOME_COMPLEX_DICT
class TestBed3ToBed4(unittest.TestCase):
"""Tests for exfi.io.bed.bed3_to_bed4"""
def test_empty(self):
"""exfi.io.bed.bed3_to_bed4: empty case"""
observed = bed3_to_bed4(BED3_EMPTY)
self.assertTrue(BED4_EMPTY.equals(observed))
def test_simple(self):
"""exfi.io.bed.bed3_to_bed4: simple case"""
observed = bed3_to_bed4(BED3_SIMPLE)
self.assertTrue(observed.equals(BED4_SIMPLE))
def test_complex(self):
"""exfi.io.bed.bed3_to_bed4: complex case"""
observed = bed3_to_bed4(BED3_COMPLEX)
self.assertTrue(observed.equals(BED4_COMPLEX))
class TestBed4ToNode2Coordinates(unittest.TestCase):
"""Tests for exfi.io.bed.bed4_to_node2coordinates"""
def test_empty(self):
"""exfi.io.bed.bed4_to_node2coordinates: empty case"""
observed = bed4_to_node2coordinates(BED4_EMPTY)
self.assertTrue(observed.equals(NODE2COORDINATES_EMPTY))
def test_simple(self):
"""exfi.io.bed.bed4_to_node2coordinates: simple case"""
observed = bed4_to_node2coordinates(BED4_SIMPLE)
self.assertTrue(observed.equals(NODE2COORDINATES_SIMPLE))
def test_complex(self):
"""exfi.io.bed.bed4_to_node2coordinates: complex case"""
observed = bed4_to_node2coordinates(BED4_COMPLEX)
self.assertTrue(observed.equals(NODE2COORDINATES_COMPLEX))
class TestBed4ToPath2Nodes(unittest.TestCase):
"""Tests for exfi.io.bed.bed4_to_path2nodes"""
def test_empty(self):
"""exfi.io.bed.bed4_to_path2nodes: empty case"""
observed = bed4_to_path2nodes(BED4_EMPTY)
self.assertEqual(observed, PATH2NODES_EMPTY)
def test_simple(self):
"""exfi.io.bed.bed4_to_path2nodes: simple case"""
observed = bed4_to_path2nodes(BED4_SIMPLE)
self.assertEqual(observed, PATH2NODES_SIMPLE)
def test_complex(self):
"""exfi.io.bed.bed4_to_path2nodes: complex case"""
observed = bed4_to_path2nodes(BED4_COMPLEX)
print("Observed:\n", observed)
print("Expected:\n", PATH2NODES_COMPLEX)
self.assertEqual(observed, PATH2NODES_COMPLEX)
class TestBed4ToNode2Sequence(unittest.TestCase):
"""Tests for exfi.io.bed.bed4_to_node2sequence"""
def test_empty(self):
"""exfi.io.bed.bed4_to_node2sequence: empty case"""
observed = bed4_to_node2sequence(BED4_EMPTY, TRANSCRIPTOME_EMPTY_DICT)
self.assertTrue(observed.equals(NODE2SEQUENCE_EMPTY))
def test_simple(self):
"""exfi.io.bed.bed4_to_node2sequence: simple case"""
observed = bed4_to_node2sequence(BED4_SIMPLE, TRANSCRIPTOME_SIMPLE_DICT)
self.assertTrue(observed.equals(NODE2SEQUENCE_SIMPLE))
def test_complex(self):
"""exfi.io.bed.bed4_to_node2sequence: complex case"""
observed = bed4_to_node2sequence(
BED4_COMPLEX, TRANSCRIPTOME_COMPLEX_DICT
)
self.assertTrue(observed.equals(NODE2SEQUENCE_COMPLEX))
class TestBed4ToEdge2Overlap(unittest.TestCase):
"""Tests for exfi.io.bed.bed4_to_edge2overlap"""
def test_empty(self):
"""exfi.io.bed.bed4_to_edge2overlap: empty case"""
observed = bed4_to_edge2overlap(BED4_EMPTY)
print("Observed:\n", observed, observed.dtypes)
print("Expected:\n", EDGE2OVERLAP_EMPTY, EDGE2OVERLAP_EMPTY.dtypes)
self.assertTrue(observed.equals(EDGE2OVERLAP_EMPTY))
def test_simple(self):
"""exfi.io.bed.bed4_to_edge2overlap: simple case"""
observed = bed4_to_edge2overlap(BED4_SIMPLE)
print("Observed:\n", observed, observed.dtypes)
print("Expected:\n", EDGE2OVERLAP_SIMPLE, EDGE2OVERLAP_SIMPLE.dtypes)
self.assertTrue(observed.equals(EDGE2OVERLAP_SIMPLE))
def test_complex(self):
"""exfi.io.bed.bed4_to_edge2overlap: complex case"""
observed = bed4_to_edge2overlap(BED4_COMPLEX)
print("Observed:\n", observed, observed.dtypes)
print("Expected:\n", EDGE2OVERLAP_COMPLEX, EDGE2OVERLAP_COMPLEX.dtypes)
self.assertTrue(observed.equals(EDGE2OVERLAP_COMPLEX))
if __name__ == '__main__':
unittest.main()
```
#### File: tests/test_io/test_gff3_to_bed.py
```python
from unittest import TestCase, main
from exfi.io.gff3_to_bed import gff3_to_bed3
from tests.io.gff3 import \
GFF3_EMPTY_FN, GFF3_ENSEMBL_FN, GFF3_GMAP_FN
from tests.io.bed import \
BED3_EMPTY, BED3_ENSEMBL, BED3_GMAP
class TestGFF3ToBED(TestCase):
"""Tests for exfi.io.gff3_to_bed.gff3_to_bed3"""
def test_empty(self):
"""exfi.io.gff3_to_bed.gff3_to_bed3: empty case"""
observed = gff3_to_bed3(GFF3_EMPTY_FN)
self.assertTrue(observed.equals(BED3_EMPTY))
def test_ensembl(self):
"""exfi.io.gff3_to_bed.gff3_to_bed3: ensembl case"""
observed = gff3_to_bed3(GFF3_ENSEMBL_FN, mode="ensembl")
print("Observed", observed.values.tolist(), observed.dtypes, sep='\n')
self.assertTrue(observed.equals(BED3_ENSEMBL))
def test_gmap(self):
"""exfi.io.gff3_to_bed.gff3_to_bed3: gmap case"""
observed = gff3_to_bed3(GFF3_GMAP_FN, mode="gmap")
print("Observed", observed.values.tolist(), observed.dtypes, sep='\n')
self.assertTrue(observed.equals(BED3_GMAP))
# def test_ncbi(self):
# """exfi.io.gff3_to_bed.gff3_to_bed3: ncbi case"""
# observed = gff3_to_bed3(GFF3_NCBI_FN, mode=ncbi)
# self.assertTrue(observed.equals(BED3_NCBI))
if __name__ == '__main__':
main()
``` |
{
"source": "jlanga/smsk_exfi_paper",
"score": 2
} |
#### File: src/snakefiles/chopstitch.py
```python
def get_reads(wildcards):
sample = wildcards.sample
reads_nested = (
samples
[(samples["sample"] == sample)]
[["forward", "reverse"]]
.values
.tolist()
)
reads_flat = [item for sublist in reads_nested for item in sublist]
return reads_flat
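# Illustrative sketch (hypothetical samples table, not part of the workflow):
# a row {"sample": "hsap", "forward": "hsap_1.fq.gz", "reverse": "hsap_2.fq.gz"}
# makes get_reads return ["hsap_1.fq.gz", "hsap_2.fq.gz"], i.e. the nested
# [[forward, reverse], ...] rows flattened into one list of FASTQ paths for
# the Bloom-filter rule below.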
rule chopstitch_build_bloom:
input:
get_reads
output:
bf = CHOPSTITCH + "{sample}.k{kmer}.fpr{fpr}.full.unbaited.bf",
inf = CHOPSTITCH + "{sample}.k{kmer}.fpr{fpr}.full.unbaited.inf"
params:
kmer = "{kmer}",
fpr = "{fpr}",
bf = "Bfilter.bf",
inf = "Bfilter.inf"
threads:
24 # All
log:
CHOPSTITCH + "{sample}.k{kmer}.fpr{fpr}.full.unbaited.bf.log"
benchmark:
CHOPSTITCH + "{sample}.k{kmer}.fpr{fpr}.full.unbaited.bf.bmk"
shell:
"""
./bin/CreateBloom \
-t {threads} \
-k {params.kmer} \
--fpr1 {params.fpr} \
--fpr2 {params.fpr} \
{input} \
2> {log}
mv {params.bf} {output.bf}
mv {params.inf} {output.inf}
"""
rule chopstitch_find_exons:
input:
fa = RAW + "{sample}.rna.fa",
bf = CHOPSTITCH + "{sample}.k{kmer}.fpr{fpr}.full.unbaited.bf",
inf = CHOPSTITCH + "{sample}.k{kmer}.fpr{fpr}.full.unbaited.inf"
output:
exons = CHOPSTITCH + "{sample}.k{kmer}.fpr{fpr}.exons.fa",
processed_exons = CHOPSTITCH + "{sample}.k{kmer}.fpr{fpr}.processedexons.fa"
params:
bf = "Bfilter.bf",
inf = "Bfilter.inf",
exons = "exons.fa",
processed_exons = "processed_exons.fa"
shadow:
"shallow"
log:
CHOPSTITCH + "{sample}.k{kmer}.fpr{fpr}.find.exons.log"
benchmark:
CHOPSTITCH + "{sample}.k{kmer}.fpr{fpr}.find.exons.bmk"
shell:
"""
ln -s {input.bf} {params.bf}
ln -s {input.inf} {params.inf}
./bin/FindExons \
--input-bloom {params.bf} \
--lsplicesignals AG,TG,AC,GC,GG \
--rsplicesignals GT,TT,AT \
--allexons \
{input.fa} \
2> {log}
mv {params.exons} {output.exons}
mv {params.processed_exons} {output.processed_exons}
"""
rule pr_chopstitch_exons_to_bed3:
input:
CHOPSTITCH + "{sample}.k{kmer}.fpr{fpr}.exons.fa"
output:
CHOPSTITCH + "{sample}.k{kmer}.fpr{fpr}.chopstitch.bed"
log:
CHOPSTITCH + "{sample}.k{kmer}.fpr{fpr}.chopstitch.log"
benchmark:
CHOPSTITCH + "{sample}.k{kmer}.fpr{fpr}.chopstitch.bmk"
conda:
"chopstitch.yml"
shell:
'bash src/chopstitch_exons_to_bed3.sh {input} '
'| sort -k 1,1 -k2,2n '
'> {output} 2> {log}'
```
#### File: src/snakefiles/raw.py
```python
def get_reads(wildcards):
sample = wildcards.sample
library = wildcards.library
forward, reverse = (
samples
[(samples["sample"] == sample and samples["library"] == library)]
[["forward", "reverse"]]
.values
.tolist()[0]
)
return forward, reverse
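# Hedged example (hypothetical table): for sample="hsap" and library="lib1",
# a matching row with forward="hsap_lib1_1.fq.gz" and reverse="hsap_lib1_2.fq.gz"
# makes get_reads return that single (forward, reverse) pair; .tolist()[0]
# keeps only the first matching row.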
rule raw_link_pe_sample:
input:
get_reads
output:
forward = RAW + "{sample}_{library}_1.fq.gz",
reverse = RAW + "{sample}_{library}_2.fq.gz"
log:
RAW + "link_dna_pe_{sample}_{library}.log"
benchmark:
RAW + "link_dna_pe_{sample}_{library}.json"
shell:
"ln "
"--symbolic "
"$(readlink --canonicalize {input[0]}) "
"{output.forward} 2> {log}; "
"ln "
"--symbolic "
"$(readlink --canonicalize {input[1]}) "
"{output.reverse} 2>> {log}"
def get_transcriptome(wildcards):
return features[wildcards.sample]["transcriptome"]
def get_genome(wildcards):
return features[wildcards.sample]["genome"]
def get_annotation(wildcards):
return features[wildcards.sample]["annotation"]
rule raw_link_transcriptome:
input:
get_transcriptome
output:
RAW + "{sample}.rna.fa"
shell:
"ln --symbolic $(readlink --canonicalize {input}) {output}"
rule raw_link_genome:
input:
get_genome
output:
RAW + "{sample}.dna.fa"
shell:
"ln --symbolic $(readlink --canonicalize {input}) {output}"
rule raw_link_annotation:
input:
get_annotation
output:
RAW + "{sample}.gff3"
shell:
"ln --symbolic $(readlink --canonicalize {input}) {output}"
rule raw_reference:
input:
expand(
RAW + "{sample}.{ending}",
sample=SPECIES,
ending=["dna.fa", "rna.fa", "gff3"]
)
``` |
{
"source": "jlanga/smsk_exfi",
"score": 3
} |
#### File: src/snakefiles/raw.py
```python
def get_reads(wildcards):
sample = wildcards.sample
forward, reverse = (
samples
[(samples["sample"] == sample)]
[["forward", "reverse"]]
.values
.tolist()[0]
)
return forward, reverse
rule raw_link_pe_sample:
input:
get_reads
output:
forward = RAW + "{sample}_1.fq.gz",
reverse = RAW + "{sample}_2.fq.gz"
log:
RAW + "link_dna_pe_{sample}.log"
benchmark:
RAW + "link_dna_pe_{sample}.json"
shell:
"ln "
"--symbolic "
"$(readlink --canonicalize {input[0]}) "
"{output.forward} 2> {log}; "
"ln "
"--symbolic "
"$(readlink --canonicalize {input[0]}) "
"{output.reverse} 2>> {log}"
rule raw_link_assembly:
input:
fasta = features["assembly"]
output:
fasta = RAW + "assembly.fa"
log:
RAW + "link_assembly.log"
benchmark:
RAW + "link_assembly.json"
shell:
"ln "
"--symbolic "
"$(readlink --canonicalize {input.fasta}) "
"{output.fasta} 2> {log}"
``` |
{
"source": "jlanga/smsk_selection",
"score": 3
} |
#### File: src/homologs/ete3_evol_prepare_folder.py
```python
from copy import deepcopy
import sys
from Bio import Phylo
from Bio import AlignIO
from helpers import fix_dir_path, process_folders
def get_species_to_seqid(alignment):
"""Get the species to seqid dictionary:
{"aalo": "aalo@TRINITY_DN123937_c0_g1_i1.p1"}
"""
return {sequence.name.split("@")[0]: sequence.name for sequence in alignment}
def get_seqid_to_species(alignment):
"""Get the seqid to species dictionary:
{"aalo@TRINITY_DN123937_c0_g1_i1.p1": "aalo"}
"""
return {sequence.name: sequence.name.split("@")[0] for sequence in alignment}
def keep_leafs(tree, leafs):
"""
Trim the tree keeping only every leaf in `leafs`
"""
tree_trimmed = deepcopy(tree)
all_species = set(leaf.name for leaf in tree.get_terminals())
for species_to_remove in all_species - set(leafs):
tree_trimmed.prune(species_to_remove)
return tree_trimmed
def rename_tree(tree, alignment):
"""Remove leafs not in alignment and rename them accordingly"""
tree = deepcopy(tree)
species_to_seqid = get_species_to_seqid(alignment)
tree = keep_leafs(tree, species_to_seqid.keys())
for leaf in tree.get_terminals():
leaf.name = species_to_seqid[leaf.name]
return tree
def has_enough_by_background_and_foreground(
alignment, foreground_list, min_foreground=2, min_background=2
):
"""
Return the alignment if it has at least min_foreground and min_background
sequences
"""
alignment_ids = {sequence.id.split("@")[0]: sequence.id for sequence in alignment}
n_foreground = len(set(foreground_list) & set(alignment_ids.keys()))
n_background = len(set(alignment_ids.keys()) - set(foreground_list))
if n_foreground >= min_foreground and n_background >= min_background:
return True
return False
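# Hedged example (hypothetical species codes): with
# foreground_list=["human", "chimp"] and an alignment containing human@g1,
# chimp@g2, mouse@g3 and rat@g4, there are 2 foreground and 2 background
# species, so the default thresholds (2 and 2) are met and the tree is kept.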
def ete3_evol_prepare(
tree_in_fn,
alignment_in_fn,
tree_out_fn,
foreground_list,
min_foreground=2,
min_background=2,
):
"""
Read a species tree and alignment (nwk and fasta),
Read the list of foreground taxa
If there are enough foreground and background species:
subset and rename the species tree into a protein tree
write the alignment separately.
"""
# Read inputs
tree_in = Phylo.read(file=tree_in_fn, format="newick")
alignment_in = AlignIO.read(handle=open(alignment_in_fn, "r"), format="fasta")
# Slice and rename leafs in tree
tree_out = rename_tree(tree=tree_in, alignment=alignment_in)
# Check that there are enough sequences
if has_enough_by_background_and_foreground(
alignment_in, foreground_list, min_foreground, min_background
):
Phylo.write(trees=tree_out, file=tree_out_fn, format="newick")
if __name__ == "__main__":
if len(sys.argv) != 9:
sys.stderr.write(
"ERROR. Incorrect number of parameters. Example:\n"
"ete3_evol_prepare.py species_tree.nwk msa_folder msa_extension "
"output_folder output_extension human,chimp,bonobo 2 2\n"
)
sys.exit(1)
TREE_IN_FN = sys.argv[1]
MSA_DIR_IN = fix_dir_path(sys.argv[2])
MSA_EXT_IN = sys.argv[3]
TREE_DIR_OUT = fix_dir_path(sys.argv[4])
TREE_EXT_OUT = sys.argv[5]
FOREGROUND_LIST = sys.argv[6].split(",")
MIN_FOREGROUND = int(sys.argv[7])
MIN_BACKGROUND = int(sys.argv[8])
def ete3_evol_prepare_wrapper(file_in, file_out):
ete3_evol_prepare(
tree_in_fn=TREE_IN_FN,
alignment_in_fn=file_in,
tree_out_fn=file_out,
foreground_list=FOREGROUND_LIST,
min_foreground=MIN_FOREGROUND,
min_background=MIN_BACKGROUND,
)
process_folders(
MSA_DIR_IN, MSA_EXT_IN, TREE_DIR_OUT, TREE_EXT_OUT, ete3_evol_prepare_wrapper
)
# if __name__ == '__main__':
# if len(sys.argv) != 7:
# sys.stderr.write(
# "ERROR. Incorrect number of parameters. Example:\n"
# "ete3_evol_prepare.py species_tree.nwk codon_alignment.fa "
# "output.nwk human,chimp,bonobo 2 2\n"
# )
# sys.exit(1)
# TREE_IN_FN = sys.argv[1]
# ALIGNMENT_IN_FN = sys.argv[2]
# TREE_OUT_FN = sys.argv[3]
# FOREGROUND_LIST = sys.argv[4].split(",")
# MIN_FOREGROUND = int(sys.argv[5])
# MIN_BACKGROUND = int(sys.argv[6])
# ete3_evol_prepare(
# tree_in_fn=TREE_IN_FN,
# alignment_in_fn=ALIGNMENT_IN_FN,
# tree_out_fn=TREE_OUT_FN,
# foreground_list=FOREGROUND_LIST,
# min_foreground=MIN_FOREGROUND,
# min_background=MIN_BACKGROUND
# )
```
#### File: src/homologs/filter_by_occupancy.py
```python
import os
import sys
from helpers import fasta_to_dict
def filter_by_occupancy(filename_in, filename_out, min_occupancy=0.5):
"""
Filter an alignment in fasta format according to the occupancy of the
columns. Store the results in fasta format.
"""
fasta_raw = fasta_to_dict(filename_in)
n_sequences = len(fasta_raw.keys())
alignment_length = len(fasta_raw[tuple(fasta_raw.keys())[0]])
columns = tuple(
"".join(fasta_raw[seqname][column_index] for seqname in fasta_raw.keys())
for column_index in range(alignment_length)
)
columns_to_keep = []
for column_number, column in enumerate(columns):
n_gaps = column.count("-")
if 1 - float(n_gaps) / float(n_sequences) >= min_occupancy:
columns_to_keep.append(column_number)
fasta_trimmed = {}
for seqname, sequence in fasta_raw.items():
fasta_trimmed[seqname] = "".join(
fasta_raw[seqname][column_to_keep] for column_to_keep in columns_to_keep
)
    out_dir = os.path.dirname(filename_out)
    if out_dir and not os.path.exists(out_dir):
        os.makedirs(out_dir)
with open(filename_out, "w") as f_out:
for seqname, sequence in fasta_trimmed.items():
f_out.write(
">{seqname}\n{sequence}\n".format(seqname=seqname, sequence=sequence)
)
if __name__ == "__main__":
if len(sys.argv) != 4:
sys.stderr.write(
"ERROR: incorrect number of arguments.\n"
"python filter_by_occupancy.py fastain fastaout min_occupancy\n"
)
sys.exit(1)
FASTA_IN = sys.argv[1]
FASTA_OUT = sys.argv[2]
MIN_OCCUPANCY = float(sys.argv[3])
filter_by_occupancy(FASTA_IN, FASTA_OUT, MIN_OCCUPANCY)
```
#### File: src/homologs/run_trimal.py
```python
import argparse
import multiprocessing as mp
import tempfile
import os
from subprocess import run
from helpers import fasta_to_dict, fix_dir_path, process_folders_parallel
bases = "TCAG"
codons = [a + b + c for a in bases for b in bases for c in bases] + ["---"]
aminoacids = "FFLLSSSSYY**CC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG-"
codon_table = dict(zip(codons, aminoacids))
del bases, codons, aminoacids
def translate(string):
"""Translate a nucleotide string to amino acid"""
n = len(string)
string = string.upper()
if n % 3 != 0:
sys.exit(f"ERROR: Incorrect number of characters: {n}")
translated = ""
for i in range(0, n, 3):
codon = string[i : i + 3]
if codon not in codon_table:
translated += "-"
else:
translated += codon_table[codon]
return translated
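# Minimal sketch of the helper above (assumed toy input, not real data):
# translate("ATGGCCTAA") returns "MA*", and codons missing from the table,
# such as ones containing N, map to "-", so translate("ATGNNN") returns "M-".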
def translate_fasta(fasta_in, fasta_out):
"""Translate an entire fasta aligment"""
with open(fasta_out, "w") as f_out:
for name, sequence in fasta_to_dict(fasta_in).items():
f_out.write(f">{name}\n{translate(sequence)}\n")
def run_trimal(fasta_in, fasta_out, fasta_gapless):
"""Run trimal.
    Trim the translated alignment with -automated1 and back-translate the
    result to codons using the original ungapped CDS.
"""
command = [
"trimal",
"-in",
fasta_in,
"-out",
fasta_out,
"-automated1",
"-backtrans",
fasta_gapless,
]
run(command)
def remove_gaps(fasta_in, fasta_out):
"""Remove all gaps in the alignment"""
with open(fasta_out, "w") as f_out:
for name, sequence in fasta_to_dict(fasta_in).items():
f_out.write(f">{name}\n{sequence.replace('-', '')}\n")
def run_pipeline(raw_fn, trimmed_fn):
"""
Align CDS with trimal (translate | trim)
"""
translated = tempfile.NamedTemporaryFile()
gapless = tempfile.NamedTemporaryFile()
translate_fasta(fasta_in=raw_fn, fasta_out=translated.name)
remove_gaps(fasta_in=raw_fn, fasta_out=gapless.name)
run_trimal(
fasta_in=translated.name, fasta_out=trimmed_fn, fasta_gapless=gapless.name
)
def parse_arguments():
parser = argparse.ArgumentParser(
description="Run trimal over an entire folder of codon alignments"
)
parser.add_argument(
"-i",
"--input-folder",
help="Input folder. All files must be *.fa",
required=True,
)
parser.add_argument(
"-o",
"--output-folder",
help="Output folder. All files will be *.fa",
required=True,
)
parser.add_argument(
"-t",
"--threads",
help="Number of threads to use",
default=1,
type=int,
required=False,
)
return vars(parser.parse_args())
if __name__ == "__main__":
ARGS = parse_arguments()
process_folders_parallel(
fix_dir_path(ARGS["input_folder"]),
"fa",
fix_dir_path(ARGS["output_folder"]),
"fa",
run_pipeline,
ncpus=ARGS["threads"],
)
```
#### File: src/homologs/split_cds.py
```python
import sys
from helpers import fasta_to_dict, fix_dir_path, process_folders
def subset_file(pep_fn, cds_fn, cds_dict):
"""Write to cds_fn the cds sequences that are in pep_fn"""
with open(cds_fn, "w") as cds_out:
for seqid in fasta_to_dict(pep_fn).keys():
cds_out.write(
">{seqid}\n{sequence}\n".format(seqid=seqid, sequence=cds_dict[seqid])
)
if __name__ == "__main__":
if len(sys.argv) != 6:
sys.stderr.write(
"Error. Usage: python split_cds.py folder_in ext_in folder_out "
"ext_out all_cds.fa"
)
sys.exit(1)
IN_DIR = fix_dir_path(sys.argv[1])
IN_EXT = sys.argv[2]
OUT_DIR = fix_dir_path(sys.argv[3])
OUT_EXT = sys.argv[4]
CDS_FN = sys.argv[5]
CDS_DICT = fasta_to_dict(CDS_FN)
def process(pep_fn, cds_fn):
"""Fix parameter"""
return subset_file(pep_fn, cds_fn, CDS_DICT)
process_folders(IN_DIR, IN_EXT, OUT_DIR, OUT_EXT, process)
```
#### File: pdc2/scripts/bait_homologs.py
```python
import phylo3,newick3,os,sys
import seq
import ntpath
RUN_BLASTP = True # run blastp search alongside swipe for comparison
def get_filename_from_path(path):
head, tail = ntpath.split(path)
return tail or ntpath.basename(head)
def swipe(query_fasta,DIR,num_cores,max_num_hits=20,min_bitscore=20.0):
"""
given a DIR with peptide fasta files that either end with .pep.fa or
.cdhit, swipe on each one
return a fasta file with hits from each taxa plus the queries added
"""
if DIR[-1] != "/": DIR += "/"
max_num_hits = int(max_num_hits)
min_bitscore = float(min_bitscore)
# swipe with each taxon
pepfiles = [i for i in os.listdir(DIR) if (i.endswith(".pep.fa") or i.endswith(".cdhit"))]
datasets = [i.split(".")[0] for i in pepfiles]
print len(pepfiles),"input peptide files read"
assert len(set(datasets)) == len(datasets),\
"dataset name repeats. remove duplicated sets"
for i in os.listdir(DIR):
if i.endswith(".pep.fa") or i.endswith(".cdhit"):
if not os.path.exists(DIR+i+".psd"):
os.system("makeblastdb -in "+DIR+i+" -parse_seqids -dbtype prot -out "+DIR+i)
swipe_outname = DIR+i+"."+get_filename_from_path(query_fasta).split(".")[0]+".swipe"
if not os.path.exists(swipe_outname):
cmd = "swipe -d "+DIR+i+" -i "+query_fasta+" -a "+str(num_cores)
cmd += " -p blastp -o "+swipe_outname+" -m 8 -e 10"
print cmd
os.system(cmd)
assert os.path.exists(swipe_outname), \
"swipe did not finish correctly"
"""
swipe output colums are:
Query id, Subject id, % identity, alignment length, mismatches,
gap openings, q. start, q. end, s. start, s. end, e-value, bit score
"""
# summarize the hit seq ids
if not os.path.exists(swipe_outname+".hits"):
                hit_tuples = [] # a list of tuples (hit, bitscore)
with open(swipe_outname,"r") as infile:
for line in infile:
if len(line) < 3: continue # skip empty lines
spls = line.strip().split("\t")
query,hit,bitscore = spls[0],spls[1].replace("lcl|",""),float(spls[-1])
if query != hit and bitscore >= min_bitscore:
hit_tuples.append((hit,bitscore))
out = [] # unique hit ids
for hit, bitscore in sorted(hit_tuples,key=lambda x:x[1],reverse=True):
if hit not in out:
out.append(hit)
if len(out) == max_num_hits:
break
if len(out) == 0: print "Warning: No hits found"
with open(swipe_outname+".hits","w") as outfile:
for hit in out:
print hit
outfile.write(hit+"\n")
# write output fasta
outname = query_fasta.replace(".pep.fa","_swipe.fa")
print "Writing output fasta",outname
outfile = open(outname,"w")
query_seqids = [] # avoid seq id repeats
with open(query_fasta,"r") as infile:
for line in infile:
outfile.write(line) # copy over query seqs
if line[0] == ">":
query_seqids.append(line.strip()[1:])
for i in os.listdir(DIR):
if i.endswith(".pep.fa") or i.endswith(".cdhit"):
seqDICT = {} # key is seq name, value is seq
for s in seq.read_fasta_file(DIR+i):
seqDICT[s.name] = s.seq
with open(DIR+i+"."+get_filename_from_path(query_fasta).split(".")[0]+".swipe.hits","r") as infile:
for line in infile:
line = line.strip()
if len(line) > 0 and line not in query_seqids:
outfile.write(">"+line+"\n"+seqDICT[line]+"\n")
outfile.close()
def blastp(query_fasta,DIR,num_cores,max_num_hits=20,min_bitscore=20.0):
"""
same as swipe but using blastp
"""
if DIR[-1] != "/": DIR += "/"
max_num_hits = int(max_num_hits)
min_bitscore = float(min_bitscore)
# blastp with each taxon
pepfiles = [i for i in os.listdir(DIR) if (i.endswith(".pep.fa") or i.endswith(".cdhit"))]
datasets = [i.split(".")[0] for i in pepfiles]
print len(pepfiles),"input peptide files read"
assert len(set(datasets)) == len(datasets),\
"dataset name repeats. remove duplicated sets"
for i in os.listdir(DIR):
if i.endswith(".pep.fa") or i.endswith(".cdhit"):
if not os.path.exists(DIR+i+".psd"):
os.system("makeblastdb -in "+DIR+i+" -parse_seqids -dbtype prot -out "+DIR+i)
blastp_outname = DIR+i+"."+get_filename_from_path(query_fasta).split(".")[0]+".blastp"
if not os.path.exists(blastp_outname):
cmd = "blastp -db "+DIR+i
cmd += " -query "+query_fasta
cmd += " -num_threads "+str(num_cores)
cmd += " -out "+blastp_outname
cmd += " -evalue 10"
cmd += " -max_target_seqs "+str(max_num_hits)
cmd += " -outfmt '6 qseqid qlen sseqid slen frames pident nident length mismatch gapopen qstart qend sstart send evalue bitscore'"
print cmd
os.system(cmd)
assert os.path.exists(blastp_outname), \
"blastp did not finish correctly"
"""
blastp output colums are:
0-qseqid 2-sseqid 15-bitscore'"
"""
# summarize the hit seq ids
if not os.path.exists(blastp_outname+".hits"):
                hit_tuples = [] # a list of tuples (hit, bitscore)
with open(blastp_outname,"r") as infile:
for line in infile:
if len(line) < 3: continue # skip empty lines
spls = line.strip().split("\t")
query,hit,bitscore = spls[0],spls[2],float(spls[-1])
if query != hit and bitscore >= min_bitscore:
hit_tuples.append((hit,bitscore))
out = [] # unique hit ids
for hit, bitscore in sorted(hit_tuples,key=lambda x:x[1],reverse=True):
if hit not in out:
out.append(hit)
if len(out) == max_num_hits:
break
if len(out) == 0: print "Warning: No hits found"
with open(blastp_outname+".hits","w") as outfile:
for hit in out:
print hit
outfile.write(hit+"\n")
# write output fasta
outname = query_fasta.replace(".pep.fa","_blastp.fa")
print "Writing output fasta",outname
outfile = open(outname,"w")
query_seqids = [] # avoid seq id repeats
with open(query_fasta,"r") as infile:
for line in infile:
outfile.write(line) # copy over query seqs
if line[0] == ">":
query_seqids.append(line.strip()[1:])
for i in os.listdir(DIR):
if i.endswith(".pep.fa") or i.endswith(".cdhit"):
seqDICT = {} # key is seq name, value is seq
for s in seq.read_fasta_file(DIR+i):
seqDICT[s.name] = s.seq
with open(DIR+i+"."+get_filename_from_path(query_fasta).split(".")[0]+".blastp.hits","r") as infile:
for line in infile:
line = line.strip()
if len(line) > 0 and line not in query_seqids:
outfile.write(">"+line+"\n"+seqDICT[line]+"\n")
outfile.close()
if __name__ == "__main__":
if len(sys.argv) != 5:
print "python bait_homologs.py query_pep_fa databaseDIR num_to_bait num_cores"
sys.exit(0)
query_fasta,DIR,num_to_bait,num_cores = sys.argv[1:]
swipe(query_fasta,DIR,num_cores,max_num_hits=num_to_bait)
if RUN_BLASTP: blastp(query_fasta,DIR,num_cores,max_num_hits=num_to_bait)
```
#### File: pdc2/scripts/check_alignment_overlap.py
```python
import sys
import seq
import matplotlib.pyplot as plt
import networkx as nx
from networkx.drawing.nx_agraph import write_dot
def check_overlap(seq1,seq2):
match = True
once = False
for i,j in zip(seq1,seq2):
if i == "-" or j == "-":
continue
else:
if i == j:
once = True
continue
else:
match = False
break
# need at least one match.
if once == False:
match = False
return match
def combine_seqs(seq1,seq2):
finalseq = ""
for i,j in zip(seq1,seq2):
if i == "-":
finalseq += j
else:
finalseq += i
return finalseq
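# Illustrative toy example (not real alignment data): check_overlap("AT-G",
# "A-CG") is True because the shared, non-gap columns (positions 0 and 3)
# agree, and combine_seqs("AT-G", "A-CG") fills each gap from the other
# sequence to give "ATCG". Fragments that disagree at any shared column are
# left unmerged.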
if __name__ == "__main__":
if len(sys.argv) != 3:
print "python "+sys.argv[0]+" infile.aln outfile.aln"
sys.exit()
seqs = seq.read_fasta_file(sys.argv[1])
seqs_sample = {}
seqs_d = {}
keep_seqs = {}
for i in seqs:
seqs_d[i.name] = i
keep_seqs[i.name] = i
spls = i.name.split("@")[0]
try:
seqs_sample[spls].append(i)
except:
seqs_sample[spls] = []
seqs_sample[spls].append(i)
for i in seqs_sample:
G = nx.MultiGraph()
for j in range(len(seqs_sample[i])):
for k in range(len(seqs_sample[i])):
if j < k:
ovr = check_overlap(seqs_sample[i][j].seq,seqs_sample[i][k].seq)
if ovr == True:
#print "\t",ovr,j,k,seqs_sample[i][j].name,seqs_sample[i][k].name
G.add_edge(seqs_sample[i][j].name,seqs_sample[i][k].name)
if len(G) > 0:
print i +" "+str(len(seqs_sample[i]))
for j in nx.connected_component_subgraphs(G):
js = list(j)
if len(j) == 2:
#print "\tsimple 2 seq connect"
#print "from"
#print seqs_d[js[0]].seq
#print seqs_d[js[1]].seq
#print "to"
print "\tcombining:",js,js[0]+"______"+js[1]
finalse = combine_seqs(seqs_d[js[0]].seq,seqs_d[js[1]].seq)
ns = seq.Sequence()
ns.name = js[0]+"______"+js[1]
ns.seq = finalse
keep_seqs[ns.name] = ns
print "\t\tremoving",js[0],js[1]
del keep_seqs[js[0]]
del keep_seqs[js[1]]
else:
going = True
while going:
found = False
for k in j.edges():
found = True
ks = list(k)
print "\tcombining:",ks,ks[0]+"______"+ks[1]
finalse = combine_seqs(seqs_d[ks[0]].seq,seqs_d[ks[1]].seq)
ns = seq.Sequence()
ns.name = ks[0]+"______"+ks[1]
ns.seq = finalse
keep_seqs[ns.name] = ns
seqs_d[ns.name] = ns
x = set()
for m in j.neighbors(k[0]):
if m in k:
continue
x.add(m)
for m in j.neighbors(k[1]):
if m in k:
continue
x.add(m)
j.remove_node(k[0])
j.remove_node(k[1])
print "\t\tremoving",k[0],k[1]
del keep_seqs[k[0]]
del keep_seqs[k[1]]
for m in x:
if ns.name == m:
continue
if check_overlap(ns.seq,seqs_d[m].seq) == True:
j.add_edge(ns.name,m)
print "\t\t",ns.name,"->",m
break
if found == False:
break
#for m in
#j.remove_node(
#nx.draw(G)
#plt.show()
#sys.exit()
outf = open(sys.argv[2],"w")
for i in keep_seqs:
outf.write(keep_seqs[i].get_fasta())
outf.close()
```
#### File: pdc2/scripts/corset_wrapper.py
```python
import sys, os
def salmon_index(transcript,num_cores,DIR):
if DIR == ".": DIR = os.getcwd()
if os.path.isabs(DIR) == False: DIR = os.path.abspath(DIR)
if DIR[-1] != "/": DIR += "/"
path_transcript, file_transcript = os.path.split(transcript) #splits the path from the file name
transcript_name = str(file_transcript)
salmon_base_name = transcript_name.split( "." )
index_name = str(salmon_base_name[0])+"_salmon_index"
if os.path.exists(DIR+index_name):
print "Found salmon index folder for",transcript_name
else:
cmd= ["salmon-0.9.1","index","-t",transcript,"-i",DIR+index_name,"--type quasi","-p",str(num_cores)]
print (" ".join(cmd))
os.system(" ".join(cmd))
assert os.path.exists(DIR+index_name), "salmon-index did not finish"
def salmon_quantify_pe(transcript,index,pe_fq1,pe_fq2,num_cores,DIR):
if DIR == ".": DIR = os.getcwd()
if os.path.isabs(DIR) == False: DIR = os.path.abspath(DIR)
if DIR[-1] != "/": DIR += "/"
path_transcript, file_transcript = os.path.split(transcript) #splits the path from the file name
transcript_name = str(file_transcript)
salmon_base_name = transcript_name.split( "." )
index_name = str(salmon_base_name[0])+"_salmon_index"
quant_name = str(salmon_base_name[0])+"_salmon_quant"
if os.path.exists(DIR+quant_name):
print "Found salmon quantification folder for",salmon_base_name[0]
else:
cmd= ["salmon-0.9.1","quant","-i",DIR+index_name,"--dumpEq","--libType A","-p",str(num_cores), \
"-1",pe_fq1, "-2",pe_fq2, "-o", DIR+quant_name]
print (" ".join(cmd))
os.system(" ".join(cmd))
assert os.path.exists(DIR+quant_name), "salmon-quant did not finish"
def salmon_quantify_se(transcript,index,se_fq,num_cores,DIR):
if DIR == ".": DIR = os.getcwd()
if os.path.isabs(DIR) == False: DIR = os.path.abspath(DIR)
if DIR[-1] != "/": DIR += "/"
path_transcript, file_transcript = os.path.split(transcript) #splits the path from the file name
transcript_name = str(file_transcript)
salmon_base_name = transcript_name.split( "." )
index_name = str(salmon_base_name[0])+"_salmon_index"
quant_name = str(salmon_base_name[0])+"_salmon_quant"
if os.path.exists(DIR+quant_name):
print "Found salmon quantification folder for",salmon_base_name[0]
else:
cmd= ["salmon-0.9.1","quant","-i",DIR+index_name,"--dumpEq","--libType A","-p",str(num_cores), \
"-r",se_fq,"-o", DIR+quant_name]
print (" ".join(cmd))
os.system(" ".join(cmd))
assert os.path.exists(DIR+quant_name), "salmon-quant did not finish"
def corset_salmon_eq_classes(transcript,eq_classes,DIR):
if DIR == ".": DIR = os.getcwd()
if os.path.isabs(DIR) == False: DIR = os.path.abspath(DIR)
if DIR[-1] != "/": DIR += "/"
path_transcript, file_transcript = os.path.split(transcript) #splits the path from the file name
transcript_name = str(file_transcript)
salmon_base_name = transcript_name.split( "." )
clusters_name = str(salmon_base_name[0])+"_salmon"+"-clusters.txt"
counts_name = str(salmon_base_name[0])+"_salmon"+"-counts.txt"
if os.path.exists(DIR+clusters_name) and os.path.exists(DIR+counts_name) :
print "Found corset-salmon files for",salmon_base_name[0]
else:
cmd = ["corset","-i salmon_eq_classes",eq_classes,"-m 5","-p",(DIR+str(salmon_base_name[0])+"_salmon")]
print (" ".join(cmd))
os.system(" ".join(cmd))
assert os.path.exists(DIR+clusters_name) and os.path.exists(DIR+counts_name), "Corsert did not finish"
def run_pe_salmon(transcript,pe_fq1,pe_fq2,num_cores,DIR):
if DIR == ".": DIR = os.getcwd()
if os.path.isabs(DIR) == False: DIR = os.path.abspath(DIR)
if DIR[-1] != "/": DIR += "/"
path_transcript, file_transcript = os.path.split(transcript) #splits the path from the file name
transcript_name = str(file_transcript)
salmon_base_name = transcript_name.split( "." )
index_name = str(salmon_base_name[0])+"_salmon_index"
quant_name = str(salmon_base_name[0])+"_salmon_quant"
salmon_index(transcript,num_cores,DIR)
salmon_quantify_pe(transcript,DIR+index_name,pe_fq1,pe_fq2,num_cores,DIR)
corset_salmon_eq_classes(transcript,str(DIR+quant_name)+"/aux_info/eq_classes.txt",DIR)
def run_se_salmon(transcript,se_fq,num_cores,DIR):
if DIR == ".": DIR = os.getcwd()
if os.path.isabs(DIR) == False: DIR = os.path.abspath(DIR)
if DIR[-1] != "/": DIR += "/"
path_transcript, file_transcript = os.path.split(transcript) #splits the path from the file name
transcript_name = str(file_transcript)
salmon_base_name = transcript_name.split( "." )
index_name = str(salmon_base_name[0])+"_salmon_index"
quant_name = str(salmon_base_name[0])+"_salmon_quant"
salmon_index(transcript,num_cores,DIR)
salmon_quantify_se(transcript,DIR+index_name,se_fq,num_cores,DIR)
corset_salmon_eq_classes(transcript,str(DIR+quant_name)+"/aux_info/eq_classes.txt",DIR)
def bowtie_index(transcript,num_cores,DIR):
if DIR == ".": DIR = os.getcwd()
if os.path.isabs(DIR) == False: DIR = os.path.abspath(DIR)
if DIR[-1] != "/": DIR += "/"
if os.path.isabs(transcript) == False: transcript = os.path.abspath(transcript)
path_transcript, file_transcript = os.path.split(transcript) #splits the path from the file name
if path_transcript[-1] != "/": path_transcript += "/"
transcript_name = str(file_transcript)
bowtie_base_name = transcript_name.split( "." )
index_name = DIR+str(bowtie_base_name[0])
index_1_bt = (bowtie_base_name[0])+".1.ebwt"
index_2_bt = (bowtie_base_name[0])+".2.ebwt"
index_3_bt = (bowtie_base_name[0])+".3.ebwt"
index_4_bt = (bowtie_base_name[0])+".4.ebwt"
index_rev_1_bt = (bowtie_base_name[0])+".rev.1.ebwt"
index_rev_2_bt = (bowtie_base_name[0])+".rev.2.ebwt"
if os.path.exists(DIR+index_1_bt) \
and os.path.exists(DIR+index_2_bt) \
and os.path.exists(DIR+index_3_bt) \
and os.path.exists(DIR+index_4_bt) \
and os.path.exists(DIR+index_rev_1_bt) \
and os.path.exists(DIR+index_rev_2_bt):
print "Found bowtie index for",transcript_name
else:
cmd= ["bowtie-build","-q",transcript,index_name,"--threads",str(num_cores)]
print (" ".join(cmd))
os.system(" ".join(cmd))
assert os.path.exists(DIR+index_1_bt) \
and os.path.exists(DIR+index_2_bt) \
and os.path.exists(DIR+index_3_bt) \
and os.path.exists(DIR+index_4_bt) \
and os.path.exists(DIR+index_rev_1_bt) \
and os.path.exists(DIR+index_rev_2_bt), "bowtie-build not completed"
def bowtie_align_pe(transcript,index,pe_fq1,pe_fq2,num_cores,DIR):
if DIR == ".": DIR = os.getcwd()
if os.path.isabs(DIR) == False: DIR = os.path.abspath(DIR)
if DIR[-1] != "/": DIR += "/"
if os.path.isabs(transcript) == False: transcript = os.path.abspath(transcript)
path_transcript, file_transcript = os.path.split(transcript) #splits the path from the file name
if path_transcript[-1] != "/": path_transcript += "/"
transcript_name = str(file_transcript)
bowtie_base_name = transcript_name.split( "." )
sam_file = str((bowtie_base_name[0])+"_bowtie.sam")
bam_file = str((bowtie_base_name[0])+"_bowtie.bam")
if os.path.exists(DIR+bam_file):
print "Found bowtie bam alignment for",bowtie_base_name[0]
elif os.path.exists(DIR+sam_file):
cmd = ["samtools view -S","-b",(DIR+sam_file), ">", (DIR+bam_file)]
print (" ".join(cmd))
os.system(" ".join(cmd))
assert os.path.exists(DIR+bam_file), "Samtools not completed"
if os.path.exists(DIR+bam_file):
os.remove(DIR+sam_file)
else:
cmd= ["bowtie","--all","-k 10","--chunkmbs 2580","--rf --nofw","--threads",str(num_cores), (DIR+str(bowtie_base_name[0])),\
"-1", pe_fq1, "-2", pe_fq2,"-S",(DIR+sam_file)]
print (" ".join(cmd))
os.system(" ".join(cmd))
assert os.path.exists(DIR+sam_file), "bowtie-align not completed"
if os.path.exists(DIR+sam_file):
cmd = ["samtools view -S","-b",(DIR+sam_file), ">", (DIR+bam_file)]
print (" ".join(cmd))
os.system(" ".join(cmd))
assert os.path.exists(DIR+bam_file), "Samtools not completed"
if os.path.exists(DIR+bam_file):
os.remove(DIR+sam_file)
def bowtie_align_se(transcript,index,se_fq,num_cores,DIR):
if DIR == ".": DIR = os.getcwd()
if os.path.isabs(DIR) == False: DIR = os.path.abspath(DIR)
if DIR[-1] != "/": DIR += "/"
if os.path.isabs(transcript) == False: transcript = os.path.abspath(transcript)
path_transcript, file_transcript = os.path.split(transcript) #splits the path from the file name
if path_transcript[-1] != "/": path_transcript += "/"
transcript_name = str(file_transcript)
bowtie_base_name = transcript_name.split( "." )
sam_file = str((bowtie_base_name[0])+"_bowtie.sam")
bam_file = str((bowtie_base_name[0])+"_bowtie.bam")
if os.path.exists(DIR+bam_file):
print "Found bowtie bam alignment for",bowtie_base_name[0]
elif os.path.exists(DIR+sam_file):
cmd = ["samtools view -S","-b",(DIR+sam_file), ">", (DIR+bam_file)]
print (" ".join(cmd))
os.system(" ".join(cmd))
if os.path.exists(DIR+bam_file):
os.remove(DIR+sam_file)
assert os.path.exists(DIR+bam_file), "Samtools not completed"
else:
cmd= ["bowtie","--all","-k 10","--threads",str(num_cores), DIR+str(bowtie_base_name[0]),\
se_fq, "-S", (DIR+sam_file)]
print (" ".join(cmd))
os.system(" ".join(cmd))
assert os.path.exists(DIR+sam_file), "bowtie-align not completed"
if os.path.exists(DIR+sam_file):
cmd = ["samtools view -S","-b",(DIR+sam_file), ">", (DIR+bam_file)]
print (" ".join(cmd))
os.system(" ".join(cmd))
assert os.path.exists(DIR+bam_file), "Samtools not completed"
if os.path.exists(DIR+bam_file):
os.remove(DIR+sam_file)
def corset_bowtie(transcript,bam,DIR):
if DIR == ".": DIR = os.getcwd()
if os.path.isabs(DIR) == False: DIR = os.path.abspath(DIR)
if DIR[-1] != "/": DIR += "/"
path_transcript, file_transcript = os.path.split(transcript) #splits the path from the file name
transcript_name = str(file_transcript)
bowtie_base_name = transcript_name.split( "." )
clusters_name = str(bowtie_base_name[0])+"_bowtie"+"-clusters.txt"
counts_name = str(bowtie_base_name[0])+"_bowtie"+"-counts.txt"
if os.path.exists(DIR+clusters_name) and os.path.exists(DIR+counts_name) :
print "Corset files found for",bowtie_base_name[0]
else:
cmd = ["corset","-i bam",bam,"-m -5","-p",(DIR+str(bowtie_base_name[0])+"_bowtie")]
print (" ".join(cmd))
os.system(" ".join(cmd))
assert os.path.exists(DIR+clusters_name) and os.path.exists(DIR+counts_name), "Corsert did not finish"
def run_pe_bowtie(transcript,pe_fq1,pe_fq2,num_cores,DIR):
if DIR == ".": DIR = os.getcwd()
if os.path.isabs(DIR) == False: DIR = os.path.abspath(DIR)
if DIR[-1] != "/": DIR += "/"
path_transcript, file_transcript = os.path.split(transcript) #splits the path from the file name
transcript_name = str(file_transcript)
bowtie_base_name = transcript_name.split( "." )
index_name = DIR+str(bowtie_base_name[0])
bam_file = str((bowtie_base_name[0])+"_bowtie.bam")
bowtie_index(transcript,num_cores,DIR)
bowtie_align_pe(transcript,index_name,pe_fq1,pe_fq2,num_cores,DIR)
corset_bowtie(transcript,DIR+bam_file,DIR)
def run_se_bowtie(transcript,se_fq,num_cores,DIR):
if DIR == ".": DIR = os.getcwd()
if os.path.isabs(DIR) == False: DIR = os.path.abspath(DIR)
if DIR[-1] != "/": DIR += "/"
path_transcript, file_transcript = os.path.split(transcript) #splits the path from the file name
transcript_name = str(file_transcript)
bowtie_base_name = transcript_name.split( "." )
index_name = DIR+str(bowtie_base_name[0])
bam_file = str((bowtie_base_name[0])+"_bowtie.bam")
bowtie_index(transcript,num_cores,DIR)
bowtie_align_se(transcript,index_name,se_fq,num_cores,DIR)
corset_bowtie(transcript,DIR+bam_file,DIR)
if __name__ == "__main__":
if len(sys.argv) == 6 and sys.argv[5]=="salmon":
run_se_salmon(transcript=sys.argv[1],se_fq=sys.argv[2],num_cores=int(sys.argv[3]),DIR=sys.argv[4])
elif len(sys.argv) == 7 and sys.argv[6]=="salmon":
run_pe_salmon(transcript=sys.argv[1],pe_fq1=sys.argv[2],pe_fq2=sys.argv[3],num_cores=int(sys.argv[4]),DIR=sys.argv[5])
elif len(sys.argv) == 6 and sys.argv[5]=="bowtie":
run_se_bowtie(transcript=sys.argv[1],se_fq=sys.argv[2],num_cores=int(sys.argv[3]),DIR=sys.argv[4])
elif len(sys.argv) == 7 and sys.argv[6]=="bowtie":
run_pe_bowtie(transcript=sys.argv[1],pe_fq1=sys.argv[2],pe_fq2=sys.argv[3],num_cores=int(sys.argv[4]),DIR=sys.argv[5])
else:
print ("Usage:")
print ("For single end reads: python corset_wrapper.py transcript fastq_se_reads num_cores output_dir aligner[salmon or bowtie]")
print ("For paired end reads: python corset_wrapper.py transcript fastq_pe_reads1 fastq_pe_reads2 num_cores output_dir aligner[salmon or bowtie]")
sys.exit(0)
```
#### File: pdc2/scripts/cut_seq_ends.py
```python
import os,sys
from Bio import SeqIO
MIN_SEQ_LEN = 40
#if taxon id pattern changes, change it here
def get_taxonid(seqid):
return seqid.split("@")[0]
def cut_seq_ends(fasta,blast_output,logfile="log"):
print "Reading raw blast output"
cutDICT = {} #key is seqid, value is a list [start,end,length]
with open(blast_output,"r") as infile:
for line in infile:
if len(line) < 3:
continue #skip empty lines
spls = line.strip().split("\t")
query,hit = spls[0],spls[2]
if get_taxonid(query) == get_taxonid(hit):
continue #ignore hits from the same taxa
qlen,qstart,qend = int(spls[1]),int(spls[10]),int(spls[11])
slen,sstart,send = int(spls[3]),int(spls[12]),int(spls[13])
#get the widest range
if query not in cutDICT:
cutDICT[query] = [10000000,1,qlen] #[start,end,qlen]
if hit not in cutDICT:
cutDICT[hit] = [10000000,1,slen] #[start,end,slen]
cutDICT[query][0] = min(cutDICT[query][0],qstart,qend) #compare starts
cutDICT[query][1] = max(cutDICT[query][1],qstart,qend) #compare ends
cutDICT[hit][0] = min(cutDICT[hit][0],sstart,send) #compare starts
cutDICT[hit][1] = max(cutDICT[hit][1],sstart,send) #compare ends
#output seqid, start and end for cutting, and seq length
with open(blast_output+".cutinfo","w") as outfile:
for seqid in cutDICT:
start,end,length = cutDICT[seqid] #[start,end,length]
outfile.write(seqid+"\t"+str(start)+"\t"+str(end)+"\t"+str(length)+"\n")
print "Output written to",sys.argv[1]+".cutinfo"
print "Cutting"
outfile = open(fasta+".cut","w")
outdict = {} # key is taxonid, value is [before,after,half_left]
with open(fasta,"r") as handle:
for record in SeqIO.parse(handle,"fasta"):
seqid,seq = str(record.id),str(record.seq)
taxonid = get_taxonid(seqid)
if taxonid not in outdict:
outdict[taxonid]= [0,0,0]
outdict[taxonid][0] += 1
if seqid in cutDICT:
start,end,length = cutDICT[seqid]
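# BLAST tabular coordinates are 1-based and inclusive, hence the start-1 when slicing the Python string below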
seq_cut = seq[start-1:end]
if len(seq_cut) >= (len(seq)/2):
outdict[taxonid][1] += 1 # at least half survived cutting
#print seqid, start, end, length,end-start+1,MIN_SEQ_LEN
if len(seq_cut) >= MIN_SEQ_LEN:
outfile.write(">"+seqid+"\n"+seq_cut+"\n")
outdict[taxonid][2] += 1
else: pass # remove seqs with no interspecific hits
outfile.close()
summary = "taxonid\tbefore\thalf_left\tafter\tperc_survive\n"
for i in outdict:
out = outdict[i]
summary+= i+"\t"+str(out[0])+"\t"+str(out[1])+"\t"+str(out[2])+"\t"
summary+= str(out[2]/float(out[0])*100)+"%\n"
print summary
with open(logfile,"a") as f: f.write(summary)
return fasta+".cut"
if __name__ =="__main__":
if len(sys.argv) != 3:
print "usage: cut_seq_ends.py fasta blast_output"
sys.exit()
fasta,blast_output = sys.argv[1:]
cut_seq_ends(fasta,blast_output)
```
#### File: pdc2/scripts/detect_chimera_from_blastx_modifed.py
```python
import sys,os
PIDENT_CUTOFF = 30 #only look at HSPs >= this percentage similarity
LENGTH_CUTOFF = 100 #only look at HSPs >= this length
#calculate length of query coverage
def qcov(hsp):
return abs(hsp[11]-hsp[10]) + 1
#given two hsps, return True if
#overlap is less than 20% of the shorter HSP and less than 60 bp
def separated(hsp1,hsp2):
length1 = qcov(hsp1)
length2 = qcov(hsp2)
start = min(hsp1[10],hsp1[11],hsp2[10],hsp2[11])
end = max(hsp1[10],hsp1[11],hsp2[10],hsp2[11])
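#overlap = sum of the two query spans minus the total span they jointly cover; large values mean the HSPs overlap heavily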
overlap = length1+length2 - (end-start) + 1
#value of overlap can be < 0 but only the upper limit matters
if overlap < min(60, 0.2*min(length1,length2)):
return True
else: return False
#expand query range given two hsps of the same query-hit pair
#both hsps are the same direction
def expand_range(hsp1,hsp2):
if hsp1 == []: return hsp2
if hsp2 == []: return hsp1
start1,end1,start2,end2 = hsp1[10],hsp1[11],hsp2[10],hsp2[11]
if start1 < end1 and start2 < end2:#both forward
start,end = min(start1,start2),max(end1,end2)
elif start1 > end1 and start2 > end2:#both reverse
start,end = max(start1,start2),min(end1,end2)
#no change if new hsp is of opposite direction
hsp1[10],hsp1[11] = start,end
return hsp1
#detect chimera from a block of hits
#block can be a query-hit block (only check for self-chimera)
#or block can be a query block (check both self and multi-gene chimera)
#return True if no chimera is detected, False if chimera is detected
#also write to out file the combined best HSP
def check_block(block,multigene):
#only one hsp -> not chimera
if len(block) == 1: return True
#summarize all pos and neg HSPs
pos,neg = [],[]
for hsp in block:
if hsp[4][0] == "-":
neg = expand_range(neg,hsp)
else:
pos = expand_range(pos,hsp)
#compare pos_hit and neg_hit
if (pos == [] and neg != []) or (neg == [] and pos != []):
return True #only has hits of one direction
elif separated(pos,neg):#has both direction and separate -> chimera!
#write range to cut
if multigene: #output both hsps
start1,end1 = min(pos[10],pos[11]),max(pos[10],pos[11])
start2,end2 = min(neg[10],neg[11]),max(neg[10],neg[11])
outfile1.write(pos[0]+" "+str(int(start1))+" "+str(int(end1))+" trans-multi\n")
outfile1.write(neg[0]+" "+str(int(start2))+" "+str(int(end2))+" trans-multi\n")
else:
if qcov(pos) > qcov(neg):
outhsp = pos #outhsp is the better covered of the two
else: outhsp = neg
start,end = min(outhsp[10],outhsp[11]),max(outhsp[10],outhsp[11]) #range to cut
outfile1.write(outhsp[0]+" "+str(int(start))+" "+str(int(end))+" trans-self\n")
#write the blastx block to a .info file for visual checking
for i in pos:
outfile2.write(str(i)+"\t")
outfile2.write("\n")
for i in neg:
outfile2.write(str(i)+"\t")
outfile2.write("\n")
return False
else:
return True #has both direction but not separate
if __name__ == "__main__":
if len(sys.argv) != 3:
print "usage: python detect_chimera_from_blastx.py blastx_output output_dir"
sys.exit()
blastx_output = sys.argv[1]
DIR = sys.argv[2]
if DIR == ".": DIR = os.getcwd()
if os.path.isabs(DIR) == False: DIR = os.path.abspath(DIR)
if DIR[-1] != "/": DIR += "/"
path_blastx, file_blastx = os.path.split(blastx_output) #splits the path from the file name
blastx_name = str(file_blastx)
blastx_base_name = blastx_name.split( "." )
infile = open(blastx_output,"rU")
outfile1 = open(DIR+blastx_base_name[0]+".cut","w")
outfile2 = open(DIR+blastx_base_name[0]+".info","w")
last_query = ""
for line in infile:
if len(line) < 3: continue #ignore empty lines
hsp = line.strip().split("\t")
for i in [5,10,11]: hsp[i] = float(hsp[i])
if hsp[5] < PIDENT_CUTOFF or qcov(hsp) < LENGTH_CUTOFF:
continue #ignore low similarity or short HSPs
query,hit = hsp[0],hsp[2]
if last_query == "": #at the beginning of a file
hit_block = [hsp] #store all HSPs of the same query and same hit
query_block = [hsp] #store all HSPs from the same query
good_seq = True #False if chimera is detected
elif query == last_query: #continue with the same query
query_block.append(hsp)
if good_seq: #only check if no chimera has been detected
if hit == last_hit:
hit_block.append(hsp)
else: #send off the hit_block
good_seq = check_block(hit_block,False)
hit_block = [hsp]
else: #starting a new query
if good_seq: #haven't found self chimera
good_seq = check_block(hit_block,False) #check the last hit block
if good_seq: #haven't found self chimera
good_seq = check_block(query_block,True) #look for multi-chimera
query_block,hit_block = [hsp],[hsp]
good_seq = True
#keep track of the last line processed
last_query,last_hit = query,hit
if good_seq: #haven't found self chimera
good_seq = check_block(hit_block,False) #check the last hit block
if good_seq: #haven't found self chimera
good_seq = check_block(query_block,True) #check the last query block
infile.close()
outfile1.close()
outfile2.close()
```
#### File: pdc2/scripts/fasta_to_tree.py
```python
import os,sys
from mafft_wrapper import mafft
from pasta_wrapper import pasta
from phyutility_wrapper import phyutility
from fasttree_wrapper import fasttree
from raxml_wrapper import raxml
from raxml_bs_wrapper import raxml_bs
from seq import read_fasta_file
NUM_SEQ_CUTOFF = 1000 # Use different alignment and tree inference tools
# below vs. above this cutoff
def get_fasta_size(fasta):
"""
given a fasta file
output the number of seqs and the length of the longest seq
"""
longest = 0
seqlist = read_fasta_file(fasta)
for s in seqlist:
longest = max(longest,len(s.seq.replace("-","")))
return len(seqlist),longest
def fasta_to_tree(DIR,fasta,num_cores,seqtype,num_seq_cutoff=NUM_SEQ_CUTOFF):
"""
given a fasta file
align, trim alignment and build a tree
choose appropriate tools depending on size of the fasta file
"""
if DIR[-1] != "/": DIR += "/"
seqcount, maxlen = get_fasta_size(DIR+fasta)
assert seqcount >= 4, "Less than four sequences in "+DIR+fasta
print fasta,seqcount,"sequences"
if seqcount >= NUM_SEQ_CUTOFF: # large cluster
alignment = pasta(DIR,fasta,num_cores,seqtype)
cleaned = phyutility(DIR,alignment,0.01,seqtype)
if len(read_fasta_file(DIR+cleaned)) >= 4:
tree = fasttree(DIR,cleaned,seqtype)
else: print "Less than 4 taxa in",cleaned
else: # small cluster
alignment = mafft(DIR,fasta,num_cores,seqtype)
cleaned = phyutility(DIR,alignment,0.1,seqtype)
if len(read_fasta_file(DIR+cleaned)) >= 4:
tree = raxml(DIR,cleaned,num_cores,seqtype)
else: print "Less than 4 taxa in",cleaned
def fasta_to_bs_tree(DIR,fasta,num_cores,seqtype):
"""
given a fasta file for the final homolog
align, trim alignment and build a tree with bootstrap support
"""
if DIR[-1] != "/": DIR += "/"
seqcount, maxlen = get_fasta_size(DIR+fasta)
assert seqcount >= 4, "Less than four sequences in "+DIR+fasta
print fasta,seqcount,"sequences"
alignment = mafft(DIR,fasta,num_cores,seqtype)
cleaned = phyutility(DIR,alignment,0.2,seqtype)
if len(read_fasta_file(DIR+cleaned)) >= 4:
tree = raxml_bs(DIR,cleaned,num_cores,seqtype)
else: print "Less than 4 taxa in",cleaned
def main(DIR,num_cores,seqtype,bs,test=False):
"""if test, only process clusterID that ends with 0"""
assert seqtype == "aa" or seqtype == "dna",\
"Input data type: dna or aa"
assert bs == "y" or bs=="n",\
"bootstrap? (y/n)"
if DIR[-1] != "/": DIR += "/"
#check for really long sequences or large alignments.
#These crash the alignment program
for i in os.listdir(DIR):
if i.endswith(".fa"):
seqcount,maxlen = get_fasta_size(DIR+i)
if (maxlen>=10000 and seqtype=="aa") or (maxlen>=30000 and seqtype=="dna"):
print i,"has",seqcount,"sequences"
print "longest sequence has",maxlen,"characters"
print "Warning: sequence too long. May crash alignment process"
#sys.exit()
filecount = 0
for i in os.listdir(DIR):
if not i.endswith(".fa"): continue
if test and (i.split(".")[0])[-1] != "0": continue
filecount += 1
if bs == "n":
fasta_to_tree(DIR=DIR,fasta=i,num_cores=num_cores,seqtype=seqtype)
else: fasta_to_bs_tree(DIR=DIR,fasta=i,num_cores=num_cores,seqtype=seqtype)
assert filecount > 0, "No file end with .fa found in "+DIR
if __name__ == "__main__":
if len(sys.argv) != 5:
print "python fasta_to_tree.py DIR number_cores dna/aa bootstrap(y/n)"
sys.exit(0)
main(sys.argv[1],sys.argv[2],sys.argv[3],sys.argv[4])
```
#### File: pdc2/scripts/filter_1to1_orthologs.py
```python
import newick3,phylo3,os,sys
from tree_utils import *
def get_121(indir,tree_file_ending,min_taxa,outdir,min_bootstrap=0.0):
if indir[-1] != "/": indir += "/"
if outdir[-1] != "/": outdir += "/"
min_taxa = int(min_taxa)
min_bootstrap = float(min_bootstrap)
infile_count, outfile_count = 0,0
print "Filter one-to-one homologs with average bootstrap of at least",\
min_bootstrap
for i in os.listdir(indir):
if not i.endswith(tree_file_ending): continue
infile_count += 1
with open(indir+i,"r") as infile: #only 1 tree in each file
intree = newick3.parse(infile.readline())
names = get_front_names(intree)
num_tips,num_taxa = len(names),len(set(names))
print "number of tips:",num_tips,"number of taxa:",num_taxa
if num_tips == num_taxa and num_taxa >= min_taxa:
if min_bootstrap > 0.0 and not pass_boot_filter(intree,min_bootstrap):
continue
print i,"written to out dir"
outname = i.split(".")[0]+".1to1ortho.tre"
os.system("cp "+indir+i+" "+outdir+outname)
outfile_count += 1
assert infile_count > 0,\
"No file ends with "+tree_file_ending+" was found in "+indir
print infile_count,"files read,",outfile_count,"written to",outdir
if __name__ == "__main__":
if len(sys.argv) != 5:
print "python filter_1to1_orthologs.py homoTreeDIR tree_file_ending minimal_taxa outDIR"
sys.exit(0)
indir,tree_file_ending,min_taxa,outdir = sys.argv[1:]
get_121(indir,tree_file_ending,min_taxa,outdir)
```
#### File: pdc2/scripts/filter_corset_output.py
```python
import sys, os
import pandas as pd
from Bio import SeqIO
def filter_corset(transcripts,corset_cluster,DIR):
if DIR == ".": DIR = os.getcwd()
if os.path.isabs(DIR) == False: DIR = os.path.abspath(DIR)
if DIR[-1] != "/": DIR += "/"
path_transcript, files_transcript = os.path.split(transcripts)
transcripts_name = str(files_transcript)
base_name_transcripts = transcripts_name.split( "." )
largest_cluster_transcripts = base_name_transcripts[0]+".largest_cluster_transcripts.fa"
redundant_transcripts = base_name_transcripts[0]+".redundant_cluster_transcripts.fa"
if os.path.exists(DIR+largest_cluster_transcripts) and os.path.exists(DIR+redundant_transcripts):
print "Largest and reduntant transcript files found for", base_name_transcripts[0]
else:
#reads the corset cluster file as a dataframe (df) and adds headers
clusters_df = pd.read_table(corset_cluster, header=None)
cluster_columns = "seqid cluster".strip().split(' ')
clusters_df.columns = cluster_columns
#takes the transcripts file and creates a df with sequence name and length
seqid = []
length = []
for rec in SeqIO.parse(transcripts, 'fasta'):
name = rec.id
seq = rec.seq
seqLen = len(rec)
seqid.append(name)
length.append(seqLen)
d = {"seqid":seqid,"length":length}
seq_len_df = pd.DataFrame(d)
#filters previous df to contain only info from the surviving clusters of corset
seq_len_filtered_df= seq_len_df[seq_len_df['seqid'].isin(clusters_df['seqid'])]
#merge previous df to the corset cluster df so it has cluster sequence name, cluster name and sequence length
clusters_with_len_df=pd.merge(clusters_df, seq_len_filtered_df, on="seqid", how="left")
#sort previous df by length and drop duplicate clusters(redundant) keeping only the one with the largest sequence
largest_cluster_df = clusters_with_len_df.sort_values('length', ascending=False).drop_duplicates('cluster').sort_index()
#returns the df with only the redundant sequences info
removed_cluster_df = clusters_with_len_df[~clusters_with_len_df['seqid'].isin(largest_cluster_df['seqid'])]
#index transcript sequences
transcripts = SeqIO.index(transcripts, "fasta")
#writes fasta file and cluster info for the largest sequences from corset clusters
largest_cluster=[]
largest_cluster_name = largest_cluster_df["seqid"]
for i in largest_cluster_name: largest_cluster.append(transcripts[i])
count = SeqIO.write(largest_cluster,(DIR+largest_cluster_transcripts), "fasta")
print("Kept %i largest transcripts from corset clusters" % count)
largest_cluster_df.to_csv((DIR+base_name_transcripts[0]+".largest_cluster.csv"),index=False)
#writes fasta file and cluster info for redundant sequences from corset clusters
removed_cluster=[]
removed_cluster_name = removed_cluster_df["seqid"]
for i in removed_cluster_name: removed_cluster.append(transcripts[i])
count = SeqIO.write(removed_cluster,(DIR+redundant_transcripts), "fasta")
print("Removed %i redundant transcripts" % count)
removed_cluster_df.to_csv((DIR+base_name_transcripts[0]+".redundant_cluster.csv"),index=False)
assert os.path.exists(DIR+largest_cluster_transcripts) and os.path.exists(DIR+redundant_transcripts), "filter_corset_output did not finish"
if __name__ == "__main__":
if len(sys.argv) == 4:
filter_corset(transcripts=sys.argv[1],corset_cluster=sys.argv[2],DIR=sys.argv[3])
else:
print ("Usage:")
print ("python filter_corset_output.py transcripts_fasta corset_cluster_file output_dir")
sys.exit(0)
```
#### File: pdc2/scripts/mask_tips_by_taxonID_genomes.py
```python
import newick3,phylo3,os,sys
#if taxon id pattern changes, change it here
def get_name(name):
return name.split("@")[0]
#smooth the kink created by pruning
#to prevent creating orphaned tips after pruning twice at the same node
def remove_kink(node,curroot):
if node == curroot and curroot.nchildren == 2:
#move the root away to an adjacent none-tip internal node
if curroot.children[0].istip: #the other child is not tip
curroot = phylo3.reroot(curroot,curroot.children[1])
else: #tree has >=4 leaves so the other node cannot be tip
curroot = phylo3.reroot(curroot,curroot.children[0])
#---node---< all nodes should have one child only now
length = node.length + (node.children[0]).length
par = node.parent
kink = node
node = node.children[0]
#parent--kink---node<
par.remove_child(kink)
par.add_child(node)
node.length = length
return node,curroot
def monophyly_masking_by_bl(curroot):
going = True
while going and len(curroot.leaves()) >= 4:
going = False
for node in curroot.iternodes(): #walk through nodes
if not node.istip: continue #only look at tips
for sister in node.get_sisters():
if sister.istip and get_name(node.label)==get_name(sister.label):
if node.length > sister.length:
node = node.prune()
else: node = sister.prune()
if len(curroot.leaves()) >= 4:
if node.nchildren==1 or (node==curroot and node.nchildren==2):
node,curroot = remove_kink(node,curroot)
#no kink if the original node had more than 2 children
going = True
break
return curroot
if __name__ == "__main__":
if len(sys.argv) != 2:
print "python mask_tips_by_taxonID_genomes.py DIR"
sys.exit()
DIR = sys.argv[1]+"/"
filecount = 0
for i in os.listdir(DIR):
if i[-3:] == ".tt": #only mask trees that have tips trimmed
with open(DIR+i,"r") as infile:
intree = newick3.parse(infile.readline())
print i
filecount += 1
with open(DIR+i+".mm","w") as outfile:
outfile.write(newick3.tostring(monophyly_masking_by_bl(intree))+";\n")
if filecount == 0:
print "No file name with 'best' or 'tt' or 'fasttree' found in the treDIR"
```
#### File: pdc2/scripts/mask_tips_by_taxonID_transcripts.py
```python
import newick3,phylo3,os,sys
from tree_utils import get_name,remove_kink
from seq import read_fasta_file
INTREE_FILE_ENDING = ".tt"
def get_clusterID(filename):
"""given a file name return the cluster id"""
return filename.split(".")[0]
def mask_monophyletic_tips(curroot,unamb_chrDICT):
going = True
while going and len(curroot.leaves()) >= 4:
going = False
for node in curroot.iternodes(): #walk through nodes
if not node.istip: continue #only look at tips
for sister in node.get_sisters():
if sister.istip and get_name(node.label)==get_name(sister.label): #masking
#print node.label,unamb_chrDICT[node.label],sister.label,unamb_chrDICT[sister.label]
if unamb_chrDICT[node.label] > unamb_chrDICT[sister.label]:
node = sister.prune()
else: node = node.prune()
if len(curroot.leaves()) >= 4:
if (node==curroot and node.nchildren==2) or (node!=curroot and node.nchildren==1):
node,curroot = remove_kink(node,curroot)
going = True
break
return curroot
def mask_paraphyletic_tips(curroot,unamb_chrDICT):
going = True
while going and len(curroot.leaves()) >= 4:
going = False
for node in curroot.iternodes(): #walk through nodes
if not node.istip: continue #only look at tips
parent = node.parent
if node == curroot or parent == curroot:
continue #no paraphyletic tips for the root
for para in parent.get_sisters():
if para.istip and get_name(node.label)==get_name(para.label):
if unamb_chrDICT[node.label] > unamb_chrDICT[para.label]:
node = para.prune()
else: node = node.prune()
if len(curroot.leaves()) >= 4:
if (node==curroot and node.nchildren==2) or (node!=curroot and node.nchildren==1):
node,curroot = remove_kink(node,curroot)
going = True
break
return curroot
def main(treDIR,clnDIR,para,intree_file_ending=INTREE_FILE_ENDING):
if treDIR[-1] != "/": treDIR += "/"
if clnDIR[-1] != "/": clnDIR += "/"
assert para=="y" or para=="n", "mask paraphyletic tips? (y/n)"
mask_para = True if para == "y" else False
filecount = 0
filematch = {} #key is clusterID, value is the .aln-cln file
for i in os.listdir(clnDIR):
if i.endswith(".aln-cln"):
clusterID = get_clusterID(i)
assert clusterID not in filematch, \
"The clusterID "+clusterID+" repeats in "+clnDIR
filematch[clusterID] = i
for i in os.listdir(treDIR):
if i.endswith(intree_file_ending):
with open(treDIR+i,"r") as infile:
intree = newick3.parse(infile.readline())
print i
clusterID = get_clusterID(i)
filecount += 1
chrDICT = {} #key is seqid, value is number of unambiguous chrs
for s in read_fasta_file(clnDIR+filematch[clusterID]):
for ch in ['-','X',"x","?","*"]:
s.seq = s.seq.replace(ch,"") #ignore gaps, xs and Xs
chrDICT[s.name] = len(s.seq)
curroot = mask_monophyletic_tips(intree,chrDICT)
if mask_para: curroot = mask_paraphyletic_tips(curroot,chrDICT)
with open(treDIR+i+".mm","w") as outfile:
outfile.write(newick3.tostring(curroot)+";\n")
assert filecount > 0, \
"No file ends with "+intree_file_ending+" found in "+treDIR
if __name__ == "__main__":
if len(sys.argv) != 4:
print "python mask_tips_by_taxonID_transcripts.py treDIR aln-clnDIR mask_paraphyletic(y/n)"
sys.exit(0)
treDIR,clnDIR,para = sys.argv[1:]
main(treDIR,clnDIR,para)
```
#### File: pdc2/scripts/phyutility_wrapper.py
```python
import sys,os
from seq import read_fasta_file
def phyutility(DIR,alignment,min_col_occup,seqtype,min_chr=10):
"""
remove columns with occupancy lower than MIN_COLUMN_OCCUPANCY
remove seqs shorter than MIN_CHR after filter columns
"""
if DIR[-1] != "/": DIR += "/"
cleaned = alignment+"-cln"
if os.path.exists(DIR+cleaned): return cleaned
assert alignment.endswith(".aln"),\
"phyutility infile "+alignment+" not ends with .aln"
assert os.stat(DIR+alignment).st_size > 0, DIR+alignment+"empty"
assert seqtype == "aa" or seqtype == "dna","Input data type: dna or aa"
if seqtype == "aa":
cmd = ["phyutility","-aa","-clean",str(min_col_occup),"-in",\
DIR+alignment,"-out",DIR+alignment+"-pht"]
else:
cmd = ["phyutility","-clean",str(min_col_occup),"-in",\
DIR+alignment,"-out",DIR+alignment+"-pht"]
print " ".join(cmd)
os.system(" ".join(cmd))
assert os.path.exists(DIR+alignment+"-pht"),"Error phyutility"
#remove empty and very short seqs
outfile = open(DIR+cleaned,"w")
for s in read_fasta_file(DIR+alignment+"-pht"):
if len(s.seq.replace("-","")) >= min_chr:
outfile.write(s.get_fasta())
outfile.close()
os.remove(DIR+alignment+"-pht")
return cleaned
def main(DIR,min_col_occup,seqtype):
if DIR[-1] != "/": DIR += "/"
filecount = 0
for i in os.listdir(DIR):
if i.endswith(".aln"):
filecount += 1
phyutility(DIR,i,min_col_occup,seqtype)
assert filecount > 0, "No file end with .aln found in "+DIR
if __name__ == "__main__":
if len(sys.argv) != 4:
print "python phyutility_wrapper.py DIR min_column_occupancy dna/aa"
sys.exit(0)
DIR,min_col_occup,seqtype = sys.argv[1:]
main(DIR,min_col_occup,seqtype)
```
#### File: pdc2/scripts/prune_paralogs_MI.py
```python
import newick3,phylo3,os,sys
import trim_tips
import tree_utils
OUTPUT_1to1_ORTHOLOGS = True
def get_clusterID(filename):
return filename.split(".")[0]
def get_front_score(node):
front_labels = tree_utils.get_front_labels(node)
num_labels = len(front_labels)
num_taxa = len(set([tree_utils.get_name(i) for i in front_labels]))
if num_taxa == num_labels:
return num_taxa
return -1
def get_back_score(node,root):
back_labels = tree_utils.get_back_labels(node,root)
num_labels = len(back_labels)
num_taxa = len(set([tree_utils.get_name(i) for i in back_labels]))
if num_taxa == num_labels:
return num_taxa
return -1
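# Prune whichever side (front or back) scored higher; the extracted ortholog subtree is appended to pp_trees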
def prune(score_tuple,node,root,pp_trees):
if score_tuple[0] > score_tuple[1]: #prune front
print "prune front"
pp_trees.append(node)
par = node.prune()
if par != None and len(root.leaves()) >= 3:
par,root = tree_utils.remove_kink(par,root)
return root,node == root
else:
if node != root: #prune back
par = node.parent #par--node<
par.remove_child(node)
if par.parent != None:
par,root = tree_utils.remove_kink(par,root)
node.prune()
print "prune back"
pp_trees.append(root)
if len(node.leaves()) >= 3:
node,newroot = tree_utils.remove_kink(node,node)
else:
newroot = node
return newroot,False #original root was cutoff, not done yet
if __name__ == "__main__":
if len(sys.argv) != 7:
print "python prune_paralogs_MI.py homoTreeDIR tree_file_ending relative_tip_cutoff absolute_tip_cutoff MIN_TAXA outDIR"
print "LONG_TIP_CUTOFF is typically same value of the previous LONG_TIP_CUTOFF"
sys.exit(0)
inDIR = sys.argv[1]+"/"
tree_file_ending = sys.argv[2]
relative_tip_cutoff,absolute_tip_cutoff = float(sys.argv[3]),float(sys.argv[4])
MIN_TAXA = int(sys.argv[5])
outDIR = sys.argv[6]+"/"
for i in os.listdir(inDIR):
if not i.endswith(tree_file_ending): continue
print i
with open(inDIR+i,"r") as infile: #only 1 tree in each file
intree = newick3.parse(infile.readline())
curroot = intree
pp_trees = []
if get_front_score(curroot) >= MIN_TAXA: #No need to prune
print "No pruning needed"
if OUTPUT_1to1_ORTHOLOGS:
os.system("cp "+inDIR+i+" "+outDIR+get_clusterID(i)+"_1to1ortho.tre")
else: #scoring the tree
going = True
pp_trees = []
while going: #python version of do..while loop
highest = 0
highest_node = None
score_hashes = {} #key is node, value is a tuple (front_score,back_score)
for node in curroot.iternodes():
front_score = get_front_score(node)
back_score = get_back_score(node,curroot)
score_hashes[node] = (front_score,back_score)
if front_score > highest or back_score > highest:
highest_node = node
highest = max(front_score,back_score)
if highest >= MIN_TAXA: #prune
curroot,done = prune(score_hashes[highest_node],highest_node,curroot,pp_trees)
if done or len(curroot.leaves()) < MIN_TAXA:
going = False
break
else:
going = False
break
if len(pp_trees) > 0:
count = 1
for tree in pp_trees:
if tree.nchildren == 2:
node,tree = trim_tips.remove_kink(tree,tree)
tree = trim_tips.trim(tree,relative_tip_cutoff,absolute_tip_cutoff)
if tree != None and len(tree.leaves()) >= MIN_TAXA:
with open(outDIR+get_clusterID(i)+"_MIortho"+str(count)+".tre","w") as outfile:
outfile.write(newick3.tostring(tree)+";\n")
count += 1
```
#### File: pdc2/scripts/rcorrector_wrapper.py
```python
import sys,os
APPS_HOME = "/usr/local/bin/" # where rcorrector is located
RCORRECTOR_CMD = 'perl '+APPS_HOME+"/Rcorrector/run_rcorrector.pl" # Basic command to call rcorrector
def rcorrector_se(se_fq,num_cores,DIR):
if DIR == ".": DIR = os.getcwd()
if os.path.isabs(DIR) == False: DIR = os.path.abspath(DIR)
if DIR[-1] != "/": DIR += "/"
path_se, file_se = (os.path.split(se_fq)) #splits the path from the file name
se_name = str(file_se)
base_name_se = se_name.split( "." )
if (os.path.splitext(file_se)[-1]) == ".gz" :
corrected = (base_name_se[0])+".cor.fq.gz"
else:
corrected = (base_name_se[0])+".cor.fq"
if os.path.exists(DIR+corrected):
print ("Found", corrected)
else:
cmd = [RCORRECTOR_CMD,"-s",se_fq,"-t",str(num_cores),"-od",DIR]
print (" ".join(cmd))
os.system(" ".join(cmd))
assert os.path.exists(DIR+corrected),"Rcorrector did not finished"
def rcorrector_pe(pe_fq1,pe_fq2,num_cores,DIR):
if DIR == ".": DIR = os.getcwd()
if os.path.isabs(DIR) == False: DIR = os.path.abspath(DIR)
if DIR[-1] != "/": DIR += "/"
path_pe_1, file_pe_1 = os.path.split(pe_fq1) #splits the path from the file name
path_pe_2, file_pe_2 = os.path.split(pe_fq2)
pe_fq1_name = str(file_pe_1)
base_name_1 = pe_fq1_name.split( "." )
pe_fq2_name = str(file_pe_2)
base_name_2 = pe_fq2_name.split( "." )
if (os.path.splitext(file_pe_1)[-1] == ".gz") and (os.path.splitext(file_pe_2)[-1] == ".gz"):
corrected_1 = (base_name_1[0])+".cor.fq.gz"
corrected_2 = (base_name_2[0])+".cor.fq.gz"
else:
corrected_1 = (base_name_1[0])+".cor.fq" #input paired fq1 with extension ".cor.fq"
corrected_2 = (base_name_2[0])+".cor.fq" #input paired fq2 with extension ".cor.fq"
if os.path.exists(DIR+corrected_1) and os.path.exists(DIR+corrected_2):
print ("Found", corrected_1, corrected_2)
else:
cmd = [RCORRECTOR_CMD,"-1",pe_fq1,"-2",pe_fq2,"-t",str(num_cores),"-od",DIR]
print (" ".join(cmd))
os.system(" ".join(cmd))
assert os.path.exists(DIR+corrected_1) \
and os.path.exists(DIR+corrected_2),"Rcorrector did not finish"
if __name__ == "__main__":
if len(sys.argv) == 4:
rcorrector_se(se_fq=sys.argv[1],num_cores=int(sys.argv[2]),DIR=sys.argv[3])
elif len(sys.argv) == 5:
rcorrector_pe(pe_fq1=sys.argv[1],pe_fq2=sys.argv[2],num_cores=int(sys.argv[3]),DIR=sys.argv[4])
else:
print ("Usage:")
print ("For single end reads: python rcorrector.py fastq_se_reads num_cores output_dir")
print ("For paired end reads: python rcorrector.py fastq_pe_reads1 num_cores fastq_pe_reads2 output_dir")
sys.exit(0)
```
#### File: pdc2/scripts/shorten_trinity_names.py
```python
import sys, os, re
def short_trinity_names(transcripts,DIR):
if DIR == ".": DIR = os.getcwd()
if os.path.isabs(DIR) == False: DIR = os.path.abspath(DIR)
if DIR[-1] != "/": DIR += "/"
path_transcript, files_transcript = os.path.split(transcripts)
transcripts_name = str(files_transcript)
base_name_transcripts = transcripts_name.split( "." )
transcripts_short_name_file = base_name_transcripts[0]+".Trinity.short_name.fa"
if DIR == ".": DIR = os.getcwd()
if os.path.isabs(DIR) == False: DIR = os.path.abspath(DIR)
if DIR[-1] != "/": DIR += "/"
#trim trinity length and path information from transcript fasta names of good contigs
searchstr = r'(>\w+)(\slen.*)'
replacestr = r'\1'
outfile = open((DIR+transcripts_short_name_file), 'w')
with open((DIR+transcripts), 'rU') as fasta_file:
reg = re.compile(searchstr)
for line in fasta_file:
line=line.strip('\n')
if line.startswith('>'):
fixline = reg.sub(replacestr, line)
outfile.write(fixline + '\n')
else:
outfile.write(line + '\n')
outfile.close()
if __name__ == "__main__":
if len(sys.argv) == 3:
short_trinity_names(transcripts=sys.argv[1],DIR=sys.argv[2])
else:
print ("Usage:")
print ("python shorten_trinity_names.py transcripts_fasta output_dir")
sys.exit(0)
```
#### File: pdc2/scripts/tree_utils.py
```python
import phylo3,newick3
import os,sys
def get_name(label):
"""Get taxonID from a tip label"""
return label.split("@")[0]
#return (label.split("@")[0]).split("_")[0]
def get_clusterID(filename):
return filename.split(".")[0]
def get_front_labels(node):
"""given a node, return a list of front tip labels"""
leaves = node.leaves()
return [i.label for i in leaves]
def get_back_labels(node,root):
"""given a node, return a list of back tip labels"""
all_labels = get_front_labels(root)
front_labels = get_front_labels(node)
return set(all_labels) - set(front_labels)
def get_front_names(node):
"""given a node, return a list of front tip taxonIDs
list may contain identical taxonIDs"""
labels = get_front_labels(node)
return [get_name(i) for i in labels]
def get_back_names(node,root):
"""given a node, return a list of back tip taxonIDs
list may contain identical taxonIDs"""
back_labels = get_back_labels(node,root)
return [get_name(i) for i in back_labels]
def remove_kink(node,curroot):
"""
smooth the kink created by pruning
to prevent creating orphaned tips
after pruning twice at the same node
"""
if node == curroot and curroot.nchildren == 2:
#move the root away to an adjacent non-tip
if curroot.children[0].istip: #the other child is not tip
curroot = phylo3.reroot(curroot,curroot.children[1])
else: curroot = phylo3.reroot(curroot,curroot.children[0])
#---node---< all nodes should have one child only now
length = node.length + (node.children[0]).length
par = node.parent
kink = node
node = node.children[0]
#parent--kink---node<
par.remove_child(kink)
par.add_child(node)
node.length = length
return node,curroot
def pass_boot_filter(node,min_ave_boot):
"""check whether the average bootstrap value pass a cutoff"""
total = 0.0
count = 0
for i in node.iternodes():
if not i.istip and i.parent != None:
total += float(i.label)
count += 1
if count == 0: #extracted clades with only two tips
return True
boot_average = total / float(count)
print boot_average
return boot_average >= float(min_ave_boot)
def get_ortho_from_rooted_inclade(inclade):
"""
input a rooted tree
cut apart bifurcating nodes when duplicated taxonIDs are detected
"""
assert inclade.nchildren == 2, "input clade not properly rooted"
orthologs = [] #store ortho clades
clades = [inclade]
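#iteratively split clades that contain duplicated taxonIDs until every remaining clade has one tip per taxon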
while True:
newclades = [] #keep track of subclades generated in this round
for clade in clades:
num_taxa = len(set(get_front_names(clade)))
num_tips = len((get_front_labels(clade)))
if num_taxa == num_tips: #no taxon repeats
orthologs.append(clade)
else: #has duplicated taxa
for node in clade.iternodes(order=0): #PREORDER, root to tip
if node.istip: continue
#traverse the tree from root to tip
child0,child1 = node.children[0],node.children[1]
name_set0 = set(get_front_names(child0))
name_set1 = set(get_front_names(child1))
if len(name_set0.intersection(name_set1)) > 0:
if node == clade:
newclades += [child0,child1] #split into the two basal children
elif len(name_set0) > len(name_set1): #cut the side with fewer taxa
node.remove_child(child1)
child1.prune()
node,clade = remove_kink(node,clade) #no rerooting here
newclades += [clade,child1]
else:
node.remove_child(child0)
child0.prune()
node,clade = remove_kink(node,clade) #no rerooting here
newclades += [clade,child0]
break
if newclades == []: break
clades = newclades
return orthologs
def extract_rooted_ingroup_clades(root,ingroups,outgroups,min_ingroup_taxa):
"""
input a tree with ingroups and at least 1 outgroup
output a list of rooted ingroup clades
"""
inclades = []
while True:
max_score,direction,max_node = 0,"",None
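#score each node by the number of ingroup taxa on its front/back side; any outgroup taxon on that side disqualifies it (score -1)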
for node in root.iternodes():
front,back = 0,0
front_names_set = set(get_front_names(node))
for name in front_names_set:
if name in outgroups:
front = -1
break
elif name in ingroups: front += 1
else: sys.exit("Check taxonID "+name)
back_names_set = set(get_back_names(node,root))
for name in back_names_set:
if name in outgroups:
back = -1
break
elif name in ingroups: back += 1
else: sys.exit("Check taxonID "+name)
if front > max_score:
max_score,direction,max_node = front,"front",node
if back > max_score:
max_score,direction,max_node = back,"back",node
#print max_score,direction
if max_score >= min_ingroup_taxa:
if direction == "front":
inclades.append(max_node)
kink = max_node.prune()
if len(root.leaves()) > 3:
newnode,root = remove_kink(kink,root)
else: break
elif direction == "back":
par = max_node.parent
par.remove_child(max_node)
max_node.prune()
inclades.append(phylo3.reroot(root,par))#flip direction
if len(max_node.leaves()) > 3:
max_node,root = remove_kink(max_node,max_node)
else: break
else: break
return inclades
```
#### File: pdc2/scripts/write_ortholog_fasta_files.py
```python
import sys,os,newick3,phylo3
from Bio import SeqIO
ORTHO_TREE_FILE_ENDING = ".tre"
def get_name(label):
return label.split("@")[0]
def get_front_labels(node):
leaves = node.leaves()
return [i.label for i in leaves]
if __name__ =="__main__":
if len(sys.argv) != 5:
print "usage: python write_ortholog_fasta_files.py fasta treeDIR outDIR MIN_TAXA"
sys.exit()
fasta = sys.argv[1]
treDIR = sys.argv[2]+"/"
outDIR = sys.argv[3]+"/"
MIN_TAXA = int(sys.argv[4])
print "Reading the original fasta file"
handle = open(fasta,"rU")
#hash table of taxonID -> seqID -> seq
seqDICT = {} #key is taxonID, value is seqID
for seq_record in SeqIO.parse(handle,"fasta"):
seqID,seq = str(seq_record.id),str(seq_record.seq)
taxonID = get_name(seqID)
if taxonID not in seqDICT:
seqDICT[taxonID] = {} #key is taxonID, value is seq
print "Adding sequence from",taxonID
seqDICT[taxonID][seqID] = seq
handle.close()
print "Writing fasta files"
for i in os.listdir(treDIR):
if i[-len(ORTHO_TREE_FILE_ENDING):] == ORTHO_TREE_FILE_ENDING:
#read in tree tips and write output alignment
with open(treDIR+i,"r")as infile:
intree = newick3.parse(infile.readline())
labels = get_front_labels(intree)
if len(labels) >= MIN_TAXA:
with open(outDIR+i.replace(ORTHO_TREE_FILE_ENDING,".fa"),"w") as outfile:
for lab in labels:
name = get_name(lab)
outfile.write(">"+name+"\n"+seqDICT[name][lab]+"\n")
``` |
{
"source": "jlangdev/falconpy",
"score": 2
} |
#### File: falconx_sandbox/single_scan/falconx_scan_example_uber.py
```python
import os
import time
import argparse
from enum import Enum
from datetime import timedelta
from argparse import RawTextHelpFormatter
try:
from falconpy.api_complete import APIHarness
except ImportError as no_falconpy:
raise SystemExit(
"CrowdStrike FalconPy must be installed in order to use this application.\n"
"Please execute `python3 -m pip install crowdstrike-falconpy` and try again."
) from no_falconpy
class Environment(Enum):
"""
Enum to hold our different environment specifiers.
"""
WIN7 = 100
WIN7_64 = 110
WIN10 = 160
DROID = 200
LINUX = 300
class Indicator():
"""
Silly progress indicator styled after a classic Cylon.
"""
def __init__(self, start_position: int = -1, start_direction: bool = True):
self.position = start_position
self.direction = start_direction
self.indicator = self._setup()
@staticmethod
def _setup():
cylons = []
total = 7
cylons.append(f"{'o' * (total+1)}")
for cnt in range(total):
cylons.append(f"{'o' * (cnt)}O{'o' * (total - cnt)}")
cylons.append(f"{'o' * (cnt+1)}O")
cylons.append(f"{'o' * (total+1)}")
return cylons
def step(self):
"""
Calculates the position and direction of the indicator.
"""
if self.position >= len(self.indicator) - 1:
# Too long - out of bounds
self.direction = False
if self.position <= 0:
# Too short - out of bounds
self.direction = True
if self.direction:
# Increment position by 1
self.position += 1
else:
# Decrement position by 1
self.position -= 1
def display(self) -> str:
"""
Increments the indicator position and returns its value.
"""
# Step the indicator forward
self.step()
# Return the new indicator display
return f"[ {self.indicator[self.position]} ]"
def parse_command_line():
"""
Parses and returns inbound command line arguments.
"""
# Argument parser for our command line
parser = argparse.ArgumentParser(
description="Falcon X Sandbox example",
formatter_class=RawTextHelpFormatter
)
# File to be analyzed
parser.add_argument(
'-f', '--file',
help='File to analyze',
required=True
)
# Environment to use for analysis
parser.add_argument(
'-e', '--environment',
help="Environment to use for analysis (win7, win7_64, win10, droid, linux)",
required=False
)
# CrowdStrike API Client ID
parser.add_argument(
'-k', '--key',
help='Your CrowdStrike API key ID\n'
' Required Scopes\n'
' Sample Uploads: WRITE\n'
' Sandbox: WRITE\n', required=True
)
# CrowdStrike API Client secret
parser.add_argument(
'-s', '--secret',
help='Your CrowdStrike API key secret', required=True
)
return parser.parse_args()
def check_scan_status(check_id: str) -> dict:
"""
Retrieves the status of a submission and returns it.
"""
# Return our submission response by ID
return falcon.command("GetSubmissions", ids=check_id)
def upload_file(filename: str,
upload_name: str,
submit_comment: str,
confidential: bool
) -> dict:
"""
Uploads the specified file to CrowdStrike cloud
applying any provided attributes. Returns the result.
"""
params = {
"file_name": upload_name,
"comment": submit_comment,
"is_confidential": confidential
}
# Read in our binary payload
with open(filename, 'rb') as payload:
# Upload this file to the Sample Uploads API
return falcon.command("UploadSampleV3",
file_data=payload.read(),
parameters=params,
content_type="application/octet-stream"
)
def submit_for_analysis(sha_value: str) -> dict:
"""
Submits an uploaded file that matches the provided SHA256
to the specified Falcon X Sandbox environment for analysis.
Returns the result.
"""
# Call the submit method and provide the SHA256
# of our upload file and our desired environment type.
return falcon.command("Submit",
body={
"sandbox": [{
"sha256": sha_value,
"environment_id": Environment[SANDBOX_ENV].value
}]
}
)
def delete_file(id_value: str) -> int:
"""
Deletes a file from CrowdStrike cloud based upon the
SHA256 provided. Returns the operation status code.
"""
# Call the delete_sample method using the SHA256
return falcon.command("DeleteSampleV3", ids=id_value)["status_code"]
def inform(msg: str):
"""
Provides informational updates to
the user as the program progresses.
"""
# Dynamic user update messages
print(" %-80s" % msg, end="\r", flush=True)
def running_time(begin: time):
"""
Calculates the current running time and returns it.
"""
return f"[ Time running: {str(timedelta(seconds=(time.time() - begin)))} ]"
# Start the clock
start_time = time.time()
# Parse our command line arguments
args = parse_command_line()
# Check for environment
if not args.environment:
SANDBOX_ENV = "WIN10"
else:
# Convert the submitted environment name to upper case
SANDBOX_ENV = str(args.environment).upper()
MATCHED = False
# Loop thru our defined environment names
for env in Environment:
# User submitted name matches an accepted type
if env.name == SANDBOX_ENV:
MATCHED = True
if not MATCHED:
# We only accept the environments defined in our Enum above
raise SystemExit("Invalid sandbox environment specified.")
if not os.path.isfile(args.file):
# We were not provided a valid filename
raise SystemExit("Invalid filename specified.")
# Announce progress
inform(f"[ Init ] {running_time(start_time)}")
# Connect to the API and provide our credentials for authorization
falcon = APIHarness(client_id=args.key,
client_secret=args.secret
)
# Announce progress
inform(f"[ Upload ] {running_time(start_time)}")
# Upload our test file
response = upload_file(args.file,
f"FalconX File Analysis: {time.strftime('%v %r %Z')}",
"Falcon X upload and scan example",
confidential=False
)
# Retrieve the SHA of our upload file
sha = response["body"]["resources"][0]["sha256"]
# Announce progress
inform(f"[ Submit ] {running_time(start_time)}")
# Submit the file for analysis to Falcon X Sandbox
submit_response = submit_for_analysis(sha)
# Track our running status
RUNNING = "running"
# Create a new progress indicator
indicator = Indicator()
# Loop until success or error
while RUNNING == "running":
# Submission ID
submit_id = submit_response["body"]["resources"][0]["id"]
# Check the scan status
result = check_scan_status(submit_id)
if result["body"]["resources"]:
# Announce progress with our Cylon indicator
inform(f"{indicator.display()} {running_time(start_time)}")
# Grab our latest status
RUNNING = result["body"]["resources"][0]["state"]
# We've finished, retrieve the report. There will only be one in this example.
analysis = falcon.command("GetReports", ids=submit_id)["body"]["resources"][0]["sandbox"][0]
# Announce progress
inform(f"[ Delete ] {running_time(start_time)}")
# Remove our test file
delete_response = delete_file(sha)
# Display the analysis results
if "error_type" in analysis:
# Error occurred, display the detail
print(f"{analysis['error_type']}: {analysis['error_message']}")
else:
# No error, display the full analysis
print(f"Detonated on: {analysis['environment_description']}{' ' * 20}")
print(f"File type: {analysis['file_type']}")
if len(analysis['classification']):
print("\nClassifications")
for classification in analysis['classification']:
print(classification)
if len(analysis['extracted_interesting_strings']):
print("\nInteresting strings")
for interesting in analysis['extracted_interesting_strings']:
print(f"Source: {interesting['source']} Type: {interesting['type']}")
print(f"{interesting['value']}\n")
print(f"\nVerdict: {analysis['verdict']}")
# Inform the user of our deletion failure
if delete_response != 200:
print("Unable to remove test file from Falcon X Sandbox")
# Display our total execution time
COMPLETE_TIME = str(timedelta(seconds=(time.time() - start_time)))
print(f"Total running time: {COMPLETE_TIME}")
```
#### File: samples/ioc/create_ioc.py
```python
import json
from falconpy.api_complete import APIHarness as Uber
from falconpy.ioc import IOC as IOC
def connectAPI(class_type: str = "service"):
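# Read the CrowdStrike API credentials from ../config.json and return either the IOC Service Class or the Uber Class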
with open("../config.json", "r") as cred_file:
config = json.loads(cred_file.read())
creds = {
"client_id": config["falcon_client_id"],
"client_secret": config["falcon_client_secret"]
}
if class_type.lower() == "service":
falcon = IOC(creds=creds)
elif class_type.lower() == "uber":
falcon = Uber(creds=creds)
else:
falcon = None
return falcon
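# Build the request body passed to indicator_create_v1: a list holding a single indicator dictionary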
def createIOCPayload(source: str, action: str, expiration: str, desc: str, type: str, val: str, platforms: list):
payload = {
"indicators": [
{
"source": source,
"action": action,
"expiration": expiration,
"description": desc,
"type": type,
"value": val,
"platforms": platforms,
"applied_globally": True
}
]
}
return payload
# Create an IOC using the IOC Service class
falcon = connectAPI("service")
BODY = createIOCPayload("Test", "detect", "2021-06-30T05:00:00.000Z", "Testing", "ipv4", "9.8.7.6", ["linux"])
response = falcon.indicator_create_v1(body=BODY)
print(response)
# Create an IOC using the Uber class
falcon = connectAPI("uber")
BODY = createIOCPayload("Test", "detect", "2021-06-30T05:00:00.000Z", "Testing", "ipv4", "8.7.6.5", ["linux"])
response = falcon.command('indicator_create_v1', body=BODY)
print(response)
```
#### File: falconpy/tests/test_overwatch_dashboard.py
```python
import os
import sys
# Authentication via test_authorization.py
from tests import test_authorization as Authorization
# Import our sibling src folder into the path
sys.path.append(os.path.abspath('src'))
# Classes to test - manually imported from sibling folder
from falconpy.overwatch_dashboard import Overwatch_Dashboard as FalconOWD
auth = Authorization.TestAuthorization()
token = auth.getConfigExtended()
falcon = FalconOWD(access_token=token)
AllowedResponses = [200, 403, 429]
class TestOverwatchDashboard:
"""
Overwatch Dashboard Service Class test harness
"""
def overwatch_generate_errors(self):
"""
Test code paths within methods by generating 500s, does not hit the API
"""
falcon.base_url = "nowhere"
error_checks = True
tests = {
"aggregates_events": falcon.AggregatesEvents(body={})["status_code"],
"aggregates_events_collections": falcon.AggregatesEventsCollections(body=[{}])["status_code"]
}
for key in tests:
if tests[key] != 500:
error_checks = False
# print(f"{key} processed with a {tests[key]} response")
return error_checks
def test_aggregates_detections_global_counts(self):
"""
Pytest harness hook
"""
assert bool(falcon.AggregatesDetectionsGlobalCounts()["status_code"] in AllowedResponses) is True
def test_aggregates_incidents_global_counts(self):
"""
Pytest harness hook
"""
assert bool(falcon.AggregatesIncidentsGlobalCounts()["status_code"] in AllowedResponses) is True
def test_aggregates_events_global_counts(self):
"""
Pytest harness hook
"""
assert bool(falcon.AggregatesOWEventsGlobalCounts(bananas="yellow")["status_code"] in AllowedResponses) is True
def test_errors(self):
"""
Pytest harness hook
"""
assert self.overwatch_generate_errors() is True
# @staticmethod
# def test_logout():
# """
# Pytest harness hook
# """
# assert bool(falcon.auth_object.revoke(
# falcon.auth_object.token()["body"]["access_token"]
# )["status_code"] in AllowedResponses) is True
``` |
{
"source": "jlangy/flowers",
"score": 2
} |
#### File: jlangy/flowers/predict.py
```python
import json
import numpy as np
import torch
from torchvision import transforms, datasets, models
from network import Network
import PIL
from PIL import Image
from validate import validation
from torch import nn, optim
from parse_arguments import make_predict_parser
from network import make_network
img_path, cat_names, gpu, topk = make_predict_parser()
data_transforms = transforms.Compose([transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
#test_dataset = datasets.ImageFolder('flowers/test', transform=data_transforms)
#testloader = torch.utils.data.DataLoader(test_dataset, batch_size=32, shuffle=True)
if cat_names:
with open(cat_names, 'r') as f:
cat_to_name = json.load(f)
#Import pretrained model
if gpu:
checkpoint = torch.load('checkpoint.pth')
else:
checkpoint = torch.load('checkpoint.pth', map_location=lambda storage, loc: storage)
model = make_network(checkpoint['arch'], checkpoint['hidden_units'])
model.load_state_dict(checkpoint['model_state_dict'])
class_to_idx = checkpoint['class_to_idx']
def process_image(image):
image = Image.open(image)
image = data_transforms(image)
return image.numpy()
def predict(image_path, model, topk=3):
model.eval()
image = process_image(image_path)
image = torch.from_numpy(image)
image.unsqueeze_(0)
if gpu:
image = image.to('cuda')
model = model.to('cuda')
output = model.forward(image)
output = output.exp()
probs,classes = output.topk(topk)
classes = classes.cpu().numpy()[0]
probs = probs.detach().cpu().numpy()[0]
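# Invert class_to_idx so the predicted indices can be mapped back to the original class labels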
idx_to_class = {_class: index for index, _class in class_to_idx.items()}
classes = list(map(lambda x : idx_to_class[x], classes ))
return probs, classes
probs, classes = predict(img_path, model, topk)
if cat_names:
name_classes = list(map(lambda x: cat_to_name[x], classes))
print(' Predicted species is {} with {:.2f}% confidence. \n '.format(name_classes[0], probs[0]*100))
for num in range(1,topk):
print('{} most likely species is {}'.format(num+1, name_classes[num]))
else:
print(' Predicted class is {} with {:.2f}% confidence. \n '.format(classes[0], probs[0]*100))
for num in range(1,topk):
print('{} most likely class is {}'.format(num+1, classes[num]))
```
#### File: jlangy/flowers/train.py
```python
import json
import torch
from torch import nn, optim
from torchvision import transforms, datasets
from network import make_network
from validate import validation
from parse_arguments import make_train_parser
data_dir, arch, hidden_units, epochs, learning_rate, gpu, save_dir = make_train_parser()
train_dir = data_dir + '/train'
valid_dir = data_dir + '/valid'
training_transforms = transforms.Compose([transforms.RandomRotation(30),
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])])
data_transforms = transforms.Compose([transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
training_dataset = datasets.ImageFolder(train_dir, transform=training_transforms)
validation_dataset = datasets.ImageFolder(valid_dir, transform=data_transforms)
trainloader = torch.utils.data.DataLoader(training_dataset, batch_size=64, shuffle=True)
validationloader = torch.utils.data.DataLoader(validation_dataset, batch_size=32, shuffle=True)
with open('cat_to_name.json', 'r') as f:
cat_to_name = json.load(f)
model = make_network(arch, hidden_units)
def train_network(model):
criterion = nn.NLLLoss()
optimizer = optim.Adam(model.classifier.parameters(), lr=learning_rate)
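# NLLLoss expects log-probabilities from the classifier; only the classifier parameters are passed to Adam above, so the pretrained feature weights are not updated here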
print_every = 40
if gpu:
model.to('cuda')
curr_epoch = 0
for e in range(epochs):
curr_epoch += 1
training_loss = 0
steps = 0
for images, labels in iter(trainloader):
steps += 1
if gpu:
images = images.to('cuda')
labels = labels.to('cuda')
optimizer.zero_grad()
output = model.forward(images)
loss = criterion(output, labels)
training_loss += loss
loss.backward()
optimizer.step()
validation_loss, num_correct = validation(model, validationloader, criterion, gpu)
print("epoch: {} \n total training loss: {:.4f} \n average training loss: {:.4f} \n total validation loss: {:.4f} \n average validation loss: {:.4f} \n validation accuracy: {:.2f}%".format(curr_epoch, training_loss, training_loss/len(training_dataset), validation_loss, validation_loss/len(validation_dataset), int(num_correct)*100/len(validation_dataset)))
train_network(model)
torch.save({
'model_state_dict': model.state_dict(),
'class_to_idx': training_dataset.class_to_idx,
'arch': arch,
'hidden_units': hidden_units
}, save_dir)
``` |
{
"source": "jlanillos/clinAcc_PGx_WES",
"score": 3
} |
#### File: clinAcc_PGx_WES/DISCOVERY_scripts/Discovery_PGx_4_annotateSamples_updateVarcols.py
```python
import pandas as pd
import os
import numpy as np
import re
import argparse
import os
parser=argparse.ArgumentParser(description='Annotates the SAMPLES column and updates the Var columns')
parser.add_argument('--searchpath', required=True)
args=parser.parse_args()
searchpath=str(args.searchpath)
def files(path):
for file in os.listdir(path):
if os.path.isfile(os.path.join(path, file)):
yield file
###############################################################################
FILES = list()
filenameformat = 'pgx.csv'
for file in files(searchpath):
if filenameformat in file:
FILES.append(file)
df = pd.read_csv(FILES[0],sep='\t')
# Extract column names from the first individual sample
samplecols = list(df.columns)
# df_concat is the "template.csv" file which will be now filled with annotation data related to the variants (info annotated from databases, no related to the individual's variant)
df_concat = pd.read_csv(searchpath + '/' + 'template.csv',sep='\t', dtype=str)
eachsamplecols = ['genotype', 'zigosity', 'VAF', 'AlleleRatio', 'AlleleCoverage']
varcols = list(set(samplecols) - set(eachsamplecols) - set(['exact_gene']))
del globals()['df']
df_concat.set_index('ID',inplace=True)
df_concat['ID_aux'] = list(df_concat.index)
df_concat['samples'] = ''
df_concat[varcols] = ''
# Extract annotation information from each individual file and update it into the df_concat dataframe
for file in FILES:
sampleid = file.split('.')[0]
print(sampleid)
fd = pd.read_csv(file,sep='\t')
fd = fd.astype(str)
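# Build a per-variant identifier (chr_pos_ref_alt) from the locus, REF and called ALT allele to match the IDs indexing the template matrix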
fd['ID'] = fd['locus'].str.replace(':','_') + '_' + fd['REF'] + '_' + fd['genotype'].str.split('/').str[1]
fd = fd.drop_duplicates(subset = ['ID'], keep = 'first')
df_concat.update(fd[['ID'] +varcols].set_index('ID'))
df_concat['samples'] = df_concat.apply(lambda x: ';'.join([x['samples'],sampleid]) if x['ID_aux'] in list(fd['ID']) else x['samples'],axis=1)
del globals()['fd']
for s in eachsamplecols:
df_concat[s] = ''
df_concat = df_concat.reset_index()
df_concat['samples'] = df_concat['samples'].str.lstrip(';')
df_concat['N_samples'] = df_concat['samples'].apply(lambda x: len(x.split(';')))
finalcols = ['ID'] + eachsamplecols + ['N_samples', 'MAF', 'samples'] + varcols
df_concat = df_concat[finalcols].copy()
df_concat.to_csv(searchpath + '/' + 'checkpoint_samples.csv',sep='\t', index = None)
```
#### File: clinAcc_PGx_WES/Figures_tables/0_Fig1B_QC_coverage.py
```python
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# x is a vector of coverage values to plot
x = list(np.arange(0,110,10))
# Load reference alleles and extract the hg19 "actionable" positions for which we calculate coverage statistics in each panel (CCP17 and SSV6)
ref = pd.read_csv('reference_HAPLOTYPES_hg38_hg19.csv',sep='\t')
ref['actionable'].loc[(ref['SYMBOL'] == 'CYP4F2') & (ref['allele'] == '*2')] = 'Yes'# This line could be commented out to exclude CYP4F2*2 from the QC
ref['SNV_only'].loc[(ref['SYMBOL'] == 'CYP4F2') & (ref['allele'] == '*2')] = 'SNV'# This line could be commented out to exclude CYP4F2*2 from the QC
actref = ref.loc[(ref['actionable'] == 'Yes')] # & (ref['SNV_only'] == 'SNV')] # & (ref['SYMBOL'] != 'CYP2B6')]
actref['position_hg19'] = actref['position_hg19'].astype(str).apply(lambda x: x.replace('_notfound','').split('_')[0])
posref =[str(x) for x in list(set(list(','.join(actref['position_hg19']).split(','))))]
def multiID(CHR, position, ref, alt):
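# Join chromosome, hg19 position, ref and alt into 'chrN_pos_ref_alt' identifiers; handles comma-separated multi-position alleles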
position_list = str(position).split(',')
chr_element = ['chr']*len(list(str(position).split(',')))
chr_list = [str(CHR)]*len(str(position).split(','))
chr_final = list(map("".join,list(zip(chr_element,chr_list))))
ref_list = str(ref).split(',')
alt_list = str(alt).split(',')
outlist = list(map("_".join, list(zip(chr_final,position_list,ref_list,alt_list))))
outlist = ','.join(outlist)
return(outlist)
actref['multiID'] = actref.apply(lambda x: multiID(x['chr'], x['position_hg19'],x['ref'],x['alt']),axis=1)
# Get MERGED matrix from SSV6 panel and filter only "actionable" positions in posref
df_ssv6 = pd.read_csv('/path/to/SSV6_panel/QC_MERGED_GVCFs.csv',sep='\t')
df_ssv6_aux = df_ssv6.copy()
df_ssv6 = df_ssv6.loc[df_ssv6['POS'].astype(str).str.contains('|'.join(posref))].copy()
colscov_ssv6 = [i for i in list(df_ssv6.columns) if '_dp' in i]
# Get MERGED matrix from CP17 panel and filter only "actionable" positions in posref
df_ccp17 = pd.read_csv('/path/to/CCP17_panel/QC_MERGED_GVCFs.csv',sep='\t')
df_ccp17_aux = df_ccp17.copy()
df_ccp17 = df_ccp17.loc[df_ccp17['POS'].astype(str).str.contains('|'.join(posref))].copy()
colscov_ccp17 = [i for i in list(df_ccp17.columns) if '_dp' in i]
nrows = int(len(set(list(actref['SYMBOL']))))
ncols = 2
x3 = list()
for i in list(np.arange(0, np.ceil( nrows / 2), 1)):
x3.append((i,0))
x3.append((i,1))
d = dict()
genes_sorted = list(set(list(actref['SYMBOL'])))
genes_sorted.sort()
gene_names = ['|'.join(list(set(list(actref['SYMBOL']))))] + genes_sorted # combined "all genes" entry followed by one entry per gene
d = dict(zip(gene_names, x3))
d['All genes'] = d.pop('|'.join(list(set(list(actref['SYMBOL'])))))
# Store the statistical results (coverage values, mean, stdev, ...) into dictionaries
# This part takes a while; consider it a candidate for optimization (a vectorized sketch follows the loop below)
d2_ssv6 = dict()
d2_ccp17 = dict()
for GENE in gene_names:
GENEposref =[str(j) for j in list(set(list(','.join(actref['position_hg19'].loc[actref['SYMBOL'].str.contains(GENE)]).split(','))))]
# Indels
aux = [str(j) for j in list(set(','.join(actref['multiID'].loc[(actref['SYMBOL'].str.contains(GENE)) & (actref['SNV_only'] == 'INDEL' )]).split(',')))]
if (aux[0] == '') and (len(aux) == 1):
GENEposref_indels = []
else:
GENEposref_indels = aux
#SNVs
aux = [str(j) for j in list(set(','.join(actref['multiID'].loc[(actref['SYMBOL'].str.contains(GENE)) & (actref['SNV_only'] == 'SNV' )]).split(',')))]
if (aux[0] == '') and (len(aux) == 1):
GENEposref_snvs = []
else:
GENEposref_snvs = aux
GENE = GENE.replace('|'.join(list(set(list(actref['SYMBOL'])))),'All genes')
cov_ssv6 = df_ssv6[colscov_ssv6].loc[df_ssv6['POS'].astype(str).str.contains('|'.join(GENEposref))]
y_ssv6 = list()
e_ssv6 = list()
for j in x:
y_ssv6.append(cov_ssv6.apply(lambda y: sum(map(lambda i: i > j, y)), axis = 1).mean())
e_ssv6.append(cov_ssv6.apply(lambda y: sum(map(lambda i: i > j, y)), axis = 1).std())
y2_ssv6 = list(np.array(y_ssv6)/len(cov_ssv6.columns))
e2_ssv6 = list(np.array(e_ssv6)/len(cov_ssv6.columns))
d2_ssv6[GENE] = [y2_ssv6, e2_ssv6]
#CCP17
cov_ccp17 = df_ccp17[colscov_ccp17].loc[df_ccp17['POS'].astype(str).str.contains('|'.join(GENEposref))]
y_ccp17 = list()
e_ccp17 = list()
for j in x:
y_ccp17.append(cov_ccp17.apply(lambda y: sum(map(lambda i: i > j, y)), axis = 1).mean())
e_ccp17.append(cov_ccp17.apply(lambda y: sum(map(lambda i: i > j, y)), axis = 1).std())
y2_ccp17 = list(np.array(y_ccp17)/len(cov_ccp17.columns))
e2_ccp17 = list(np.array(e_ccp17)/len(cov_ccp17.columns))
d2_ccp17[GENE] = [y2_ccp17, e2_ccp17]
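# Optimization sketch (not used above; assumes the *_dp columns are numeric): the per-row
# Python-level loop in cov.apply(lambda y: sum(map(lambda i: i > j, y)), axis=1) could be
# replaced by a vectorized comparison, e.g.
#   y2 = [(cov > j).mean(axis=1).mean() for j in x]
#   e2 = [(cov > j).mean(axis=1).std() for j in x]
# which yields the same mean/std of the per-position fraction of samples covered above each
# threshold j, typically much faster.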
# Using the statistical results calculated above and stored in the dictionaries d2_ssv6 and d2_ccp17, create and plot Figure 1B
fig, axs = plt.subplots(int(np.ceil( nrows / 2)), 2, figsize=(4, 10))
ylim = [0,1.1]
for GENE in gene_names:
GENEposref =[str(j) for j in list(set(list(','.join(actref['position_hg19'].loc[actref['SYMBOL'].str.contains(GENE)]).split(','))))]
# Indels
aux = [str(j) for j in list(set(','.join(actref['multiID'].loc[(actref['SYMBOL'].str.contains(GENE)) & (actref['SNV_only'] == 'INDEL' )]).split(',')))]
if (aux[0] == '') and (len(aux) == 1):
GENEposref_indels = []
else:
GENEposref_indels = aux
#SNVs
aux = [str(j) for j in list(set(','.join(actref['multiID'].loc[(actref['SYMBOL'].str.contains(GENE)) & (actref['SNV_only'] == 'SNV' )]).split(',')))]
if (aux[0] == '') and (len(aux) == 1):
GENEposref_snvs = []
else:
GENEposref_snvs = aux
GENE = GENE.replace('|'.join(list(set(list(actref['SYMBOL'])))),'All genes')
coord_col = int(d[GENE][1])
coord_row = int(d[GENE][0])
y2_ssv6 = d2_ssv6[GENE][0]
e2_ssv6 = d2_ssv6[GENE][1]
LABEL = 'SSv6 (n=4002)'
axs[coord_row, coord_col].errorbar(np.array(x)+1, y2_ssv6, yerr=e2_ssv6, fmt='o', label=LABEL, ecolor='lightgray',color = 'blue', ms=3)
y2_ccp17 = d2_ccp17[GENE][0]
e2_ccp17 = d2_ccp17[GENE][1]
LABEL = 'SSccp17 (n=998)'
axs[coord_row, coord_col].errorbar(np.array(x)-1, y2_ccp17, yerr=e2_ccp17, fmt='o', label = LABEL, ecolor='lightgray', color = 'red', ms=3)
if (len(GENEposref_snvs) > 1) or (len(GENEposref_snvs) == 0):
GENEposref_snvs_text = ' SNVs'
elif len(GENEposref_snvs) == 1:
GENEposref_snvs_text = ' SNV'
if (len(GENEposref_indels) > 1) or (len(GENEposref_indels) == 0):
GENEposref_indels_text = ' indels'
elif len(GENEposref_indels) == 1:
GENEposref_indels_text = ' indel'
if len(GENEposref) <= 1:
LOCUS_word = ' locus'
else:
LOCUS_word = ' loci'
GENE = GENE.replace('All genes', 'All\ genes')
axs[coord_row, coord_col].set_title( r"$\bf{" + str(GENE) + "}$" + '\n' + '(' + str(len(GENEposref_snvs)) + GENEposref_snvs_text + ', ' + str(len(GENEposref_indels)) + GENEposref_indels_text+ ', ' + str(len(GENEposref)) + LOCUS_word + ')', fontdict={'fontsize':8})
axs[coord_row, coord_col].tick_params(axis = 'x', which = 'both', bottom = False, labelbottom = False, labelsize=6)
axs[coord_row, coord_col].tick_params(axis = 'y', labelsize=6)
axs[coord_row, coord_col].set_ylim(ylim)
if coord_col == 0:
axs[coord_row,coord_col].set_ylabel('%', fontsize=8, labelpad=1)
else:
axs[coord_row,coord_col].yaxis.set_visible(False)
if coord_row == 5:
axs[coord_row,coord_col].set_xlabel('Coverage', fontsize=8, labelpad=1)
handles, labels = axs[coord_row, coord_col].get_legend_handles_labels()
fig.legend(handles, labels)
axs[5, 0].tick_params(axis = 'x', which = 'both', bottom = True, labelbottom = True, labelsize=6)
axs[5, 1].tick_params(axis = 'x', which = 'both', bottom = True, labelbottom = True, labelsize=6)
plt.subplots_adjust(left=0.095, bottom=0.060, right=0.98, top=0.910, wspace=0.13, hspace=0.400)
plt.savefig('/path/to/Figures/Fig1B.png', dpi = 400)
plt.show()
```
#### File: clinAcc_PGx_WES/Figures_tables/3_Fig2C_allelespergene_pop.py
```python
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
df = pd.read_csv('/path/to/haplotypes_20210107.csv',sep='\t')
alleles = pd.read_csv('/path/to/Alleles_20201228.csv',sep='\t')
alleles = alleles.loc[(alleles['count_carrier_ids'].astype(str) != 'nan') & (alleles['actionable'] == 'Yes')].copy()
GENES = list(set(list(alleles['SYMBOL'])))
GENES.sort()
ncols = 2
nrows = int(np.ceil( len(GENES) / ncols))
x3 = list()
for i in list(np.arange(0, nrows, 1)):
x3.append((0,i))
x3.append((1,i))
d_coords = dict(zip(GENES, x3))
def countdiffalleles(x):
count = 0
for i in x.split(','):
i = i.split('_')[1]
if '*1/' in i:
i = i.split('/')
i.remove('*1')
else:
i = i.split('/')
count = count + len(i)
return(count)
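# Illustrative example (the genotype strings are hypothetical but follow the GENE_allele1/allele2
# format parsed elsewhere in this script):
#   countdiffalleles('CYP2C19_*1/*2,CYP2C19_*2/*17') -> 3
# When a genotype contains '*1/', that reference allele is removed once, so '*1/*2' contributes
# one non-reference allele while '*2/*17' contributes two.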
def prepareDF4plot(df, country, GENE, RF):
#d = dict()
d_comphet = dict()
otherallelelist = list()
dff = df.loc[df['from'].str.contains(country)].groupby(GENE)['sample'].count().reset_index()
dff = dff.loc[~(dff[GENE] == '')].copy()
nr_found = (dff[GENE].apply(lambda x: countdiffalleles(x)) * dff['sample']).sum()
posalleles = list(set([i.split('_')[1].split(',')[0].split('/')[1] for i in list(set(','.join(list(dff[GENE])).split(',')))]))
allele_names = list()
allele_counts = list()
allele_counts_aux = list()
allele_names_comphet = list()
allele_counts_comphet = list()
allele_counts_aux_comphet = list()
for al in posalleles:
posalleles_aux = [x for x in posalleles if x != al]
homozall = [i for i in list(dff[GENE]) if '/'.join([al,al]) in i]
heterozall = [i for i in list(dff[GENE]) if (al in i) and not ('/'.join([al,al]) in i)] #[i for i in list(contigency.columns) if '/'.join(['*1',al]) in i]
comphet = [i for i in list(dff[GENE]) if (al in i) and not ('/'.join([al,al]) in i) and any(x in i for x in posalleles_aux)]
dff['aux'] = dff[GENE].str.replace('*','.')
if homozall == []:
nr_homoz = 0
else:
nr_homoz = dff['sample'].loc[dff['aux'].str.contains('|'.join([i.replace('*','.') for i in homozall]))].sum()
if heterozall == []:
nr_heteroz = 0
else:
nr_heteroz = dff['sample'].loc[dff['aux'].str.contains('|'.join([i.replace('*','.') for i in heterozall]))].sum()
if comphet == []:
nr_comhet = 0
else:
nr_comhet = dff['sample'].loc[dff['aux'].str.contains('|'.join([i.replace('*','.') for i in comphet]))].sum() #nr_heteroz -
count = 100*(nr_heteroz + nr_homoz*2) / nr_found
count_comphet = 100*(nr_comhet + nr_homoz*2) / nr_found
count_nocomphet = count - count_comphet
count_aux = (nr_heteroz + nr_homoz*2)
#d[al] = [count]
allele_names.append(al)
allele_counts.append(count)
allele_names_comphet.append(al + '_COMPHET')
allele_names_comphet.append(al + '_NOCOMPHET')
allele_counts_comphet.append(count_comphet)
allele_counts_comphet.append(count_nocomphet)
allele_counts_aux.append(count_aux)
rf = pd.DataFrame({'alleles':allele_names,'count':allele_counts})
rf_comphet = pd.DataFrame({'alleles':allele_names_comphet,'count':allele_counts_comphet})
if GENE == 'DPYD':
if country == 'Ecuador|EUROPA|-|Chile|Brazil|USA|Mexico|Portugal|Argentina|Spain|Colombia':
n_alleles_aux = 5
else:
n_alleles_aux = 4
d = dict(zip(list(rf.sort_values(by=['count'], ascending=False).iloc[0:n_alleles_aux]['alleles']),[[j] for j in list(rf.sort_values(by=['count'], ascending=False).iloc[0:n_alleles_aux]['count'])]))
for w in list(d.keys()):
d_comphet[w+'_COMPHET']= [rf_comphet['count'].loc[rf_comphet['alleles'].apply(lambda x: x.replace('*','P')) == w.replace('*','P')+'_COMPHET'].sum()]
d_comphet[w+'_NOCOMPHET']= [rf_comphet['count'].loc[rf_comphet['alleles'].apply(lambda x: x.replace('*','P')) == w.replace('*','P')+'_NOCOMPHET'].sum()]
otheralleleslength = len(rf.sort_values(by=['count'], ascending=False).reset_index().iloc[n_alleles_aux:])
otherallelelist = list(rf.sort_values(by=['count'], ascending=False).reset_index().iloc[n_alleles_aux:]['alleles'].values)
if otheralleleslength > 1:
#d['Other alleles ' + '(N=' + str(otheralleleslength) + ')'] = [rf.sort_values(by=['count'], ascending=False).reset_index().iloc[2:]['count'].sum()]
d['Other alleles'] = [rf.sort_values(by=['count'], ascending=False).reset_index().iloc[n_alleles_aux:]['count'].sum()]
d_comphet['Other alleles_COMPHET'] = [rf_comphet['count'].loc[rf_comphet['alleles'].apply(lambda x: x.replace('*','P')).str.contains('|'.join([i.replace('*','P')+'_COMPHET' for i in otherallelelist]))].sum()]
d_comphet['Other alleles_NOCOMPHET'] = [rf_comphet['count'].loc[rf_comphet['alleles'].apply(lambda x: x.replace('*','P')).str.contains('|'.join([i.replace('*','P')+'_NOCOMPHET' for i in otherallelelist]))].sum()]
elif otheralleleslength == 1:
theother = rf.sort_values(by=['count'], ascending=False).reset_index().iloc[n_alleles_aux:]['alleles'].values[0]
d[theother] = [rf.sort_values(by=['count'], ascending=False).reset_index().iloc[n_alleles_aux:]['count'].sum()]
d_comphet[theother +'_COMPHET'] = [rf_comphet['count'].loc[rf_comphet['alleles'].apply(lambda x: x.replace('*','P')).str.contains('|'.join([i.replace('*','P')+'_COMPHET' for i in otherallelelist]))].sum()]
d_comphet[theother +'_NOCOMPHET'] = [rf_comphet['count'].loc[rf_comphet['alleles'].apply(lambda x: x.replace('*','P')).str.contains('|'.join([i.replace('*','P')+'_NOCOMPHET' for i in otherallelelist]))].sum()]
genecountdf = pd.DataFrame(d) #({'alleles':allele_names,GENE:allele_counts})
genecountdf_comhet = pd.DataFrame(d_comphet)
elif GENE == 'CYP2B6':
n_alleles_aux = 3
d = dict(zip(list(rf.sort_values(by=['count'], ascending=False).iloc[0:n_alleles_aux]['alleles']),[[j] for j in list(rf.sort_values(by=['count'], ascending=False).iloc[0:n_alleles_aux]['count'])]))
for w in list(d.keys()):
d_comphet[w+'_COMPHET']= [rf_comphet['count'].loc[rf_comphet['alleles'].apply(lambda x: x.replace('*','P')) == w.replace('*','P')+'_COMPHET'].sum()]
d_comphet[w+'_NOCOMPHET']= [rf_comphet['count'].loc[rf_comphet['alleles'].apply(lambda x: x.replace('*','P')) == w.replace('*','P')+'_NOCOMPHET'].sum()]
otheralleleslength = len(rf.sort_values(by=['count'], ascending=False).reset_index().iloc[n_alleles_aux:])
otherallelelist = list(rf.sort_values(by=['count'], ascending=False).reset_index().iloc[n_alleles_aux:]['alleles'].values)
if otheralleleslength > 1:
#d['Other alleles ' + '(N=' + str(otheralleleslength) + ')'] = [rf.sort_values(by=['count'], ascending=False).reset_index().iloc[2:]['count'].sum()]
d['Other alleles'] = [rf.sort_values(by=['count'], ascending=False).reset_index().iloc[n_alleles_aux:]['count'].sum()]
d_comphet['Other alleles_COMPHET'] = [rf_comphet['count'].loc[rf_comphet['alleles'].apply(lambda x: x.replace('*','P')).str.contains('|'.join([i.replace('*','P')+'_COMPHET' for i in otherallelelist]))].sum()]
d_comphet['Other alleles_NOCOMPHET'] = [rf_comphet['count'].loc[rf_comphet['alleles'].apply(lambda x: x.replace('*','P')).str.contains('|'.join([i.replace('*','P')+'_NOCOMPHET' for i in otherallelelist]))].sum()]
elif otheralleleslength == 1:
theother = rf.sort_values(by=['count'], ascending=False).reset_index().iloc[n_alleles_aux:]['alleles'].values[0]
d[theother] = [rf.sort_values(by=['count'], ascending=False).reset_index().iloc[n_alleles_aux:]['count'].sum()]
d_comphet[theother +'_COMPHET'] = [rf_comphet['count'].loc[rf_comphet['alleles'].apply(lambda x: x.replace('*','P')).str.contains('|'.join([i.replace('*','P')+'_COMPHET' for i in otherallelelist]))].sum()]
d_comphet[theother +'_NOCOMPHET'] = [rf_comphet['count'].loc[rf_comphet['alleles'].apply(lambda x: x.replace('*','P')).str.contains('|'.join([i.replace('*','P')+'_NOCOMPHET' for i in otherallelelist]))].sum()]
genecountdf = pd.DataFrame(d) #({'alleles':allele_names,GENE:allele_counts})
genecountdf_comhet = pd.DataFrame(d_comphet)
elif GENE == 'CYP2C9':
n_alleles_aux = 2
d = dict(zip(list(rf.sort_values(by=['count'], ascending=False).iloc[0:n_alleles_aux]['alleles']),[[j] for j in list(rf.sort_values(by=['count'], ascending=False).iloc[0:n_alleles_aux]['count'])]))
for w in list(d.keys()):
d_comphet[w+'_COMPHET']= [rf_comphet['count'].loc[rf_comphet['alleles'].apply(lambda x: x.replace('*','P')) == w.replace('*','P')+'_COMPHET'].sum()]
d_comphet[w+'_NOCOMPHET']= [rf_comphet['count'].loc[rf_comphet['alleles'].apply(lambda x: x.replace('*','P')) == w.replace('*','P')+'_NOCOMPHET'].sum()]
otheralleleslength = len(rf.sort_values(by=['count'], ascending=False).reset_index().iloc[n_alleles_aux:])
otherallelelist = list(rf.sort_values(by=['count'], ascending=False).reset_index().iloc[n_alleles_aux:]['alleles'].values)
if otheralleleslength > 1:
#d['Other alleles ' + '(N=' + str(otheralleleslength) + ')'] = [rf.sort_values(by=['count'], ascending=False).reset_index().iloc[2:]['count'].sum()]
d['Other alleles'] = [rf.sort_values(by=['count'], ascending=False).reset_index().iloc[n_alleles_aux:]['count'].sum()]
d_comphet['Other alleles_COMPHET'] = [rf_comphet['count'].loc[rf_comphet['alleles'].apply(lambda x: x.replace('*','P')).str.contains('|'.join([i.replace('*','P')+'_COMPHET' for i in otherallelelist]))].sum()]
d_comphet['Other alleles_NOCOMPHET'] = [rf_comphet['count'].loc[rf_comphet['alleles'].apply(lambda x: x.replace('*','P')).str.contains('|'.join([i.replace('*','P')+'_NOCOMPHET' for i in otherallelelist]))].sum()]
elif otheralleleslength == 1:
theother = rf.sort_values(by=['count'], ascending=False).reset_index().iloc[n_alleles_aux:]['alleles'].values[0]
d[theother] = [rf.sort_values(by=['count'], ascending=False).reset_index().iloc[n_alleles_aux:]['count'].sum()]
d_comphet[theother +'_COMPHET'] = [rf_comphet['count'].loc[rf_comphet['alleles'].apply(lambda x: x.replace('*','P')).str.contains('|'.join([i.replace('*','P')+'_COMPHET' for i in otherallelelist]))].sum()]
d_comphet[theother +'_NOCOMPHET'] = [rf_comphet['count'].loc[rf_comphet['alleles'].apply(lambda x: x.replace('*','P')).str.contains('|'.join([i.replace('*','P')+'_NOCOMPHET' for i in otherallelelist]))].sum()]
genecountdf = pd.DataFrame(d) #({'alleles':allele_names,GENE:allele_counts})
genecountdf_comhet = pd.DataFrame(d_comphet)
elif GENE == 'UGT1A1':
d = dict(zip(list(rf.sort_values(by=['count'], ascending=False)['alleles']),[[j] for j in list(rf.sort_values(by=['count'], ascending=False)['count'])]))
for w in list(d.keys()):
d_comphet[w+'_COMPHET']= [rf_comphet['count'].loc[rf_comphet['alleles'].apply(lambda x: x.replace('*','P')) == w.replace('*','P')+'_COMPHET'].sum()]
d_comphet[w+'_NOCOMPHET']= [rf_comphet['count'].loc[rf_comphet['alleles'].apply(lambda x: x.replace('*','P')) == w.replace('*','P')+'_NOCOMPHET'].sum()]
genecountdf = pd.DataFrame(d) #({'alleles':allele_names,GENE:allele_counts})
genecountdf_comhet = pd.DataFrame(d_comphet)
elif GENE == 'RYR1':
d = dict(zip(list(rf.sort_values(by=['count'], ascending=False)['alleles']),[[j] for j in list(rf.sort_values(by=['count'], ascending=False)['count'])]))
for w in list(d.keys()):
d_comphet[w+'_COMPHET']= [rf_comphet['count'].loc[rf_comphet['alleles'].apply(lambda x: x.replace('*','P')) == w.replace('*','P')+'_COMPHET'].sum()]
d_comphet[w+'_NOCOMPHET']= [rf_comphet['count'].loc[rf_comphet['alleles'].apply(lambda x: x.replace('*','P')) == w.replace('*','P')+'_NOCOMPHET'].sum()]
genecountdf = pd.DataFrame(d) #({'alleles':allele_names,GENE:allele_counts})
genecountdf_comhet = pd.DataFrame(d_comphet)
elif GENE == 'TPMT':
d = dict(zip(list(rf.sort_values(by=['count'], ascending=False)['alleles']),[[j] for j in list(rf.sort_values(by=['count'], ascending=False)['count'])]))
for w in list(d.keys()):
d_comphet[w+'_COMPHET']= [rf_comphet['count'].loc[rf_comphet['alleles'].apply(lambda x: x.replace('*','P')) == w.replace('*','P')+'_COMPHET'].sum()]
d_comphet[w+'_NOCOMPHET']= [rf_comphet['count'].loc[rf_comphet['alleles'].apply(lambda x: x.replace('*','P')) == w.replace('*','P')+'_NOCOMPHET'].sum()]
genecountdf = pd.DataFrame(d) #({'alleles':allele_names,GENE:allele_counts})
genecountdf_comhet = pd.DataFrame(d_comphet)
else:
if len(posalleles) > 2:
d = dict(zip(list(rf.sort_values(by=['count'], ascending=False).iloc[[0,1]]['alleles']),[[j] for j in list(rf.sort_values(by=['count'], ascending=False).iloc[[0,1]]['count'])]))
for w in list(d.keys()):
d_comphet[w+'_COMPHET']= [rf_comphet['count'].loc[rf_comphet['alleles'].apply(lambda x: x.replace('*','P')) == w.replace('*','P')+'_COMPHET'].sum()]
d_comphet[w+'_NOCOMPHET']= [rf_comphet['count'].loc[rf_comphet['alleles'].apply(lambda x: x.replace('*','P')) == w.replace('*','P')+'_NOCOMPHET'].sum()]
otheralleleslength = len(rf.sort_values(by=['count'], ascending=False).reset_index().iloc[2:])
otherallelelist = list(rf.sort_values(by=['count'], ascending=False).reset_index().iloc[2:]['alleles'].values)
if otheralleleslength > 1:
#d['Other alleles ' + '(N=' + str(otheralleleslength) + ')'] = [rf.sort_values(by=['count'], ascending=False).reset_index().iloc[2:]['count'].sum()]
d['Other alleles'] = [rf.sort_values(by=['count'], ascending=False).reset_index().iloc[2:]['count'].sum()]
d_comphet['Other alleles_COMPHET'] = [rf_comphet['count'].loc[rf_comphet['alleles'].apply(lambda x: x.replace('*','P')).str.contains('|'.join([i.replace('*','P')+'_COMPHET' for i in otherallelelist]))].sum()]
d_comphet['Other alleles_NOCOMPHET'] = [rf_comphet['count'].loc[rf_comphet['alleles'].apply(lambda x: x.replace('*','P')).str.contains('|'.join([i.replace('*','P')+'_NOCOMPHET' for i in otherallelelist]))].sum()]
else:
theother = rf.sort_values(by=['count'], ascending=False).reset_index().iloc[2:]['alleles'].values[0]
d[theother] = [rf.sort_values(by=['count'], ascending=False).reset_index().iloc[2:]['count'].sum()]
d_comphet[theother +'_COMPHET'] = [rf_comphet['count'].loc[rf_comphet['alleles'].apply(lambda x: x.replace('*','P')).str.contains('|'.join([i.replace('*','P')+'_COMPHET' for i in otherallelelist]))].sum()]
d_comphet[theother +'_NOCOMPHET'] = [rf_comphet['count'].loc[rf_comphet['alleles'].apply(lambda x: x.replace('*','P')).str.contains('|'.join([i.replace('*','P')+'_NOCOMPHET' for i in otherallelelist]))].sum()]
genecountdf = pd.DataFrame(d) #({'alleles':allele_names,GENE:allele_counts})
genecountdf_comhet = pd.DataFrame(d_comphet)
#genecountdf['from'] = country
elif len(posalleles) == 2:
d = dict(zip(list(rf.sort_values(by=['count'], ascending=False).iloc[[0,1]]['alleles']),[[j] for j in list(rf.sort_values(by=['count'], ascending=False).iloc[[0,1]]['count'])]))
genecountdf = pd.DataFrame(d) #({'alleles':allele_names,GENE:allele_counts})
for w in list(d.keys()):
d_comphet[w+'_COMPHET']= [rf_comphet['count'].loc[rf_comphet['alleles'].apply(lambda x: x.replace('*','.')) == w.replace('*','.')+'_COMPHET'].sum()]
d_comphet[w+'_NOCOMPHET']= [rf_comphet['count'].loc[rf_comphet['alleles'].apply(lambda x: x.replace('*','.')) == w.replace('*','.')+'_NOCOMPHET'].sum()]
genecountdf_comhet = pd.DataFrame(d_comphet)
#genecountdf['from'] = country
elif len(posalleles) == 1:
d = dict(zip(list(rf.sort_values(by=['count'], ascending=False).iloc[[0]]['alleles']),[[j] for j in list(rf.sort_values(by=['count'], ascending=False).iloc[[0]]['count'])]))
for w in list(d.keys()):
d_comphet[w+'_COMPHET']= [rf_comphet['count'].loc[rf_comphet['alleles'].apply(lambda x: x.replace('*','.')) == w.replace('*','.')+'_COMPHET'].sum()]
d_comphet[w+'_NOCOMPHET']= [rf_comphet['count'].loc[rf_comphet['alleles'].apply(lambda x: x.replace('*','.')) == w.replace('*','.')+'_NOCOMPHET'].sum()]
genecountdf = pd.DataFrame(d) #({'alleles':allele_names,GENE:allele_counts})
genecountdf_comhet = pd.DataFrame(d_comphet)
#genecountdf['from'] = country
rf_aux = pd.DataFrame({'Gene':[GENE]*len(allele_names),'alleles':allele_names,'count':allele_counts_aux, 'country': [country]*len(allele_counts_aux), 'nr_found': [nr_found]*len(allele_counts_aux)})
return genecountdf, genecountdf_comhet, otherallelelist, rf_aux
def prepareDF4plot_chrX(df, country, GENE, RF):
#d = dict()
d_comphet = dict()
otherallelelist = list()
dff = df.loc[df['from'].str.contains(country)].groupby(GENE)['sample'].count().reset_index()
dff = dff.loc[~(dff[GENE] == '')].copy()
dff_females = df.loc[(df['from'].str.contains(country)) & (df['gender'] == 'F')].groupby(GENE)['sample'].count().reset_index()
dff_females = dff_females.loc[~(dff_females[GENE] == '')].copy()
dff_males = df.loc[(df['from'].str.contains(country)) & (df['gender'] == 'M')].groupby(GENE)['sample'].count().reset_index()
dff_males = dff_males.loc[~(dff_males[GENE] == '')].copy()
nr_found_females = (dff_females[GENE].apply(lambda x: countdiffalleles(x)) * dff_females['sample']).sum()
nr_found_males = (dff_males[GENE].apply(lambda x: countdiffalleles(x)) * dff_males['sample'] / 2).sum() # or dff_males['sample'].sum()
nr_found = nr_found_females + nr_found_males
posalleles = list(set([i.split('_')[1].split(',')[0].split('/')[1] for i in list(set(','.join(list(dff[GENE])).split(',')))]))
allele_names = list()
allele_counts = list()
allele_counts_aux = list()
allele_names_comphet = list()
allele_counts_comphet = list()
allele_counts_aux_comphet = list()
for al in posalleles:
posalleles_aux = [x for x in posalleles if x != al]
homozall = [i for i in list(dff[GENE]) if '/'.join([al,al]) in i]
heterozall = [i for i in list(dff[GENE]) if (al in i) and not ('/'.join([al,al]) in i)] #[i for i in list(contigency.columns) if '/'.join(['*1',al]) in i]
comphet = [i for i in list(dff[GENE]) if (al in i) and not ('/'.join([al,al]) in i) and any(x in i for x in posalleles_aux)]
dff['aux'] = dff[GENE].str.replace('*','.')
dff_females['aux'] = dff_females[GENE].str.replace('*','.')
dff_males['aux'] = dff_males[GENE].str.replace('*','.')
if homozall == []:
#nr_homoz = 0
nr_homoz_female = 0
nr_homoz_male = 0
else:
#nr_homoz = dff['sample'].loc[dff['aux'].str.contains('|'.join([i.replace('*','.') for i in homozall]))].sum()
nr_homoz_female = dff_females['sample'].loc[dff_females['aux'].str.contains('|'.join([i.replace('*','.') for i in homozall]))].sum()
nr_homoz_male = dff_males['sample'].loc[dff_males['aux'].str.contains('|'.join([i.replace('*','.') for i in homozall]))].sum()
if heterozall == []:
nr_heteroz = 0
else:
nr_heteroz = dff['sample'].loc[dff['aux'].str.contains('|'.join([i.replace('*','.') for i in heterozall]))].sum() # Assumption: there are no heterozygous males (genotypes were checked beforehand)
if comphet == []:
nr_comhet = 0
else:
nr_comhet = dff['sample'].loc[dff['aux'].str.contains('|'.join([i.replace('*','.') for i in comphet]))].sum() #nr_heteroz -
nr_homoz = 2*nr_homoz_female + nr_homoz_male # Number of alleles coming from homozygous individuals, taking into account that male individuals are hemizygous and thus each add only one allele
count = 100*(nr_heteroz + nr_homoz) / nr_found
count_comphet = 100*(nr_comhet + nr_homoz) / nr_found
count_nocomphet = count - count_comphet
count_aux = (nr_heteroz + nr_homoz)
#d[al] = [count]
allele_names.append(al)
allele_counts.append(count)
allele_names_comphet.append(al + '_COMPHET')
allele_names_comphet.append(al + '_NOCOMPHET')
allele_counts_comphet.append(count_comphet)
allele_counts_comphet.append(count_nocomphet)
allele_counts_aux.append(count_aux)
rf = pd.DataFrame({'alleles':allele_names,'count':allele_counts})
rf_comphet = pd.DataFrame({'alleles':allele_names_comphet,'count':allele_counts_comphet})
n_alleles_aux = 7
if GENE == 'G6PD':
d = dict(zip(list(rf.sort_values(by=['count'], ascending=False).iloc[0:n_alleles_aux]['alleles']),[[j] for j in list(rf.sort_values(by=['count'], ascending=False).iloc[0:n_alleles_aux]['count'])]))
for w in list(d.keys()):
d_comphet[w+'_COMPHET']= [rf_comphet['count'].loc[rf_comphet['alleles'].apply(lambda x: x.replace('*','P')) == w.replace('*','P')+'_COMPHET'].sum()]
d_comphet[w+'_NOCOMPHET']= [rf_comphet['count'].loc[rf_comphet['alleles'].apply(lambda x: x.replace('*','P')) == w.replace('*','P')+'_NOCOMPHET'].sum()]
otheralleleslength = len(rf.sort_values(by=['count'], ascending=False).reset_index().iloc[n_alleles_aux:])
otherallelelist = list(rf.sort_values(by=['count'], ascending=False).reset_index().iloc[n_alleles_aux:]['alleles'].values)
if otheralleleslength > 1:
#d['Other alleles ' + '(N=' + str(otheralleleslength) + ')'] = [rf.sort_values(by=['count'], ascending=False).reset_index().iloc[2:]['count'].sum()]
d['Other alleles'] = [rf.sort_values(by=['count'], ascending=False).reset_index().iloc[n_alleles_aux:]['count'].sum()]
d_comphet['Other alleles_COMPHET'] = [rf_comphet['count'].loc[rf_comphet['alleles'].apply(lambda x: x.replace('*','P')).str.contains('|'.join([i.replace('*','P')+'_COMPHET' for i in otherallelelist]))].sum()]
d_comphet['Other alleles_NOCOMPHET'] = [rf_comphet['count'].loc[rf_comphet['alleles'].apply(lambda x: x.replace('*','P')).str.contains('|'.join([i.replace('*','P')+'_NOCOMPHET' for i in otherallelelist]))].sum()]
elif otheralleleslength == 1:
theother = rf.sort_values(by=['count'], ascending=False).reset_index().iloc[n_alleles_aux:]['alleles'].values[0]
d[theother] = [rf.sort_values(by=['count'], ascending=False).reset_index().iloc[n_alleles_aux:]['count'].sum()]
d_comphet[theother +'_COMPHET'] = [rf_comphet['count'].loc[rf_comphet['alleles'].apply(lambda x: x.replace('*','P')).str.contains('|'.join([i.replace('*','P')+'_COMPHET' for i in otherallelelist]))].sum()]
d_comphet[theother +'_NOCOMPHET'] = [rf_comphet['count'].loc[rf_comphet['alleles'].apply(lambda x: x.replace('*','P')).str.contains('|'.join([i.replace('*','P')+'_NOCOMPHET' for i in otherallelelist]))].sum()]
genecountdf = pd.DataFrame(d) #({'alleles':allele_names,GENE:allele_counts})
genecountdf_comhet = pd.DataFrame(d_comphet)
genecountdf = genecountdf.rename(columns={'Seattle Lodi Modena FerraraII Athens-like':'Seattle*'})
genecountdf = genecountdf.rename(columns={'Mediterranean Dallas Panama‚Sassari Cagliari Birmingham':'Mediterranean*'})
genecountdf = genecountdf.rename(columns={'G6PDA-968C-376G':'A-968C-376G'})
genecountdf = genecountdf.rename(columns={'Union Maewo Chinese-2 Kalo':'Union*'})
genecountdf_comhet = genecountdf_comhet.rename(columns={'Seattle Lodi Modena FerraraII Athens-like_COMPHET':'Seattle*_COMPHET','Seattle Lodi Modena FerraraII Athens-like_NOCOMPHET':'Seattle*_NOCOMPHET'})
genecountdf_comhet = genecountdf_comhet.rename(columns={'Mediterranean Dallas Panama‚Sassari Cagliari Birmingham_COMPHET':'Mediterranean*_COMPHET','Mediterranean Dallas Panama‚Sassari Cagliari Birmingham_NOCOMPHET':'Mediterranean*_NOCOMPHET'})
genecountdf_comhet = genecountdf_comhet.rename(columns={'G6PDA-968C-376G_COMPHET':'A-968C-376G_COMPHET','G6PDA-968C-376G_NOCOMPHET':'A-968C-376G_NOCOMPHET'})
genecountdf_comhet = genecountdf_comhet.rename(columns={'Union Maewo Chinese-2 Kalo_COMPHET':'Union*_COMPHET','Union Maewo Chinese-2 Kalo_NOCOMPHET':'Union*_NOCOMPHET'})
else:
if len(posalleles) > n_alleles_aux:
d = dict(zip(list(rf.sort_values(by=['count'], ascending=False).iloc[[0,1]]['alleles']),[[j] for j in list(rf.sort_values(by=['count'], ascending=False).iloc[[0,1]]['count'])]))
for w in list(d.keys()):
d_comphet[w+'_COMPHET']= [rf_comphet['count'].loc[rf_comphet['alleles'].apply(lambda x: x.replace('*','P')) == w.replace('*','P')+'_COMPHET'].sum()]
d_comphet[w+'_NOCOMPHET']= [rf_comphet['count'].loc[rf_comphet['alleles'].apply(lambda x: x.replace('*','P')) == w.replace('*','P')+'_NOCOMPHET'].sum()]
otheralleleslength = len(rf.sort_values(by=['count'], ascending=False).reset_index().iloc[2:])
otherallelelist = list(rf.sort_values(by=['count'], ascending=False).reset_index().iloc[2:]['alleles'].values)
if otheralleleslength > 1:
#d['Other alleles ' + '(N=' + str(otheralleleslength) + ')'] = [rf.sort_values(by=['count'], ascending=False).reset_index().iloc[2:]['count'].sum()]
d['Other alleles'] = [rf.sort_values(by=['count'], ascending=False).reset_index().iloc[2:]['count'].sum()]
d_comphet['Other alleles_COMPHET'] = [rf_comphet['count'].loc[rf_comphet['alleles'].apply(lambda x: x.replace('*','P')).str.contains('|'.join([i.replace('*','P')+'_COMPHET' for i in otherallelelist]))].sum()]
d_comphet['Other alleles_NOCOMPHET'] = [rf_comphet['count'].loc[rf_comphet['alleles'].apply(lambda x: x.replace('*','P')).str.contains('|'.join([i.replace('*','P')+'_NOCOMPHET' for i in otherallelelist]))].sum()]
else:
theother = rf.sort_values(by=['count'], ascending=False).reset_index().iloc[2:]['alleles'].values[0]
d[theother] = [rf.sort_values(by=['count'], ascending=False).reset_index().iloc[2:]['count'].sum()]
d_comphet[theother +'_COMPHET'] = [rf_comphet['count'].loc[rf_comphet['alleles'].apply(lambda x: x.replace('*','P')).str.contains('|'.join([i.replace('*','P')+'_COMPHET' for i in otherallelelist]))].sum()]
d_comphet[theother +'_NOCOMPHET'] = [rf_comphet['count'].loc[rf_comphet['alleles'].apply(lambda x: x.replace('*','P')).str.contains('|'.join([i.replace('*','P')+'_NOCOMPHET' for i in otherallelelist]))].sum()]
genecountdf = pd.DataFrame(d) #({'alleles':allele_names,GENE:allele_counts})
genecountdf_comhet = pd.DataFrame(d_comphet)
#genecountdf['from'] = country
elif len(posalleles) == 2:
d = dict(zip(list(rf.sort_values(by=['count'], ascending=False).iloc[[0,1]]['alleles']),[[j] for j in list(rf.sort_values(by=['count'], ascending=False).iloc[[0,1]]['count'])]))
genecountdf = pd.DataFrame(d) #({'alleles':allele_names,GENE:allele_counts})
for w in list(d.keys()):
d_comphet[w+'_COMPHET']= [rf_comphet['count'].loc[rf_comphet['alleles'].apply(lambda x: x.replace('*','.')) == w.replace('*','.')+'_COMPHET'].sum()]
d_comphet[w+'_NOCOMPHET']= [rf_comphet['count'].loc[rf_comphet['alleles'].apply(lambda x: x.replace('*','.')) == w.replace('*','.')+'_NOCOMPHET'].sum()]
genecountdf_comhet = pd.DataFrame(d_comphet)
#genecountdf['from'] = country
elif len(posalleles) == 1:
d = dict(zip(list(rf.sort_values(by=['count'], ascending=False).iloc[[0]]['alleles']),[[j] for j in list(rf.sort_values(by=['count'], ascending=False).iloc[[0]]['count'])]))
for w in list(d.keys()):
d_comphet[w+'_COMPHET']= [rf_comphet['count'].loc[rf_comphet['alleles'].apply(lambda x: x.replace('*','.')) == w.replace('*','.')+'_COMPHET'].sum()]
d_comphet[w+'_NOCOMPHET']= [rf_comphet['count'].loc[rf_comphet['alleles'].apply(lambda x: x.replace('*','.')) == w.replace('*','.')+'_NOCOMPHET'].sum()]
genecountdf = pd.DataFrame(d) #({'alleles':allele_names,GENE:allele_counts})
genecountdf_comhet = pd.DataFrame(d_comphet)
#genecountdf['from'] = country
#rf = pd.DataFrame({'Gene':[GENE]*len(allele_names),'alleles':allele_names,'count':allele_counts})
rf_aux = pd.DataFrame({'Gene':[GENE]*len(allele_names),'alleles':allele_names,'count':allele_counts_aux, 'country': [country]*len(allele_counts_aux), 'nr_found': [nr_found]*len(allele_counts_aux)})
return genecountdf, genecountdf_comhet, otherallelelist, rf_aux
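# Worked example for the X-linked counting above (numbers are hypothetical): with 3 homozygous
# females, 2 hemizygous males and 4 heterozygous females carrying a given allele,
# nr_homoz = 2*3 + 2 = 8 alleles; if nr_found = 100 alleles were observed in total,
# count = 100*(4 + 8)/100 = 12 (%).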
##
COLORS = ['tab:blue','tab:red','tab:green', 'tab:olive', 'tab:cyan', 'tab:orange', 'tab:brown','m','tab:pink']
othercountries = list(set(df['from']))
othercountries.remove('Spain')
othercountries.remove('Colombia')
othercountries.remove('Brazil')
othercountries = '|'.join(othercountries)
allcountries = '|'.join(list(set(df['from'])))
countries = [allcountries, 'Spain', 'Colombia', 'Brazil']#, othercountries]
RF = pd.DataFrame({'Gene':['hey'],'alleles':['what'], 'count':[0], 'country': [0], 'nr_found': [0]}) # initialising this dataframe
fig, ax1 = plt.subplots(nrows, ncols, figsize=(7,10), sharey=False, sharex=False)
for GENE in GENES:
genecountdf = pd.DataFrame()
genecountdf_comhet = pd.DataFrame()
coord_row = d_coords[GENE][1]
coord_col = d_coords[GENE][0]
country_col = list()
otherallelistpergene = list()
print(GENE)
for country in countries:
dff = df.loc[df['from'].str.contains(country)].groupby(GENE)['sample'].count().reset_index()
dff = dff.loc[~(dff[GENE] == '')].copy()
if len(dff) == 0:
genecountdf = genecountdf.append(pd.Series(name=0))
genecountdf_comhet = genecountdf_comhet.append(pd.Series(name=0))
countryname = country.replace(allcountries, 'All').replace(othercountries, 'Other')
country_col.append(countryname)
else:
if GENE == 'G6PD':
genecountdf_aux, genecountdf_comhet_aux, otherallelelist_aux, rf = prepareDF4plot_chrX(df, country, GENE, RF)
else:
genecountdf_aux, genecountdf_comhet_aux, otherallelelist_aux, rf = prepareDF4plot(df, country, GENE, RF)
otherallelistpergene.append(otherallelelist_aux)
RF = pd.concat([RF,rf])
countryname = country.replace(allcountries, 'All').replace(othercountries, 'Other')
country_col.append(countryname)
genecountdf = pd.concat([genecountdf, genecountdf_aux])
genecountdf_comhet = pd.concat([genecountdf_comhet, genecountdf_comhet_aux])
#print(country + ' ' + ' '.join(otherallelelist_aux))
country_colcount = list()
if coord_row == nrows-1:
for c, cc in zip(country_col,countries):
c = c + '\n'
#country_colcount.append(c + '(n=' + str(len(df.loc[(~(df[GENE]).isnull()) & (df['from'].str.contains(cc))])) + ')')
country_colcount.append(c + '[' + str(len(df.loc[(~(df[GENE]).isnull()) & (df['from'].str.contains(cc))]))+ ']')
else:
for c, cc in zip(country_col,countries):
c = ' '
#country_colcount.append(c + '(n=' + str(len(df.loc[(~(df[GENE]).isnull()) & (df['from'].str.contains(cc))])) + ')')
country_colcount.append(c + '[' + str(len(df.loc[(~(df[GENE]).isnull()) & (df['from'].str.contains(cc))]))+ ']')
genecountdf['from'] = country_colcount
genecountdf_comhet['from'] = country_colcount
nr_individuals = len(df.loc[~(df[GENE]).isnull()])
f = list()
othercheck = False
for i in list(genecountdf.columns):
if (i != 'Other alleles') and (i != 'from'):
f.append(i)
elif i == 'Other alleles':
othercheck = True
colors_aux = COLORS[0:len(f)]
if othercheck:
f.append('Other alleles')
colors_aux.append('tab:purple')
f_comhet = [[i+'_COMPHET',i+'_NOCOMPHET'] for i in f]
f_comhet = [item for sublist in f_comhet for item in sublist]
f.append('from')
f_comhet.append('from')
colors = colors_aux
genecountdf = genecountdf[f].copy()
genecountdf_comhet = genecountdf_comhet[f_comhet].copy()
ylim = [0,100]
otherallelistpergene_squeezed= list(set([item for sublist in otherallelistpergene for item in sublist]))
genecountdf = genecountdf.rename(columns={'Other alleles':'Other [' + str(len(otherallelistpergene_squeezed)) + ']'})
genecountdf.plot(x='from',ax = ax1[coord_row,coord_col],align = 'edge', kind = 'bar',width = -0.35, stacked = True, edgecolor = 'black', title = GENE , mark_right = True, color=colors)#,position=1+ ' (N=' + str(nr_individuals) + ')'
label_list = list()
for t in ax1[coord_row,coord_col].get_legend_handles_labels():
label_list.append(t)
#ax2 = ax1[coord_row,coord_col].twinx() # Create another axes that shares the same x-axis as ax.
#genecountdf_comhet.plot(x='from',ax = ax2,position=0 ,kind = 'bar',width = 0.35, stacked = True, edgecolor = 'black' , mark_right = True, color=['lightgray','white']*len(colors)) #, title = GENE
genecountdf = genecountdf.rename(columns={'Other alleles_COMPHET':'Other [' + str(len(otherallelistpergene_squeezed)) + ']','Other alleles_NOCOMPHET':'Other [' + str(len(otherallelistpergene_squeezed)) + ']'})
genecountdf_comhet.plot(x='from',ax = ax1[coord_row,coord_col] , align= 'edge',kind = 'bar',width = 0.15, stacked = True, edgecolor = 'black' , mark_right = True, color=['lightgray','white']*len(colors)) #, title = GENE
if GENE == 'UGT1A1': # Adding a second legend
label_list_comphet = [ax1[coord_row,coord_col].get_legend_handles_labels()[0][-2:], ['Comp.Het.\nHomozygous', 'No Comp.Het.\nNo Homoz.']]# ax2.get_legend_handles_labels()[1][0:2]]
legend2 = plt.legend(handles=label_list_comphet[0], labels=label_list_comphet[1],loc='center right', fontsize = 9, labelspacing=0.15, handletextpad=0.2,handlelength=1,bbox_to_anchor=(0.0,-0.1))
ax1[coord_row,coord_col].add_artist(legend2)
label_list = [label_list[0],label_list[1]]
#label_list = [label_list[0] + label_list_comphet[0],label_list[1] + label_list_comphet[1]]
#ax1[coord_row,coord_col].legend().set_visible(False)
ax1[coord_row,coord_col].legend(handles=label_list[0], labels=label_list[1],loc='center left', fontsize = 9, labelspacing=0.1, handletextpad=0.2, handlelength=1,bbox_to_anchor=(1.0,0.5))
xlim = [-0.5,3.3]
ax1[coord_row, coord_col].set_xlim(xlim)
ax1[coord_row, coord_col].set_ylim(ylim)
ax1[coord_row, coord_col].set_title(label = GENE , fontsize= 10, fontweight="bold") #+ ' (N=' + str(nr_individuals) + ')', fontsize= 10) # title of plot
plt.setp(ax1[coord_row, coord_col].get_xticklabels(), visible=True, rotation=0, ha='center')
ax1[coord_row, coord_col].tick_params(axis = 'x',which='major', labelsize=10) #,labelrotation=0
ax1[coord_row, coord_col].set_xlabel('')
if coord_col == 0:
ax1[coord_row,coord_col].set_ylabel('%')
else:
ax1[coord_row,coord_col].yaxis.set_visible(False)
plt.subplots_adjust(left=0.080, bottom=0.050, right=0.850, top=0.97, wspace=0.665, hspace=0.365)
plt.savefig('/path/to/Figures/Figure_2B_splittedbyLatamCountry.png',format = 'png', dpi = 500)
plt.show()
``` |
{
"source": "jlant/gagepy",
"score": 3
} |
#### File: gagepy/gagepy/output.py
```python
import json
from jinja2 import Template, PackageLoader, Environment
def render_summary(data, template_filename):
"""Render a summary of the data.
:param data: Data to fill template with
:type data: dictionary
:param template_filename: The filename of the template to fill
:type template_filename: string
"""
loader = PackageLoader("gagepy", "templates")
env = Environment(loader=loader)
template = env.get_template(template_filename)
return template.render(data)
def save_summary(data, template_filename, output_file):
"""Save summary as a restructured text file.
:param data: Data to fill template with
:type data: dictionary
:param template_filename: The filename of the template to fill
:type template_filename: string
:param output_file: The full path with filename of the output file to write
:type output_file: string
"""
if isinstance(output_file, str):
with open(output_file, "w") as f:
f.write(render_summary(data, template_filename))
```
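The two helpers above implement a simple render-then-write pattern. Below is a minimal usage sketch, assuming the package ships a `parameter-summary-template.rst` template (as referenced by the `Parameter` class in the next file) and that the data dictionary supplies the placeholders that template expects:
```python
from gagepy import output

# Hypothetical summary data; in practice the keys come from Parameter.get_summary_data()
data = {"name": "Discharge", "units": "cubic feet per second (Mean)"}

# Render the filled-in template to a string...
text = output.render_summary(data, "parameter-summary-template.rst")

# ...or write it straight to disk
output.save_summary(data, "parameter-summary-template.rst", "discharge-summary.rst")
```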
#### File: gagepy/gagepy/parameter.py
```python
import os
import numpy as np
from jinja2 import Template, PackageLoader, Environment
from .plotting import plot_parameter, plot_parameter_html
from .output import render_summary
class Parameter(object):
""" A class that contains data and functionality of a timeseries parameter.
:param name: Name of the parameter
:type name: string
:param dates: Array of datetime objects
:type dates: numpy.ndarray
:param values: Array of data values
:type values: numpy.ndarray
:param units: Units of the parameter
:type units: string
:param code: A numeric code identifier of the parameter; defaults to None
.. warning::
Length of dates and length of values must be equal to be a valid
timeseries parameter.
"""
template_rst = "parameter-summary-template.rst"
template_html = "parameter-summary-template.html"
def __init__(self, name, dates, values, units, code=None):
assert len(dates) == len(values), "Length of dates {0} does not equal length of values {1}".format(len(dates), len(values))
self.name = name
self.dates = dates
self.values = values
self.units = units
self.code = code
@property
def mean(self):
"""Get the mean ignoring any NaN values"""
return np.nanmean(self.values)
@property
def max(self):
"""Get the maximum ignoring any NaN values"""
return np.nanmax(self.values)
@property
def min(self):
"""Get the minimum ignoring any NaN values"""
return np.nanmin(self.values)
@property
def max_date(self):
"""Get the date that the maximum value occurred on"""
max_index = np.nanargmax(self.values)
return self.dates[max_index]
@property
def min_date(self):
"""Get the date that the min value occurred on"""
min_index = np.nanargmin(self.values)
return self.dates[min_index]
def get_summary_data(self):
"""Return a dictionary of the data for the summary"""
return {"name": self.name,
"units": self.units,
"mean": self.mean,
"max": self.max,
"min": self.min,
"max_date": self.max_date,
"min_date": self.min_date,
"start_date": self.dates[0],
"end_date": self.dates[-1],
"num_vals": len(self.values),
"plot": self.plot_html(),
}
def __str__(self):
"""Return a formatted view of the data"""
return render_summary(self.get_summary_data(), self.template_rst)
def plot_html(self):
"""Return an html string represenation of the figure"""
return plot_parameter_html(self.dates, self.values, self.name, self.units)
def plot(self, path=""):
"""Show and optionally save plot."""
plot_parameter(self.dates, self.values, self.mean, self.max, self.min, self.name, self.units, path)
```
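A minimal usage sketch for the `Parameter` class above; the dates and values are illustrative, and the summary statistics are NaN-aware:
```python
import numpy as np
from datetime import datetime
from gagepy.parameter import Parameter

dates = np.array([datetime(2015, 8, day) for day in range(1, 6)])
values = np.array([100.0, 110.0, np.nan, 107.0, 112.0])

param = Parameter(name="Discharge",
                  dates=dates,
                  values=values,
                  units="cubic feet per second (Mean)",
                  code="06_00060_00003")

print(param.mean)                 # NaN-ignoring mean of the values
print(param.max, param.max_date)  # maximum value and the date it occurred on
```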
#### File: gagepy/gagepy/utils.py
```python
import os
import numpy as np
import datetime
import re
def get_file_paths(dirname, file_ext):
"""Return a list of absolute file paths for certain files files in a directory. Walks through
subdirectories.
:param dirname: Name of directory to start walking
:type dirname: string
:param file_ext: File extension to look for
:type file_ext: string
:returns: List of absolute file paths
:rtype: list
"""
file_paths = []
for root, directories, files in os.walk(dirname):
for filename in files:
filepath = os.path.join(root, filename)
if file_ext and filepath.endswith(file_ext):
file_paths.append(filepath)
return file_paths
def get_file_info(filepath):
"""Return a file's directory and name for a file path.
:param filepath: Path to file
:type filepath: string
:returns: File directory and file name
:type: tuple
"""
filedir, filename = os.path.split(filepath)
# filedir is an empty string when file is in current directory
if not filedir:
filedir = os.getcwd()
return filedir, filename
def rmchars(value):
"""Remove special characters from alphanumeric values except for period (.)
and negative (-) characters.
:param value: Alphanumeric value
:type value: string
:returns: Alphanumeric value stripped of any special characters
:rtype: string
>>> import utils
>>> utils.rmchars(value = "*6.5_")
'6.5'
>>> utils.rmchars(value = "ICE")
'ICE'
>>> utils.rmchars(value = "-4.2")
'-4.2'
>>> utils.rmchars(value = "%&!@#8.32&#*;")
'8.32'
"""
value = re.sub("[^A-Za-z0-9.-]+", "", value)
return value
def is_float(value):
"""Return True if a string value can be converted to a float and False otherwise.
:param value: Value to check
:rtype: bool
>>> import utils
>>> utils.is_float(value = "2.5")
True
>>> utils.is_float(value = "hello world")
False
>>> utils.is_float(value = "5.5_")
False
"""
try:
float(value)
return True
except ValueError:
return False
def to_float(value):
"""Convert a value to a float type.
:param value: Value to convert to float
:returns: Value as a float
:rtype: float
"""
value = rmchars(value)
if is_float(value):
return float(value)
else:
raise ValueError("Can not convert {} value to a float".format(value))
def to_nan(value, msg=None):
"""Convert a value to a numpy nan and print a message if available.
:param value: Value to convert to nan
:type value:
:param msg: Optional message to print to screen
:returns: Numpy nan value
:rtype: float
"""
if msg:
print(msg)
return np.nan
def subset_data(dates, values, start_date, end_date):
"""Return a subset of date and value arrays to match the range of dates
between a given start_date and end_date. If start_date and end_date are not
within the range of dates specified in dates, then the start_date and
end_date are set to the first and last dates in the dates array.
:param dates: Array of dates as datetime objects
:type dates: numpy.ndarray
:param values: Array of numeric values
:type values: numpy.ndarray
:param start_date: A datetime object
:type start_date: datetime.datetime
:param end_date: A datetime object
:type end_date: datetime.datetime
:returns: A subset of dates and values
:rtype: tuple
"""
if len(dates) != len(values):
raise ValueError("Length of dates {} does not equal length of values {}".format(len(dates), len(values)))
else:
# if start_date or end_date are not within dates, set them to the first and last elements in dates
if start_date < dates[0] or start_date > dates[-1]:
start_date = dates[0]
if end_date > dates[-1] or end_date < dates[0]:
end_date = dates[-1]
# find start and ending indices; have to convert idx from array to int to slice
start_idx = int(np.where(dates == start_date)[0])
end_idx = int(np.where(dates == end_date)[0])
# subset variable and date range;
date_subset = dates[start_idx:end_idx + 1]
values_subset = values[start_idx:end_idx + 1]
return date_subset, values_subset
def find_start_end_dates(dates1, dates2):
"""Find start and end dates between lists (or arrays) of datetime objects
that do not have the same length.
The start date will be the later of two dates.
The end date will be the earlier of the two dates.
:param dates1: List or array of datetime objects
:type dates1: list or numpy.ndarray
:param dates2: List or array of datetime objects
:type dates2: list or numpy.ndarray
:returns: Tuple of start date and end date
:rtype: tuple
:raises: ValueError for non overlapping dates
"""
# convert dates to sets for set intersection
date1_set = set(dates1)
date2_set = set(dates2)
if date1_set.intersection(date2_set):
# start date
if dates2[0] > dates1[0]:
start_date = dates2[0]
else:
start_date = dates1[0]
# end date
if dates2[-1] > dates1[-1]:
end_date = dates1[-1]
else:
end_date = dates2[-1]
return start_date, end_date
else:
raise ValueError("No overlapping dates.")
def add_ending(file, suffix, ext, delimiter="-"):
"""Add a new ending to a filename,
:param file: File or path to file
:type file: string
:param suffix: Suffix to add to end of file
:type suffix: string
:param ext: File extension
:type ext: string
:param delimiter: Delimiter, default is the dash character
:type delimiter: string
:returns: New file
:rtype: string
.. note::
Spaces in filenames are replaced by the delimiter, in keeping with Unix file naming conventions.
>>> import utils
>>> utils.add_ending(file="dv.txt", suffix="summary", ext=".txt")
'dv-summary.txt'
>>> utils.add_ending(file="dv.rdb", suffix="summary", ext=".rst", delimiter="_")
'dv_summary.rst'
>>> utils.add_ending(file="/home/play/dv.rdb", suffix="summary", ext=".rst")
'/home/play/dv-summary.rst'
>>> utils.add_ending(file="daily values.rdb", suffix="summary", ext=".rst")
'daily-values-summary.rst'
"""
assert isinstance(file, str), "File must be a string."
assert isinstance(suffix, str), "Suffix must be a string."
assert isinstance(ext, str), "Extension must be a string."
assert isinstance(delimiter, str), "Delimiter must be a string."
path, fullname = os.path.split(file)
name, ext_orig = os.path.splitext(fullname)
parts = name.split()
if suffix:
parts.append(suffix)
if ext:
newname = delimiter.join(parts) + ext
else:
newname = delimiter.join(parts) + ext_orig
return os.path.join(path, newname)
```
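A short sketch combining `find_start_end_dates` and `subset_data` to clip two overlapping daily series to their common date window before comparing them (dates and values are illustrative):
```python
import numpy as np
from datetime import datetime, timedelta
from gagepy import utils

dates1 = np.array([datetime(2015, 1, 1) + timedelta(days=i) for i in range(10)])   # Jan 1-10
values1 = np.arange(10)
dates2 = np.array([datetime(2015, 1, 4) + timedelta(days=i) for i in range(10)])   # Jan 4-13
values2 = np.arange(10) * 2

# Common window: the later start (Jan 4) and the earlier end (Jan 10)
start, end = utils.find_start_end_dates(dates1, dates2)

# Clip both series to that window so they can be compared element-wise
d1, v1 = utils.subset_data(dates1, values1, start, end)
d2, v2 = utils.subset_data(dates2, values2, start, end)
```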
#### File: gagepy/tests/test_output.py
```python
import pytest
import os
import numpy as np
from gagepy.usgsgage import USGSGage
from gagepy.parameter import Parameter
from gagepy import output
def test_save_usgs_gage_summary_rst(dates_daily):
usgs_gage = USGSGage(name = "USGS 03290500 KENTUCKY RIVER AT LOCK 2 AT LOCKPORT, KY",
parameters = [
Parameter(name = "Discharge",
dates = dates_daily,
values = np.array([1, 2, 3, 4, 5]),
units = "cubic feet per second (Mean)",
code = "06_00060_00003"),
Parameter(name = "Stage",
dates = dates_daily,
values = np.array([2, 4, 6, 8, 10]),
units = "feet",
code = "03_00065")
])
output.save_summary(data = usgs_gage.get_summary_data(),
template_filename = usgs_gage.template_rst,
output_file = "tests/usgsgage-summary.rst")
def test_save_usgs_gage_summary_html(dates_daily):
usgs_gage = USGSGage(name = "USGS 03290500 KENTUCKY RIVER AT LOCK 2 AT LOCKPORT, KY",
parameters = [
Parameter(name = "Discharge",
dates = dates_daily,
values = np.array([1, 2, 3, 4, 5]),
units = "cubic feet per second (Mean)",
code = "06_00060_00003"),
Parameter(name = "Stage",
dates = dates_daily,
values = np.array([2, 4, 6, 8, 10]),
units = "feet",
code = "03_00065")
])
output.save_summary(data = usgs_gage.get_summary_data(),
template_filename = usgs_gage.template_html,
output_file = "tests/usgsgage-summary.html")
def test_save_parameter_summary_rst(dates_daily):
parameter = Parameter(name = "Discharge",
dates = dates_daily,
units = "cubic feet per second (Mean)",
code = "06_00060_00003",
values = np.array([1, 2, 3, 4, 5]))
output.save_summary(data = parameter.get_summary_data(),
template_filename = parameter.template_rst,
output_file = "tests/parameter-summary.rst")
def test_parameter_save_summary_html(dates_daily):
parameter = Parameter(name = "Discharge",
dates = dates_daily,
units = "cubic feet per second (Mean)",
code = "06_00060_00003",
values = np.array([1, 2, 3, 4, 5]))
output.save_summary(data = parameter.get_summary_data(),
template_filename = parameter.template_html,
output_file = "tests/parameter-summary.html")
```
#### File: gagepy/tests/test_rdb.py
```python
import pytest
import os
import numpy as np
import re
from io import StringIO
from datetime import datetime
from gagepy import rdb
from gagepy.usgsgage import USGSGage
from gagepy.usgsgage import Parameter
def test_parse_gage_name():
expected = "USGS 03290500 KENTUCKY RIVER AT LOCK 2 AT LOCKPORT, KY"
line = "# USGS 03290500 KENTUCKY RIVER AT LOCK 2 AT LOCKPORT, KY"
match = re.search(rdb.gage_name_re, line)
actual = match.group(2)
assert actual == expected
def test_match_parameter_code_with_statistic_code():
expected = ("#", "06", "00060", "\t00003", "\tDischarge, cubic feet per second (Mean)")
line = "#\t06\t00060\t00003\tDischarge, cubic feet per second (Mean)"
match = re.search(rdb.parameter_code_re, line)
actual = match.groups(0)
assert actual == expected
def test_match_parameter_code_without_statistic_code():
expected = ("#", "03", "00065", 0, "\tGage height, feet")
line = "#\t03\t00065\tGage height, feet"
match = re.search(rdb.parameter_code_re, line)
actual = match.groups(0)
assert actual == expected
def test_parse_parameter_code():
expected = ("06_00060_00003", "Discharge, cubic feet per second (Mean)")
line = "#\t06\t00060\t00003\tDischarge, cubic feet per second (Mean)"
match = re.search(rdb.parameter_code_re, line)
actual = rdb.parse_parameter_code(match)
assert actual == expected
def test_parse_daily_column_names():
expected = [
"agency_cd",
"site_no",
"datetime",
"06_00060_00003",
"06_00060_00003_cd"
]
line = "agency_cd\tsite_no\tdatetime\t06_00060_00003\t06_00060_00003_cd"
match = re.search(rdb.column_names_re, line)
actual = match.group(0).split("\t")
assert actual == expected
def test_parse_instantaneous_column_names():
expected = [
"agency_cd",
"site_no",
"datetime",
"tz_cd",
"03_00065",
"03_00065_cd"
]
line = "agency_cd\tsite_no\tdatetime\ttz_cd\t03_00065\t03_00065_cd"
match = re.search(rdb.column_names_re, line)
actual = match.group(0).split("\t")
assert actual == expected
def test_match_data_row():
expected = ("USGS", "03290500", "2012-07-01", 0, "171\tA")
line = "USGS\t03290500\t2012-07-01\t171\tA"
match = re.search(rdb.data_row_re, line)
actual = match.groups(0)
assert actual == expected
def test_parse_daily_date():
expected = datetime(2015, 8, 1, 0, 0)
actual = rdb.parse_daily_date("2015-08-01")
assert actual == expected
def test_parse_instantaneous_date():
expected = datetime(2015, 8, 1, 0, 15)
actual = rdb.parse_instantaneous_date(date = "2015-08-01", time = "00:15\tEDT")
assert actual == expected
def test_parse_description():
assert rdb.parse_description("Temperature, water, degrees Celsius") == ("Temperature", "degrees Celsius")
assert rdb.parse_description("Dissolved oxygen, water, unfiltered, milligrams per liter") == ("Dissolved oxygen", "milligrams per liter")
def test_usgs_gage_for_daily_single_parameter(dates_daily, daily_value_file_single_parameter):
filestream = StringIO(daily_value_file_single_parameter)
usgs_gage = rdb.read_rdb_in(filestream)
assert usgs_gage.name == "USGS 03290500 KENTUCKY RIVER AT LOCK 2 AT LOCKPORT, KY"
assert len(usgs_gage.parameters) == 1
assert list(usgs_gage.parameters[0].dates) == list(dates_daily)
assert usgs_gage.parameters[0].code == "06_00060_00003"
assert usgs_gage.parameters[0].name == "Discharge"
assert usgs_gage.parameters[0].units == "cubic feet per second (Mean)"
assert list(usgs_gage.parameters[0].values) == list(np.array([100, 110, 105, 107, 112]))
def test_usgs_gage_for_instantaneous_single_parameter(dates_instantaneous, instantaneous_value_file_single_parameter):
filestream = StringIO(instantaneous_value_file_single_parameter)
usgs_gage = rdb.read_rdb_in(filestream)
assert usgs_gage.name == "USGS 11143000 BIG SUR R NR BIG SUR CA"
assert len(usgs_gage.parameters) == 1
assert list(usgs_gage.parameters[0].dates) == list(dates_instantaneous)
assert usgs_gage.parameters[0].code == "03_00065"
assert usgs_gage.parameters[0].name == "Gage height"
assert usgs_gage.parameters[0].units == "feet"
assert list(usgs_gage.parameters[0].values) == list(np.array([5, 10, 15, 4.5, 5.5]))
def test_usgs_gage_for_instantaneous_multi_parameter(dates_instantaneous, instantaneous_value_file_multi_parameter):
filestream = StringIO(instantaneous_value_file_multi_parameter)
usgs_gage = rdb.read_rdb_in(filestream)
assert usgs_gage.name == "USGS 03401385 DAVIS BRANCH AT HIGHWAY 988 NEAR MIDDLESBORO, KY"
assert len(usgs_gage.parameters) == 3
assert usgs_gage.parameters[0].code == "02_00065"
assert usgs_gage.parameters[1].code == "03_00010"
assert usgs_gage.parameters[2].code == "04_00300"
assert usgs_gage.parameters[0].name == "Gage height"
assert usgs_gage.parameters[1].name == "Temperature"
assert usgs_gage.parameters[2].name == "Dissolved oxygen"
assert usgs_gage.parameters[0].units == "feet"
assert usgs_gage.parameters[1].units == "degrees Celsius"
assert usgs_gage.parameters[2].units == "milligrams per liter"
assert list(usgs_gage.parameters[0].dates) == list(dates_instantaneous)
assert list(usgs_gage.parameters[1].dates) == list(dates_instantaneous)
assert list(usgs_gage.parameters[2].dates) == list(dates_instantaneous)
assert list(usgs_gage.parameters[0].values) == list(np.array([1, 2, 3, 4, 5]))
assert list(usgs_gage.parameters[1].values) == list(np.array([5, 10, 15, 20, 25]))
assert list(usgs_gage.parameters[2].values) == list(np.array([2.0, 1.25, 1.20, 0.5, 0.75]))
def test_usgs_gage_for_daily_single_parameter_bad_data_row_formatting(dates_daily, daily_value_file_single_parameter_bad_formatting):
filestream = StringIO(daily_value_file_single_parameter_bad_formatting)
usgs_gage = rdb.read_rdb_in(filestream)
assert usgs_gage.name == "USGS 03290500 KENTUCKY RIVER AT LOCK 2 AT LOCKPORT, KY"
assert len(usgs_gage.parameters) == 1
assert usgs_gage.parameters[0].code == "06_00060_00003"
assert usgs_gage.parameters[0].name == "Discharge"
assert usgs_gage.parameters[0].units == "cubic feet per second (Mean)"
assert list(usgs_gage.parameters[0].dates) == list(dates_daily)
assert list(usgs_gage.parameters[0].values) == list(np.array([10, 20, 30, 40, 50]))
def test_usgs_gage_for_instantaneous_single_parameter_missing_data(dates_instantaneous, instantaneous_value_file_single_parameter_missing_data):
filestream = StringIO(instantaneous_value_file_single_parameter_missing_data)
usgs_gage = rdb.read_rdb_in(filestream)
assert usgs_gage.name == "USGS 11143000 BIG SUR R NR BIG SUR CA"
assert len(usgs_gage.parameters) == 1
assert usgs_gage.parameters[0].code == "03_00065"
assert usgs_gage.parameters[0].name == "Gage height"
assert usgs_gage.parameters[0].units == "feet"
assert list(usgs_gage.parameters[0].dates) == list(dates_instantaneous)
np.testing.assert_almost_equal(usgs_gage.parameters[0].values, np.array([5, 10, np.nan, np.nan, 5.5]))
def test_usgs_gage_for_instantaneous_single_parameter_bad_characters_data_raises_value_error(dates_instantaneous, instantaneous_value_file_single_parameter_bad_characters):
filestream = StringIO(instantaneous_value_file_single_parameter_bad_characters)
with pytest.raises(ValueError) as verror:
usgs_gage = rdb.read_rdb_in(filestream)
assert str(verror.value) == "Can not convert Ice value to a float"
def test_read_rdb_raises_invalidrdb():
gagepy_dirpath = os.path.abspath(__file__ + "/../../")
bad_filepath = os.path.join(gagepy_dirpath, "README.rst")
with pytest.raises(Exception) as error:
usgs_gage = rdb.read_rdb(bad_filepath)
assert str(error.value) == "{0} is not a valid RDB file.".format(bad_filepath)
```
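The assertions above double as a usage guide for gagepy's RDB reader: `read_rdb_in()` takes a file-like object and returns a gage whose parameters expose `code`, `name`, `units`, `dates`, and `values`. A minimal sketch of that flow, inferred from these tests only — the `from gagepy import rdb` import path and the `rdb_text` fixture are assumptions, not the package's documented API:
```python
from io import StringIO

from gagepy import rdb  # assumed import path, mirroring the tests above

rdb_text = "..."  # placeholder: an NWIS RDB payload held as a string

gage = rdb.read_rdb_in(StringIO(rdb_text))
print(gage.name)
for parameter in gage.parameters:
    # Each parameter carries its code, human-readable name, units, and the
    # parallel arrays of timestamps and measured values asserted on above.
    print(parameter.code, parameter.name, parameter.units)
    print(parameter.dates[:3], parameter.values[:3])
```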
#### File: gagepy/tests/test_utils.py
```python
import pytest
import numpy as np
from gagepy import utils
from datetime import datetime
import os
def test_is_float():
assert utils.is_float(6.25) == True
assert utils.is_float("6.25") == True
assert utils.is_float("2.5_") == False
assert utils.is_float("hello") == False
def test_to_float():
assert utils.to_float("6.25") == 6.25
assert utils.to_float("*2.5_") == 2.5
def test_rmchars():
assert utils.rmchars("6.5_") == "6.5"
assert utils.rmchars("*$^**(@4.25_+;") == "4.25"
assert utils.rmchars("-4.1") == "-4.1"
def test_subset_data_dates_within_range(dates_and_values):
start = datetime(2015, 1, 4)
end = datetime(2015, 1, 10)
expected_dates = np.array([datetime(2015, 1, 4, 0, 0),
datetime(2015, 1, 5, 0, 0),
datetime(2015, 1, 6, 0, 0),
datetime(2015, 1, 7, 0, 0),
datetime(2015, 1, 8, 0, 0),
datetime(2015, 1, 9, 0, 0),
datetime(2015, 1, 10, 0, 0)])
expected_values = np.array([3, 4, 5, 6, 7, 8, 9])
actual_dates, actual_values = utils.subset_data(dates = dates_and_values[0],
values = dates_and_values[1],
start_date = start,
end_date = end)
assert list(actual_dates) == list(expected_dates)
assert list(actual_values) == list(expected_values)
def test_subset_data_dates_outside_range(dates_and_values):
start = datetime(2014, 12, 1)
end = datetime(2015, 1, 20)
expected_dates = np.array([datetime(2015, 1, 1, 0, 0),
datetime(2015, 1, 2, 0, 0),
datetime(2015, 1, 3, 0, 0),
datetime(2015, 1, 4, 0, 0),
datetime(2015, 1, 5, 0, 0),
datetime(2015, 1, 6, 0, 0),
datetime(2015, 1, 7, 0, 0),
datetime(2015, 1, 8, 0, 0),
datetime(2015, 1, 9, 0, 0),
datetime(2015, 1, 10, 0, 0),
datetime(2015, 1, 11, 0, 0)])
expected_values = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
actual_dates, actual_values = utils.subset_data(dates = dates_and_values[0],
values = dates_and_values[1],
start_date = start,
end_date = end)
assert list(actual_dates) == list(expected_dates)
assert list(actual_values) == list(expected_values)
def test_find_start_end_dates_shorter_range(dates_and_values, dates_shorter):
expected_start_date = datetime(2015, 1, 3, 0, 0)
expected_end_date = datetime(2015, 1, 11, 0, 0)
actual_start_date, actual_end_date = utils.find_start_end_dates(dates_and_values[0], dates_shorter)
assert actual_start_date == expected_start_date
assert actual_end_date == expected_end_date
def test_find_start_end_dates_longer_range(dates_and_values, dates_longer):
expected_start_date = datetime(2015, 1, 1, 0, 0)
expected_end_date = datetime(2015, 1, 11, 0, 0)
actual_start_date, actual_end_date = utils.find_start_end_dates(dates_and_values[0], dates_longer)
assert actual_start_date == expected_start_date
assert actual_end_date == expected_end_date
def test_add_ending():
assert utils.add_ending(file="dv.txt", suffix="summary", ext=".txt") == "dv-summary.txt"
assert utils.add_ending(file="dv.txt", suffix="", ext=".rst") == "dv.rst"
assert utils.add_ending(file="dv.txt", suffix="", ext="") == "dv.txt"
assert utils.add_ending(file="dv.txt", suffix="summary", ext="") == "dv-summary.txt"
assert utils.add_ending(file="dv.rdb", suffix="summary", ext=".rst", delimiter="_") == "dv_summary.rst"
assert utils.add_ending(file=os.path.join(os.path.sep, "home", "play", "dv.rdb"), suffix="summary", ext=".rst") == os.path.join(os.path.sep, "home", "play", "dv-summary.rst")
assert utils.add_ending(file="daily values.rdb", suffix="summary", ext=".rst") == "daily-values-summary.rst"
assert utils.add_ending(file="usgs daily values.rdb", suffix="summary", ext=".rst", delimiter="_") == "usgs_daily_values_summary.rst"
with pytest.raises(AssertionError) as error:
utils.add_ending(file=True, suffix="summary", ext=".txt")
with pytest.raises(AssertionError) as error:
utils.add_ending(file="dv.txt", suffix=2, ext=".txt")
assert str(error.value) == "Suffix must be a string."
with pytest.raises(AssertionError) as error:
utils.add_ending(file="dv.txt", suffix="summary", ext=4.5)
assert str(error.value) == "Extension must be a string."
with pytest.raises(AssertionError) as error:
utils.add_ending(file="dv.txt", suffix="summary", ext=".txt", delimiter=5)
assert str(error.value) == "Delimiter must be a string."
``` |
{
"source": "jlantner/loans",
"score": 3
} |
#### File: loans/polls/tests.py
```python
import datetime
from django.utils import timezone
from django.test import TestCase
from django.urls import reverse
from .models import Question
def create_question(question_text,days):
time=timezone.now() + datetime.timedelta(days=days)
return Question.objects.create(question_text=question_text, pub_date=time)
class QuestionModelTests(TestCase):
#def test_date(self):
# self.assertIs(date(),timezone.now())
def test_was_published_recently_with_future_question(self):
"""
was_published_recently() returns False for questions whose pub_date
is in the future.
"""
time = timezone.now() + datetime.timedelta(days=30)
future_question = Question(pub_date=time)
self.assertIs(future_question.was_published_recently(), False)
def test_was_published_recently_with_old_question(self):
time = timezone.now() - datetime.timedelta(days=1,seconds=1)
old_question=Question(pub_date=time)
self.assertIs(old_question.was_published_recently(),False)
def test_was_published_recently_with_recent_question(self):
time = timezone.now()-datetime.timedelta(hours=23,minutes=59,seconds=59)
recent_question=Question(pub_date=time)
self.assertIs(recent_question.was_published_recently(),True)
class QuestionIndexViewTests(TestCase):
def test_no_questions(self):
response = self.client.get(reverse('polls:index'))
self.assertEqual(response.status_code,200)
self.assertContains(response, "No polls are available.")
self.assertQuerysetEqual(response.context['latest_question_list'],[])
def test_past_question(self):
create_question(question_text="Past question.", days=-30)
response = self.client.get(reverse('polls:index'))
self.assertQuerysetEqual(
response.context['latest_question_list'],['<Question: Past question.>'])
def test_future_question(self):
future_question = create_question(question_text="Future question.",days=30)
response = self.client.get(reverse('polls:index'))
self.assertContains(response, "Future question")
self.assertQuerysetEqual(response.context['latest_question_list'],[])
def test_future_question_and_past_question(self):
create_question(question_text="Past question.", days=-30)
create_question(question_text="Future question.", days=30)
response = self.client.get(reverse('polls:index'))
self.assertQuerysetEqual(response.context['latest_question_list'],['<Question: Past question.>'])
def test_two_past_questions(self):
create_question(question_text="Past question 1.", days=-30)
create_question(question_text="Past question 2.", days=-5)
response = self.client.get(reverse('polls:index'))
self.assertQuerysetEqual(response.context['latest_question_list'],['<Question: Past question 2.>', '<Question: Past question 1.>'])
class QuestionDetailViewTests(TestCase):
def test_future_question(self):
future_question=create_question(question_text='Future question.',days=5)
url = reverse('polls:detail',args=(future_question.id,))
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_past_question(self):
past_question = create_question(question_text='Past Question.', days=-5)
url = reverse('polls:detail', args=(past_question.id,))
response = self.client.get(url)
self.assertContains(response, past_question.question_text)
``` |
{
"source": "jlant/nwispy",
"score": 3
} |
#### File: nwispy/nwispy/nwispy_helpers.py
```python
__author__ = "<NAME>, <EMAIL>, U.S. Geological Survey, Kentucky Water Science Center."
__copyright__ = "http://www.usgs.gov/visual-id/credit_usgs.html#copyright"
__license__ = __copyright__
__contact__ = __author__
import os
import numpy as np
import datetime
import re
import logging
def now():
"""
Return current date and time in a format that can be used as a file name.
Format: year-month-day_hour.minute.second.microsecond; e.g. 2014-03-18_15.51.46.25
Returns
-------
date_time : string
String of current date and time in %Y-%m-%d_%H.%M.%S.%f format.
Notes
-----
The length of the microsecond string is trimmed to 2 digits.
"""
date_time = datetime.datetime.now().strftime("%Y-%m-%d_%H.%M.%S.%f")[:-4]
return date_time
def get_file_paths(directory, file_ext = None):
"""
Return a list of full file paths from a directory including its subdirectories.
Filter the file paths by file extension when file_ext is given.
Parameters
----------
directory : string
String path
file_ext : string
String file extension; e.g. ".txt"
Returns
-------
file_paths : list
List of strings of full file paths from a directory.
"""
file_paths = []
# Walk the tree.
for root, directories, files in os.walk(directory):
for filename in files:
filepath = os.path.join(root, filename)
if file_ext and filepath.endswith(file_ext):
file_paths.append(filepath)
return file_paths
def get_file_info(path):
"""
Get file directory and name from a file path.
Parameters
----------
path : string
String path
Returns
-------
filedir : string
String file directory path
filename : string
String file name
"""
filedir, filename = os.path.split(path)
# filedir is an empty string when file is in current directory
if not filedir:
filedir = os.getcwd()
return filedir, filename
def make_directory(path, directory_name):
"""
Make a directory if it does not exist.
Parameters
----------
path: string
String path
directory_name : string
String name
Returns
-------
directory_path : string
String path to made directory.
"""
directory_path = os.path.join(path, directory_name)
if not os.path.exists(directory_path):
os.makedirs(directory_path)
return directory_path
def isfloat(value):
"""
Determine if string value can be converted to a float. Return True if
value can be converted to a float and False otherwise.
Parameters
----------
value : string
String value to try to convert to a float.
Returns
-------
bool : bool
Examples
--------
>>> import nwispy_helpers
>>> nwispy_helpers.isfloat(value = "2.5")
True
>>> nwispy_helpers.isfloat(value = "hello world")
False
>>> nwispy_helpers.isfloat(value = "5.5_")
False
"""
try:
float(value)
return True
except ValueError:
return False
def rmspecialchars(value):
"""
Remove any special characters except period (.) and negative (-) from numeric values
Parameters
----------
value : string
String value to remove any existing characters from
Returns
-------
value : string
String value to without any special characters
Examples
--------
>>> import helpers
>>> helpers.rmspecialchars(value = "*6.5_")
6.5
>>> helpers.rmspecialchars(value = "ICE")
ICE
>>> helpers.rmspecialchars(value = "-4.2")
-4.2
>>> helpers.rmspecialchars(value = "")
>>> helpers.rmspecialchars(value = "%&!@#8.32&#*;")
8.32
"""
value = re.sub("[^A-Za-z0-9.-]+", "", value)
return value
def convert_to_float(value, helper_str = None):
"""
Convert a value to a float. If value is not a valid float, log an error
with a helper_str (e.g. the value's corresponding date) to help locate the
error and replace value with a nan.
Parameters
----------
value : string
String value to convert.
helper_str : string
String message to be placed in the error log if value cannot be converted to a float, e.g. the value's corresponding date of occurrence.
Returns
-------
value : {float, nan}
Float or numpy nan value
"""
# remove any special characters present in string value
value = rmspecialchars(value)
if isfloat(value):
value = float(value)
else:
if value == "":
error_str = "*Missing value* {}. *Solution* - Replacing with NaN value".format(helper_str)
logging.warn(error_str)
value = np.nan
else:
error_str = "*Bad value* {}. *Solution* - Replacing with NaN value".format(helper_str)
logging.warn(error_str)
value = np.nan
return value
def create_monthly_dict():
"""
Create a dictionary containing monthly keys and empty lists as initial values
Returns
-------
values_dict : dictionary
Dictionary containing monthly keys with corresponding values.
Notes
-----
{"January": [],
"February": [],
"March": [],
"April": [],
"May": [],
"June": [],
"July": [],
"August": [],
"September": [],
"October": [],
"November": [],
"December": []
}
"""
months = ["January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"]
# initialize dictionary
monthly_dict = {}
for month in months:
monthly_dict[month] = []
return monthly_dict
def compute_simple_stats(data):
"""
Compute simple statistics (mean, max, min) on a data array. Can handle nan values.
If the entire data array consists of only nan values, then log the error and raise a ValueError.
Parameters
----------
data : array
An array of numbers to compute simple statistics on.
Returns
-------
(mean, max, min) : tuple
Returns a tuple of mean, max, and min stats.
Raises
------
ValueError
If data array only contains nan values.
Examples
--------
>>> import nwispy_helpers
>>> import numpy as np
>>> nwispy_helpers.compute_simple_stats([1, 2, 3, 4])
(2.5, 4, 1)
>>> nwispy_helpers.compute_simple_stats([2, np.nan, 6, 1])
(3.0, 6.0, 1.0)
"""
# check if all values are nan
if not np.isnan(data).all():
param_mean = np.nanmean(data)
param_max = np.nanmax(data)
param_min = np.nanmin(data)
return param_mean, param_max, param_min
else:
error_str = "*Bad data* All values are NaN. Please check data"
logging.warn(error_str)
raise ValueError
def subset_data(dates, values, start_date, end_date):
"""
Subset the dates and values arrays to match the range of the start_date
and end_date. If start_date and end_date are not within the range of dates
specified in dates, then the start_date and end_date are set to the
first and last dates in the array dates.
Parameters
----------
dates : array
Array of dates as datetime objects.
values : array
Array of numbers.
start_date : datetime object
A date as a datetime object.
end_date : datetime object
A date as a datetime object.
Returns
-------
(subset_dates, subset_values) : tuple
Tuple of arrays of dates and values that were subsetted.
"""
if len(dates) != len(values):
raise ValueError("Lengths of dates and values are not equal!")
else:
# if start_date or end_date are not within dates, set them to the
# first and last elements in dates
if start_date < dates[0] or start_date > dates[-1]:
start_date = dates[0]
if end_date > dates[-1] or end_date < dates[0]:
end_date = dates[-1]
# find start and ending indices; have to convert idx from array to int to slice
start_idx = int(np.where(dates == start_date)[0])
end_idx = int(np.where(dates == end_date)[0])
# subset variable and date range;
date_subset = dates[start_idx:end_idx + 1]
values_subset = values[start_idx:end_idx + 1]
return date_subset, values_subset
def find_start_end_dates(dates1, dates2):
"""
Find start and end dates between two different sized arrays of datetime
objects.
Parameters
----------
dates1 : list
List of datetime objects.
dates2 : list
List of datetime objects.
Returns
-------
(start_date, end_date) : tuple
Tuple of datetime objects.
"""
# make sure that dates overlap
date1_set = set(dates1)
date2_set = set(dates2)
if date1_set.intersection(date2_set):
# pick later of two dates for start date; pick earlier of two dates for end date
if dates2[0] > dates1[0]:
start_date = dates2[0]
else:
start_date = dates1[0]
if dates2[-1] > dates1[-1]:
end_date = dates1[-1]
else:
end_date = dates2[-1]
return start_date, end_date
else:
raise ValueError("No matching dates for find_start_end_dates()")
def _print_test_info(expected, actual):
"""
For testing purposes, assert that all expected values and actual values match.
Prints assertion error when there is no match. Prints values to user to scan
if interested. Helps a lot for debugging. This function mirrors what is done
in nosetests.
Parameters
----------
expected : dictionary
Dictionary holding expected data values
actual : dictionary
Dictionary holding actual data values
"""
for key in actual.keys():
np.testing.assert_equal(actual[key], expected[key], err_msg = "For key * {} *, actual value(s) * {} * do not equal expected value(s) * {} *".format(key, actual[key], expected[key]))
print("*{}*".format(key))
print(" actual: {}".format(actual[key]))
print(" expected: {}\n".format(expected[key]))
def test_now():
""" Test now() """
print("--- Testing now() ---")
date_time_str = now()
print("Right now is: ")
print(" {}".format(date_time_str))
print("")
def test_get_filepaths():
""" Test get_filepaths() """
print("--- Testing get_filepaths() ---")
file_paths = get_file_paths(os.getcwd(), file_ext = "py")
print("File paths are:")
print(" {}".format(file_paths))
print("")
def test_get_file_info():
""" Test get_file_info functionality """
print("--- Testing get_file_info ---")
# expected values
expected = {}
expected["filedir"] = "C:\Users\jlant\jeremiah\projects\python-projects\waterapputils\waterapputils"
expected["filename"] = "helpers.py"
# actual values
actual = {}
actual["filedir"], actual["filename"] = get_file_info(path = os.path.join(os.getcwd(), "helpers.py"))
# print results
_print_test_info(actual, expected)
def test_make_directory():
""" Test make_directory() """
print("---- Testing make_directory() ----")
# expected values
expected = {"directory_path" : "C:\Users\jlant\jeremiah\projects\python-projects\waterapputils\waterapputils\Testing"}
# actual values
actual = {"directory_path": make_directory(path = os.getcwd(), directory_name = "Testing")}
# print results
_print_test_info(actual, expected)
def test_isfloat():
""" Test isfloat() """
print("--- Testing isfloat() ---")
# expected values
expected = {"2.5": True, "2": True, "string 2.5": True, "hello world": False, "2.5_": False}
# actual values
actual = {"2.5": isfloat(2.5), "2": isfloat(2), "string 2.5": isfloat("2.5"), "hello world": isfloat("hello world"), "2.5_": isfloat("2.5_")}
# print results
_print_test_info(actual, expected)
def test_convert_to_float():
""" Test convert_to_float() """
print("--- Testing convert_to_float() ---")
# expected values
expected = {"4.2": 4.2, "blanks": np.nan}
# actual values
actual = {"4.2": convert_to_float(value = "4.2", helper_str = "My help message"), "blanks": convert_to_float(value = "", helper_str = "My help message")}
# print results
_print_test_info(actual, expected)
def test_rmspecialchars():
""" Test rmspecialchars() """
print("--- Testing rmspecialchars() ---")
# expected values
expected = {"*6.5_": "6.5", "blanks": "", "*$^**(@4.2_+;": "4.2", "-3.6": "-3.6"}
# actual values
actual = {"*6.5_": rmspecialchars("*6.5_"), "blanks": rmspecialchars(""), "*$^**(@4.2_+;": rmspecialchars("*$^**(@4.2_+;"), "-3.6": rmspecialchars("-3.6")}
# print results
_print_test_info(actual, expected)
def test_create_monthly_dict():
""" Test create_monthly_dict """
print("--- Testing create_monthly_dict ---")
# expected values
expected = {"January": [], "February": [], "March": [], "April": [], "May": [], "June": [], "July": [], "August": [], "September": [], "October": [], "November": [], "December": []}
# actual values
actual = create_monthly_dict()
# print results
_print_test_info(actual, expected)
def test_subset_data():
""" Test subset_data() """
print("--- Testing subset_data() for dates within start date and end date ---")
# expected values
expected = {"dates_within_range": [datetime.datetime(2014, 1, 4, 0, 0), datetime.datetime(2014, 1, 5, 0, 0),
datetime.datetime(2014, 1, 6, 0, 0), datetime.datetime(2014, 1, 7, 0, 0),
datetime.datetime(2014, 1, 8, 0, 0), datetime.datetime(2014, 1, 9, 0, 0),
datetime.datetime(2014, 1, 10, 0, 0)],
"values_within_range": np.array([3, 4, 5, 6, 7, 8, 9]),
"dates_outside_range": [datetime.datetime(2014, 1, 1, 0, 0), datetime.datetime(2014, 1, 2, 0, 0), datetime.datetime(2014, 1, 3, 0, 0),
datetime.datetime(2014, 1, 4, 0, 0), datetime.datetime(2014, 1, 5, 0, 0),
datetime.datetime(2014, 1, 6, 0, 0), datetime.datetime(2014, 1, 7, 0, 0),
datetime.datetime(2014, 1, 8, 0, 0), datetime.datetime(2014, 1, 9, 0, 0),
datetime.datetime(2014, 1, 10, 0, 0), datetime.datetime(2014, 1, 11, 0, 0)],
"values_outside_range": np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
}
# data for subset_data function
dates = np.array([datetime.datetime(2014, 01, 01) + datetime.timedelta(i) for i in range(11)])
values = np.array([i for i in range(11)])
# actual values
actual = {}
actual["dates_within_range"], actual["values_within_range"] = subset_data(dates, values, start_date = datetime.datetime(2014, 01, 04), end_date = datetime.datetime(2014, 01, 10))
actual["dates_outside_range"], actual["values_outside_range"] = subset_data(dates, values, start_date = datetime.datetime(2013, 12, 01), end_date = datetime.datetime(2014, 01, 20))
# print results
_print_test_info(actual, expected)
def test_find_start_end_dates1():
""" Testing find_start_end_dates() """
print("--- Testing find_start_end_dates() part 1 ---")
# expected values
expected = {"start_date": datetime.datetime(2014, 01, 03), "end_date": datetime.datetime(2014, 01, 11)}
# first element of dates2 is 2 days later than first element of dates1
dates1 = [datetime.datetime(2014, 01, 01) + datetime.timedelta(i) for i in range(11)]
dates2 = [datetime.datetime(2014, 01, 03)+ datetime.timedelta(i) for i in range(11)]
# actual values
actual = {}
actual["start_date"], actual["end_date"] = find_start_end_dates(dates1, dates2)
# print results
_print_test_info(actual, expected)
def test_find_start_end_dates2():
""" Testing find_start_end_dates() """
print("--- Testing find_start_end_dates() part 2 ---")
# expected values
expected = {"start_date": datetime.datetime(2014, 01, 04), "end_date": datetime.datetime(2014, 01, 12)}
# first element of dates1 is 2 days later than first element of dates2
dates1 = [datetime.datetime(2014, 01, 04) + datetime.timedelta(i) for i in range(11)]
dates2 = [datetime.datetime(2014, 01, 02)+ datetime.timedelta(i) for i in range(11)]
# actual values
actual = {}
actual["start_date"], actual["end_date"] = find_start_end_dates(dates1, dates2)
# print results
_print_test_info(actual, expected)
def main():
""" Test functionality of helpers """
test_now()
test_get_filepaths()
test_get_file_info()
test_make_directory()
test_isfloat()
test_convert_to_float()
test_rmspecialchars()
test_create_monthly_dict()
test_subset_data()
test_find_start_end_dates1()
test_find_start_end_dates2()
if __name__ == "__main__":
main()
```
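The subset/overlap helpers above are typically chained: find the common window shared by two date arrays, then clip a series to that window. A short usage sketch of that pattern, written against the module's public functions (the sample arrays mirror the module's own test data; nothing here is new API):
```python
import datetime

import numpy as np

from nwispy import nwispy_helpers as helpers  # same import style as the tests below

dates = np.array([datetime.datetime(2014, 1, 1) + datetime.timedelta(i) for i in range(11)])
values = np.arange(11)
other_dates = np.array([datetime.datetime(2014, 1, 3) + datetime.timedelta(i) for i in range(11)])

# Latest start date and earliest end date shared by both ranges.
start, end = helpers.find_start_end_dates(dates, other_dates)

# Clip the series to that window; out-of-range bounds snap to the data's limits.
sub_dates, sub_values = helpers.subset_data(dates, values, start_date=start, end_date=end)

print(sub_dates[0], sub_dates[-1], sub_values.mean())
```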
#### File: nwispy/tests/nwispy_helpers_tests.py
```python
import nose.tools
from nose import with_setup
import sys
import numpy as np
import datetime
# my module
from nwispy import nwispy_helpers as helpers
# define the global fixture to hold the data that goes into the functions you test
fixture = {}
def setup():
""" Setup fixture for testing """
print >> sys.stderr, "SETUP: helpers tests"
fixture["dates"] = np.array([datetime.datetime(2014, 01, 01, 0, 0) + datetime.timedelta(i) for i in range(11)])
fixture["values"] = np.array([i for i in range(11)])
fixture["shorter_dates"] = np.array([datetime.datetime(2014, 01, 03, 0, 0) + datetime.timedelta(i) for i in range(11)])
fixture["longer_dates"] = np.array([datetime.datetime(2013, 12, 01, 0, 0) + datetime.timedelta(i) for i in range(180)])
def teardown():
""" Print to standard error when all tests are finished """
print >> sys.stderr, "TEARDOWN: helpers tests"
def test_isfloat():
nose.tools.assert_equals(True, helpers.isfloat(6.25))
nose.tools.assert_equals(True, helpers.isfloat("6.25"))
nose.tools.assert_equals(False, helpers.isfloat("2.5_"))
nose.tools.assert_equals(False, helpers.isfloat("hello"))
def test_convert_to_float():
nose.tools.assert_equals(6.25, helpers.convert_to_float("6.25", helper_str = "My help message"))
nose.tools.assert_equals(2.5, helpers.convert_to_float("2.5_", helper_str = "My help message"))
nose.tools.assert_almost_equals(np.array(np.nan).all(), np.array(helpers.convert_to_float("", helper_str = "My help message")).all())
nose.tools.assert_almost_equals(np.array(np.nan).all(), np.array(helpers.convert_to_float("hello", helper_str = "My help message")).all())
def test_rmspecialchars():
nose.tools.assert_equals("6.5", helpers.rmspecialchars("*6.5_"))
nose.tools.assert_equals("4.25", helpers.rmspecialchars("*$^**(@4.25_+;"))
nose.tools.assert_equals("-4.1", helpers.rmspecialchars("-4.1"))
def test_create_monthly_dict():
expected = {"January": [], "February": [], "March": [], "April": [], "May": [], "June": [], "July": [], "August": [], "September": [], "October": [], "November": [], "December": []}
actual = helpers.create_monthly_dict()
nose.tools.assert_equals(len(expected.keys()), len(actual.keys()))
nose.tools.assert_equals(expected["January"], actual["January"])
nose.tools.assert_equals(expected["February"], actual["February"])
nose.tools.assert_equals(expected["April"], actual["April"])
nose.tools.assert_equals(expected["May"], actual["May"])
nose.tools.assert_equals(expected["June"], actual["June"])
nose.tools.assert_equals(expected["July"], actual["July"])
nose.tools.assert_equals(expected["August"], actual["August"])
nose.tools.assert_equals(expected["September"], actual["September"])
nose.tools.assert_equals(expected["October"], actual["October"])
nose.tools.assert_equals(expected["November"], actual["November"])
nose.tools.assert_equals(expected["December"], actual["December"])
def test_subset_data_dates_within_range():
start = datetime.datetime(2014, 01, 04)
end = datetime.datetime(2014, 01, 10)
expected_dates = np.array([datetime.datetime(2014, 1, 4, 0, 0), datetime.datetime(2014, 1, 5, 0, 0),
datetime.datetime(2014, 1, 6, 0, 0), datetime.datetime(2014, 1, 7, 0, 0),
datetime.datetime(2014, 1, 8, 0, 0), datetime.datetime(2014, 1, 9, 0, 0),
datetime.datetime(2014, 1, 10, 0, 0)])
expected_values = np.array([3, 4, 5, 6, 7, 8, 9])
actual_dates, actual_values = helpers.subset_data(dates = fixture["dates"],
values = fixture["values"],
start_date = start,
end_date = end)
nose.tools.assert_equals(actual_dates.all(), expected_dates.all())
nose.tools.assert_equals(actual_values.all(), expected_values.all())
def test_subset_data_dates_outside_range():
start = datetime.datetime(2013, 12, 01)
end = datetime.datetime(2014, 01, 20)
expected_dates = np.array([datetime.datetime(2014, 1, 1, 0, 0), datetime.datetime(2014, 1, 2, 0, 0),
datetime.datetime(2014, 1, 3, 0, 0), datetime.datetime(2014, 1, 4, 0, 0),
datetime.datetime(2014, 1, 5, 0, 0), datetime.datetime(2014, 1, 6, 0, 0),
datetime.datetime(2014, 1, 7, 0, 0), datetime.datetime(2014, 1, 8, 0, 0),
datetime.datetime(2014, 1, 9, 0, 0), datetime.datetime(2014, 1, 10, 0, 0),
datetime.datetime(2014, 1, 11, 0, 0)])
expected_values = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
actual_dates, actual_values = helpers.subset_data(dates = fixture["dates"],
values = fixture["values"],
start_date = start,
end_date = end)
nose.tools.assert_equals(actual_dates.all(), expected_dates.all())
nose.tools.assert_equals(actual_values.all(), expected_values.all())
def test_find_start_end_dates_shorter_range():
expected_start_date = datetime.datetime(2014, 01, 03, 0, 0)
expected_end_date = datetime.datetime(2014, 01, 11, 0, 0)
actual_start_date, actual_end_date = helpers.find_start_end_dates(fixture["dates"], fixture["shorter_dates"])
nose.tools.assert_equals(actual_start_date, expected_start_date)
nose.tools.assert_equals(actual_end_date, expected_end_date)
def test_find_start_end_dates_longer_range():
expected_start_date = datetime.datetime(2014, 01, 01, 0, 0)
expected_end_date = datetime.datetime(2014, 01, 11, 0, 0)
actual_start_date, actual_end_date = helpers.find_start_end_dates(fixture["dates"], fixture["longer_dates"])
nose.tools.assert_equals(actual_start_date, expected_start_date)
nose.tools.assert_equals(actual_end_date, expected_end_date)
``` |
{
"source": "jlant/playground",
"score": 4
} |
#### File: python/hello-generators/hellogenerators.py
```python
nums = [1, 2, 3, 4, 5]
# list comprehension
squares1 = [num * num for num in nums]
print(squares1)
# generator comprehension
squares2 = (num * num for num in nums)
print(next(squares2))
print(next(squares2))
#for square in squares2:
# print(square)
# using lists
def gen_squares1(nums):
results = []
for num in nums:
results.append(num * num)
return results
# using generators
def gen_squares2(nums):
for num in nums:
yield num * num
```
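A quick usage sketch contrasting the two helpers above (assuming they are defined or imported in the same session): the list version materializes every square up front, while the generator version produces them lazily and is exhausted after one pass.
```python
nums = [1, 2, 3, 4, 5]

print(gen_squares1(nums))      # [1, 4, 9, 16, 25] -- built eagerly as a list

squares = gen_squares2(nums)   # nothing computed yet
print(next(squares))           # 1
print(list(squares))           # [4, 9, 16, 25] -- remaining items; generator now exhausted
print(list(squares))           # [] -- a generator can only be consumed once
```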
#### File: python/hello-pytest/test_lists.py
```python
def alist():
return [0,1,2,3,4,5]
def test_alist():
assert alist() == [0,1,2,3,4,6]
``` |
{
"source": "JLaoo/pdc",
"score": 2
} |
#### File: pdc/spack/package.py
```python
from spack import *
class Pdc(CMakePackage):
"""Proactive Data Containers (PDC) software provides an object-centric
API and a runtime system with a set of data object management services.
These services allow placing data in the memory and storage hierarchy,
performing data movement asynchronously, and providing scalable
metadata operations to find data objects."""
homepage = "https://pdc.readthedocs.io/en/latest/"
url = "https://github.com/hpc-io/pdc/archive/refs/tags/0.1.tar.gz"
maintainers = ['houjun', 'sbyna']
version('0.1', sha256='24787806a30cd1cda1fed17220a62e768bdba5de56877f2ea7126279ff2a4f69')
conflicts('%clang')
depends_on('[email protected]')
depends_on('mercury')
depends_on('mpi')
root_cmakelists_dir = 'src'
def cmake_args(self):
args = [
self.define('MPI_C_COMPILER', self.spec['mpi'].mpicc),
self.define('BUILD_MPI_TESTING', 'ON'),
self.define('BUILD_SHARED_LIBS', 'ON'),
self.define('BUILD_TESTING', 'ON'),
self.define('PDC_ENABLE_MPI', 'ON'),
self.define('CMAKE_C_COMPILER', self.spec['mpi'].mpicc)
]
if self.spec.satisfies('platform=cray'):
args.append("-DRANKSTR_LINK_STATIC=ON")
return args
``` |
{
"source": "JLaoo/wikiracer",
"score": 3
} |
#### File: wikiracer/spiders/wiki_scraper.py
```python
import scrapy
from bs4 import BeautifulSoup as bs
from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings
import requests
import re
#Replace this
class spider(scrapy.Spider):
def __init__(self):
self.name = 'spider'
self.allowed_domains = ['en.wikipedia.org']
api_url = 'https://api.thewikigame.com/api/v1/group/22033570-e1fd-4a9f-9a96-9068082b88aa/current-round/'
headers = {
'Authorization': 'Token <PASSWORD>' #Might need to change this
}
response = requests.get(api_url, headers=headers)
start_index = response.text.index('"start_article"')
start_index = response.text[start_index:].index('link') + start_index
start_link = response.text[start_index+6:].split(',')[0]
start_link = 'https://en.wikipedia.org/wiki/'+start_link.replace('"', '')
end_index = response.text.index('"goal_article"')
end_index = response.text[end_index:].index('link') + end_index
end_link = response.text[end_index+6:].split(',')[0]
self.end_link = 'https://en.wikipedia.org/wiki/'+end_link.replace('"', '')
self.start_urls = [start_link]
self.dont_overwrite = False
def get_page_name(self, url):
return url.replace('https://en.wikipedia.org/wiki/', '')
def start_requests(self):
url = self.start_urls[0]
path = self.get_page_name(url)
yield scrapy.Request(url=url,
callback=self.parse,
meta={'path': path},
errback=self.handle_failure)
def handle_failure(self, failure):
yield scrapy.Request(url=failure.request.url,
callback=self.parse,
meta={'path': failure.request.meta['path']},
errback=self.handle_failure)
def parse(self, response):
soup = bs(response.text, 'html.parser')
links = []
for link in soup.findAll('a', attrs={'href': re.compile('^/wiki/')}):
path = link.get('href')[6:]
not_allowed = ['Special:', 'Wikipedia:', 'Portal:', 'Category:', 'File:', 'Template:', 'Template_talk:', 'Help:', 'Talk:']
allowed = True
for word in not_allowed:
if path.startswith(word):
allowed = False
break
if allowed and path != 'Main_Page':
links.append(path)
links = list(set(links))
links = ['https://en.wikipedia.org/wiki/'+l for l in links]
for link in links:
if self.get_page_name(link) in response.meta['path']:  # skip pages already visited on this path
continue
new_path = response.meta['path']+', '+self.get_page_name(link)
if link == self.end_link and self.dont_overwrite == False:
with open('path.txt', 'w') as outfile:
outfile.write(new_path)
self.dont_overwrite = True  # set before raising so in-flight callbacks don't overwrite path.txt
raise scrapy.exceptions.CloseSpider('Path Found!')
yield scrapy.Request(url=link,
callback=self.parse,
meta={'path': new_path},
errback=self.handle_failure)
def find_best_path():
process = CrawlerProcess(get_project_settings())
process.crawl(spider)
process.start()
find_best_path()
``` |
{
"source": "jlapenna/bikebuds",
"score": 2
} |
#### File: gae/api/admin.py
```python
import logging
import flask
from flask_restx import Resource, Namespace, fields
import stravalib
from shared import auth_util
from shared import ds_util
from shared import responses
from shared import task_util
from shared.config import config
from shared.datastore.club import Club
from shared.datastore.service import Service
from shared.services.strava.club_worker import ClubWorker as StravaClubWorker
from shared.services.withings.client import create_client as withings_create_client
from models import (
EntityModel,
club_entity_model,
key_model,
service_entity_model,
service_model,
sync_model,
user_entity_model,
WrapEntity,
)
api = Namespace('admin', 'Bikebuds Admin API')
auth_url_model = api.model('AuthUrl', {'auth_url': fields.String})
user_state_model = api.model(
'UserState',
{
'user': fields.Nested(user_entity_model, skip_none=True),
'google': fields.Nested(service_model, skip_none=True),
'strava': fields.Nested(service_model, skip_none=True),
'withings': fields.Nested(service_model, skip_none=True),
'fitbit': fields.Nested(service_model, skip_none=True),
},
)
slack_workspace_model = api.model('SlackWorkspace', {})
slack_workspace_entity_model = EntityModel(slack_workspace_model)
slack_model = api.model(
'Slack',
{
'service': fields.Nested(service_entity_model, skip_none=True),
'workspaces': fields.Nested(slack_workspace_entity_model),
'installers': fields.Wildcard(fields.String),
'bots': fields.Wildcard(fields.String),
},
)
@api.route('/strava_auth_url')
class StravaAuthUrl(Resource):
@api.doc('get_strava_auth_url')
@api.marshal_with(auth_url_model, skip_none=True)
def get(self):
auth_util.get_bot(flask.request)
redirect_uri = config.frontend_url + '/services/strava/echo'
client = stravalib.client.Client()
url = client.authorization_url(
client_id=config.strava_creds['client_id'],
redirect_uri=redirect_uri,
approval_prompt='force',
scope=['activity:read_all', 'profile:read_all'],
)
return {'auth_url': url}
@api.route('/process_events')
class ProcessEventsResource(Resource):
def get(self):
auth_util.get_bot(flask.request)
sub_events_query = ds_util.client.query(kind='SubscriptionEvent')
for sub_event in sub_events_query.fetch():
task_util.process_event(sub_event.key)
return responses.OK
@api.route('/subscription/remove')
class RemoveSubscriptionResource(Resource):
@api.doc('remove_subscription')
def post(self):
auth_util.get_bot(flask.request)
callbackurl = flask.request.form.get('callbackurl', None)
logging.info('Unsubscribing: %s', callbackurl)
if callbackurl is None or 'withings' not in callbackurl:
return responses.BAD_REQUEST
services_query = ds_util.client.query(kind='Service')
services_query.add_filter('sync_enabled', '=', True)
services = [
service
for service in services_query.fetch()
if service.key.name == 'withings' and service.get('credentials') is not None
]
for service in services:
logging.info('Unsubscribing: %s from %s', callbackurl, service.key)
client = withings_create_client(service)
results = []
try:
result = client.unsubscribe(callbackurl)
logging.info(
'Unsubscribed %s from %s (%s)', callbackurl, service.key, result
)
results.append(
{
'callbackurl': callbackurl,
'result': str(result),
'service': str(service.key),
}
)
except Exception as e:
logging.exception(
'Unable to unsubscribe %s from %s', callbackurl, service.key
)
results.append(
{
'callbackurl': callbackurl,
'error': str(e),
'service': str(service.key),
}
)
return results
@api.route('/clubs')
class GetClubsResource(Resource):
@api.doc('get_clubs')
@api.marshal_with(club_entity_model, skip_none=True)
def get(self):
bot = auth_util.get_bot(flask.request)
service = Service.get('strava', parent=bot.key)
club_query = ds_util.client.query(kind='Club', ancestor=service.key)
return [WrapEntity(club) for club in club_query.fetch()]
@api.route('/users')
class GetUsersResource(Resource):
@api.doc('get_users')
@api.marshal_with(user_state_model, skip_none=True)
def get(self):
bot = auth_util.get_bot(flask.request)
user_entities = [bot]
user_entities += ds_util.client.query(kind='User').fetch()
users = []
for user_entity in user_entities:
users.append(
{
'user': WrapEntity(user_entity),
'google': Service.get('google', parent=user_entity.key),
'strava': Service.get('strava', parent=user_entity.key),
'withings': Service.get('withings', parent=user_entity.key),
'fitbit': Service.get('fitbit', parent=user_entity.key),
}
)
return users
@api.route('/delete')
class DeleteResource(Resource):
@api.doc('delete', body=key_model)
@api.marshal_with(key_model, skip_none=True)
def post(self):
auth_util.get_bot(flask.request)
key = ds_util.key_from_path(api.payload.get('path'))
if key is None or ds_util.client.get(key) is None:
logging.debug('No entity with key: %s', key)
return key
children_query = ds_util.client.query(ancestor=key)
children_query.keys_only()
ds_util.client.delete_multi(child.key for child in children_query.fetch())
return key
@api.route('/sync/club/<club_id>')
class SyncClubResource(Resource):
@api.doc('sync_club')
@api.marshal_with(club_entity_model, skip_none=True)
def get(self, club_id):
bot = auth_util.get_bot(flask.request)
service = Service.get('strava', parent=bot.key)
club = StravaClubWorker(club_id, service).sync()
return WrapEntity(club)
@api.route('/club/track/<club_id>')
class ClubTrackResource(Resource):
@api.doc('track_club')
@api.marshal_with(club_entity_model, skip_none=True)
def get(self, club_id):
bot = auth_util.get_bot(flask.request)
service = Service.get('strava', parent=bot.key)
club = StravaClubWorker(club_id, service).sync_club()
return WrapEntity(club)
@api.route('/club/untrack/<club_id>')
class ClubUntrackResource(Resource):
@api.doc('untrack_club')
@api.marshal_with(club_entity_model, skip_none=True)
def get(self, club_id):
bot = auth_util.get_bot(flask.request)
service = Service.get('strava', parent=bot.key)
club = Club.get(club_id, parent=service.key)
if club is not None:
ds_util.client.delete(club.key)
return None
@api.route('/slack')
class SlackResource(Resource):
@api.doc('get_slack')
@api.marshal_with(slack_model, skip_none=True)
def get(self):
bot = auth_util.get_bot(flask.request)
service = Service.get('slack', parent=bot.key)
query = ds_util.client.query(kind='SlackWorkspace', ancestor=service.key)
workspaces = [e for e in query.fetch()]
return {'service': WrapEntity(service), 'workspaces': workspaces}
@api.route('/service/<name>')
class ServiceResource(Resource):
@api.doc('get_admin_service')
@api.marshal_with(service_entity_model, skip_none=True)
def get(self, name):
bot = auth_util.get_bot(flask.request)
service = Service.get(name, parent=bot.key)
return WrapEntity(service)
@api.route('/sync/<name>')
class SyncResource(Resource):
@api.doc('sync_admin_service', body=sync_model)
@api.marshal_with(service_entity_model, skip_none=True)
def post(self, name):
bot = auth_util.get_bot(flask.request)
force = api.payload.get('force', False)
service = Service.get(name, parent=bot.key)
task_util.sync_service(service, force=force)
return WrapEntity(service)
@api.route('/admin_disconnect/<name>')
class ServiceDisconnect(Resource):
@api.doc('admin_disconnect')
@api.marshal_with(service_entity_model, skip_none=True)
def post(self, name):
bot = auth_util.get_bot(flask.request)
service = Service.get(name, parent=bot.key)
Service.clear_credentials(service)
ds_util.client.put(service)
return WrapEntity(service)
```
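`StravaAuthUrl` above only produces the authorization URL; after the user approves, Strava redirects back (here to `/services/strava/echo`) with a `code` query parameter that still has to be exchanged for tokens. A hedged sketch of that exchange using stravalib — the actual callback handler lives elsewhere in this repo and may differ, and `exchange_strava_code` is an illustrative name, not a function from this codebase:
```python
import stravalib


def exchange_strava_code(code: str, client_id: str, client_secret: str) -> dict:
    """Trade the OAuth authorization code for access/refresh tokens."""
    client = stravalib.client.Client()
    token_response = client.exchange_code_for_token(
        client_id=client_id,
        client_secret=client_secret,
        code=code,
    )
    # token_response carries access_token, refresh_token and expires_at.
    return token_response
```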
#### File: gae/api/bikebuds.py
```python
import logging
import datetime
import flask
from flask_restx import Namespace, Resource
from shared import auth_util
from shared import ds_util
from shared import task_util
from shared.datastore.athlete import Athlete
from shared.datastore.bot import Bot
from shared.datastore.client_state import ClientState
from shared.datastore.club import Club
from shared.datastore.service import Service
from shared.datastore.series import Series
from models import (
activity_entity_model,
auth_model,
backfill_model,
client_state_entity_model,
client_state_model,
club_entity_model,
connect_userpass_model,
filter_parser,
get_arg,
measure_model,
preferences_model,
profile_model,
route_entity_model,
segment_entity_model,
segments_parser,
series_entity_model,
service_entity_model,
sync_model,
WrapEntity,
)
api = Namespace('bikebuds', 'Bikebuds API')
logging.info('Loading Bikebuds API')
@api.route('/activities')
class ActivitiesResource(Resource):
@api.doc('get_activities')
@api.marshal_with(activity_entity_model, skip_none=True, as_list=True)
def get(self):
user = auth_util.get_user(flask.request)
service = Service.get('strava', parent=user.key)
activities_query = ds_util.client.query(
kind='Activity', ancestor=service.key, order=['-start_date']
)
one_year_ago = datetime.datetime.now(
datetime.timezone.utc
) - datetime.timedelta(days=365)
activities_query.add_filter('start_date', '>', one_year_ago)
return [WrapEntity(a) for a in activities_query.fetch(limit=20)]
@api.route('/clients')
class ClientsResource(Resource):
@api.doc('get_clients')
@api.marshal_with(client_state_entity_model, skip_none=True, as_list=True)
def get(self):
user = auth_util.get_user(flask.request)
clients_query = ds_util.client.query(
kind='ClientState', ancestor=user.key, order=['-modified']
)
return [WrapEntity(c) for c in clients_query.fetch()]
@api.route('/client/<client_id>')
class ClientResource(Resource):
@api.doc('get_client')
@api.marshal_with(client_state_entity_model, skip_none=True)
def get(self, client_id):
user = auth_util.get_user(flask.request)
return WrapEntity(ClientState.get(client_id, parent=user.key))
@api.route('/update_client')
class UpdateClientResource(Resource):
@api.doc('update_client', body=client_state_model)
@api.marshal_with(client_state_entity_model, skip_none=True)
def post(self):
user = auth_util.get_user(flask.request)
new_client = api.payload
existing_client = ClientState.get(new_client['token'], parent=user.key)
existing_client.update(new_client)
existing_client['modified'] = datetime.datetime.now(datetime.timezone.utc)
ds_util.client.put(existing_client)
return WrapEntity(existing_client)
@api.route('/club/<club_id>')
class ClubResource(Resource):
@api.doc('get_club')
@api.marshal_with(club_entity_model, skip_none=True)
def get(self, club_id):
club_id = int(club_id)
auth_util.get_user(flask.request)
bot_strava = Service.get('strava', parent=Bot.key())
club = Club.get(club_id, parent=bot_strava.key)
# Find the user's club reference.
if club is None:
flask.abort(404)
return WrapEntity(club)
@api.route('/club/<club_id>/activities')
class ClubActivitiesResource(Resource):
@api.doc('get_club_activities')
@api.marshal_with(activity_entity_model, skip_none=True, as_list=True)
def get(self, club_id):
club_id = int(club_id)
auth_util.get_user(flask.request)
bot_strava = Service.get('strava', parent=Bot.key())
club = Club.get(club_id, parent=bot_strava.key)
# Find the user's club reference.
if club is None:
flask.abort(404)
activities_query = ds_util.client.query(kind='Activity', ancestor=club.key)
all_activities = [a for a in activities_query.fetch()]
return [WrapEntity(a) for a in all_activities]
@api.route('/preferences')
class PreferencesResource(Resource):
@api.doc('update_preferences', body=preferences_model)
@api.marshal_with(preferences_model, skip_none=True)
def post(self):
user = auth_util.get_user(flask.request)
user['preferences'].update(api.payload)
ds_util.client.put(user)
return user['preferences']
@api.route('/profile')
class ProfileResource(Resource):
@api.doc('get_profile')
@api.marshal_with(profile_model, skip_none=True)
def get(self):
user = auth_util.get_user(flask.request)
strava = Service.get('strava', parent=user.key)
strava_connected = Service.has_credentials(strava)
athlete = Athlete.get_private(strava.key)
return dict(
user=WrapEntity(user),
athlete=WrapEntity(athlete),
signup_complete=strava_connected,
)
@api.route('/routes')
class RoutesResource(Resource):
@api.doc('get_routes')
@api.marshal_with(route_entity_model, skip_none=True, as_list=True)
def get(self):
user = auth_util.get_user(flask.request)
service = Service.get('strava', parent=user.key)
routes_query = ds_util.client.query(
kind='Route', ancestor=service.key, order=['-id']
)
routes = [WrapEntity(a) for a in routes_query.fetch()]
return routes
@api.route('/segments')
class SegmentsResource(Resource):
@api.doc('get_segments')
@api.marshal_with(segment_entity_model, skip_none=True, as_list=True)
def get(self):
user = auth_util.get_user(flask.request)
service = Service.get('strava', parent=user.key)
segments_query = ds_util.client.query(
kind='Segment', ancestor=service.key, order=['-id']
)
segments = [WrapEntity(a) for a in segments_query.fetch()]
return segments
@api.route('/segments/compare')
@api.expect(segments_parser)
class SegmentsCompareResource(Resource):
@api.doc('compare_segments')
@api.marshal_with(segment_entity_model, skip_none=True, as_list=True)
def get(self):
user = auth_util.get_user(flask.request)
service = Service.get('strava', parent=user.key)
segments_arg = get_arg('segments')
segments = []
for segment_id in segments_arg:
entity = ds_util.client.get(
ds_util.client.key('Segment', segment_id, parent=service.key)
)
segments.append(WrapEntity(entity))
return segments
@api.route('/series')
@api.expect(filter_parser)
class SeriesResource(Resource):
@api.doc('get_series')
@api.marshal_with(series_entity_model, skip_none=True)
def get(self):
user = auth_util.get_user(flask.request)
service_name = user['preferences']['weight_service'].lower()
series = Series.get(
ds_util.client.key('Service', service_name, parent=user.key)
)
if series is None:
return WrapEntity(None)
filter_arg = get_arg('filter')
if filter_arg:
series['measures'] = [m for m in series['measures'] if filter_arg in m]
return WrapEntity(series)
@api.route('/service/<name>')
class ServiceResource(Resource):
@api.doc('get_service')
@api.marshal_with(service_entity_model, skip_none=True)
def get(self, name):
user = auth_util.get_user(flask.request)
service = Service.get(name, parent=user.key)
return WrapEntity(service)
@api.doc('update_service', body=service_entity_model)
@api.marshal_with(service_entity_model, skip_none=True)
def post(self, name):
user = auth_util.get_user(flask.request)
new_service = api.payload
existing_service = Service.get(name, parent=user.key)
existing_service.update(new_service)
ds_util.client.put(existing_service)
return WrapEntity(existing_service)
@api.route('/connect_userpass/<name>')
class ConnectUserPassResource(Resource):
@api.doc('connect_userpass', body=connect_userpass_model)
@api.marshal_with(service_entity_model, skip_none=True)
def post(self, name):
user = auth_util.get_user(flask.request)
connect_userpass = api.payload
service = Service.get(name, parent=user.key)
Service.update_credentials_userpass(service, connect_userpass)
ds_util.client.put(service)
return WrapEntity(service)
@api.route('/disconnect/<name>')
class ServiceDisconnect(Resource):
@api.doc('disconnect')
@api.marshal_with(service_entity_model, skip_none=True)
def post(self, name):
user = auth_util.get_user(flask.request)
service = Service.get(name, parent=user.key)
Service.clear_credentials(service)
ds_util.client.put(service)
return WrapEntity(service)
@api.route('/sync/<name>')
class SyncResource(Resource):
@api.doc('sync_service', body=sync_model)
@api.marshal_with(service_entity_model, skip_none=True)
def post(self, name):
user = auth_util.get_user(flask.request)
force = api.payload.get('force', False)
service = Service.get(name, parent=user.key)
task_util.sync_service(service, force=force)
return WrapEntity(service)
@api.route('/weight')
class WeightResource(Resource):
@api.doc('weight', body=measure_model, validate=True)
@api.marshal_with(measure_model, skip_none=True)
def post(self):
user = auth_util.get_user(flask.request)
measure = api.payload
# In a world with useful documentation, i'd know why the date value
# isn't being converted to a date time, even though the model says it
# is. So, convert it.
measure['date'] = datetime.datetime.fromisoformat(measure['date'])
task_util.xsync_tasks_measure(user.key, measure)
return measure
@api.route('/backfill')
class BackfillResource(Resource):
@api.doc('backfill', body=backfill_model)
@api.marshal_with(backfill_model, skip_none=True)
def post(self):
user = auth_util.get_user(flask.request)
backfill = api.payload
source = Service.get(backfill['source'], parent=user.key)
dest = Service.get(backfill['dest'], parent=user.key)
start = datetime.datetime.fromisoformat(backfill.get('start'))
end = datetime.datetime.fromisoformat(backfill.get('end'))
task_util.xsync_tasks_backfill(source.key, dest.key, start, end)
# TODO: pre-check there are credentials.
return backfill
@api.route('/auth')
class AuthResource(Resource):
@api.doc('auth')
@api.marshal_with(auth_model, skip_none=True)
def get(self):
custom_token = auth_util.create_custom_token(flask.request)
return {'token': custom_token.decode('utf-8')}
@api.route('/unittest')
class UnittestResource(Resource):
def get(self):
return None
```
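Both `admin` and `bikebuds` above are flask_restx `Namespace` objects; somewhere in the app factory they get mounted on a single `Api`. A minimal sketch of that wiring under assumed module paths and URL prefixes — the real app setup in this repo may differ:
```python
import flask
from flask_restx import Api

# Assumed import locations for the two namespaces defined above.
from api.admin import api as admin_ns
from api.bikebuds import api as bikebuds_ns

app = flask.Flask(__name__)
api = Api(app, title='Bikebuds API', version='1.0')

api.add_namespace(admin_ns, path='/admin')        # assumed mount path
api.add_namespace(bikebuds_ns, path='/bikebuds')  # assumed mount path

if __name__ == '__main__':
    app.run(port=8080)
```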
#### File: services/google/google.py
```python
import base64
import json
import logging
import re
import flask
from bs4 import BeautifulSoup
from google.cloud.datastore.entity import Entity
from shared import auth_util
from shared import ds_util
from shared import responses
from shared import task_util
from shared.config import config
from shared.datastore.bot import Bot
from shared.datastore.service import Service
from shared.datastore.user import User
from shared.exceptions import SyncException
from shared.services.google.client import create_gmail_client
import sync_helper
module = flask.Blueprint('google', __name__)
SUBJECT_REGEX = re.compile(r"Watch (?P<name>.*)'s live activity now!")
@module.route('/tasks/sync', methods=['POST'])
def sync():
logging.debug('Syncing: google')
payload = task_util.get_payload(flask.request)
service = ds_util.client.get(payload['service_key'])
if not Service.has_credentials(service):
Service.set_sync_finished(service, error='No credentials')
return responses.OK_NO_CREDENTIALS
try:
Service.set_sync_started(service)
client = create_gmail_client(service)
sync_helper.do(Worker(service, client), work_key=service.key)
Service.set_sync_finished(service)
return responses.OK
except SyncException as e:
Service.set_sync_finished(service, error=str(e))
return responses.OK_SYNC_EXCEPTION
@module.route('/pubsub/rides', methods=['POST'])
def pubsub_rides():
if flask.request.args.get('token', '') != config.pubsub_creds['token']:
return responses.INVALID_TOKEN
envelope = json.loads(flask.request.data)
data = json.loads(base64.b64decode(envelope['message']['data']))
logging.debug('pubsub_rides: Received data: %s', data)
uid = auth_util.get_uid_by_email(data['emailAddress'])
user = User.from_uid(uid)
task_util.google_tasks_rides(user, data)
return responses.OK
@module.route('/tasks/rides', methods=['POST'])
def tasks_rides():
payload = task_util.get_payload(flask.request)
service = Service.get('google', Bot.key())
data = payload['data']
logging.info('tasks_rides: %s', data.get('historyId'))
try:
client = create_gmail_client(service)
sync_helper.do(
PubsubWorker(service, data, client),
work_key='%s/%s' % (service.key.parent.name, data['historyId']),
)
except SyncException:
return responses.OK_SYNC_EXCEPTION
return responses.OK
class Worker(object):
def __init__(self, service, client):
self.service = service
self.client = client
def sync(self):
request = {
'labelIds': ['INBOX'],
'topicName': 'projects/%s/topics/rides' % (config.project_id,),
}
watch = self.client.users().watch(userId='me', body=request).execute()
self.service['settings']['watch'] = watch
logging.debug('Watching: %s -> %s', self.service.key, watch)
# Maybe Trigger a sync starting from the existing historyId
synced_history_id = self.service['settings'].get('synced_history_id', 0)
logging.debug(
'synced_history_id: %s, watch history id: %s',
synced_history_id,
watch['historyId'],
)
if synced_history_id == 0:
logging.debug('synced_history_id is 0, doing full_sync')
return self.full_sync()
elif synced_history_id < int(watch['historyId']):
# This shouldn't ever happen, since we use pubsub, but if it does,
# we need to sync the latest.
logging.warn(
f"synced_history_id unexpectedly low: {synced_history_id} < {watch['historyId']}"
)
user = ds_util.client.get(self.service.key.parent)
task_util.google_tasks_rides(user, {'historyId': synced_history_id})
return responses.OK
else:
logging.debug('Nothing to sync')
return responses.OK
def full_sync(self):
def process_message(request_id, response, exception):
message_history_id = int(response['historyId'])
synced_history_id = self.service['settings'].get('synced_history_id', 0)
if message_history_id > synced_history_id:
self.service['settings']['synced_history_id'] = message_history_id
ds_util.client.put(self.service)
garmin_url = _extract_garmin_url(request_id, response)
if garmin_url is not None:
task_util.garmin_tasks_livetrack(garmin_url, publish=False)
logging.debug('Fetching the latest 100 messages')
request = (
self.client.users()
.messages()
.list(
userId='me',
labelIds=['INBOX'],
maxResults=100, # Also the default, but being explicit.
)
)
batch = self.client.new_batch_http_request(callback=process_message)
batch_request_ids = []
while request is not None and len(batch_request_ids) < 100:
response = request.execute()
for message in response['messages']:
if len(batch_request_ids) >= 100:
break
if message['id'] in batch_request_ids:  # skip ids already queued in this batch
continue
batch.add(
self.client.users()
.messages()
.get(userId='me', id=message['id'], format='full'),
request_id=message['id'],
)
batch_request_ids.append(message['id'])
request = self.client.users().messages().list_next(request, response)
batch.execute()
return responses.OK
class PubsubWorker(object):
def __init__(self, service: Entity, data: dict, client):
self.service = service
self.data = data
self.client = client
def sync(self):
logging.info('PubsubWorker: sync')
def process_message(request_id, response, exception):
message_history_id = int(response['historyId'])
synced_history_id = self.service['settings'].get('synced_history_id', 0)
if message_history_id > synced_history_id:
self.service['settings']['synced_history_id'] = message_history_id
garmin_url = _extract_garmin_url(request_id, response)
if garmin_url is not None:
task_util.garmin_tasks_livetrack(garmin_url, publish=True)
request = (
self.client.users()
.history()
.list(
userId='me',
labelId='INBOX',
startHistoryId=self.service['settings'].get('synced_history_id'),
)
)
batch = self.client.new_batch_http_request(callback=process_message)
while request is not None:
response = request.execute()
for history in response.get('history', []):
for message in history.get('messages', []):
batch.add(
self.client.users()
.messages()
.get(userId='me', id=message['id'], format='full'),
request_id=message['id'],
)
request = self.client.users().history().list_next(request, response)
batch.execute()
ds_util.client.put(self.service)
return responses.OK
def _extract_garmin_url(request_id, response):
logging.debug('Extracting Garmin URL: %s', request_id)
headers = dict(
[
(header['name'], header['value'])
for header in response['payload']['headers']
if header['name'] in ('From', 'To', 'Subject')
]
)
logging.debug('Fetched message: %s', request_id)
if headers.get('From') != 'Garmin <<EMAIL>>':
logging.debug('Not a garmin email: %s (Wrong From)', request_id)
return
subject = SUBJECT_REGEX.match(headers.get('Subject', ''))
name = subject.groupdict().get('name', None) if subject else None
if name is None:
logging.debug('Not a garmin email: %s (Wrong Subject)', request_id)
return
body = base64.urlsafe_b64decode(
response['payload']['body']['data'].encode('ASCII')
).decode('utf-8')
soup = BeautifulSoup(body, features='html.parser')
livetrack_urls = [
link['href']
for link in soup.findAll('a', href=re.compile('livetrack.garmin.com'))
]
if not livetrack_urls:
logging.debug('Invalid Garmin email: %s (Unparsable)', request_id)
return
if len(livetrack_urls) != 1:
logging.debug(
'Invalid Garmin email: %s (Too many livetrack URLs: %s)',
request_id,
len(livetrack_urls),
)
return
return livetrack_urls[0]
```
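Both workers above rely on the google-api-python-client batching pattern: queue many `users().messages().get()` requests on one batch, then let a single callback inspect each response. A stripped-down sketch of that pattern, assuming an authenticated `client` like the one returned by `create_gmail_client` (the helper name `fetch_messages_in_batch` is illustrative, not part of this codebase):
```python
def fetch_messages_in_batch(client, message_ids):
    """Fetch full Gmail messages in one HTTP batch and collect them by id."""
    results = {}

    def on_message(request_id, response, exception):
        # request_id is the id supplied to batch.add(); exception is set on per-item failure.
        if exception is not None:
            return
        results[request_id] = response

    batch = client.new_batch_http_request(callback=on_message)
    for message_id in message_ids:
        batch.add(
            client.users().messages().get(userId='me', id=message_id, format='full'),
            request_id=message_id,
        )
    batch.execute()
    return results
```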
#### File: services/slack/slack.py
```python
import logging
import re
import urllib
import urllib.request
import flask
from google.cloud.datastore.entity import Entity
from slack_sdk import WebClient
from slack_sdk.errors import SlackApiError
from shared import responses
from shared import task_util
from shared.datastore.bot import Bot
from shared.datastore.service import Service
from shared.services.slack.installation_store import DatastoreInstallationStore
from shared.services.strava.client import ClientWrapper
from services.slack.track_blocks import create_track_blocks
from services.slack.unfurl_activity import unfurl_activity
from services.slack.unfurl_route import unfurl_route
from shared import ds_util
from shared.config import config
_STRAVA_APP_LINK_REGEX = re.compile('(https://www.strava.com/([^/]+)/[0-9]+)')
_TRACKS_TEAM_ID = 'T01U8EC3H8T'
_TRACKS_CHANNEL_ID = 'C020755FX3L'
_DEV_TRACKS_TEAM_ID = 'T01U4PCGSQM'
_DEV_TRACKS_CHANNEL_ID = 'C01U82F2STD'
module = flask.Blueprint('slack', __name__)
@module.route('/tasks/event', methods=['POST'])
def tasks_event():
params = task_util.get_payload(flask.request)
event = params['event']
logging.info('SlackEvent: %s', event.key)
if event['event']['type'] == 'link_shared':
return _process_link_shared(event)
return responses.OK_SUB_EVENT_UNKNOWN
@module.route('/tasks/livetrack', methods=['POST'])
def tasks_livetrack():
params = task_util.get_payload(flask.request)
track = params['track']
logging.info('process/livetrack: %s', track)
return _process_track(track)
def _process_link_shared(event):
slack_client = _create_slack_client(event)
unfurls = _create_unfurls(event)
if not unfurls:
return responses.OK_NO_UNFURLS
try:
response = slack_client.chat_unfurl(
channel=event['event']['channel'],
ts=event['event']['message_ts'],
unfurls=unfurls,
)
except SlackApiError:
logging.exception('process_link_shared: failed: unfurling: %s', unfurls)
return responses.INTERNAL_SERVER_ERROR
if not response['ok']:
logging.error('process_link_shared: failed: %s with %s', response, unfurls)
return responses.INTERNAL_SERVER_ERROR
logging.debug('process_link_shared: %s', response)
return responses.OK
def _create_slack_client(event):
slack_service = Service.get('slack', parent=Bot.key())
installation_store = DatastoreInstallationStore(
ds_util.client, parent=slack_service.key
)
slack_bot = installation_store.find_bot(
enterprise_id=event.get('authorizations', [{}])[0].get('enterprise_id'),
team_id=event.get('authorizations', [{}])[0].get('team_id'),
is_enterprise_install=event.get('authorizations', [{}])[0].get(
'is_enterprise_install'
),
)
return WebClient(slack_bot.bot_token)
def _create_slack_client_for_team(team_id):
slack_service = Service.get('slack', parent=Bot.key())
installation_store = DatastoreInstallationStore(
ds_util.client, parent=slack_service.key
)
slack_bot = installation_store.find_bot(
enterprise_id=None,
team_id=team_id,
is_enterprise_install=False,
)
return WebClient(slack_bot.bot_token)
def _create_unfurls(event):
strava = Service.get('strava', parent=Bot.key())
strava_client = ClientWrapper(strava)
unfurls = {}
for link in event['event']['links']:
alt_url = _resolve_rewrite_link(link)
unfurl = _unfurl(strava_client, link, alt_url)
if unfurl:
unfurls[link['url']] = unfurl
logging.warning(f'_create_unfurls: {unfurls}')
return unfurls
def _resolve_rewrite_link(link):
if 'strava.app.link' not in link['url']:
return
try:
logging.info('_resolve_rewrite_link: fetching: %s', link['url'])
with urllib.request.urlopen(link['url']) as response:
contents = response.read()
logging.debug('_resolve_rewrite_link: fetched: %s', link['url'])
except urllib.request.HTTPError:
logging.exception('Could not fetch %s', link['url'])
return
match = _STRAVA_APP_LINK_REGEX.search(str(contents))
if match is None:
logging.warning('Could not resolve %s', link['url'])
return
resolved_url = match.group()
return resolved_url
def _unfurl(strava_client, link, alt_url=None):
url = alt_url if alt_url else link['url']
if '/routes/' in url:
return unfurl_route(strava_client, url)
elif '/activities/' in url:
return unfurl_activity(strava_client, url)
else:
return None
def _process_track(track: Entity) -> responses.Response:
if config.is_dev:
team_id = _DEV_TRACKS_TEAM_ID
channel_id = _DEV_TRACKS_CHANNEL_ID
else:
team_id = _TRACKS_TEAM_ID
channel_id = _TRACKS_CHANNEL_ID
slack_client = _create_slack_client_for_team(team_id)
blocks = create_track_blocks(track)
if not blocks:
return responses.OK_INVALID_LIVETRACK
try:
response = slack_client.chat_postMessage(
channel=channel_id, blocks=blocks, unfurl_links=False, unfurl_media=False
)
except SlackApiError:
logging.exception(f'process_track: failed: track: {track}, blocks: {blocks}')
return responses.INTERNAL_SERVER_ERROR
if not response['ok']:
logging.error(
f'process_track: failed: response: {response}, track: {track}, blocks: {blocks}'
)
return responses.INTERNAL_SERVER_ERROR
logging.debug('process_track: %s', response)
return responses.OK
```
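To illustrate `_resolve_rewrite_link`, here is a hedged sketch of how `_STRAVA_APP_LINK_REGEX` recovers the canonical Strava URL from a fetched page. The HTML contents are made up and no network call is made.
```python
import re

_STRAVA_APP_LINK_REGEX = re.compile('(https://www.strava.com/([^/]+)/[0-9]+)')

# Made-up stand-in for the bytes returned by urlopen on a strava.app.link page.
contents = b'<html><a href="https://www.strava.com/activities/3040564323">view</a></html>'

match = _STRAVA_APP_LINK_REGEX.search(str(contents))
print(match.group())  # https://www.strava.com/activities/3040564323
```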
#### File: services/slack/test_slack.py
```python
import mock
import unittest
import flask
from google.cloud.datastore.entity import Entity
from shared import ds_util
from shared import responses
from shared import task_util
from services.slack import slack
class SlackTest(unittest.TestCase):
def setUp(self):
self.app = flask.Flask(__name__)
self.app.register_blueprint(slack.module)
self.app.testing = True
self.client = self.app.test_client()
@mock.patch('main.slack._process_link_shared', return_value=responses.OK)
def test_process_link_shared_called(self, slack_process_link_shared_mock):
event_entity = Entity(
ds_util.client.key('SubscriptionEvent', 'slack-E232eq2ee')
)
event_entity.update(LINK_SHARED_EVENT)
self.client.post(
'/tasks/event',
data=task_util.task_body_for_test(event=event_entity),
)
# It doesn't matter what code gets returned, since the method returns
# whatever _process_link_shared returns, which is a mock. Only test
# that _process_link_shared is called.
slack_process_link_shared_mock.assert_called_once()
@mock.patch('main.slack._create_slack_client')
@mock.patch('main.slack._create_unfurls')
def test_process_link_shared(self, mock_create_unfurls, mock_slack_client):
mock_create_unfurls.return_value = {'http://example.com': 'unfurl'}
event_entity = Entity(
ds_util.client.key('SubscriptionEvent', 'slack-<KEY>')
)
event_entity.update(LINK_SHARED_EVENT)
r = self.client.post(
'/tasks/event',
data=task_util.task_body_for_test(event=event_entity),
)
mock_slack_client.assert_called_once()
responses.assertResponse(self, r, responses.OK)
@mock.patch('main.slack._create_slack_client')
@mock.patch('main.slack._create_unfurls')
def test_process_link_shared_no_unfurls(
self, mock_create_unfurls, mock_slack_client
):
mock_create_unfurls.return_value = {}
event_entity = Entity(
ds_util.client.key('SubscriptionEvent', 'slack-<KEY>')
)
event_entity.update(LINK_SHARED_EVENT)
r = self.client.post(
'/tasks/event',
data=task_util.task_body_for_test(event=event_entity),
)
mock_slack_client.assert_called_once()
responses.assertResponse(self, r, responses.OK_NO_UNFURLS)
LINK_SHARED_EVENT = {
'api_app_id': 'SOME_APP_ID',
'authed_users': ['SOME_USER_ID'],
'authorizations': [
{
'enterprise_id': None,
'is_bot': True,
'is_enterprise_install': False,
'team_id': 'SOME_TEAM_ID',
'user_id': 'SOME_USER_ID',
}
],
'event': {
'channel': 'SOME_CHANNEL_ID',
'event_ts': '1619381634.662237',
'is_bot_user_member': True,
'links': [
{
'domain': 'strava.com',
'url': 'https://www.strava.com/activities/3040564323',
}
],
'message_ts': '1619381633.004900',
'type': 'link_shared',
'user': 'U01V550PQ5U',
},
'event_context': '1-link_shared-SOME_TEAM_ID-SOME_CHANNEL_ID',
'event_id': 'Ev01V67ECVF0',
'event_time': 1619381634,
'is_ext_shared_channel': False,
'team_id': 'SOME_TEAM_ID',
'token': '<PASSWORD>',
'type': 'event_callback',
}
```
#### File: services/slack/unfurl_route.py
```python
import logging
from babel.dates import format_date
from measurement.measures import Distance
from stravalib import exc
from shared.datastore.route import Route
from services.slack.util import get_id, generate_url
def unfurl_route(client, url):
route_id = get_id(url)
try:
route = client.get_route(route_id)
except exc.ObjectNotFound:
        logging.debug('Unknown Route: %s', route_id)
return None
route_entity = Route.to_entity(route)
return {'blocks': _route_blocks(url, route_entity)}
def _route_blocks(url, route):
route_sub = {
'id': route['id'],
'timestamp': format_date(route['timestamp'], format='medium'),
'description': route['description'],
'name': route['name'],
'athlete.id': route['athlete']['id'],
'athlete.firstname': route['athlete']['firstname'],
'athlete.lastname': route['athlete']['lastname'],
'url': url,
}
blocks = [
{
"type": "section",
"text": {
"type": "mrkdwn",
"text": "<%(url)s|*%(name)s*> by <https://www.strava.com/athletes/%(athlete.id)s|%(athlete.firstname)s %(athlete.lastname)s>\n%(description)s\n\nCreated on %(timestamp)s"
% route_sub,
},
"accessory": {
"type": "image",
"image_url": generate_url(route),
"alt_text": "route map",
},
}
]
fields = []
if route.get('distance', None):
fields.append(
{
"type": "mrkdwn",
"text": "*Distance:* %smi" % round(Distance(m=route['distance']).mi, 2),
}
)
if route.get('elevation_gain', None):
fields.append(
{
"type": "mrkdwn",
"text": "*Elevation:* %sft"
% round(Distance(m=route['elevation_gain']).ft, 0),
}
)
if fields:
blocks.append({"type": "divider"})
blocks.append({"type": "section", "fields": fields})
return blocks
```
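A small sketch of the `%(key)s` mapping substitution that `_route_blocks` relies on, showing that dotted keys such as `athlete.id` work because the lookup is a literal dict key. The field values are invented, not a real Strava route.
```python
route_sub = {
    'url': 'https://www.strava.com/routes/123',
    'name': 'Morning Loop',
    'description': 'Rolling hills',
    'timestamp': 'Jan 1, 2021',
    'athlete.id': 456,
    'athlete.firstname': 'Jo',
    'athlete.lastname': 'Rider',
}
text = (
    "<%(url)s|*%(name)s*> by <https://www.strava.com/athletes/%(athlete.id)s|"
    "%(athlete.firstname)s %(athlete.lastname)s>\n%(description)s\n\nCreated on %(timestamp)s"
    % route_sub
)
print(text)
```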
#### File: services/strava/events_worker.py
```python
import logging
from shared import ds_util
from shared.datastore.activity import Activity
from shared.datastore.athlete import Athlete
from shared.services.strava.client import ClientWrapper
class EventsWorker(object):
def __init__(self, service, event):
self.service = service
self.event = event
self.client = ClientWrapper(service)
def sync(self):
self.client.ensure_access()
object_id = self.event.get('object_id')
object_type = self.event.get('object_type')
aspect_type = self.event.get('aspect_type')
with ds_util.client.transaction():
logging.debug(
'StravaEvent: process_event_batch: %s, %s',
object_id,
self.event.key,
)
if object_type == 'activity':
if aspect_type == 'delete':
activity_key = ds_util.client.key(
'Activity', object_id, parent=self.service.key
)
ds_util.client.delete(activity_key)
logging.info(
'StravaEvent: Deleted Activity: %s: %s',
activity_key,
self.event.key,
)
else:
athlete = self.client.get_athlete()
activity = self.client.get_activity(object_id)
activity_entity = Activity.to_entity(
activity, detailed_athlete=athlete, parent=self.service.key
)
ds_util.client.put(activity_entity)
logging.info(
'StravaEvent: Created: %s: %s',
activity_entity.key,
self.event.key,
)
elif object_type == 'athlete':
athlete = self.client.get_athlete()
athlete_entity = Athlete.to_entity(athlete, parent=self.service.key)
ds_util.client.put(athlete_entity)
logging.info(
'StravaEvent: Updated Athlete: %s: %s',
athlete_entity.key,
self.event.key,
)
activities_query = ds_util.client.query(
kind='Activity', ancestor=self.service.key
)
for activity in activities_query.fetch():
activity['athlete'] = athlete
ds_util.client.put(activity)
logging.info(
'StravaEvent: Updated Activities: %s: %s',
athlete_entity.key,
self.event.key,
)
else:
logging.warning(
'StravaEvent: Update object_type %s not implemented: %s',
object_type,
self.event.key,
)
```
#### File: services/strava/test_events_worker.py
```python
import mock
import unittest
from google.cloud.datastore.entity import Entity
from stravalib.model import Activity, Athlete
from services.strava.events_worker import EventsWorker
from shared.datastore.subscription import SubscriptionEvent
from shared import ds_util
class MockQuery(object):
def __init__(self, results):
self.results = results
def fetch(self):
return self.results
class MainTest(unittest.TestCase):
def setUp(self):
pass
@mock.patch('services.strava.events_worker.ClientWrapper')
@mock.patch('shared.ds_util.client.delete')
@mock.patch('shared.ds_util.client.put')
@mock.patch('shared.ds_util.client.transaction')
def test_events_worker_delete(
self,
transaction_mock,
put_mock,
delete_mock,
ClientWrapperMock,
):
service = Entity(ds_util.client.key('Service', 'strava'))
service['credentials'] = {'access_token': 'XYZ_TOKEN'}
event = SubscriptionEvent.to_entity(
{
'aspect_type': 'delete',
'event_time': 1549151214,
'object_id': 2120517859,
'object_type': 'activity',
'owner_id': 35056021,
'subscription_id': 133263,
'updates': {},
},
parent=service.key,
)
client_mock = mock.Mock()
ClientWrapperMock.return_value = client_mock
worker = EventsWorker(service, event)
worker.sync()
activity_key = ds_util.client.key('Activity', 2120517859, parent=service.key)
delete_mock.assert_called_once_with(activity_key)
@mock.patch('services.strava.events_worker.ClientWrapper')
@mock.patch('shared.ds_util.client.delete')
@mock.patch('shared.ds_util.client.put')
@mock.patch('shared.ds_util.client.transaction')
def test_events_worker_activity_update(
self,
transaction_mock,
put_mock,
delete_mock,
ClientWrapperMock,
):
service = Entity(ds_util.client.key('Service', 'strava'))
service['credentials'] = {'access_token': 'XYZ_TOKEN'}
event = SubscriptionEvent.to_entity(
{
'aspect_type': 'create',
'event_time': 1549151211,
'object_id': 2120517859,
'object_type': 'activity',
'owner_id': 35056021,
'subscription_id': 133263,
'updates': {},
},
parent=service.key,
)
client_mock = mock.Mock()
client_mock.get_activity.side_effect = _activity_generator
client_mock.get_athlete.return_value = Athlete()
ClientWrapperMock.return_value = client_mock
worker = EventsWorker(service, event)
worker.sync()
client_mock.get_activity.assert_called_once_with(2120517859)
def _activity_generator(activity_id):
activity = Activity()
activity.name = 'Activity ' + str(activity_id)
activity.id = activity_id
activity.distance = 10
activity.moving_time = 200
activity.elapsed_time = 100
activity.total_elevation_gain = 300
return activity
def _test_events():
service_key = ds_util.client.key('Service', 'strava')
return [
SubscriptionEvent.to_entity(
{
'aspect_type': 'create',
'event_time': 1549151210,
'object_id': 2120517766,
'object_type': 'activity',
'owner_id': 35056021,
'subscription_id': 133263,
'updates': {},
},
parent=service_key,
),
SubscriptionEvent.to_entity(
{
'aspect_type': 'update',
'event_time': 1549151212,
'object_id': 2120517766,
'object_type': 'activity',
'owner_id': 35056021,
'subscription_id': 133263,
'updates': {'title': 'Updated Title'},
},
parent=service_key,
),
SubscriptionEvent.to_entity(
{
'aspect_type': 'create',
'event_time': 1549151211,
'object_id': 2120517859,
'object_type': 'activity',
'owner_id': 35056021,
'subscription_id': 133263,
'updates': {},
},
parent=service_key,
),
SubscriptionEvent.to_entity(
{
'aspect_type': 'update',
'event_time': 1549151213,
'object_id': 2120517859,
'object_type': 'activity',
'owner_id': 35056021,
'subscription_id': 133263,
'updates': {'title': 'Second Updated Title'},
},
parent=service_key,
),
SubscriptionEvent.to_entity(
{
'aspect_type': 'delete',
'event_time': 1549151214,
'object_id': 2120517859,
'object_type': 'activity',
'owner_id': 35056021,
'subscription_id': 133263,
'updates': {},
},
parent=service_key,
),
]
```
#### File: backend/xsync/xsync.py
```python
import datetime
import logging
import random
import time
import flask
from retrying import retry
from google.cloud.datastore.key import Key
from shared import ds_util
from shared import task_util
from shared import responses
from shared.datastore.series import Series
from shared.datastore.service import Service
from shared.exceptions import SyncException
from shared.services.garmin import client as garmin_client
from shared.services.trainerroad.client import (
create_client as trainerroad_create_client,
)
import sync_helper
module = flask.Blueprint('xsync', __name__)
@module.route('/tasks/measure', methods=['POST'])
def xsync_tasks_measure():
params = task_util.get_payload(flask.request)
user_key = params['user_key']
measure = params['measure']
logging.info('ProcessMeasure: %s %s', user_key, measure)
_withings_garmin(flask.request, user_key, measure)
_withings_trainerroad(flask.request, user_key, measure)
return responses.OK
def _withings_garmin(request, user_key, measure):
params = task_util.get_payload(request)
user_key = params['user_key']
measure = params['measure']
logging.info('ProcessMeasure: withings_garmin: %s %s', user_key, measure)
garmin_service = Service.get('garmin', parent=user_key)
if not Service.has_credentials(garmin_service, required_key='password'):
logging.debug('ProcessMeasure: Garmin not connected')
return responses.OK
if not measure.get('weight'):
logging.debug('ProcessMeasure: Skipping non-weight measure.')
return responses.OK
try:
client = garmin_client.create(garmin_service)
client.set_weight(measure['weight'], measure['date'])
except Exception:
logging.exception('ProcessMeasure: Failed: %s', measure)
return responses.OK_SYNC_EXCEPTION
return responses.OK
def _withings_trainerroad(request, user_key, measure):
params = task_util.get_payload(request)
user_key = params['user_key']
measure = params['measure']
logging.info('ProcessMeasure: withings_trainerroad: %s %s', user_key, measure)
trainerroad_service = Service.get('trainerroad', parent=user_key)
if not Service.has_credentials(trainerroad_service, required_key='password'):
logging.debug('ProcessMeasure: Trainerroad not connected')
return responses.OK
if not measure.get('weight'):
logging.debug('ProcessMeasure: Skipping non-weight measure.')
return responses.OK
try:
client = trainerroad_create_client(trainerroad_service)
with client:
client.weight = measure['weight']
except Exception:
logging.exception('ProcessMeasure: Failed: %s', measure)
return responses.OK_SYNC_EXCEPTION
return responses.OK
@module.route('/tasks/backfill', methods=['POST'])
def tasks_xsync_tasks_backfill():
params = task_util.get_payload(flask.request)
source_key = params['source_key']
dest_key = params['dest_key']
start = params['start']
end = params['end']
logging.info('xsync_tasks_backfill: %s->%s', source_key, dest_key)
try:
sync_helper.do(
BackfillWorker(source_key, dest_key, start, end), work_key=source_key
)
except SyncException:
return responses.OK_SYNC_EXCEPTION
return responses.OK
class BackfillWorker(object):
def __init__(
self,
source_key: Key,
dest_key: Key,
start: datetime.datetime,
end: datetime.datetime,
):
self.source_key = source_key
self.dest_key = dest_key
self.start = start
self.end = end
def sync(self):
source = ds_util.client.get(self.source_key)
dest = ds_util.client.get(self.dest_key)
if not self._check_creds(source):
            raise Exception('Source does not have credentials: %s' % source.key)
        if not self._check_creds(dest):
            raise Exception('Dest does not have credentials: %s' % dest.key)
series = Series.get(source.key)
if not series['measures']:
logging.debug('Source has no measures: %s', source.key)
return
measures = series['measures']
del series
measures = [m for m in filter(lambda m: self._isbackfillable(m), measures)]
logging.debug('Syncing %s measures to %s', len(measures), dest.key.name)
if dest.key.name == 'garmin':
client = garmin_client.create(dest)
# Shorten and maybe GC measures as we iterate
while len(measures) > 0:
measure = measures.pop(0)
@retry(
wait_exponential_multiplier=1000 * 2,
wait_exponential_max=1000 * 60 * 30,
)
def _set_weight():
logging.debug('Setting weight for %s', measure['date'])
try:
client.set_weight(measure['weight'], measure['date'])
time.sleep(random.randint(1, 2))
except Exception:
logging.exception(
'Failed to set_weight for %s', measure['date']
)
raise
_set_weight()
del measure
def _check_creds(self, service):
return (
service.key.name == 'garmin'
and Service.has_credentials(service, required_key='password')
) or (Service.has_credentials(service))
def _isbackfillable(self, measure):
if not measure.get('weight'):
return False
if self.start and measure['date'] < self.start:
return False
if self.end and measure['date'] > self.end:
return False
return True
```
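A hedged sketch of the exponential-backoff behaviour `BackfillWorker` leans on via the `retrying` package. It uses the same `wait_exponential_*` keywords as the code above, plus `stop_max_attempt_number` (an addition here) so the sketch always terminates; the failing function is illustrative.
```python
from retrying import retry

attempts = {'count': 0}

@retry(
    wait_exponential_multiplier=100,   # short waits for the sketch; the worker uses 2000 ms
    wait_exponential_max=1000,
    stop_max_attempt_number=4,         # not used above; added so this demo cannot loop forever
)
def flaky_set_weight():
    attempts['count'] += 1
    if attempts['count'] < 3:
        raise RuntimeError('transient failure')
    return 'ok'

print(flaky_set_weight(), 'after', attempts['count'], 'attempts')  # ok after 3 attempts
```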
#### File: gae/frontend/main.py
```python
import datetime
import logging
import flask
from flask_cors import CORS
from flask_cors import cross_origin
from flask_talisman import Talisman
from firebase_admin import auth, exceptions
from shared import auth_util
from shared import logging_util
from shared.config import config
from shared import responses
from services.bbfitbit import bbfitbit
from services.google import google
from services.slack import slack
from services.strava import strava
from services.withings import withings
app = flask.Flask(__name__)
app.secret_key = config.flask_secret_creds['secret']
app.register_blueprint(bbfitbit.module, url_prefix='/services/fitbit')
app.register_blueprint(google.module, url_prefix='/services/google')
app.register_blueprint(slack.module, url_prefix='/services/slack')
app.register_blueprint(strava.module, url_prefix='/services/strava')
app.register_blueprint(withings.module, url_prefix='/services/withings')
CORS(app, origins=config.cors_origins)
Talisman(app, force_https_permanent=True)
logging_util.setup_logging(app)
@app.route('/ok', methods=['GET', 'POST'])
def ok():
return responses.OK
@app.route('/services/session', methods=['GET', 'POST'])
@cross_origin(supports_credentials=True, origins=config.cors_origins)
@auth_util.user_required
def create_session(user):
"""From https://firebase.google.com/docs/auth/admin/manage-cookies"""
try:
id_token = flask.request.headers['Authorization'].split(' ').pop()
expires_in = datetime.timedelta(minutes=10)
session_cookie = auth.create_session_cookie(id_token, expires_in=expires_in)
response = flask.make_response(flask.jsonify({'status': 'success'}))
expires = datetime.datetime.now(datetime.timezone.utc) + expires_in
response.set_cookie(
'__Secure-oauthsession',
session_cookie,
expires=expires,
httponly=True,
secure=True,
)
return response
except exceptions.FirebaseError:
logging.exception('Failed to create a session cookie.')
flask.abort(401, 'Failed to create a session cookie')
@app.route('/unittest', methods=['GET', 'POST'])
def unittest():
return responses.OK
# @app.before_request
# def before():
# logging_util.before()
#
#
# @app.after_request
# def after(response):
# return logging_util.after(response)
if __name__ == '__main__':
host, port = config.frontend_url[7:].split(':')
app.run(host='localhost', port=port, debug=True)
```
#### File: services/withings/test_withings.py
```python
import mock
import unittest
from urllib.parse import urlencode
import flask
from shared import responses
from shared.config import config
from services.withings import withings
SERVICE_KEY = '<KEY>' # noqa: E501
class WithingsTest(unittest.TestCase):
def setUp(self):
self.app = flask.Flask(__name__)
self.app.register_blueprint(withings.module)
self.app.debug = True
self.app.testing = True
self.client = self.app.test_client()
@mock.patch('shared.task_util._post_task_for_dev')
def test_withings_event_valid(self, _post_task_for_dev_mock):
query_string = urlencode(
{
'sub_secret': config.withings_creds['sub_secret'],
'service_key': SERVICE_KEY,
}
)
url = '/events?%s' % (query_string,)
r = self.client.post(
url, data={'startdate': '1532017199', 'enddate': '1532017200', 'appli': '1'}
)
responses.assertResponse(self, r, responses.OK)
_post_task_for_dev_mock.assert_called_once()
@mock.patch('shared.task_util._post_task_for_dev')
def test_withings_event_bad_service_key(self, _post_task_for_dev_mock):
query_string = urlencode(
{
'sub_secret': config.withings_creds['sub_secret'],
'service_key': "b'12345'",
}
)
url = '/events?%s' % (query_string,)
r = self.client.post(
url, data={'startdate': '1532017199', 'enddate': '1532017200', 'appli': '1'}
)
responses.assertResponse(self, r, responses.OK_SUB_EVENT_FAILED)
_post_task_for_dev_mock.assert_not_called()
```
#### File: gae/shared/config.py
```python
import json
import os
class _Config(object):
def __init__(self, base_path):
self.base_path = base_path
base_config = json.load(open(os.path.join(base_path, 'config.json')))
for key, value in base_config.items():
setattr(self, key, value)
self.firebase_app_config = json.load(
open(os.path.join(base_path, 'app_configs/firebase-web.json'))
)
self.gcp_server_creds = json.load(
open(os.path.join(base_path, 'service_keys/gcp-server.json'))
)
self.gcp_server_oauth_creds = json.load(
open(os.path.join(base_path, 'service_keys/gcp-server-oauth.json'))
)
self.gcp_web_creds = json.load(
open(os.path.join(base_path, 'service_keys/gcp-web.json'))
)
self.fitbit_creds = json.load(
open(os.path.join(base_path, 'service_keys/fitbit.json'))
)
self.strava_creds = json.load(
open(os.path.join(base_path, 'service_keys/strava.json'))
)
self.withings_creds = json.load(
open(os.path.join(base_path, 'service_keys/withings.json'))
)
self.slack_creds = json.load(
open(os.path.join(base_path, 'service_keys/slack.json'))
)
self.pubsub_creds = json.load(
open(os.path.join(base_path, 'service_keys/pubsub.json'))
)
self.flask_secret_creds = json.load(
open(os.path.join(base_path, 'service_keys/flask-secret.json'))
)
self.passkey_secret_creds = json.load(
open(os.path.join(base_path, 'service_keys/passkey.json'))
)
config = _Config(os.environ.get('BIKEBUDS_ENV', 'environments/env'))
if not os.getenv('GAE_ENV', '').startswith('standard'):
# Local - Not yet supported by python cloud apis.
if getattr(config, 'datastore_emulator_host', None):
os.environ['DATASTORE_EMULATOR_HOST'] = config.datastore_emulator_host
```
#### File: shared/datastore/athlete.py
```python
from shared import ds_util
from shared.datastore.strava.converters import StravaConverters
class Athlete(object):
@classmethod
def get_by_id(cls, strava_id):
athlete_query = ds_util.client.query(kind='Athlete')
athlete_query.add_filter('id', '=', strava_id)
athletes = [a for a in athlete_query.fetch()]
if len(athletes) == 0:
return None
elif len(athletes) > 1:
raise Exception('More athletes than expected.')
return athletes[0]
@classmethod
def get_private(cls, service_key):
result = [
r
for r in ds_util.client.query(kind='Athlete', ancestor=service_key).fetch()
]
if len(result) == 0:
return None
elif len(result) > 1:
            raise Exception('Too many athletes for user: %s' % service_key.parent)
else:
return result[0]
@classmethod
def to_entity(cls, athlete, parent=None):
return StravaConverters.Athlete.to_entity(athlete, parent=parent)
```
#### File: shared/datastore/test_subscription.py
```python
import unittest
from shared import ds_util
from shared.datastore.subscription import SubscriptionEvent
class SubscriptionEventTest(unittest.TestCase):
def test_hash_name(self):
args = ['AAA', 'BBB', 'CCC']
hash_name = SubscriptionEvent.hash_name(*args)
        # Hash guaranteed, generated by D6. :)
self.assertEqual('2236edf1239b64ae48b309fb0bf15cdf6ddbf921', hash_name)
reversed_hash_name = SubscriptionEvent.hash_name(*reversed(args))
        # Hash guaranteed, generated by D6. :)
self.assertEqual('2ef291e5b3091198086ee74855e348436a6f4b36', reversed_hash_name)
def test_hash_name_empty_args(self):
args = []
self.assertRaises(Exception, SubscriptionEvent.hash_name, *args)
def test_uses_name(self):
service_key = ds_util.client.key('Service', 'withings')
event_data = {'key1': 'AAA', 'key2': 'BBB', 'key3': 'CCC'}
event_entity = SubscriptionEvent.to_entity(
event_data,
name=SubscriptionEvent.hash_name(*sorted(event_data.values())),
parent=service_key,
)
self.assertEqual(
'2236edf1239b64ae48b309fb0bf15cdf6ddbf921', event_entity.key.name
)
```
#### File: gae/shared/hash_util.py
```python
import hashlib
def hash_name(*args):
if len(args) == 0:
raise TypeError("Expected non-zero-length hash_name args")
hash_string = '-'.join([str(arg) for arg in args])
return hashlib.sha1(hash_string.encode()).hexdigest()
```
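A standalone note on `hash_name`: it is order-sensitive, which is why `test_subscription.py` above sorts `event_data.values()` before hashing. Minimal sketch using only the standard library.
```python
import hashlib

def hash_name(*args):
    # Same convention as the helper above: join args with '-' and SHA-1 the result.
    return hashlib.sha1('-'.join(str(arg) for arg in args).encode()).hexdigest()

print(hash_name('AAA', 'BBB', 'CCC') == hash_name('CCC', 'BBB', 'AAA'))  # False
```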
#### File: services/garmin/client.py
```python
from functools import wraps
import json
import logging
import re
import curlify
import requests
from shared.datastore.service import Service
URL_SSO_LOGIN = "https://sso.garmin.com/sso/signin"
URL_BASE = 'https://connect.garmin.com'
URL_MODERN = URL_BASE + '/modern'
URL_ACTIVITIES = URL_MODERN + '/proxy/usersummary-service/usersummary/daily/'
URL_HEARTRATES = URL_MODERN + '/proxy/wellness-service/wellness/dailyHeartRate/'
URL_BODY_COMPOSITION = URL_MODERN + '/proxy/weight-service/weight/daterangesnapshot'
URL_USER_WEIGHT = URL_BASE + '/proxy/weight-service/user-weight'
HEADERS = {
'User-Agent': (
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
+ 'AppleWebKit/537.36 (KHTML, like Gecko) '
+ 'Chrome/79.0.3945.88 Safari/537.36'
),
'origin': 'https://sso.garmin.com',
'nk': 'NT', # Needed for user-weight, for some reason.
}
class Error(Exception):
pass
class SessionExpiredError(Error):
pass
def create(service):
if not Service.has_credentials(service, required_key='password'):
raise Exception('Cannot create Garmin client without creds: %s' % (service,))
creds = service.get('credentials', {})
session_state = creds.get('session_state', {})
def refresh_callback(session_state):
logging.debug('Garmin creds refresh for: %s', service.key)
Service.update_credentials(service, {'session_state': session_state})
garmin = Garmin(
creds['username'],
Service.get_credentials_password(creds),
refresh_callback=refresh_callback,
)
try:
garmin.set_session_state(**session_state)
except ValueError:
logging.exception('Invalid session_state, ignoring')
del creds['session_state']
return garmin
def require_session(client_function):
@wraps(client_function)
def check_session(*args, **kwargs):
client_object = args[0]
if not (client_object._session and client_object.profile):
logging.debug('No session established. Logging in.')
client_object.login()
try:
return client_function(*args, **kwargs)
except SessionExpiredError:
logging.debug('Retrying (once) after login.')
client_object.login()
return client_function(*args, **kwargs)
return check_session
class Garmin(object):
def __init__(self, username, password, refresh_callback=None):
self._username = username
self._password = password
self._refresh_callback = refresh_callback
self._session = None
self._preferences = None
self.profile = None
def set_session_state(self, cookies=None, profile=None, preferences=None):
if cookies or profile or preferences:
            if None in (cookies, profile, preferences):
raise ValueError(
'Must pass all or nothing. cookies: %s, profile: %s, preferences: %s',
cookies,
profile,
preferences,
)
self._session = requests.Session()
self._session.headers.update(HEADERS)
if cookies:
self._session.cookies.update(cookies)
self._preferences = preferences
self.profile = profile
def get_session_state(self):
if not self._session:
return None
return {
'cookies': self._session.cookies.get_dict(),
'preferences': self._preferences,
'profile': self.profile,
}
def login(self):
logging.debug('Garmin Login')
self.set_session_state()
try:
self._authenticate()
except Exception as err:
# Clear the session.
self.set_session_state()
logging.debug('Clearing session and raising.')
raise err
finally:
logging.debug('Finally calling refresh_callback.')
if self._refresh_callback:
self._refresh_callback(self.get_session_state())
logging.debug('Login complete')
def _authenticate(self):
params = {
# 'webhost': URL_BASE,
'service': URL_MODERN,
# 'source': URL_SSO_LOGIN,
# 'redirectAfterAccountLoginUrl': URL_MODERN,
# 'redirectAfterAccountCreationUrl': URL_MODERN,
# 'gauthHost': URL_SSO_LOGIN,
# 'locale': 'en_US',
# 'id': 'gauth-widget',
# 'cssUrl': 'https://static.garmincdn.com/com.garmin.connect/ui/css/gauth-custom-v1.2-min.css',
# 'clientId': 'GarminConnect',
# 'rememberMeShown': 'true',
# 'rememberMeChecked': 'false',
# 'createAccountShown': 'true',
# 'openCreateAccount': 'false',
# 'usernameShown': 'false',
# 'displayNameShown': 'false',
# 'consumeServiceTicket': 'false',
# 'initialFocus': 'true',
# 'embedWidget': 'false',
# 'generateExtraServiceTicket': 'true',
}
data = {
'username': self._username,
'password': self._password,
'embed': 'false',
# 'lt': 'e1s1',
# '_eventId': 'submit',
# 'displayNameRequired': 'false',
}
login_response = self._session.post(URL_SSO_LOGIN, params=params, data=data)
logging.debug('SSO Request: %s', curlify.to_curl(login_response.request))
login_response.raise_for_status()
auth_ticket_url = self._extract_auth_ticket_url(login_response.text)
logging.debug("Extracted auth ticket url: %s", auth_ticket_url)
auth_response = self._session.get(auth_ticket_url)
logging.debug('Auth Request: %s', curlify.to_curl(auth_response.request))
auth_response.raise_for_status()
# There is auth info in here needed in order to fetch other services.
self._preferences = self._extract_json(
auth_response.text, 'VIEWER_USERPREFERENCES'
)
self.profile = self._extract_json(auth_response.text, 'SOCIAL_PROFILE')
@staticmethod
def _extract_json(html, key):
"""Find and return json data."""
found = re.search(key + r" = JSON.parse\(\"(.*)\"\);", html, re.M)
if found:
text = found.group(1).replace('\\"', '"')
return json.loads(text)
@staticmethod
def _extract_auth_ticket_url(auth_response):
"""Extracts an authentication ticket URL from the response of an
authentication form submission. The auth ticket URL is typically
of form:
https://connect.garmin.com/modern?ticket=ST-0123456-aBCDefgh1iJkLmN5opQ9R-cas
:param auth_response: HTML response from an auth form submission.
"""
match = re.search(r'response_url\s*=\s*"(https:[^"]+)"', auth_response)
if not match:
raise RuntimeError(
"auth failure: unable to extract auth ticket URL. did you provide a correct username/password?"
)
auth_ticket_url = match.group(1).replace("\\", "")
return auth_ticket_url
@require_session
    def get_body_comp(self, start_date, end_date=None):  # 'YYYY-mm-dd'
end_date = end_date if end_date else start_date
url = URL_BODY_COMPOSITION + '?startDate=' + start_date + '&endDate=' + end_date
return self._get(url)
@require_session
    def get_stats(self, start_date):  # cDate = 'YYYY-mm-dd'
url = (
URL_ACTIVITIES
+ self.profile['displayName']
+ '?'
+ 'calendarDate='
+ start_date
)
return self._get(url)
@require_session
def set_weight(self, weight, weight_date):
url = URL_USER_WEIGHT
weight_date = weight_date.replace(tzinfo=None)
return self._post(
url,
json={
'value': weight,
'unitKey': 'kg',
'date': weight_date.date().isoformat(),
'gmtTimestamp': weight_date.isoformat() + '.00',
},
)
def _get(self, url):
logging.debug('Fetching: %s', url)
response = self._session.get(url)
logging.info(
'Response code %s, and json %s',
response.status_code,
response.text,
)
logging.debug('Request: %s', curlify.to_curl(response.request))
if response.status_code == 403:
raise SessionExpiredError('Login expired')
elif response.status_code == 204:
return None
else:
response.raise_for_status()
            # One last check: It's a weird behavior, this one...
# Kind of like a 403, only not.
if response.json().get('privacyProtected'):
raise SessionExpiredError('Login expired')
return response.json()
def _post(self, url, json=None):
logging.debug('Posting: %s', url)
response = self._session.post(url, json=json)
logging.info('Response code %s, and %s', response.status_code, response.text)
logging.debug('Request: %s', curlify.to_curl(response.request))
if response.status_code == 403:
raise SessionExpiredError('Login expired')
elif response.status_code == 204:
return None
else:
response.raise_for_status()
return None
```
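A cut-down, self-contained sketch of the `require_session` retry-once idea used by the Garmin client above. It only shows the retry-after-relogin path (the real decorator also logs in proactively when no session exists); `FakeClient` is illustrative.
```python
from functools import wraps

class SessionExpiredError(Exception):
    pass

def require_session(client_function):
    @wraps(client_function)
    def check_session(self, *args, **kwargs):
        try:
            return client_function(self, *args, **kwargs)
        except SessionExpiredError:
            # Re-establish the session, then retry exactly once.
            self.login()
            return client_function(self, *args, **kwargs)
    return check_session

class FakeClient:
    def __init__(self):
        self.logged_in = False

    def login(self):
        self.logged_in = True

    @require_session
    def fetch(self):
        if not self.logged_in:
            raise SessionExpiredError('Login expired')
        return 'payload'

print(FakeClient().fetch())  # 'payload'
```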
#### File: services/strava/client.py
```python
import functools
import logging
import time
from shared.config import config
from shared.datastore.service import Service
import stravalib
from stravalib import exc
class ClientWrapper(object):
"""Auto-refresh (once) access tokens on any request."""
def __init__(self, service):
self._service = service
self._client = stravalib.client.Client(
access_token=service['credentials']['access_token'],
rate_limiter=(lambda x=None: None),
)
def ensure_access(self):
"""Ensure that an access token is good for at least 60 more seconds."""
now = time.time()
expires_around = self._service.get('credentials', {}).get('expires_at', 0) - 60
if time.time() > expires_around:
seconds_ago = now - expires_around
logging.info('Access expired %s ago; fetching new', seconds_ago)
self._refresh_access_token()
def __getattr__(self, attr):
func = getattr(self._client, attr)
@functools.wraps(func)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except exc.AccessUnauthorized:
logging.info("Token expired, refreshing.")
if self._refresh_access_token():
return func(*args, **kwargs)
return wrapper
def _refresh_access_token(self):
if self._service['credentials'].get('refresh_token') is None:
logging.warning(
'Cannot refresh_access_token for %s, no refresh_token',
self._service.key,
)
return False
new_credentials = self._client.refresh_access_token(
client_id=config.strava_creds['client_id'],
client_secret=config.strava_creds['client_secret'],
refresh_token=self._service['credentials']['refresh_token'],
)
Service.update_credentials(self._service, dict(new_credentials))
self._client.access_token = self._service['credentials']['access_token']
return True
``` |
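A tiny sketch of the freshness check in `ensure_access` above: refresh whenever the token is within 60 seconds of `expires_at`. The credentials dict is invented.
```python
import time

credentials = {'expires_at': time.time() + 30}  # invented: token valid for 30 more seconds

def needs_refresh(credentials, margin=60):
    # Mirrors ensure_access: treat the token as expired `margin` seconds early.
    return time.time() > credentials.get('expires_at', 0) - margin

print(needs_refresh(credentials))  # True: less than 60 seconds of validity left
```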
{
"source": "jlapenna/homeforjla",
"score": 3
} |
#### File: homeforjla/kitchen/imperfect.py
```python
import csv
import io
import re
import sys
import uuid
import pprint
from typing import List
HEADERS = ('Item', 'Category', 'Size', 'Quantity','Description', 'Staple', 'Last Modified', 'Key')
# RegEx that apply to the file loaded.
ALL_YOU_WERE_NOT_CHARGED = re.compile(
r'\n(You were not charged for \d+ of this item.)\n');
# RegEx that apply to an individual line item.
FOOD_QUANTITY_PATTERN = re.compile(r'\ \((?P<Quantity>.*)\)', re.VERBOSE);
FOOD_TO_STRIP = [
re.compile(r'^Conventional '),
re.compile(r'^Imperfect Foods - '),
re.compile(r'Organic '),
]
FOOD_FIELDS = re.compile(
r'(?P<Item>.*) \((?P<Size>.*)\)\t(?P<Quantity>.*)\t(?P<Price>.*)$')
FOOD_PRIORITIES = [
r'Grapes',
r'Bacon',
r'Potatoes',
r'Eggs',
r'Ribeye',
r'Ground\ Beef',
r'Chicken\ Broth',
r'Ground\ Beef',
]
FOOD_PRIORITIES_PATTERN = re.compile(r'(?P<therest>.*)\ (?P<priority>(%s))' % '|'.join(FOOD_PRIORITIES), re.VERBOSE);
def load_items(order: List[str]):
items = []
for line in order.split('\n'):
# V0 # line = FOOD_QUANTITY_PATTERN.sub('\t\g<Quantity>', line)
for strip in FOOD_TO_STRIP:
line = strip.sub('', line)
        # Re-order the words in an item's description to prioritize more
        # important terms.
        line = FOOD_PRIORITIES_PATTERN.sub(r'\g<priority> \g<therest>', line)
# V0 # line = line.split('\t')
if not line:
continue
        match = FOOD_FIELDS.match(line)
        if not match:
            sys.stdout.write('Could not match on %s\n' % line)
            continue
        item = match.groupdict()
# Strip columns we don't track
del(item['Price'])
# Add a unique Key
item['Key'] = str(uuid.uuid4().hex)
items.append(item)
return items
def main():
order = sys.stdin.read()
order = ALL_YOU_WERE_NOT_CHARGED.sub('\t', order);
items = load_items(order)
s = io.StringIO()
c = csv.DictWriter(s, HEADERS)
c.writeheader()
c.writerows(items)
s.seek(0)
print(s.read())
s.close()
if __name__ == '__main__':
main()
```
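For reference, a hedged sketch of the line format `FOOD_FIELDS` expects. The sample line is made up rather than a real Imperfect Foods export.
```python
import re

FOOD_FIELDS = re.compile(
    r'(?P<Item>.*) \((?P<Size>.*)\)\t(?P<Quantity>.*)\t(?P<Price>.*)$')

sample = 'Green Grapes (2 lb)\t1\t$3.99'  # invented example line
print(FOOD_FIELDS.match(sample).groupdict())
# {'Item': 'Green Grapes', 'Size': '2 lb', 'Quantity': '1', 'Price': '$3.99'}
```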
#### File: homeforjla/mls/mls.py
```python
import csv
import io
import urllib.parse
from bs4 import BeautifulSoup
def main():
soup = BeautifulSoup(open('examples/saved.html'), 'lxml')
rows = []
for tag in soup.select('.searchResult'):
num = select_str(tag, '.listingNum').split(' ')[1]
rows.append({
'address': select_str(tag, '.address span'),
'price': select_str(tag, '.rapIDXSearchResultsPriceTop'),
'dom': select_str(tag, '.listingDomCdom .display-label'),
'sqft': select_str(tag, '.listingSqFt .display-label'),
'beds': select_str(tag, '.listingBeds .display-label'),
'baths': str(select_str(tag, '.listingBaths .display-label')),
'num': num,
'url': 'https://www.google.com/search?q=%s&btnI' % urllib.parse.quote('redfin' + num),
'remarks': select_str(tag, '.remarks-long'),
})
s = io.StringIO()
c = csv.DictWriter(s, ('address', 'price', 'dom', 'sqft', 'beds', 'baths', 'num', 'url', 'remarks', 'visited', 'notes'))
c.writeheader()
c.writerows(rows)
s.seek(0)
print(s.read())
s.close()
def select_list(soup, query):
return [tag.get_text(' ', strip=True) for tag in soup.select(query)]
def select_str(soup, query):
return ' '.join(select_list(soup, query))
if __name__ == '__main__':
main()
``` |
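A self-contained illustration of the `select_str` helper above; the HTML fragment is invented, whereas the real script parses a saved MLS results page.
```python
from bs4 import BeautifulSoup

html = '<div class="searchResult"><span class="address"><span>1 Main St</span></span></div>'
soup = BeautifulSoup(html, 'html.parser')

def select_str(soup, query):
    return ' '.join(tag.get_text(' ', strip=True) for tag in soup.select(query))

print(select_str(soup, '.address span'))  # 1 Main St
```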
{
"source": "jlapeyre/diffeqpy",
"score": 2
} |
#### File: jlapeyre/diffeqpy/setup.py
```python
from setuptools import setup
def readme():
with open('README.md') as f:
return f.read()
setup(name='diffeqpy',
version='1.2.0',
description='Solving Differential Equations in Python',
long_description=readme(),
long_description_content_type="text/markdown",
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Scientific/Engineering :: Physics'
],
url='http://github.com/SciML/diffeqpy',
keywords='differential equations stochastic ordinary delay differential-algebraic dae ode sde dde',
author='<NAME> and <NAME>',
author_email='<EMAIL>',
license='MIT',
packages=['diffeqpy','diffeqpy.tests'],
install_requires=['julia>=0.2'],
include_package_data=True,
zip_safe=False)
``` |
{
"source": "jlapeyre/qiskit-alt",
"score": 2
} |
#### File: qiskit-alt/bench/jordan_wigner_nature_time.py
```python
import timeit
def make_setup_code(basis, geometry):
return f"""
from qiskit_nature.drivers import UnitsType, Molecule
from qiskit_nature.drivers.second_quantization import ElectronicStructureDriverType, ElectronicStructureMoleculeDriver
h2_geometry = [['H', [0., 0., 0.]],
['H', [0., 0., 0.735]]]
h2o_geometry = [['O', [0., 0., 0.]],
['H', [0.757, 0.586, 0.]],
['H', [-0.757, 0.586, 0.]]]
basis = {basis}
molecule = Molecule(geometry={geometry},
charge=0, multiplicity=1)
driver = ElectronicStructureMoleculeDriver(molecule, basis=basis, driver_type=ElectronicStructureDriverType.PYSCF)
from qiskit_nature.problems.second_quantization import ElectronicStructureProblem
from qiskit_nature.converters.second_quantization import QubitConverter
from qiskit_nature.mappers.second_quantization import JordanWignerMapper
es_problem = ElectronicStructureProblem(driver)
second_q_op = es_problem.second_q_ops()
qubit_converter = QubitConverter(mapper=JordanWignerMapper())
hamiltonian = second_q_op[0]
"""
def run_one_basis(basis, geometry, num_repetitions):
setup_code = make_setup_code(basis, geometry)
bench_code = "qubit_converter.convert(hamiltonian)"
time = timeit.timeit(stmt=bench_code, setup=setup_code, number=num_repetitions)
t = 1000 * time / num_repetitions
print(f"geometry={geometry}, basis={basis} {t:0.2f}", "ms")
return t
def run_benchmarks():
nature_times = []
for basis, geometry, num_repetitions in (("'sto3g'", "h2_geometry", 10), ("'631g'", "h2_geometry", 10),
("'631++g'", "h2_geometry", 5),
("'sto3g'", "h2o_geometry", 5), ("'631g'", "h2o_geometry", 1)):
t = run_one_basis(basis, geometry, num_repetitions)
nature_times.append(t)
return nature_times
if __name__ == '__main__':
nature_times = run_benchmarks()
```
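The benchmark above follows the standard `timeit` setup/statement split; here is a minimal sketch of the same pattern with toy code and no Qiskit dependencies.
```python
import timeit

setup_code = "xs = list(range(1000))"   # toy stand-in for the PySCF/Qiskit setup
bench_code = "sum(xs)"                  # toy stand-in for qubit_converter.convert(...)
num_repetitions = 100

t = timeit.timeit(stmt=bench_code, setup=setup_code, number=num_repetitions)
print(f"{1000 * t / num_repetitions:0.4f} ms per call")
```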
#### File: qiskit-alt/bench/pauli_from_list_alt.py
```python
import sys
import qiskit_alt
qiskit_alt.project.ensure_init()
import random
from timeit import timeit
Main = qiskit_alt.project.julia.Main
QuantumOps = qiskit_alt.project.simple_import("QuantumOps")
from qiskit_alt.pauli_operators import PauliSum_to_SparsePauliOp
random.seed(123)
def rand_label(k, n):
return ["".join(random.choices("IXYZ", k=k)) for _ in range(n)]
def run_benchmarks():
qkalt_times = []
for k in (10, 100):
for n in (10, 100, 1000, 5000, 10_000, 100_000):
label = rand_label(k, n)
if qiskit_alt.project._calljulia_name == 'juliacall':
label = Main.pyconvert_list(Main.String, label)
PauliSum_to_SparsePauliOp(QuantumOps.PauliSum(label))
number = 20
t = timeit(lambda: PauliSum_to_SparsePauliOp(QuantumOps.PauliSum(label)), number=number)
t = t * 1000 / number
qkalt_times.append(t)
print(f'k={k}, n={n}, {t} ms')
return qkalt_times
if __name__ == '__main__':
qkalt_times = run_benchmarks()
```
#### File: qiskit-alt/docker_tests/run_dockerfile.py
```python
r"""
Build the qiskit-alt Docker test images and run the test containers via the docker CLI.
"""
import subprocess as sb_pr
import fire
def subprocess_execute(command_list):
"""Subprocess_execute executes the command on host OS,
then dumps the output to STDOUT.
Arguments:
command_list: This is a list of string making the command to be executed.
"""
sb_pr.run(command_list, text=True, check=True)
def action_build(image_name, image_tag, dockerfile_name, docker_path):
"""The function action_build builds the image
Arguments:
image_name: Name of the image file.
image_tag: Tag of the build image file.
dockerfile_name: This is the Dockerfile to be used for building the image.
docker_path: working directory of docker.
"""
image_name_with_tag = image_name + ":" + image_tag
build_command_list = [
"docker",
"build",
"-t",
image_name_with_tag,
"-f",
dockerfile_name,
docker_path,
]
return build_command_list
def action_run(image_name, image_tag, user_name, test_file_path, test_file_name):
"""The function action_run runs the container and initiates the tests.
Arguments:
image_name: Name of image to be used to build the containers.
        image_tag: The tag of the images to be used to build the containers.
test_file_path: Path of the test file from which tests has to run.
test_file_name: Name of the file containing the tests to be done.
"""
image_name_with_tag = image_name + ":" + image_tag
run_command_list = [
"docker",
"run",
"-it",
image_name_with_tag,
"/usr/bin/su",
"-l",
user_name,
"-c",
"cd qiskit_alt; sh " + test_file_path + test_file_name,
]
return run_command_list
def action_get_into_fish(image_name, image_tag, user_name):
"""The function action_get_into_fish takes into the fish shell running in the container.
Arguments:
image_name: Name of the image to be used to build the container.
image_tag: The tag of the image which is used to build the container.
user_name: The user name which logins into the container.
"""
image_name_with_tag = image_name + ":" + image_tag
get_fish_command_list = [
"docker",
"run",
"-it",
image_name_with_tag,
"/usr/bin/su",
"-l",
user_name,
"-s",
"/usr/bin/fish",
]
return get_fish_command_list
def action_get_into_bash(image_name, image_tag, user_name):
"""The function action_get_into_bash takes into the bash shell running in the container.
Arguments:
image_name: Name of the image to be used to build the container.
image_tag: The tag of the image which is used to build the container.
user_name: The user name which logins into the container.
"""
image_name_with_tag = image_name + ":" + image_tag
get_bash_command_list = [
"docker",
"run",
"-it",
image_name_with_tag,
"/usr/bin/su",
"-l",
user_name,
"-s",
"/usr/bin/bash",
]
return get_bash_command_list
def action_get_into_rootfish(image_name, image_tag):
"""The function action_get_into_rootfish takes into the fish shell
running in the container as root.
Arguments:
image_name: Name of the image to be used to build the container.
image_tag: The tag of the image which is used to build the container.
user_name: The user name which logins into the container.
"""
image_name_with_tag = image_name + ":" + image_tag
get_rootfish_command_list = [
"docker",
"run",
"-it",
image_name_with_tag,
"/usr/bin/fish",
]
return get_rootfish_command_list
def _cli(
action="",
image_name="qiskit_alt",
image_tag="latest",
dockerfile_name="Dockerfile",
user_name="quser",
test_file_path="./",
test_file_name="run_init_tests.sh",
docker_path="..",
dry_run="false",
):
"""All the arguments of this function are supposed to be passed as command line
arguments while initiating the python script.
Arguments:
action: This are the possible actions to be performed.
Possible actions are:
build: To build the containers
Example: ./run_dockerfile.py --action=build
run: To run the containers
"": To build and then to run the containers.
get_into_fish: To get into the fish shell running in the container.
get_into_bash: To get into the bash shell running in the container.
get_into_rootfish: To get into the fish shell running in the container
as root.
image_name: The name of the image to be build.
image_tag: The tag given to the image to be build.
dockerfile_name: The name of the Dockerfile to be used for building the image.
user_name: A username in the container.
test_file_path: The path to the test file which contains all the tests to run.
docker_path: The working directory for docker.
dry_run: Either true or false. If true, then only print action, but don't execute it.
"""
if dry_run == "false":
_dry_run = False
elif dry_run == "true":
_dry_run = True
else:
print("dry_run must be either true or false. See ./run_dockerfile.py --help")
return
command_lists = []
if action == "build":
command_lists.append(action_build(image_name, image_tag, dockerfile_name, docker_path))
elif action == "run":
command_lists.append(action_run(image_name, image_tag, user_name, test_file_path, test_file_name))
elif action == "":
command_lists.append(action_build(image_name, image_tag, dockerfile_name, docker_path))
command_lists.append(action_run(image_name, image_tag, user_name, test_file_path, test_file_name))
elif action == "get_into_fish":
command_lists.append(action_get_into_fish(image_name, image_tag, user_name))
elif action == "get_into_bash":
command_lists.append(action_get_into_bash(image_name, image_tag, user_name))
elif action == "get_into_rootfish":
command_lists.append(action_get_into_rootfish(image_name, image_tag))
else:
print("Bad arguments, See ./run_dockerfile.py --help")
for command_list in command_lists:
command_string = " ".join(map(str, command_list))
print(command_string + "\n")
if not _dry_run:
subprocess_execute(command_list)
if __name__ == "__main__":
fire.Fire(_cli)
```
#### File: qiskit-alt/test_juliacall/basic_juliacall_test.py
```python
import pytest
import qiskit_alt
project = qiskit_alt.project
project.ensure_init(calljulia="juliacall")
def test_always_passes():
assert True
def test_interface_lib():
assert qiskit_alt.project.julia.__name__ == 'juliacall'
def test_Main():
Main = qiskit_alt.project.julia.Main
assert Main.sind(90) == 1.0
``` |
{
"source": "jlapeyre/qiskit-aqua",
"score": 3
} |
#### File: optimization/algorithms/optimization_algorithm.py
```python
from abc import ABC, abstractmethod
from enum import Enum
from typing import Any, Optional
from ..problems.quadratic_program import QuadraticProgram
class OptimizationAlgorithm(ABC):
"""An abstract class for optimization algorithms in Qiskit's optimization module."""
@abstractmethod
def get_compatibility_msg(self, problem: QuadraticProgram) -> str:
"""Checks whether a given problem can be solved with the optimizer implementing this method.
Args:
problem: The optimization problem to check compatibility.
Returns:
Returns the incompatibility message. If the message is empty no issues were found.
"""
def is_compatible(self, problem: QuadraticProgram) -> bool:
"""Checks whether a given problem can be solved with the optimizer implementing this method.
Args:
problem: The optimization problem to check compatibility.
Returns:
Returns True if the problem is compatible, False otherwise.
"""
return len(self.get_compatibility_msg(problem)) == 0
@abstractmethod
def solve(self, problem: QuadraticProgram) -> 'OptimizationResult':
"""Tries to solves the given problem using the optimizer.
Runs the optimizer to try to solve the optimization problem.
Args:
problem: The problem to be solved.
Returns:
The result of the optimizer applied to the problem.
Raises:
QiskitOptimizationError: If the problem is incompatible with the optimizer.
"""
raise NotImplementedError
class OptimizationResultStatus(Enum):
"""Feasible values for the termination status of an optimization algorithm."""
SUCCESS = 0
FAILURE = 1
INFEASIBLE = 2
class OptimizationResult:
"""The optimization result class.
The optimization algorithms return an object of the type `OptimizationResult`, which enforces
providing the following attributes.
Attributes:
x: The optimal value found in the optimization algorithm.
fval: The function value corresponding to the optimal value.
results: The original results object returned from the optimization algorithm. This can
contain more information than only the optimal value and function value.
status: The termination status of the algorithm.
"""
Status = OptimizationResultStatus
def __init__(self, x: Optional[Any] = None, fval: Optional[Any] = None,
results: Optional[Any] = None,
status: OptimizationResultStatus = OptimizationResultStatus.SUCCESS) -> None:
self._val = x
self._fval = fval
self._results = results
self._status = status
def __repr__(self):
return '([{}] / {} / {})'.format(','.join([str(x_) for x_ in self.x]), self.fval,
self.status)
def __str__(self):
return 'x=[{}], fval={}'.format(','.join([str(x_) for x_ in self.x]), self.fval)
@property
def x(self) -> Any:
"""Returns the optimal value found in the optimization.
Returns:
The optimal value found in the optimization.
"""
return self._val
@property
def fval(self) -> Any:
"""Returns the optimal function value.
Returns:
The function value corresponding to the optimal value found in the optimization.
"""
return self._fval
@property
def results(self) -> Any:
"""Return the original results object from the algorithm.
Currently a dump for any leftovers.
Returns:
Additional result information of the optimization algorithm.
"""
return self._results
@property
def status(self) -> OptimizationResultStatus:
"""Return the termination status of the algorithm.
Returns:
The termination status of the algorithm.
"""
return self._status
@x.setter # type: ignore
def x(self, x: Any) -> None:
"""Set a new optimal value.
Args:
x: The new optimal value.
"""
self._val = x
@fval.setter # type: ignore
def fval(self, fval: Any) -> None:
"""Set a new optimal function value.
Args:
fval: The new optimal function value.
"""
self._fval = fval
@results.setter # type: ignore
def results(self, results: Any) -> None:
"""Set results.
Args:
results: The new additional results of the optimization.
"""
self._results = results
@status.setter # type: ignore
def status(self, status: OptimizationResultStatus) -> None:
"""Set a new termination status.
Args:
status: The new termination status.
"""
self._status = status
``` |
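A standalone sketch of the status/result pattern defined above. The class names mirror the originals, but this is a cut-down re-creation for illustration, not the qiskit types themselves.
```python
from enum import Enum
from typing import Any, Optional

class OptimizationResultStatus(Enum):
    SUCCESS = 0
    FAILURE = 1
    INFEASIBLE = 2

class OptimizationResult:
    def __init__(self, x: Optional[Any] = None, fval: Optional[Any] = None,
                 status: OptimizationResultStatus = OptimizationResultStatus.SUCCESS) -> None:
        self.x = x
        self.fval = fval
        self.status = status

result = OptimizationResult(x=[0, 1], fval=3.0)
print(result.x, result.fval, result.status is OptimizationResultStatus.SUCCESS)
```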
{
"source": "Jlaqueyrie/Bornes-d-faut",
"score": 3
} |
#### File: Jlaqueyrie/Bornes-d-faut/_main_.py
```python
from tkinter import Tk
from tkinter import Label
from tkinter import OptionMenu
from tkinter import Button
from tkinter import Radiobutton
from tkinter import StringVar
from tkinter import LabelFrame
from tkinter import Entry
from Fonction import sauvDonnee, testLigne, testSfc
from tkinter import messagebox
from tkinter import N, E, W, S
from tkinter import ACTIVE, DISABLED
class MyFirstGUI():
def __init__(self, master):
self.master = master
"----------------------------Constant---------------------------------"
Classeur = r'C:\Users\jlaqueyr\Documents\MyNoteBook\ScriptPython\BorneDefaut\Feuille.xlsx'
FeuilleDefaut = r'Liste des saisies'
LBL_Z_INFO = 'Information générale'
LARGEUR_FRAME = 485
TITRE = 'Borne défaut'
DIMENSION_FENETRE = '500x350+30+30'
LBL_SFC = 'SFC'
LBL_OPERATEUR = 'Nom'
LBL_Z_LIGNE = 'Sur quel ligne est apparu le défaut ?'
LBL_Z_DEF = 'Description du défaut'
LBL_Z_DEF_INT = 'Intégration'
LBL_Z_DEF_UFT = 'UFT et CADS'
LBL_Z_DEF_ETI = 'Etiquettes'
LBL_Z_DEF_CHOIX = "Choisir le défaut"
NOM_LIGNE1 = 'Ligne Principale'
NOM_LIGNE2 = 'Ligne Secondaire'
PADX_INFO = 5
PADX_LBL_INFO = 5
PADY_INFO = 20
PADX_BTN_LIGNE = 40
PADY_BTN_LIGNE = 15
PADX_ZN_DEF = 5
PADY_ZN_DEF = 10
MSG_SAUV_TITRE = "Ticket sauvegardé"
MSG_SAUV_CONTENU = "Ticket défaut sauvegardé"
VAL_DEF_CHOIX = 'Choix X'
nomOperateur = StringVar(master)
sfc = StringVar(master)
master.title(TITRE)
master.geometry(DIMENSION_FENETRE)
"---------------------------Fonction----------------------------------"
def callback(*args):
print("tkvar changed")
popupMenu2.configure(state='disable')
popupMenu3.configure(state='disable')
def callback2(*args):
print("tkvar changed 1")
popupMenu1.configure(state='disable')
popupMenu3.configure(state='disable')
def callback3(*args):
print("tkvar changed 2")
popupMenu1.configure(state='disable')
popupMenu2.configure(state='disable')
        def testDefaut(listeDefaut, valDefaut):
            # Return the first selected defect that differs from the placeholder
            # value; fall back to the placeholder if nothing else was chosen.
            defFinal = valDefaut
            for defaut in listeDefaut:
                if defaut != valDefaut:
                    defFinal = defaut
                    break
            return defFinal
def RecupValeur():
ListeInfo = []
print(
self.txtSfc.get(),
self.txtNomOperateur.get(),
Ligne1Var.get(),
Ligne2Var.get(),
self.tkvar.get())
listeLigne = [Ligne1Var.get(), Ligne2Var.get()]
resultatLigne = testLigne(listeLigne, 'off')
resultatSfc, ErrSfc = testSfc(sfc.get())
self.listeDef = [self.tkvar.get(),
self.tkvar1.get(),
self.tkvar2.get()]
resultatDefaut = testDefaut(self.listeDef, VAL_DEF_CHOIX)
ListeInfo = [
resultatSfc,
self.txtNomOperateur.get(),
resultatLigne,
resultatDefaut]
sauvDonnee(Classeur, ListeInfo, FeuilleDefaut)
self.txtSfc.delete(0, 'end')
self.txtNomOperateur.delete(0, 'end')
self.btnLigne1.deselect()
self.btnLigne2.deselect()
popupMenu1.configure(state='active')
popupMenu1.selection_clear()
popupMenu2.configure(state='active')
popupMenu2.selection_clear()
popupMenu3.configure(state='active')
popupMenu3.selection_clear()
messagebox.showinfo(MSG_SAUV_TITRE, MSG_SAUV_CONTENU)
"------------------Information sur le produit-------------------------"
self.ZoneInfoGen = LabelFrame(
master,
text=LBL_Z_INFO,
width=LARGEUR_FRAME,
height=80)
self.ZoneInfoGen.grid(row=0, column=1, sticky=N + S + W + E)
self.ZoneInfoGen.grid_propagate(0)
self.lblSfc = Label(self.ZoneInfoGen, text=LBL_SFC)
self.txtSfc = Entry(self.ZoneInfoGen, textvariable=sfc)
self.txtSfc.focus_set()
self.lblNomOperateur = Label(self.ZoneInfoGen, text=LBL_OPERATEUR)
self.txtNomOperateur = Entry(
self.ZoneInfoGen, textvariable=nomOperateur)
self.lblSfc.grid(row=0, column=1, padx=PADX_LBL_INFO, pady=PADY_INFO)
self.txtSfc.grid(
row=0,
column=2,
ipadx=25,
padx=PADX_INFO,
pady=PADY_INFO)
self.lblNomOperateur.grid(
row=0, column=3, padx=PADX_LBL_INFO, pady=PADY_INFO)
self.txtNomOperateur.grid(
row=0, column=4, ipadx=25, padx=PADX_INFO, pady=PADY_INFO)
"----------Information sur la ligne qui a produit le défaut-----------"
self.ZoneLigne = LabelFrame(
master,
text=LBL_Z_LIGNE,
width=LARGEUR_FRAME,
height=80)
self.ZoneLigne.grid(row=1, column=1)
self.ZoneLigne.grid_propagate(0)
Ligne1Var = StringVar(value="off")
self.btnLigne1 = Radiobutton(
self.ZoneLigne,
text=NOM_LIGNE1,
variable=Ligne1Var,
indicatoron=False,
value="Ligne1")
Ligne2Var = StringVar(value="off")
self.btnLigne2 = Radiobutton(
self.ZoneLigne,
text=NOM_LIGNE2,
variable=Ligne2Var,
indicatoron=False,
value="Ligne2")
self.btnLigne1.grid(
row=0,
column=1,
ipadx=30,
padx=PADX_BTN_LIGNE,
pady=PADY_BTN_LIGNE)
self.btnLigne2.grid(
row=0,
column=3,
ipadx=30,
padx=PADX_BTN_LIGNE,
pady=PADY_BTN_LIGNE)
"------------------Information sur le type de défaut-------------------"
self.ZoneDefaut = LabelFrame(
master,
text=LBL_Z_DEF,
width=LARGEUR_FRAME,
height=130)
self.ZoneDefaut.grid(row=2, column=1, sticky='NW')
self.ZoneDefaut.grid_propagate(0)
self.ZoneDefautInt = LabelFrame(
self.ZoneDefaut,
text=LBL_Z_DEF_INT,
height=80,
width=150)
self.ZoneDefautInt.grid(
row='0',
column='1',
padx=PADX_ZN_DEF,
pady=PADY_ZN_DEF)
self.ZoneDefautInt.grid_propagate(0)
self.tkvar = StringVar(master)
choices = {'Choix 1', 'Choix 2', 'Choix 3', 'Choix 4', 'Choix 5'}
self.tkvar.set(VAL_DEF_CHOIX)
popupMenu1 = OptionMenu(self.ZoneDefautInt, self.tkvar, *choices)
Label(
self.ZoneDefautInt,
text=LBL_Z_DEF_CHOIX).grid(
row=1,
column=1)
popupMenu1.grid(row=2, column=1, ipadx=30)
self.ZoneDefautUFT = LabelFrame(
self.ZoneDefaut,
text=LBL_Z_DEF_UFT,
height=80,
width=150)
self.ZoneDefautUFT.grid(
row='0',
column='2',
padx=PADX_ZN_DEF,
pady=PADY_ZN_DEF)
self.ZoneDefautUFT.grid_propagate(0)
self.tkvar1 = StringVar(master)
choices = {'Choix 1', 'Choix 2', 'Choix 3', 'Choix 4', 'Choix 5'}
self.tkvar1.set(VAL_DEF_CHOIX)
popupMenu2 = OptionMenu(self.ZoneDefautUFT, self.tkvar1, *choices)
Label(
self.ZoneDefautUFT,
text=LBL_Z_DEF_CHOIX).grid(
row=1,
column=1)
popupMenu2.grid(row=2, column=1, ipadx=30)
self.ZoneDefautEti = LabelFrame(
self.ZoneDefaut,
text=LBL_Z_DEF_ETI,
height=80,
width=150)
self.ZoneDefautEti.grid(
row='0',
column='3',
padx=PADX_ZN_DEF,
pady=PADY_ZN_DEF)
self.ZoneDefautEti.grid_propagate(0)
self.tkvar2 = StringVar(master)
choices = {'Choix 1', 'Choix 2', 'Choix 3', 'Choix 4', 'Choix 5'}
self.tkvar2.set(VAL_DEF_CHOIX)
popupMenu3 = OptionMenu(self.ZoneDefautEti, self.tkvar2, *choices)
Label(
self.ZoneDefautEti,
text=LBL_Z_DEF_CHOIX).grid(
row=1,
column=1)
popupMenu3.grid(row=2, column=1, ipadx=30)
self.btnValider = Button(
master,
text="Valider",
relief="raised",
command=RecupValeur)
self.btnValider.grid(row=3, column=1)
self.tkvar.trace("w", callback)
self.tkvar1.trace('w', callback2)
self.tkvar2.trace('w', callback3)
def main():
root = Tk()
my_gui = MyFirstGUI(root)
root.mainloop()
if __name__ == '__main__':
main()
``` |
{
"source": "jlararigger/jlr_copy_deformer_weights",
"score": 2
} |
#### File: jlararigger/jlr_copy_deformer_weights/jlr_copy_deformer_weights_UI.py
```python
from PySide2 import QtCore, QtWidgets
from shiboken2 import wrapInstance
from maya import OpenMayaUI
import pymel.core as pm
class CopyDeformerWeightsUI(object):
"""
CopyDeformerWeights Class
"""
    def __init__(self):
        """
        Create the CopyDeformerWeights UI
        """
        self.transfer_function = None
self.dialog = QtWidgets.QDialog()
self.dialog_name = "copy_deformer_weights_dialog"
self.delete_instances()
self.maya_main_window_ptr = OpenMayaUI.MQtUtil.mainWindow()
self.maya_main_window = wrapInstance(int(self.maya_main_window_ptr), QtWidgets.QMainWindow)
self.dialog.setParent(self.maya_main_window)
self.dialog.setWindowFlags(QtCore.Qt.Window)
self.dialog.setObjectName(self.dialog_name)
# self.dialog.setFixedSize(400, 500)
self.dialog.setWindowTitle("Copy Deformer Weights")
self.dialog_layout = QtWidgets.QGridLayout(self.dialog)
self.dialog_layout.setObjectName("dialog_layout")
self.main_layout = QtWidgets.QVBoxLayout()
self.main_layout.setObjectName("main_layout")
self.source_group_box = QtWidgets.QGroupBox(self.dialog)
self.source_group_box.setObjectName("source_group_box")
self.source_group_box.setTitle("Source")
self.source_gb_layout = QtWidgets.QGridLayout(self.source_group_box)
self.source_gb_layout.setObjectName("source_gb_layout")
self.source_layout = QtWidgets.QVBoxLayout()
self.source_layout.setObjectName("source_layout")
self.btn_source = QtWidgets.QPushButton(self.source_group_box)
self.btn_source.setObjectName("btn_source")
self.btn_source.setText("Get Source")
self.btn_source.clicked.connect(lambda: self.get_source_items())
self.source_layout.addWidget(self.btn_source)
self.source_list_layout = QtWidgets.QHBoxLayout()
self.source_list_layout.setObjectName("source_list_layout")
self.object_source_list = QtWidgets.QListWidget(self.source_group_box)
self.object_source_list.setObjectName("object_source_list")
self.object_source_list.currentItemChanged.connect(lambda: self.update_source_deformer_list())
self.source_list_layout.addWidget(self.object_source_list)
self.deformer_source_tree = QtWidgets.QTreeWidget(self.source_group_box)
self.deformer_source_tree.setObjectName("deformer_source_tree")
self.deformer_source_tree.setHeaderHidden(True)
self.source_list_layout.addWidget(self.deformer_source_tree)
self.source_layout.addLayout(self.source_list_layout)
self.source_gb_layout.addLayout(self.source_layout, 0, 0, 1, 1)
self.main_layout.addWidget(self.source_group_box)
self.target_group_box = QtWidgets.QGroupBox(self.dialog)
self.target_group_box.setObjectName("target_group_box")
self.target_group_box.setTitle("Target")
self.target_gb_layout = QtWidgets.QGridLayout(self.target_group_box)
self.target_gb_layout.setObjectName("target_gb_layout")
self.target_layout = QtWidgets.QVBoxLayout()
self.target_layout.setObjectName("target_layout")
self.btn_target = QtWidgets.QPushButton(self.target_group_box)
self.btn_target.setObjectName("btn_target")
self.btn_target.setText("Get Target")
self.btn_target.clicked.connect(lambda: self.get_target_items())
self.target_layout.addWidget(self.btn_target)
self.target_list_layout = QtWidgets.QHBoxLayout()
self.target_list_layout.setObjectName("target_list_layout")
self.object_target_list = QtWidgets.QListWidget(self.target_group_box)
self.object_target_list.setObjectName("object_target_list")
self.object_target_list.currentItemChanged.connect(lambda: self.update_target_deformer_list())
self.target_list_layout.addWidget(self.object_target_list)
self.deformer_target_list = QtWidgets.QListWidget(self.target_group_box)
self.deformer_target_list.setObjectName("deformer_target_list")
self.target_list_layout.addWidget(self.deformer_target_list)
self.target_layout.addLayout(self.target_list_layout)
self.target_gb_layout.addLayout(self.target_layout, 0, 0, 1, 1)
self.main_layout.addWidget(self.target_group_box)
self.progress_bar_layout = QtWidgets.QVBoxLayout()
self.progress_bar_layout.setObjectName("progress_bar_layout")
self.progress_bar = QtWidgets.QProgressBar(self.dialog)
self.progress_bar.setProperty("value", -1)
self.progress_bar.setTextVisible(False)
self.progress_bar.setObjectName("progress_bar")
self.progress_bar_layout.addWidget(self.progress_bar)
self.progress_label = QtWidgets.QLabel(self.dialog)
self.progress_label.setObjectName("progress_label")
self.progress_label.setMinimumSize(QtCore.QSize(0, 21))
self.progress_label.setAlignment(QtCore.Qt.AlignCenter)
self.progress_bar_layout.addWidget(self.progress_label)
self.main_layout.addLayout(self.progress_bar_layout)
self._progress_bar_steps = 8
self._progress_bar_value = -1
self.buttons_group_box = QtWidgets.QGroupBox(self.dialog)
self.buttons_group_box.setTitle("")
self.buttons_group_box.setObjectName("buttons_group_box")
self.buttons_gb_layout = QtWidgets.QHBoxLayout(self.buttons_group_box)
self.buttons_gb_layout.setObjectName("buttons_gb_layout")
self.copy_button = QtWidgets.QPushButton(self.buttons_group_box)
self.copy_button.setObjectName("copy_button")
self.copy_button.setText("Copy")
self.copy_button.clicked.connect(lambda: self.copy_deformer_weights())
self.buttons_gb_layout.addWidget(self.copy_button)
self.cancel_button = QtWidgets.QPushButton(self.buttons_group_box)
self.cancel_button.setObjectName("cancel_button")
self.cancel_button.setText("Cancel")
self.cancel_button.clicked.connect(lambda: self.delete_instances())
self.buttons_gb_layout.addWidget(self.cancel_button)
self.main_layout.addWidget(self.buttons_group_box)
self.dialog_layout.addLayout(self.main_layout, 0, 0, 1, 1)
@property
def progress_bar_steps(self):
return self._progress_bar_steps
@progress_bar_steps.setter
def progress_bar_steps(self, value):
        assert isinstance(value, int), "Progress Bar Steps must be an integer"
self._progress_bar_steps = value
@property
def progress_bar_value(self):
return self._progress_bar_value
@progress_bar_value.setter
def progress_bar_value(self, value):
        assert isinstance(value, (int, float)), "Progress Bar Value must be an integer or float"
self._progress_bar_value = value
def progress_bar_init(self):
"""
Hide the label and initialize the progress bar.
"""
self.progress_label.hide()
self.progress_bar_value = 0
self.progress_bar.show()
self.progress_bar_layout.update()
def progress_bar_next(self):
"""
Update the progress bar value.
"""
self.progress_bar_value += 1
self.progress_bar.setValue((100.0 / self.progress_bar_steps) * self.progress_bar_value)
def progress_bar_ends(self, message):
"""
        Closes the progress bar and shows the label with a message.
"""
self.progress_bar.hide()
self.progress_label.show()
self.progress_label.setText(message)
def get_source_items(self):
"""
Gets the selected objects in the scene and fills the source lists.
"""
if pm.selected():
self.update_source_list(pm.selected())
self.update_source_deformer_list()
def update_source_list(self, l_items):
"""
Fills the source list widget.
:param l_items: list with the name of source objects.
"""
if l_items:
self.populate_list_widget(self.object_source_list, l_items)
def update_source_deformer_list(self):
"""
Fills the list of deformers according to the selected object in the source object list.
"""
item = pm.PyNode(self.object_source_list.currentItem().text())
self.populate_tree_widget(self.deformer_source_tree, self.get_deformer_list(item))
def get_target_items(self):
"""
Gets the selected objects in the scene and fills the target lists.
"""
if pm.selected():
self.update_target_list(pm.selected())
self.update_target_deformer_list()
def update_target_list(self, l_items):
"""
Updates the target list widget.
:param l_items: list with the name of target objects.
"""
if l_items:
self.populate_list_widget(self.object_target_list, l_items)
def update_target_deformer_list(self):
"""
Fills the list of deformers according to the selected object in the target object list.
"""
item = pm.PyNode(self.object_target_list.currentItem().text())
self.populate_list_widget(self.deformer_target_list, self.get_deformer_list(item))
@staticmethod
def get_deformer_list(item):
"""
        Returns a list with the deformers connected to an object.
:param item: PyNode with shapes
:return: list
"""
deformer_types = ["ffd", "wire", "cluster", "softMod", "deltaMush", "textureDeformer", "nonLinear"]
if pm.objExists(item):
deformer_list = list()
for shape in item.getShapes():
deformer_list.extend(pm.listHistory(shape, ha=1, il=1, pdo=1))
valid_deformers = list()
for deformer in deformer_list:
if deformer.type() in deformer_types:
valid_deformers.append(deformer)
return valid_deformers
else:
return list()
@staticmethod
def populate_tree_widget(tree_widget, l_items):
"""
        Fills a QTreeWidget with the passed list.
        :param tree_widget: QTreeWidget
:param l_items: list of PyNodes.
"""
tree_widget.blockSignals(True)
tree_widget.clear()
for item in l_items:
tree_widget_item = QtWidgets.QTreeWidgetItem()
tree_widget_item.setText(0, item.nodeName())
tree_widget_item.setWhatsThis(0, "deformer")
tree_widget.addTopLevelItem(tree_widget_item)
tree_widget.blockSignals(False)
@staticmethod
def populate_list_widget(list_widget, l_items):
"""
Fills a QListWidget with the passed list.
:param list_widget: QListWidget
:param l_items: list of PyNodes.
"""
list_widget.blockSignals(True)
list_widget.clear()
for item in l_items:
list_widget_item = QtWidgets.QListWidgetItem()
list_widget_item.setText(item.nodeName())
list_widget.addItem(list_widget_item)
list_widget.setCurrentRow(0)
list_widget.blockSignals(False)
def copy_deformer_weights(self):
"""
        Checks if the selected items are a valid selection and calls the copy function.
"""
        assert self.transfer_function is not None, "The transfer_function attribute must contain a transfer function."
geo_source = self.object_source_list.currentItem()
geo_target = self.object_target_list.currentItem()
deformer_source = self.deformer_source_tree.currentItem()
deformer_target = self.deformer_target_list.currentItem()
if geo_source and geo_target and deformer_source and deformer_target:
data = {"geo_source": pm.PyNode(geo_source.text()),
"geo_target": pm.PyNode(geo_target.text()),
"deformer_source": pm.PyNode(deformer_source.text(0)),
"deformer_target": pm.PyNode(deformer_target.text()),
"surface_association": "closestPoint",
"interface": self,
}
self.transfer_function(**data)
def show(self):
"""
Shows the CopyDeformerWeights UI.
        If items are already selected in the scene, the first selected item will be loaded as the source object
        and the remaining objects will be loaded as target objects.
"""
self.dialog.show()
self.progress_bar.hide()
self.progress_label.setText("Select the objects and its deformers")
self.progress_bar_layout.update()
if pm.selected():
self.update_source_list(pm.selected()[0:1])
self.update_source_deformer_list()
if len(pm.selected()) > 1:
self.update_target_list(pm.selected()[1:])
self.update_target_deformer_list()
def delete_instances(self):
"""
Deletes the UI
"""
if pm.window(self.dialog_name, exists=True):
pm.deleteUI(self.dialog_name)
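# Editor's usage sketch (not part of the original module). The class expects a
# transfer function to be assigned before "Copy" is pressed; "my_transfer" is a
# hypothetical callable whose keyword arguments mirror the data dict built in
# copy_deformer_weights().
#
#   def my_transfer(geo_source, geo_target, deformer_source, deformer_target,
#                   surface_association, interface, **kwargs):
#       interface.progress_bar_init()
#       # ... copy weights from deformer_source onto deformer_target ...
#       interface.progress_bar_ends("Weights copied")
#
#   ui = CopyDeformerWeightsUI()
#   ui.transfer_function = my_transfer
#   ui.show()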
``` |
{
"source": "jlareck/databus-client",
"score": 2
} |
#### File: python/scripts/redeploy_from_endpoint.py
```python
from SPARQLWrapper import SPARQLWrapper, JSON
from databusclient.databus_client import DatabusFile, DatabusGroup, DatabusVersionMetadata, DatabusVersion, deploy_to_dev_databus
from databusclient.cli import parse_cv_string
from datetime import datetime
endpoint = "https://databus.dbpedia.org/repo/sparql"
db_base = "http://localhost:3000"
def redeploy_groups():
query = """
PREFIX dataid: <http://dataid.dbpedia.org/ns/core#>
PREFIX dct: <http://purl.org/dc/terms/>
PREFIX dcat: <http://www.w3.org/ns/dcat#>
PREFIX db: <https://databus.dbpedia.org/>
PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX databus: <https://databus.dbpedia.org/>
SELECT DISTINCT ?group ?groupdocu WHERE {
?dataset dataid:account databus:ontologies .
?dataset dataid:group ?group .
?dataset dataid:groupdocu ?groupdocu .
}
"""
sparql_wrapper = SPARQLWrapper(endpoint)
sparql_wrapper.setReturnFormat(JSON)
sparql_wrapper.setQuery(query)
result = sparql_wrapper.queryAndConvert()
all_groups = []
for binding in result["results"]["bindings"]:
group_uri = binding["group"]["value"]
group = group_uri.split("/")[-1]
docu = binding["groupdocu"]["value"]
        all_groups.append(DatabusGroup("denis", group, f"Title for {group_uri}", f"Label for {group_uri}", f"This is the comment for {group_uri}", f"This is the abstract for {group_uri}", docu, DATABUS_BASE=db_base))
deploy_to_dev_databus("ff7d6b48-86b8-4760-ad02-9ef5de2608d9", *all_groups)
def redeploy_versions():
query = """
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX databus: <https://databus.dbpedia.org/>
PREFIX dataid: <http://dataid.dbpedia.org/ns/core#>
PREFIX dataid-cv: <http://dataid.dbpedia.org/ns/cv#>
PREFIX dct: <http://purl.org/dc/terms/>
PREFIX dcat: <http://www.w3.org/ns/dcat#>
PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
SELECT DISTINCT ?group ?art ?version ?title ?publisher ?comment ?description ?license ?file ?extension ?type ?bytes ?shasum (group_concat(?kvstr; separator="|") as ?cvs) WHERE {
SELECT DISTINCT ?group ?art ?version ?title ?publisher ?comment ?description ?license ?file ?extension ?type ?bytes ?shasum ?kvstr WHERE {
?dataset dataid:account databus:ontologies .
?dataset dataid:group ?group .
?dataset dataid:artifact ?art.
?dataset dcat:distribution ?distribution .
?dataset dct:license ?license .
?dataset dct:publisher ?publisher .
?dataset rdfs:comment ?comment .
?dataset dct:description ?description .
?dataset dct:title ?title .
?distribution dcat:downloadURL ?file .
?distribution dataid:formatExtension ?extension .
?distribution dataid-cv:type ?type .
?distribution dcat:byteSize ?bytes .
?distribution dataid:sha256sum ?shasum .
?dataset dct:hasVersion ?version .
# get cvs as concat string
?dataset dcat:distribution ?distribution .
?distribution dataid:contentVariant ?value .
?distribution ?key ?value .
?key rdfs:subPropertyOf dataid:contentVariant .
BIND(CONCAT(str(REPLACE (str(?key), "http://dataid.dbpedia.org/ns/cv#", "")), "=", str(?value)) as ?kvstr)
# Excludes dev versions
FILTER (!regex(?art, "--DEV"))
# exclude some stuff since content variants are hard
MINUS { ?distribution dataid:contentVariant 'sorted'^^xsd:string . }
# MINUS { ?distribution dataid:contentVariant 'NONE'^^xsd:string}
# MINUS { ?distribution dataid:contentVariant 'goodLicense'^^xsd:string}
# MINUS { ?distribution dataid:contentVariant 'lodeMetadata'^^xsd:string}
# MINUS { ?distribution dataid:contentVariant 'old'^^xsd:string}
}
} GROUP BY ?group ?art ?version ?title ?publisher ?comment ?description ?license ?file ?extension ?type ?bytes ?shasum ORDER BY ?version
"""
sparql_wrapper = SPARQLWrapper(endpoint)
sparql_wrapper.setReturnFormat(JSON)
sparql_wrapper.setQuery(query)
print("Start querying....")
result = sparql_wrapper.queryAndConvert()
result_map = {}
issued = datetime.now()
for binding in result["results"]["bindings"]:
group = binding["group"]["value"].split("/")[-1]
art = binding["art"]["value"].split("/")[-1]
version = binding["version"]["value"]
dl_url = binding["file"]["value"]
file_ext = binding["extension"]["value"]
# set fileext for unknown files
if file_ext == "":
file_ext = dl_url[dl_url.rfind(".")+1:]
file_type = binding["type"]["value"]
bytesize = binding["bytes"]["value"]
shasum = binding["shasum"]["value"]
cvs_string = binding["cvs"]["value"]
publisher = "http://localhost:3000/denis#this"
title = binding["title"]["value"]
comment = binding["comment"]["value"]
description = binding["description"]["value"]
version_license = binding["license"]["value"]
version_metadata = DatabusVersionMetadata("denis", group, art, version, title, title, publisher, comment, description, description, version_license, issued=issued, DATABUS_BASE=db_base)
cv_map = parse_cv_string(cvs_string)
databus_files = result_map.get(version_metadata, []) + [DatabusFile(dl_url, cv_map, file_ext, shasum=shasum, content_length=bytesize)]
result_map[version_metadata] = databus_files
versions = []
for metadata, dbfiles in result_map.items():
versions.append(DatabusVersion(metadata, dbfiles))
for version in versions:
print(version.metadata.group, version.metadata.artifact, version.metadata.version, len(version.databus_files))
print(f"Going to deploy {len(versions)} versions...")
deploy_to_dev_databus("ff7d6b48-86b8-4760-ad02-9ef5de2608d9", *versions)
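# Editor's note (not part of the original script): the GROUP_CONCAT in the
# version query encodes content variants as "key1=value1|key2=value2" strings;
# parse_cv_string() is assumed to invert that encoding, e.g.
#   parse_cv_string("type=parsed|lang=en")  ->  {"type": "parsed", "lang": "en"}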
def main():
redeploy_versions()
if __name__ == "__main__":
main()
``` |
{
"source": "jlarkin21/parking-garage-python",
"score": 3
} |
#### File: parking-garage-python/test/test_4_garage_premium_parking.py
```python
from typing import List
from garage.garage import Garage
from garage.parking_level import ParkingLevel
from garage.parking_space import ParkingSpace
from garage.vehicle import Vehicle
from garage.permit import Permit
from garage.vehicle_type import VehicleType
from test.utils import TestHelpers
def test_vehicles_without_premium_permits_are_rejected_from_premium_parking_spaces():
parking_space_a = ParkingSpace(required_permit=Permit.PREMIUM)
parking_space_b = ParkingSpace(required_permit=Permit.PREMIUM)
parking_space_c = ParkingSpace(required_permit=Permit.PREMIUM)
parking_space_d = ParkingSpace(required_permit=Permit.PREMIUM)
parking_space_e = ParkingSpace(required_permit=Permit.PREMIUM)
parking_space_f = ParkingSpace(required_permit=Permit.PREMIUM)
parking_level_1 = ParkingLevel(spaces=[parking_space_a, parking_space_b])
parking_level_2 = ParkingLevel(spaces=[parking_space_c, parking_space_d])
parking_level_3 = ParkingLevel(spaces=[parking_space_e, parking_space_f])
garage = Garage(levels=[parking_level_1, parking_level_2, parking_level_3])
vehicle_1 = Vehicle(permit=Permit.PREMIUM)
vehicle_2 = Vehicle(permit=Permit.NONE)
vehicle_3 = Vehicle(permit=Permit.NONE)
vehicle_4 = Vehicle(permit=Permit.PREMIUM)
vehicle_5 = Vehicle(permit=Permit.DISABILITY)
vehicle_6 = Vehicle(permit=Permit.NONE)
expected_vehicles_rejected: List[Vehicle] = [
vehicle_2,
vehicle_3,
vehicle_5,
vehicle_6,
]
actual_vehicles_rejected = garage.add_vehicles(
[vehicle_1, vehicle_2, vehicle_3, vehicle_4, vehicle_5, vehicle_6]
)
TestHelpers.assert_expected_vehicles_are_rejected(
actual=actual_vehicles_rejected, expected=expected_vehicles_rejected
)
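# Editor's note (not part of the original tests): the tests below combine
# permits with the bitwise OR operator (e.g. Permit.DISABILITY | Permit.PREMIUM),
# which suggests Permit is a flag-style enum along these (assumed) lines:
#
#   class Permit(Flag):
#       NONE = 0
#       DISABILITY = auto()
#       PREMIUM = auto()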
def test_vehicles_with_premium_permits_are_added_to_premium_parking_spaces():
parking_space_a = ParkingSpace(required_permit=Permit.PREMIUM)
parking_space_b = ParkingSpace()
parking_space_c = ParkingSpace(required_permit=Permit.PREMIUM)
parking_space_d = ParkingSpace(required_permit=Permit.PREMIUM)
parking_space_e = ParkingSpace()
parking_space_f = ParkingSpace(required_permit=Permit.PREMIUM)
parking_level_1 = ParkingLevel(spaces=[parking_space_a, parking_space_b])
parking_level_2 = ParkingLevel(spaces=[parking_space_c, parking_space_d])
parking_level_3 = ParkingLevel(spaces=[parking_space_e, parking_space_f])
garage = Garage(levels=[parking_level_1, parking_level_2, parking_level_3])
vehicle_1 = Vehicle()
vehicle_2 = Vehicle(permit=Permit.DISABILITY | Permit.PREMIUM)
vehicle_3 = Vehicle(permit=Permit.PREMIUM)
vehicle_4 = Vehicle()
vehicle_5 = Vehicle()
vehicle_6 = Vehicle(permit=Permit.PREMIUM)
expected_vehicles_on_level_1: List[Vehicle] = [vehicle_2, vehicle_1]
expected_vehicles_on_level_2: List[Vehicle] = [vehicle_3, vehicle_6]
expected_vehicles_on_level_3: List[Vehicle] = [vehicle_4, None]
garage.add_vehicles(
[vehicle_1, vehicle_2, vehicle_3, vehicle_4, vehicle_5, vehicle_6]
)
TestHelpers.assert_expected_vehicles_on_levels(
levels=garage.levels,
expected_vehicles=[
expected_vehicles_on_level_1,
expected_vehicles_on_level_2,
expected_vehicles_on_level_3,
],
)
def test_vehicles_with_premium_permits_take_priority_over_non_permitted_spaces():
parking_space_a = ParkingSpace(required_permit=Permit.PREMIUM)
parking_space_b = ParkingSpace()
parking_space_c = ParkingSpace()
parking_space_d = ParkingSpace(required_permit=Permit.PREMIUM)
parking_space_e = ParkingSpace()
parking_space_f = ParkingSpace()
parking_level_1 = ParkingLevel(spaces=[parking_space_a, parking_space_b])
parking_level_2 = ParkingLevel(spaces=[parking_space_c, parking_space_d])
parking_level_3 = ParkingLevel(spaces=[parking_space_e, parking_space_f])
garage = Garage(levels=[parking_level_1, parking_level_2, parking_level_3])
vehicle_1 = Vehicle()
vehicle_2 = Vehicle(permit=Permit.PREMIUM)
vehicle_3 = Vehicle(permit=Permit.PREMIUM)
vehicle_4 = Vehicle()
vehicle_5 = Vehicle(permit=Permit.PREMIUM)
vehicle_6 = Vehicle(permit=Permit.PREMIUM)
vehicle_7 = Vehicle(permit=Permit.PREMIUM)
expected_vehicles_on_level_1: List[Vehicle] = [vehicle_2, vehicle_5]
expected_vehicles_on_level_2: List[Vehicle] = [vehicle_6, vehicle_3]
expected_vehicles_on_level_3: List[Vehicle] = [vehicle_7, vehicle_1]
garage.add_vehicles(
[vehicle_1, vehicle_2, vehicle_3, vehicle_4, vehicle_5, vehicle_6, vehicle_7]
)
TestHelpers.assert_expected_vehicles_on_levels(
levels=garage.levels,
expected_vehicles=[
expected_vehicles_on_level_1,
expected_vehicles_on_level_2,
expected_vehicles_on_level_3,
],
)
def test_vehicles_with_dual_premium_disability_permits_take_priority_over_premium_permitted_spaces():
parking_space_a = ParkingSpace(required_permit=Permit.DISABILITY)
parking_space_b = ParkingSpace(required_permit=Permit.PREMIUM)
parking_space_c = ParkingSpace(required_permit=Permit.PREMIUM)
parking_space_d = ParkingSpace(required_permit=Permit.DISABILITY)
parking_space_e = ParkingSpace(required_permit=Permit.PREMIUM)
parking_space_f = ParkingSpace(required_permit=Permit.PREMIUM)
parking_level_1 = ParkingLevel(spaces=[parking_space_a, parking_space_b])
parking_level_2 = ParkingLevel(spaces=[parking_space_c, parking_space_d])
parking_level_3 = ParkingLevel(spaces=[parking_space_e, parking_space_f])
garage = Garage(levels=[parking_level_1, parking_level_2, parking_level_3])
vehicle_1 = Vehicle(permit=Permit.PREMIUM)
vehicle_2 = Vehicle(permit=Permit.DISABILITY | Permit.PREMIUM)
vehicle_3 = Vehicle(permit=Permit.DISABILITY | Permit.PREMIUM)
vehicle_4 = Vehicle(permit=Permit.PREMIUM)
vehicle_5 = Vehicle(permit=Permit.DISABILITY | Permit.PREMIUM)
vehicle_6 = Vehicle(permit=Permit.DISABILITY | Permit.PREMIUM)
expected_vehicles_on_level_1: List[Vehicle] = [vehicle_2, vehicle_5]
expected_vehicles_on_level_2: List[Vehicle] = [vehicle_6, vehicle_3]
expected_vehicles_on_level_3: List[Vehicle] = [vehicle_1, vehicle_4]
garage.add_vehicles(
[vehicle_1, vehicle_2, vehicle_3, vehicle_4, vehicle_5, vehicle_6]
)
TestHelpers.assert_expected_vehicles_on_levels(
levels=garage.levels,
expected_vehicles=[
expected_vehicles_on_level_1,
expected_vehicles_on_level_2,
expected_vehicles_on_level_3,
],
)
def test_compact_vehicles_with_premium_permits_are_prioritized_into_premium_parking_spaces():
parking_space_a = ParkingSpace(required_permit=Permit.PREMIUM)
parking_space_b = ParkingSpace(compact=True)
parking_space_c = ParkingSpace(compact=True)
parking_space_d = ParkingSpace(required_permit=Permit.PREMIUM)
parking_space_e = ParkingSpace(required_permit=Permit.PREMIUM)
parking_space_f = ParkingSpace(compact=True)
parking_level_1 = ParkingLevel(spaces=[parking_space_a, parking_space_b])
parking_level_2 = ParkingLevel(spaces=[parking_space_c, parking_space_d])
parking_level_3 = ParkingLevel(spaces=[parking_space_e, parking_space_f])
garage = Garage(levels=[parking_level_1, parking_level_2, parking_level_3])
vehicle_1 = Vehicle()
vehicle_2 = Vehicle(vehicle_type=VehicleType.Compact, permit=Permit.PREMIUM)
vehicle_3 = Vehicle(vehicle_type=VehicleType.Compact, permit=Permit.PREMIUM)
vehicle_4 = Vehicle()
vehicle_5 = Vehicle()
vehicle_6 = Vehicle(vehicle_type=VehicleType.Compact, permit=Permit.PREMIUM)
vehicle_7 = Vehicle(vehicle_type=VehicleType.Compact, permit=Permit.PREMIUM)
expected_vehicles_on_level_1: List[Vehicle] = [vehicle_2, vehicle_7]
expected_vehicles_on_level_2: List[Vehicle] = [None, vehicle_3]
expected_vehicles_on_level_3: List[Vehicle] = [vehicle_6, None]
garage.add_vehicles(
[vehicle_1, vehicle_2, vehicle_3, vehicle_4, vehicle_5, vehicle_6, vehicle_7]
)
TestHelpers.assert_expected_vehicles_on_levels(
levels=garage.levels,
expected_vehicles=[
expected_vehicles_on_level_1,
expected_vehicles_on_level_2,
expected_vehicles_on_level_3,
],
)
def test_compact_vehicles_with_dual_premium_disability_permits_take_the_same_priority_as_non_compact_vehicles():
parking_space_a = ParkingSpace(required_permit=Permit.DISABILITY)
parking_space_b = ParkingSpace(required_permit=Permit.PREMIUM)
parking_space_c = ParkingSpace(required_permit=Permit.PREMIUM)
parking_space_d = ParkingSpace(required_permit=Permit.DISABILITY)
parking_space_e = ParkingSpace(required_permit=Permit.PREMIUM)
parking_space_f = ParkingSpace(required_permit=Permit.PREMIUM)
parking_level_1 = ParkingLevel(spaces=[parking_space_a, parking_space_b])
parking_level_2 = ParkingLevel(spaces=[parking_space_c, parking_space_d])
parking_level_3 = ParkingLevel(spaces=[parking_space_e, parking_space_f])
garage = Garage(levels=[parking_level_1, parking_level_2, parking_level_3])
vehicle_1 = Vehicle(vehicle_type=VehicleType.Compact, permit=Permit.PREMIUM)
vehicle_2 = Vehicle(
vehicle_type=VehicleType.Truck, permit=Permit.DISABILITY | Permit.PREMIUM
)
vehicle_3 = Vehicle(
vehicle_type=VehicleType.Compact, permit=Permit.DISABILITY | Permit.PREMIUM
)
vehicle_4 = Vehicle(vehicle_type=VehicleType.Compact, permit=Permit.PREMIUM)
vehicle_5 = Vehicle(
vehicle_type=VehicleType.Truck, permit=Permit.DISABILITY | Permit.PREMIUM
)
vehicle_6 = Vehicle(
vehicle_type=VehicleType.Compact, permit=Permit.DISABILITY | Permit.PREMIUM
)
expected_vehicles_on_level_1: List[Vehicle] = [vehicle_2, vehicle_5]
expected_vehicles_on_level_2: List[Vehicle] = [vehicle_6, vehicle_3]
expected_vehicles_on_level_3: List[Vehicle] = [vehicle_1, vehicle_4]
garage.add_vehicles(
[vehicle_1, vehicle_2, vehicle_3, vehicle_4, vehicle_5, vehicle_6]
)
TestHelpers.assert_expected_vehicles_on_levels(
levels=garage.levels,
expected_vehicles=[
expected_vehicles_on_level_1,
expected_vehicles_on_level_2,
expected_vehicles_on_level_3,
],
)
``` |
{
"source": "jlarmstrongiv/libtorchjs",
"score": 3
} |
#### File: tests/data/make_jit.py
```python
import torch
def mul2(x):
return x * 2
torch.jit.trace(mul2, torch.randn(3, 3)).save("mul2.pt")
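# Editor's sketch (not part of the original file): the traced module saved above
# can be reloaded to sanity-check the artifact, assuming "mul2.pt" was written
# to the current working directory.
#
#   loaded = torch.jit.load("mul2.pt")
#   print(loaded(torch.ones(3, 3)))  # expected: a 3x3 tensor filled with 2.0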
``` |
{
"source": "jlarrieux/CryptoPriceLambdaCommons",
"score": 2
} |
#### File: jlarrieux/CryptoPriceLambdaCommons/aws_util.py
```python
from decimal import Decimal
from typing import Optional
import boto3
import datetime
import indicator_util
import pickle
from crypto_price_lambda_commons_util import MovingAverageType
from my_rolling_list import MyRollingList
import crypto_price_lambda_commons_util
region = "us-east-1"
dynamodb = boto3.client('dynamodb', region_name=region)
ssm = boto3.client('ssm', region_name=region)
table_name = 'eth-price-hourly-nosql-db'
parameter_key = '0'
s3_resource = boto3.resource('s3')
default_bucket = 'com.jlarrieux.lambda'
def get_last_price() -> Optional[float]:
json_string = _get_from_dynamo()
return None if json_string is None else float(json_string['Item']['last_price']['N'])
def get_last_moving_average(ma_type: MovingAverageType):
json_string = _get_from_dynamo()
ma_string = f"{str(ma_type.value)}_day_ma"
if json_string is None:
return None
try:
json_string['Item'][ma_string]
except KeyError:
return None
return json_string['Item'][ma_string]['N']
def _get_from_dynamo() -> Optional[dict]:
return dynamodb.get_item(
TableName=table_name, Key={'id': {'N': parameter_key}})
def save_price(val: float, is_time_to_save: bool, key: str, bucket: str, initial_size: int = 500) -> MyRollingList:
update_dynamo_table(val, "last_price")
round_val = float(Decimal(val).quantize(Decimal("0.01")))
rolling_average = _load_from_s3(bucket, key)
if is_time_to_save:
if rolling_average is None:
rolling_average = MyRollingList(initial_size)
rolling_average.add(round_val)
save_to_s3(bucket, key, rolling_average)
ma_10 = indicator_util.calculate_simple_moving_average(rolling_average.get_most_recents(10))
ma_12 = indicator_util.calculate_simple_moving_average(rolling_average.get_most_recents(12))
ma_50 = indicator_util.calculate_simple_moving_average(rolling_average.get_most_recents(50))
ma_200 = indicator_util.calculate_simple_moving_average(rolling_average.get_most_recents(200))
update_dynamo_table(ma_10, "10_day_ma")
update_dynamo_table(ma_12, "12_day_ma")
update_dynamo_table(ma_50, "50_day_ma")
update_dynamo_table(ma_200, "200_day_ma")
return rolling_average
def update_dynamo_table(val: float, item: str) -> None:
dynamodb.update_item(TableName=table_name, Key={'id': {
'N': parameter_key}}, ExpressionAttributeNames={"#name": item}, UpdateExpression=f"set #name = :v",
ExpressionAttributeValues={':v': {'N': str(val)}})
def get_parameter(parameter_name):
return ssm.get_parameter(Name=parameter_name, WithDecryption=True)['Parameter']['Value']
def _load_from_s3(bucket: str, s3_key: str) -> Optional[MyRollingList]:
return load_from_s3(bucket, s3_key)
def save_to_s3_default_bucket(key: str, obj: object) -> None:
save_to_s3(default_bucket, key, obj)
def save_to_s3(bucket: str, key: str, obj: object) -> None:
pickle_byte_obj = pickle.dumps(obj)
s3_resource.Object(bucket, key).put(Body=pickle_byte_obj)
def load_from_s3_default_bucket(key: str):
return load_from_s3(default_bucket, key)
def load_from_s3(bucket: str, key: str):
try:
return pickle.loads(s3_resource.Object(bucket, key).get()['Body'].read())
    except s3_resource.meta.client.exceptions.NoSuchKey:
        # A missing key just means nothing has been persisted yet.
        return None
def get_rolling_average(key: str) -> Optional[MyRollingList]:
return load_from_s3_default_bucket(key)
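# Editor's usage sketch (not part of the original module); the key name is
# hypothetical and live AWS credentials, the DynamoDB table and the S3 bucket
# are assumed to exist.
#
#   rolling = save_price(1834.56, is_time_to_save=True,
#                        key="eth-hourly-prices.pkl", bucket=default_bucket)
#   print(get_last_price())  # last price written to DynamoDB
#   # save_price also refreshes the 10/12/50/200-period simple moving averages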
``` |
{
"source": "jlarsen-usgs/HydrographTools",
"score": 3
} |
#### File: jlarsen-usgs/HydrographTools/hobs_output.py
```python
import datetime as dt
import matplotlib.pyplot as plt
import numpy as np
import os
class HobsHeader(object):
sim_head = '"SIMULATED EQUIVALENT"'
obs_head = '"OBSERVED VALUE"'
obs_name = '"OBSERVATION NAME"'
date = 'DATE'
dyear = 'DECIMAL_YEAR'
header = {sim_head: None,
obs_head: None,
obs_name: None,
date: None,
dyear: None}
class HobsOut(dict):
"""
Reads output data from Hobs file and prepares it for post processing.
    Class sets observations to an ordered dictionary based on observation name.
    If the observation name is consistent for a site, a time series is created
    for plotting.
Parameters
----------
filename : str
hobs filename
strip_after : str
flag to indicate a character to strip the hobs label after for
grouping wells.
Example: OBS_1
OBS_2
strip_after could be set to "_" and then all OBS observations will
be stored under the OBS key. This is extremely useful for plotting
and calculating statistics
"""
def __init__(self, filename, strip_after=""):
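        # Editor's sketch (not part of the original source): typical use, assuming
        # a HOB output file that repeats observations as OBS_1, OBS_2, ...
        #
        #   hobs = HobsOut("hobs.out", strip_after="_")
        #   hobs["OBS"]["residual"]   # grouped residual time series for the site
        #   hobs.get_rmse("OBS")      # RMSE over all grouped observations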
super(HobsOut, self).__init__()
self.name = filename
self._strip_after = strip_after
self._dataframe = None
self.__read_hobs_output()
def __read_hobs_output(self):
"""
Method to read a hobs output file. Dynamically sets header information
and reads associated values.
Sets values to HobsOut dictionary
"""
with open(self.name) as hobout:
for ix, line in enumerate(hobout):
if ix == 0:
self.__set_header(line)
else:
self.__set_dictionary_values(line)
def __set_dictionary_values(self, line):
"""
Method to set incoming hobs line to dictionary data values
Args:
line: (str)
"""
t = line.strip().split()
obsname = t[HobsHeader.header[HobsHeader.obs_name]]
dict_name = obsname
if self._strip_after:
dict_name = obsname.split(self._strip_after)[0]
simval = float(t[HobsHeader.header[HobsHeader.sim_head]])
obsval = float(t[HobsHeader.header[HobsHeader.obs_head]])
residual = simval - obsval
date = self.__set_datetime_object(t[HobsHeader.header[HobsHeader.date]])
decimal_date = float(t[HobsHeader.header[HobsHeader.dyear]])
if dict_name in self:
self[dict_name]['simval'].append(simval)
self[dict_name]['obsval'].append(obsval)
self[dict_name]['date'].append(date)
self[dict_name]['decimal_date'].append(decimal_date)
self[dict_name]['residual'].append(residual)
self[dict_name]["obsname"].append(obsname)
else:
self[dict_name] = {"obsname": [obsname], "date": [date],
"decimal_date": [decimal_date],
"simval": [simval], "obsval": [obsval],
"residual": [residual]}
def __set_header(self, line):
"""
Reads header line and sets header index
Parameters
----------
line : str
first line of the HOB file
"""
n = 0
s = ""
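        # Walk the header character by character, accumulating a token in `s`;
        # when the token matches a known column heading its position `n` is
        # recorded, and whitespace resets the accumulator.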
for i in line:
s += i
if s in HobsHeader.header:
HobsHeader.header[s] = n
n += 1
s = ""
elif s in (" ", "\t", "\n"):
s = ""
else:
pass
for key, value in HobsHeader.header.items():
if value is None:
raise AssertionError("HobsHeader headings must be updated")
def __set_datetime_object(self, s):
"""
Reformats a string of YYYY-mm-dd to a datetime object
Parameters
----------
s : str
string of YYYY-mm-dd
Returns
-------
datetime.date
"""
return dt.datetime.strptime(s, "%Y-%m-%d")
def __get_date_string(self, date):
"""
        Parameters
----------
date: datetime.datetime object
Returns
-------
string
"""
return date.strftime("%Y/%m/%d")
@property
def obsnames(self):
"""
Return a list of obsnames from the HobsOut dictionary
"""
return self.keys()
def to_dataframe(self):
"""
Method to get a pandas dataframe object of the
HOBs data.
Returns
-------
pd.DataFrame
"""
import pandas as pd
if self._dataframe is None:
df = None
for hobsname, d in self.items():
t = pd.DataFrame(d)
if df is None:
df = t
else:
df = pd.concat([df, t], ignore_index=True)
self._dataframe = df
return self._dataframe
def get_sum_squared_errors(self, obsname):
"""
Returns the sum of squared errors from the residual
Parameters
----------
obsname : str
observation name
Returns
-------
float: sum of square error
"""
return sum([i**2 for i in self[obsname]['residual']])
def get_rmse(self, obsname):
"""
Returns the RMSE from the residual
Parameters
----------
obsname : str
observation name
Returns
-------
float: rmse
"""
return np.sqrt(np.mean([i**2 for i in self[obsname]['residual']]))
def get_number_observations(self, obsname):
"""
Returns the number of observations for an obsname
Parameters
----------
obsname : str
observation name
Returns
-------
int
"""
return len(self[obsname]['simval'])
def get_maximum_residual(self, obsname):
"""
Returns the datetime.date and maximum residual value
Parameters
----------
obsname : str
observation name
Returns
-------
tuple: (datetime.date, residual)
"""
data = self[obsname]['residual']
index = data.index(max(data))
date = self[obsname]['date'][index]
return date, max(data)
def get_minimum_residual(self, obsname):
"""
Returns the datetime.date, minimum residual value
Parameters
----------
obsname : str
observation name
Returns
-------
tuple: (datetime.date, residual)
"""
data = self[obsname]['residual']
index = data.index(min(data))
date = self[obsname]['date'][index]
return date, min(data)
def get_mean_residual(self, obsname):
"""
        Returns the mean residual value
        Parameters
        ----------
        obsname : str
            observation name
        Returns
        -------
        float: mean residual
"""
data = self[obsname]['residual']
return np.mean(data)
def get_median_residual(self, obsname):
"""
        Returns the median residual value
        Parameters
        ----------
        obsname : str
            observation name
        Returns
        -------
        float: median residual
"""
data = self[obsname]['residual']
return np.median(data)
def get_maximum_residual_heads(self, obsname):
"""
Returns the datetime.date, simulated, and observed
heads at the maximum residual value
Parameters
----------
obsname : str
observation name
Returns
-------
tuple: (datetime.date, simulated head, observed head)
"""
resid = self[obsname]['residual']
index = resid.index(max(resid))
observed = self[obsname]['obsval'][index]
simulated = self[obsname]['simval'][index]
date = self[obsname]['date'][index]
return date, simulated, observed
def get_minimum_residual_heads(self, obsname):
"""
Returns the datetime.date, simulated, and observed
        heads at the minimum residual value
Parameters
----------
obsname : str
observation name
Returns
-------
tuple: (datetime.date, simulated head, observed head)
"""
resid = self[obsname]['residual']
index = resid.index(min(resid))
observed = self[obsname]['obsval'][index]
simulated = self[obsname]['simval'][index]
date = self[obsname]['date'][index]
return date, simulated, observed
def get_residual_bias(self, filter=None):
"""
        Method to determine the sign bias of the measurements
        by checking the residuals. Returns the fraction of residuals
        greater than zero.
Parameters
----------
filter: (str, list, tuple, or function)
filtering criteria for writing statistics.
Function must return True for filter out, false to use
Returns
-------
(float) fraction of residuals greater than zero
"""
nobs = 0.
ngreaterzero = 0.
for obsname, meta_data in self.items():
if self.__filter(obsname, filter):
continue
residual = np.array(meta_data['residual'])
rgreaterzero = sum((residual > 0))
nobs += residual.size
ngreaterzero += rgreaterzero
try:
bias = ngreaterzero / nobs
except ZeroDivisionError:
raise ZeroDivisionError("No observations found!")
return bias
def write_dbf(self, dbfname, filter=None):
"""
        Method to write a dbf file from the HOBS dictionary
Parameters
----------
dbfname : str
dbf file name
filter: (str, list, tuple, or function)
filtering criteria for writing statistics.
Function must return True for filter out, false for write to file
"""
import shapefile
data = []
for obsname, meta_data in self.items():
if self.__filter(obsname, filter):
continue
for ix, val in enumerate(meta_data['simval']):
data.append([obsname,
self.__get_date_string(meta_data['date'][ix]),
val,
meta_data['obsval'][ix],
meta_data['residual'][ix]])
try:
# traps for pyshp 1 vs. pyshp 2
w = shapefile.Writer(dbf=dbfname)
except Exception:
w = shapefile.Writer()
w.field("HOBSNAME", fieldType="C")
w.field("HobsDate", fieldType="D")
w.field("HeadSim", fieldType='N', decimal=8)
w.field("HeadObs", fieldType="N", decimal=8)
w.field("Residual", fieldType="N", decimal=8)
for rec in data:
w.record(*rec)
try:
w.save(dbf=dbfname)
except AttributeError:
w.close()
def write_min_max_residual_dbf(self, dbfname, filter=None):
"""
Method to write a dbf of transient observations
using observation statistics
Parameters
----------
dbfname : str
dbf file name
filter: (str, list, tuple, or function)
filtering criteria for writing statistics.
Function must return True for filter out, false for write to file
"""
import shapefile
data = []
for obsname, meta_data in self.items():
if self.__filter(obsname, filter):
continue
max_date, resid_max = self.get_maximum_residual(obsname)
min_date, resid_min = self.get_minimum_residual(obsname)
simval_max, obsval_max = self.get_maximum_residual_heads(obsname)[1:]
simval_min, obsval_min = self.get_minimum_residual_heads(obsname)[1:]
data.append([obsname,
self.get_number_observations(obsname),
self.__get_date_string(max_date), resid_max,
self.__get_date_string(min_date), resid_min,
simval_max, obsval_max, simval_min, obsval_min])
try:
# traps for pyshp 1 vs. pyshp 2
w = shapefile.Writer(dbf=dbfname)
except Exception:
w = shapefile.Writer()
w.field("HOBSNAME", fieldType="C")
w.field("FREQUENCY", fieldType="N")
w.field("MaxDate", fieldType="C")
w.field("MaxResid", fieldType='N', decimal=8)
w.field("MinDate", fieldType="C", decimal=8)
w.field("MinResid", fieldType="N", decimal=8)
w.field("MaxHeadSim", fieldType="N", decimal=8)
w.field("MaxHeadObs", fieldType="N", decimal=8)
w.field("MinHeadSim", fieldType="N", decimal=8)
w.field("MinHeadObs", fieldType="N", decimal=8)
for rec in data:
w.record(*rec)
try:
w.save(dbf=dbfname)
except AttributeError:
w.close()
def __filter(self, obsname, filter):
"""
        Boolean filtering method, checks if the observation name
is in the filter.
Parameters
----------
obsname : str
observation name
filter: (str, list, tuple, or function)
filtering criteria for writing statistics.
Function must return True for filter out, false for write to file
Returns
-------
bool: True if obsname in filter
"""
if filter is None:
return False
elif isinstance(filter, list) or isinstance(filter, tuple):
            if obsname in filter:
return True
elif isinstance(filter, str):
if obsname == filter:
return True
elif callable(filter):
if filter(obsname):
return True
else:
raise Exception("Filter is not an appropriate type")
return False
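    # Editor's sketch (not part of the original source): the `filter` argument
    # used by the write/plot methods accepts a single name, a collection of
    # names, or a callable, e.g. exclude every observation whose name starts
    # with "DRY":
    #
    #   hobs.write_dbf("hobs.dbf", filter=lambda name: name.startswith("DRY"))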
def write_summary_statistics_csv(self, csvname, filter=None):
"""
Method to write summary calibration statistics to a
CSV file for analysis and reports
Parameters
----------
csvname : str
csv file name
filter: (str, list, tuple, or function)
filtering criteria for writing statistics.
Function must return True for filter out, false for write to file
"""
data = []
header = ["Well name", "Average", "Median",
"Minimum", "Maximum", "RMSE ft", "Frequency"]
for obsname, meta_data in sorted(self.items()):
if self.__filter(obsname, filter):
continue
resid_mean = self.get_mean_residual(obsname)
resid_median = self.get_median_residual(obsname)
resid_max = self.get_maximum_residual(obsname)[-1]
resid_min = self.get_minimum_residual(obsname)[-1]
rmse = self.get_rmse(obsname)
frequency = self.get_number_observations(obsname)
data.append((obsname, resid_mean, resid_median,
resid_min, resid_max, rmse, frequency))
data = np.array(data, dtype=[('id', 'O'), ('mean', float),
('med', float), ('min', float),
('max', float), ('rmse', float),
                                    ('num', int)])
with open(csvname, "w") as foo:
foo.write(",".join(header) + "\n")
np.savetxt(foo, data, fmt="%15s,%.2f,%2f,%2f,%2f,%2f,%d")
def plot(self, obsname, *args, **kwargs):
"""
Plotting functionality from the hobs dictionary
Parameters
----------
obsname: str
hobs package observation name
*args: matplotlib args
**kwargs: matplotlib kwargs
Returns
-------
matplotlib.pyplot.axes object
"""
simulated = True
if "observed" in kwargs:
simulated = False
kwargs.pop('observed')
observed = True
if "simulated" in kwargs:
observed = False
kwargs.pop('simulated')
if obsname not in self:
raise AssertionError("Obsname {}: not valid".format(obsname))
axes = False
if 'ax' in kwargs:
ax = kwargs.pop('ax')
axes = True
if not axes:
ax = plt.subplot(111)
obsval = self[obsname]['obsval']
simval = self[obsname]['simval']
date = self[obsname]['date']
if observed:
kwargs['label'] = "Observed"
kwargs['color'] = 'r'
ax.plot(date, obsval, *args, **kwargs)
if simulated:
kwargs['label'] = "Simulated"
kwargs['color'] = 'b'
ax.plot(date, simval, *args, **kwargs)
return ax
def plot_measured_vs_simulated(self, filter=None, **kwargs):
"""
Plots measured vs. simulated data along a 1:1 profile.
Parameters
----------
filter: (str, list, tuple, or function)
filtering criteria for writing statistics.
Function must return True for filter out, false for write to file
**kwargs: matplotlib.pyplot plotting kwargs
Returns
-------
matplotlib.pyplot.axes object
"""
axes = plt.subplot(111)
for obsname, meta_data in self.items():
if self.__filter(obsname, filter):
continue
simulated = meta_data['simval']
observed = meta_data['obsval']
axes.plot(observed, simulated, 'bo', markeredgecolor='k')
return axes
def plot_simulated_vs_residual(self, filter=None,
histogram=False, **kwargs):
"""
Creates a matplotlib plot of simulated heads vs residual
Parameters
----------
filter: (str, list, tuple, or function)
filtering criteria for writing statistics.
Function must return True for filter out, false for write to file
histogram: (bool)
Boolean variable that defines either a scatter plot (False)
or a histogram (True) of residuals
**kwargs: matplotlib.pyplot plotting kwargs
Returns
-------
matplotlib.pyplot.axes object
"""
axes = plt.subplot(111)
if not histogram:
for obsname, meta_data in self.items():
if self.__filter(obsname, filter):
continue
residual = meta_data['residual']
observed = meta_data['obsval']
axes.plot(observed, residual, 'bo', markeredgecolor="k")
else:
bins = np.arange(-25, 26, 5)
d = {}
for ix, abin in enumerate(bins):
frequency = 0
for obsname, meta_data in self.items():
if self.__filter(obsname, filter):
continue
for residual in meta_data['residual']:
if ix == 0:
if residual < abin:
frequency += 1
elif ix == (len(bins) - 1):
if residual > abin:
frequency += 1
else:
if bins[ix - 1] <= residual < abin:
frequency += 1
if ix == 0:
name = "Less than {}".format(abin)
elif ix == (len(bins) - 1):
name = "Greater than {}".format(abin)
else:
name = "{} to {}".format(bins[ix - 1] + 1, abin)
d[ix + 1] = {'name': name,
'frequency': frequency}
tick_num = []
tick_name = []
for index, meta_data in sorted(d.items()):
axes.bar(index, meta_data['frequency'], width=0.8,
**kwargs)
tick_num.append(index)
tick_name.append(meta_data['name'])
plt.xticks(tick_num, tick_name, rotation=45, fontsize=10)
plt.xlim([0.5, len(tick_num) + 1])
plt.subplots_adjust(left=0.12, bottom=0.22,
right=0.90, top=0.90,
wspace=0.20, hspace=0.20)
plt.ylabel("Frequency")
return axes
if __name__ == "__main__":
ws = r'C:\Users\jlarsen\Desktop\Lucerne\Lucerne_OWHM\V0_initial_from_MODOPTIM\output'
hobs_name = "hobs.out"
tmp = HobsOut(os.path.join(ws, hobs_name))
tmp.plot("04N01W01R04S", "o-")
plt.legend(loc=0, numpoints=1)
plt.show()
print('break')
``` |
{
"source": "jlarsen-usgs/mf2web",
"score": 3
} |
#### File: mf2web/mf88/mfbas88.py
```python
from flopy.pakbase import Package
from flopy.utils import Util3d, Util2d
import numpy as np
import sys
class Modflow88Bas(Package):
"""
Class to read modflow88 Basic package input.
See modflow 88 documentation....
"""
def __init__(self, model, nlay=1, nrow=1, ncol=1, nper=1,
itemuni=0, iunit=(1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
iapart=0, istrt=0, ibound=1, hnoflo=-9999.,
shead=1., perlen=1., nstp=1, tsmult=1.,
start_datetime="1-1-1970"):
unitnumber = 99
filenames = [None]
name = [Modflow88Bas.ftype()]
units = [unitnumber]
extra = [""]
fname = [filenames[0]]
extension = "bas"
super(Modflow88Bas, self).__init__(model, extension=extension,
name=name, unit_number=units, extra=extra,
filenames=fname)
self.url = 'bas.htm'
self.nlay = nlay
self.nrow = nrow
self.ncol = ncol
self.nper = nper
self.itemuni = itemuni
self.lenuni = 0
self.iunit = iunit
self.iapart = iapart
self.istrt = istrt
self.ibound = Util3d(model, (nlay, nrow, ncol), np.int32, ibound,
name='ibound', locat=self.unit_number[0])
self.hnoflo = hnoflo
self.shead = Util3d(model, (nlay, nrow, ncol), np.float32, shead,
name='shead', locat=self.unit_number[0])
self.perlen = Util2d(model, (self.nper,), np.float32, perlen,
name="perlen")
self.nstp = Util2d(model, (self.nper,), np.int32, nstp,
name='nstp')
self.tsmult = Util2d(model, (self.nper,), np.float32, tsmult,
name='tsmult')
self.start_datetime = start_datetime
self.itmuni_dict = {0: "undefined", 1: "seconds", 2: "minutes",
3: "hours", 4: "days", 5: "years"}
self.parent.add_package(self)
@staticmethod
def load(f, model, ext_unit_dict=None):
"""
Load an existing package
Parameters
----------
f : filename or file handle
File to load.
model : model object
The model object (of type :class:`flopy.modflow.mf.Modflow`) to
which this package will be added.
ext_unit_dict : dict
Dictionary of unit and file names
Returns
-------
bas : Modflow88Bas object
Modflow88Bas object (of type :class:`mf2web.mf88.Modflow88Bas`)
"""
if model.verbose:
sys.stdout.write('loading bas6 package file...\n')
if not hasattr(f, 'read'):
filename = f
f = open(filename, 'r')
heading = f.readline()
heading += f.readline()
line = f.readline()[0:50]
nlay, nrow, ncol, nper, itemuni = [int(i) for i in line.split()]
line = f.readline()[0:72]
i0 = 0
i1 = 3
iunit = []
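        # IUNIT is written as fixed-width, 3-character integer fields, so the
        # record is consumed in 3-character slices until a slice no longer
        # parses as an integer.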
while True:
try:
unit = int(line[i0:i1])
if unit < 0:
unit = 0
iunit.append(unit)
i0 += 3
i1 += 3
except ValueError:
break
iunit = tuple(iunit)
line = f.readline()[0:20]
iapart, istrt = [int(i) for i in line.split()]
ibound = Util3d.load(f, model, (nlay, nrow, ncol), np.int32, "ibound",
ext_unit_dict)
hnoflo = float(f.readline()[0:10])
shead = Util3d.load(f, model, (nlay, nrow, ncol), np.float32, "shead",
ext_unit_dict)
perlen = []
nstp = []
tsmult = []
for k in range(nper):
line = f.readline()[0:30]
a1, a2, a3 = line.split()
a1 = float(a1)
a2 = int(a2)
a3 = float(a3)
perlen.append(a1)
nstp.append(a2)
tsmult.append(a3)
return Modflow88Bas(model, nlay, nrow, ncol, nper, itemuni,
iunit, iapart, istrt, ibound, hnoflo,
shead, perlen, nstp, tsmult)
@staticmethod
def ftype():
return "BAS"
```
#### File: mf2web/mf88/mfrch88.py
```python
from flopy.pakbase import Package
from flopy.utils import Util2d, Transient2d
import numpy as np
import sys
class Modflow88Rch(Package):
"""
Class to read modflow 88 recharge
package
see modflow 88 manual for documentation...
"""
def __init__(self, model, nrchop=1, irchcb=90,
rech=0., irch=0.):
unitnumber = 8
filenames = [None, None]
name = [Modflow88Rch.ftype()]
units = [unitnumber]
extra = [""]
fname = [filenames[0]]
extension = "rch"
super(Modflow88Rch, self).__init__(model, extension=extension,
name=name, unit_number=units, extra=extra,
filenames=fname)
nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper
self.nrchop = nrchop
self.rech = Transient2d(model, (nrow, ncol), np.float32,
rech, name='rech')
self.irch = None
if self.nrchop == 2:
self.irch = Transient2d(model, (nrow, ncol), np.int32,
irch, name='irch')
self.parent.add_package(self)
@staticmethod
def load(f, model, nper=1, nrow=1, ncol=1, ext_unit_dict=None):
"""
Parameters
----------
f : str
filename
model : mf88 object
nper : int
number of stress periods
nrow : int
number of model rows
ncol : int
number of model columns
ext_unit_dict : dict
Dictionary of unit and file names
Returns
-------
Modflow88Rch object
"""
if model.verbose:
            sys.stdout.write('loading rch package file...\n')
if not hasattr(f, 'read'):
filename = f
f = open(filename, 'r')
if model.nrow_ncol_nlay_nper != (0, 0, 0, 0):
nrow, ncol, nlay, nper = model.nrow_ncol_nlay_nper
t = f.readline().strip().split()
nrchop, irchcb = int(t[0]), int(t[1])
rech = {}
irch = {}
for per in range(nper):
t = f.readline().strip().split()
inrech, inirech = int(t[0]), int(t[1])
if inrech < 0:
rech[per] = rech[per - 1]
else:
arr = Util2d.load(f, model, (nrow, ncol), np.float32, 'rech',
ext_unit_dict)
rech[per] = arr
if nrchop == 2:
if inirech < 0:
irch[per] = irch[per - 1]
else:
arr = Util2d.load(f, model, (nrow, ncol), np.int32, "irch",
ext_unit_dict)
irch[per] = arr
return Modflow88Rch(model, nrchop, irchcb, rech, irch)
@staticmethod
def ftype():
return "RCH"
```
#### File: mf2web/mf88/mfsip88.py
```python
from flopy.pakbase import Package
import sys
class Modflow88Sip(Package):
"""
Class to read modflow88 Strongly implicit proceedure
package input.
See modflow 88 documentation....
"""
def __init__(self, model, mxiter=50, nparm=5, accl=1.,
hclose=0.01, ipcalc=1, wseed=0.98, iprsip=10):
unitnumber = 9
filenames = [None]
name = [Modflow88Sip.ftype()]
units = [unitnumber]
extra = [""]
fname = [filenames[0]]
extension = "sip"
super(Modflow88Sip, self).__init__(model, extension=extension,
name=name, unit_number=units, extra=extra,
filenames=fname)
self.mxiter = mxiter
self.nparm = nparm
self.accl = accl
self.hclose = hclose
self.ipcalc = ipcalc
self.wseed = wseed
self.iprsip = iprsip
self.parent.add_package(self)
@staticmethod
def load(f, model, ext_unit_dict=None):
"""
Load an existing package
Parameters
----------
f : filename or file handle
File to load.
model : model object
The model object (of type :class:`flopy.modflow.mf.Modflow`) to
which this package will be added.
ext_unit_dict : dict
Dictionary of unit and file names
Returns
-------
bas : Modflow88Sip object
Modflow88Sip object (of type :class:`mf2web.mf88.Modflow88Sip`)
"""
if model.verbose:
            sys.stdout.write('loading sip package file...\n')
if not hasattr(f, 'read'):
filename = f
f = open(filename, 'r')
if model.nrow_ncol_nlay_nper != (0, 0, 0, 0):
nrow, ncol, nlay, nper = model.nrow_ncol_nlay_nper
t = f.readline().strip().split()
mxiter, nparm = int(t[0]), int(t[1])
t = f.readline().strip().split()
accl, hclose, ipcalc = float(t[0]), float(t[1]), int(t[2])
wseed = 0.98
iprsip = 10
if ipcalc == 0:
wseed, iprsip = float(t[3]), int(t[4])
return Modflow88Sip(model, mxiter, nparm, accl, hclose,
ipcalc, wseed, iprsip)
@staticmethod
def ftype():
return "SIP"
``` |
{
"source": "jlartey-aims/Resistivity",
"score": 2
} |
#### File: EM/NSEM/RxNSEM.py
```python
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from scipy.constants import mu_0
import SimPEG
import numpy as np
from SimPEG import mkvc
class BaseRxNSEM_Point(SimPEG.Survey.BaseRx):
"""
Natural source receiver base class.
Assumes that the data locations are xyz coordinates.
:param numpy.ndarray locs: receiver locations (ie. :code:`np.r_[x,y,z]`)
    :param string orientation: receiver orientation 'xx', 'xy', 'yx', 'yy', 'zx' or 'zy'
:param string component: real or imaginary component 'real' or 'imag'
"""
def __init__(self, locs, orientation=None, component=None):
        assert(orientation in ['xx', 'xy', 'yx', 'yy', 'zx', 'zy']), "Orientation {0!s} not known. Orientation must be in 'xx', 'xy', 'yx', 'yy', 'zx', 'zy'. Arbitrary orientations have not yet been implemented.".format(orientation)
assert(component in ['real', 'imag']), "'component' must be 'real' or 'imag', not {0!s}".format(component)
self.orientation = orientation
self.component = component
SimPEG.Survey.BaseRx.__init__(self, locs, rxType=None) # TODO: remove rxType from baseRx
# Set a mesh property
@property
def mesh(self):
return self._mesh
@mesh.setter
def mesh(self, value):
if value is getattr(self, '_mesh', None):
pass
else:
self._mesh = value
@property
def src(self):
return self._src
@src.setter
def src(self, value):
self._src = value
@property
def f(self):
return self._f
@f.setter
def f(self, value):
self._f = value
def _locs_e(self):
if self.locs.ndim == 3:
loc = self.locs[:, :, 0]
else:
loc = self.locs
return loc
def _locs_b(self):
if self.locs.ndim == 3:
loc = self.locs[:, :, 1]
else:
loc = self.locs
return loc
# Location projection
@property
def Pex(self):
if getattr(self, '_Pex', None) is None:
self._Pex = self._mesh.getInterpolationMat(self._locs_e(), 'Ex')
return self._Pex
@property
def Pey(self):
if getattr(self, '_Pey', None) is None:
self._Pey = self._mesh.getInterpolationMat(self._locs_e(), 'Ey')
return self._Pey
@property
def Pbx(self):
if getattr(self, '_Pbx', None) is None:
self._Pbx = self._mesh.getInterpolationMat(self._locs_b(), 'Fx')
return self._Pbx
@property
def Pby(self):
if getattr(self, '_Pby', None) is None:
self._Pby = self._mesh.getInterpolationMat(self._locs_b(), 'Fy')
return self._Pby
@property
def Pbz(self):
if getattr(self, '_Pbz', None) is None:
self._Pbz = self._mesh.getInterpolationMat(self._locs_e(), 'Fz')
return self._Pbz
    # Utility for convenience
def _sDiag(self, t):
return SimPEG.Utils.sdiag(mkvc(t,2))
# Get the components of the fields
    # px: x-polarization and py: y-polarization.
@property
def _ex_px(self):
return self.Pex*self.f[self.src, 'e_px']
@property
def _ey_px(self):
return self.Pey*self.f[self.src, 'e_px']
@property
def _ex_py(self):
return self.Pex*self.f[self.src, 'e_py']
@property
def _ey_py(self):
return self.Pey*self.f[self.src, 'e_py']
@property
def _hx_px(self):
return self.Pbx*self.f[self.src, 'b_px']/mu_0
@property
def _hy_px(self):
return self.Pby*self.f[self.src, 'b_px']/mu_0
@property
def _hz_px(self):
return self.Pbz*self.f[self.src, 'b_px']/mu_0
@property
def _hx_py(self):
return self.Pbx*self.f[self.src, 'b_py']/mu_0
@property
def _hy_py(self):
return self.Pby*self.f[self.src, 'b_py']/mu_0
@property
def _hz_py(self):
return self.Pbz*self.f[self.src, 'b_py']/mu_0
# Get the derivatives
def _ex_px_u(self, vec):
return self.Pex*self.f._e_pxDeriv_u(self.src, vec)
def _ey_px_u(self, vec):
return self.Pey*self.f._e_pxDeriv_u(self.src, vec)
def _ex_py_u(self, vec):
return self.Pex*self.f._e_pyDeriv_u(self.src, vec)
def _ey_py_u(self, vec):
return self.Pey*self.f._e_pyDeriv_u(self.src, vec)
def _hx_px_u(self, vec):
return self.Pbx*self.f._b_pxDeriv_u(self.src, vec)/mu_0
def _hy_px_u(self, vec):
return self.Pby*self.f._b_pxDeriv_u(self.src, vec)/mu_0
def _hz_px_u(self, vec):
return self.Pbz*self.f._b_pxDeriv_u(self.src, vec)/mu_0
def _hx_py_u(self, vec):
return self.Pbx*self.f._b_pyDeriv_u(self.src, vec)/mu_0
def _hy_py_u(self, vec):
return self.Pby*self.f._b_pyDeriv_u(self.src, vec)/mu_0
def _hz_py_u(self, vec):
return self.Pbz*self.f._b_pyDeriv_u(self.src, vec)/mu_0
# Define the components of the derivative
@property
def _Hd(self):
return self._sDiag(1./(
self._sDiag(self._hx_px)*self._hy_py -
self._sDiag(self._hx_py)*self._hy_px
))
def _Hd_uV(self, v):
return (
self._sDiag(self._hy_py)*self._hx_px_u(v) +
self._sDiag(self._hx_px)*self._hy_py_u(v) -
self._sDiag(self._hx_py)*self._hy_px_u(v) -
self._sDiag(self._hy_px)*self._hx_py_u(v)
)
# Adjoint
@property
def _aex_px(self):
return mkvc(mkvc(self.f[self.src, 'e_px'], 2).T*self.Pex.T)
@property
def _aey_px(self):
return mkvc(mkvc(self.f[self.src, 'e_px'], 2).T*self.Pey.T)
@property
def _aex_py(self):
return mkvc(mkvc(self.f[self.src, 'e_py'], 2).T*self.Pex.T)
@property
def _aey_py(self):
return mkvc(mkvc(self.f[self.src, 'e_py'], 2).T*self.Pey.T)
@property
def _ahx_px(self):
return mkvc(mkvc(self.f[self.src, 'b_px'], 2).T/mu_0*self.Pbx.T)
@property
def _ahy_px(self):
return mkvc(mkvc(self.f[self.src, 'b_px'], 2).T/mu_0*self.Pby.T)
@property
def _ahz_px(self):
return mkvc(mkvc(self.f[self.src, 'b_px'], 2).T/mu_0*self.Pbz.T)
@property
def _ahx_py(self):
return mkvc(mkvc(self.f[self.src, 'b_py'], 2).T/mu_0*self.Pbx.T)
@property
def _ahy_py(self):
return mkvc(mkvc(self.f[self.src, 'b_py'], 2).T/mu_0*self.Pby.T)
@property
def _ahz_py(self):
return mkvc(mkvc(self.f[self.src, 'b_py'], 2).T/mu_0*self.Pbz.T)
# NOTE: need to add a .T at the end for the output to be (nU,)
def _aex_px_u(self, vec):
"""
"""
# vec is (nD,) and returns a (nU,)
return self.f._e_pxDeriv_u(self.src, self.Pex.T*mkvc(vec,), adjoint=True)
def _aey_px_u(self, vec):
"""
"""
# vec is (nD,) and returns a (nU,)
return self.f._e_pxDeriv_u(self.src, self.Pey.T*mkvc(vec,), adjoint=True)
def _aex_py_u(self, vec):
"""
"""
# vec is (nD,) and returns a (nU,)
return self.f._e_pyDeriv_u(self.src, self.Pex.T*mkvc(vec,), adjoint=True)
def _aey_py_u(self, vec):
"""
"""
# vec is (nD,) and returns a (nU,)
return self.f._e_pyDeriv_u(self.src, self.Pey.T*mkvc(vec,), adjoint=True)
def _ahx_px_u(self, vec):
"""
"""
# vec is (nD,) and returns a (nU,)
return self.f._b_pxDeriv_u(self.src, self.Pbx.T*mkvc(vec,), adjoint=True)/mu_0
def _ahy_px_u(self, vec):
"""
"""
# vec is (nD,) and returns a (nU,)
return self.f._b_pxDeriv_u(self.src, self.Pby.T*mkvc(vec,), adjoint=True)/mu_0
def _ahz_px_u(self, vec):
"""
"""
# vec is (nD,) and returns a (nU,)
return self.f._b_pxDeriv_u(self.src, self.Pbz.T*mkvc(vec,), adjoint=True)/mu_0
def _ahx_py_u(self, vec):
"""
"""
# vec is (nD,) and returns a (nU,)
return self.f._b_pyDeriv_u(self.src, self.Pbx.T*mkvc(vec,), adjoint=True)/mu_0
def _ahy_py_u(self, vec):
"""
"""
# vec is (nD,) and returns a (nU,)
return self.f._b_pyDeriv_u(self.src, self.Pby.T*mkvc(vec,), adjoint=True)/mu_0
def _ahz_py_u(self, vec):
"""
"""
# vec is (nD,) and returns a (nU,)
return self.f._b_pyDeriv_u(self.src, self.Pbz.T*mkvc(vec,), adjoint=True)/mu_0
# Define the components of the derivative
@property
def _aHd(self):
return self._sDiag(1./(
self._sDiag(self._ahx_px)*self._ahy_py -
self._sDiag(self._ahx_py)*self._ahy_px
))
def _aHd_uV(self, x):
return (
self._ahx_px_u(self._sDiag(self._ahy_py)*x) +
            self._ahy_py_u(self._sDiag(self._ahx_px)*x) -
self._ahy_px_u(self._sDiag(self._ahx_py)*x) -
self._ahx_py_u(self._sDiag(self._ahy_px)*x)
)
def eval(self, src, mesh, f, return_complex=False):
"""
Function to evaluate datum for this receiver
"""
raise NotImplementedError('SimPEG.EM.NSEM receiver has to have an eval method')
def evalDeriv(self, src, mesh, f, v, adjoint=False):
"""
Function to evaluate datum for this receiver
"""
raise NotImplementedError('SimPEG.EM.NSEM receiver has to have an evalDeriv method')
class Point_impedance1D(SimPEG.Survey.BaseRx):
"""
Natural source 1D impedance receiver class
:param string component: real or imaginary component 'real' or 'imag'
"""
orientation = 'yx'
def __init__(self, locs, component=None):
assert(component in ['real', 'imag']), "'component' must be 'real' or 'imag', not {0!s}".format(component)
self.component = component
SimPEG.Survey.BaseRx.__init__(self, locs, rxType=None)
@property
def mesh(self):
return self._mesh
@mesh.setter
def mesh(self, value):
if value is getattr(self, '_mesh', None):
pass
else:
self._mesh = value
    # Utility for convenience
def _sDiag(self, t):
return SimPEG.Utils.sdiag(mkvc(t, 2))
@property
def src(self):
return self._src
@src.setter
def src(self, value):
self._src = value
@property
def f(self):
return self._f
@f.setter
def f(self, value):
self._f = value
@property
def Pex(self):
if getattr(self, '_Pex', None) is None:
self._Pex = self._mesh.getInterpolationMat(self.locs[:, -1], 'Fx')
return self._Pex
@property
def Pbx(self):
if getattr(self, '_Pbx', None) is None:
self._Pbx = self._mesh.getInterpolationMat(self.locs[:, -1], 'Ex')
return self._Pbx
@property
def _ex(self):
return self.Pex * mkvc(self.f[self.src, 'e_1d'], 2)
@property
def _hx(self):
return self.Pbx * mkvc(self.f[self.src, 'b_1d'], 2) / mu_0
def _ex_u(self, v):
return self.Pex * self.f._eDeriv_u(self.src, v)
def _hx_u(self, v):
return self.Pbx * self.f._bDeriv_u(self.src, v) / mu_0
def _aex_u(self, v):
return self.f._eDeriv_u(self.src, self.Pex.T * v, adjoint=True)
def _ahx_u(self, v):
return self.f._bDeriv_u(self.src, self.Pbx.T * v, adjoint=True) / mu_0
@property
def _Hd(self):
return self._sDiag(1./self._hx)
def eval(self, src, mesh, f, return_complex=False):
'''
Project the fields to natural source data.
:param SimPEG.EM.NSEM.SrcNSEM src: NSEM source
:param discretize.TensorMesh mesh: Mesh defining the topology of the problem
:param SimPEG.EM.NSEM.FieldsNSEM f: NSEM fields object of the source
        :param bool return_complex: optional flag to return the complex-valued evaluation
:rtype: numpy.array
:return: Evaluated data for the receiver
'''
# NOTE: Maybe set this as a property
self.src = src
self.mesh = mesh
self.f = f
rx_eval_complex = -self._Hd * self._ex
# Return the full impedance
if return_complex:
return rx_eval_complex
return getattr(rx_eval_complex, self.component)
def evalDeriv(self, src, mesh, f, v, adjoint=False):
"""method evalDeriv
The derivative of the projection wrt u
:param SimPEG.EM.NSEM.SrcNSEM src: NSEM source
:param discretize.TensorMesh mesh: Mesh defining the topology of the problem
:param SimPEG.EM.NSEM.FieldsNSEM f: NSEM fields object of the source
:param numpy.ndarray v: vector of size (nU,) (adjoint=False) and size (nD,) (adjoint=True)
:rtype: numpy.array
:return: Calculated derivative (nD,) (adjoint=False) and (nP,2) (adjoint=True) for both polarizations
"""
self.src = src
self.mesh = mesh
self.f = f
if adjoint:
Z1d = self.eval(src, mesh, f, True)
def aZ_N_uV(x):
return -self._aex_u(x)
def aZ_D_uV(x):
return self._ahx_u(x)
rx_deriv = aZ_N_uV(self._Hd.T * v) - aZ_D_uV(self._sDiag(Z1d).T * self._Hd.T * v)
if self.component == 'imag':
rx_deriv_component = 1j*rx_deriv
elif self.component == 'real':
rx_deriv_component = rx_deriv.astype(complex)
else:
Z1d = self.eval(src, mesh, f, True)
Z_N_uV = -self._ex_u(v)
Z_D_uV = self._hx_u(v)
# Evaluate
rx_deriv = self._Hd * (Z_N_uV - self._sDiag(Z1d) * Z_D_uV)
rx_deriv_component = np.array(getattr(rx_deriv, self.component))
return rx_deriv_component
class Point_impedance3D(BaseRxNSEM_Point):
"""
Natural source 3D impedance receiver class
:param numpy.ndarray locs: receiver locations (ie. :code:`np.r_[x,y,z]`)
:param string orientation: receiver orientation 'xx', 'xy', 'yx' or 'yy'
:param string component: real or imaginary component 'real' or 'imag'
"""
def __init__(self, locs, orientation=None, component=None):
BaseRxNSEM_Point.__init__(self, locs, orientation=orientation, component=component)
def eval(self, src, mesh, f, return_complex=False):
'''
Project the fields to natural source data.
:param SrcNSEM src: The source of the fields to project
:param discretize.TensorMesh mesh: topological mesh corresponding to the fields
:param FieldsNSEM f: Natural source fields object to project
:rtype: numpy.array
:return: component of the impedance evaluation
'''
# NOTE: Maybe set this as a property
self.src = src
self.mesh = mesh
self.f = f
if 'xx' in self.orientation:
Zij = ( self._ex_px * self._hy_py - self._ex_py * self._hy_px)
elif 'xy' in self.orientation:
Zij = (-self._ex_px * self._hx_py + self._ex_py * self._hx_px)
elif 'yx' in self.orientation:
Zij = ( self._ey_px * self._hy_py - self._ey_py * self._hy_px)
elif 'yy' in self.orientation:
Zij = (-self._ey_px * self._hx_py + self._ey_py * self._hx_px)
# Calculate the complex value
rx_eval_complex = self._Hd * Zij
# Return the full impedance
if return_complex:
return rx_eval_complex
return getattr(rx_eval_complex, self.component)
def evalDeriv(self, src, mesh, f, v, adjoint=False):
"""
The derivative of the projection wrt u
:param SimPEG.EM.NSEM.SrcNSEM src: NSEM source
:param discretize.TensorMesh mesh: Mesh defining the topology of the problem
:param SimPEG.EM.NSEM.FieldsNSEM f: NSEM fields object of the source
:param numpy.ndarray v: vector of size (nU,) (adjoint=False) and size (nD,) (adjoint=True)
:rtype: numpy.array
:return: Calculated derivative (nD,) (adjoint=False) and (nP,2) (adjoint=True) for both polarizations
"""
self.src = src
self.mesh = mesh
self.f = f
if adjoint:
if 'xx' in self.orientation:
Zij = self._sDiag(self._aHd * (
self._sDiag(self._ahy_py)*self._aex_px -
self._sDiag(self._ahy_px)*self._aex_py
))
def ZijN_uV(x):
return (
self._aex_px_u(self._sDiag(self._ahy_py) * x) +
self._ahy_py_u(self._sDiag(self._aex_px) * x) -
self._ahy_px_u(self._sDiag(self._aex_py) * x) -
self._aex_py_u(self._sDiag(self._ahy_px) * x)
)
elif 'xy' in self.orientation:
Zij = self._sDiag(self._aHd * (
-self._sDiag(self._ahx_py) * self._aex_px +
self._sDiag(self._ahx_px) * self._aex_py
))
def ZijN_uV(x):
return (
-self._aex_px_u(self._sDiag(self._ahx_py) * x) -
self._ahx_py_u(self._sDiag(self._aex_px) * x) +
self._ahx_px_u(self._sDiag(self._aex_py) * x) +
self._aex_py_u(self._sDiag(self._ahx_px) * x)
)
elif 'yx' in self.orientation:
Zij = self._sDiag(self._aHd * (
self._sDiag(self._ahy_py) * self._aey_px -
self._sDiag(self._ahy_px) * self._aey_py
))
def ZijN_uV(x):
return (
self._aey_px_u(self._sDiag(self._ahy_py) * x) +
self._ahy_py_u(self._sDiag(self._aey_px) * x) -
self._ahy_px_u(self._sDiag(self._aey_py) * x) -
self._aey_py_u(self._sDiag(self._ahy_px) * x)
)
elif 'yy' in self.orientation:
Zij = self._sDiag(self._aHd * (
-self._sDiag(self._ahx_py) * self._aey_px +
self._sDiag(self._ahx_px) * self._aey_py))
def ZijN_uV(x):
return (
-self._aey_px_u(self._sDiag(self._ahx_py) * x) -
self._ahx_py_u(self._sDiag(self._aey_px) * x) +
self._ahx_px_u(self._sDiag(self._aey_py) * x) +
self._aey_py_u(self._sDiag(self._ahx_px) * x)
)
# Calculate the complex derivative
rx_deriv_real = ZijN_uV(self._aHd * v) - self._aHd_uV(Zij.T * self._aHd * v)
# NOTE: Need to reshape the output to go from 2*nU array to a (nU,2) matrix for each polarization
# rx_deriv_real = np.hstack((mkvc(rx_deriv_real[:len(rx_deriv_real)/2],2),mkvc(rx_deriv_real[len(rx_deriv_real)/2::],2)))
rx_deriv_real = rx_deriv_real.reshape((2, self.mesh.nE)).T
# Extract the data
if self.component == 'imag':
rx_deriv_component = 1j * rx_deriv_real
elif self.component == 'real':
rx_deriv_component = rx_deriv_real.astype(complex)
else:
if 'xx' in self.orientation:
ZijN_uV = (
self._sDiag(self._hy_py) * self._ex_px_u(v) +
self._sDiag(self._ex_px) * self._hy_py_u(v) -
self._sDiag(self._ex_py) * self._hy_px_u(v) -
self._sDiag(self._hy_px) * self._ex_py_u(v)
)
elif 'xy' in self.orientation:
ZijN_uV = (
-self._sDiag(self._hx_py) * self._ex_px_u(v) -
self._sDiag(self._ex_px) * self._hx_py_u(v) +
self._sDiag(self._ex_py) * self._hx_px_u(v) +
self._sDiag(self._hx_px) * self._ex_py_u(v)
)
elif 'yx' in self.orientation:
ZijN_uV = (
self._sDiag(self._hy_py) * self._ey_px_u(v) +
self._sDiag(self._ey_px) * self._hy_py_u(v) -
self._sDiag(self._ey_py) * self._hy_px_u(v) -
self._sDiag(self._hy_px) * self._ey_py_u(v)
)
elif 'yy' in self.orientation:
ZijN_uV = (
-self._sDiag(self._hx_py) * self._ey_px_u(v) -
self._sDiag(self._ey_px) * self._hx_py_u(v) +
self._sDiag(self._ey_py) * self._hx_px_u(v) +
self._sDiag(self._hx_px) * self._ey_py_u(v)
)
Zij = self.eval(src, self.mesh, self.f, True)
# Calculate the complex derivative
rx_deriv_real = self._Hd * (ZijN_uV - self._sDiag(Zij) * self._Hd_uV(v))
rx_deriv_component = np.array(getattr(rx_deriv_real, self.component))
return rx_deriv_component
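# --- Illustrative note (not part of the original module) ---
# The four orientations handled in eval() above are the entries of the 2x2
# impedance Z = E * H^{-1}, written out with Cramer's rule over the two
# source polarizations (px, py):
#
#     det(H) = hx_px * hy_py - hx_py * hy_px
#     Zxx    = ( ex_px * hy_py - ex_py * hy_px) / det(H)
#     Zxy    = (-ex_px * hx_py + ex_py * hx_px) / det(H)
#     Zyx    = ( ey_px * hy_py - ey_py * hy_px) / det(H)
#     Zyy    = (-ey_px * hx_py + ey_py * hx_px) / det(H)
#
# i.e. eval() assembles Zij and multiplies by self._Hd, the diagonal
# operator holding 1/det(H).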
class Point_tipper3D(BaseRxNSEM_Point):
"""
Natural source 3D tipper receiver base class
:param numpy.ndarray locs: receiver locations (ie. :code:`np.r_[x,y,z]`)
:param string orientation: receiver orientation 'x', 'y' or 'z'
:param string component: real or imaginary component 'real' or 'imag'
"""
def __init__(self, locs, orientation=None, component=None):
BaseRxNSEM_Point.__init__(
self, locs, orientation=orientation, component=component
)
def eval(self, src, mesh, f, return_complex=False):
'''
Project the fields to natural source data.
:param SrcNSEM src: The source of the fields to project
:param discretize.TensorMesh mesh: Mesh defining the topology of the problem
:param FieldsNSEM f: Natural source fields object to project
:rtype: numpy.array
:return: Evaluated component of the impedance data
'''
# NOTE: Maybe set this as a property
self.src = src
self.mesh = mesh
self.f = f
if 'zx' in self.orientation:
Tij = (- self._hy_px * self._hz_py + self._hy_py * self._hz_px)
if 'zy' in self.orientation:
Tij = (self._hx_px * self._hz_py - self._hx_py * self._hz_px)
rx_eval_complex = self._Hd * Tij
# Return the full impedance
if return_complex:
return rx_eval_complex
return getattr(rx_eval_complex, self.component)
def evalDeriv(self, src, mesh, f, v, adjoint=False):
"""
The derivative of the projection wrt u
:param SimPEG.EM.NSEM.SrcNSEM src: NSEM source
:param discretize.TensorMesh mesh: Mesh defining the topology of the problem
:param SimPEG.EM.NSEM.FieldsNSEM f: NSEM fields object of the source
:param numpy.ndarray v: Random vector of size
:rtype: numpy.array
:return: Calculated derivative (nD,) (adjoint=False) and (nP,2) (adjoint=True)
for both polarizations
"""
self.src = src
self.mesh = mesh
self.f = f
if adjoint:
if 'zx' in self.orientation:
Tij = self._sDiag(self._aHd * (
-self._sDiag(self._ahz_py) * self._ahy_px +
self._sDiag(self._ahz_px) * self._ahy_py)
)
def TijN_uV(x):
return (
-self._ahz_py_u(self._sDiag(self._ahy_px) * x) -
self._ahy_px_u(self._sDiag(self._ahz_py) * x) +
self._ahy_py_u(self._sDiag(self._ahz_px) * x) +
self._ahz_px_u(self._sDiag(self._ahy_py) * x)
)
elif 'zy' in self.orientation:
Tij = self._sDiag(self._aHd * (
self._sDiag(self._ahz_py) * self._ahx_px -
self._sDiag(self._ahz_px) * self._ahx_py)
)
def TijN_uV(x):
return (
self._ahx_px_u(self._sDiag(self._ahz_py) * x) +
self._ahz_py_u(self._sDiag(self._ahx_px) * x) -
self._ahx_py_u(self._sDiag(self._ahz_px) * x) -
self._ahz_px_u(self._sDiag(self._ahx_py) * x)
)
# Calculate the complex derivative
rx_deriv_real = (
TijN_uV(self._aHd * v) -
self._aHd_uV(Tij.T * self._aHd * v)
)
# NOTE: Need to reshape the output to go from 2*nU array to a (nU,2) matrix for each polarization
# rx_deriv_real = np.hstack((mkvc(rx_deriv_real[:len(rx_deriv_real)/2],2),mkvc(rx_deriv_real[len(rx_deriv_real)/2::],2)))
rx_deriv_real = rx_deriv_real.reshape((2, self.mesh.nE)).T
# Extract the data
if self.component == 'imag':
rx_deriv_component = 1j * rx_deriv_real
elif self.component == 'real':
rx_deriv_component = rx_deriv_real.astype(complex)
else:
if 'zx' in self.orientation:
TijN_uV = (
-self._sDiag(self._hy_px) * self._hz_py_u(v) -
self._sDiag(self._hz_py) * self._hy_px_u(v) +
self._sDiag(self._hy_py) * self._hz_px_u(v) +
self._sDiag(self._hz_px) * self._hy_py_u(v)
)
elif 'zy' in self.orientation:
TijN_uV = (
self._sDiag(self._hz_py) * self._hx_px_u(v) +
self._sDiag(self._hx_px) * self._hz_py_u(v) -
self._sDiag(self._hx_py) * self._hz_px_u(v) -
self._sDiag(self._hz_px) * self._hx_py_u(v)
)
Tij = self.eval(src, mesh, f, True)
# Calculate the complex derivative
rx_deriv_complex = (
self._Hd * (TijN_uV - self._sDiag(Tij) * self._Hd_uV(v))
)
rx_deriv_component = np.array(
getattr(rx_deriv_complex, self.component)
)
return rx_deriv_component
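# --- Illustrative sketch (not part of the original module) ---
# A minimal example of assembling receivers for one group of sites; the
# helper name and `site_locs` array are hypothetical and the surrounding
# survey/simulation setup is assumed to exist elsewhere.
def _example_rx_list(site_locs):
    """Return off-diagonal impedance and tipper receivers, real and imaginary."""
    rx_list = []
    for orient in ['xy', 'yx']:
        for comp in ['real', 'imag']:
            rx_list.append(Point_impedance3D(site_locs, orient, comp))
    for orient in ['zx', 'zy']:
        for comp in ['real', 'imag']:
            rx_list.append(Point_tipper3D(site_locs, orient, comp))
    return rx_list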
```
#### File: Resistivity/SimPEG/Optimization.py
```python
from __future__ import print_function
import numpy as np
import scipy.sparse as sp
from six import string_types
from .Utils.SolverUtils import *
from . import Utils
norm = np.linalg.norm
__all__ = [
'Minimize', 'Remember', 'SteepestDescent', 'BFGS', 'GaussNewton',
'InexactGaussNewton', 'ProjectedGradient', 'NewtonRoot',
'StoppingCriteria', 'IterationPrinters'
]
SolverICG = SolverWrapI(sp.linalg.cg, checkAccuracy=False)
class StoppingCriteria(object):
    """Collection of stopping-criteria definitions used by Minimize."""
iteration = {
"str": "%d : maxIter = %3d <= iter = %3d",
"left": lambda M: M.maxIter, "right": lambda M: M.iter,
"stopType": "critical"
}
iterationLS = {
"str": "%d : maxIterLS = %3d <= iterLS = %3d",
"left": lambda M: M.maxIterLS, "right": lambda M: M.iterLS,
"stopType": "critical"
}
armijoGoldstein = {
"str": "%d : ft = %1.4e <= alp*descent = %1.4e",
"left": lambda M: M._LS_ft,
"right": lambda M: M.f + M.LSreduction * M._LS_descent,
"stopType": "optimal"
}
tolerance_f = {
"str": "%d : |fc-fOld| = %1.4e <= tolF*(1+|f0|) = %1.4e",
"left": lambda M: 1 if M.iter==0 else abs(M.f-M.f_last),
"right": lambda M: 0 if M.iter==0 else M.tolF*(1+abs(M.f0)),
"stopType": "optimal"
}
moving_x = {
"str": "%d : |xc-x_last| = %1.4e <= tolX*(1+|x0|) = %1.4e",
"left": lambda M: 1 if M.iter==0 else norm(M.xc-M.x_last),
"right": lambda M: 0 if M.iter==0 else M.tolX*(1+norm(M.x0)),
"stopType": "optimal"
}
tolerance_g = {
"str": "%d : |proj(x-g)-x| = %1.4e <= tolG = %1.4e",
"left": lambda M: norm(M.projection(M.xc - M.g) - M.xc),
"right": lambda M: M.tolG,
"stopType": "optimal"
}
norm_g = {
"str": "%d : |proj(x-g)-x| = %1.4e <= 1e3*eps = %1.4e",
"left": lambda M: norm(M.projection(M.xc - M.g) - M.xc),
"right": lambda M: 1e3*M.eps,
"stopType": "critical"
}
bindingSet = {
"str": "%d : probSize = %3d <= bindingSet = %3d",
"left": lambda M: M.xc.size,
"right": lambda M: np.sum(M.bindingSet(M.xc)),
"stopType": "critical"
}
bindingSet_LS = {
"str": "%d : probSize = %3d <= bindingSet = %3d",
"left": lambda M: M._LS_xt.size,
"right": lambda M: np.sum(M.bindingSet(M._LS_xt)),
"stopType": "critical"
}
phi_d_target_Minimize = {
"str": "%d : phi_d = %1.4e <= phi_d_target = %1.4e ",
"left": lambda M: M.parent.phi_d,
"right": lambda M: M.parent.phi_d_target,
"stopType": "critical"
}
phi_d_target_Inversion = {
"str": "%d : phi_d = %1.4e <= phi_d_target = %1.4e ",
"left": lambda I: I.phi_d, "right": lambda I: I.phi_d_target,
"stopType": "critical"
}
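# --- Illustrative sketch (not part of the original class) ---
# Each criterion is a plain dict with "str", "left", "right" and "stopType"
# keys; judging from the built-in entries, Utils.checkStoppers compares
# left(M) <= right(M). A hypothetical wall-time limit, assuming the Minimize
# instance carried `t_start` and `t_max` attributes (the stock class does
# not define them), could look like:
import time
example_stopper_walltime = {
    "str": "%d : t_max = %1.4e <= elapsed = %1.4e",
    "left": lambda M: M.t_max,
    "right": lambda M: time.time() - M.t_start,
    "stopType": "critical"
}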
class IterationPrinters(object):
    """Collection of column definitions for the iteration printout."""
iteration = {
"title": "#", "value": lambda M: M.iter, "width": 5, "format": "%3d"
}
f = {
"title": "f", "value": lambda M: M.f, "width": 10, "format": "%1.2e"
}
norm_g = {
"title": "|proj(x-g)-x|",
"value": lambda M: norm(M.projection(M.xc - M.g) - M.xc),
"width": 15, "format": "%1.2e"
}
totalLS = {
"title": "LS", "value": lambda M: M.iterLS, "width": 5, "format": "%d"
}
iterationLS = {
"title": "#", "value": lambda M: (M.iter, M.iterLS), "width": 5,
"format": "%3d.%d"
}
LS_ft = {
"title": "ft", "value": lambda M: M._LS_ft, "width": 10,
"format": "%1.2e"
}
LS_t = {
"title": "t", "value": lambda M: M._LS_t, "width": 10,
"format": "%0.5f"
}
LS_armijoGoldstein = {
"title": "f + alp*g.T*p",
"value": lambda M: M.f + M.LSreduction*M._LS_descent, "width": 16,
"format": "%1.2e"
}
itType = {
"title": "itType", "value": lambda M: M._itType, "width": 8,
"format": "%s"
}
aSet = {
"title": "aSet", "value": lambda M: np.sum(M.activeSet(M.xc)),
"width": 8, "format": "%d"
}
bSet = {
"title": "bSet", "value": lambda M: np.sum(M.bindingSet(M.xc)),
"width": 8, "format": "%d"
}
comment = {
"title": "Comment", "value": lambda M: M.comment, "width": 12,
"format": "%s"
}
beta = {
"title": "beta", "value": lambda M: M.parent.beta, "width": 10,
"format": "%1.2e"
}
phi_d = {
"title": "phi_d", "value": lambda M: M.parent.phi_d, "width": 10,
"format": "%1.2e"
}
phi_m = {
"title": "phi_m", "value": lambda M: M.parent.phi_m, "width": 10,
"format": "%1.2e"
}
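# --- Illustrative sketch (not part of the original class) ---
# Printers follow the same dict protocol: "title", "value" (a callable taking
# the Minimize instance), "width" and "format". A hypothetical extra column
# showing the change in the objective between iterations could be defined as
# below and appended to opt.printers:
example_printer_df = {
    "title": "|f - f_last|",
    "value": lambda M: abs(M.f - M.f_last),
    "width": 12, "format": "%1.2e"
}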
class Minimize(object):
"""
Minimize is a general class for derivative based optimization.
"""
name = "General Optimization Algorithm" #: The name of the optimization algorithm
maxIter = 20 #: Maximum number of iterations
maxIterLS = 10 #: Maximum number of iterations for the line-search
maxStep = np.inf #: Maximum step possible, used in scaling before the line-search.
LSreduction = 1e-4 #: Expected decrease in the line-search
LSshorten = 0.5 #: Line-search step is shortened by this amount each time.
tolF = 1e-1 #: Tolerance on function value decrease
tolX = 1e-1 #: Tolerance on norm(x) movement
tolG = 1e-1 #: Tolerance on gradient norm
eps = 1e-5 #: Small value
stopNextIteration = False #: Stops the optimization program nicely.
debug = False #: Print debugging information
debugLS = False #: Print debugging information for the line-search
comment = '' #: Used by some functions to indicate what is going on in the algorithm
counter = None #: Set this to a SimPEG.Utils.Counter() if you want to count things
parent = None #: This is the parent of the optimization routine.
def __init__(self, **kwargs):
self.stoppers = [
StoppingCriteria.tolerance_f, StoppingCriteria.moving_x,
StoppingCriteria.tolerance_g, StoppingCriteria.norm_g,
StoppingCriteria.iteration
]
self.stoppersLS = [
StoppingCriteria.armijoGoldstein, StoppingCriteria.iterationLS
]
self.printers = [
IterationPrinters.iteration, IterationPrinters.f,
IterationPrinters.norm_g, IterationPrinters.totalLS
]
self.printersLS = [
IterationPrinters.iterationLS, IterationPrinters.LS_ft,
IterationPrinters.LS_t, IterationPrinters.LS_armijoGoldstein
]
Utils.setKwargs(self, **kwargs)
@property
def callback(self):
return getattr(self, '_callback', None)
@callback.setter
def callback(self, value):
if self.callback is not None:
print(
'The callback on the {0!s} Optimization was '
'replaced.'.format(self.__name__)
)
self._callback = value
@Utils.timeIt
def minimize(self, evalFunction, x0):
"""minimize(evalFunction, x0)
Minimizes the function (evalFunction) starting at the location x0.
:param callable evalFunction: function handle that evaluates: f, g, H = F(x)
:param numpy.ndarray x0: starting location
:rtype: numpy.ndarray
:return: x, the last iterate of the optimization algorithm
evalFunction is a function handle::
(f[, g][, H]) = evalFunction(x, return_g=False, return_H=False )
def evalFunction(x, return_g=False, return_H=False):
out = (f,)
if return_g:
out += (g,)
if return_H:
out += (H,)
return out if len(out) > 1 else out[0]
The algorithm for general minimization is as follows::
startup(x0)
printInit()
while True:
doStartIteration()
f, g, H = evalFunction(xc)
printIter()
if stoppingCriteria(): break
p = findSearchDirection()
p = scaleSearchDirection(p)
xt, passLS = modifySearchDirection(p)
if not passLS:
xt, caught = modifySearchDirectionBreak(p)
if not caught: return xc
doEndIteration(xt)
printDone()
finish()
return xc
"""
self.evalFunction = evalFunction
self.startup(x0)
self.printInit()
        print('x0 has any nan: {!s}'.format(np.any(np.isnan(x0))))
while True:
self.doStartIteration()
self.f, self.g, self.H = evalFunction(
self.xc, return_g=True, return_H=True
)
self.printIter()
if self.stoppingCriteria():
break
self.searchDirection = self.findSearchDirection()
del self.H #: Doing this saves memory, as it is not needed in the rest of the computations.
p = self.scaleSearchDirection(self.searchDirection)
xt, passLS = self.modifySearchDirection(p)
if not passLS:
xt, caught = self.modifySearchDirectionBreak(p)
if not caught:
return self.xc
self.doEndIteration(xt)
if self.stopNextIteration:
break
self.printDone()
self.finish()
return self.xc
@Utils.callHooks('startup')
def startup(self, x0):
"""
**startup** is called at the start of any new minimize call.
This will set::
x0 = x0
xc = x0
iter = iterLS = 0
:param numpy.ndarray x0: initial x
:rtype: None
:return: None
"""
self.iter = 0
self.iterLS = 0
self.stopNextIteration = False
x0 = self.projection(x0) # ensure that we start of feasible.
self.x0 = x0
self.xc = x0
self.f_last = np.nan
self.x_last = x0
@Utils.count
@Utils.callHooks('doStartIteration')
def doStartIteration(self):
"""doStartIteration()
**doStartIteration** is called at the start of each minimize
iteration.
:rtype: None
:return: None
"""
pass
def printInit(self, inLS=False):
"""
**printInit** is called at the beginning of the optimization
routine.
If there is a parent object, printInit will check for a
parent.printInit function and call that.
"""
pad = ' '*10 if inLS else ''
name = self.name if not inLS else self.nameLS
Utils.printTitles(
self, self.printers if not inLS else self.printersLS, name, pad
)
@Utils.callHooks('printIter')
def printIter(self, inLS=False):
"""
**printIter** is called directly after function evaluations.
If there is a parent object, printIter will check for a
parent.printIter function and call that.
"""
pad = ' '*10 if inLS else ''
Utils.printLine(
self, self.printers if not inLS else self.printersLS, pad=pad
)
def printDone(self, inLS=False):
"""
**printDone** is called at the end of the optimization routine.
If there is a parent object, printDone will check for a
parent.printDone function and call that.
"""
pad = ' '*10 if inLS else ''
stop, done = (
(' STOP! ', ' DONE! ') if not inLS else
('----------------', ' End Linesearch ')
)
stoppers = self.stoppers if not inLS else self.stoppersLS
Utils.printStoppers(self, stoppers, pad='', stop=stop, done=done)
@Utils.callHooks('finish')
def finish(self):
"""finish()
**finish** is called at the end of the optimization.
:rtype: None
:return: None
"""
pass
def stoppingCriteria(self, inLS=False):
if self.iter == 0:
self.f0 = self.f
self.g0 = self.g
return Utils.checkStoppers(
self, self.stoppers if not inLS else self.stoppersLS
)
@Utils.timeIt
@Utils.callHooks('projection')
def projection(self, p):
"""projection(p)
projects the search direction.
by default, no projection is applied.
:param numpy.ndarray p: searchDirection
:rtype: numpy.ndarray
:return: p, projected search direction
"""
return p
@Utils.timeIt
def findSearchDirection(self):
"""findSearchDirection()
**findSearchDirection** should return an approximation of:
.. math::
H p = - g
Where you are solving for the search direction, p
The default is:
.. math::
H = I
p = - g
And corresponds to SteepestDescent.
The latest function evaluations are present in::
self.f, self.g, self.H
:rtype: numpy.ndarray
:return: p, Search Direction
"""
return -self.g
@Utils.count
def scaleSearchDirection(self, p):
"""scaleSearchDirection(p)
**scaleSearchDirection** should scale the search direction if
appropriate.
Set the parameter **maxStep** in the minimize object, to scale back
the gradient to a maximum size.
:param numpy.ndarray p: searchDirection
:rtype: numpy.ndarray
:return: p, Scaled Search Direction
"""
if self.maxStep < np.abs(p.max()):
p = self.maxStep*p/np.abs(p.max())
return p
nameLS = "Armijo linesearch" #: The line-search name
@Utils.timeIt
def modifySearchDirection(self, p):
"""modifySearchDirection(p)
**modifySearchDirection** changes the search direction based on
some sort of linesearch or trust-region criteria.
        By default, an Armijo backtracking linesearch is performed with the
following parameters:
* maxIterLS, the maximum number of linesearch iterations
        * LSreduction, the expected decrease in the objective, default: 1e-4
* LSshorten, how much the step is reduced, default: 0.5
If the linesearch is completed, and a descent direction is found,
passLS is returned as True.
        Else, a modifySearchDirectionBreak call is performed.
:param numpy.ndarray p: searchDirection
:rtype: tuple
:return: (xt, passLS) numpy.ndarray, bool
"""
# Projected Armijo linesearch
self._LS_t = 1
self.iterLS = 0
while self.iterLS < self.maxIterLS:
self._LS_xt = self.projection(self.xc + self._LS_t*p)
self._LS_ft = self.evalFunction(
self._LS_xt, return_g=False, return_H=False
)
self._LS_descent = np.inner(self.g, self._LS_xt - self.xc) # this takes into account multiplying by t, but is important for projection.
if self.stoppingCriteria(inLS=True):
break
self.iterLS += 1
self._LS_t = self.LSshorten*self._LS_t
if self.debugLS:
if self.iterLS == 1: self.printInit(inLS=True)
self.printIter(inLS=True)
if self.debugLS and self.iterLS > 0:
self.printDone(inLS=True)
return self._LS_xt, self.iterLS < self.maxIterLS
@Utils.count
def modifySearchDirectionBreak(self, p):
"""modifySearchDirectionBreak(p)
Code is called if modifySearchDirection fails
to find a descent direction.
The search direction is passed as input and
this function must pass back both a new searchDirection,
and if the searchDirection break has been caught.
        By default, no additional work is done, and False is returned,
        indicating that the break was not caught.
:param numpy.ndarray p: searchDirection
:rtype: tuple
:return: (xt, breakCaught) numpy.ndarray, bool
"""
self.printDone(inLS=True)
print('The linesearch got broken. Boo.')
return p, False
@Utils.count
@Utils.callHooks('doEndIteration')
def doEndIteration(self, xt):
"""doEndIteration(xt)
**doEndIteration** is called at the end of each minimize iteration.
By default, function values and x locations are shuffled to store 1
past iteration in memory.
self.xc must be updated in this code.
:param numpy.ndarray xt: tested new iterate that ensures a descent direction.
:rtype: None
:return: None
"""
# store old values
self.f_last = self.f
self.x_last, self.xc = self.xc, xt
self.iter += 1
if self.debug:
self.printDone()
if self.callback is not None:
self.callback(xt)
def save(self, group):
group.setArray('searchDirection', self.searchDirection)
if getattr(self, 'parent', None) is None:
group.setArray('x', self.xc)
else: # Assume inversion is the parent
group.attrs['phi_d'] = self.parent.phi_d
group.attrs['phi_m'] = self.parent.phi_m
group.attrs['beta'] = self.parent.beta
group.setArray('m', self.xc)
group.setArray('dpred', self.parent.dpred)
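# --- Illustrative sketch (not part of the original module) ---
# A minimal, self-contained example of the evalFunction protocol documented
# in Minimize.minimize, applied to f(x) = 0.5*||x - 1||^2 (identity Hessian),
# which steepest descent solves in a single unit step. The helper name is
# purely illustrative.
def _example_minimize_quadratic():
    def evalFunction(x, return_g=False, return_H=False):
        f = 0.5 * np.dot(x - 1., x - 1.)
        out = (f,)
        if return_g:
            out += (x - 1.,)                # gradient of the quadratic
        if return_H:
            out += (sp.identity(x.size),)   # constant (identity) Hessian
        return out if len(out) > 1 else out[0]
    opt = SteepestDescent(maxIter=20)
    return opt.minimize(evalFunction, np.zeros(5))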
class Remember(object):
"""
This mixin remembers all the things you tend to forget.
You can remember parameters directly, naming the str in Minimize,
or pass a tuple with the name and the function that takes Minimize.
For Example::
opt.remember('f',('norm_g', lambda M: np.linalg.norm(M.g)))
opt.minimize(evalFunction, x0)
opt.recall('f')
The param name (str) can also be located in the parent (if no conflicts),
and it will be looked up by default.
"""
_rememberThese = []
def remember(self, *args):
self._rememberThese = args
def recall(self, param):
assert param in self._rememberList, (
"You didn't tell me to remember " + param +
", you gotta tell me what to remember!"
)
return self._rememberList[param]
def _startupRemember(self, x0):
self._rememberList = {}
for param in self._rememberThese:
if isinstance(param, string_types):
self._rememberList[param] = []
elif isinstance(param, tuple):
self._rememberList[param[0]] = []
def _doEndIterationRemember(self, *args):
for param in self._rememberThese:
if isinstance(param, string_types):
if self.debug: print('Remember is remembering: ' + param)
val = getattr(self, param, None)
if val is None and getattr(self, 'parent', None) is not None:
# Look to the parent for the param if not found here.
val = getattr(self.parent, param, None)
self._rememberList[param].append( val )
elif isinstance(param, tuple):
if self.debug: print('Remember is remembering: ' + param[0])
self._rememberList[param[0]].append( param[1](self) )
class ProjectedGradient(Minimize, Remember):
name = 'Projected Gradient'
maxIterCG = 5
tolCG = 1e-1
lower = -np.inf
upper = np.inf
def __init__(self,**kwargs):
super(ProjectedGradient, self).__init__(**kwargs)
self.stoppers.append(StoppingCriteria.bindingSet)
self.stoppersLS.append(StoppingCriteria.bindingSet_LS)
self.printers.extend([
IterationPrinters.itType, IterationPrinters.aSet,
IterationPrinters.bSet, IterationPrinters.comment
])
def _startup(self, x0):
# ensure bound vectors are the same size as the model
if type(self.lower) is not np.ndarray:
self.lower = np.ones_like(x0)*self.lower
if type(self.upper) is not np.ndarray:
self.upper = np.ones_like(x0)*self.upper
self.explorePG = True
self.exploreCG = False
self.stopDoingPG = False
self._itType = 'SD'
self.comment = ''
self.aSet_prev = self.activeSet(x0)
@Utils.count
def projection(self, x):
"""projection(x)
Make sure we are feasible.
"""
return np.median(np.c_[self.lower, x, self.upper], axis=1)
@Utils.count
def activeSet(self, x):
"""activeSet(x)
If we are on a bound
"""
return np.logical_or(x == self.lower, x == self.upper)
@Utils.count
def inactiveSet(self, x):
"""inactiveSet(x)
The free variables.
"""
return np.logical_not(self.activeSet(x))
@Utils.count
def bindingSet(self, x):
"""bindingSet(x)
If we are on a bound and the negative gradient points away from the
feasible set.
Optimality condition. (Satisfies Kuhn-Tucker) MoreToraldo91
"""
bind_up = np.logical_and(x == self.lower, self.g >= 0)
bind_low = np.logical_and(x == self.upper, self.g <= 0)
return np.logical_or(bind_up, bind_low)
@Utils.timeIt
def findSearchDirection(self):
"""findSearchDirection()
Finds the search direction based on either CG or steepest descent.
"""
self.aSet_prev = self.activeSet(self.xc)
allBoundsAreActive = sum(self.aSet_prev) == self.xc.size
if self.debug:
print('findSearchDirection: stopDoingPG: ', self.stopDoingPG)
if self.debug:
print('findSearchDirection: explorePG: ', self.explorePG)
if self.debug:
print('findSearchDirection: exploreCG: ', self.exploreCG)
if self.debug:
print('findSearchDirection: aSet', np.sum(self.activeSet(self.xc)))
if self.debug:
print(
'findSearchDirection: bSet', np.sum(self.bindingSet(self.xc))
)
if self.debug:
print(
'findSearchDirection: allBoundsAreActive: ', allBoundsAreActive
)
if self.explorePG or not self.exploreCG or allBoundsAreActive:
if self.debug:
print('findSearchDirection.PG: doingPG')
self._itType = 'SD'
p = -self.g
else:
if self.debug:
print('findSearchDirection.CG: doingCG')
# Reset the max decrease each time you do a CG iteration
self.f_decrease_max = -np.inf
self._itType = '.CG.'
iSet = self.inactiveSet(self.xc) # The inactive set (free variables)
bSet = self.bindingSet(self.xc)
shape = (self.xc.size, np.sum(iSet))
v = np.ones(shape[1])
i = np.where(iSet)[0]
j = np.arange(shape[1])
if self.debug:
print('findSearchDirection.CG: Z.shape', shape)
Z = sp.csr_matrix((v, (i, j)), shape=shape)
def reduceHess(v):
# Z is tall and skinny
return Z.T*(self.H*(Z*v))
operator = sp.linalg.LinearOperator(
(shape[1], shape[1]), reduceHess, dtype=self.xc.dtype
)
p, info = sp.linalg.cg(
operator, -Z.T*self.g, tol=self.tolCG, maxiter=self.maxIterCG
)
p = Z*p # bring up to full size
# aSet_after = self.activeSet(self.xc+p)
return p
@Utils.timeIt
def _doEndIteration_ProjectedGradient(self, xt):
"""_doEndIteration_ProjectedGradient(xt)"""
aSet = self.activeSet(xt)
bSet = self.bindingSet(xt)
self.explorePG = not np.all(aSet == self.aSet_prev) # explore proximal gradient
self.exploreCG = np.all(aSet == bSet) # explore conjugate gradient
f_current_decrease = self.f_last - self.f
self.comment = ''
if self.iter < 1:
# Note that this is reset on every CG iteration.
self.f_decrease_max = -np.inf
else:
self.f_decrease_max = max(self.f_decrease_max, f_current_decrease)
self.stopDoingPG = f_current_decrease < 0.25 * self.f_decrease_max
if self.stopDoingPG:
self.comment = 'Stop SD'
self.explorePG = False
self.exploreCG = True
# implement 3.8, MoreToraldo91
# self.eta_2 * max_decrease where max decrease
# if true go to CG
# don't do too many steps of PG in a row.
if self.debug:
print(
'doEndIteration.ProjGrad, f_current_decrease: ',
f_current_decrease
)
if self.debug:
print(
'doEndIteration.ProjGrad, f_decrease_max: ',
self.f_decrease_max
)
if self.debug:
print('doEndIteration.ProjGrad, stopDoingSD: ', self.stopDoingPG)
class BFGS(Minimize, Remember):
name = 'BFGS'
nbfgs = 10
def __init__(self, **kwargs):
Minimize.__init__(self, **kwargs)
@property
def bfgsH0(self):
"""
Approximate Hessian used in preconditioning the problem.
Must be a SimPEG.Solver
"""
if getattr(self, '_bfgsH0', None) is None:
print("""
Default solver: SolverDiag is being used in bfgsH0
"""
)
self._bfgsH0 = SolverDiag(sp.identity(self.xc.size))
return self._bfgsH0
@bfgsH0.setter
def bfgsH0(self, value):
self._bfgsH0 = value
def _startup_BFGS(self, x0):
self._bfgscnt = -1
self._bfgsY = np.zeros((x0.size, self.nbfgs))
self._bfgsS = np.zeros((x0.size, self.nbfgs))
if not np.any([p is IterationPrinters.comment for p in self.printers]):
self.printers.append(IterationPrinters.comment)
def bfgs(self, d):
n = self._bfgscnt
nn = ktop = min(self._bfgsS.shape[1], n)
return self.bfgsrec(ktop, n, nn, self._bfgsS, self._bfgsY, d)
def bfgsrec(self, k, n, nn, S, Y, d):
"""BFGS recursion"""
if k < 0:
d = self.bfgsH0 * d # Assume that bfgsH0 is a SimPEG.Solver
else:
            khat = 0 if nn == 0 else np.mod(n - nn + k, nn)
gamma = np.vdot(S[:, khat], d)/np.vdot(Y[:, khat], S[:, khat])
d = d - gamma*Y[:, khat]
d = self.bfgsrec(k-1, n, nn, S, Y, d)
d = d + (
gamma - np.vdot(Y[:, khat], d)/np.vdot(Y[:, khat], S[:, khat])
) * S[:, khat]
return d
def findSearchDirection(self):
return self.bfgs(-self.g)
def _doEndIteration_BFGS(self, xt):
        if self.iter == 0:
self.g_last = self.g
return
yy = self.g - self.g_last
ss = self.xc - xt
self.g_last = self.g
if yy.dot(ss) > 0:
self._bfgscnt += 1
ktop = np.mod(self._bfgscnt, self.nbfgs)
self._bfgsY[:, ktop] = yy
self._bfgsS[:, ktop] = ss
self.comment = ''
else:
self.comment = 'Skip BFGS'
class GaussNewton(Minimize, Remember):
name = '<NAME>'
def __init__(self, **kwargs):
Minimize.__init__(self, **kwargs)
@Utils.timeIt
def findSearchDirection(self):
return Solver(self.H) * (-self.g)
class InexactGaussNewton(BFGS, Minimize, Remember):
"""
Minimizes using CG as the inexact solver of
.. math::
\mathbf{H p = -g}
By default BFGS is used as the preconditioner.
Use *nbfgs* to set the memory limitation of BFGS.
To set the initial H0 to be used in BFGS, set *bfgsH0* to be a
SimPEG.Solver
"""
def __init__(self, **kwargs):
Minimize.__init__(self, **kwargs)
name = '<NAME>'
maxIterCG = 5
tolCG = 1e-1
@property
def approxHinv(self):
"""
The approximate Hessian inverse is used to precondition CG.
Default uses BFGS, with an initial H0 of *bfgsH0*.
Must be a scipy.sparse.linalg.LinearOperator
"""
_approxHinv = getattr(self, '_approxHinv', None)
if _approxHinv is None:
M = sp.linalg.LinearOperator(
(self.xc.size, self.xc.size), self.bfgs, dtype=self.xc.dtype
)
return M
return _approxHinv
@approxHinv.setter
def approxHinv(self, value):
self._approxHinv = value
@Utils.timeIt
def findSearchDirection(self):
Hinv = SolverICG(
self.H, M=self.approxHinv, tol=self.tolCG, maxiter=self.maxIterCG
)
p = Hinv * (-self.g)
return p
class SteepestDescent(Minimize, Remember):
name = 'Steepest Descent'
def __init__(self, **kwargs):
Minimize.__init__(self, **kwargs)
@Utils.timeIt
def findSearchDirection(self):
return -self.g
class NewtonRoot(object):
"""
Newton Method - Root Finding
root = newtonRoot(fun,x);
Where fun is the function that returns the function value as well as
the gradient.
    The update dh = -J \\ r is computed with the `Solver` attribute
    (a direct solve by default); pass an iterative solver and its options
    via `Solver` and `solverOpts` to change this.
<NAME>
16-May-2013 16:29:51
University of British Columbia
<EMAIL>
"""
tol = 1.000e-06
maxIter = 20
stepDcr = 0.5
maxLS = 30
comments = False
doLS = True
Solver = Solver
solverOpts = {}
def __init__(self, **kwargs):
Utils.setKwargs(self, **kwargs)
def root(self, fun, x):
"""root(fun, x)
Function Should have the form::
def evalFunction(x, return_g=False):
out = (f,)
if return_g:
out += (g,)
return out if len(out) > 1 else out[0]
"""
if self.comments:
print('Newton Method:\n')
self.iter = 0
while True:
r, J = fun(x, return_g=True)
Jinv = self.Solver(J, **self.solverOpts)
dh = - (Jinv * r)
muLS = 1.
LScnt = 1
xt = x + dh
rt = fun(xt, return_g=False)
if self.comments and self.doLS:
print('\tLinesearch:\n')
# Enter Linesearch
while True and self.doLS:
if self.comments:
print('\t\tResid: {0:e}\n'.format(norm(rt)))
if norm(rt) <= norm(r) or norm(rt) < self.tol:
break
muLS = muLS*self.stepDcr
LScnt = LScnt + 1
print('.')
if LScnt > self.maxLS:
print('Newton Method: Line search break.')
return None
xt = x + muLS*dh
rt = fun(xt, return_g=False)
x = xt
self.iter += 1
if norm(rt) < self.tol:
break
if self.iter > self.maxIter:
print(
'NewtonRoot stopped by maxIters ({0:d}). '
'norm: {1:4.4e}'.format(self.maxIter, norm(rt))
)
break
return x
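# --- Illustrative sketch (not part of the original module) ---
# NewtonRoot expects `fun` to return the residual and, when return_g=True,
# also its Jacobian in a form the Solver attribute can factor. A tiny 1-D
# example for r(x) = x**2 - 2, whose root is sqrt(2); the helper name is
# purely illustrative.
def _example_newton_root():
    def fun(x, return_g=False):
        r = x**2 - 2.
        if return_g:
            return r, sp.csr_matrix(np.diag(2. * x))   # Jacobian dr/dx
        return r
    return NewtonRoot().root(fun, np.array([1.5]))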
class ProjectedGNCG(BFGS, Minimize, Remember):
def __init__(self, **kwargs):
Minimize.__init__(self, **kwargs)
name = '<NAME>'
maxIterCG = 5
tolCG = 1e-1
stepOffBoundsFact = 0.1 # perturbation of the inactive set off the bounds
stepActiveset = True
lower = -np.inf
upper = np.inf
def _startup(self, x0):
# ensure bound vectors are the same size as the model
if type(self.lower) is not np.ndarray:
self.lower = np.ones_like(x0)*self.lower
if type(self.upper) is not np.ndarray:
self.upper = np.ones_like(x0)*self.upper
@Utils.count
def projection(self, x):
"""projection(x)
Make sure we are feasible.
"""
return np.median(np.c_[self.lower, x, self.upper], axis=1)
@Utils.count
def activeSet(self, x):
"""activeSet(x)
If we are on a bound
"""
return np.logical_or(x <= self.lower, x >= self.upper)
@property
def approxHinv(self):
"""
The approximate Hessian inverse is used to precondition CG.
Default uses BFGS, with an initial H0 of *bfgsH0*.
Must be a scipy.sparse.linalg.LinearOperator
"""
_approxHinv = getattr(self, '_approxHinv', None)
if _approxHinv is None:
M = sp.linalg.LinearOperator(
(self.xc.size, self.xc.size), self.bfgs, dtype=self.xc.dtype
)
return M
return _approxHinv
@approxHinv.setter
def approxHinv(self, value):
self._approxHinv = value
@Utils.timeIt
def findSearchDirection(self):
"""
findSearchDirection()
Finds the search direction based on either CG or steepest descent.
"""
Active = self.activeSet(self.xc)
temp = sum((np.ones_like(self.xc.size)-Active))
allBoundsAreActive = temp == self.xc.size
if allBoundsAreActive:
Hinv = SolverICG(
self.H, M=self.approxHinv, tol=self.tolCG,
maxiter=self.maxIterCG
)
p = Hinv * (-self.g)
return p
else:
delx = np.zeros(self.g.size)
resid = -(1-Active) * self.g
# Begin CG iterations.
cgiter = 0
cgFlag = 0
normResid0 = norm(resid)
while cgFlag == 0:
cgiter = cgiter + 1
dc = (1-Active)*(self.approxHinv*resid)
rd = np.dot(resid, dc)
# Compute conjugate direction pc.
if cgiter == 1:
pc = dc
else:
betak = rd / rdlast
pc = dc + betak * pc
# Form product Hessian*pc.
Hp = self.H*pc
Hp = (1-Active)*Hp
# Update delx and residual.
alphak = rd / np.dot(pc, Hp)
delx = delx + alphak*pc
resid = resid - alphak*Hp
rdlast = rd
if np.logical_or(
norm(resid)/normResid0 <= self.tolCG,
cgiter == self.maxIterCG
):
cgFlag = 1
# End CG Iterations
# Take a gradient step on the active cells if exist
if self.stepActiveset:
if temp != self.xc.size:
rhs_a = (Active) * -self.g
dm_i = max( abs( delx ) )
dm_a = max( abs(rhs_a) )
# perturb inactive set off of bounds so that they are included
# in the step
delx = delx + self.stepOffBoundsFact * (rhs_a * dm_i / dm_a)
# Only keep gradients going in the right direction on the active
# set
indx = (
((self.xc<=self.lower) & (delx < 0)) |
((self.xc>=self.upper) & (delx > 0))
)
delx[indx] = 0.
return delx
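# --- Illustrative sketch (not part of the original module) ---
# ProjectedGNCG is driven exactly like InexactGaussNewton, with bound
# constraints added; the bounds may be scalars (broadcast to the model size
# in _startup) or arrays. A hypothetical configuration:
#
#     opt = ProjectedGNCG(maxIter=30, maxIterCG=10, lower=0., upper=10.,
#                         stepOffBoundsFact=0.05)
#     # m_best = opt.minimize(evalFunction, m0)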
``` |
{
"source": "jlashner/ares",
"score": 2
} |
#### File: ares/analysis/Animation.py
```python
import numpy as np
import matplotlib.pyplot as pl
from .ModelSet import ModelSet
from ..physics import Hydrogen
from ..util.Aesthetics import Labeler
from ..physics.Constants import nu_0_mhz
from .MultiPhaseMedium import add_redshift_axis
from mpl_toolkits.axes_grid1 import inset_locator
try:
# this runs with no issues in python 2 but raises error in python 3
basestring
except:
# this try/except allows for python 2/3 compatible string type checking
basestring = str
try:
from mpi4py import MPI
rank = MPI.COMM_WORLD.rank
size = MPI.COMM_WORLD.size
except ImportError:
rank = 0
size = 1
class Animation(object): # pragma: no cover
def __init__(self, prefix=None):
self._prefix = prefix
@property
def model_set(self):
if not hasattr(self, '_model_set'):
if isinstance(self._prefix, ModelSet):
self._model_set = self._prefix
elif isinstance(self._prefix, basestring):
self._model_set = ModelSet(self._prefix)
elif type(self._prefix) in [list, tuple]:
raise NotImplementedError('help!')
return self._model_set
def _limits_w_padding(self, limits, take_log=False, un_log=False,
padding=0.1):
mi, ma = limits
if (mi <= 0) or self.model_set.is_log[0]:
mi -= padding
elif (take_log) and (not self.model_set.is_log[0]):
mi -= padding
else:
mi *= (1. - padding)
if (ma >= 0) or self.model_set.is_log[0]:
ma += padding
elif (take_log) and (not self.model_set.is_log[0]):
            ma += padding
else:
ma *= (1. + padding)
return mi, ma
def build_tracks(self, plane, _pars, pivots=None, ivar=None, take_log=False,
un_log=False, multiplier=1, origin=None):
"""
Construct array of models in the order in which we'll plot them.
"""
data = self.model_set.ExtractData(_pars, ivar=ivar,
take_log=take_log, un_log=un_log, multiplier=multiplier)
par = _pars[0]
N = data[par].shape[0]
limits = data[par].min(), data[par].max()
# Re-order the data.
order = np.argsort(data[par])
data_sorted = {par:data[par][order]}
for p in plane:
# How does this work if 1-D blob?
if p in _pars:
data_sorted[p] = data[p][order]
else:
ii, jj, nd, dims = self.model_set.blob_info(p)
data_sorted[p] = self.model_set.blob_ivars[ii][jj]
if origin is None:
start = end = data_sorted[par][N // 2]
else:
start = end = origin
# By default, scan to lower values, then all the way up, then return
# to start point
if pivots is None:
pivots = [round(v, 4) for v in [start, limits[0], limits[1], end]]
for element in pivots:
assert limits[0] <= element <= limits[1], \
"Pivot point lies outside range of data!"
data_assembled = {p:[] for p in _pars}
i = np.argmin(np.abs(pivots[0] - data_sorted[par]))
for k, pivot in enumerate(pivots):
if k == 0:
continue
j = np.argmin(np.abs(pivot - data_sorted[par]))
#if par == 'pop_logN{1}':
# print i, j, k
if j < i:
step = -1
else:
step = 1
for p in _pars:
data_assembled[p].extend(list(data_sorted[p][i:j:step]))
i = 1 * j
# Add start point!
data_assembled[p].append(start)
data_assembled[par] = np.array(data_assembled[par])
self.data = {'raw': data, 'sorted': data_sorted,
'assembled': data_assembled, 'limits':limits}
def prepare_axis(self, ax=None, fig=1, squeeze_main=True,
take_log=False, un_log=False, **kwargs):
if ax is None:
fig = pl.figure(fig)
fig.subplots_adjust(right=0.7)
ax = fig.add_subplot(111)
sax = self.add_slider(ax, limits=self.data['limits'],
take_log=take_log, un_log=un_log, **kwargs)
return ax, sax
def Plot1D(self, plane, par=None, pivots=None, prefix='test', twin_ax=None,
ivar=None, take_log=False, un_log=False, multiplier=1.,
ax=None, sax=None, fig=1, clear=True, z_to_freq=True,
slider_kwargs={}, backdrop=None, backdrop_kwargs={}, squeeze_main=True,
close=False, xlim=None, ylim=None, xticks=None, yticks=None,
z_ax=True, origin=None, sticks=None, slims=None, inits=None,
**kwargs):
"""
Animate variations of a single parameter.
Parameters
----------
par : str
Parameter to vary.
pivots : list, tuple
..note:: should implement override for kwargs, like change color of
line/symbol if some condition violated (e.g., tau_e).
"""
if par is None:
assert len(self.model_set.parameters) == 1
par = self.model_set.parameters[0]
else:
assert par in self.model_set.parameters, \
"Supplied parameter '{!s}' is unavailable!".format(par)
_pars = [par]
_x = None
for _p in plane:
if _p in self.model_set.all_blob_names:
_pars.append(_p)
else:
_x = _p
if type(sticks) is dict:
sticks = sticks[par]
if type(slims) is dict:
slims = slims[par]
# This sets up all the data
self.build_tracks(plane, _pars, pivots=pivots, ivar=ivar,
take_log=[take_log, False, False], un_log=[un_log, False, False],
multiplier=multiplier, origin=origin)
if ax is None:
ax, sax = self.prepare_axis(ax, fig, **slider_kwargs)
if z_ax and 'z' in _pars:
twin_ax = add_redshift_axis(ax, twin_ax)
labeler = Labeler(_pars, **self.model_set.base_kwargs)
# What do we need to make plots?
# data_assembled, plane, ax, sax, take_log etc.
data = self.data['raw']
limits = self.data['limits']
data_assembled = self.data['assembled']
for i, val in enumerate(data_assembled[par]):
if _x is None:
x = data_assembled[plane[0]][i]
else:
x = _x
y = data_assembled[plane[1]][i]
if type(x) in [int, float]:
ax.scatter(x, y, **kwargs)
else:
if ('z' in _pars) and z_to_freq:
ax.plot(nu_0_mhz / (1.+ x), y, **kwargs)
else:
ax.plot(x, y, **kwargs)
if inits is not None:
if z_to_freq:
ax.plot(nu_0_mhz / (1. + inits['z']), inits['dTb'],
**kwargs)
else:
ax.plot(inits['z'], inits['dTb'], **kwargs)
# Need to be careful with axes limits not changing...
if ('z' in _pars) and z_to_freq:
xarr = nu_0_mhz / (1. + data[plane[0]])
else:
xarr = data[plane[0]]
if xlim is not None:
xmi, xma = xlim
elif _x is None:
_xmi, _xma = xarr.min(), xarr.max()
xmi, xma = self._limits_w_padding((_xmi, _xma))
ax.set_xlim(xmi, xma)
if twin_ax is not None:
twin_ax.set_xlim(xmi, xma)
if ylim is not None:
ax.set_ylim(ylim)
else:
_ymi, _yma = data[plane[1]].min(), data[plane[1]].max()
ymi, yma = self._limits_w_padding((_ymi, _yma))
ax.set_ylim(ymi, yma)
sax.plot([val]*2, [0, 1], **kwargs)
sax = self._reset_slider(sax, limits, take_log, un_log,
sticks=sticks, slims=slims, **slider_kwargs)
if ('z' in _pars) and z_to_freq:
ax.set_xlabel(labeler.label('nu'))
else:
ax.set_xlabel(labeler.label(plane[0]))
ax.set_ylabel(labeler.label(plane[1]))
if xticks is not None:
ax.set_xticks(xticks, minor=True)
if yticks is not None:
ax.set_yticks(yticks, minor=True)
if ('z' in _pars) and z_to_freq:
if z_ax:
twin_ax = add_redshift_axis(ax, twin_ax)
pl.draw()
pl.savefig('{0!s}_{1!s}.png'.format(prefix, str(i).zfill(4)))
if clear:
ax.clear()
sax.clear()
if twin_ax is not None:
twin_ax.clear()
return ax, twin_ax
def add_residue(self):
pass
def add_marker(self):
pass
def _reset_slider(self, ax, limits, take_log=False, un_log=False,
sticks=None, slims=None, **kwargs):
ax.set_yticks([])
ax.set_yticklabels([])
if slims is None:
lo, hi = self._limits_w_padding(limits, take_log=take_log, un_log=un_log)
else:
lo, hi = slims
ax.set_xlim(lo, hi)
ax.tick_params(axis='x', labelsize=10, length=3, width=1, which='major')
if 'label' in kwargs:
ax.set_xlabel(kwargs['label'], fontsize=14)
if sticks is not None:
ax.set_xticks(sticks)
return ax
def add_slider(self, ax, limits, take_log=False, un_log=False,
rect=[0.75, 0.7, 0.2, 0.05], **kwargs):
"""
Add inset 'slider' thing.
"""
inset = pl.axes(rect)
inset = self._reset_slider(inset, limits, take_log, un_log, **kwargs)
pl.draw()
return inset
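# --- Illustrative sketch (not part of the original module) ---
# Typical use: point Animation at a saved ModelSet prefix and animate one
# parameter against a blob plane. The prefix and parameter name below are
# hypothetical placeholders.
#
#     anim = Animation('ares_mcmc_run')
#     anim.Plot1D(('z', 'dTb'), par='pop_fesc{0}', prefix='fesc_anim',
#                 color='k', lw=2)
#
# Each frame is written as '<prefix>_NNNN.png' (zero-padded frame index),
# ready to be stitched into a movie with an external tool such as ffmpeg.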
class AnimationSet(object): # pragma: no cover
def __init__(self, prefix):
self._prefix = prefix
@property
def animations(self):
if not hasattr(self, '_animations'):
self._animations = []
for prefix in self._prefix:
self._animations.append(Animation(prefix))
return self._animations
@property
def parameters(self):
if not hasattr(self, '_parameters'):
self._parameters = []
for animation in self.animations:
if len(animation.model_set.parameters) == 1:
self._parameters.append(animation.model_set.parameters[0])
else:
self._parameters.append('unknown')
return self._parameters
@property
def labels(self):
if not hasattr(self, '_labels'):
self._labels = []
for animation in self.animations:
if len(animation.model_set.parameters) == 1:
self._labels.append(animation.model_set.parameters[0])
else:
self._labels.append('unknown')
return self._labels
@labels.setter
def labels(self, value):
if type(value) is dict:
self._labels = []
for par in self.parameters:
self._labels.append(value[par])
elif type(value) in [list, tuple]:
assert len(value) == len(self.parameters)
self._labels = value
@property
def origin(self):
if not hasattr(self, '_origin'):
self._origin = [None] * len(self.animations)
return self._origin
@origin.setter
def origin(self, value):
if type(value) is dict:
self._origin = []
for par in self.parameters:
self._origin.append(value[par])
elif type(value) in [list, tuple]:
assert len(value) == len(self.parameters)
self._origin = value
@property
def take_log(self):
if not hasattr(self, '_take_log'):
self._take_log = [False] * len(self.parameters)
return self._take_log
@take_log.setter
def take_log(self, value):
if type(value) is dict:
self._take_log = []
for par in self.parameters:
self._take_log.append(value[par])
elif type(value) in [list, tuple]:
assert len(value) == len(self.parameters)
self._take_log = value
@property
def un_log(self):
if not hasattr(self, '_un_log'):
self._un_log = [False] * len(self.parameters)
return self._un_log
@un_log.setter
def un_log(self, value):
if type(value) is dict:
self._un_log = []
for par in self.parameters:
self._un_log.append(value[par])
elif type(value) in [list, tuple]:
assert len(value) == len(self.parameters)
            self._un_log = value
@property
def inits(self):
if not hasattr(self, '_inits'):
hydr = Hydrogen()
inits = hydr.inits
anim = self.animations[0]
gr, i, nd, dims = anim.model_set.blob_info('z')
_z = anim.model_set.blob_ivars[gr][i]
z = np.arange(max(_z), 1100, 1)
dTb = hydr.dTb_no_astrophysics(z)
self._inits = {'z': z, 'dTb': dTb}
return self._inits
def Plot1D(self, plane, pars=None, ax=None, fig=1, prefix='test',
xlim=None, ylim=None, xticks=None, yticks=None, sticks=None,
slims=None, top_sax=0.75, include_inits=True, **kwargs):
"""
Basically run a series of Plot1D.
"""
if pars is None:
pars = self.parameters
assert type(pars) in [list, tuple]
N = len(pars)
if sticks is None:
sticks = {par:None for par in pars}
if slims is None:
slims = {par:None for par in pars}
##
# First: setup axes
##
ax = None
sax = []
for k in range(N):
assert len(self.animations[k].model_set.parameters) == 1
par = self.animations[k].model_set.parameters[0]
_pars = [par]
_x = None
for _p in plane:
if _p in self.animations[k].model_set.all_blob_names:
_pars.append(_p)
else:
_x = _p
self.animations[k].build_tracks(plane, _pars,
take_log=self.take_log[k], un_log=False, multiplier=1,
origin=self.origin[k])
ax, _sax = self.animations[k].prepare_axis(ax=ax, fig=fig,
squeeze_main=True, rect=[0.75, top_sax-0.15*k, 0.2, 0.05],
label=self.labels[k])
sax.append(_sax)
##
# Now do all the plotting
##
twin_ax = None
for k in range(N):
par = self.animations[k].model_set.parameters[0]
_pars = [par]
_x = None
for _p in plane:
if _p in self.animations[k].model_set.all_blob_names:
_pars.append(_p)
else:
_x = _p
kw = {'label': self.labels[k]}
# Add slider bar for all currently static parameters
# (i.e., grab default value)
for l in range(N):
if l == k:
continue
_p = self.parameters[l]
limits = self.animations[l].data['limits']
sax[l].plot([self.origin[l]]*2, [0, 1], **kwargs)
self.animations[l]._reset_slider(sax[l], limits,
take_log=self.take_log[l], un_log=self.un_log[l],
label=self.labels[l], sticks=sticks[_p], slims=slims[_p])
# Plot variable parameter
ax, twin_ax = \
self.animations[k].Plot1D(plane, par, ax=ax, sax=sax[k],
take_log=self.take_log[k], un_log=self.un_log[k],
prefix='{0!s}.{1!s}'.format(prefix, par), close=False,
slider_kwargs=kw, xlim=xlim, ylim=ylim, origin=self.origin[k],
xticks=xticks, yticks=yticks, twin_ax=twin_ax,
sticks=sticks, slims=slims, inits=self.inits, **kwargs)
```
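For orientation, a minimal, untested sketch of how the `AnimationSet` class above might be driven; the import path, the model-grid prefixes, the parameter names, and the `('z', 'dTb')` plane are placeholders rather than shipped examples.
```python
# Hypothetical usage of AnimationSet (all names below are placeholders).
from ares.analysis import AnimationSet  # assumed import location

anim = AnimationSet(prefix=['grid_fX', 'grid_Tmin'])

# Labels and slider origins keyed by each grid's single varied parameter,
# matching the dict branches of the setters above.
anim.labels = {'fX': r'$f_X$', 'Tmin': r'$T_{\rm min}$'}
anim.origin = {'fX': 0.2, 'Tmin': 1e4}

# Animate dTb(z) while stepping each parameter through its grid values.
anim.Plot1D(plane=('z', 'dTb'), prefix='my_movie')
```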
#### File: ares/inference/CalibrateModel.py
```python
import os
import numpy as np
from ..util import read_lit
from .ModelFit import ModelFit
from ..simulations import Global21cm
from ..util import ParameterBundle as PB
from .FitGlobal21cm import FitGlobal21cm
from ..populations.GalaxyCohort import GalaxyCohort
from .FitGalaxyPopulation import FitGalaxyPopulation
from ..populations.GalaxyEnsemble import GalaxyEnsemble
try:
from distpy import DistributionSet
from distpy import UniformDistribution
except ImportError:
pass
try:
from mpi4py import MPI
rank = MPI.COMM_WORLD.rank
size = MPI.COMM_WORLD.size
except ImportError:
rank = 0
size = 1
_b14 = read_lit('bouwens2014')
filt_hst = {4: _b14.filt_shallow[4], 5: _b14.filt_shallow[5],
6: _b14.filt_shallow[6], 7: _b14.filt_deep[7]}
_zcal_lf = [3.8, 4.9, 5.9, 6.9, 7.9, 10.]
_zcal_smf = [3, 4, 5, 6, 7, 8]
_zcal_beta = [4, 5, 6, 7]
acceptable_sfe_params = ['slope-low', 'slope-high', 'norm', 'peak']
acceptable_dust_params = ['norm', 'slope', 'peak', 'fcov', 'yield', 'scatter',
'kappa', 'slope-high', 'growth']
class CalibrateModel(object):
"""
Convenience class for calibrating galaxy models to UVLFs and/or SMFs.
"""
def __init__(self, fit_lf=[5.9], fit_smf=False, fit_beta=False,
fit_gs=None, idnum=0, add_suffix=True, ztol=0.21,
free_params_sfe=[], zevol_sfe=[],
include_fshock=False, include_scatter_mar=False, name=None,
include_dust='var_beta', include_fgrowth=False,
include_fduty=False, zevol_fduty=False, include_kappa=False,
zevol_fshock=False, zevol_dust=False, free_params_dust=[],
save_lf=True, save_smf=False, save_sam=False, include_fdtmr=False,
save_sfrd=False, save_beta=False, save_dust=False, zmap={},
monotonic_beta=False):
"""
Calibrate a galaxy model to available data.
.. note :: All the `include_*` parameters control what goes into our
base_kwargs, while the `free_params_*` parameters control what
we allow to vary in the fit.
Parameters
----------
fit_lf : bool
Use available luminosity function measurements?
fit_beta : bool
Use available UV colour-magnitude measurements?
fit_smf : bool
Use available stellar mass function measurements?
fit_gs : tuple
Use constraints on global 21-cm signal?
If not None, this should be (frequencies / MHz, dTb / mK, err / mK).
idnum : int
If model being calibrated has multiple source populations, this is
the ID number of the one containing luminosity functions etc.
free_params_sfe : list
SFE parameters allowed to vary in the fit; each entry must be one of
'slope-low', 'slope-high', 'norm', or 'peak'.
zevol_sfe : list, str, None
Subset of `free_params_sfe` allowed to evolve with redshift, e.g.,
the normalization or the peak mass of the SFE ('all' lets every free
SFE parameter evolve).
A minimal usage sketch follows this file listing.
"""
self.name = name # optional additional prefix
self.add_suffix = add_suffix
self.fit_lf = fit_lf
self.fit_smf = fit_smf
self.fit_gs = fit_gs
self.fit_beta = fit_beta
self.idnum = idnum
self.zmap = zmap
self.ztol = ztol
self.monotonic_beta = monotonic_beta
self.include_fshock = int(include_fshock)
self.include_scatter_mar = int(include_scatter_mar)
self.include_dust = include_dust
self.include_fgrowth = include_fgrowth
self.include_fduty = include_fduty
self.include_fdtmr = include_fdtmr
self.include_kappa = include_kappa
# Set SFE free parameters
self.free_params_sfe = free_params_sfe
for par in self.free_params_sfe:
if par in acceptable_sfe_params:
continue
raise ValueError("Unrecognized SFE param: {}".format(par))
# What's allowed to vary with redshift?
if zevol_sfe is None:
self.zevol_sfe = []
elif zevol_sfe == 'all':
self.zevol_sfe = free_params_sfe
else:
self.zevol_sfe = zevol_sfe
# Set SFE free parameters
self.free_params_dust = free_params_dust
for par in self.free_params_dust:
if par in acceptable_dust_params:
continue
raise ValueError("Unrecognized dust param: {}".format(par))
# What's allowed to vary with redshift?
if zevol_dust is None:
self.zevol_dust = []
elif zevol_dust == 'all':
self.zevol_dust = free_params_dust
else:
self.zevol_dust = zevol_dust
self.zevol_fduty = zevol_fduty
self.save_lf = int(save_lf)
self.save_smf = int(save_smf)
self.save_sam = int(save_sam)
self.save_sfrd = int(save_sfrd)
self.save_beta = bool(save_beta) if save_beta in [0, 1, True, False] \
else int(save_beta)
self.save_dust = int(save_dust)
def get_zstr(self, vals, okvals):
"""
Make a string showing the redshifts we're calibrating to for some
quantity.
"""
zcal = []
for z in okvals:
if z not in vals:
continue
zcal.append(z)
zs = ''
for z in zcal:
zs += '%i_' % round(z)
zs = zs.rstrip('_')
return zs
@property
def prefix(self):
"""
Generate output filename.
"""
s = ''
if self.fit_lf:
s += 'lf_' + self.get_zstr(self.fit_lf, _zcal_lf) + '_'
if self.fit_smf:
s += 'smf_' + self.get_zstr(self.fit_smf, _zcal_smf) + '_'
if self.fit_beta:
s += 'beta_' + self.get_zstr(self.fit_beta, _zcal_beta) + '_'
if self.fit_gs:
s += 'gs_{0:.0f}_{1:.0f}_'.format(self.fit_gs[0].min(),
self.fit_gs[0].max())
if self.name is not None:
if self.add_suffix:
s = self.name + '_' + s
else:
s = self.name
if rank == 0:
print("# Will save to files with prefix {}.".format(s))
return s
@property
def parameters(self):
if not hasattr(self, '_parameters'):
if self.Npops > 1:
_suff = '{{{}}}'.format(self.idnum)
else:
_suff = ''
free_pars = []
guesses = {}
is_log = []
jitter = []
ps = DistributionSet()
# Normalization of SFE
if 'norm' in self.free_params_sfe:
free_pars.append('pq_func_par0[0]{}'.format(_suff))
guesses['pq_func_par0[0]{}'.format(_suff)] = -1.5
is_log.extend([True])
jitter.extend([0.1])
ps.add_distribution(UniformDistribution(-7, 1.),
'pq_func_par0[0]{}'.format(_suff))
if 'norm' in self.zevol_sfe:
free_pars.append('pq_func_par6[0]{}'.format(_suff))
guesses['pq_func_par6[0]{}'.format(_suff)] = 0.
is_log.extend([False])
jitter.extend([0.1])
ps.add_distribution(UniformDistribution(-3, 3.),
'pq_func_par6[0]{}'.format(_suff))
# Peak mass
if 'peak' in self.free_params_sfe:
free_pars.append('pq_func_par1[0]{}'.format(_suff))
guesses['pq_func_par1[0]{}'.format(_suff)] = 11.5
is_log.extend([True])
jitter.extend([0.1])
ps.add_distribution(UniformDistribution(9., 13.),
'pq_func_par1[0]{}'.format(_suff))
if 'peak' in self.zevol_sfe:
free_pars.append('pq_func_par7[0]{}'.format(_suff))
guesses['pq_func_par7[0]{}'.format(_suff)] = 0.
is_log.extend([False])
jitter.extend([2.])
ps.add_distribution(UniformDistribution(-6, 6.),
'pq_func_par7[0]{}'.format(_suff))
# Slope at low-mass side of peak
if 'slope-low' in self.free_params_sfe:
free_pars.append('pq_func_par2[0]{}'.format(_suff))
guesses['pq_func_par2[0]{}'.format(_suff)] = 0.66
is_log.extend([False])
jitter.extend([0.1])
ps.add_distribution(UniformDistribution(0.0, 1.5),
'pq_func_par2[0]{}'.format(_suff))
# Allow to evolve with redshift?
if 'slope-low' in self.zevol_sfe:
free_pars.append('pq_func_par8[0]{}'.format(_suff))
guesses['pq_func_par8[0]{}'.format(_suff)] = 0.
is_log.extend([False])
jitter.extend([0.1])
ps.add_distribution(UniformDistribution(-3, 3.),
'pq_func_par8[0]{}'.format(_suff))
# Slope at high-mass side of peak
if 'slope-high' in self.free_params_sfe:
free_pars.append('pq_func_par3[0]{}'.format(_suff))
guesses['pq_func_par3[0]{}'.format(_suff)] = -0.3
is_log.extend([False])
jitter.extend([0.1])
ps.add_distribution(UniformDistribution(-3., 0.3),
'pq_func_par3[0]{}'.format(_suff))
# Allow to evolve with redshift?
if 'slope-high' in self.zevol_sfe:
free_pars.append('pq_func_par9[0]{}'.format(_suff))
guesses['pq_func_par9[0]{}'.format(_suff)] = 0.
is_log.extend([False])
jitter.extend([0.1])
ps.add_distribution(UniformDistribution(-6, 6.),
'pq_func_par9[0]{}'.format(_suff))
##
# fduty
##
if self.include_fduty:
# Normalization of SFE
free_pars.extend(['pq_func_par0[40]', 'pq_func_par2[40]'])
guesses['pq_func_par0[40]'] = 0.5
guesses['pq_func_par2[40]'] = 0.25
is_log.extend([False, False])
jitter.extend([0.2, 0.2])
ps.add_distribution(UniformDistribution(0., 1.), 'pq_func_par0[40]')
ps.add_distribution(UniformDistribution(-2., 2.), 'pq_func_par2[40]')
if self.zevol_fduty:
free_pars.append('pq_func_par4[40]')
guesses['pq_func_par4[40]'] = 0.
is_log.extend([False])
jitter.extend([0.1])
ps.add_distribution(UniformDistribution(-3, 3.), 'pq_func_par4[40]')
##
# DUST REDDENING
##
if self.include_dust in ['screen', 'screen-dpl']:
if 'norm' in self.free_params_dust:
free_pars.append('pq_func_par0[22]')
if 'slope-high' not in self.free_params_dust:
guesses['pq_func_par0[22]'] = 2.4
else:
guesses['pq_func_par0[22]'] = 1.2
is_log.extend([False])
jitter.extend([0.1])
ps.add_distribution(UniformDistribution(0.01, 10.), 'pq_func_par0[22]')
if 'norm' in self.zevol_dust:
assert self.include_dust == 'screen'
# If screen-dpl need to change parameter number!
free_pars.append('pq_func_par4[22]')
guesses['pq_func_par4[22]'] = 0.
is_log.extend([False])
jitter.extend([0.5])
ps.add_distribution(UniformDistribution(-2., 2.), 'pq_func_par4[22]')
if 'slope' in self.free_params_dust:
free_pars.append('pq_func_par2[22]')
guesses['pq_func_par2[22]'] = 0.5
is_log.extend([False])
jitter.extend([0.05])
ps.add_distribution(UniformDistribution(0, 2.), 'pq_func_par2[22]')
if 'slope-high' in self.free_params_dust:
assert self.include_dust == 'screen-dpl'
free_pars.append('pq_func_par3[22]')
guesses['pq_func_par3[22]'] = 0.5
is_log.extend([False])
jitter.extend([0.05])
ps.add_distribution(UniformDistribution(-1.0, 2.), 'pq_func_par3[22]')
if 'slope-high' in self.zevol_dust:
raise NotImplementedError('help')
if 'peak' in self.free_params_dust:
assert self.include_dust == 'screen-dpl'
free_pars.append('pq_func_par1[22]')
guesses['pq_func_par1[22]'] = 11.
is_log.extend([True])
jitter.extend([0.2])
ps.add_distribution(UniformDistribution(9., 13.), 'pq_func_par1[22]')
if 'peak' in self.zevol_dust:
raise NotImplementedError('help')
free_pars.append('pq_func_par2[24]')
guesses['pq_func_par2[24]'] = 0.0
is_log.extend([False])
jitter.extend([0.5])
ps.add_distribution(UniformDistribution(-2., 2.), 'pq_func_par2[24]')
if 'yield' in self.free_params_dust:
assert self.include_fdtmr
free_pars.extend(['pq_func_par0[50]', 'pq_func_par2[50]'])
guesses['pq_func_par0[50]'] = 0.4
guesses['pq_func_par2[50]'] = 0.
is_log.extend([False, False])
jitter.extend([0.1, 0.2])
ps.add_distribution(UniformDistribution(0., 1.0), 'pq_func_par0[50]')
ps.add_distribution(UniformDistribution(-2., 2.), 'pq_func_par2[50]')
if 'yield' in self.zevol_dust:
free_pars.append('pq_func_par4[50]')
guesses['pq_func_par4[50]'] = 0.0
is_log.extend([False])
jitter.extend([0.5])
ps.add_distribution(UniformDistribution(-3., 3.), 'pq_func_par4[50]')
if 'growth' in self.free_params_dust:
assert self.include_fgrowth
free_pars.extend(['pq_func_par0[60]', 'pq_func_par2[60]'])
guesses['pq_func_par0[60]'] = 11.
guesses['pq_func_par2[60]'] = 0.
is_log.extend([True, False])
jitter.extend([0.5, 0.2])
ps.add_distribution(UniformDistribution(7., 14.), 'pq_func_par0[60]')
ps.add_distribution(UniformDistribution(-2., 2.), 'pq_func_par2[60]')
if 'growth' in self.zevol_dust:
free_pars.append('pq_func_par4[60]')
guesses['pq_func_par4[60]'] = 0.
is_log.extend([False])
jitter.extend([0.5])
ps.add_distribution(UniformDistribution(-4., 4.), 'pq_func_par4[60]')
if 'scatter' in self.free_params_dust:
free_pars.extend(['pq_func_par0[33]'])
if 'slope-high' not in self.free_params_dust:
guesses['pq_func_par0[33]'] = 0.1
else:
guesses['pq_func_par0[33]'] = 0.05
is_log.extend([False])
jitter.extend([0.05])
ps.add_distribution(UniformDistribution(0., 0.6), 'pq_func_par0[33]')
if 'scatter-slope' in self.free_params_dust:
free_pars.extend(['pq_func_par2[33]'])
guesses['pq_func_par2[33]'] = 0.
is_log.extend([False])
jitter.extend([0.1])
ps.add_distribution(UniformDistribution(-2., 2.), 'pq_func_par2[33]')
if 'scatter' in self.zevol_dust:
free_pars.append('pq_func_par4[33]')
guesses['pq_func_par4[33]'] = 0.0
is_log.extend([False])
jitter.extend([0.5])
ps.add_distribution(UniformDistribution(-2., 2.), 'pq_func_par4[33]')
if 'kappa' in self.free_params_dust:
free_pars.extend(['pq_func_par4[20]', 'pq_func_par6[20]'])
guesses['pq_func_par4[20]'] = 0.0
guesses['pq_func_par6[20]'] = 0.0
is_log.extend([False, False])
jitter.extend([0.2, 0.2])
ps.add_distribution(UniformDistribution(-3, 3.), 'pq_func_par4[20]')
ps.add_distribution(UniformDistribution(-2, 2.), 'pq_func_par6[20]')
if 'kappa' in self.zevol_dust:
raise NotImplementedError('Cannot do triply nested PQs.')
# Set the attributes
self._parameters = free_pars
self._guesses = guesses
self._is_log = is_log
self._jitter = jitter
self._priors = ps
return self._parameters
@property
def guesses(self):
if not hasattr(self, '_guesses'):
tmp = self.parameters
return self._guesses
@guesses.setter
def guesses(self, value):
if not hasattr(self, '_guesses'):
tmp = self.parameters
print("Revising default guessses...")
self._guesses.update(value)
@property
def jitter(self):
if not hasattr(self, '_jitter'):
tmp = self.parameters
return self._jitter
@jitter.setter
def jitter(self, value):
self._jitter = value
@property
def is_log(self):
if not hasattr(self, '_is_log'):
tmp = self.parameters
return self._is_log
@is_log.setter
def is_log(self, value):
self._is_log = value
@property
def priors(self):
if not hasattr(self, '_priors'):
tmp = self.parameters
return self._priors
@priors.setter
def priors(self, value):
self._priors = value
@property
def blobs(self):
##
# First: some generic redshifts, magnitudes, masses.
redshifts = np.array([4, 6, 8, 10]) # generic
if self.fit_lf:
if 'lf' in self.zmap:
red_lf = np.sort([item for item in self.zmap['lf'].values()])
else:
red_lf = np.array(self.fit_lf)
else:
red_lf = redshifts
if self.fit_smf:
if 'smf' in self.zmap:
raise NotImplementedError('help')
red_smf = np.array(self.fit_smf)
# Default to saving LF at same redshifts if not specified otherwise.
if not self.fit_lf:
red_lf = red_smf
else:
red_smf = red_lf
if self.fit_beta:
red_beta = np.array(self.fit_beta)
else:
red_beta = red_lf
MUV = np.arange(-26, 5., 0.5)
Mh = np.logspace(7, 13, 61)
Ms = np.arange(7, 13.25, 0.25)
##
# Now, start assembling blobs
# Account for different location of population instance if
# fit runs an ares.simulations calculation. Just GS option now.
if self.fit_gs is not None:
_pref = 'pops[{}].'.format(self.idnum)
else:
_pref = ''
# For things like SFE, fduty, etc., need to tap into `guide`
# attribute when using GalaxyEnsemble.
if self.use_ensemble:
_pref_g = _pref + 'guide.'
else:
_pref_g = _pref
# Always save the UVLF
blob_n = ['galaxy_lf']
blob_i = [('z', red_lf), ('x', MUV)]
blob_f = ['{}LuminosityFunction'.format(_pref)]
blob_pars = \
{
'blob_names': [blob_n],
'blob_ivars': [blob_i],
'blob_funcs': [blob_f],
'blob_kwargs': [None],
}
blob_n = ['fstar']
blob_i = [('z', redshifts), ('Mh', Mh)]
blob_f = ['{}fstar'.format(_pref_g)]
blob_pars['blob_names'].append(blob_n)
blob_pars['blob_ivars'].append(blob_i)
blob_pars['blob_funcs'].append(blob_f)
blob_pars['blob_kwargs'].append(None)
if self.include_fduty:
blob_n = ['fduty']
blob_i = [('z', redshifts), ('Mh', Mh)]
blob_f = ['{}fduty'.format(_pref_g)]
blob_pars['blob_names'].append(blob_n)
blob_pars['blob_ivars'].append(blob_i)
blob_pars['blob_funcs'].append(blob_f)
blob_pars['blob_kwargs'].append(None)
if self.include_fdtmr:
blob_n = ['fyield']
blob_i = [('z', redshifts), ('Mh', Mh)]
blob_f = ['{}dust_yield'.format(_pref_g)]
blob_pars['blob_names'].append(blob_n)
blob_pars['blob_ivars'].append(blob_i)
blob_pars['blob_funcs'].append(blob_f)
blob_pars['blob_kwargs'].append(None)
# SAM stuff
if self.save_sam:
blob_n = ['SFR', 'SMHM']
blob_i = [('z', redshifts), ('Mh', Mh)]
if self.use_ensemble:
blob_f = ['guide.SFR', 'SMHM']
else:
blob_f = ['{}SFR'.format(_pref), 'SMHM']
blob_k = [{}, {'return_mean_only': True}]
if 'pop_dust_yield' in self.base_kwargs:
if self.base_kwargs['pop_dust_yield'] != 0:
blob_n.append('Md')
blob_f.append('XMHM')
blob_k.append({'return_mean_only': True, 'field': 'Md'})
blob_pars['blob_names'].append(blob_n)
blob_pars['blob_ivars'].append(blob_i)
blob_pars['blob_funcs'].append(blob_f)
blob_pars['blob_kwargs'].append(blob_k)
# SMF
if self.save_smf:
blob_n = ['galaxy_smf']
blob_i = [('z', red_smf), ('bins', Ms)]
blob_f = ['StellarMassFunction']
blob_pars['blob_names'].append(blob_n)
blob_pars['blob_ivars'].append(blob_i)
blob_pars['blob_funcs'].append(blob_f)
blob_pars['blob_kwargs'].append(None)
# Covering factor and scale length
if self.save_dust:
blob_n = ['dust_scale']
blob_i = [('z', redshifts), ('Mh', Mh)]
blob_f = ['guide.dust_scale']
if type(self.base_kwargs['pop_dust_yield']) == str:
blob_n.append('dust_yield')
blob_f.append('guide.dust_yield')
if 'pop_dust_scatter' in self.base_kwargs:
if type(self.base_kwargs['pop_dust_scatter']) == str:
blob_n.append('sigma_d')
blob_f.append('guide.dust_scatter')
if 'pop_dust_growth' in self.base_kwargs:
if type(self.base_kwargs['pop_dust_growth']) == str:
blob_n.append('fgrowth')
blob_f.append('guide.dust_growth')
blob_pars['blob_names'].append(blob_n)
blob_pars['blob_ivars'].append(blob_i)
blob_pars['blob_funcs'].append(blob_f)
blob_pars['blob_kwargs'].append(None)
# MUV-Beta
if self.save_beta != False:
Mbins = np.arange(-30, -10, 1.0)
# This is fast
blob_n = ['AUV']
blob_i = [('z', red_beta), ('MUV', MUV)]
blob_f = ['AUV']
blob_k = [{'return_binned': True,
'magbins': Mbins, 'Mwave': 1600.}]
kw_hst = {'cam': ('wfc', 'wfc3'), 'filters': filt_hst,
'dlam':20., 'rest_wave': None, 'return_binned': True,
'Mbins': Mbins, 'Mwave': 1600.}
blob_f.extend(['Beta'])
blob_n.extend(['beta_hst'])
blob_k.extend([kw_hst])
# Save also the geometric mean of photometry as a function
# of a magnitude at fixed rest wavelength.
#kw_mag = {'cam': ('wfc', 'wfc3'), 'filters': filt_hst, 'dlam':20.}
#blob_n.append('MUV_gm')
#blob_f.append('Magnitude')
#blob_k.append(kw_mag)
blob_pars['blob_names'].append(blob_n)
blob_pars['blob_ivars'].append(blob_i)
blob_pars['blob_funcs'].append(blob_f)
blob_pars['blob_kwargs'].append(blob_k)
# Cosmic SFRD
if self.save_sfrd:
blob_n = ['sfrd']
blob_i = [('z', np.arange(3.5, 30.1, 0.1))]
blob_f = ['SFRD']
blob_pars['blob_names'].append(blob_n)
blob_pars['blob_ivars'].append(blob_i)
blob_pars['blob_funcs'].append(blob_f)
blob_pars['blob_kwargs'].append(None)
# Reionization stuff
if self.fit_gs is not None:
blob_n = ['tau_e', 'z_B', 'dTb_B', 'z_C', 'dTb_C',
'z_D', 'dTb_D']
blob_pars['blob_names'].append(blob_n)
blob_pars['blob_ivars'].append(None)
blob_pars['blob_funcs'].append(None)
blob_pars['blob_kwargs'].append(None)
blob_n = ['cgm_h_2', 'igm_Tk', 'dTb']
blob_i = [('z', np.arange(5.5, 35.1, 0.1))]
blob_pars['blob_names'].append(blob_n)
blob_pars['blob_ivars'].append(blob_i)
blob_pars['blob_funcs'].append(None)
blob_pars['blob_kwargs'].append(None)
return blob_pars
@property
def use_ensemble(self):
return self.base_kwargs['pop_sfr_model'] == 'ensemble'
@property
def base_kwargs(self):
if not hasattr(self, '_base_kwargs'):
raise AttributeError("Must set `base_kwargs` by hand!")
return self._base_kwargs
@base_kwargs.setter
def base_kwargs(self, value):
self._base_kwargs = PB(**value)
def update_kwargs(self, **kwargs):
bkw = self.base_kwargs
self._base_kwargs.update(kwargs)
self.Npops = self._base_kwargs.Npops
@property
def Npops(self):
if not hasattr(self, '_Npops'):
assert isinstance(self.base_kwargs, PB)
self._Npops = max(self.base_kwargs.Npops, 1)
return self._Npops
@Npops.setter
def Npops(self, value):
if hasattr(self, '_Npops'):
if self.base_kwargs.Npops != self._Npops:
print("Updated Npops from {} to {}".format(self._Npops,
self.base_kwargs.Npops))
self._Npops = max(self.base_kwargs.Npops, 1)
else:
self._Npops = max(self.base_kwargs.Npops, 1)
def get_initial_walker_position(self):
guesses = {}
for i, par in enumerate(self.parameters):
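# Guesses for log-flagged parameters are stored as log10 values
# (see the SFE/dust blocks above), so convert back to linear space
# for the walkers' initial positions.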
if self.is_log[i]:
guesses[par] = 10**self.guesses[par]
else:
guesses[par] = self.guesses[par]
return guesses
def run(self, steps, burn=0, nwalkers=None, save_freq=10, prefix=None,
debug=True, restart=False, clobber=False, verbose=True,
cache_tricks=False, burn_method=0, recenter=False,
checkpoints=True):
"""
Create a fitter class and run the fit!
"""
if prefix is None:
prefix = self.prefix
# Setup LF fitter
fitter_lf = FitGalaxyPopulation()
fitter_lf.zmap = self.zmap
fitter_lf.ztol = self.ztol
fitter_lf.monotonic_beta = self.monotonic_beta
data = []
include = []
fit_galaxies = False
if self.fit_lf:
include.append('lf')
data.extend(['bouwens2015', 'oesch2018'])
fit_galaxies = True
if self.fit_smf:
include.append('smf')
data.append('song2016')
fit_galaxies = True
if self.fit_beta:
include.append('beta')
data.extend(['bouwens2014'])
fit_galaxies = True
# Must be before data is set
fitter_lf.redshifts = {'lf': self.fit_lf, 'smf': self.fit_smf,
'beta': self.fit_beta}
fitter_lf.include = include
fitter_lf.data = data
if self.fit_gs is not None:
freq, dTb, err = self.fit_gs
fitter_gs = FitGlobal21cm()
fitter_gs.frequencies = freq
fitter_gs.data = dTb
fitter_gs.error = err
##
# Stitch together parameters
##
pars = self.base_kwargs
pars.update(self.blobs)
# Master fitter
fitter = ModelFit(**pars)
if fit_galaxies:
fitter.add_fitter(fitter_lf)
if self.fit_gs is not None:
fitter.add_fitter(fitter_gs)
if self.fit_gs is not None:
fitter.simulator = Global21cm
elif self.use_ensemble:
fitter.simulator = GalaxyEnsemble
else:
fitter.simulator = GalaxyCohort
fitter.parameters = self.parameters
fitter.is_log = self.is_log
fitter.debug = debug
fitter.verbose = verbose
fitter.checkpoint_append = not checkpoints
fitter.prior_set = self.priors
if nwalkers is None:
nw = 2 * len(self.parameters)
if rank == 0:
print("# Running with {} walkers.".format(nw))
else:
nw = nwalkers
fitter.nwalkers = nw
# Set initial positions of walkers
# Important the jitter comes first!
fitter.jitter = self.jitter
if (not restart):
fitter.guesses = self.guesses
if cache_tricks:
fitter.save_hmf = True
fitter.save_hist = 'pop_histories' in self.base_kwargs
fitter.save_src = True # Ugh can't be pickled...send tables? yes.
else:
fitter.save_hmf = False
fitter.save_hist = False
fitter.save_src = False
self.fitter = fitter
# RUN
fitter.run(prefix=prefix, burn=burn, steps=steps, save_freq=save_freq,
clobber=clobber, restart=restart, burn_method=burn_method,
recenter=recenter)
```
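A minimal, untested sketch of driving `CalibrateModel` (referenced from the `__init__` docstring above); `base_pars` is a placeholder for a complete ARES parameter dictionary defining the population and its `pq_func_par*` quantities, which must be supplied by hand.
```python
# Hypothetical calibration run (placeholder parameters, not a tested recipe).
from ares.inference.CalibrateModel import CalibrateModel

cal = CalibrateModel(fit_lf=[5.9, 6.9], fit_smf=False,
    free_params_sfe=['norm', 'slope-low'], zevol_sfe=[],
    include_dust='screen', free_params_dust=['norm'])

# `base_pars` stands in for whatever ParameterBundle / dict defines the
# galaxy population being fit (pop_sfr_model, pq_func_* entries, etc.).
cal.base_kwargs = base_pars

cal.run(steps=500, burn=100, save_freq=10, clobber=True)
```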
#### File: ares/physics/ExcursionSet.py
```python
import numpy as np
from .Constants import rho_cgs
from .Cosmology import Cosmology
from ..util.Math import central_difference
from ..util.ParameterFile import ParameterFile
from scipy.integrate import simps, quad
from scipy.interpolate import interp1d
from scipy.misc import derivative
two_pi = 2. * np.pi
four_pi = 4. * np.pi
two_pi_sq = 2. * np.pi**2
class ExcursionSet(object):
def __init__(self, cosm=None, **kwargs):
self.pf = ParameterFile(**kwargs)
if cosm is not None:
self._cosm = cosm
@property
def cosm(self):
if not hasattr(self, '_cosm'):
self._cosm = Cosmology(pf=self.pf, **self.pf)
return self._cosm
@cosm.setter
def cosm(self, value):
self._cosm = value
@property
def tab_sigma(self):
if not hasattr(self, '_tab_sigma'):
raise AttributeError('must set by hand for now')
return self._tab_sigma
@tab_sigma.setter
def tab_sigma(self, value):
self._tab_sigma = value
@property
def tab_M(self):
if not hasattr(self, '_tab_M'):
raise AttributeError('must set by hand for now')
return self._tab_M
@tab_M.setter
def tab_M(self, value):
self._tab_M = value
@property
def tab_z(self):
if not hasattr(self, '_tab_z'):
raise AttributeError('must set by hand for now')
return self._tab_z
@tab_z.setter
def tab_z(self, value):
self._tab_z = value
@property
def tab_k(self):
if not hasattr(self, '_tab_k'):
raise AttributeError('must set by hand for now')
return self._tab_k
@tab_k.setter
def tab_k(self, value):
self._tab_k = value
@property
def tab_ps(self):
if not hasattr(self, '_tab_ps'):
raise AttributeError('must set by hand for now')
return self._tab_ps
@tab_ps.setter
def tab_ps(self, value):
self._tab_ps = value
@property
def tab_growth(self):
if not hasattr(self, '_tab_growth'):
raise AttributeError('must set by hand for now')
return self._tab_growth
@tab_growth.setter
def tab_growth(self, value):
self._tab_growth = value
def _growth_factor(self, z):
return np.interp(z, self.tab_z, self.tab_growth,
left=np.inf, right=np.inf)
def Mass(self, R):
return self.cosm.rho_m_z0 * rho_cgs * self.WindowVolume(R)
def PDF(self, delta, R):
pass
def WindowReal(self, x, R):
"""
Return real-space window function.
"""
assert type(x) == np.ndarray
if self.pf['xset_window'] == 'tophat-real':
W = np.zeros_like(x)
W[x <= R] = 3. / four_pi / R**3
elif self.pf['xset_window'] == 'tophat-fourier':
W = (np.sin(x / R) - (x / R) * np.cos(x / R)) \
/ R**3 / two_pi_sq / (x / R)**3
else:
raise NotImplementedError('help')
return W
def WindowFourier(self, k, R):
if self.pf['xset_window'] == 'sharp-fourier':
W = np.zeros_like(k)
ok = 1. - k * R >= 0.
W[ok == 1] = 1.
elif self.pf['xset_window'] == 'tophat-real':
W = 3. * (np.sin(k * R) - k * R * np.cos(k * R)) / (k * R)**3
elif self.pf['xset_window'] == 'tophat-fourier':
W = np.zeros_like(k)
W[k <= 1./R] = 1.
else:
raise NotImplementedError('help')
return W
def WindowVolume(self, R):
if self.pf['xset_window'] == 'sharp-fourier':
# Sleight of hand
return four_pi * R**3 / 3.
elif self.pf['xset_window'] == 'tophat-real':
return four_pi * R**3 / 3.
elif self.pf['xset_window'] == 'tophat-fourier':
return four_pi * R**3 / 3.
else:
raise NotImplementedError('help')
def Variance(self, z, R):
"""
Compute the variance in the field on some scale `R`.
"""
iz = np.argmin(np.abs(z - self.tab_z))
# Window function
W = self.WindowFourier(self.tab_k, R)
# Dimensionless power spectrum
D = self.tab_k**3 * self.tab_ps[iz,:] / two_pi_sq
return np.trapz(D * np.abs(W)**2, x=np.log(self.tab_k))
def CollapsedFraction(self):
pass
def SizeDistribution(self, z, R, dcrit=1.686, dzero=0.0):
"""
Compute the size distribution of objects.
Parameters
----------
z: int, float
Redshift of interest.
Returns
-------
Tuple containing (in order) the radii, masses, and the
differential size distribution. Each is an array of length
self.tab_M, i.e., with elements corresponding to the masses
used to compute the variance of the density field.
"""
# Comoving matter density
rho0_m = self.cosm.rho_m_z0 * rho_cgs
M = self.Mass(R)
S = np.array([self.Variance(z, RR) for RR in R])
_M, _dlnSdlnM = central_difference(np.log(M[-1::-1]), np.log(S[-1::-1]))
_M = _M[-1::-1]
dlnSdlnM = _dlnSdlnM[-1::-1]
dSdM = dlnSdlnM * (S[1:-1] / M[1:-1])
dFdM = self.FCD(z, R, dcrit, dzero)[1:-1] * np.abs(dSdM)
# This is, e.g., Eq. 17 in Zentner (2006)
# or Eq. 9.38 in Loeb and Furlanetto (2013)
dndm = rho0_m * np.abs(dFdM) / M[1:-1]
return R[1:-1], M[1:-1], dndm
def FCD(self, z, R, dcrit=1.686, dzero=0.0):
"""
First-crossing distribution function.
i.e., dF/dS where S=sigma^2.
"""
S = np.array([self.Variance(z, RR) for RR in R])
norm = (dcrit - dzero) / np.sqrt(two_pi) / S**1.5
p = norm * np.exp(-(dcrit - dzero)**2 / 2. / S)
return p
```
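A minimal sketch of the by-hand setup this class currently requires (every `tab_*` property raises until assigned); `halos` is a placeholder for any object carrying linear power-spectrum tables, e.g. an ARES halo model instance.
```python
# Hypothetical setup for ExcursionSet (tables come from elsewhere).
import numpy as np
from ares.physics.ExcursionSet import ExcursionSet

xset = ExcursionSet(xset_window='tophat-real')

# Required lookup tables -- shapes follow their use in Variance():
xset.tab_M = halos.tab_M            # halo masses
xset.tab_sigma = halos.tab_sigma    # sigma(M)
xset.tab_z = halos.tab_z            # redshift grid
xset.tab_k = halos.tab_k_lin        # wavenumbers
xset.tab_ps = halos.tab_ps_lin      # P(k), shape (len(tab_z), len(tab_k))
xset.tab_growth = halos.tab_growth  # growth factor D(z)

R = np.logspace(-1., 2., 100)       # comoving smoothing scales
Rmid, M, dndm = xset.SizeDistribution(z=8., R=R)
```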
#### File: ares/populations/Parameterized.py
```python
import numpy as np
from types import FunctionType
from scipy.interpolate import interp1d
from .Population import Population
from ..phenom.ParameterizedQuantity import ParameterizedQuantity
from ..util.ParameterFile import ParameterFile, par_info, get_pq_pars
try:
# this runs with no issues in python 2 but raises error in python 3
basestring
except:
# this try/except allows for python 2/3 compatible string type checking
basestring = str
parametric_options = ['pop_Ja', 'pop_ion_rate_cgm', 'pop_ion_rate_igm',
'pop_heat_rate']
class ParametricPopulation(Population):
def __getattr__(self, name):
if (name[0] == '_'):
raise AttributeError('This will get caught. Don\'t worry!')
# This is the name of the thing as it appears in the parameter file.
full_name = 'pop_' + name
# Now, possibly make an attribute
if not hasattr(self, name):
try:
is_pq = self.pf[full_name][0:2] == 'pq'
except (IndexError, TypeError):
is_pq = False
if type(self.pf[full_name]) in [float, np.float64]:
result = lambda z: self.pf[full_name]
elif type(self.pf[full_name]) is FunctionType:
result = self.pf[full_name]
elif is_pq:
pars = get_pq_pars(self.pf[full_name], self.pf)
result = ParameterizedQuantity(**pars)
elif isinstance(self.pf[full_name], basestring):
x, y = np.loadtxt(self.pf[full_name], unpack=True)
result = interp1d(x, y, kind=self.pf['interp_hist'])
else:
raise NotImplementedError('Problem with: {!s}'.format(name))
self.__setattr__(name, result)
return getattr(self, name)
def LymanAlphaFlux(self, z):
return self.Ja(z=z)
def IonizationRateCGM(self, z):
return self.ion_rate_cgm(z=z)
def IonizationRateIGM(self, z):
return self.ion_rate_igm(z=z)
def HeatingRate(self, z):
return self.heat_rate(z=z)
```
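A short illustration of the value types `ParametricPopulation.__getattr__` accepts for the `parametric_options` listed at the top of the file; the numbers and filename below are made up.
```python
# Illustrative parameter values for a ParametricPopulation (made up).
import numpy as np

def my_heat_rate(z):
    # a FunctionType value is called directly as heat_rate(z=z)
    return 1e-30 * np.exp(-(z - 12.)**2 / 10.)

pars = {
    'pop_heat_rate': my_heat_rate,        # callable
    'pop_Ja': 1e-12,                      # float -> constant in z
    'pop_ion_rate_igm': 'rates_igm.txt',  # filename -> loadtxt + interp1d
    # 'pop_ion_rate_cgm': 'pq[0]',        # 'pq' string -> ParameterizedQuantity
}
```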
#### File: ares/simulations/MultiPhaseMedium.py
```python
import numpy as np
from ..static import Grid
from types import FunctionType
from .GasParcel import GasParcel
from ..physics.Cosmology import Cosmology
from ..util.ReadData import _sort_history
from ..util.ParameterFile import get_pq_pars
from ..util import ParameterFile, ProgressBar
from .MetaGalacticBackground import MetaGalacticBackground
from ..phenom.ParameterizedQuantity import ParameterizedQuantity
from ..util.SetDefaultParameterValues import MultiPhaseParameters
_mpm_defs = MultiPhaseParameters()
class MultiPhaseMedium(object):
def __init__(self, pf=None, cosm=None, **kwargs):
"""
Initialize a MultiPhaseMedium object.
By default, this is a two-zone model, consisting of a "bulk IGM"
grid patch and an "HII regions" grid patch, dubbed "igm" and "cgm",
respectively. To perform a single-zone calculation, simply set
``include_cgm=False`` or ``include_igm=False``.
"""
if pf is not None:
self.pf = pf
self._cosm_ = cosm
self.kwargs = kwargs
@property
def pf(self):
if not hasattr(self, '_pf'):
self._pf = ParameterFile(**self.kwargs)
# Make sure PF gets modified by initial conditions choices
# and ensure that these changes get passed to everything else
# subsequently.
inits = self.inits
return self._pf
@pf.setter
def pf(self, val):
self._pf = val
inits = self.inits
@property
def inits(self):
if not hasattr(self, '_inits'):
if self.pf['load_ics']:
if self.pf['approx_thermal_history']:
self._inits = inits = self.grid.cosm.thermal_history
else:
self._inits = inits = self.grid.cosm.inits
zi = self.pf['initial_redshift']
if not np.all(np.diff(inits['z']) > 0):
raise ValueError('Redshifts in ICs must be in ascending order!')
Ti = np.interp(zi, inits['z'], inits['Tk'])
xe = np.interp(zi, inits['z'], inits['xe'])
#if self.pf['include_He']:
new = {'igm_initial_temperature': Ti,
'igm_initial_ionization': [1.-xe, xe, 1.-xe-1e-10, xe, 1e-10]}
# Only time we ever do this?
self.pf.update(new)
self.kwargs.update(new)
else:
self._inits = None
return self._inits
@property
def field(self):
if not hasattr(self, '_field'):
if self.pf['include_igm']:
self._field = MetaGalacticBackground(pf=self.pf,
grid=self.parcel_igm.grid, **self.kwargs)
else:
self._field = MetaGalacticBackground(pf=self.pf,
grid=self.parcel_cgm.grid, **self.kwargs)
return self._field
@property
def pops(self):
return self.field.pops
#@property
#def grid(self):
# return self.field.grid
@property
def cosm(self):
if not hasattr(self, '_cosm'):
if self._cosm_ is None:
self._cosm = Cosmology(pf=self.pf, **self.pf)
else:
self._cosm = self._cosm_
return self._cosm
@property
def grid(self):
if not hasattr(self, '_grid'):
self._grid = Grid(cosm=self.cosm, **self.pf)
self._grid.set_properties(**self.pf)
return self._grid
@property
def parcels(self):
if not hasattr(self, '_parcels'):
self._initialize_zones()
return self._parcels
@property
def parcel_igm(self):
if not hasattr(self, '_parcel_igm'):
self._parcel_igm = self.parcels[0]
return self._parcel_igm
@property
def parcel_cgm(self):
if not hasattr(self, '_parcel_cgm'):
if self.pf['include_igm']:
self._parcel_cgm = self.parcels[1]
else:
self._parcel_cgm = self.parcels[0]
return self._parcel_cgm
def rates_no_RT(self, grid):
_rates_no_RT = \
{'k_ion': np.zeros((grid.dims, grid.N_absorbers)),
'k_heat': np.zeros((grid.dims, grid.N_absorbers)),
'k_ion2': np.zeros((grid.dims, grid.N_absorbers, grid.N_absorbers)),
}
return _rates_no_RT
@property
def tf(self):
if not hasattr(self, '_tf'):
z = self.pf['initial_redshift']
zf = self.pf['final_redshift']
self._tf = self.default_parcel.grid.cosm.LookbackTime(zf, z)
self.pf['stop_time'] = self._tf / self.pf['time_units']
return self._tf
def _initialize_zones(self):
"""
Initialize (up to two) GasParcels.
"""
inits = self.inits
# Reset stop time based on final redshift.
z = self.pf['initial_redshift']
zf = self.pf['final_redshift']
self._parcels = []
for zone in ['igm', 'cgm']:
if not self.pf['include_{!s}'.format(zone)]:
continue
kw = self.pf.copy()
# Loop over defaults, pull out the ones for this zone
for key in _mpm_defs:
if key[0:4] != '{!s}_'.format(zone):
continue
# Have to rename variables so Grid class will know them
grid_key = key.replace('{!s}_'.format(zone), '')
if key in self.kwargs:
kw[grid_key] = self.kwargs[key]
else:
kw[grid_key] = _mpm_defs[key]
if zone == 'igm':
self.kw_igm = kw.copy()
parcel_igm = GasParcel(cosm=self.cosm, **self.kw_igm)
self.gen_igm = parcel_igm.step()
# Set initial values for rate coefficients
parcel_igm.update_rate_coefficients(parcel_igm.grid.data,
**self.rates_no_RT(parcel_igm.grid))
self._parcels.append(parcel_igm)
else:
self.kw_cgm = kw.copy()
parcel_cgm = GasParcel(cosm=self.cosm, **self.kw_cgm)
parcel_cgm.grid.set_recombination_rate(True)
parcel_cgm._set_chemistry()
self.gen_cgm = parcel_cgm.step()
parcel_cgm.chem.chemnet.monotonic_EoR = \
self.pf['monotonic_EoR']
parcel_cgm.update_rate_coefficients(parcel_cgm.grid.data,
**self.rates_no_RT(parcel_cgm.grid))
self._parcels.append(parcel_cgm)
self._parcels[-1].pf['stop_time'] = self.tf / self.pf['time_units']
@property
def zones(self):
if not hasattr(self, '_zones'):
self._zones = int(self.pf['include_igm']) \
+ int(self.pf['include_cgm'])
return self._zones
@property
def default_parcel(self):
if not hasattr(self, '_default_parcel'):
self._default_parcel = self.parcel_igm if self.pf['include_igm'] \
else self.parcel_cgm
return self._default_parcel
def run(self):
"""
Run simulation from start to finish.
Returns
-------
Nothing: sets `history` attribute.
"""
self._insert_inits()
pb = ProgressBar(self.tf, use=self.pf['progress_bar'])
pb.start()
# Evolve in time
for t, z, data_igm, data_cgm, RC_igm, RC_cgm in self.step():
pb.update(t)
# Save data
self.all_z.append(z)
self.all_t.append(t)
if self.pf['include_cgm']:
self.all_data_cgm.append(data_cgm.copy())
#else:
# self.all_data_cgm = []
if self.pf['include_igm']:
self.all_data_igm.append(data_igm.copy())
#else:
# self.all_data_igm = []
if self.pf['save_rate_coefficients']:
if self.pf['include_cgm']:
self.all_RCs_cgm.append(RC_cgm.copy())
if self.pf['include_igm']:
self.all_RCs_igm.append(RC_igm.copy())
pb.finish()
# Sort everything by time
if self.pf['include_igm']:
self.history_igm = \
_sort_history(self.all_data_igm, prefix='igm_', squeeze=True)
self.history = self.history_igm.copy()
else:
self.history = {}
if self.pf['include_cgm']:
self.history_cgm = \
_sort_history(self.all_data_cgm, prefix='cgm_', squeeze=True)
self.history.update(self.history_cgm)
else:
self.history_cgm = {}
# Save rate coefficients [optional]
if self.pf['save_rate_coefficients']:
if self.pf['include_igm']:
self.rates_igm = \
_sort_history(self.all_RCs_igm, prefix='igm_', squeeze=True)
self.history.update(self.rates_igm)
if self.pf['include_cgm']:
self.rates_cgm = \
_sort_history(self.all_RCs_cgm, prefix='cgm_', squeeze=True)
self.history.update(self.rates_cgm)
else:
self.rates_cgm = {}
self.history['t'] = np.array(self.all_t)
self.history['z'] = np.array(self.all_z)
def step(self):
"""
Generator for a two-phase intergalactic medium.
Returns
-------
Tuple containing the current time, redshift, and dictionaries for the
IGM and CGM data at a single snapshot.
"""
t = 0.0
z = self.pf['initial_redshift']
dt = self.pf['time_units'] * self.pf['initial_timestep']
zf = self.pf['final_redshift']
# Read initial conditions
if self.pf['include_igm']:
data_igm = self.parcel_igm.grid.data.copy()
if self.pf['include_cgm']:
data_cgm = self.parcel_cgm.grid.data.copy()
# Evolve in time!
while z > zf:
if z < self.pf['kill_redshift']:
break
# Increment time / redshift
dtdz = self.default_parcel.grid.cosm.dtdz(z)
t += dt
z -= dt / dtdz
# IGM rate coefficients
if self.pf['include_igm']:
done = False
if self.pf['stop_igm_h_2'] is not None:
if data_igm['h_2'] > self.pf['stop_igm_h_2']:
data_igm = data_igm_pre.copy()
dt1 = 1e50
done = True
if not done:
also = {}
for sp in self.field.grid.absorbers:
also['igm_{!s}'.format(sp)] = data_igm[sp]
RC_igm = self.field.update_rate_coefficients(z,
zone='igm', return_rc=True, **also)
# Now, update IGM parcel
t1, dt1, data_igm = next(self.gen_igm)
# Pass rate coefficients off to the IGM parcel
self.parcel_igm.update_rate_coefficients(data_igm, **RC_igm)
else:
dt1 = 1e50
RC_igm = data_igm = None
data_igm = {'h_1': 1.0}
if self.pf['include_cgm']:
done = False
if self.pf['stop_cgm_h_2'] is not None:
if data_cgm['h_2'] > self.pf['stop_cgm_h_2']:
data_cgm = data_cgm_pre.copy()
dt2 = 1e50
done = True
if not done:
# CGM rate coefficients
RC_cgm = self.field.update_rate_coefficients(z,
zone='cgm', return_rc=True, cgm_h_1=data_cgm['h_1'])
# Pass rate coefficients off to the CGM parcel
self.parcel_cgm.update_rate_coefficients(data_cgm, **RC_cgm)
# Now, update CGM parcel
t2, dt2, data_cgm = next(self.gen_cgm)
else:
dt2 = 1e50
RC_cgm = data_cgm = None
# Must update timesteps in unison
dt_pre = dt * 1.
dt = min(dt1, dt2)
dt = min(dt, self.pf['max_timestep'] * self.pf['time_units'])
# Might need these...
if self.pf['include_igm']:
data_igm_pre = data_igm.copy()
if self.pf['include_cgm']:
data_cgm_pre = data_cgm.copy()
# Changing attribute! A little scary, but we must make sure
# these parcels are evolved in unison
if self.pf['include_igm']:
self.parcel_igm.dt = dt
if self.pf['include_cgm']:
self.parcel_cgm.dt = dt
yield t, z, data_igm, data_cgm, RC_igm, RC_cgm
def _insert_inits(self):
"""
Prepend provided initial conditions to the data storage lists.
"""
if not self.pf['load_ics']:
self.all_t, self.all_z, self.all_data_igm, self.all_data_cgm = \
[], [], [], []
if self.pf['save_rate_coefficients']:
self.all_RCs_igm, self.all_RCs_cgm = [], []
if not self.pf['include_cgm']:
del self.all_RCs_cgm, self.all_data_cgm
#self.all_z.append(self.pf['initial_redshift'])
#self.all_t.append(0.0)
#
#igm_inits = {'Tk': self.pf['igm_initial_temperature'],
# 'xe': self.pf['igm_initial_ionization']}
#cgm_inits = {'Tk': self.pf['cgm_initial_temperature'],
# 'xe': self.pf['cgm_initial_ionization']}
#self.all_data_igm.append(igm_inits)
#self.all_data_cgm.append(cgm_inits)
return
# Flip to descending order (in redshift)
if self.pf['load_ics'] == 'cosmorec':
z_inits = self.inits['z'][-1::-1]
Tk_inits = self.inits['Tk'][-1::-1]
xe_inits = self.inits['xe'][-1::-1]
else:
z_inits = self.grid.cosm.thermal_history['z'][-1::-1]
Tk_inits = self.grid.cosm.thermal_history['Tk'][-1::-1]
xe_inits = self.grid.cosm.thermal_history['xe'][-1::-1]
inits_all = {'z': z_inits, 'Tk': Tk_inits, 'xe': xe_inits}
# Stop pre-pending once we hit the first light redshift
zi = self.pf['initial_redshift']
i_trunc = np.argmin(np.abs(z_inits - zi))
if z_inits[i_trunc] <= zi:
i_trunc += 1
self.all_t = []
self.all_data_igm = []
self.all_data_cgm = []
self.all_z = list(z_inits[0:i_trunc])
self.all_RCs_igm = [self.rates_no_RT(self.parcel_igm.grid)] * len(self.all_z)
self.all_RCs_cgm = [self.rates_no_RT(self.parcel_igm.grid)] * len(self.all_z)
# Don't mess with the CGM (much)
if self.pf['include_cgm']:
tmp = self.parcel_cgm.grid.data
self.all_data_cgm = [tmp.copy() for i in range(len(self.all_z))]
for i, cgm_data in enumerate(self.all_data_cgm):
self.all_data_cgm[i]['rho'] = \
self.parcel_cgm.grid.cosm.MeanBaryonDensity(self.all_z[i])
self.all_data_cgm[i]['n'] = \
self.parcel_cgm.grid.particle_density(cgm_data, self.all_z[i])
#else:
# self.all_data_cgm = []
if not self.pf['include_igm']:
return
# Loop over redshift and derive things for the IGM
for i, red in enumerate(self.all_z):
snapshot = {}
for key in self.parcel_igm.grid.data.keys():
if key in inits_all.keys():
snapshot[key] = inits_all[key][i]
continue
# Electron fraction
snapshot['e'] = inits_all['xe'][i]
# Hydrogen neutral fraction
xe = inits_all['xe'][i]
if 2 not in self.parcel_igm.grid.Z:
xe = min(xe, 1.0)
# Need to be careful this isn't > 1, since we're breaking
# electron fraction into H and He fractions separately.
xi = np.minimum(xe / (1. + self.parcel_igm.grid.cosm.y), 1.)
snapshot['h_1'] = 1. - xi
snapshot['h_2'] = xi
# Add helium, assuming xHeII = xHII, and xHeIII << 1
if self.parcel_igm.pf['include_He']:
snapshot['he_1'] = 1. - xi
snapshot['he_2'] = xi
snapshot['he_3'] = 1e-10
snapshot['rho'] = self.parcel_igm.grid.cosm.MeanBaryonDensity(red)
snapshot['n'] = \
self.parcel_igm.grid.particle_density(snapshot.copy(), red)
# Need to keep the cell number dimension for consistency
for element in snapshot:
snapshot[element] = np.array([snapshot[element]], dtype=float)
self.all_t.append(0.0)
self.all_data_igm.append(snapshot.copy())
```
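A minimal, untested sketch of running the two-zone model described in the `__init__` docstring; the parameter names follow those referenced in this file, and the `'igm_Tk'` history key assumes the usual `igm_` prefix added by `_sort_history`.
```python
# Hypothetical two-zone run (parameter values are illustrative).
from ares.simulations.MultiPhaseMedium import MultiPhaseMedium

sim = MultiPhaseMedium(initial_redshift=40., final_redshift=6.,
    include_igm=True, include_cgm=True, progress_bar=False)
sim.run()

z = sim.history['z']
Tk = sim.history['igm_Tk']   # IGM kinetic temperature vs. z (assumed key)
```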
#### File: ares/static/Fluctuations.py
```python
import numpy as np
from math import factorial
from ..physics import Cosmology
from ..util import ParameterFile
from ..util.Stats import bin_c2e
from scipy.special import erfinv
from scipy.optimize import fsolve
from scipy.interpolate import interp1d
from scipy.integrate import quad, simps
from ..physics.Hydrogen import Hydrogen
from ..physics.HaloModel import HaloModel
from ..util.Math import LinearNDInterpolator
from ..populations.Composite import CompositePopulation
from ..physics.CrossSections import PhotoIonizationCrossSection
from ..physics.Constants import g_per_msun, cm_per_mpc, dnu, s_per_yr, c, \
s_per_myr, erg_per_ev, k_B, m_p
root2 = np.sqrt(2.)
four_pi = 4. * np.pi
class Fluctuations(object): # pragma: no cover
def __init__(self, grid=None, **kwargs):
"""
Initialize a FluctuatingBackground object.
Creates an object capable of modeling fields that fluctuate spatially.
"""
self._kwargs = kwargs.copy()
self.pf = ParameterFile(**kwargs)
# Some useful physics modules
if grid is not None:
self.grid = grid
self.cosm = grid.cosm
else:
self.grid = None
self.cosm = Cosmology()
self._done = {}
@property
def zeta(self):
if not hasattr(self, '_zeta'):
raise AttributeError('Must set zeta by hand!')
return self._zeta
@zeta.setter
def zeta(self, value):
self._zeta = value
@property
def zeta_X(self):
if not hasattr(self, '_zeta_X'):
raise AttributeError('Must set zeta_X by hand!')
return self._zeta_X
@zeta_X.setter
def zeta_X(self, value):
self._zeta_X = value
@property
def hydr(self):
if not hasattr(self, '_hydr'):
if self.grid is None:
self._hydr = Hydrogen(**self.pf)
else:
self._hydr = self.grid.hydr
return self._hydr
@property
def xset(self):
if not hasattr(self, '_xset'):
xset_pars = \
{
'xset_window': 'tophat-real',
'xset_barrier': 'constant',
'xset_pdf': 'gaussian',
}
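# NOTE: as written, the lines below reference a module-level `ares`
# import and a `pop` object (providing halo tables) that are not
# defined in this file; they must be supplied by the caller's context.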
xset = ares.physics.ExcursionSet(**xset_pars)
xset.tab_M = pop.halos.tab_M
xset.tab_sigma = pop.halos.tab_sigma
xset.tab_ps = pop.halos.tab_ps_lin
xset.tab_z = pop.halos.tab_z
xset.tab_k = pop.halos.tab_k_lin
xset.tab_growth = pop.halos.tab_growth
self._xset = xset
return self._xset
def _overlap_region(self, dr, R1, R2):
"""
Volume of intersection between two spheres of radii R1 < R2.
"""
Vo = np.pi * (R2 + R1 - dr)**2 \
* (dr**2 + 2. * dr * R1 - 3. * R1**2 \
+ 2. * dr * R2 + 6. * R1 * R2 - 3. * R2**2) / 12. / dr
if type(Vo) == np.ndarray:
# Small-scale vs. large Scale
SS = dr <= R2 - R1
LS = dr >= R1 + R2
Vo[LS == 1] = 0.0
if type(R1) == np.ndarray:
Vo[SS == 1] = 4. * np.pi * R1[SS == 1]**3 / 3.
else:
Vo[SS == 1] = 4. * np.pi * R1**3 / 3.
return Vo
def IV(self, dr, R1, R2):
"""
Just a vectorized version of the overlap calculation.
"""
return self._overlap_region(dr, R1, R2)
def intersectional_volumes(self, dr, R1, R2, R3):
IV = self.IV
V11 = IV(dr, R1, R1)
zeros = np.zeros_like(V11)
if np.all(R2 == 0):
return V11, zeros, zeros, zeros, zeros, zeros
V12 = IV(dr, R1, R2)
V22 = IV(dr, R2, R2)
if np.all(R3 == 0):
return V11, V12, V22, zeros, zeros, zeros
V13 = IV(dr, R1, R3)
V23 = IV(dr, R2, R3)
V33 = IV(dr, R3, R3)
return V11, V12, V22, V13, V23, V33
def overlap_volumes(self, dr, R1, R2):
"""
Overlap volumes, i.e., volumes in which a source affects two points
in different ways. For example, V11 is the volume in which a source
ionizes both points (at separation `dr`), V12 is the volume in which
a source ionizes one point and heats the other, and so on.
Returned in this order: V11, V12, V22, V1n, V2n, Van.
"""
IV = self.IV
V1 = 4. * np.pi * R1**3 / 3.
if self.pf['ps_temp_model'] == 1:
V2 = 4. * np.pi * (R2**3 - R1**3) / 3.
else:
V2 = 4. * np.pi * R2**3 / 3.
Vt = 4. * np.pi * R2**3 / 3.
V11 = IV(dr, R1, R1)
if self.pf['ps_include_temp'] and self.pf['ps_temp_model'] == 2:
V12 = V1
else:
V12 = 2 * IV(dr, R1, R2) - IV(dr, R1, R1)
V22 = IV(dr, R2, R2)
if self.pf['ps_temp_model'] == 1:
V22 += -2. * IV(dr, R1, R2) + IV(dr, R1, R1)
if self.pf['ps_include_temp'] and self.pf['ps_temp_model'] == 1:
V1n = V1 - IV(dr, R1, R2)
elif self.pf['ps_include_temp'] and self.pf['ps_temp_model'] == 2:
V1n = V1
else:
V1n = V1 - V11
V2n = V2 - IV(dr, R2, R2)
if self.pf['ps_temp_model'] == 1:
V2n += IV(dr, R1, R2)
# 'anything' to one point, 'nothing' to other.
# Without temperature fluctuations, same as V1n
if self.pf['ps_include_temp']:
Van = Vt - IV(dr, R2, R2)
else:
Van = V1n
return V11, V12, V22, V1n, V2n, Van
def exclusion_volumes(self, dr, R1, R2, R3):
"""
Volume in which a single source only affects one
"""
pass
@property
def heating_ongoing(self):
if not hasattr(self, '_heating_ongoing'):
self._heating_ongoing = True
return self._heating_ongoing
@heating_ongoing.setter
def heating_ongoing(self, value):
self._heating_ongoing = value
def BubbleShellFillingFactor(self, z, R_s=None):
"""
"""
# Hard exit.
if not self.pf['ps_include_temp']:
return 0.0
Qi = self.MeanIonizedFraction(z)
if self.pf['ps_temp_model'] == 1:
R_i, M_b, dndm_b = self.BubbleSizeDistribution(z)
if Qi == 1:
return 0.0
if type(R_s) is np.ndarray:
nz = R_i > 0
const_rsize = np.allclose(np.diff(R_s[nz==1] / R_i[nz==1]), 0.0)
if const_rsize:
fvol = (R_s[0] / R_i[0])**3 - 1.
Qh = Qi * fvol
else:
V = 4. * np.pi * (R_s**3 - R_i**3) / 3.
Mmin = self.Mmin(z) * self.zeta
Qh = self.get_prob(z, M_b, dndm_b, Mmin, V,
exp=False, ep=0.0, Mmax=None)
#raise NotImplemented("No support for absolute scaling of hot bubbles yet.")
if (Qh > (1. - Qi) * 1.): #or Qh > 0.5: #or Qi > 0.5:
self.heating_ongoing = 0
Qh = np.minimum(Qh, 1. - Qi)
return Qh
else:
# This will get called if temperature fluctuations are off
return 0.0
elif self.pf['ps_temp_model'] == 2:
Mmin = self.Mmin(z) * self.zeta_X
R_i, M_b, dndm_b = self.BubbleSizeDistribution(z, ion=False)
V = 4. * np.pi * R_i**3 / 3.
Qh = self.get_prob(z, M_b, dndm_b, Mmin, V,
exp=False, ep=0.0, Mmax=None)
#Qh = self.BubbleFillingFactor(z, ion=False)
#print('Qh', Qh)
return np.minimum(Qh, 1. - Qi)
else:
raise NotImplementedError('Unrecognized option for BSD.')
#return min(Qh, 1.), min(Qc, 1.)
@property
def bsd_model(self):
return self.pf['bubble_size_dist'].lower()
def MeanIonizedFraction(self, z, ion=True):
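# Ionized filling factor: Q_i = min(1, zeta * f_coll(>Mmin)).
# With ion=False, return the heated filling factor instead:
# Q_h = zeta_X * f_coll(>Mmin) - Q_i, capped at 1 - Q_i.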
Mmin = self.Mmin(z)
logM = np.log10(Mmin)
if ion:
if not self.pf['ps_include_ion']:
return 0.0
zeta = self.zeta
return np.minimum(1.0, zeta * self.halos.fcoll_2d(z, logM))
else:
if not self.pf['ps_include_temp']:
return 0.0
zeta = self.zeta_X
# Assume that each heated region contains the same volume
# of fully-ionized material.
Qi = self.MeanIonizedFraction(z, ion=True)
Qh = zeta * self.halos.fcoll_2d(z, logM) - Qi
return np.minimum(1.0 - Qi, Qh)
def delta_shell(self, z):
"""
Relative density != relative over-density.
"""
if not self.pf['ps_include_temp']:
return 0.0
if self.pf['ps_temp_model'] == 2:
return self.delta_bubble_vol_weighted(z, ion=False)
delta_i_bar = self.delta_bubble_vol_weighted(z)
rdens = self.pf["bubble_shell_rdens_zone_0"]
return rdens * (1. + delta_i_bar) - 1.
def BulkDensity(self, z, R_s):
Qi = self.MeanIonizedFraction(z)
#Qh = self.BubbleShellFillingFactor(z, R_s)
Qh = self.MeanIonizedFraction(z, ion=False)
delta_i_bar = self.delta_bubble_vol_weighted(z)
delta_h_bar = self.delta_shell(z)
if self.pf['ps_igm_model'] == 2:
delta_hal_bar = self.mean_halo_overdensity(z)
Qhal = self.Qhal(z, Mmax=self.Mmin(z))
else:
Qhal = 0.0
delta_hal_bar = 0.0
return -(delta_i_bar * Qi + delta_h_bar * Qh + delta_hal_bar * Qhal) \
/ (1. - Qi - Qh - Qhal)
def BubbleFillingFactor(self, z, ion=True, rescale=True):
"""
Fraction of volume filled by bubbles.
This is never actually used, but for reference, the mean ionized
fraction would be 1 - exp(-this). What we actually do is re-normalize
the bubble size distribution to guarantee Q = zeta * fcoll. See
MeanIonizedFraction and BubbleSizeDistribution for more details.
"""
if ion:
zeta = self.zeta
else:
zeta = self.zeta_X
if self.bsd_model is None:
R_i = self.pf['bubble_size']
V_i = 4. * np.pi * R_i**3 / 3.
ni = self.BubbleDensity(z)
Qi = 1. - np.exp(-ni * V_i)
elif self.bsd_model in ['fzh04', 'hmf']:
# Smallest bubble is one around smallest halo.
# Don't actually need its mass, just need index to correctly
# truncate integral.
Mmin = self.Mmin(z) * zeta
# M_b should just be self.m? No.
R_i, M_b, dndm_b = self.BubbleSizeDistribution(z, ion=ion,
rescale=rescale)
V_i = 4. * np.pi * R_i**3 / 3.
iM = np.argmin(np.abs(Mmin - M_b))
Qi = np.trapz(dndm_b[iM:] * M_b[iM:] * V_i[iM:], x=np.log(M_b[iM:]))
# This means reionization is over.
if self.bsd_model == 'fzh04':
if self._B0(z, zeta) <= 0:
return 1.
else:
raise NotImplementedError('Unrecognized option for BSD.')
return min(Qi, 1.)
# Grab heated phase to enforce BC
#Rs = self.BubbleShellRadius(z, R_i)
#Vsh = 4. * np.pi * (Rs - R_i)**3 / 3.
#Qh = np.trapz(dndm * Vsh * M_b, x=np.log(M_b))
#if lya and self.pf['bubble_pod_size_func'] in [None, 'const', 'linear']:
# Rc = self.BubblePodRadius(z, R_i, zeta, zeta_lya)
# Vc = 4. * np.pi * (Rc - R_i)**3 / 3.
#
# if self.pf['powspec_rescale_Qlya']:
# # This isn't actually correct since we care about fluxes
# # not number of photons, but fine for now.
# Qc = min(zeta_lya * self.halos.fcoll_2d(z, np.log10(self.Mmin(z))), 1)
# else:
# Qc = np.trapz(dndlnm[iM:] * Vc[iM:], x=np.log(M_b[iM:]))
#
# return min(Qc, 1.)
#
#elif lya and self.pf['bubble_pod_size_func'] == 'fzh04':
# return self.BubbleFillingFactor(z, zeta_lya, None, lya=False)
#else:
@property
def tab_Mmin(self):
if not hasattr(self, '_tab_Mmin'):
raise AttributeError('Must set Mmin by hand (right now)')
return self._tab_Mmin
@tab_Mmin.setter
def tab_Mmin(self, value):
if type(value) is not np.ndarray:
value = np.ones_like(self.halos.tab_z) * value
else:
assert value.size == self.halos.tab_z.size
self._tab_Mmin = value
def Mmin(self, z):
return np.interp(z, self.halos.tab_z, self.tab_Mmin)
def mean_halo_bias(self, z):
bias = self.halos.Bias(z)
M_h = self.halos.tab_M
iz_h = np.argmin(np.abs(z - self.halos.tab_z))
iM_h = np.argmin(np.abs(self.Mmin(z) - M_h))
dndm_h = self.halos.tab_dndm[iz_h]
return 1.0
#return simps(M_h * dndm_h * bias, x=np.log(M_h)) \
# / simps(M_h * dndm_h, x=np.log(M_h))
def tab_bubble_bias(self, zeta):
if not hasattr(self, '_tab_bubble_bias'):
func = lambda z: self._fzh04_eq22(z, zeta)
self._tab_bubble_bias = np.array([func(zz) for zz in self.halos.tab_z_ps])
return self._tab_bubble_bias
def _fzh04_eq22(self, z, ion=True):
if ion:
zeta = self.zeta
else:
zeta = self.zeta_X
iz = np.argmin(np.abs(z - self.halos.tab_z))
s = self.sigma
S = s**2
#return 1. + ((self.LinearBarrier(z, zeta, zeta) / S - (1. / self._B0(z, zeta))) \
# / self._growth_factor(z))
return 1. + (self._B0(z, zeta)**2 / S / self._B(z, zeta, zeta))
def bubble_bias(self, z, ion=True):
"""
Eq. 9.24 in Loeb & Furlanetto (2013) or Eq. 22 in FZH04.
"""
return self._fzh04_eq22(z, ion)
#iz = np.argmin(np.abs(z - self.halos.tab_z_ps))
#
#x, y = self.halos.tab_z_ps, self.tab_bubble_bias(zeta)[iz]
#
#
#
#m = (y[-1] - y[-2]) / (x[-1] - x[-2])
#
#return m * z + y[-1]
#iz = np.argmin(np.abs(z - self.halos.tab_z))
#s = self.sigma
#S = s**2
#
##return 1. + ((self.LinearBarrier(z, zeta, zeta) / S - (1. / self._B0(z, zeta))) \
## / self._growth_factor(z))
#
#fzh04 = 1. + (self._B0(z, zeta)**2 / S / self._B(z, zeta, zeta))
#
#return fzh04
def mean_bubble_bias(self, z, ion=True):
"""
"""
R, M_b, dndm_b = self.BubbleSizeDistribution(z, ion=ion)
#if ('h' in term) or ('c' in term) and self.pf['powspec_temp_method'] == 'shell':
# R_s, Rc = self.BubbleShellRadius(z, R_i)
# R = R_s
#else:
if ion:
zeta = self.zeta
else:
zeta = self.zeta_X
V = 4. * np.pi * R**3 / 3.
Mmin = self.Mmin(z) * zeta
iM = np.argmin(np.abs(Mmin - self.m))
bHII = self.bubble_bias(z, ion)
#tmp = dndm[iM:]
#print(z, len(tmp[np.isnan(tmp)]), len(bHII[np.isnan(bHII)]))
#imax = int(min(np.argwhere(np.isnan(R_i))))
if ion and self.pf['ps_include_ion']:
Qi = self.MeanIonizedFraction(z)
elif ion and not self.pf['ps_include_ion']:
raise NotImplementedError('help')
elif (not ion) and self.pf['ps_include_temp']:
Qi = self.MeanIonizedFraction(z, ion=False)
elif ion and self.pf['ps_include_temp']:
Qi = self.MeanIonizedFraction(z, ion=False)
else:
raise NotImplementedError('help')
return np.trapz(dndm_b[iM:] * V[iM:] * bHII[iM:] * M_b[iM:],
x=np.log(M_b[iM:])) / Qi
#def delta_bubble_mass_weighted(self, z, zeta):
# if self._B0(z, zeta) <= 0:
# return 0.
#
# R_i, M_b, dndm_b = self.BubbleSizeDistribution(z, zeta)
# V_i = 4. * np.pi * R_i**3 / 3.
#
# Mmin = self.Mmin(z) * zeta
# iM = np.argmin(np.abs(Mmin - self.m))
# B = self._B(z, zeta)
# rho0 = self.cosm.mean_density0
#
# dm_ddel = rho0 * V_i
#
# return simps(B[iM:] * dndm_b[iM:] * M_b[iM:], x=np.log(M_b[iM:]))
def delta_bubble_vol_weighted(self, z, ion=True):
if not self.pf['ps_include_ion']:
return 0.0
if not self.pf['ps_include_xcorr_ion_rho']:
return 0.0
if ion:
zeta = self.zeta
else:
zeta = self.zeta_X
if self._B0(z, zeta) <= 0:
return 0.
R_i, M_b, dndm_b = self.BubbleSizeDistribution(z, ion=ion)
V_i = 4. * np.pi * R_i**3 / 3.
Mmin = self.Mmin(z) * zeta
iM = np.argmin(np.abs(Mmin - self.m))
B = self._B(z, ion=ion)
return np.trapz(B[iM:] * dndm_b[iM:] * V_i[iM:] * M_b[iM:],
x=np.log(M_b[iM:]))
#def mean_bubble_overdensity(self, z, zeta):
# if self._B0(z, zeta) <= 0:
# return 0.
#
# R_i, M_b, dndm_b = self.BubbleSizeDistribution(z, zeta)
# V_i = 4. * np.pi * R_i**3 / 3.
#
# Mmin = self.Mmin(z) * zeta
# iM = np.argmin(np.abs(Mmin - self.m))
# B = self._B(z, zeta)
# rho0 = self.cosm.mean_density0
#
# dm_ddel = rho0 * V_i
#
# return simps(B[iM:] * dndm_b[iM:] * M_b[iM:], x=np.log(M_b[iM:]))
def mean_halo_abundance(self, z, Mmin=False):
M_h = self.halos.tab_M
iz_h = np.argmin(np.abs(z - self.halos.tab_z))
if Mmin:
iM_h = np.argmin(np.abs(self.Mmin(z) - M_h))
else:
iM_h = 0
dndm_h = self.halos.tab_dndm[iz_h]
return np.trapz(M_h * dndm_h, x=np.log(M_h))
def spline_cf_mm(self, z):
if not hasattr(self, '_spline_cf_mm_'):
self._spline_cf_mm_ = {}
if z not in self._spline_cf_mm_:
iz = np.argmin(np.abs(z - self.halos.tab_z_ps))
self._spline_cf_mm_[z] = interp1d(np.log(self.halos.tab_R),
self.halos.tab_cf_mm[iz], kind='cubic', bounds_error=False,
fill_value=0.0)
return self._spline_cf_mm_[z]
def excess_probability(self, z, R, ion=True):
"""
This is the excess probability that a point is ionized given that
we already know another point (at distance r) is ionized.
"""
# Function of bubble mass (bubble size)
bHII = self.bubble_bias(z, ion)
bbar = self.mean_bubble_bias(z, ion)
if R < self.halos.tab_R.min():
print("WARNING: R={} is below the tabulated tab_R range.".format(R))
if R > self.halos.tab_R.max():
print("WARNING: R={} is above the tabulated tab_R range.".format(R))
xi_dd = self.spline_cf_mm(z)(np.log(R))
#if term == 'ii':
return bHII * bbar * xi_dd
#elif term == 'id':
# return bHII * bbar * xi_dd
#else:
# raise NotImplemented('help!')
def _K(self, zeta):
return erfinv(1. - (1. / zeta))
def _growth_factor(self, z):
return np.interp(z, self.halos.tab_z, self.halos.tab_growth,
left=np.inf, right=np.inf)
def _delta_c(self, z):
return self.cosm.delta_c0 / self._growth_factor(z)
def _B0(self, z, ion=True):
if ion:
zeta = self.zeta
else:
zeta = self.zeta_X
iz = np.argmin(np.abs(z - self.halos.tab_z))
s = self.sigma
# Variance on scale of smallest collapsed object
sigma_min = self.sigma_min(z)
return self._delta_c(z) - root2 * self._K(zeta) * sigma_min
def _B1(self, z, ion=True):
if ion:
zeta = self.zeta
else:
zeta = self.zeta_X
iz = np.argmin(np.abs(z - self.halos.tab_z))
s = self.sigma #* self.halos.growth_factor[iz]
sigma_min = self.sigma_min(z)
return self._K(zeta) / np.sqrt(2. * sigma_min**2)
def _B(self, z, ion=True, zeta_min=None):
return self.LinearBarrier(z, ion, zeta_min=zeta_min)
def LinearBarrier(self, z, ion=True, zeta_min=None):
if ion:
zeta = self.zeta
else:
zeta = self.zeta_X
iz = np.argmin(np.abs(z - self.halos.tab_z))
s = self.sigma #/ self.halos.growth_factor[iz]
if zeta_min is None:
zeta_min = zeta
return self._B0(z, ion) + self._B1(z, ion) * s**2
def Barrier(self, z, ion=True, zeta_min=None):
"""
Full barrier.
"""
if ion:
zeta = self.zeta
else:
zeta = self.zeta_X
if zeta_min is None:
zeta_min = zeta
#iz = np.argmin(np.abs(z - self.halos.tab_z))
#D = self.halos.growth_factor[iz]
sigma_min = self.sigma_min(z)
#Mmin = self.Mmin(z)
#sigma_min = np.interp(Mmin, self.halos.M, self.halos.sigma_0)
delta = self._delta_c(z)
return delta - np.sqrt(2.) * self._K(zeta) \
* np.sqrt(sigma_min**2 - self.sigma**2)
#return self.cosm.delta_c0 - np.sqrt(2.) * self._K(zeta) \
# * np.sqrt(sigma_min**2 - s**2)
def sigma_min(self, z):
Mmin = self.Mmin(z)
return np.interp(Mmin, self.halos.tab_M, self.halos.tab_sigma)
#def BubblePodSizeDistribution(self, z, zeta):
# if self.pf['powspec_lya_method'] == 1:
# # Need to modify zeta and critical threshold
# Rc, Mc, dndm = self.BubbleSizeDistribution(z, zeta)
# return Rc, Mc, dndm
# else:
# raise NotImplemented('help please')
@property
def m(self):
"""
Mass array used for bubbles.
"""
if not hasattr(self, '_m'):
self._m = 10**np.arange(5, 18.1, 0.1)
return self._m
@property
def sigma(self):
if not hasattr(self, '_sigma'):
self._sigma = np.interp(self.m, self.halos.tab_M, self.halos.tab_sigma)
# Crude but chill it's temporary
bigm = self.m > self.halos.tab_M.max()
if np.any(bigm):
print("WARNING: Extrapolating sigma to higher masses.")
slope = np.diff(np.log10(self.halos.tab_sigma[-2:])) \
/ np.diff(np.log10(self.halos.tab_M[-2:]))
self._sigma[bigm == 1] = self.halos.tab_sigma[-1] \
* (self.m[bigm == 1] / self.halos.tab_M.max())**slope
return self._sigma
@property
def dlns_dlnm(self):
if not hasattr(self, '_dlns_dlnm'):
self._dlns_dlnm = np.interp(self.m, self.halos.tab_M, self.halos.tab_dlnsdlnm)
bigm = self.m > self.halos.tab_M.max()
if np.any(bigm):
print("WARNING: Extrapolating dlns_dlnm to higher masses.")
slope = np.diff(np.log10(np.abs(self.halos.tab_dlnsdlnm[-2:]))) \
/ np.diff(np.log10(self.halos.tab_M[-2:]))
self._dlns_dlnm[bigm == 1] = self.halos.tab_dlnsdlnm[-1] \
* (self.m[bigm == 1] / self.halos.tab_M.max())**slope
return self._dlns_dlnm
def BubbleSizeDistribution(self, z, ion=True, rescale=True):
"""
Compute the size distribution of ionized (or heated) bubbles.
Parameters
----------
z : int, float
Redshift of interest.
ion : bool
If True, compute the distribution of ionized regions; if False,
of heated regions (using zeta_X).
rescale : bool
If True, rescale the BSD so that its volume integral is consistent
with the mean ionized (or heated) fraction.
Returns
-------
Tuple containing (in order) the bubble radii, masses, and the
differential bubble size distribution, each evaluated on the mass
array used to compute the variance of the density field.
"""
if ion:
zeta = self.zeta
else:
zeta = self.zeta_X
if ion and not self.pf['ps_include_ion']:
R_i = M_b = dndm = np.zeros_like(self.m)
return R_i, M_b, dndm
if (not ion) and not self.pf['ps_include_temp']:
R_i = M_b = dndm = np.zeros_like(self.m)
return R_i, M_b, dndm
reionization_over = False
# Comoving matter density
rho0_m = self.cosm.mean_density0
rho0_b = rho0_m * self.cosm.fbaryon
# Mean (over-)density of bubble material
delta_B = self._B(z, ion)
if self.bsd_model is None:
if self.pf['bubble_density'] is not None:
R_i = self.pf['bubble_size']
M_b = (4. * np.pi * R_i**3 / 3.) * rho0_m
dndm = self.pf['bubble_density']
else:
raise NotImplementedError('help')
elif self.bsd_model == 'hmf':
M_b = self.halos.tab_M * zeta
# Assumes bubble material is at cosmic mean density
R_i = (3. * M_b / rho0_b / 4. / np.pi)**(1./3.)
iz = np.argmin(np.abs(z - self.halos.tab_z))
dndm = self.halos.tab_dndm[iz].copy()
elif self.bsd_model == 'fzh04':
# Just use array of halo mass as array of ionized region masses.
# Arbitrary at this point, just need an array of masses.
# Plus, this way, the sigma's from the HMF are OK.
M_b = self.m
# Radius of ionized regions as function of delta (mass)
R_i = (3. * M_b / rho0_m / (1. + delta_B) / 4. / np.pi)**(1./3.)
V_i = four_pi * R_i**3 / 3.
# This is Eq. 9.38 from Steve's book.
# The factors of 2, S, and M_b are from using dlns instead of
# dS (where S=s^2)
dndm = rho0_m * self.pcross(z, ion) * 2 * np.abs(self.dlns_dlnm) \
* self.sigma**2 / M_b**2
# Reionization is over!
# Only use barrier condition if we haven't asked to rescale
# or supplied Q ourselves.
if self._B0(z, ion) <= 0:
reionization_over = True
dndm = np.zeros_like(dndm)
#elif Q is not None:
# if Q == 1:
# reionization_over = True
# dndm = np.zeros_like(dndm)
else:
raise NotImplementedError('Unrecognized option: %s' % self.pf['bubble_size_dist'])
# This is a trick to guarantee that the integral over the bubble
# size distribution yields the mean ionized fraction.
if (not reionization_over) and rescale:
Mmin = self.Mmin(z) * zeta
iM = np.argmin(np.abs(M_b - Mmin))
# R_i is defined in every branch above, but V_i is not; compute it here.
V_i = 4. * np.pi * R_i**3 / 3.
Qi = np.trapz(dndm[iM:] * V_i[iM:] * M_b[iM:], x=np.log(M_b[iM:]))
xibar = self.MeanIonizedFraction(z, ion=ion)
dndm *= -np.log(1. - xibar) / Qi
return R_i, M_b, dndm
def pcross(self, z, ion=True):
"""
Up-crossing probability.
"""
if ion:
zeta = self.zeta
else:
zeta = self.zeta_X
S = self.sigma**2
Mmin = self.Mmin(z) #* zeta # doesn't matter for zeta=const
if type(zeta) == np.ndarray:
raise NotImplementedError('this is wrong.')
zeta_min = np.interp(Mmin, self.m, zeta)
else:
zeta_min = zeta
zeros = np.zeros_like(self.sigma)
B0 = self._B0(z, ion)
B1 = self._B1(z, ion)
Bl = self.LinearBarrier(z, ion=ion, zeta_min=zeta_min)
p = (B0 / np.sqrt(2. * np.pi * S**3)) * np.exp(-0.5 * Bl**2 / S)
#p = (B0 / np.sqrt(2. * np.pi * S**3)) \
# * np.exp(-0.5 * B0**2 / S) * np.exp(-B0 * B1) * np.exp(-0.5 * B1**2 / S)
return p  # np.maximum(p, zeros)
@property
def halos(self):
if not hasattr(self, '_halos'):
self._halos = HaloModel(**self.pf)
return self._halos
@property
def Emin_X(self):
if not hasattr(self, '_Emin_X'):
xrpop = None
for i, pop in enumerate(self.pops):
if not pop.is_src_heat_fl:
continue
if xrpop is not None:
raise AttributeError('help! can only handle 1 X-ray pop right now')
xrpop = pop
self._Emin_X = pop.src.Emin
return self._Emin_X
def get_Nion(self, z, R_i):
return 4. * np.pi * (R_i * cm_per_mpc / (1. + z))**3 \
* self.cosm.nH(z) / 3.
def _cache_jp(self, z, term):
if not hasattr(self, '_cache_jp_'):
self._cache_jp_ = {}
if z not in self._cache_jp_:
self._cache_jp_[z] = {}
if term not in self._cache_jp_[z]:
return None
else:
#print("Loaded P_{} at z={} from cache.".format(term, z))
return self._cache_jp_[z][term]
def _cache_cf(self, z, term):
if not hasattr(self, '_cache_cf_'):
self._cache_cf_ = {}
if z not in self._cache_cf_:
self._cache_cf_[z] = {}
if term not in self._cache_cf_[z]:
return None
else:
#print("Loaded cf_{} at z={} from cache.".format(term, z))
return self._cache_cf_[z][term]
def _cache_ps(self, z, term):
if not hasattr(self, '_cache_ps_'):
self._cache_ps_ = {}
if z not in self._cache_ps_:
self._cache_ps_[z] = {}
if term not in self._cache_ps_[z]:
return None
else:
return self._cache_ps_[z][term]
@property
def is_Rs_const(self):
if not hasattr(self, '_is_Rs_const'):
self._is_Rs_const = True
return self._is_Rs_const
@is_Rs_const.setter
def is_Rs_const(self, value):
self._is_Rs_const = value
def _cache_Vo(self, z):
if not hasattr(self, '_cache_Vo_'):
self._cache_Vo_ = {}
if z in self._cache_Vo_:
return self._cache_Vo_[z]
#if self.is_Rs_const and len(self._cache_Vo_.keys()) > 0:
# return self._cache_Vo_[self._cache_Vo_.keys()[0]]
return None
def _cache_IV(self, z):
if not hasattr(self, '_cache_IV_'):
self._cache_IV_ = {}
if z in self._cache_IV_:
return self._cache_IV_[z]
#if self.is_Rs_const and len(self._cache_IV_.keys()) > 0:
# return self._cache_IV_[self._cache_IV_.keys()[0]]
return None
def _cache_p(self, z, term):
if not hasattr(self, '_cache_p_'):
self._cache_p_ = {}
if z not in self._cache_p_:
self._cache_p_[z] = {}
if term not in self._cache_p_[z]:
return None
else:
return self._cache_p_[z][term]
def mean_halo_overdensity(self, z):
# Mean density of halos (mass is arbitrary)
rho_h = self.halos.MeanDensity(1e8, z) * cm_per_mpc**3 / g_per_msun
return rho_h / self.cosm.mean_density0 - 1.
def Qhal(self, z, Mmin=None, Mmax=None):
"""
This may not be quite right, since we just integrate over the mass
range we have....
"""
M_h = self.halos.tab_M
iz_h = np.argmin(np.abs(z - self.halos.tab_z))
dndm_h = self.halos.tab_dndm[iz_h]
# Volume of halos (within virial radii)
Rvir = self.halos.VirialRadius(M_h, z) / 1e3 # Convert to Mpc
Vvir = 4. * np.pi * Rvir**3 / 3.
if Mmin is not None:
imin = np.argmin(np.abs(M_h - Mmin))
else:
imin = 0
if Mmax is not None:
imax = np.argmin(np.abs(M_h - Mmax))
else:
imax = None
integ = dndm_h * Vvir * M_h
Q_hal = 1. - np.exp(-np.trapz(integ[imin:imax],
x=np.log(M_h[imin:imax])))
return Q_hal
#return self.get_prob(z, M_h, dndm_h, Mmin, Vvir, exp=False, ep=0.0,
# Mmax=Mmax)
def ExpectationValue1pt(self, z, term='i', R_s=None, R3=None,
Th=500.0, Ts=None, Tk=None, Ja=None):
"""
Compute the probability that a point is in a given phase (or related
one-point averages involving density and contrast).
These are the one point terms in brackets, e.g., <x>, <x delta>, etc.
Note that the asterisk indicates that both quantities
are in the same set of brackets. Maybe a better way to handle this
notationally...
"""
##
# Check cache for match
##
cached_result = self._cache_p(z, term)
if cached_result is not None:
return cached_result
Qi = self.MeanIonizedFraction(z)
Qh = self.MeanIonizedFraction(z, ion=False)
if self.pf['ps_igm_model'] == 2:
Qhal = self.Qhal(z, Mmax=self.Mmin(z))
del_hal = self.mean_halo_overdensity(z)
else:
Qhal = 0.0
del_hal = 0.0
Qb = 1. - Qi - Qh - Qhal
Tcmb = self.cosm.TCMB(z)
del_i = self.delta_bubble_vol_weighted(z)
del_h = self.delta_shell(z)
del_b = self.BulkDensity(z, R_s)
ch = self.TempToContrast(z, Th=Th, Tk=Tk, Ts=Ts, Ja=Ja)
if Ts is not None:
cb = Tcmb / Ts
else:
cb = 0.0
##
# Otherwise, get to it.
##
if term == 'b':
val = 1. - Qi - Qh - Qhal
elif term == 'i':
val = Qi
elif term == 'n':
val = 1. - Qi
elif term == 'h':
assert R_s is not None
val = Qh
elif term in ['m', 'd']:
val = 0.0
elif term in ['n*d', 'i*d']:
# <xd> = <(1-x_i)d> = <d> - <x_i d> = - <x_i d>
if self.pf['ps_include_xcorr_ion_rho']:
if term == 'i*d':
val = Qi * del_i
else:
val = -Qi * del_i
else:
val = 0.0
elif term == 'pc':
# <psi * c> = <x (1 + d) c>
# = <(1 - i) (1 + d) c> = <(1 + d) c> - <i (1 + d) c>
# ...
# = <c> + <cd>
avg_c = Qh * ch + Qb * cb
if self.pf['ps_include_xcorr_hot_rho']:
val = avg_c \
+ Qh * ch * del_h \
+ Qb * cb * del_b
else:
val = avg_c
elif term in ['ppc', 'ppcc']:
avg_psi = self.ExpectationValue1pt(z, term='psi',
R_s=R_s, Th=Th, Ts=Ts, Tk=Tk, Ja=Ja)
avg_c = self.ExpectationValue1pt(z, term='c',
R_s=R_s, Th=Th, Ts=Ts, Tk=Tk, Ja=Ja)
if term == 'ppc':
val = avg_psi**2 * avg_c
else:
val = avg_psi**2 * avg_c**2
#cc = Qh**2 * ch**2 \
# + 2 * Qh * Qb * ch * cb \
# + Qb**2 * cb**2
#ccd = Qh**2 * ch**2 * delta_h_bar \
# + Qh * Qb * ch * cb * delta_h_bar \
# + Qh * Qb * ch * cb * delta_b_bar \
# + Qb**2 * cb**2 * delta_b_bar
elif term == 'c*d':
if self.pf['ps_include_xcorr_hot_rho']:
val = Qh * ch * del_h + Qb * cb * del_b
else:
val = 0.0
elif term.strip() == 'i*h':
val = 0.0
elif term.strip() == 'n*h':
# <xh> = <h> - <x_i h> = <h>
val = Qh
elif term.strip() == 'i*c':
val = 0.0
elif term == 'c':
val = ch * Qh + cb * Qb
elif term.strip() == 'n*c':
# <xc> = <c> - <x_i c>
val = ch * Qh
elif term == 'psi':
# <psi> = <x (1 + d)> = <x> + <xd> = 1 - <x_i> + <d> - <x_i d>
# = 1 - <x_i> - <x_i d>
#avg_xd = self.ExpectationValue1pt(z, zeta, term='n*d',
# R_s=R_s, Th=Th, Ts=Ts, Tk=Tk, Ja=Ja)
#avg_x = self.ExpectationValue1pt(z, zeta, term='n',
# R_s=R_s, Th=Th, Ts=Ts, Tk=Tk, Ja=Ja)
#
#val = avg_x + avg_xd
avg_id = self.ExpectationValue1pt(z, term='i*d',
R_s=R_s, Th=Th, Ts=Ts, Tk=Tk, Ja=Ja)
avg_i = self.ExpectationValue1pt(z, term='i',
R_s=R_s, Th=Th, Ts=Ts, Tk=Tk, Ja=Ja)
val = 1. - avg_i - avg_id
elif term == 'phi':
# <phi> = <psi * (1 - c)> = <psi> - <psi * c>
# <psi * c> = <x * c> + <x * c * d>
# = <c> - <x_i c> + <cd> - <x_i c * d>
avg_psi = self.ExpectationValue1pt(z, term='psi',
R_s=R_s, Th=Th, Ts=Ts, Tk=Tk, Ja=Ja)
avg_psi_c = self.ExpectationValue1pt(z, term='pc',
R_s=R_s, Th=Th, Ts=Ts, Tk=Tk, Ja=Ja)
val = avg_psi - avg_psi_c
# <phi> = <psi * (1 + c)> = <psi> + <psi * c>
#
# <psi * c> = <x * c> + <x * c * d>
# = <c> - <x_i c> + <cd> - <x_i c * d>
#avg_xcd = self.ExpectationValue1pt(z, zeta, term='n*d*c',
# R_s=R_s, Th=Th, Ts=Ts, Tk=Tk, Ja=Ja)
#
## Equivalent to <c> in binary field model.
#ch = self.TempToContrast(z, Th=Th, Tk=Tk, Ts=Ts, Ja=Ja)
#ci = self.BubbleContrast(z, Th=Th, Tk=Tk, Ts=Ts, Ja=Ja)
#avg_c = self.ExpectationValue1pt(z, zeta, term='c',
# R_s=R_s, Th=Th, Ts=Ts, Tk=Tk, Ja=Ja)
#avg_cd = self.ExpectationValue1pt(z, zeta, term='c*d',
# R_s=R_s, Th=Th, Ts=Ts, Tk=Tk, Ja=Ja)
#avg_id = self.ExpectationValue1pt(z, zeta, term='i*d',
# R_s=R_s, Th=Th, Ts=Ts, Tk=Tk, Ja=Ja)
#
#avg_psi = self.ExpectationValue1pt(z, zeta, term='psi',
# R_s=R_s, Th=Th, Ts=Ts, Tk=Tk, Ja=Ja)
#avg_psi_c = avg_c - ci * Qi + avg_cd - avg_id * ci
#
## Tagged on these last two terms if c=1 (ionized regions)
#val = avg_psi + avg_psi_c
elif term == '21':
# dTb = T0 * (1 + d21) = T0 * xHI * (1 + d) = T0 * psi
# so d21 = psi - 1
if self.pf['ps_include_temp']:
avg_phi = self.ExpectationValue1pt(z, term='phi',
R_s=R_s, Th=Th, Ts=Ts, Tk=Tk, Ja=Ja)
val = avg_phi - 1.
else:
avg_psi = self.ExpectationValue1pt(z, term='psi',
R_s=R_s, Th=Th, Ts=Ts, Tk=Tk, Ja=Ja)
val = avg_psi - 1.
elif term == 'o':
# <omega>^2 = <psi * c>^2 - 2 <psi> <psi * c>
avg_psi = self.ExpectationValue1pt(z, term='psi',
R_s=R_s, Th=Th, Ts=Ts, Tk=Tk, Ja=Ja)
avg_psi_c = self.ExpectationValue1pt(z, term='pc',
R_s=R_s, Th=Th, Ts=Ts, Tk=Tk, Ja=Ja)
# <omega>^2 = <psi c>^2 - 2 <psi c> <psi>
val = np.sqrt(avg_psi_c**2 - 2. * avg_psi_c * avg_psi)
elif term == 'oo':
avg_psi = self.ExpectationValue1pt(z, term='psi',
R_s=R_s, Th=Th, Ts=Ts, Tk=Tk, Ja=Ja)
avg_psi_c = self.ExpectationValue1pt(z, term='pc',
R_s=R_s, Th=Th, Ts=Ts, Tk=Tk, Ja=Ja)
# <omega>^2 = <psi c>^2 - 2 <psi c> <psi>
val = avg_psi_c**2 - 2. * avg_psi_c * avg_psi
else:
raise ValueError("Don't know how to handle <{}>".format(term))
self._cache_p_[z][term] = val
return val
@property
def _getting_basics(self):
if not hasattr(self, '_getting_basics_'):
self._getting_basics_ = False
return self._getting_basics_
def get_basics(self, z, R, R_s, Th, Ts, Tk, Ja):
self._getting_basics_ = True
basics = {}
for term in ['ii', 'ih', 'ib', 'hh', 'hb', 'bb']:
cache = self._cache_jp(z, term)
if self.pf['ps_include_temp'] and self.pf['ps_temp_model'] == 2:
Qi = self.MeanIonizedFraction(z)
Qh = self.MeanIonizedFraction(z, ion=False)
if term == 'ih':
P = Qi * Qh * np.ones_like(R)
P1 = P2 = np.zeros_like(R)
basics[term] = P, P1, P2
continue
elif term == 'ib':
P = Qi * (1. - Qi - Qh) * np.ones_like(R)
P1 = P2 = np.zeros_like(R)
basics[term] = P, P1, P2
continue
elif term == 'hb':
P = Qh * (1. - Qi - Qh) * np.ones_like(R)
P1 = P2 = np.zeros_like(R)
basics[term] = P, P1, P2
continue
#elif term == 'bb':
# P = (1. - Qi - Qh)**2 * np.ones_like(R)
# P1 = P2 = np.zeros_like(R)
# basics[term] = P, P1, P2
# continue
if cache is None and term != 'bb':
P, P1, P2 = self.ExpectationValue2pt(z,
R=R, term=term, R_s=R_s, Th=Th, Ts=Ts, Tk=Tk, Ja=Ja)
elif cache is None and term == 'bb':
P = 1. - (basics['ii'][0] + 2 * basics['ib'][0]
+ 2 * basics['ih'][0] + basics['hh'][0] + 2 * basics['hb'][0])
P1 = P2 = np.zeros_like(P)
self._cache_jp_[z][term] = R, P, np.zeros_like(P), np.zeros_like(P)
else:
P, P1, P2 = cache[1:]
basics[term] = P, P1, P2
self._getting_basics_ = False
return basics
def ExpectationValue2pt(self, z, R, term='ii', R_s=None, R3=None,
Th=500.0, Ts=None, Tk=None, Ja=None, k=None):
"""
Essentially a wrapper around JointProbability that scales
terms like <cc'>, <xc'>, etc., from their component probabilities
<hh'>, <ih'>, etc.
Parameters
----------
z : int, float
Redshift of interest.
R : np.ndarray
Array of scales to consider.
term : str
Which joint probability to compute, e.g., 'ii', 'hh', 'psi', '21'.
Returns
-------
Tuple: total, one-source, two-source contributions to joint probability.
"""
##
# Check cache for match
##
#cached_result = self._cache_jp(z, term)
#
#if cached_result is not None:
# _R, _jp, _jp1, _jp2 = cached_result
#
# if _R.size == R.size:
# if np.allclose(_R, R):
# return cached_result[1:]
#
# print("interpolating jp_{}".format(ii))
# return np.interp(R, _R, _jp), np.interp(R, _R, _jp1), np.interp(R, _R, _jp2)
# Remember, we scaled the BSD so that these two things are equal
# by construction.
xibar = Q = Qi = self.MeanIonizedFraction(z)
# Call this early so that heating_ongoing is set before anything
# else can happen.
#Qh = self.BubbleShellFillingFactor(z, R_s=R_s)
Qh = self.MeanIonizedFraction(z, ion=False)
delta_i_bar = self.delta_bubble_vol_weighted(z)
delta_h_bar = self.delta_shell(z)
delta_b_bar = self.BulkDensity(z, R_s)
Tcmb = self.cosm.TCMB(z)
ch = self.TempToContrast(z, Th=Th, Tk=Tk, Ts=Ts, Ja=Ja)
if Ts is None:
cb = 0.0
else:
cb = Tcmb / Ts
Rones = np.ones_like(R)
Rzeros = np.zeros_like(R)
# If reionization is over, don't waste our time!
if xibar == 1:
return np.ones(R.size), Rzeros, Rzeros
iz = np.argmin(np.abs(z - self.halos.tab_z_ps))
iz_hmf = np.argmin(np.abs(z - self.halos.tab_z))
# Grab the matter power spectrum
if R.size == self.halos.tab_R.size:
if np.allclose(R, self.halos.tab_R):
xi_dd = self.halos.tab_cf_mm[iz]
else:
xi_dd = self.spline_cf_mm(z)(np.log(R))
else:
xi_dd = self.spline_cf_mm(z)(np.log(R))
# Some stuff we need
R_i, M_b, dndm_b = self.BubbleSizeDistribution(z)
V_i = 4. * np.pi * R_i**3 / 3.
if self.pf['ps_include_temp']:
if self.pf['ps_temp_model'] == 1:
V_h = 4. * np.pi * (R_s**3 - R_i**3) / 3.
V_ioh = 4. * np.pi * R_s**3 / 3.
dndm_s = dndm_b
M_s = M_b
zeta_X = 0.0
elif self.pf['ps_temp_model'] == 2:
zeta_X = self.zeta_X
R_s, M_s, dndm_s = self.BubbleSizeDistribution(z, ion=False)
V_h = 4. * np.pi * R_s**3 / 3.
V_ioh = 4. * np.pi * R_s**3 / 3.
else:
raise NotImplementedError('help')
else:
zeta_X = 0
if R_s is None:
R_s = np.zeros_like(R_i)
V_h = np.zeros_like(R_i)
V_ioh = V_i
zeta_X = 0.0
if R3 is None:
R3 = np.zeros_like(R_i)
##
# Before we begin: anything we're turning off?
##
if not self.pf['ps_include_ion']:
if term == 'ii':
self._cache_jp_[z][term] = R, Qi**2 * Rones, Rzeros, Rzeros
return Qi**2 * Rones, Rzeros, Rzeros
elif term in ['id']:
self._cache_jp_[z][term] = R, Rzeros, Rzeros, Rzeros
return Rzeros, Rzeros, Rzeros
elif term == 'idd':
ev2pt = Qi * xi_dd
self._cache_jp_[z][term] = R, ev2pt, Rzeros, Rzeros
return ev2pt, Rzeros, Rzeros #
elif term == 'iidd':
ev2pt = Qi**2 * xi_dd
self._cache_jp_[z][term] = R, ev2pt, Rzeros, Rzeros
return ev2pt, Rzeros, Rzeros
#elif 'i' in term:
# #self._cache_jp_[z][term] = R, Rzeros, Rzeros, Rzeros
# return Rzeros, Rzeros, Rzeros
# also iid, iidd
if not self.pf['ps_include_temp']:
if ('c' in term) or ('h' in term):
return Rzeros, Rzeros, Rzeros
##
# Handy
##
if not self._getting_basics:
basics = self.get_basics(z, R, R_s=R_s, Th=Th, Ts=Ts, Tk=Tk, Ja=Ja)
if term in basics:
return basics[term]
_P_ii, _P_ii_1, _P_ii_2 = basics['ii']
_P_hh, _P_hh_1, _P_hh_2 = basics['hh']
_P_bb, _P_bb_1, _P_bb_2 = basics['bb']
_P_ih, _P_ih_1, _P_ih_2 = basics['ih']
_P_ib, _P_ib_1, _P_ib_2 = basics['ib']
_P_hb, _P_hb_1, _P_hb_2 = basics['hb']
##
# Check for derived quantities like psi, phi
##
if term == 'psi':
# <psi psi'> = <x (1 + d) x' (1 + d')> = <xx'(1+d)(1+d')>
# = <xx'(1 + d + d' + dd')>
# = <xx'> + 2<xx'd> + <xx'dd'>
#xx, xx1, xx2 = self.ExpectationValue2pt(z, zeta, R=R, term='nn',
# R_s=R_s, R3=R3, Th=Th, Ts=Ts, Tk=Tk, Ja=Ja)
#
#xxd, xxd1, xxd2 = self.ExpectationValue2pt(z, zeta, R=R, term='nnd',
# R_s=R_s, R3=R3, Th=Th, Ts=Ts, Tk=Tk, Ja=Ja)
#
#xxdd, xxdd1, xxdd2 = self.ExpectationValue2pt(z, zeta, R=R,
# term='xxdd', R_s=R_s, R3=R3, Th=Th, Ts=Ts, Tk=Tk, Ja=Ja)
#
#ev2pt = xx + 2. * xxd + xxdd
#
ev2pt_1 = Rzeros
ev2pt_2 = Rzeros
# All in terms of ionized fraction perturbation.
dd, dd1, dd2 = self.ExpectationValue2pt(z, R=R, term='dd',
R_s=R_s, R3=R3, Th=Th, Ts=Ts, Tk=Tk, Ja=Ja)
ii, ii1, ii2 = self.ExpectationValue2pt(z, R=R, term='ii',
R_s=R_s, R3=R3, Th=Th, Ts=Ts, Tk=Tk, Ja=Ja)
di, di1, di2 = self.ExpectationValue2pt(z, R=R, term='id',
R_s=R_s, R3=R3, Th=Th, Ts=Ts, Tk=Tk, Ja=Ja)
iidd, on, tw = self.ExpectationValue2pt(z, R=R, term='iidd',
R_s=R_s, R3=R3, Th=Th, Ts=Ts, Tk=Tk, Ja=Ja)
idd, on, tw = self.ExpectationValue2pt(z, R=R, term='idd',
R_s=R_s, R3=R3, Th=Th, Ts=Ts, Tk=Tk, Ja=Ja)
iid, on, tw = self.ExpectationValue2pt(z, R=R, term='iid',
R_s=R_s, R3=R3, Th=Th, Ts=Ts, Tk=Tk, Ja=Ja)
ev_id_1 = self.ExpectationValue1pt(z, term='i*d',
R_s=R_s, R3=R3, Th=Th, Ts=Ts, Tk=Tk, Ja=Ja)
ev2pt = dd + ii - 2. * di + iidd - 2. * (idd - iid) \
+ 1. - 2 * Qi - 2 * ev_id_1
#self._cache_jp_[z][term] = R, ev2pt, ev2pt_1, ev2pt_2
return ev2pt, ev2pt_1, ev2pt_2
elif term == 'phi':
ev_psi, ev_psi1, ev_psi2 = self.ExpectationValue2pt(z, R,
term='psi', R_s=R_s, R3=R3, Th=Th, Ts=Ts, Tk=Tk, Ja=Ja)
ev_oo, ev_oo1, ev_oo2 = self.ExpectationValue2pt(z, R,
term='oo', R_s=R_s, R3=R3, Th=Th, Ts=Ts, Tk=Tk, Ja=Ja)
return ev_psi + ev_oo, ev_psi1 + ev_oo1, ev_psi2 + ev_oo2
elif term == '21':
# dTb = T0 * (1 + d21) = T0 * psi
# d21 = psi - 1
# <d21 d21'> = <(psi - 1)(psi' - 1)>
# = <psi psi'> - 2 <psi> + 1
if self.pf['ps_include_temp']:
# New formalism
# <phi phi'> = <psi psi'> + 2 <psi psi' c> + <psi psi' c c'>
ev_phi, ev_phi1, ev_phi2 = self.ExpectationValue2pt(z, R,
term='phi', R_s=R_s, R3=R3, Th=Th, Ts=Ts, Tk=Tk, Ja=Ja)
avg_phi = self.ExpectationValue1pt(z, term='phi',
R_s=R_s, Th=Th, Ts=Ts, Tk=Tk, Ja=Ja)
ev21 = ev_phi + 1. - 2. * avg_phi
else:
ev_psi, ev_psi1, ev_psi2 = self.ExpectationValue2pt(z, R,
term='psi', R_s=R_s, R3=R3, Th=Th, Ts=Ts, Tk=Tk, Ja=Ja)
avg_psi = self.ExpectationValue1pt(z, term='psi',
R_s=R_s, Th=Th, Ts=Ts, Tk=Tk, Ja=Ja)
ev21 = ev_psi + 1. - 2. * avg_psi
#raise NotImplemented('still working!')
#Phi, junk1, junk2 = self.ExpectationValue2pt(z, zeta, R, term='Phi',
# R_s=R_s, R3=R3, Th=Th, Ts=Ts, Tk=Tk, Ja=Ja, k=k)
#
#ev_psi, ev_psi1, ev_psi2 = self.ExpectationValue2pt(z, zeta, R,
# term='psi', R_s=R_s, R3=R3, Th=Th, Ts=Ts, Tk=Tk, Ja=Ja)
#ev2pt = ev_psi + Phi
#
#self._cache_jp_[z][term] = R, ev2pt, Rzeros, Rzeros
return ev21, Rzeros, Rzeros
elif term == 'oo':
# New formalism
# <phi phi'> = <psi psi'> - 2 <psi psi' c> + <psi psi' c c'>
# = <psi psi'> + <o o'>
ppc, _p1, _p2 = self.ExpectationValue2pt(z, R=R, term='ppc',
R_s=R_s, R3=R3, Th=Th, Ts=Ts, Tk=Tk, Ja=Ja)
ppcc, _p1, _p2 = self.ExpectationValue2pt(z, R=R, term='ppcc',
R_s=R_s, R3=R3, Th=Th, Ts=Ts, Tk=Tk, Ja=Ja)
ev2pt = ppcc - 2 * ppc
return ev2pt, Rzeros, Rzeros
#elif term == 'bb':
# ev_ii, one, two = self.ExpectationValue2pt(z, zeta, R,
# term='ii', R_s=R_s, R3=R3, Th=Th, Ts=Ts, Tk=Tk, Ja=Ja)
# ev_ib, one, two = self.ExpectationValue2pt(z, zeta, R,
# term='ib', R_s=R_s, R3=R3, Th=Th, Ts=Ts, Tk=Tk, Ja=Ja)
#
# if self.pf['ps_include_temp']:
# ev_ih, one, two = self.ExpectationValue2pt(z, zeta, R,
# term='ih', R_s=R_s, R3=R3, Th=Th, Ts=Ts, Tk=Tk, Ja=Ja)
# ev_hh, one, two = self.ExpectationValue2pt(z, zeta, R,
# term='hh', R_s=R_s, R3=R3, Th=Th, Ts=Ts, Tk=Tk, Ja=Ja)
# ev_hb, one, two = self.ExpectationValue2pt(z, zeta, R,
# term='hb', R_s=R_s, R3=R3, Th=Th, Ts=Ts, Tk=Tk, Ja=Ja)
# else:
# ev_ih = ev_hh = ev_hb = 0.0
#
# #return (1. - Qi - Qh)**2 * Rones, Rzeros, Rzeros
#
# ev_bb = 1. - (ev_ii + 2 * ev_ib + 2 * ev_ih + ev_hh + 2 * ev_hb)
#
# self._cache_jp_[z][term] = R, ev_bb, Rzeros, Rzeros
#
# return ev_bb, Rzeros, Rzeros
# <psi psi' c> = <cdd'> - 2 <cdi'> - 2 <ci'dd>
elif term == 'ppc':
avg_c = self.ExpectationValue1pt(z, term='c',
R_s=R_s, R3=R3, Th=Th, Ts=Ts, Tk=Tk, Ja=Ja)
avg_cd = self.ExpectationValue1pt(z, term='c*d',
R_s=R_s, Th=Th, Ts=Ts, Tk=Tk, Ja=Ja)
cd, _j1, _j2 = self.ExpectationValue2pt(z, R=R, term='cd',
R_s=R_s, R3=R3, Th=Th, Ts=Ts, Tk=Tk, Ja=Ja)
ci, _j1, _j2 = self.ExpectationValue2pt(z, R=R, term='ic',
R_s=R_s, R3=R3, Th=Th, Ts=Ts, Tk=Tk, Ja=Ja)
cdd, _j1, _j2 = self.ExpectationValue2pt(z, R=R, term='cdd',
R_s=R_s, R3=R3, Th=Th, Ts=Ts, Tk=Tk, Ja=Ja)
cdip, _j1, _j2 = self.ExpectationValue2pt(z, R=R, term='cdip',
R_s=R_s, R3=R3, Th=Th, Ts=Ts, Tk=Tk, Ja=Ja)
cddip, _j1, _j2 = self.ExpectationValue2pt(z, R=R, term='cddip',
R_s=R_s, R3=R3, Th=Th, Ts=Ts, Tk=Tk, Ja=Ja)
cdpip, _j1, _j2 = self.ExpectationValue2pt(z, R=R, term='cdpip',
R_s=R_s, R3=R3, Th=Th, Ts=Ts, Tk=Tk, Ja=Ja)
ppc = avg_c + cd - ci - cdpip + avg_cd + cdd - cdip - cddip
return ppc, Rzeros, Rzeros
elif term == 'ppcc':
ccdd, _j1, _j2 = self.ExpectationValue2pt(z, R=R, term='ccdd',
R_s=R_s, R3=R3, Th=Th, Ts=Ts, Tk=Tk, Ja=Ja)
cc, _j1, _j2 = self.ExpectationValue2pt(z, R=R, term='cc',
R_s=R_s, R3=R3, Th=Th, Ts=Ts, Tk=Tk, Ja=Ja)
ccd, _j1, _j2 = self.ExpectationValue2pt(z, R=R, term='ccd',
R_s=R_s, R3=R3, Th=Th, Ts=Ts, Tk=Tk, Ja=Ja)
return cc + 2 * ccd + ccdd, Rzeros, Rzeros
elif term in ['mm', 'dd']:
# Equivalent to correlation function since <d> = 0
return self.spline_cf_mm(z)(np.log(R)), np.zeros_like(R), np.zeros_like(R)
#elif term == 'dd':
# dd = _P_ii * delta_i_bar**2 \
# + _P_hh * delta_h_bar**2 \
# + _P_bb * delta_b_bar**2 \
# + 2 * _P_ih * delta_i_bar * delta_h_bar \
# + 2 * _P_ib * delta_i_bar * delta_b_bar \
# + 2 * _P_hb * delta_h_bar * delta_b_bar
#
# return dd, Rzeros, Rzeros
##
# For 3-zone IGM, can compute everything from permutations of
# i, h, and b.
##
if self.pf['ps_igm_model'] == 1 and not self._getting_basics:
return self.ThreeZoneModel(z, R=R, term=term,
R_s=R_s, R3=R3, Th=Th, Ts=Ts, Tk=Tk, Ja=Ja)
##
# On to things we must be more careful with.
##
# Minimum bubble size
Mmin = self.Mmin(z) * self.zeta
iM = np.argmin(np.abs(M_b - Mmin))
# Only need overlap volumes once per redshift
all_OV_z = self._cache_Vo(z)
if all_OV_z is None:
all_OV_z = np.zeros((len(R), 6, len(R_i)))
for i, sep in enumerate(R):
all_OV_z[i,:,:] = \
np.array(self.overlap_volumes(sep, R_i, R_s))
self._cache_Vo_[z] = all_OV_z.copy()
#print("Generated z={} overlap_volumes".format(z))
#else:
# print("Read in z={} overlap_volumes".format(z))
all_IV_z = self._cache_IV(z)
if all_IV_z is None:
all_IV_z = np.zeros((len(R), 6, len(R_i)))
for i, sep in enumerate(R):
all_IV_z[i,:,:] = \
np.array(self.intersectional_volumes(sep, R_i, R_s, R3))
self._cache_IV_[z] = all_IV_z.copy()
Mmin_b = self.Mmin(z) * self.zeta
Mmin_h = self.Mmin(z)
Mmin_s = self.Mmin(z) * zeta_X
if term in ['hh', 'ih', 'ib', 'hb'] and self.pf['ps_temp_model'] == 1 \
and self.pf['ps_include_temp']:
Qh_int = self.get_prob(z, M_s, dndm_s, Mmin, V_h,
exp=False, ep=0.0, Mmax=None)
f_h = -np.log(1. - Qh) / Qh_int
else:
f_h = 1.
#dR = np.diff(10**bin_c2e(np.log(R)))#np.concatenate((np.diff(R), [np.diff(R)[-1]]))
#dR = 10**np.arange(np.log(R).min(), np.log(R).max() + 2 * dlogR, dlogR)
# Loop over scales
P1 = np.zeros(R.size)
P2 = np.zeros(R.size)
PT = np.zeros(R.size)
for i, sep in enumerate(R):
##
# Note: each element of this loop we're constructing an array
# over bubble mass, which we then integrate over to get a total
# probability. The shape of every quantity should be `self.m`.
##
# Yields: V11, V12, V22, V1n, V2n, Van
# Remember: these radii arrays depend on redshift (through delta_B)
all_V = all_OV_z[i]
all_IV = all_IV_z[i]
# For two-halo terms, need bias of sources.
if self.pf['ps_include_bias']:
# Should modify for temp_model==2
if self.pf['ps_include_temp']:
if self.pf['ps_temp_model'] == 2 and 'h' in term:
_ion = False
else:
_ion = True
else:
_ion = True
ep = self.excess_probability(z, sep, ion=_ion)
else:
ep = np.zeros_like(self.m)
##
# For each zone, figure out volume of region where a
# single source can ionize/heat/couple both points, as well
# as the region where a single source is not enough (Vss_ne)
##
if term == 'ii':
Vo = all_V[0]
# Subtract off more volume if heating is ON.
#if self.pf['ps_include_temp']:
# #Vne1 = Vne2 = V_i - self.IV(sep, R_i, R_s)
# Vne1 = Vne2 = V_i - all_IV[1]
#else:
# You might think: hey! If temperature fluctuations are on,
# we need to make sure the second point isn't *heated* by
# the first point. This gets into issues of overlap. By not
# introducing this correction (commented out above), we're
# saying "yes, the second point can still lie in the heated
# region of the first (ionized) point, but that point itself
# may actually be ionized, since the way we construct regions
# doesn't know that a heated region may actually live in the
# ionized region of another bubble." That is, a heated point
# can be ionized but an ionized pt can't later be designated
# a hot point.
Vne1 = Vne2 = V_i - Vo
_P1 = self.get_prob(z, M_b, dndm_b, Mmin_b, Vo, True)
_P2_1 = self.get_prob(z, M_b, dndm_b, Mmin_b, Vne1, True)
_P2_2 = self.get_prob(z, M_b, dndm_b, Mmin_b, Vne2, True, ep)
_P2 = (1. - _P1) * _P2_1 * _P2_2
if self.pf['ps_volfix'] and Qi > 0.5:
P1[i] = _P1
P2[i] = (1. - P1[i]) * _P2_1**2
else:
P1[i] = _P1
P2[i] = _P2
# Probability that one point is ionized, other in "bulk IGM"
elif term == 'ib':
Vo_iN = all_V[3] # region in which a source ionized one point
# and does nothing to the other.
# Probability that a single source does something to
# each point. If no temp fluctuations, same as _Pis
P1_iN = self.get_prob(z, M_b, dndm_b, Mmin_b, all_V[3], True)
# "probability of an ionized pt 2 given ionized pt 1"
Pigi = self.get_prob(z, M_b, dndm_b, Mmin_b, V_i-all_V[0], True, ep)
if self.pf['ps_include_temp']:
if self.pf['ps_temp_model'] == 1:
Vne2 = V_ioh - all_IV[2] - (V_i - all_IV[1])
# "probability of a heated pt 2 given ionized pt 1"
Phgi = self.get_prob(z, M_b, dndm_b * f_h, Mmin_b, Vne2, True, ep)
P2[i] = P1_iN * (1. - Pigi - Phgi)
else:
P2[i] = Qi * (1. - Qi - Qh)
else:
P2[i] = P1_iN * (1. - Pigi)
elif term == 'hb':
#if self.pf['ps_temp_model'] == 2:
# print('Ignoring hb term for now...')
# continue
#else:
# pass
if self.pf['ps_temp_model'] == 2:
P1_hN = self.get_prob(z, M_s, dndm_s, Mmin_s, all_V[4], True)
# Given that the first point is heated, what is the probability
# that the second pt is heated or ionized by a different source?
# We want the complement of that.
# Volume in which I heat but don't ionize (or heat) the other pt,
# i.e., same as the two-source term for <hh'>
#Vne2 = Vh - self.IV(sep, R_i, R_s)
Vne2 = V_ioh - all_IV[2] - (V_i - all_IV[1])
# Volume in which single source ioniz
V2ii = V_i - all_V[0]
Phgh = self.get_prob(z, M_b, dndm_b * f_h, Mmin_b, Vne2, True, ep)
Pigh = self.get_prob(z, M_b, dndm_b, Mmin_b, V2ii, True, ep)
#P1[i] = P1_hN
#ih2 = _P_ih_2[i]
#hh2 = _P_hh_2[i]
P2[i] = P1_hN * (1. - Phgh - Pigh)
else:
# Probability that single source can heat one pt but
# does nothing to the other.
P1_hN = self.get_prob(z, M_b, dndm_b * f_h, Mmin_b, all_V[4], True)
# Given that the first point is heated, what is the probability
# that the second pt is heated or ionized by a different source?
# We want the complement of that.
# Volume in which I heat but don't ionize (or heat) the other pt,
# i.e., same as the two-source term for <hh'>
#Vne2 = Vh - self.IV(sep, R_i, R_s)
Vne2 = V_ioh - all_IV[2] - (V_i - all_IV[1])
# Volume in which single source ioniz
V2ii = V_i - all_V[0]
Phgh = self.get_prob(z, M_b, dndm_b * f_h, Mmin_b, Vne2, True, ep)
Pigh = self.get_prob(z, M_b, dndm_b, Mmin_b, V2ii, True, ep)
#P1[i] = P1_hN
#ih2 = _P_ih_2[i]
#hh2 = _P_hh_2[i]
P2[i] = P1_hN * (1. - Phgh - Pigh)
elif term == 'hh':
# Excursion set approach for temperature.
if self.pf['ps_temp_model'] == 2:
Vo = all_V[2]
Vne1 = Vne2 = V_h - Vo
_P1 = self.get_prob(z, M_s, dndm_s, Mmin_s, Vo, True)
_P2_1 = self.get_prob(z, M_s, dndm_s, Mmin_s, Vne1, True)
_P2_2 = self.get_prob(z, M_s, dndm_s, Mmin_s, Vne2, True, ep)
#_P2_1 -= Qi
#_P2_1 -= Qi
_P2 = (1. - _P1) * _P2_1 * _P2_2
#if self.pf['ps_volfix'] and Qi > 0.5:
# P1[i] = _P1
# P2[i] = (1. - P1[i]) * _P2_1**2
#
#else:
P1[i] = _P1
P2[i] = _P2
else:
#Vii = all_V[0]
#_integrand1 = dndm * Vii
#
#_exp_int1 = np.exp(-simps(_integrand1[iM:] * M_b[iM:],
# x=np.log(M_b[iM:])))
#_P1_ii = (1. - _exp_int1)
# Region in which two points are heated by the same source
Vo = all_V[2]
# Subtract off region of the intersection HH volume
# in which source 1 would do *anything* to point 2.
#Vss_ne_1 = Vh - (Vo - self.IV(sep, R_i, R_s) + all_V[0])
#Vne1 = Vne2 = Vh - Vo
# For ionization, this is just Vi - Vo
#Vne1 = V2 - all_IV[2] - (V1 - all_IV[1])
Vne1 = V_ioh - all_IV[2] - (V_i - all_IV[1])
#Vne1 = V2 - self.IV(sep, R_s, R_s) - (V1 - self.IV(sep, R_i, R_s))
Vne2 = Vne1
# Shouldn't max(Vo) = Vh?
#_P1, _P2 = self.get_prob(z, zeta, Vo, Vne1, Vne2, corr, term)
_P1 = self.get_prob(z, M_b, dndm_b * f_h, Mmin_b, Vo, True)
_P2_1 = self.get_prob(z, M_b, dndm_b * f_h, Mmin_b, Vne1, True)
_P2_2 = self.get_prob(z, M_b, dndm_b * f_h, Mmin_b, Vne2, True, ep)
# kludge! to account for Qh = 1 - Qi at late times.
# Integrals above will always get over-estimated for hot
# regions.
#_P2_1 = min(Qh, _P2_1)
#_P2_2 = min(Qh, _P2_2)
_P2 = (1. - _P1) * _P2_1 * _P2_2
# The BSD is normalized so that its integral will recover
# zeta * fcoll.
# Start chugging along on two-bubble term
if np.any(Vne1 < 1e-12):
N = sum(Vne1 < 1e-12)
print('z={}, R={}: Vss_ne_1 (hh) < 0 {} / {} times'.format(z, sep, N, len(R_s)))
#print(Vne1[Vne1 < 1e-12])
print(np.all(V_ioh > V_i), np.all(V_ioh > all_IV[2]), all_IV[2][-1], all_IV[1][-1])
# Must correct for the fact that Qi+Qh<=1
if self.heating_ongoing:
P1[i] = _P1
P2[i] = _P2
else:
P1[i] = _P1 * (1. - Qh - Qi)
P2[i] = Qh**2
elif term == 'ih':
if self.pf['ps_temp_model'] == 2:
continue
if not self.pf['ps_include_xcorr_ion_hot']:
P1[i] = 0.0
P2[i] = Qh * Qi
continue
#Vo_sh_r1, Vo_sh_r2, Vo_sh_r3 = \
# self.overlap_region_shell(sep, R_i, R_s)
#Vo = 2. * Vo_sh_r2 - Vo_sh_r3
Vo = all_V[1]
#V1 = 4. * np.pi * R_i**3 / 3.
#V2 = 4. * np.pi * R_s**3 / 3.
#Vh = 4. * np.pi * (R_s**3 - R_i**3) / 3.
# Volume in which I ionize but don't heat (or ionize) the other pt.
Vne1 = V_i - all_IV[1]
# Volume in which I heat but don't ionize (or heat) the other pt,
# i.e., same as the two-source term for <hh'>
#Vne2 = Vh - self.IV(sep, R_i, R_s)
Vne2 = V_ioh - all_IV[2] - (V_i - all_IV[1])
#Vne2 = V2 - self.IV(sep, R_s, R_s) - (V1 - self.IV(sep, R_i, R_s))
if np.any(Vne2 < 0):
N = sum(Vne2 < 0)
print('R={}: Vss_ne_2 (ih) < 0 {} / {} times'.format(sep, N, len(R_s)))
#_P1, _P2 = self.get_prob(z, zeta, Vo, Vne1, Vne2, corr, term)
_P1 = self.get_prob(z, M_b, dndm_b * f_h, Mmin_b, Vo, True)
_P2_1 = self.get_prob(z, M_b, dndm_b, Mmin_b, Vne1, True)
_P2_2 = self.get_prob(z, M_b, dndm_b * f_h, Mmin_b, Vne2, True, ep)
# Kludge!
#_P2_1 = min(_P2_2, Qi)
#_P2_2 = min(_P2_2, Qh)
_P2 = (1. - _P1) * _P2_1 * _P2_2
#
#P2[i] = min(_P2, Qh * Qi)
if self.heating_ongoing:
P1[i] = _P1
P2[i] = _P2
else:
P1[i] = _P1 * (1. - Qh - Qi)
P2[i] = Qh * Qi
##
# Density stuff from here down
##
if term.count('d') == 0:
continue
if not (self.pf['ps_include_xcorr_ion_rho'] \
or self.pf['ps_include_xcorr_hot_rho']):
# These terms will remain zero
#if term.count('d') > 0:
continue
##
# First, grab a bunch of stuff we'll need.
##
# Ionization auto-correlations
#Pii, Pii_1, Pii_2 = \
# self.ExpectationValue2pt(z, zeta, R, term='ii',
# R_s=R_s, R3=R3, Th=Th, Ts=Ts)
Vo = all_V[0]
Vne1 = V_i - Vo
Vo_hh = all_V[2]
# These are functions of mass
Vsh_sph = 4. * np.pi * R_s**3 / 3.
Vsh = 4. * np.pi * (R_s**3 - R_i**3) / 3.
# Mean bubble density
#B = self._B(z, zeta)
#rho0 = self.cosm.mean_density0
#delta = M_b / V_i / rho0 - 1.
M_h = self.halos.tab_M
iM_h = np.argmin(np.abs(self.Mmin(z) - M_h))
dndm_h = self.halos.tab_dndm[iz_hmf]
fcoll_h = self.halos.tab_fcoll[iz_hmf,iM_h]
# Luminous halos, i.e., Mh > Mmin
Q_lhal = self.Qhal(z, Mmin=Mmin)
# Dark halos, i.e., those outside bubbles
Q_dhal = self.Qhal(z, Mmax=Mmin)
# All halos
Q_hal = self.Qhal(z)
# Since integration must be perfect
Q_dhal = Q_hal - Q_lhal
R_hal = self.halos.VirialRadius(M_h, z) / 1e3 # Convert to Mpc
V_hal = four_pi * R_hal**3 / 3.
# Bias of bubbles and halos
##
db = self._B(z, ion=True)
bh = self.halos.Bias(z)
bb = self.bubble_bias(z, ion=True)
xi_dd_r = xi_dd[i]#self.spline_cf_mm(z)(np.log(sep))
bh_bar = self.mean_halo_bias(z)
bb_bar = self.mean_bubble_bias(z, ion=True)
ep_bh = bh * bb_bar * xi_dd_r
exc = bh_bar * bb_bar * xi_dd_r
# Mean density of halos (mass is arbitrary)
delta_hal_bar = self.mean_halo_overdensity(z)
nhal_avg = self.mean_halo_abundance(z)
# <d> = 0 = <d_i> * Qi + <d_hal> * Q_hal + <d_nohal> * Q_nh
# Problem is Q_hal and Q_i are not mutually exclusive.
# Actually: they are inclusive! At least above Mmin.
delta_nothal_bar = -delta_hal_bar * Q_hal / (1. - Q_hal)
avg_c = self.ExpectationValue1pt(z, term='c',
R_s=R_s, R3=R3, Th=Th, Ts=Ts, Tk=Tk, Ja=Ja)
#dnh = -dh_avg * fcoll_h / (1. - fcoll_h)
#_P1_ii = self.get_prob(z, M_b, dndm_b, Mmin_b, Vo, True)
#delta_i_bar = self.mean_bubble_overdensity(z, zeta)
if term == 'id':
##
# Analog of one source or one bubble term is P_in, i.e.,
# probability that points are in the same bubble.
# The "two bubble term" is instead P_out, i.e., the
# probability that points are *not* in the same bubble.
# In the latter case, the density can be anything, while
# in the former it will be the mean bubble density.
##
# Actually think about halos
# <x_i d'> = \int d' f(d; x_i=1) f(x_i=1) f(d'; d) dd'
# Crux is f(d'; d): could be 3-d integral in general.
# Loop over bubble density.
#ixd_inner = np.zeros(self.m.size)
#for k, d1 in enumerate(db):
#
# # Excess probability of halo with mass mh
# # given bubble nearby
# exc = 0.0#bh * bb[k] * xi_dd_r
#
# #Ph = np.minimum(Ph, 1.)
#
# # <d> = fcoll_V * dh + Qi * di + rest
# #Pn = 1. - Ph
# #if sep < R_i[k]:
# # dn = d1
# #else:
# #dn = delta_n_bar
#
# # How to guarantee that <x_i d'> -> 0 on L.S.?
# # Is the key formalizing whether we're at the
# # center of the bubble or not?
# integ = dndm_h * V_h * (1. + exc)
#
# # Don't truncate at Mmin! Don't need star-forming
# # galaxy, just need mass.
# ixd_inner[k] = np.trapz(integ * M_h, x=np.log(M_h))
#_integrand = dndm_h * (M_h / rho_bar) * bh
#fcorr = 1. - np.trapz(_integrand * M_h, x=np.log(M_h))
# Just halos *outside* bubbles
hal = np.trapz(dndm_h[:iM_h] * V_hal[:iM_h] * (1. + ep_bh[:iM_h]) * M_h[:iM_h],
x=np.log(M_h[:iM_h]))
bub = np.trapz(dndm_b[iM:] * V_i[iM:] * self.m[iM:],
x=np.log(self.m[iM:]))
P_ihal = (1. - np.exp(-bub)) * (1. - np.exp(-hal))
#P2[i] = P_ihal * delta_hal_bar + _P_ib[i] * delta_b_bar
P1[i] = _P_ii_1[i] * delta_i_bar
P2[i] = _P_ii_2[i] * delta_i_bar + _P_ib[i] * delta_b_bar \
+ P_ihal * delta_hal_bar
#P2[i] = Phal * dh_avg + np.exp(-hal) * dnih_avg
#P2[i] += np.exp(-hal) * dnih_avg * Qi
#P2[i] += _P_in[i] * delta_n_bar
elif term in ['cd', 'cdip']:
continue
elif term == 'idd':
hal = np.trapz(dndm_h[:iM_h] * V_hal[:iM_h] * (1. + ep_bh[:iM_h]) * M_h[:iM_h],
x=np.log(M_h[:iM_h]))
bub = np.trapz(dndm_b[iM:] * V_i[iM:] * self.m[iM:],
x=np.log(self.m[iM:]))
P_ihal = (1. - np.exp(-bub)) * (1. - np.exp(-hal))
#P2[i] = P_ihal * delta_hal_bar + _P_ib[i] * delta_b_bar
P1[i] = _P_ii_1[i] * delta_i_bar**2
P2[i] = _P_ii_2[i] * delta_i_bar**2 \
+ _P_ib[i] * delta_b_bar * delta_i_bar\
+ P_ihal * delta_hal_bar * delta_i_bar
#exc = bh_bar * bb_bar * xi_dd_r
#
#hal = np.trapz(dndm_h * V_hal * (1. + exc) * M_h,
# x=np.log(M_h))
#bub = np.trapz(dndm_b[iM:] * V_i[iM:] * self.m[iM:],
# x=np.log(self.m[iM:]))
#
#P2[i] = ((1. - np.exp(-hal)) * delta_hal_bar
# + np.exp(-hal) * delta_nothal_bar) \
# * (1. - np.exp(-bub)) * delta_i_bar
#_P1 = _P_ii_1[i] * delta_i_bar**2
#
##_P2 = _P_ii_2[i] * delta_i_bar**2 \
## + Qi * (1. - _P_ii_1[i]) * delta_i_bar * delta_n_bar \
## - Qi**2 * delta_i_bar**2
##
#P1[i] = _P1
#
##P2[i] = Qi * xi_dd[i] # There's gonna be a bias here
##P2[i] = _P_ii_2[i] * delta_i_bar**2 - Qi**2 * delta_i_bar**2
#
##continue
#
#idd_ii = np.zeros(self.m.size)
#idd_in = np.zeros(self.m.size)
#
## Convert from dm to dd
##dmdd = np.diff(self.m) / np.diff(db)
##dmdd = np.concatenate(([0], dmdd))
#
## Should be able to speed this up
#
#for k, d1 in enumerate(db):
#
#
# exc = bb[k] * bb * xi_dd_r
#
# grand = db[k] * dndm_b[k] * V_i[k] \
# * db * dndm_b * V_i \
# * (1. + exc)
#
# idd_ii[k] = np.trapz(grand[iM:] * self.m[iM:],
# x=np.log(self.m[iM:]))
#
# #exc_in = bb[k] * bh * xi_dd_r
# #
# #grand_in = db[k] * dndm_b[k] * V_i[k] #\
# # #* dh * dndm_h * Vvir \
# # #* (1. + exc_in)
# #
# #idd_in[k] = np.trapz(grand_in[iM_h:] * M_h[iM_h:],
# # x=np.log(M_h[iM_h:]))
#
##idd_in = np.trapz(db[iM:] * dndm_b[iM:] * V_i[iM:] * delta_n_bar * self.m[iM:],
## x=np.log(self.m[iM:]))
#
#
#P2[i] = _P_ii_2[i] \
# * np.trapz(idd_ii[iM:] * self.m[iM:],
# x=np.log(self.m[iM:]))
#
## Another term for <x_i x'> possibility. Doesn't really
## fall into the one bubble two bubble language, so just
## sticking it in P2.
## Assumes neutral phase is at cosmic mean neutral density
## Could generalize...
#P2[i] += _P_ib[i] * delta_i_bar * delta_b_bar
#
## We're neglecting overdensities by just using the
## mean neutral density
#
#continue
elif term == 'iid':
# This is like the 'id' term except the second point
# has to be ionized.
P2[i] = _P_ii[i] * delta_i_bar
elif term == 'iidd':
P2[i] = _P_ii[i] * delta_i_bar**2
continue
# Might have to actually do a double integral here.
iidd_2 = np.zeros(self.m.size)
# Convert from dm to dd
#dmdd = np.diff(self.m) / np.diff(db)
#dmdd = np.concatenate(([0], dmdd))
# Should be able to speed this up
for k, d1 in enumerate(db):
exc = bb[k] * bb * xi_dd_r
grand = db[k] * dndm_b[k] * V_i[k] \
* db * dndm_b * V_i \
* (1. + exc)
iidd_2[k] = np.trapz(grand[iM:] * self.m[iM:],
x=np.log(self.m[iM:]))
P2[i] = _P_ii_2[i] \
* np.trapz(iidd_2[iM:] * self.m[iM:],
x=np.log(self.m[iM:]))
#elif term == 'cd':
#
# if self.pf['ps_include_xcorr_hot_rho'] == 0:
# break
# elif self.pf['ps_include_xcorr_hot_rho'] == 1:
# hal = np.trapz(dndm_h * V_hal * (1. + exc) * M_h,
# x=np.log(M_h))
# hot = np.trapz(dndm_b[iM:] * V_h[iM:] * self.m[iM:],
# x=np.log(self.m[iM:]))
# P2[i] = ((1. - np.exp(-hal)) * dh_avg + np.exp(-hal) * dnih_avg) \
# * (1. - np.exp(-hot)) * avg_c
# elif self.pf['ps_include_xcorr_hot_rho'] == 2:
# P2[i] = _P_hh[i] * delta_h_bar + _P_hb[i] * delta_b_bar
# else:
# raise NotImplemented('help')
#
#elif term == 'ccd':
#
#
# _P1 = _P_hh_1[i] * delta_i_bar
# #B = self._B(z, zeta)
# #_P1 = delta_i_bar \
# # * self.get_prob(z, M_b, dndm_b, Mmin_b, Vo, True) \
#
#
# _P2 = _P_hh_2[i] * delta_i_bar
#
# #_P2 = (1. - _P_ii_1[i]) * delta_i_bar \
# # * self.get_prob(z, M_b, dndm_b, Mmin_b, Vne1, True) \
# # * self.get_prob(z, M_b, dndm_b, Mmin_b, Vne1, True, ep)
# # #* self.get_prob(z, M_h, dndm_h, Mmi\n_h, Vvir, False, ep_bb)
#
# P1[i] = _P1
# P2[i] = _P2
#
elif term == 'cdd':
raise NotImplementedError('help')
hal = np.trapz(dndm_h * V_hal * (1. + exc) * M_h,
x=np.log(M_h))
hot = np.trapz(dndm_b[iM:] * Vsh[iM:] * self.m[iM:],
x=np.log(self.m[iM:]))
hoi = np.trapz(dndm_b[iM:] * Vsh_sph[iM:] * self.m[iM:],
x=np.log(self.m[iM:])) # 'hot or ionized'
# One point in shell, other point in halo
# One point ionized, other point in halo
# Q. Halos exist only in ionized regions?
elif term == 'ccdd':
raise NotImplementedError('help')
# Add in bulk IGM correction.
# Add routines for calculating 'ib' and 'hb'?
P2[i] = _P_hh[i] * delta_h_bar**2 \
+ _P_bb[i] * delta_h_bar**2 \
+ _P_hb[i] * delta_h_bar * delta_b_bar
continue
# Might have to actually do a double integral here.
iidd_2 = np.zeros(self.m.size)
# Convert from dm to dd
#dmdd = np.diff(self.m) / np.diff(db)
#dmdd = np.concatenate(([0], dmdd))
# Should be able to speed this up
for k, d1 in enumerate(db):
exc = bb[k] * bb * xi_dd_r
grand = db[k] * dndm_b[k] * V_i[k] \
* db * dndm_b * V_i \
* (1. + exc)
iidd_2[k] = np.trapz(grand[iM:] * self.m[iM:],
x=np.log(self.m[iM:]))
P2[i] = _P_ii_2[i] \
* np.trapz(iidd_2[iM:] * self.m[iM:],
x=np.log(self.m[iM:]))
else:
raise NotImplementedError('No method found for term=\'{}\''.format(term))
##
# SUM UP
##
PT = P1 + P2
if term in ['ii', 'hh', 'ih', 'ib', 'hb', 'bb']:
if term not in self._cache_jp_[z]:
self._cache_jp_[z][term] = R, PT, P1, P2
return PT, P1, P2
def ThreeZoneModel(self, z, R, term='ii', R_s=None, R3=None,
Th=500.0, Ts=None, Tk=None, Ja=None, k=None):
"""
Model in which the IGM is partitioned into three phases: ionized, hot, and bulk.
.. note :: If ps_include_temp==False, this is just a two-zone model
since there is no heated phase in this limit.
"""
if not self._getting_basics:
basics = self.get_basics(z, R, R_s=R_s, Th=Th, Ts=Ts, Tk=Tk, Ja=Ja)
if term in basics:
return basics[term]
_P_ii, _P_ii_1, _P_ii_2 = basics['ii']
_P_hh, _P_hh_1, _P_hh_2 = basics['hh']
_P_bb, _P_bb_1, _P_bb_2 = basics['bb']
_P_ih, _P_ih_1, _P_ih_2 = basics['ih']
_P_ib, _P_ib_1, _P_ib_2 = basics['ib']
_P_hb, _P_hb_1, _P_hb_2 = basics['hb']
Rones = np.ones_like(R)
Rzeros = np.zeros_like(R)
delta_i_bar = self.delta_bubble_vol_weighted(z, ion=True)
delta_h_bar = self.delta_shell(z)
delta_b_bar = self.BulkDensity(z, R_s)
Qi = self.MeanIonizedFraction(z)
Qh = self.MeanIonizedFraction(z, ion=False)
Tcmb = self.cosm.TCMB(z)
ci = 0.0
ch = self.TempToContrast(z, Th=Th, Tk=Tk, Ts=Ts, Ja=Ja)
if Ts is None:
cb = 0.0
else:
cb = Tcmb / Ts
##
# On to derived quantities
##
if term == 'cc':
result = Rzeros.copy()
if self.pf['ps_include_temp']:
result += _P_hh * ch**2 + 2 * _P_hb * ch * cb + _P_bb * cb**2
if self.pf['ps_include_lya']:
xa = self.hydr.RadiativeCouplingCoefficient(z, Ja, Tk)
if xa < self.pf['ps_lya_cut']:
ev_aa, ev_aa1, ev_aa2 = \
self.ExpectationValue2pt(z, R, term='aa',
R_s=R_s, R3=R3, Th=Th, Ts=Ts, Tk=Tk, k=k, Ja=Ja)
Tcmb = self.cosm.TCMB(z)
result += ev_aa / (1. + xa)**2
return result, Rzeros, Rzeros
elif term == 'ic':
if not self.pf['ps_include_xcorr_ion_hot']:
return (Qi**2 * ci + Qh * Qi * ch) * Rones, Rzeros, Rzeros
ev = _P_ih * ch + _P_ib * cb
return ev, Rzeros, Rzeros
elif term == 'icc':
ch = self.TempToContrast(z, Th=Th, Tk=Tk, Ts=Ts, Ja=Ja)
ci = self.BubbleContrast(z, Th=Th, Tk=Tk, Ts=Ts, Ja=Ja)
ev_ii, ev_ii1, ev_ii2 = self.ExpectationValue2pt(z, R=R,
term='ii', R_s=R_s, R3=R3, Th=Th, Ts=Ts, Tk=Tk, Ja=Ja)
ev_ih, ev_ih1, ev_ih2 = self.ExpectationValue2pt(z, R=R,
term='ih', R_s=R_s, R3=R3, Th=Th, Ts=Ts, Tk=Tk, Ja=Ja)
return ev_ii * ci**2 + ev_ih * ch * ci, Rzeros, Rzeros
elif term == 'iidd':
if self.pf['ps_use_wick']:
ev_ii, one_pt, two_pt = self.ExpectationValue2pt(z, R,
term='ii')
ev_dd, one_pt, two_pt = self.ExpectationValue2pt(z, R,
term='dd')
ev_id, one_pt, two_pt = self.ExpectationValue2pt(z, R,
term='id')
#if self.pf['ps_include_ion']:
avg_id = self.ExpectationValue1pt(z, term='i*d')
idt, id1, id2 = \
self.ExpectationValue2pt(z, R, term='id')
return ev_ii * ev_dd + ev_id**2 + avg_id**2, Rzeros, Rzeros
else:
return _P_ii * delta_i_bar**2, Rzeros, Rzeros
elif term == 'id':
P = _P_ii * delta_i_bar \
+ _P_ih * delta_h_bar \
+ _P_ib * delta_b_bar
return P, Rzeros, Rzeros
elif term == 'iid':
return _P_ii * delta_i_bar, Rzeros, Rzeros
elif term == 'idd':
P = _P_ii * delta_i_bar**2 \
+ _P_ih * delta_i_bar * delta_h_bar \
+ _P_ib * delta_i_bar * delta_b_bar
return P, Rzeros, Rzeros
elif term == 'cd':
if not self.pf['ps_include_xcorr_hot_rho']:
return Rzeros, Rzeros, Rzeros
ev = _P_ih * ch * delta_i_bar \
+ _P_ib * cb * delta_i_bar \
+ _P_hh * ch * delta_h_bar \
+ _P_hb * ch * delta_b_bar \
+ _P_hb * cb * delta_h_bar \
+ _P_bb * cb * delta_b_bar
return ev, Rzeros, Rzeros
elif term == 'ccdd' and self.pf['ps_use_wick']:
ev_cc, one_pt, two_pt = self.ExpectationValue2pt(z, R,
term='cc')
ev_dd, one_pt, two_pt = self.ExpectationValue2pt(z, R,
term='dd')
ev_cd, one_pt, two_pt = self.ExpectationValue2pt(z, R,
term='cd')
#if self.pf['ps_include_ion']:
avg_cd = self.ExpectationValue1pt(z, term='c*d')
cdt, cd1, cd2 = \
self.ExpectationValue2pt(z, R, term='cd')
return ev_cc * ev_dd + ev_cd**2 + avg_cd**2, Rzeros, Rzeros
elif term == 'aa':
aa = self.CorrelationFunction(z, R, term='aa',
R_s=R_s, R3=R3, Th=Th, Ts=Ts, Tk=Tk, Ja=Ja, k=k)
return aa, Rzeros, Rzeros
##
# BUNCHA TEMPERATURE-DENSITY STUFF BELOW
##
elif term == 'ccd':
ev = _P_hh * ch**2 * delta_h_bar \
+ _P_hb * ch * cb * delta_h_bar \
+ _P_hb * ch * cb * delta_b_bar \
+ _P_bb * cb**2 * delta_b_bar
return ev, Rzeros, Rzeros
elif term == 'cdd':
ev = _P_ih * ch * delta_i_bar * delta_h_bar \
+ _P_ib * cb * delta_i_bar * delta_b_bar \
+ _P_hh * ch * delta_h_bar**2 \
+ _P_hb * ch * delta_h_bar * delta_b_bar \
+ _P_hb * cb * delta_h_bar * delta_b_bar \
+ _P_bb * cb * delta_b_bar**2
return ev, Rzeros, Rzeros
# <c d x_i'>
elif term == 'cdip':
ev = _P_ih * delta_h_bar * ch + _P_ib * delta_b_bar * cb
return ev, Rzeros, Rzeros
# <c d d' x_i'>
elif term == 'cddip':
ev = _P_ih * delta_i_bar * delta_h_bar * ch \
+ _P_ib * delta_i_bar * delta_b_bar * cb
return ev, Rzeros, Rzeros
elif term == 'cdpip':
ev = _P_ih * delta_i_bar * ch \
+ _P_ib * delta_i_bar * cb
return ev, Rzeros, Rzeros
# Wick's theorem approach above
elif term == 'ccdd':
ev = _P_hh * delta_h_bar**2 * ch**2 \
+ 2 * _P_hb * delta_h_bar * ch * delta_b_bar * cb \
+ _P_bb * delta_b_bar**2 * cb**2
return ev, Rzeros, Rzeros
else:
raise NotImplementedError('No model for term={} in ThreeZoneModel.'.format(term))
def get_prob(self, z, M, dndm, Mmin, V, exp=True, ep=0.0, Mmax=None):
"""
Basically do an integral over some distribution function.
"""
# Set lower integration limit
iM = np.argmin(np.abs(M - Mmin))
if Mmax is not None:
iM2 = np.argmin(np.abs(M - Mmax)) + 1
else:
iM2 = None
# One-source term
integrand = dndm * V * (1. + ep)
integr = np.trapz(integrand[iM:iM2] * M[iM:iM2], x=np.log(M[iM:iM2]))
# Exponentiate?
if exp:
exp_int = np.exp(-integr)
P = 1. - exp_int
else:
P = integr
return P
def CorrelationFunction(self, z, R=None, term='ii',
R_s=None, R3=0.0, Th=500., Tc=1., Ts=None, k=None, Tk=None, Ja=None):
"""
Compute the correlation function of some general term.
"""
Qi = self.MeanIonizedFraction(z)
Qh = self.MeanIonizedFraction(z, ion=False)
if R is None:
use_R_tab = True
R = self.halos.tab_R
else:
use_R_tab = False
if Qi == 1:
return np.zeros_like(R)
Tcmb = self.cosm.TCMB(z)
Tgas = self.cosm.Tgas(z)
##
# Check cache for match
##
cached_result = self._cache_cf(z, term)
if cached_result is not None:
_R, _cf = cached_result
if _R.size == R.size:
if np.allclose(_R, R):
return _cf
return np.interp(R, _R, _cf)
##
# 21-cm correlation function
##
if term in ['21', 'phi', 'psi']:
#if term in ['21', 'phi']:
# ev_2pt, ev_2pt_1, ev_2pt_2 = \
# self.ExpectationValue2pt(z, zeta, R=R, term='phi',
# R_s=R_s, R3=R3, Th=Th, Ts=Ts)
# avg_phi = self.ExpectationValue1pt(z, zeta, term='phi',
# R_s=R_s, Th=Th, Ts=Ts)
#
# cf_21 = ev_2pt - avg_phi**2
#
#else:
ev_2pt, ev_2pt_1, ev_2pt_2 = \
self.ExpectationValue2pt(z, R=R, term='psi',
R_s=R_s, R3=R3, Th=Th, Ts=Ts, Tk=Tk, Ja=Ja)
avg_psi = self.ExpectationValue1pt(z, term='psi',
R_s=R_s, Th=Th, Ts=Ts, Tk=Tk, Ja=Ja)
cf_psi = ev_2pt - avg_psi**2
##
# Temperature fluctuations
##
include_temp = self.pf['ps_include_temp']
include_lya = self.pf['ps_include_lya']
if (include_temp or include_lya) and term in ['phi', '21']:
ev_oo, o1, o2 = self.ExpectationValue2pt(z, R=R,
term='oo', R_s=R_s, Ts=Ts, Tk=Tk, Th=Th, Ja=Ja, k=k)
avg_oo = self.ExpectationValue1pt(z, term='oo',
R_s=R_s, Ts=Ts, Tk=Tk, Th=Th, Ja=Ja, R3=R3)
cf_omega = ev_oo - avg_oo
cf_21 = cf_psi + cf_omega # i.e., cf_phi
else:
cf_21 = cf_psi
if term == '21':
cf = cf_21
elif term == 'phi':
cf = cf_21
elif term == 'psi':
cf = cf_psi
elif term == 'nn':
cf = -self.CorrelationFunction(z, R, term='ii',
R_s=R_s, R3=R3, Th=Th, Ts=Ts)
return cf
elif term == 'nc':
cf = -self.CorrelationFunction(z, R, term='ic',
R_s=R_s, R3=R3, Th=Th, Ts=Ts)
return cf
elif term == 'nd':
cf = -self.CorrelationFunction(z, R, term='id',
R_s=R_s, R3=R3, Th=Th, Ts=Ts)
return cf
##
# Matter correlation function -- we have this tabulated already.
##
elif term in ['dd', 'mm']:
if not self.pf['ps_include_density']:
cf = np.zeros_like(R)
self._cache_cf_[z][term] = R, cf
return cf
iz = np.argmin(np.abs(z - self.halos.tab_z_ps))
if use_R_tab:
cf = self.halos.tab_cf_mm[iz]
else:
cf = np.interp(np.log(R), np.log(self.halos.tab_R),
self.halos.tab_cf_mm[iz])
##
# Ionization correlation function
##
elif term == 'ii':
if not self.pf['ps_include_ion']:
cf = np.zeros_like(R)
self._cache_cf_[z][term] = R, cf
return cf
ev_ii, ev_ii_1, ev_ii_2 = \
self.ExpectationValue2pt(z, R=R, term='ii',
R_s=R_s, R3=R3, Th=Th, Ts=Ts, Tk=Tk, Ja=Ja)
ev_i = self.ExpectationValue1pt(z, term='i',
R_s=R_s, Th=Th, Ts=Ts, Tk=Tk, Ja=Ja)
cf = ev_ii - ev_i**2
##
# Temperature correlation function
##
elif term == 'hh':
if not self.pf['ps_include_temp']:
cf = np.zeros_like(R)
self._cache_cf_[z][term] = R, cf
return cf
jp_hh, jp_hh_1, jp_hh_2 = \
self.ExpectationValue2pt(z, R=R, term='hh',
R_s=R_s, R3=R3, Th=Th, Ts=Ts, Tk=Tk, Ja=Ja)
#if self.pf['ps_volfix']:
# if (Qh < 0.5):
# ev_hh = jp_hh_1 + jp_hh_2
# else:
# # Should this 1-Qh factor be 1-Qh-Qi?
# ev_hh = (1. - Qh) * jp_hh_1 + Qh**2
#else:
# ev_hh = jp_hh
# Should just be Qh
ev_h = self.ExpectationValue1pt(z, term='h',
R_s=R_s, Ts=Ts, Th=Th, Tk=Tk, Ja=Ja)
cf = jp_hh - ev_h**2
##
# Ionization-density cross correlation function
##
elif term == 'id':
if self.pf['ps_include_xcorr_ion_rho'] == 0:
cf = np.zeros_like(R)
self._cache_cf_[z][term] = R, cf
return cf
#jp_ii, jp_ii_1, jp_ii_2 = \
# self.ExpectationValue2pt(z, R, term='ii',
# R_s=R_s, R3=R3, Th=Th, Ts=Ts)
jp_im, jp_im_1, jp_im_2 = \
self.ExpectationValue2pt(z, R, term='id',
R_s=R_s, R3=R3, Th=Th, Ts=Ts, Tk=Tk, Ja=Ja)
ev_xd = 0.0
# Add optional correction to ensure limiting behavior?
if self.pf['ps_volfix']:
#if Qi < 0.5:
ev = jp_im
#else:
# ev = jp_im_1 - jp_ii_1
else:
ev = jp_im
# Equivalent to correlation function in this case.
cf = ev - ev_xd
# c = contrast, instead of 'c' for cold, use '3' for zone 3 (later)
elif term == 'cc':
if not self.pf['ps_include_temp']:
cf = np.zeros_like(R)
self._cache_cf_[z][term] = R, cf
return cf
ev_cc, ev_cc_1, ev_cc_2 = \
self.ExpectationValue2pt(z, R=R, term='cc',
R_s=R_s, R3=R3, Th=Th, Ts=Ts, Tk=Tk, Ja=Ja)
ev_c = self.ExpectationValue1pt(z, term='c',
R_s=R_s, Ts=Ts, Th=Th, Tk=Tk, Ja=Ja)
#else:
# # Remember, this is for the hot/cold term
# Csq = (Tcmb / (Tk - Tcmb)) * delta_T[0] * delta_T[1] \
# / (1. + delta_T[0]) / (1. + delta_T[1])
# C = np.sqrt(C)
# Add optional correction to ensure limiting behavior?
#if self.pf['ps_volfix']:
# if (Qh < 0.5) and (Qi < 0.5):
# ev_cc = jp_cc_1 + jp_cc_2
# else:
# ev_cc = (1. - Qi) * jp_cc_1 + ev_c**2
#else:
# ev_cc = jp_cc
cf = ev_cc - ev_c**2
##
# Ionization-heating cross-correlation function
##
elif term == 'ih':
if not self.pf['ps_include_temp']:
cf = np.zeros_like(R)
self._cache_cf_[z][term] = R, cf
return cf
if self.pf['ps_temp_model'] == 2:
return np.zeros_like(R)
ev_2pt, ev_1, ev_2 = \
self.ExpectationValue2pt(z, R=R, term='ih',
R_s=R_s, R3=R3, Th=Th, Ts=Ts, Tk=Tk, Ja=Ja)
# Add optional correction to ensure limiting behavior?
#if self.pf['ps_volfix']:
# if (Qh < 0.5) and (Qi < 0.5):
# ev_2pt = jp_1 + jp_2
# else:
# ev_2pt = (1. - Qh) * jp_1 + Qh * Qi
#else:
# ev_2pt = jp + Qh * Qi
ev_1pt_i = self.ExpectationValue1pt(z, term='i',
R_s=R_s, Ts=Ts, Th=Th, Tk=Tk, Ja=Ja)
ev_1pt_h = self.ExpectationValue1pt(z, term='h',
R_s=R_s, Ts=Ts, Th=Th, Tk=Tk, Ja=Ja)
cf = ev_2pt - ev_1pt_i * ev_1pt_h
elif term == 'ic':
ev_2pt, ev_1, ev_2 = \
self.ExpectationValue2pt(z, R=R, term='ic',
R_s=R_s, R3=R3, Th=Th, Ts=Ts, Tk=Tk, Ja=Ja)
# Add optional correction to ensure limiting behavior?
#if self.pf['ps_volfix']:
# if (Qh < 0.5) and (Qi < 0.5):
# ev_2pt = jp_1 + jp_2
# else:
# ev_2pt = (1. - Qh) * jp_1 + Qh * Qi
#else:
# ev_2pt = jp + Qh * Qi
ev_1pt = self.ExpectationValue1pt(z, term='i*c',
R_s=R_s, Ts=Ts, Th=Th, Tk=Tk, Ja=Ja)
# Don't square the second term!
cf = ev_2pt - ev_1pt
##
# Special case: Ly-a
##
elif term == 'aa':
Mmin = lambda zz: self.Mmin(zz)
# Horizon set by distance photon can travel between n=3 and n=2
zmax = self.hydr.zmax(z, 3)
rmax = self.cosm.ComovingRadialDistance(z, zmax) / cm_per_mpc
# Light-cone effects?
if self.pf['ps_include_lya_lc'] == False:
a = None
elif type(self.pf['ps_include_lya_lc']) is float:
a = lambda zz: self.pf['ps_include_lya_lc']
else:
# Use specific mass accretion rate of Mmin halo
# to get characteristic halo growth time. This is basically
# independent of mass so it should be OK to just pick Mmin.
#oot = lambda zz: self.pops[0].dfcolldt(z) / self.pops[0].halos.fcoll_2d(zz, np.log10(Mmin(zz)))
#a = lambda zz: (1. / oot(zz)) / pop.cosm.HubbleTime(zz)
oot = lambda zz: self.halos.MAR_func(zz, Mmin(zz)) / Mmin(zz) / s_per_yr
a = lambda zz: (1. / oot(zz)) / self.cosm.HubbleTime(zz)
if a is not None:
tstar = lambda zz: a(zz) * self.cosm.HubbleTime(zz)
rstar = c * tstar(z) * (1. + z) / cm_per_mpc
ulya = lambda kk, mm, zz: self.halos.u_isl_exp(kk, mm, zz, rmax, rstar)
else:
ulya = lambda kk, mm, zz: self.halos.u_isl(kk, mm, zz, rmax)
ps_try = self._cache_ps(z, 'aa')
if ps_try is not None:
ps = ps_try
else:
ps = np.array([self.halos.PowerSpectrum(z, _k, ulya, Mmin(z)) \
for _k in k])
self._cache_ps_[z][term] = ps
cf = self.CorrelationFunctionFromPS(R, ps, k, split_by_scale=True)
else:
raise NotImplementedError('Unrecognized correlation function: {}'.format(term))
#if term not in ['21', 'mm']:
# cf /= (2. * np.pi)**3
self._cache_cf_[z][term] = R, cf.copy()
return cf
# def PowerSpectrum(self, z, zeta, Q=None, term='ii', rescale=False,
# cf=None, R=None):
#
# if cf is None:
# cf = self.CorrelationFunction(z, zeta, Q=Q, term=term,
# rescale=rescale)
# else:
#
# if R is None:
# R = self.halos.tab_R
#
# assert cf.size == R.size
# print('Correlation function supplied. Neglecting all other parameters.')
#
# # Integrate over R
# func = lambda k: self.halos._integrand_FT_3d_to_1d(cf, k, R)
#
# return np.array([np.trapz(func(k) * R, x=np.log(R)) \
# for k in self.halos.tab_k]) / 2. / np.pi
def BubbleContrast(self, z, Th=500., Tk=None, Ts=None, Ja=None):
return 0.0
Tcmb = self.cosm.TCMB(z)
Tgas = self.cosm.Tgas(z)
if Tk is None:
print("Assuming Tk=Tgas(unheated).")
Tk = Tgas
if Ts is None:
print("Assuming Ts=Tk.")
Ts = Tk
if Ja is None:
print("Assuming xa=0.")
xa = Ja = 0.0
else:
xa = self.hydr.RadiativeCouplingCoefficient(z, Ja, Tk)
return 0.0#Tcmb / (Ts - Tcmb)
def TempToContrast(self, z, Th=500., Tk=None, Ts=None, Ja=None):
"""
Find value of 'contrast' fluctuation given mean temperature Tk and
assumed kinetic temperature of heated regions, Th.
"""
if self.pf['ps_temp_model'] == 2:
return 1. / self.pf['ps_saturated']
if Th is None:
return 0.0
Tcmb = self.cosm.TCMB(z)
Tgas = self.cosm.Tgas(z)
if Tk is None:
print("Assuming Tk=Tgas(unheated).")
Tk = Tgas
if Ts is None:
print("Assuming Ts=Tk.")
Ts = Tk
# Don't let heated regions be colder than cosmic mean temperature.
# Forces delta_T -> 0 at late times
Th = max(Th, Tk)
delta_T = Th / Tk - 1.
# NEW CONTRAST DEFINITION
return Tcmb / Th
#if ii <= 1:
# Contrast of hot regions.
#return (1. - Tcmb / Th) / (1. - Tcmb / Ts) - 1.
#return (delta_T / (1. + delta_T)) * (Tcmb / (Tk - Tcmb))
def CorrelationFunctionFromPS(self, R, ps, k=None, split_by_scale=False,
kmin=None, epsrel=1e-8, epsabs=1e-8, method='clenshaw-curtis',
use_pb=False, suppression=np.inf):
if np.all(ps == 0):
return np.zeros_like(R)
return self.halos.InverseFT3D(R, ps, k, kmin=kmin,
epsrel=epsrel, epsabs=epsabs, use_pb=use_pb,
split_by_scale=split_by_scale, method=method, suppression=suppression)
def PowerSpectrumFromCF(self, k, cf, R=None, split_by_scale=False,
Rmin=None, epsrel=1e-8, epsabs=1e-8, method='clenshaw-curtis',
use_pb=False, suppression=np.inf):
if np.all(cf == 0):
return np.zeros_like(k)
return self.halos.FT3D(k, cf, R, Rmin=Rmin,
epsrel=epsrel, epsabs=epsabs, use_pb=use_pb,
split_by_scale=split_by_scale, method=method, suppression=suppression)
```
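The two transform helpers above delegate to `self.halos.InverseFT3D` and `FT3D`, which are not shown in this file. As a rough, standalone illustration of the operation they perform, the sketch below evaluates the spherically symmetric 3-D Fourier pair between a power spectrum P(k) and a correlation function xi(R) with a simple log-spaced trapezoid rule; the Clenshaw-Curtis option, `split_by_scale`, and `suppression` handling of the real implementation are omitted, and the toy spectrum is invented.
```python
# Minimal sketch (not the ARES implementation): spherically symmetric 3-D
# Fourier pair between a power spectrum P(k) and correlation function xi(R).
import numpy as np

def cf_from_ps(R, ps, k):
    # xi(R) = 1/(2 pi^2) * int P(k) k^2 sin(kR)/(kR) dk, trapezoid in log k.
    integrand = ps * k**3 * np.sinc(k * R[:, None] / np.pi)
    return np.trapz(integrand, x=np.log(k), axis=1) / (2. * np.pi**2)

def ps_from_cf(k, cf, R):
    # Inverse relation: P(k) = 4 pi * int xi(R) R^2 sin(kR)/(kR) dR.
    integrand = cf * R**3 * np.sinc(k[:, None] * R / np.pi)
    return 4. * np.pi * np.trapz(integrand, x=np.log(R), axis=1)

if __name__ == '__main__':
    k = np.logspace(-3, 2, 500)
    R = np.logspace(-2, 2, 200)
    ps = 1e3 * k / (1. + (k / 0.1)**4)   # toy spectrum, arbitrary units
    print(cf_from_ps(R, ps, k)[:3])
```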
#### File: ares/static/SpectralSynthesis.py
```python
import time
import numpy as np
from ..util import Survey
from ..util import ProgressBar
from ..phenom import Madau1995
from ..util import ParameterFile
from scipy.optimize import curve_fit
from scipy.interpolate import interp1d
from ..physics.Cosmology import Cosmology
from scipy.interpolate import RectBivariateSpline
from ..physics.Constants import s_per_myr, c, h_p, erg_per_ev, flux_AB
nanoJ = 1e-23 * 1e-9
tiny_lum = 1e-8
all_cameras = ['wfc', 'wfc3', 'nircam']
def _powlaw(x, p0, p1):
return p0 * (x / 1.)**p1
class SpectralSynthesis(object):
def __init__(self, **kwargs):
self.pf = ParameterFile(**kwargs)
@property
def cosm(self):
if not hasattr(self, '_cosm'):
self._cosm = Cosmology(pf=self.pf, **self.pf)
return self._cosm
@property
def src(self):
return self._src
@src.setter
def src(self, value):
self._src = value
@property
def oversampling_enabled(self):
if not hasattr(self, '_oversampling_enabled'):
self._oversampling_enabled = True
return self._oversampling_enabled
@oversampling_enabled.setter
def oversampling_enabled(self, value):
self._oversampling_enabled = value
@property
def oversampling_below(self):
if not hasattr(self, '_oversampling_below'):
self._oversampling_below = 30.
return self._oversampling_below
@oversampling_below.setter
def oversampling_below(self, value):
self._oversampling_below = value
@property
def force_perfect(self):
if not hasattr(self, '_force_perfect'):
self._force_perfect = False
return self._force_perfect
@force_perfect.setter
def force_perfect(self, value):
self._force_perfect = value
@property
def careful_cache(self):
if not hasattr(self, '_careful_cache_'):
self._careful_cache_ = True
return self._careful_cache_
@careful_cache.setter
def careful_cache(self, value):
self._careful_cache_ = value
@property
def cameras(self):
if not hasattr(self, '_cameras'):
self._cameras = {}
for cam in all_cameras:
self._cameras[cam] = Survey(cam=cam,
force_perfect=self.force_perfect,
cache=self.pf['pop_synth_cache_phot'])
return self._cameras
@property
def hydr(self):
if not hasattr(self, '_hydr'):
from ..physics.Hydrogen import Hydrogen
self._hydr = Hydrogen(pf=self.pf, cosm=self.cosm, **self.pf)
return self._hydr
@property
def madau1995(self):
if not hasattr(self, '_madau1995'):
self._madau1995 = Madau1995(hydr=self.hydr, cosm=self.cosm,
**self.pf)
return self._madau1995
def OpticalDepth(self, z, owaves):
"""
Compute Lyman series line blanketing following Madau (1995).
Parameters
----------
z : int, float
Redshift of object.
owaves : np.ndarray
Observed wavelengths in microns.
"""
if self.pf['tau_clumpy'] is None:
return 0.0
assert self.pf['tau_clumpy'].lower() == 'madau1995', \
"tau_clumpy='madau1995' is currently the sole option!"
return self.madau1995(z, owaves)
def L_of_Z_t(self, wave):
if not hasattr(self, '_L_of_Z_t'):
self._L_of_Z_t = {}
if wave in self._L_of_Z_t:
return self._L_of_Z_t[wave]
tarr = self.src.times
Zarr = np.sort(list(self.src.metallicities.values()))
L = np.zeros((tarr.size, Zarr.size))
for j, Z in enumerate(Zarr):
L[:,j] = self.src.L_per_sfr_of_t(wave, Z=Z)
# Interpolant
self._L_of_Z_t[wave] = RectBivariateSpline(np.log10(tarr),
np.log10(Zarr), np.log10(L), kx=3, ky=3)
return self._L_of_Z_t[wave]
def Slope(self, zobs=None, tobs=None, spec=None, waves=None,
sfh=None, zarr=None, tarr=None, hist={}, idnum=None,
cam=None, rest_wave=None, band=None,
return_norm=False, filters=None, filter_set=None, dlam=20.,
method='linear', window=1, extras={}, picky=False, return_err=False):
"""
Compute slope in some wavelength range or using photometry.
Parameters
----------
zobs : int, float
Redshift of observation.
rest_wave: tuple
Rest-wavelength range in which slope will be computed (Angstrom).
dlam : int
Sample the spectrum with this wavelength resolution (Angstrom).
window : int
Can optionally operate on a smoothed version of the spectrum,
obtained by convolving with a boxcar window function of this width.
"""
assert (tobs is not None) or (zobs is not None)
if tobs is not None:
zobs = self.cosm.z_of_t(tobs * s_per_myr)
# If no camera supplied, operate directly on spectrum
if cam is None:
func = lambda xx, p0, p1: p0 * (xx / 1.)**p1
if waves is None:
waves = np.arange(rest_wave[0], rest_wave[1]+dlam, dlam)
owaves, oflux = self.ObserveSpectrum(zobs, spec=spec, waves=waves,
sfh=sfh, zarr=zarr, tarr=tarr, flux_units='Ang', hist=hist,
extras=extras, idnum=idnum, window=window)
rwaves = waves
ok = np.logical_and(rwaves >= rest_wave[0], rwaves <= rest_wave[1])
x = owaves[ok==1]
if oflux.ndim == 2:
batch_mode = True
y = oflux[:,ok==1].swapaxes(0, 1)
ma = np.max(y, axis=0)
sl = -2.5 * np.ones(ma.size)
guess = np.vstack((ma, sl)).T
else:
batch_mode = False
y = oflux[ok==1]
guess = np.array([oflux[np.argmin(np.abs(owaves - 1.))], -2.4])
else:
if filters is not None:
assert rest_wave is None, \
"Set rest_wave=None if filters are supplied"
if type(cam) not in [list, tuple]:
cam = [cam]
filt = []
xphot = []
dxphot = []
ycorr = []
for _cam in cam:
_filters, _xphot, _dxphot, _ycorr = \
self.Photometry(sfh=sfh, hist=hist, idnum=idnum, spec=spec,
cam=_cam, filters=filters, filter_set=filter_set, waves=waves,
dlam=dlam, tarr=tarr, tobs=tobs, extras=extras, picky=picky,
zarr=zarr, zobs=zobs, rest_wave=rest_wave, window=window)
filt.extend(list(_filters))
xphot.extend(list(_xphot))
dxphot.extend(list(_dxphot))
ycorr.extend(list(_ycorr))
# No matching filters? Return.
if len(filt) == 0:
if idnum is not None:
N = 1
elif sfh is not None:
N = sfh.shape[0]
else:
N = 1
if return_norm:
return -99999 * np.ones((N, 2))
else:
return -99999 * np.ones(N)
filt = np.array(filt)
xphot = np.array(xphot)
dxphot = np.array(dxphot)
ycorr = np.array(ycorr)
# Sort arrays in ascending wavelength
isort = np.argsort(xphot)
_x = xphot[isort]
_y = ycorr[isort]
# Recover flux to do power-law fit
xp, xm = dxphot.T
dx = xp + xm
# Need flux in units of A^-1
#dnphot = c / ((xphot-xm) * 1e-4) - c / ((xphot + xp) * 1e-4)
#dwdn = dx * 1e4 / dnphot
_dwdn = (_x * 1e4)**2 / (c * 1e8)
if rest_wave is not None:
r = _x * 1e4 / (1. + zobs)
ok = np.logical_and(r >= rest_wave[0], r <= rest_wave[1])
x = _x[ok==1]
else:
ok = np.ones_like(_x)
x = _x
# Be careful in batch mode!
if ycorr.ndim == 2:
batch_mode = True
_f = 10**(_y / -2.5) * flux_AB / _dwdn[:,None]
y = _f[ok==1]
ma = np.max(y, axis=0)
sl = -2.5 * np.ones(ma.size)
guess = np.vstack((ma, sl)).T
else:
batch_mode = False
_f = 10**(_y / -2.5) * flux_AB / _dwdn
y = _f[ok==1]
ma = np.max(y)
guess = np.array([ma, -2.])
if ok.sum() == 2 and self.pf['verbose']:
print("WARNING: Estimating slope at z={} from only two points: {}".format(zobs,
filt[isort][ok==1]))
##
# Fit a PL to points.
if method == 'fit':
if len(x) < 2:
if self.pf['verbose']:
print("Not enough points to estimate slope")
if batch_mode:
corr = np.ones(y.shape[1])
else:
corr = 1
if return_norm:
return -99999 * corr, -99999 * corr
else:
return -99999 * corr
if batch_mode:
N = y.shape[1]
popt = -99999 * np.ones((2, N))
pcov = -99999 * np.ones((2, 2, N))
for i in range(N):
if not np.any(y[:,i] > 0):
continue
try:
popt[:,i], pcov[:,:,i] = curve_fit(_powlaw, x, y[:,i],
p0=guess[i], maxfev=10000)
except RuntimeError:
popt[:,i], pcov[:,:,i] = -99999, -99999
else:
try:
popt, pcov = curve_fit(_powlaw, x, y, p0=guess, maxfev=10000)
except RuntimeError:
popt, pcov = -99999 * np.ones(2), -99999 * np.ones(2)
elif method == 'linear':
logx = np.log10(x)
logy = np.log10(y)
A = np.vstack([logx, np.ones(len(logx))]).T
if batch_mode:
N = y.shape[1]
popt = -99999 * np.ones((2, N))
pcov = -99999 * np.ones((2, 2, N))
for i in range(N):
popt[:,i] = np.linalg.lstsq(A, logy[:,i],
rcond=None)[0][-1::-1]
else:
popt = np.linalg.lstsq(A, logy, rcond=None)[0][-1::-1]
pcov = -99999 * np.ones(2)
elif method == 'diff':
assert cam is None, "Should only use to skip photometry."
# Remember that galaxy number is second dimension
logL = np.log(y)
logw = np.log(x)
if batch_mode:
# Logarithmic derivative = beta
beta = (logL[-1,:] - logL[0,:]) / (logw[-1,None] - logw[0,None])
else:
beta = (logL[-1] - logL[0]) / (logw[-1] - logw[0])
popt = np.array([-99999, beta])
else:
raise NotImplementedError('Unrecognized slope method: {}'.format(method))
if return_norm:
return popt
else:
if return_err:
return popt[1], np.sqrt(pcov[1,1])
else:
return popt[1]
def ObserveSpectrum(self, zobs, spec=None, sfh=None, waves=None,
flux_units='Hz', tarr=None, tobs=None, zarr=None, hist={},
idnum=None, window=1, extras={}, nthreads=1, load=True):
"""
Take an input spectrum and "observe" it at redshift z.
Parameters
----------
zobs : int, float
Redshift of observation.
waves : np.ndarray
Simulate spectrum at these rest wavelengths [Angstrom]
spec : np.ndarray
Specific luminosities in [erg/s/A]
Returns
-------
Observed wavelengths in microns, observed fluxes in erg/s/cm^2/Hz.
"""
if spec is None:
spec = self.Spectrum(waves, sfh=sfh, tarr=tarr, zarr=zarr,
zobs=zobs, tobs=None, hist=hist, idnum=idnum,
extras=extras, window=window, load=load)
dL = self.cosm.LuminosityDistance(zobs)
if waves is None:
waves = self.src.wavelengths
dwdn = self.src.dwdn
assert len(spec) == len(waves)
else:
#freqs = c / (waves / 1e8)
dwdn = waves**2 / (c * 1e8)
#tmp = np.abs(np.diff(waves) / np.diff(freqs))
#dwdn = np.concatenate((tmp, [tmp[-1]]))
# Flux at Earth in erg/s/cm^2/Hz
f = spec / (4. * np.pi * dL**2)
if flux_units == 'Hz':
pass
else:
f /= dwdn
owaves = waves * (1. + zobs) / 1e4
tau = self.OpticalDepth(zobs, owaves)
T = np.exp(-tau)
return owaves, f * T
def Photometry(self, spec=None, sfh=None, cam='wfc3', filters='all',
filter_set=None, dlam=20., rest_wave=None, extras={}, window=1,
tarr=None, zarr=None, waves=None, zobs=None, tobs=None, band=None,
hist={}, idnum=None, flux_units=None, picky=False, lbuffer=200.,
ospec=None, owaves=None, load=True):
"""
Just a wrapper around `Spectrum`.
Returns
-------
Tuple containing (in this order):
- Names of all filters included
- Midpoints of photometric filters [microns]
- Width of filters [microns]
- Apparent magnitudes corrected for filter transmission.
"""
assert (tobs is not None) or (zobs is not None)
if zobs is None:
zobs = self.cosm.z_of_t(tobs * s_per_myr)
# Might be stored for all redshifts so pick out zobs
if type(filters) == dict:
assert zobs is not None
filters = filters[round(zobs)]
# Get transmission curves
if cam in self.cameras.keys():
filter_data = self.cameras[cam]._read_throughputs(filter_set=filter_set,
filters=filters)
else:
# Can supply spectral windows, e.g., Calzetti+ 1994, in which case
# we assume perfect transmission but otherwise just treat like
# photometric filters.
assert type(filters) in [list, tuple, np.ndarray]
#print("Generating photometry from {} spectral ranges.".format(len(filters)))
wraw = np.array(filters)
x1 = wraw.min()
x2 = wraw.max()
x = np.arange(x1-1, x2+1, 1.) * 1e-4 * (1. + zobs)
# Note that in this case, the filter wavelengths are in rest-frame
# units, so we convert them to observed wavelengths before
# photometrizing everything.
filter_data = {}
for _window in filters:
lo, hi = _window
lo *= 1e-4 * (1. + zobs)
hi *= 1e-4 * (1. + zobs)
y = np.zeros_like(x)
y[np.logical_and(x >= lo, x <= hi)] = 1
mi = np.mean([lo, hi])
dx = np.array([hi - mi, mi - lo])
Tavg = 1.
filter_data[_window] = x, y, mi, dx, Tavg
all_filters = filter_data.keys()
# Figure out spectral range we need to model for these filters.
# Find bluest and reddest filters, set wavelength range with some
# padding above and below these limits.
lmin = np.inf
lmax = 0.0
ct = 0
for filt in filter_data:
x, y, cent, dx, Tavg = filter_data[filt]
# If we're only doing this for the sake of measuring a slope, we
# might restrict the range based on wavelengths of interest, i.e.,
# we may not use all the filters.
# Right now, will include filters as long as their center is in
# the requested band. This results in fluctuations in slope
# measurements, so to be more stringent set picky=True.
if rest_wave is not None:
if picky:
l = (cent - dx[1]) * 1e4 / (1. + zobs)
r = (cent + dx[0]) * 1e4 / (1. + zobs)
if (l < rest_wave[0]) or (r > rest_wave[1]):
continue
cent_r = cent * 1e4 / (1. + zobs)
if (cent_r < rest_wave[0]) or (cent_r > rest_wave[1]):
continue
lmin = min(lmin, cent - dx[1] * 1.2)
lmax = max(lmax, cent + dx[0] * 1.2)
ct += 1
# No filters in range requested
if ct == 0:
return [], [], [], []
# Here's our array of REST wavelengths
if waves is None:
# Convert from microns to Angstroms, undo redshift.
lmin = lmin * 1e4 / (1. + zobs)
lmax = lmax * 1e4 / (1. + zobs)
lmin = max(lmin, self.src.wavelengths.min())
lmax = min(lmax, self.src.wavelengths.max())
# Force edges to be multiples of dlam
l1 = lmin - lbuffer
l1 -= l1 % dlam
l2 = lmax + lbuffer
waves = np.arange(l1, l2+dlam, dlam)
# Get spectrum first.
if (spec is None) and (ospec is None):
spec = self.Spectrum(waves, sfh=sfh, tarr=tarr, tobs=tobs,
zarr=zarr, zobs=zobs, band=band, hist=hist,
idnum=idnum, extras=extras, window=window, load=load)
# Observed wavelengths in micron, flux in erg/s/cm^2/Hz
wave_obs, flux_obs = self.ObserveSpectrum(zobs, spec=spec,
waves=waves, extras=extras, window=window)
elif ospec is not None:
flux_obs = ospec
wave_obs = owaves
else:
raise ValueError('This shouldn\'t happen')
# Might be running over lots of galaxies
batch_mode = False
if flux_obs.ndim == 2:
batch_mode = True
# Convert microns to cm. micron * (m / 1e6) * (1e2 cm / m)
freq_obs = c / (wave_obs * 1e-4)
# Why do NaNs happen? Just nircam.
flux_obs[np.isnan(flux_obs)] = 0.0
# Loop over filters and re-weight spectrum
xphot = [] # Filter centroids
wphot = [] # Filter width
yphot_corr = [] # Magnitudes corrected for filter transmissions.
# Loop over filters, compute fluxes in band (accounting for
# transmission fraction) and convert to observed magnitudes.
for filt in all_filters:
x, T, cent, dx, Tavg = filter_data[filt]
if rest_wave is not None:
cent_r = cent * 1e4 / (1. + zobs)
if (cent_r < rest_wave[0]) or (cent_r > rest_wave[1]):
continue
# Re-grid transmission onto provided wavelength axis.
T_regrid = np.interp(wave_obs, x, T, left=0, right=0)
#func = interp1d(x, T, kind='cubic', fill_value=0.0,
# bounds_error=False)
#T_regrid = func(wave_obs)
#T_regrid = np.interp(np.log(wave_obs), np.log(x), T, left=0.,
# right=0)
# Remember: observed flux is in erg/s/cm^2/Hz
# Integrate over frequency to get integrated flux in band
# defined by filter.
if batch_mode:
integrand = -1. * flux_obs * T_regrid[None,:]
_yphot = np.sum(integrand[:,0:-1] * np.diff(freq_obs)[None,:],
axis=1)
else:
integrand = -1. * flux_obs * T_regrid
_yphot = np.sum(integrand[0:-1] * np.diff(freq_obs))
#_yphot = np.trapz(integrand, x=freq_obs)
corr = np.sum(T_regrid[0:-1] * -1. * np.diff(freq_obs), axis=-1)
xphot.append(cent)
yphot_corr.append(_yphot / corr)
wphot.append(dx)
xphot = np.array(xphot)
wphot = np.array(wphot)
yphot_corr = np.array(yphot_corr)
# Convert to magnitudes and return
return all_filters, xphot, wphot, -2.5 * np.log10(yphot_corr / flux_AB)
def Spectrum(self, waves, sfh=None, tarr=None, zarr=None, window=1,
zobs=None, tobs=None, band=None, idnum=None, units='Hz', hist={},
extras={}, load=True):
"""
This is just a wrapper around `Luminosity`.
"""
# Select single row of SFH array if `idnum` provided.
if sfh.ndim == 2 and idnum is not None:
sfh = sfh[idnum,:]
batch_mode = sfh.ndim == 2
time_series = (zobs is None) and (tobs is None)
# Shape of output array depends on some input parameters.
shape = []
if batch_mode:
shape.append(sfh.shape[0])
if time_series:
shape.append(tarr.size)
shape.append(len(waves))
# Do kappa up front?
pb = ProgressBar(waves.size, name='l(nu)', use=self.pf['progress_bar'])
pb.start()
##
# Can thread this calculation
##
if (self.pf['nthreads'] is not None):
try:
import pymp
have_pymp = True
except ImportError:
have_pymp = False
assert have_pymp, "Need pymp installed to run with nthreads!=None!"
pymp.config.num_threads = self.pf['nthreads']
if self.pf['verbose']:
print("Setting nthreads={} for spectral synthesis.".format(
self.pf['nthreads']))
spec = pymp.shared.array(shape, dtype='float64')
with pymp.Parallel(self.pf['nthreads']) as p:
for i in p.xrange(0, waves.size):
slc = (Ellipsis, i) if (batch_mode or time_series) else i
spec[slc] = self.Luminosity(wave=waves[i],
sfh=sfh, tarr=tarr, zarr=zarr, zobs=zobs, tobs=tobs,
band=band, hist=hist, idnum=idnum,
extras=extras, window=window, load=load)
pb.update(i)
else:
spec = np.zeros(shape)
for i, wave in enumerate(waves):
slc = (Ellipsis, i) if (batch_mode or time_series) else i
spec[slc] = self.Luminosity(wave=wave,
sfh=sfh, tarr=tarr, zarr=zarr, zobs=zobs, tobs=tobs,
band=band, hist=hist, idnum=idnum,
extras=extras, window=window, load=load)
pb.update(i)
pb.finish()
if units in ['A', 'Ang']:
#freqs = c / (waves / 1e8)
#tmp = np.abs(np.diff(waves) / np.diff(freqs))
#dwdn = np.concatenate((tmp, [tmp[-1]]))
dwdn = waves**2 / (c * 1e8)
spec /= dwdn
return spec
def Magnitude(self, wave=1600., sfh=None, tarr=None, zarr=None, window=1,
zobs=None, tobs=None, band=None, idnum=None, hist={}, extras={}):
L = self.Luminosity(wave=wave, sfh=sfh, tarr=tarr, zarr=zarr,
zobs=zobs, tobs=tobs, band=band, idnum=idnum, hist=hist,
extras=extras, window=window)
MAB = self.magsys.L_to_MAB(L, z=zobs)
return MAB
def _oversample_sfh(self, ages, sfh, i):
"""
Over-sample time axis while stellar populations are young if the time
resolution is worse than 1 Myr / grid point.
"""
batch_mode = sfh.ndim == 2
# Use 1 Myr time resolution for final stretch.
# final stretch is determined by `oversampling_below` attribute.
# This loop determines how many elements at the end of
# `ages` are within the `oversampling_below` zone.
ct = 0
while ages[-1-ct] < self.oversampling_below:
ct += 1
if ct + 1 == len(ages):
break
ifin = -1 - ct
ages_x = np.arange(ages[-1], ages[ifin], 1.)[-1::-1]
# `ages_x` is an array of ages at higher resolution than native data
# to-be-tagged on the end of supplied `ages`.
# Must augment ages and dt accordingly
_ages = np.hstack((ages[0:ifin], ages_x))
_dt = np.abs(np.diff(_ages) * 1e6)
if batch_mode:
xSFR = np.ones((sfh.shape[0], ages_x.size-1))
else:
xSFR = np.ones(ages_x.size-1)
# Must allow non-constant SFR within over-sampled region
# as it may be tens of Myr.
# Walk back from the end and fill in SFR
N = int((ages_x.size - 1) / ct)
for _i in range(0, ct):
if batch_mode:
slc = Ellipsis, slice(-1 * N * _i-1, -1 * N * (_i + 1) -1, -1)
else:
slc = slice(-1 * N * _i-1, -1 * N * (_i + 1) -1, -1)
if batch_mode:
_sfh_rs = np.array([sfh[:,-_i-2]]*N).T
xSFR[slc] = _sfh_rs * np.ones(N)[None,:]
else:
xSFR[slc] = sfh[-_i-2] * np.ones(N)
# Need to tack on the SFH at ages older than our
# oversampling approach kicks in.
if batch_mode:
if ct + 1 == len(ages):
_SFR = np.hstack((sfh[:,0][:,None], xSFR))
else:
_SFR = np.hstack((sfh[:,0:i+1][:,0:ifin+1], xSFR))
else:
if ct + 1 == len(ages):
_SFR = np.hstack((sfh[0], xSFR))
else:
_SFR = np.hstack((sfh[0:i+1][0:ifin+1], xSFR))
return _ages, _SFR
@property
def _cache_lum_ctr(self):
if not hasattr(self, '_cache_lum_ctr_'):
self._cache_lum_ctr_ = 0
return self._cache_lum_ctr_
def _cache_kappa(self, wave):
if not hasattr(self, '_cache_kappa_'):
self._cache_kappa_ = {}
if wave in self._cache_kappa_:
return self._cache_kappa_[wave]
return None
def _cache_lum(self, kwds):
"""
Cache object for spectral synthesis of stellar luminosity.
"""
if not hasattr(self, '_cache_lum_'):
self._cache_lum_ = {}
notok = -1
t1 = time.time()
# If we set order by hand, it greatly speeds things up because
# more likely than not, the redshift and wavelength are the only
# things that change and that's an easy logical check to do.
# Checking SFHs, histories, etc., is more expensive.
ok_keys = ('wave', 'zobs', 'tobs', 'idnum', 'sfh', 'tarr', 'zarr',
'window', 'band', 'hist', 'extras', 'load')
ct = -1
# Loop through keys to do more careful comparison for unhashable types.
#all_waves = self._cache_lum_waves_
all_keys = self._cache_lum_.keys()
# Search in reverse order since the keys often represent different
# wavelengths, which are generated in ascending order.
for keyset in all_keys:
ct += 1
# Remember: keyset is just a number.
kw, data = self._cache_lum_[keyset]
# Check wavelength first. Most common thing.
# If we're not being as careful as possible, retrieve cached
# result so long as wavelength and zobs match requested values.
# This should only be used when SpectralSynthesis is summoned
# internally! Likely to lead to confusing behavior otherwise.
if (self.careful_cache == 0) and ('wave' in kw) and ('zobs' in kw):
if (kw['wave'] == kwds['wave']) and (kw['zobs'] == kwds['zobs']):
notok = 0
break
notok = 0
# Loop over cached keywords, compare to those supplied.
for key in ok_keys:
if key not in kwds:
notok += 1
break
#if isinstance(kw[key], collections.Hashable):
# if kwds[key] == kw[key]:
# continue
# else:
# notok += 1
# break
#else:
# For unhashable types, must work on case-by-case basis.
if type(kwds[key]) != type(kw[key]):
notok += 1
break
elif type(kwds[key]) == np.ndarray:
if np.array_equal(kwds[key], kw[key]):
continue
else:
# This happens when, e.g., we pass SFH by hand.
notok += 1
break
elif type(kwds[key]) == dict:
if kwds[key] == kw[key]:
continue
else:
#for _key in kwds[key]:
# print(_key, kwds[key][_key] == kw[key][_key])
#
#raw_input('<enter>')
notok += 1
break
else:
if kwds[key] == kw[key]:
continue
else:
notok += 1
break
if notok > 0:
#print(keyset, key)
continue
# If we're here, load this thing.
break
t2 = time.time()
if notok < 0:
return kwds, None
elif notok == 0:
if (self.pf['verbose'] and self.pf['debug']):
print("Loaded from cache! Took N={} iterations, {} sec to find match".format(ct, t2 - t1))
# Recall that this is (kwds, data)
return self._cache_lum_[keyset]
else:
return kwds, None
def Luminosity(self, wave=1600., sfh=None, tarr=None, zarr=None, window=1,
zobs=None, tobs=None, band=None, idnum=None, hist={}, extras={},
load=True, use_cache=True, energy_units=True):
"""
Synthesize luminosity of galaxy with given star formation history at a
given wavelength and time.
Parameters
----------
sfh : np.ndarray
Array of SFRs. If 1-D, should be same shape as time or redshift
array. If 2-D, first dimension should correspond to galaxy number
and second should be time.
tarr : np.ndarray
Array of times in ascending order [Myr].
zarr : np.ndarray
Array of redshift in ascending order (so decreasing time). Only
supply if not passing `tarr` argument.
wave : int, float
Wavelength of interest [Angstrom]
window : int
Average over interval about `wave`. [Angstrom]
zobs : int, float
Redshift of observation.
tobs : int, float
Time of observation (will be computed self-consistently if `zobs`
is supplied).
hist : dict
Extra information we may need, e.g., metallicity, dust optical
depth, etc. to compute spectrum.
Returns
-------
Luminosity at wavelength=`wave` in units of erg/s/Hz.
"""
setup_1 = (sfh is not None) and \
((tarr is not None) or (zarr is not None))
setup_2 = hist != {}
do_all_time = False
if (tobs is None) and (zobs is None):
do_all_time = True
#assert (tobs is not None) or (zobs is not None), \
# "Must supply time or redshift of observation, `tobs` or `zobs`!"
assert setup_1 or setup_2
if setup_1:
assert (sfh is not None)
elif setup_2:
assert ('z' in hist) or ('t' in hist), \
"`hist` must contain redshifts, `z`, or times, `t`."
sfh = hist['SFR'] if 'SFR' in hist else hist['sfr']
if 'z' in hist:
zarr = hist['z']
else:
tarr = hist['t']
kw = {'sfh':sfh, 'zobs':zobs, 'tobs':tobs, 'wave':wave, 'tarr':tarr,
'zarr':zarr, 'band':band, 'idnum':idnum, 'hist':hist,
'extras':extras, 'window': window}
if load:
_kwds, cached_result = self._cache_lum(kw)
else:
self._cache_lum_ = {}
cached_result = None
if cached_result is not None:
return cached_result
if sfh.ndim == 2 and idnum is not None:
sfh = sfh[idnum,:]
if 'Z' in hist:
Z = hist['Z'][idnum,:]
# Don't necessarily need Mh here.
if 'Mh' in hist:
Mh = hist['Mh'][idnum,:]
else:
if 'Mh' in hist:
Mh = hist['Mh']
if 'Z' in hist:
Z = hist['Z']
# If SFH is 2-D it means we're doing this for multiple galaxies at once.
# The first dimension will be number of galaxies and second dimension
# is time/redshift.
batch_mode = sfh.ndim == 2
# Parse time/redshift information
if tarr is not None:
zarr = self.cosm.z_of_t(tarr * s_per_myr)
else:
assert zarr is not None, "Must supply either `tarr` or `zarr`!"
tarr = self.cosm.t_of_z(zarr) / s_per_myr
assert np.all(np.diff(tarr) > 0), \
"Must supply SFH in time-ascending (i.e., redshift-descending) order!"
# Convert tobs to redshift.
if tobs is not None:
zobs = self.cosm.z_of_t(tobs * s_per_myr)
if type(tobs) == np.ndarray:
assert (tobs.min() >= tarr.min()) and (tobs.max() <= tarr.max()), \
"Requested times of observation (`tobs={}-{}`) not in supplied range ({}, {})!".format(tobs.min(),
tobs.max(), tarr.min(), tarr.max())
else:
assert tarr.min() <= tobs <= tarr.max(), \
"Requested time of observation (`tobs={}`) not in supplied range ({}, {})!".format(tobs,
tarr.min(), tarr.max())
# Prepare slice through time-axis.
if zobs is None:
slc = Ellipsis
izobs = None
else:
# Need to be sure that we grab a grid point exactly at or just
# below the requested redshift (?)
izobs = np.argmin(np.abs(zarr - zobs))
if zarr[izobs] > zobs:
izobs += 1
if batch_mode:
#raise NotImplemented('help')
# Need to slice over first dimension now...
slc = Ellipsis, slice(0, izobs+1)
else:
slc = slice(0, izobs+1)
if not (zarr.min() <= zobs <= zarr.max()):
if batch_mode:
return np.ones(sfh.shape[0]) * -99999
else:
return -99999
fill = np.zeros(1)
tyr = tarr * 1e6
dt = np.hstack((np.diff(tyr), fill))
# Figure out if we need to over-sample the grid we've got to more
# accurately solve for young stellar populations.
oversample = self.oversampling_enabled and (dt[-2] > 1.01e6)
# Used to also require zobs is not None. Why?
##
# Done parsing time/redshift
# Is this luminosity in some bandpass or monochromatic?
if band is not None:
# Will have been supplied in Angstroms
b = h_p * c / (np.array(band) * 1e-8) / erg_per_ev
Loft = self.src.IntegratedEmission(b[1], b[0],
energy_units=energy_units)
# Need to get Hz^-1 units back
#db = b[0] - b[1]
#Loft = Loft / (db * erg_per_ev / h_p)
#raise NotImplemented('help!')
else:
Loft = self.src.L_per_sfr_of_t(wave=wave, avg=window)
assert energy_units
#print("Synth. Lum = ", wave, window)
#
# Setup interpolant for luminosity as a function of SSP age.
Loft[Loft == 0] = tiny_lum
_func = interp1d(np.log(self.src.times), np.log(Loft),
kind=self.pf['pop_synth_age_interp'], bounds_error=False,
fill_value=(np.log(Loft[0]), np.log(Loft[-1])))
# Extrapolate linearly at times < 1 Myr
_m = (Loft[1] - Loft[0]) / (self.src.times[1] - self.src.times[0])
L_small_t = lambda age: _m * age + Loft[0]
if not (self.src.pf['source_aging'] or self.src.pf['source_ssp']):
L_asympt = np.exp(_func(np.log(self.src.pf['source_tsf'])))
#L_small_t = lambda age: Loft[0]
# Extrapolate as PL at t < 1 Myr based on first two
# grid points
#m = np.log(Loft[1] / Loft[0]) \
# / np.log(self.src.times[1] / self.src.times[0])
#func = lambda age: np.exp(m * np.log(age) + np.log(Loft[0]))
#if zobs is None:
Lhist = np.zeros(sfh.shape)
#if hasattr(self, '_sfh_zeros'):
# Lhist = self._sfh_zeros.copy()
#else:
# Lhist = np.zeros_like(sfh)
# self._sfh_zeros = Lhist.copy()
#else:
# pass
# Lhist will just get made once. Don't need to initialize
##
# Loop over the history of object(s) and compute the luminosity of
# simple stellar populations of the corresponding ages (relative to
# zobs).
##
# Start from initial redshift and move forward in time, i.e., from
# high redshift to low.
for i, _tobs in enumerate(tarr):
# If zobs is supplied, we only have to do one iteration
# of this loop. This is just a dumb way to generalize this function
# to either do one redshift or return a whole history.
if not do_all_time:
if (zarr[i] > zobs):
continue
##
# Life is easy for constant SFR models
if not (self.src.pf['source_aging'] or self.src.pf['source_ssp']):
if not do_all_time:
Lhist = L_asympt * sfh[:,i]
break
raise NotImplementedError('does this happen?')
Lhist[:,i] = L_asympt * sfh[:,i]
continue
# If we made it here, it's time to integrate over star formation
# at previous times. First, retrieve ages of stars formed in all
# past star forming episodes.
ages = tarr[i] - tarr[0:i+1]
# Note: this will be in order of *descending* age, i.e., the
# star formation episodes furthest in the past are first in the
# array.
# Recall also that `sfh` contains SFRs for all time, so any
# z < zobs will contain zeroes, hence all the 0:i+1 slicing below.
# Treat metallicity evolution? If so, need to grab luminosity as
# function of age and Z.
if self.pf['pop_enrichment']:
assert batch_mode
logA = np.log10(ages)
logZ = np.log10(Z[:,0:i+1])
logL_at_wave = self.L_of_Z_t(wave)
L_per_msun = np.zeros_like(logZ)
for j in range(logZ.shape[0]):
L_per_msun[j,:] = 10**logL_at_wave(logA, logZ[j,:],
grid=False)
# erg/s/Hz
if batch_mode:
Lall = L_per_msun[:,0:i+1] * sfh[:,0:i+1]
else:
Lall = L_per_msun[0:i+1] * sfh[0:i+1]
if oversample:
raise NotImplementedError('Oversampling with metallicity evolution is not implemented.')
else:
_dt = dt[0:i]
_ages = ages
else:
##
# If time resolution is >= 2 Myr, over-sample final interval.
if oversample and len(ages) > 1:
if batch_mode:
_ages, _SFR = self._oversample_sfh(ages, sfh[:,0:i+1], i)
else:
_ages, _SFR = self._oversample_sfh(ages, sfh[0:i+1], i)
_dt = np.abs(np.diff(_ages) * 1e6)
# `_ages` is in order of old to young.
# Now, compute luminosity at expanded ages.
L_per_msun = np.exp(_func(np.log(_ages)))
# Interpolate linearly at t < 1 Myr
L_per_msun[_ages < 1] = L_small_t(_ages[_ages < 1])
#L_per_msun[_ages < 10] = 0.
# erg/s/Hz/yr
if batch_mode:
Lall = L_per_msun * _SFR
else:
Lall = L_per_msun * _SFR
else:
L_per_msun = np.exp(_func(np.log(ages)))
#L_per_msun = np.exp(np.interp(np.log(ages),
# np.log(self.src.times), np.log(Loft),
# left=np.log(Loft[0]), right=np.log(Loft[-1])))
_dt = dt[0:i]
# Fix early time behavior
L_per_msun[ages < 1] = L_small_t(ages[ages < 1])
_ages = ages
# erg/s/Hz/yr
if batch_mode:
Lall = L_per_msun * sfh[:,0:i+1]
else:
Lall = L_per_msun * sfh[0:i+1]
# Correction for IMF sampling (can't use SPS).
#if self.pf['pop_sample_imf'] and np.any(bursty):
# life = self._stars.tab_life
# on = np.array([life > age for age in ages])
#
# il = np.argmin(np.abs(wave - self._stars.wavelengths))
#
# if self._stars.aging:
# raise NotImplemented('help')
# lum = self._stars.tab_Ls[:,il] * self._stars.dldn[il]
# else:
# lum = self._stars.tab_Ls[:,il] * self._stars.dldn[il]
#
# # Need luminosity in erg/s/Hz
# #print(lum)
#
# # 'imf' is (z or age, mass)
#
# integ = imf[bursty==1,:] * lum[None,:]
# Loft = np.sum(integ * on[bursty==1], axis=1)
#
# Lall[bursty==1] = Loft
# Apply local reddening
#tau_bc = self.pf['pop_tau_bc']
#if tau_bc > 0:
#
# corr = np.ones_like(_ages) * np.exp(-tau_bc)
# corr[_ages > self.pf['pop_age_bc']] = 1
#
# Lall *= corr
###
## Integrate over all times up to this tobs
if batch_mode:
# Should really just np.sum here...using trapz assumes that
# the SFH is a smooth function and not a series of constant
# SFRs. Doesn't really matter in practice, though.
if not do_all_time:
Lhist = np.trapz(Lall, dx=_dt, axis=1)
else:
Lhist[:,i] = np.trapz(Lall, dx=_dt, axis=1)
else:
if not do_all_time:
Lhist = np.trapz(Lall, dx=_dt)
else:
Lhist[i] = np.trapz(Lall, dx=_dt)
##
# In this case, we only need one iteration of this loop.
##
if not do_all_time:
break
##
# Redden spectra
##
if 'Sd' in hist:
# Redden away!
if np.any(hist['Sd'] > 0) and (band is None):
assert 'kappa' in extras
#_kappa = self._cache_kappa(wave)
#if _kappa is None:
kappa = extras['kappa'](wave=wave, Mh=Mh, z=zobs)
#self._cache_kappa_[wave] = kappa
#else:
# kappa = _kappa
kslc = idnum if idnum is not None else Ellipsis
if idnum is not None:
Sd = hist['Sd'][kslc]
if type(hist['fcov']) in [int, float, np.float64]:
fcov = hist['fcov']
else:
fcov = hist['fcov'][kslc]
rand = hist['rand'][kslc]
else:
Sd = hist['Sd']
fcov = hist['fcov']
rand = hist['rand']
tau = kappa * Sd
clear = rand > fcov
block = ~clear
if idnum is not None:
Lout = Lhist * np.exp(-tau[izobs])
#if self.pf['pop_dust_holes'] == 'big':
# Lout = Lhist * clear[izobs] \
# + Lhist * np.exp(-tau[izobs]) * block[izobs]
#else:
# Lout = Lhist * (1. - fcov[izobs]) \
# + Lhist * fcov[izobs] * np.exp(-tau[izobs])
else:
Lout = Lhist * np.exp(-tau[:,izobs])
#if self.pf['pop_dust_holes'] == 'big':
# print(Lhist.shape, clear.shape, tau.shape, block.shape)
# Lout = Lhist * clear[:,izobs] \
# + Lhist * np.exp(-tau[:,izobs]) * block[:,izobs]
#else:
# Lout = Lhist * (1. - fcov[:,izobs]) \
# + Lhist * fcov[:,izobs] * np.exp(-tau[:,izobs])
else:
Lout = Lhist.copy()
else:
Lout = Lhist.copy()
#del Lhist, tau, Lall
#gc.collect()
##
# Sum luminosity of parent halos along merger tree
##
# Don't change shape, just zero-out luminosities of
# parent halos after they merge?
if hist is not None:
do_mergers = self.pf['pop_mergers'] and batch_mode
if 'children' in hist:
if (hist['children'] is not None) and do_mergers:
child_iz, child_iM = hist['children'].T
is_central = child_iM == -1
if np.all(is_central == 1):
pass
else:
print("Looping over {} halos...".format(sfh.shape[0]))
pb = ProgressBar(sfh.shape[0], use=self.pf['progress_bar'])
pb.start()
# Loop over all 'branches'
for i in range(sfh.shape[0]):
# This means the i'th halo is alive and well at the
# final redshift, i.e., it's a central
if is_central[i]:
continue
pb.update(i)
# At this point, need to figure out which child halos
# to dump mass and SFH into...
# Be careful with redshift array.
# We're now working in ascending time, reverse redshift,
# so we need to correct the child iz values. We've also
# chopped off elements at z < zobs.
#iz = Nz0 - child_iz[i]
# This `iz` should not be negative despite us having
# chopped up the redshift array since getting to this
# point in the loop is predicated on being a parent of
# another halo, i.e., not surviving beyond this redshift.
# Lout is just 1-D at this point, i.e., just luminosity
# *now*.
# Add luminosity to child halo. Zero out luminosity of
# parent to avoid double counting. Note that nh will
# also have been zeroed out but we're just being careful.
Lout[child_iM[i]] += 1 * Lout[i]
Lout[i] = 0.0
pb.finish()
##
# Will be unhashable types so just save to a unique identifier
##
if use_cache:
self._cache_lum_[self._cache_lum_ctr] = kw, Lout
self._cache_lum_ctr_ += 1
# Get outta here.
return Lout
```
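For orientation, the snippet below reproduces the simplest slope estimator used by `Slope` above (the `method='diff'` branch): a logarithmic finite difference between the bluest and reddest flux samples. The wavelengths and fluxes are invented for illustration.
```python
# Standalone illustration of the beta estimate behind Slope(method='diff'):
# a logarithmic derivative between the first and last flux samples.
import numpy as np

wave = np.array([1600., 2500.])        # rest wavelengths [Angstrom]
flux = np.array([2.0e-19, 1.4e-19])    # f_lambda at those wavelengths (made up)

beta = (np.log(flux[-1]) - np.log(flux[0])) \
    / (np.log(wave[-1]) - np.log(wave[0]))
print("UV slope beta ~ {:.2f}".format(beta))   # about -0.8 for these numbers
```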
#### File: ares/util/Warnings.py
```python
import sys, os
import numpy as np
import sys, textwrap, os
from .PrintInfo import twidth, line, tabulate
ARES = os.getenv('ARES')
have_ARES_env = ARES is not None
separator = '|'*twidth
separator2 = '-'*twidth
dt_msg = 'WARNING: something wrong with the time-step.'
gen_msg = 'WARNING: something wrong with solver.'
def dt_error(grid, z, q, dqdt, new_dt, cell, method, msg=dt_msg):
print("")
print(line(separator))
print(line(msg))
print(line(separator))
print(line(separator2))
if new_dt <= 0:
print(line("current dt : {0:.4e}".format(new_dt)))
else:
print(line("current dt : NaN or inf"))
print(line(separator2))
print(line("method : {!s}".format(method)))
print(line("cell # : {}".format(cell)))
if z is not None:
print(line("redshift : {0:.4g}".format(z)))
print(line(separator2))
cols = ['value', 'derivative']
rows = []
data = []
for i in range(len(grid.qmap)):
name = grid.qmap[i]
rows.append(name)
data.append([q[cell][i], dqdt[cell][i]])
# Print quantities and their rates of change
tabulate(data, rows, cols, cwidth=12)
print(line(separator2))
print(line(separator))
print("")
sys.exit(1)
def solver_error(grid, z, q, dqdt, new_dt, cell, method, msg=gen_msg):
dt_error(grid, z, q, dqdt, new_dt, cell, method, msg=msg)
tab_warning = \
"""
WARNING: must supply redshift_bins or tau_table to compute the X-ray background
flux on-the-fly."""
wrong_tab_type = \
"""
WARNING: Supplied tau_table does not have logarithmically spaced redshift bins!
"""
hmf_no_tab = \
"""
No halo mass function table found. Run glorb/examples/generate_hmf_tables.py
to create a lookup table, then, either set an environment variable $ARES that
points to your glorb install directory, or supply the path to the resulting
table by hand via the hmf_table parameter. You may also want to check out
https://bitbucket.org/mirochaj/glorb/Downloads for standard HMF tables.
"""
lf_constraints = \
"""
WARNING: The contents of `pop_constraints` will override the values of
`pop_lf_Mstar`, `pop_lf_pstar`, and `pop_lf_alpha`.
"""
def not_a_restart(prefix, has_burn):
print("")
print(line(separator))
print(line("WARNING: This doesn't look like a restart:"))
print(line("{!s}.chain.pkl is empty!".format(prefix)))
if not has_burn:
print(line("No burn-in data found. Continuing on as if from scratch."))
else:
print(line("Burn-in data found. Restarting from end of burn-in."))
print(line(separator))
def tau_tab_z_mismatch(igm, zmin_ok, zmax_ok, ztab):
print("")
print(line(separator))
print(line('WARNING: optical depth table shape mismatch (in redshift)'))
print(line(separator))
if type(igm.tabname) is dict:
which = 'dict'
else:
which = 'tab'
print(line("found : {!s}".format(\
igm.tabname[igm.tabname.rfind('/')+1:])))
zmax_pop = min(igm.pf['pop_zform'], igm.pf['first_light_redshift'])
print(line("zmin (pf) : {0:g}".format(igm.pf['final_redshift'])))
print(line("zmin ({0}) : {1:g}".format(which, ztab.min())))
print(line("zmax (pf) : {0:g}".format(zmax_pop)))
print(line("zmax ({0}) : {1:g}".format(which, ztab.max())))
if not zmin_ok:
print(line(("this is OK : we'll transition to an on-the-fly tau " +\
"calculator at z={0:.2g}").format(ztab.min())))
if (0 < igm.pf['EoR_xavg'] < 1):
print(line((" : or whenever x > {0:.1e}, whichever " +\
"comes first").format(igm.pf['EoR_xavg'])))
print(line(separator))
print("")
def tau_tab_E_mismatch(pop, tabname, Emin_ok, Emax_ok, Etab):
print("")
print(line(separator))
print(line('WARNING: optical depth table shape mismatch (in photon ' +\
'energy)'))
print(line(separator))
if type(tabname) is dict:
which = 'dict'
else:
which = 'tab'
print(line("found : {!s}".format(\
tabname[tabname.rfind('/')+1:])))
print(line("Emin (pf) : {0:g}".format(pop.pf['pop_Emin'])))
print(line("Emin ({0}) : {1:g}".format(which, Etab.min())))
print(line("Emax (pf) : {0:g}".format(pop.pf['pop_Emax'])))
print(line("Emax ({0}) : {1:g}".format(which, Etab.max())))
if Etab.min() < pop.pf['pop_Emin']:
print(line(("this is OK : we'll discard E < {0:.2e} eV entries in " +\
"table").format(pop.pf['pop_Emin'])))
if Etab.max() > pop.pf['pop_Emax']:
print(line(("this is OK : we'll discard E > {0:.2e} eV entries in " +\
"table").format(pop.pf['pop_Emax'])))
print(line(separator))
def no_tau_table(urb):
print("")
print(line(separator))
print(line('WARNING: no optical depth table found'))
print(line(separator))
print(line("looking for : {!s}".format(urb.tabname)))
if urb.pf['tau_prefix'] is not None:
print(line("in : {!s}".format(urb.pf['tau_prefix'])))
elif ARES is not None:
print(line("in : {!s}/input/optical_depth".format(ARES)))
else:
print(line("in : nowhere! set $ARES or tau_prefix"))
print(line(separator))
print(line("Generating a new table will take 5-10 minutes..."))
print(line(separator))
def negative_SFRD(z, Tmin, fstar, dfcolldt, sfrd):
print("")
print(line(separator))
print(line('ERROR (SFRD < 0)'))
print(line(separator))
print(line("z : {0:.3g}".format(z)))
print(line("Tmin : {0:.3e}".format(Tmin)))
print(line("fstar : {0:.3e}".format(fstar)))
print(line("dfcoll / dt : {0:.3e}".format(dfcolldt)))
print(line("SFRD : {0:.3e}".format(sfrd)))
print(line(separator))
def tau_quad(igm):
# Placeholder diagnostic: the original body duplicated negative_SFRD and
# referenced undefined variables, so just report the failure generically.
print("")
print(line(separator))
print(line('ERROR: optical depth integration (tau_quad) failed.'))
print(line(separator))
def missing_hmf_tab(hmf):
print("")
print(line(separator))
print(line('WARNING: Could not find supplied hmf table.'))
print(line(separator))
print(line('Was looking for:'))
print(line(''))
print(line(' {!s}'.format(hmf.pf['hmf_table'])))
print(line(''))
print(line('Will search for a suitable replacement in:'))
print(line(''))
print(line(' {!s}/input/hmf'.format(ARES)))
print(line(''))
print(line(separator))
def no_hmf(hmf):
print("")
print(line(separator))
print(line('ERROR: Cannot generate halo mass function'))
print(line(separator))
if not have_ARES_env:
s = \
"""
It looks like you have not yet set the ARES environment variable,
which is needed to locate various input files. Make sure to source
your .bashrc or .cshrc (or equivalent) when finished!
"""
else:
s = \
"""
It looks like you have set the ARES environment variable. Is it
correct? Have you sourced your .bashrc or .cshrc (or equivalent) to
ensure that it is defined?
"""
try:
from hmf import MassFunction
have_hmf = True
except ImportError:
have_hmf = False
try:
import pycamb
have_pycamb = True
except ImportError:
have_pycamb = False
if not (have_pycamb and have_hmf):
s = \
"""
If you've made no attempt to use non-default cosmological or HMF
parameters, it could just be that you forgot to run the remote.py script,
which will download a default HMF lookup table.
If you'd like to generate halo mass function lookup tables of your
own, e.g., using fits other than the Sheth-Tormen form, or with
non-default cosmological parameters, you'll need to install hmf and
pycamb.
"""
dedented_s = textwrap.dedent(s).strip()
snew = textwrap.fill(dedented_s, width=twidth)
snew_by_line = snew.split('\n')
for l in snew_by_line:
print(line(l))
if not (have_pycamb and have_hmf):
print(line(''))
print(line('It looks like you\'re missing both hmf and pycamb.'))
elif not have_pycamb:
print(line(''))
print(line('It looks like you\'re missing pycamb.'))
elif not have_hmf:
print(line(''))
print(line('It looks like you\'re missing hmf.'))
print(line(separator))
modelgrid_loadbalance = ""
def no_lya_warning(pop):
print("")
print(line(separator))
print(line("WARNING: pop_Emin{} is just blue-ward of Ly-a.".format(pop.id_num)))
print(line("Reset to E_LyA (or just below) to ensure non-zero Ly-a background."))
print(line(separator))
print("")
```
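All of the helpers above follow the same pattern: frame a message between the `|`/`-` separators using `line()` from `PrintInfo`. A toy imitation of that banner style (not the actual `PrintInfo.line`, whose padding is defined elsewhere) is sketched below.
```python
# Toy imitation of the boxed-warning style used throughout Warnings.py.
twidth = 74
separator = '|' * twidth
separator2 = '-' * twidth

def line(s=''):
    # Pad the message into a fixed-width box bounded by '|' characters.
    return '|' + s.ljust(twidth - 2) + '|'

print(separator)
print(line(' WARNING: something wrong with the time-step.'))
print(separator2)
print(line(' current dt : {0:.4e}'.format(-1.2e-3)))
print(separator)
```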
#### File: input/litdata/mesinger2016.py
```python
import os
import numpy as np
_input = os.getenv('ARES') + '/input/eos'
def load(model='faint_galaxies'):
"""
Assumes you downloaded the raw EoS data into $ARES/input/eos.
"""
k = []
z = []
ps = []
QHII = []
dTb = []
for fn in os.listdir('%s/EoS_%s' % (_input, model)):
if not fn.startswith('ps_no_halos'):
continue
_z = float(fn[13:19])
_dTb = float(fn[fn.index('aveTb')+5:fn.index('aveTb')+11])
_QHII = float(fn[fn.index('nf')+2:fn.index('nf')+10])
z.append(_z)
dTb.append(_dTb)
QHII.append(1. - _QHII)
x, y, err = np.loadtxt('%s/EoS_%s/%s' % (_input,model,fn), unpack=True)
k.append(x)
ps.append(y)
z = np.array(z)
s = np.argsort(z)
dTb = np.array(dTb)[s]
k = np.array(k)[s]
ps = np.array(ps)[s]
QHII = np.array(QHII)[s]
return {'z': z[s], 'k': k[0], 'ps_21_dl': ps, 'dTb': dTb, 'Qi': QHII}
```
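Assuming the raw EoS data have been unpacked into `$ARES/input/eos` as the docstring requires, the loader can be exercised as follows (output formatting is illustrative; the brightness temperatures are in mK per the 21cmFAST convention).
```python
# Illustrative use of load(); requires the EoS data under $ARES/input/eos.
data = load(model='faint_galaxies')

# Mean ionized fraction and 21-cm brightness temperature vs. redshift.
for z, Qi, dTb in zip(data['z'], data['Qi'], data['dTb']):
    print("z={:6.2f}  Q_HII={:6.3f}  dTb={:8.2f}".format(z, Qi, dTb))

k = data['k']             # wavenumbers, common to all redshifts
ps = data['ps_21_dl'][0]  # 21-cm power spectrum at the lowest redshift
```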
#### File: input/litdata/ueda2014.py
```python
import numpy as np
from ueda2003 import _evolution_factor_pde, _evolution_factor_ldde, \
_DoublePowerLaw
#-------------------------------------------------
qsolf_LDDE2_hardpars = \
{
'A': 10**-6 *.70**3 * 2.91,
'loglstar': 10**43.97,
'gamma1': 0.96,
'gamma2': 2.71,
'p1': 4.78,
'p2': -1.5,
'p3': -6.2,
'beta1': 0.84,
'zstar': 1.86,
'zstarc2': 3.0,
'logLa1': 10**44.61,
'logLa2': 10**45.67,#???
'alpha1': 0.29,
'alpha2': -0.1
}
qsolf_LDDE2_harderr = \
{
'A_err': 0.07,
'loglstar_err': 10**0.06,
'gamma1_err': 0.04,
'gamma2_err': 0.09,
'p1_err': 0.16,
'p2_err': 0,
'p3_err': 0,
'beta1_err': 0.18,
'zstar_err': 0.07,
'zstarc2_err': 0,
'logLa_err': 10**0.07,
'logLa2_err': 0,
'alpha_err': 0.02,
'alpha2_err': 0
}
def _zc_of_L(L, **kwargs):
"""
Compute cutoff redshift for luminosity-dependent density evolution.
"""
La = 10**kwargs['logLa']
if L < La:
zc_ast = kwargs['zc'] * (L / La)**kwargs['alpha']
elif L >= La:
zc_ast = kwargs['zc']
return zc_ast
def _evolution_factor(z, **kwargs):
if z < kwargs['zc1']:
eofz = (1. + z)**kwargs['p1']
elif kwargs['zc1'] < z < kwargs['zc2']:
eofz = (1. + kwargs['zc1'])**kwargs['p1'] \
* ((1. + z) / (1. + kwargs['zc1']))**kwargs['p2']
else:
eofz = (1. + kwargs['zc1'])**kwargs['p1'] \
* ((1. + kwargs['zc2']) / (1+kwargs['zc1']))**kwargs['p2'] \
* ((1. + z) / (1. + kwargs['zc2']))**kwargs['p3']
return eofz
def _evolution_factor_ldde(z, L, **kwargs):
try:
kw = kwargs.copy()
for i in range(1, 2):
kw['zc'] = kwargs['zc{}'.format(i)]
kwargs['zc{}'.format(i)] = _zc_of_L(L, **kw)
eofz = _evolution_factor(z, **kwargs)
except ValueError:
eofz = np.zeros_like(L)
zcarr = np.array([_zc_of_L(LL, **kwargs) for LL in L])
for i, zcval in enumerate(zcarr):
kwargs['zc'] = zcval
eofz[i] = _evolution_factor_pde(z, **kwargs)
return eofz
#-------------------------------------------------
def randomsamples(samples, K = None, loglstar = None, \
gamma1 = None, gamma2 = None, p1 = None, p2 = None,\
p3 = None, beta1 = None, zstar = None, zstarc2 = None,
logLa = None, logLa2 = None, alpha = None, alpha2 = None,\
K_err = None, loglstar_err = None, gamma1_err = None,
gamma2_err = None, p1_err = None, p2_err = None, p3_err = None, \
beta1_err = None, zstar_err = None, zstarc2_err = None,\
logLa_err = None, logLa2_err = None, alpha_err = None, \
alpha2_err = None, **kwargs):
randomsamples = []
for i in range(samples):
randomsample = {
#'K': np.random.normal(K, K_err, samples),
'A': 10**-6 *.70**3 * 2.91,
'loglstar': np.random.normal(loglstar, loglstar_err, samples)[i],\
'gamma1': np.random.normal(gamma1, gamma1_err, samples)[i],\
'gamma2': np.random.normal(gamma2, gamma2_err, samples)[i],\
'p1': np.random.normal(p1, p1_err, samples)[i],\
'p2': -1.5,\
'p3': -6.2,\
'beta1': np.random.normal(beta1, beta1_err, samples)[i],\
'zstar': np.random.normal(zstar, zstar_err, samples)[i],\
'zstarc2': 3.0,\
'logLa': np.random.normal(logLa, logLa_err, samples)[i],\
'logLa2': 10**45.67,\
'alpha': np.random.normal(alpha, alpha_err, samples)[i],\
'alpha2': -0.1\
}
randomsamples.append(randomsample)
return randomsamples
#-------------------------------------------------
def LuminosityFunction_LDDE(Lx, z, loglstar = None, A = None, gamma1 = None, gamma2 = None, p1 = None, p2 = None,\
p3 = None, beta1 = None, zstar = None, zstarc2 = None, logLa = None, logLa2 = None, alpha = None, alpha2 = None, **kwargs):
if Lx <= logLa:
zc1 = zstar*(Lx / logLa)**alpha
elif Lx > logLa:
zc1 = zstar
if Lx <= logLa2:
zc2 = zstarc2*(Lx / logLa2)**alpha2
elif Lx > logLa2:
zc2 = zstarc2
##################################################
if z <= zc1:
ex = (1+z)**p1
elif zc1 < z <= zc2:
ex = (1+zc1)**p1*((1+z)/(1+zc1))**p2
elif z > zc2:
ex = (1+zc1)**p1*((1+zc2)/(1+zc1))**p2*((1+z)/(1+zc2))**p3
return A * ((Lx / loglstar)**gamma1 + (Lx / loglstar)**gamma2)**-1 * ex
```
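A quick example of evaluating the LDDE luminosity function defined above. Note that the parameter dictionaries in this module use keys such as `logLa1`/`alpha1` while `LuminosityFunction_LDDE` expects `logLa`/`alpha`, so values are passed explicitly; despite their names, `loglstar` and `logLa` carry linear luminosities here, and the quoted units are an assumption.
```python
# Evaluate the Ueda et al. (2014) hard X-ray LDDE luminosity function at one
# luminosity and redshift, using the parameter values tabulated above.
Lx = 1e44    # 2-10 keV luminosity [erg/s]
z = 1.5

phi = LuminosityFunction_LDDE(Lx, z,
    A=10**-6 * 0.70**3 * 2.91,
    loglstar=10**43.97, gamma1=0.96, gamma2=2.71,
    p1=4.78, p2=-1.5, p3=-6.2,
    zstar=1.86, zstarc2=3.0,
    logLa=10**44.61, logLa2=10**45.67,
    alpha=0.29, alpha2=-0.1)

print("space density ~ {:.3e} (per Mpc^3 per dex, assumed)".format(phi))
```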
#### File: tests/adv/test_pop_cohort.py
```python
import ares
import numpy as np
import matplotlib.pyplot as pl
def test():
# First population: sources only in atomic halos
m16 = ares.util.ParameterBundle('mirocha2016:dpl')
pars = m16.pars_by_pop(0, strip_id=True)
pars['pop_Tmin'] = 1.1e4
pop = ares.populations.GalaxyPopulation(**pars)
sim = ares.simulations.Global21cm(**m16)
pop_fs = sim.pops[0] # 'fs' = 'from sim'
# Second population: sources in all halos, but shut off below atomic threshold
m17 = ares.util.ParameterBundle('mirocha2016:dpl')
m17.update(ares.util.ParameterBundle('mirocha2017:step'))
pars2 = m17.pars_by_pop(0, strip_id=True)
pars2['pop_Tmin'] = 1.1e4
pars2['pq_func_par0[1]'] = 0
pars2['pq_func_par0[2]'] = 0
pars2['feedback_LW_Mmin'] = None
pop2 = ares.populations.GalaxyPopulation(**pars2)
sim2 = ares.simulations.Global21cm(**m16)
pop2_fs = sim2.pops[0] # 'fs' = 'from sim'
#
## Reference values to use in calculations
z_ref = 20.
Emin_ref = 10.2
Emax_ref = 13.6
tol = 1e-4
##
#
# Test a few quantities above and below the threshold
assert abs(pop.SFRD(z_ref) - pop2.SFRD(z_ref)) <= tol, \
"Error in SFRD!"
assert abs(pop.Emissivity(z=z_ref, Emin=Emin_ref, Emax=Emax_ref) - \
pop2.Emissivity(z=z_ref, Emin=Emin_ref, Emax=Emax_ref)) <= tol, \
"Error in Emissivity!"
# Make sure that the populations extracted from simulation instance are
# identical to those created inependently.
assert abs(pop.SFRD(z_ref) - pop_fs.SFRD(z_ref)) <= tol, \
"Error in SFRD!"
assert abs(pop.Emissivity(z=z_ref, Emin=Emin_ref, Emax=Emax_ref) - \
pop_fs.Emissivity(z=z_ref, Emin=Emin_ref, Emax=Emax_ref)) <= tol, \
"Error in Emissivity!"
assert abs(pop2.SFRD(z_ref) - pop2_fs.SFRD(z_ref)) <= tol, \
"Error in SFRD!"
assert abs(pop2.Emissivity(z=z_ref, Emin=Emin_ref, Emax=Emax_ref) - \
pop2_fs.Emissivity(z=z_ref, Emin=Emin_ref, Emax=Emax_ref)) <= tol, \
"Error in Emissivity!"
if __name__ == '__main__':
test()
```
#### File: tests/adv/test_pop_galaxy.py
```python
import ares
import numpy as np
import matplotlib.pyplot as pl
from ares.physics.Constants import cm_per_mpc, erg_per_ev, s_per_yr
def test():
z = np.arange(10, 25)
# Initialize a GalaxyPopulation
pop = ares.populations.GalaxyPopulation(pop_sed='pl', pop_Emin=2e2,
pop_Emax=1e4, pop_EminNorm=5e2, pop_EmaxNorm=8e3, pop_fX=1.0,
pop_yield=2.6e39, pop_yield_units='erg/s/SFR')
# Compute the luminosity density in two bands
Lx1 = np.array(list(map(pop.LuminosityDensity, z))) * cm_per_mpc**3
Lx2 = np.array([pop.LuminosityDensity(zz, 2e2, 5e2) for zz in z]) * cm_per_mpc**3
# Plot 'em
pl.semilogy(z, Lx1, color='k')
pl.semilogy(z, Lx2, color='b')
# Try again with different units
erg_per_phot = pop.src.AveragePhotonEnergy(500., 8e3) * erg_per_ev
y = 2.6e39 * s_per_yr / erg_per_phot
pop = ares.populations.GalaxyPopulation(pop_sed='pl', pop_Emin=2e2,
pop_Emax=1e4, pop_EminNorm=5e2, pop_EmaxNorm=8e3, pop_fX=1.0,
pop_yield=y, pop_yield_units='photons/Msun')
# Compute the luminosity density in two bands
Lx1 = np.array(list(map(pop.LuminosityDensity, z))) * cm_per_mpc**3
Lx2 = np.array([pop.LuminosityDensity(zz, 2e2, 5e2) for zz in z]) * cm_per_mpc**3
# Plot 'em
pl.scatter(z, Lx1, s=100, facecolors='none', color='k')
pl.scatter(z, Lx2, s=100, facecolors='none', color='b')
assert True
if __name__ == '__main__':
test()
```
#### File: tests/adv/test_pop_sfrd.py
```python
import ares
import matplotlib.pyplot as pl
PB = ares.util.ParameterBundle
def test():
# Create a simple population
pars_1 = PB('pop:fcoll') + PB('sed:bpass')
pop_fcoll = ares.populations.GalaxyPopulation(**pars_1)
#pop_fcoll_XR = ares.populations.GalaxyPopulation(**pars_1)
# Mimic the above population to check our different SFRD/SED techniques
sfrd_pars = {'pop_sfr_model': 'sfrd-func'}
sfrd_pars['pop_sfrd'] = pop_fcoll.SFRD
sfrd_pars['pop_sfrd_units'] = 'internal'
sed = PB('sed:toy')
sed['pop_Nion'] = pop_fcoll.src.Nion
sed['pop_Nlw'] = pop_fcoll.src.Nlw
# pop_Ex?
sed['pop_ion_src_igm'] = False
sed['pop_heat_src_igm'] = False
pars_2 = sed + sfrd_pars
pop_sfrd = ares.populations.GalaxyPopulation(**pars_2)
assert pop_fcoll.SFRD(20.) == pop_sfrd.SFRD(20.), "Error in SFRD."
# Check the emissivities too
#print(pop_fcoll.PhotonLuminosityDensity(20., Emin=10.2, Emax=13.6))
#print(pop_sfrd.PhotonLuminosityDensity(20., Emin=10.2, Emax=13.6))
#assert pop_fcoll.PhotonLuminosityDensity(20., Emin=10.2, Emax=13.6) \
# == pop_sfrd.PhotonLuminosityDensity(20., Emin=10.2, Emax=13.6), \
# "Error in photon luminosity density."
if __name__ == '__main__':
test()
```
#### File: tests/adv/test_solver_chem_he.py
```python
import ares
import numpy as np
import matplotlib.pyplot as pl
def test():
pf = \
{
'grid_cells': 64,
'include_He': True,
'isothermal': True,
'stop_time': 1e2,
'radiative_transfer': False,
'density_units': 1.0,
'initial_timestep': 1,
'max_timestep': 1e2,
'initial_temperature': np.logspace(4, 6, 64),
'initial_ionization': [1.-1e-8, 1e-8, 1-2e-8, 1e-8, 1e-8], # neutral
}
sim = ares.simulations.GasParcel(**pf)
sim.run()
data = sim.history
# Plot last time snapshot
pl.loglog(data['Tk'][0], data['h_1'][-1,:], color='k')
pl.loglog(data['Tk'][0], data['h_2'][-1,:], color='k', ls='--')
pl.loglog(data['Tk'][0], data['he_1'][-1,:], color='b')
pl.loglog(data['Tk'][0], data['he_2'][-1,:], color='b', ls='--')
pl.loglog(data['Tk'][0], data['he_3'][-1,:], color='b', ls=':')
pl.ylim(1e-8, 1)
pl.savefig('{!s}.png'.format(__file__.rstrip('.py')))
pl.close()
if __name__ == '__main__':
test()
```
#### File: tests/adv/test_solver_crt_xrb.py
```python
import ares
import numpy as np
import matplotlib.pyplot as pl
from ares.physics.Constants import erg_per_ev, c, ev_per_hz
# Initialize radiation background
pars = \
{
# Source properties
'pop_type': 'galaxy',
'pop_sfrd': lambda z: 0.1,
'pop_sfrd_units': 'msun/yr/mpc^3',
'pop_sed': 'pl',
'pop_alpha': -1.5,
'pop_Emin': 1e2,
'pop_Emax': 3e4,
'pop_EminNorm': 5e2,
'pop_EmaxNorm': 8e3,
'pop_yield': 2.6e39,
'pop_yield_units': 'erg/s/sfr',
'pop_solve_rte': True,
"pop_tau_Nz": 400,
'initial_redshift': 40.,
'final_redshift': 10.,
}
def test(Ecomp=8e3, tol=1e-2):
mgb = ares.simulations.MetaGalacticBackground(**pars)
mgb.run()
"""
First, look at background flux itself.
"""
z, E, flux = mgb.get_history(flatten=True)
flux_thin = flux * E * erg_per_ev
mask = slice(0, -1, 50)
pl.scatter(E[mask], flux_thin[-1][mask], color='b', facecolors='none', s=100)
# Grab GalaxyPopulation
pop = mgb.pops[0]
# Cosmologically-limited solution to the RTE
# [Equation A1 in Mirocha (2014)]
zi, zf = 40., 10.
e_nu = np.array([pop.Emissivity(10., EE) for EE in E])
e_nu *= c / 4. / np.pi / pop.cosm.HubbleParameter(10.)
e_nu *= (1. + 10.)**6. / -3.
e_nu *= ((1. + 40.)**-3. - (1. + 10.)**-3.)
e_nu *= ev_per_hz
# Plot it
pl.loglog(E, e_nu, color='k', ls='-')
pl.xlabel(ares.util.labels['E'])
pl.ylabel(ares.util.labels['flux_E'])
"""
Do neutral absorption in IGM.
"""
pars['pop_solve_rte'] = True
pars['pop_approx_tau'] = 'neutral'
pars['pop_tau_Nz'] = 400
mgb = ares.simulations.MetaGalacticBackground(**pars)
mgb.run()
z, E2, flux2 = mgb.get_history(flatten=True)
flux_thick = flux2 * E2 * erg_per_ev
pl.loglog(E2, flux_thick[-1], color='k', ls=':')
pl.ylim(0.5 * e_nu[-1], e_nu[0] * 2)
# Compare results at optically thin energy away from edges
flux_comp_anl = e_nu[np.argmin(np.abs(Ecomp - E))]
flux_comp_thin = flux_thin[-1][np.argmin(np.abs(Ecomp - E))]
flux_comp_thick = flux_thick[-1][np.argmin(np.abs(Ecomp - E2))]
thin_OK = abs((flux_comp_thin - flux_comp_anl) / flux_comp_anl) \
< tol
thick_OK = abs((flux_comp_thick - flux_comp_anl) / flux_comp_anl) \
< tol
print("\n# Analytic (thin) ; Numerical (thin) ; Numerical (neutral)")
print("----------------------------------------------------------")
print("{0:.8e} ; {1:.8e} ; {2:.8e}".format(\
flux_comp_anl, flux_comp_thin, flux_comp_thick))
print("----------------------------------------------------------")
print("relative error : {0:.12f} ; {1:.12f}".format(\
abs((flux_comp_thin - flux_comp_anl) / flux_comp_anl),
abs((flux_comp_thick - flux_comp_anl) / flux_comp_anl)))
print("----------------------------------------------------------")
pl.savefig('{!s}.png'.format(__file__[0:__file__.rfind('.')]))  # rstrip('.py') strips characters, not a suffix
pl.close()
assert thin_OK and thick_OK, \
"Relative error between analytical and numerical solutions exceeds {:.3g}.".format(tol)
if __name__ == '__main__':
test()
```
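For reference, the analytic `e_nu` assembled line by line in `test()` above (attributed in the code comment to Equation A1 of Mirocha 2014, not independently verified here) corresponds to

```latex
\hat{J}_\nu(z_f) \;=\; \frac{c}{4\pi}\,\frac{\epsilon_\nu(z_f)}{H(z_f)}\,
\frac{(1+z_f)^{6}}{-3}\,\Big[(1+z_i)^{-3}-(1+z_f)^{-3}\Big],
\qquad z_i = 40,\;\; z_f = 10,
```

where `pop.Emissivity` plays the role of the comoving emissivity \(\epsilon_\nu\) and the trailing `ev_per_hz` factor converts the per-eV spectrum to per-Hz units (as the constant's name suggests). This is the cosmologically limited, optically thin curve that both numerical backgrounds are compared against at `Ecomp`.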
#### File: ares/tests/test_analysis_uvlf.py
```python
import ares
import numpy as np
import matplotlib.pyplot as pl
gpop = ares.analysis.GalaxyPopulation()
redshifts = [3.8, 4.9, 5.9, 6.9, 7.9]
pars = \
{
'pop_sfr_model': 'uvlf',
# Stellar pop + fesc
'pop_sed': 'eldridge2009',
'pop_binaries': False,
'pop_Z': 0.02,
'pop_Emin': 10.19,
'pop_Emax': 24.6,
'pop_rad_yield': 'from_sed', # EminNorm and EmaxNorm arbitrary now
# should make this automatic
'pop_uvlf': 'pq',
'pq_func': 'schechter_evol',
'pq_func_var': 'MUV',
'pq_func_var2': 'z',
# Bouwens+ 2015 Table 6 for z=5.9
#'pq_func_par0[0]': 0.39e-3,
#'pq_func_par1[0]': -21.1,
#'pq_func_par2[0]': -1.90,
#
# phi_star
'pq_func_par0': np.log10(0.47e-3),
'pq_func_par4': -0.27,
# z-pivot
'pq_func_par3': 6.,
# Mstar
'pq_func_par1': -20.95,
'pq_func_par5': 0.01,
# alpha
'pq_func_par2': -1.87,
'pq_func_par6': -0.1,
}
def test():
mags = np.arange(-24, -10, 0.1)
# Schechter fit from B15
pop_sch = ares.populations.GalaxyPopulation(**pars)
# DPL SFE fit from my paper
m17 = ares.util.ParameterBundle('mirocha2017:base').pars_by_pop(0,1)
# Test suite doesn't download BPASS models, so supply L1600 by hand.
m17['pop_sed'] = None
m17['pop_lum_per_sfr'] = 1.019e28
m17['pop_calib_lum'] = None
# Make the population
pop_dpl = ares.populations.GalaxyPopulation(**m17)
ax1 = None
ax2 = None
colors = 'b', 'g', 'gray', 'k', 'r'
for i, z in enumerate(redshifts):
# Plot the data
ax1 = gpop.Plot(z=z, sources='bouwens2015', ax=ax1, color=colors[i],
mec=colors[i], label=r'$z\sim {:d}$'.format(int(round(z, 0))))
# Plot the Bouwens Schechter fit
ax1.semilogy(mags, pop_sch.LuminosityFunction(z, mags), color=colors[i],
ls='-', lw=1)
# My 2017 paper only looked at z > 6
if z < 5:
continue
ax2 = gpop.Plot(z=z, sources='bouwens2015', ax=ax2, color=colors[i], fig=2,
mec=colors[i], label=r'$z\sim {:d}$'.format(int(round(z, 0))))
# Plot the physical model fit
ax2.semilogy(mags, pop_dpl.LuminosityFunction(z, mags), color=colors[i],
ls='-', lw=1)
# Add z = 10 models and data from Oesch+ 2014
ax1.semilogy(mags, pop_sch.UVLF_M(MUV=mags, z=10.), color='m',
ls=':', alpha=1, lw=1)
ax2.semilogy(*pop_dpl.phi_of_M(z=10), color='m', ls=':', alpha=0.5)
ax1 = gpop.Plot(z=10., sources='oesch2014', ax=ax1, color='m',
mec='m', label=r'$z\sim 10$')
ax2 = gpop.Plot(z=10., sources='oesch2014', ax=ax2, color='m',
mec='m', label=r'$z\sim 10$')
# Make nice
ax1.legend(loc='lower right', fontsize=12, numpoints=1)
ax2.legend(loc='lower right', fontsize=12, numpoints=1)
ax1.set_title('Bouwens+ 2015 Fig. 15 (reproduced)', fontsize=12)
ax2.set_title(r'Models from Mirocha+ 2017 (cal at $z \sim 6$ only)',
fontsize=12)
ax1.set_xlim(-24, -10)
ax1.set_ylim(1e-7, 1)
ax2.set_xlim(-24, -10)
ax2.set_ylim(1e-7, 1)
pl.show()
pl.figure(1)
pl.savefig('{!s}_1.png'.format(__file__[0:__file__.rfind('.')]))
pl.close()
pl.figure(2)
pl.savefig('{!s}_2.png'.format(__file__[0:__file__.rfind('.')]))
pl.close()
assert True
if __name__ == '__main__':
test()
```
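For orientation, the commented Bouwens+ 2015 values and the `phi_star` / `Mstar` / `alpha` labels above suggest the familiar UV-magnitude Schechter form,

```latex
\phi(M_{\rm UV})\,\mathrm{d}M \;=\; 0.4\ln(10)\,\phi^{*}
\left[10^{\,0.4\,(M^{*}-M_{\rm UV})}\right]^{\alpha+1}
\exp\!\left[-10^{\,0.4\,(M^{*}-M_{\rm UV})}\right]\mathrm{d}M ,
```

with `pq_func_par3`–`par6` presumably adding linear evolution about the z = 6 pivot, i.e. log10 phi*(z) = par0 + par4 (z - par3), M*(z) = par1 + par5 (z - par3) and alpha(z) = par2 + par6 (z - par3). The slopes are consistent with the redshift-evolution fit quoted by Bouwens+ 2015, but the exact convention used by `schechter_evol` is ares' own and is not verified here.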
#### File: ares/tests/test_inference_cal_model.py
```python
import os
import glob
import ares
def test():
gpop = ares.analysis.GalaxyPopulation()
pars = ares.util.ParameterBundle('mirocha2020:univ')
pars['pop_thin_hist'] = 1 # speed-up
pars['pop_dust_yield'] = 0
pars.update(ares.util.ParameterBundle('testing:galaxies'))
# Test with standard 4-parameter SFE model
cal = ares.inference.CalibrateModel(fit_lf=[6], fit_beta=False,
free_params_sfe=['norm'],
zevol_sfe=None,
include_fduty=False, include_fdtmr=False,
save_sam=False, save_smf=False, save_lf=True, save_beta=False,
save_sfrd=True, ztol=0.21)
cal.base_kwargs = pars
blobs = cal.blobs
assert len(cal.parameters) == 1
assert len(cal.guesses.keys()) == 1
assert cal.free_params_dust == []
# Run for a few steps
cal.run(steps=1, burn=0, save_freq=1, prefix='test_lfcal', clobber=True)
anl = ares.analysis.ModelSet('test_lfcal')
assert anl.chain.shape == (2, 1), "Chain not the right shape."
assert anl.logL.size == 2, "logL not the right size."
axes = gpop.PlotSummary(anl, fig=1, use_best=True)
# Add some dust
cal = ares.inference.CalibrateModel(fit_lf=[6], fit_beta=[6],
free_params_sfe=['norm', 'peak', 'slope-low', 'slope-high'],
zevol_sfe=None,
include_dust='screen',
free_params_dust=['norm', 'slope', 'scatter'],
zevol_dust=None,
include_fduty=False, include_fdtmr=False,
save_sam=True, save_smf=True, save_lf=True, save_beta=True,
save_sfrd=True, ztol=0.21)
cal.base_kwargs = pars
blobs = cal.blobs
assert len(cal.parameters) == 7
assert len(cal.guesses.keys()) == 7
assert len(cal.free_params_dust) == 3
guesses = cal.get_initial_walker_position()
assert len(guesses.keys()) == 7
pars['pop_dust_yield'] = 0.4
pop = ares.populations.GalaxyPopulation(**pars)
axes = gpop.PlotSummary(pop, fig=2)
axes = gpop.PlotColorColor(pop, fig=3)
# Clean-up
mcmc_files = glob.glob('{}/test_lfcal*'.format(os.environ.get('ARES')))
# Iterate over the list of filepaths & remove each file.
for fn in mcmc_files:
try:
os.remove(fn)
except OSError as exc:
print("Error while deleting file:", fn, exc)
if __name__ == '__main__':
test()
```
#### File: ares/tests/test_physics_cosmology.py
```python
import numpy as np
from ares.physics import Cosmology
from ares.physics.Constants import s_per_gyr, m_H, m_He, cm_per_mpc
def test(rtol=1e-3):
cosm = Cosmology()
# Check some high-z limits
cosm_appr = Cosmology(approx_highz=True)
# Check critical density
assert cosm.CriticalDensity(0.) == cosm.CriticalDensityNow
# Make sure energy densities sum to unity
assert np.allclose(cosm.omega_m_0, 1. - cosm.omega_l_0)
# Make sure the age of the Universe is OK
assert 13.5 <= cosm.t_of_z(0.) / s_per_gyr <= 14.
# Check high-z limit for Hubble parameter. Better than 1%?
H_n = cosm.HubbleParameter(30.)
H_a = cosm_appr.HubbleParameter(30.)
assert abs(H_n - H_a) / H_a < rtol, \
"Hubble parameter @ high-z not accurate to < {:.3g}%.".format(rtol)
# Check high-z limit for comoving radial distance
R_n = cosm_appr.ComovingRadialDistance(20., 30.) / cm_per_mpc
R_a = cosm.ComovingRadialDistance(20., 30.) / cm_per_mpc
assert abs(R_a - R_n) / R_a < rtol, \
"Comoving radial distance @ high-z not accurate to < {:.3g}%.".format(rtol)
# Test a user-supplied cosmology and one that grabs a row from Planck chain
# Remember: test suite doesn't have CosmoRec, so don't use get_inits_rec.
cosm = Cosmology(cosmology_name='user', cosmology_id='jordan')
cosm = Cosmology(cosmology_id=100)
if __name__ == '__main__':
test()
```
#### File: ares/tests/test_physics_cross_sections.py
```python
import numpy as np
import matplotlib.pyplot as pl
from ares.physics.CrossSections import *
def test():
E = np.logspace(np.log10(13.6), 4)
sigma = PhotoIonizationCrossSection
sigma_approx = ApproximatePhotoIonizationCrossSection
fig1, ax1 = pl.subplots(1, 1)
ax1.loglog(E, [sigma(EE, 0) for EE in E], color='k', ls='-', label=r'H')
ax1.loglog(E, [sigma(EE, 1) for EE in E], color='k', ls='--', label=r'HeI')
ax1.loglog(E, [sigma_approx(EE, 0) for EE in E], color='b', ls='-')
ax1.loglog(E, [sigma_approx(EE, 1) for EE in E], color='b', ls='--')
ax1.legend(frameon=False)
ax1.set_xlim(10, 1e4)
ax1.set_ylim(1e-25, 1e-16)
ax1.set_xlabel(r'$h\nu \ (\mathrm{eV})$')
ax1.set_ylabel(r'$\sigma_{\nu} \ (\mathrm{cm}^2)$')
ax1.annotate(r'Verner & Ferland (1996)', (20, 1e-24), ha='left')
ax1.annotate(r'Approximate', (20, 1e-25), color='b', ha='left')
pl.savefig('{!s}.png'.format(__file__[0:__file__.rfind('.')]))
pl.close()
assert True
if __name__ == '__main__':
test()
```
#### File: ares/tests/test_physics_HI_wf.py
```python
import ares
import numpy as np
import matplotlib.pyplot as pl
def test():
Tarr = np.logspace(-1, 2)
res = []
ls = ['-', '--', '--', ':', '-.']
labels = 'Chuzhoy+ \'05', "Furlanetto \\& Pritchard '06", None, 'Hirata \'06'  # escape the backslash; '\&' is an invalid escape sequence
for i, method in enumerate([2,3,3.5,4]):
hydr = ares.physics.Hydrogen(approx_Salpha=method)
Sa = np.array([hydr.Sa(20., Tarr[k]) for k in range(Tarr.size)])
pl.plot(Tarr, 1 - Sa, color='k', ls=ls[i], label=labels[i])
res.append(Sa)
# Check Ts while we're here
Ts = hydr.SpinTemperature(20., hydr.cosm.Tgas(20.), 1, 0., 0.)
pl.xlim(0.5, 100)
pl.ylim(1e-1, 1.1)
pl.xscale('log')
pl.yscale('log')
pl.xlabel(r'$T_K / \mathrm{K}$')
pl.ylabel(r'$1-S_{\alpha}$')
pl.legend(loc='lower left', frameon=False, fontsize=14)
pl.savefig('{!s}.png'.format(__file__[0:__file__.rfind('.')]))
pl.close()
# Compare at T > 1 K
ok = Tarr > 1.
diff = np.abs(np.diff(res, axis=1))
# Just set to a level that I know is tight enough to pickup
# any errors we might accidentally introduce later.
assert np.all(diff.ravel() < 0.3)
# Check frec
for n in range(2, 31):
frec = hydr.frec(n)
assert hydr.Tbg is None
# Check various limits
dTb_sat = hydr.saturated_limit(10.)
dTb_low = hydr.adiabatic_floor(10.)
dTb_phy = hydr.dTb_no_astrophysics(10.)
assert 0 <= dTb_sat <= 50
assert -350 <= dTb_low <= -200
assert abs(dTb_phy) < 1
if __name__ == '__main__':
test()
```
#### File: ares/tests/test_sources_sps.py
```python
import ares
import numpy as np
def test():
src = ares.sources.SynthesisModel(source_sed='eldridge2009',
source_sed_degrade=100, source_Z=0.02)
Ebar = src.AveragePhotonEnergy(13.6, 1e2)
assert 13.6 <= Ebar <= 1e2
nu = src.frequencies
ehat = src.emissivity_per_sfr
beta = src.get_beta()
assert -3 <= np.mean(beta) <= 2
if __name__ == '__main__':
test()
```
#### File: ares/tests/test_util_readdata.py
```python
import ares
def test():
for src in ['mirocha2017', 'bouwens2015', 'finkelstein2015']:
data = ares.util.read_lit(src)
if __name__ == '__main__':
test()
``` |
{
"source": "jlashner/hmf",
"score": 3
} |
#### File: hmf/hmf/cosmo.py
```python
from . import _cache
from astropy.cosmology import Planck13, FLRW, WMAP5, WMAP7, WMAP9, Planck15
from . import _framework
import sys
import astropy.units as u
class Cosmology(_framework.Framework):
"""
Basic Cosmology object.
This class thinly wraps cosmology objects from the astropy package. The full
functionality of the astropy cosmology objects are available in the
:attr:`cosmo` attribute. What the class adds to the existing astropy
implementation is the specification of the cosmological parameters
as `parameter` inputs to an over-arching Framework.
In particular, while any instance of a subclass of :class:`astropy.cosmology.FLRW`
may be passed as the base cosmology, the specific parameters can be updated
individually by passing them through the `cosmo_params` dictionary
(both in the constructor and the :meth:`update` method).
This dictionary is kept in memory and so adding a different parameter on a later
update will *update* the dictionary, rather than replacing it.
To read a standard documented list of parameters, use ``Cosmology.parameter_info()``.
If you want to just see the plain list of available parameters, use ``Cosmology.get_all_parameters()``.
To see the actual defaults for each parameter, use ``Cosmology.get_all_parameter_defaults()``.
"""
def __init__(self, cosmo_model=Planck15, cosmo_params=None):
# Call Framework init
super(Cosmology, self).__init__()
# Set all given parameters
self.cosmo_model = cosmo_model
self.cosmo_params = cosmo_params or {}
#===========================================================================
# Parameters
#===========================================================================
@_cache.parameter("model")
def cosmo_model(self, val):
"""
The basis for the cosmology -- see astropy documentation. Can be a custom
subclass. Defaults to Planck15.
:type: instance of `astropy.cosmology.FLRW` subclass
"""
if isinstance(val, str):
cosmo = get_cosmo(val)
return cosmo
if not isinstance(val, FLRW):
raise ValueError("cosmo_model must be an instance of astropy.cosmology.FLRW")
else:
return val
@_cache.parameter("param")
def cosmo_params(self, val):
"""
Parameters for the cosmology that deviate from the base cosmology passed.
This is useful for repeated updates of a single parameter (leaving others
the same). Default is the empty dict. The parameters passed must match
the allowed parameters of `cosmo_model`. For the basic class this is
:Tcmb0: Temperature of the CMB at z=0
:Neff: Number of massless neutrino species
:m_nu: Mass of neutrino species (list)
:H0: The hubble constant at z=0
:Om0: The normalised matter density at z=0
:type: dict
"""
return val
#===========================================================================
# DERIVED PROPERTIES AND FUNCTIONS
#===========================================================================
@_cache.cached_quantity
def cosmo(self):
"""
Cosmographic object (:class:`astropy.cosmology.FLRW` object), with custom
cosmology from :attr:`~.cosmo_params` applied.
"""
return self.cosmo_model.clone(**self.cosmo_params)
@_cache.cached_quantity
def mean_density0(self):
"""
Mean density of universe at z=0, [Msun h^2 / Mpc**3]
"""
return (self.cosmo.Om0 * self.cosmo.critical_density0 / self.cosmo.h ** 2).to(u.Msun/u.Mpc**3).value
def get_cosmo(name):
"""
Returns a FLRW cosmology given a string (must be one defined in this module).
Parameters
----------
name : str
The class name of the appropriate model
"""
if isinstance(getattr(sys.modules[__name__], name), FLRW):
return getattr(sys.modules[__name__], name)
else:
raise ValueError("%s is not a valid cosmology" % name)
```
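A minimal usage sketch of the class above. The import path is inferred from the file layout, and `update()` is assumed to be provided by the parent `Framework`, as the docstring states:

```python
from astropy.cosmology import WMAP9
from hmf.cosmo import Cosmology  # import path inferred from the file layout

# Start from WMAP9 but override H0. cosmo_params is forwarded to
# astropy's clone(), so only the named parameters change.
c = Cosmology(cosmo_model=WMAP9, cosmo_params={"H0": 68.0})
print(c.cosmo.H0, c.mean_density0)

# update() (assumed from the parent Framework) merges into the stored
# cosmo_params dict rather than replacing it, so H0=68 is retained here.
c.update(cosmo_params={"Om0": 0.31})
print(c.cosmo.Om0, c.cosmo.H0)
```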
#### File: hmf/fitting/fit.py
```python
import numpy as np
from scipy.stats import norm
from scipy.optimize import minimize
from multiprocessing import cpu_count
import time
import warnings
import pickle
import copy
import traceback
import hmf.transfer_models as tm
try:
from emcee import EnsembleSampler as es
HAVE_EMCEE = True
# The following redefines the EnsembleSampler so that the pool object is not
# pickled along with it (it can't be).
def should_pickle(k):
return k!="pool"
# try:
# pickle.dumps(v)
# return True
# except NotImplementedError:
# return False
class EnsembleSampler(es):
def __getstate__(self):
return dict((k, v) for (k, v) in list(self.__dict__.items()) if should_pickle(k))
except ImportError:
HAVE_EMCEE = False
def model(parm, h, self):
"""
Calculate the log probability of a model `h`
[instance of :class:`hmf._framework.Framework`] with parameters ``parm``.
At the moment, this is a little hacky, because the parameters have to
be the first argument (for both Minimize and MCMC), so we use a
function and pass self last.
Parameters
----------
parm : list of floats
The position of the model. Takes arbitrary parameters.
h : instance of :class:`~_framework.Framework`
An instance of any subclass of :class:`~_framework.Framework` with the
desired options set. Variables of the estimation are updated within the
routine.
Returns
-------
ll : float
The log likelihood of the model at the given position.
"""
if self.verbose > 1:
print(("Params: ", list(zip(self.attrs, parm))))
ll = 0
p = copy.copy(parm)
for prior in self.priors:
if type(prior.name) == list:
index = [self.attrs.index(name) for name in prior.name]
else:
index = self.attrs.index(prior.name)
ll += prior.ll(parm[index])
if np.isinf(ll):
return ret_arg(ll, self.blobs)
# If it is a log distribution, un-log it for use.
if isinstance(prior, Log):
p[index] = 10 ** parm[index]
# Rebuild the hod dict from given vals
# Any attr starting with <name>: is put into a dictionary.
param_dict = {}
for attr, val in zip(self.attrs, p):
if ":" in attr:
if attr.split(":")[0] not in param_dict:
param_dict[attr.split(":")[0]] = {}
param_dict[attr.split(":")[0]][attr.split(":")[1]] = val
else:
param_dict[attr] = val
# Update the actual model
try: # This try: except: should capture poor parameter choices quickly.
h.update(**param_dict)
except ValueError as e:
if self.relax:
print(("WARNING: PARAMETERS FAILED ON UPDATE, RETURNING INF: ", list(zip(self.attrs, parm))))
print(e)
print((traceback.format_exc()))
return ret_arg(-np.inf, self.blobs)
else:
print((traceback.format_exc()))
raise e
# Get the quantity to compare (if exceptions are raised, treat properly)
try:
q = getattr(h, self.quantity)
except Exception as e:
if self.relax:
print(("WARNING: PARAMETERS FAILED WHEN CALCULATING QUANTITY, RETURNING INF: ", list(zip(self.attrs, parm))))
print(e)
print((traceback.format_exc()))
return ret_arg(-np.inf, self.blobs)
else:
print((traceback.format_exc()))
raise e
# The logprob of the model
if self.cov:
ll += _lognormpdf(q, self.data, self.sigma)
else:
ll += np.sum(norm.logpdf(self.data, loc=q, scale=self.sigma))
# Add the likelihood of the constraints
for k, v in list(self.constraints.items()):
ll += norm.logpdf(getattr(h, k), loc=v[0], scale=v[1])
if self.verbose > 2:
print(("CONSTRAINT: ", k, getattr(h, k)))
if self.verbose:
print(("Likelihood: ", ll))
if self.verbose > 1 :
print(("Update Dictionary: ", param_dict))
if self.verbose > 2:
print(("Final Quantity: ", q))
# Get blobs to return as well.
if self.blobs is not None:
out = []
for b in self.blobs:
if ":" not in b:
out.append(getattr(h, b))
elif ":" in b:
out.append(getattr(h, b.split(":")[0])[b.split(":")[1]])
return ll, out
else:
return ll
def ret_arg(ll,blobs):
if blobs is None:
return ll
else:
return ll, blobs
class Fit(object):
"""
Parameters
----------
priors : list of prior classes
A list containing instances of :class:`.Prior` subclasses. These specify
the prior information on each parameter.
data : array_like
The data to be compared to -- must be the same length as the intended
quantity. Also must be free from NaN values or a ValueError will be raised.
quantity : str
The quantity to be compared (eg. ``"dndm"``)
constraints : dict
A dictionary with keys being quantity names, and values being length 2
collections with element 0 the desired value of the quantity, and
element 1 the uncertainty. This is used in addition to the data to
calculate the likelihood
sigma : array_like
If a vector, this is taken to be the standard deviation of the data. If
a matrix, it is taken to be the covariance matrix of the data.
blobs : list of str
Names of quantities to be returned along with the chain. Each must be an attribute of the framework, or an ``attr:key`` entry for dictionary-valued attributes.
verbose : int, default 0
How much to write to screen.
relax : bool, default False
If relax is true, the call to get the quantity is wrapped in a try:except:.
If an error occurs, the lognorm is set to -inf, rather than raising an exception.
This can be helpful if a flat prior is used on cosmology, for which extreme
values can sometimes cause exceptions.
"""
def __init__(self, priors, data, quantity, constraints, sigma, guess=[], blobs=None,
verbose=0, relax=False):
if len(priors) == 0:
raise ValueError("priors must be at least length 1")
else:
self.priors = priors
# Save which attributes are updatable as a list
self.attrs = []
for prior in self.priors:
if isinstance(prior.name, str):
self.attrs += [prior.name]
else:
self.attrs += prior.name
# Get the number of variables for MCMC
self.ndim = len(self.attrs)
# Ensure guess was set correctly.
if guess and len(guess) != self.ndim:
warnings.warn("Guess was set incorrectly: %s" % guess)
guess = []
self.guess = self.get_guess(guess)
if np.any(np.isnan(data)):
raise ValueError("The data must contain no NaN values")
self.data = data
self.quantity = quantity
self.sigma = sigma
self.blobs = blobs
self.verbose = verbose
self.relax = relax
self.constraints = constraints
# Make sure sigma has right rank
if len(self.sigma.shape) == 2:
self.cov = True
elif len(self.sigma.shape) == 1:
self.cov = False
else:
raise ValueError("sigma must be an array of 1 or 2 dimensions, but has %s dim" % len(sigma.shape))
def get_guess(self, guess):
# Set guess if not set
if not guess:
for prior in self.priors:
if isinstance(prior, Uniform):
guess += [(prior.high + prior.low) / 2]
elif isinstance(prior, Normal):
guess += [prior.mean]
elif isinstance(prior, MultiNorm):
guess += prior.means.tolist()
return np.array(guess)
def model(self, p, h):
return model(p, h, self)
class MCMC(Fit):
def __init__(self, *args, **kwargs):
if not HAVE_EMCEE:
raise TypeError("You need emcee to use this class, aborting. ['pip install emcee']")
super(MCMC, self).__init__(*args, **kwargs)
def fit(self, sampler=None,h=None, nwalkers=100, nsamples=100, burnin=0,
nthreads=0, chunks=None):
"""
Estimate the parameters in :attr:`.priors` using AIES MCMC.
This routine uses the emcee package to run an MCMC procedure, fitting
parameters passed in :attr:`.priors` to the given quantity.
Parameters
----------
sampler : instance of :class:`EnsembleSampler`
A sampler instance, which may already include samples from a previous
run.
h : instance of :class:`~hmf._framework.Framework` subclass, optional
This instance will be updated with the variables of the minimization.
Other desired options should have been set upon instantiation.
Needed if `sampler` not present.
nwalkers : int
Number of walkers to use for Affine-Invariant Ensemble Sampler.
nsamples : int, optional
Number of samples that *each walker* will perform.
burnin : int, optional
Number of samples from each walker that will be initially erased as
burnin. Note, this performs *additional* iterations, rather than
consuming iterations from `nsamples`.
nthreads : int, optional
Number of threads to use in sampling. If nought, will automatically
detect number of cores available.
chunks : int, optional
Number of samples to run before appending results to file. Only
applicable if :attr:`.filename` is provided.
Yields
------
sampler : :class:`EnsembleSampler` object
The full sampling object, with chain, blobs, acceptance fraction etc.
"""
if sampler is None and h is None:
raise ValueError("Either sampler or h must be given")
# If using CAMB, nthreads MUST BE 1
if (h.transfer_model == "CAMB" or h.transfer_model == tm.CAMB):
if any(p.startswith("cosmo_params:") for p in self.attrs):
nthreads = 1
if not nthreads:
# auto-calculate the number of threads to use if not set.
nthreads = cpu_count()
# This just makes sure that the caching works
getattr(h, self.quantity)
initial_pos=None
if sampler is not None:
if sampler.iterations>0:
initial_pos = sampler.chain[:,-1,:]
else:
# Note, sampler CANNOT be an attribute of self, since self is passed to emcee.
sampler = EnsembleSampler(nwalkers, self.ndim, model,
args=[h, self], threads=nthreads)
# Get initial positions
if initial_pos is None:
initial_pos = self.get_initial_pos(nwalkers)
# Run a burn-in
# If there are some samples already in the sampler, only run the difference.
if burnin:
initial_pos, lnprob,rstate,blobs0 = self._run_burnin(burnin,initial_pos)
else:
lnprob = None
rstate = None
blobs0 = None
# Run the actual run
if not chunks or chunks > nsamples:  # chunks defaults to None; None > int raises on Python 3
chunks = nsamples
start = time.time()
for i, result in enumerate(sampler.sample(initial_pos, iterations=nsamples,
lnprob0=lnprob, rstate0=rstate,
blobs0=blobs0)):
if (i + 1) % chunks == 0 or i + 1 == nsamples:
yield sampler
self.__sampler = sampler
def get_and_del_sampler(self):
"""
Returns the sampler object if it exists (ie. fit has been called) and deletes it.
This must be used to get the sampler if no chunks are being used. That is
```
F = MCMC(...)
F.fit()
sampler = F.get_and_del_sampler()
```
After being assigned, it is deleted since it cannot exist in the class
when `.fit()` is called.
"""
sampler = self.__sampler
del self.__sampler
return sampler
def get_initial_pos(self, nwalkers):
# Get an initial value for all walkers, around a small ball near the initial guess
stacked_val = self.guess.copy()
for i in range(nwalkers - 1):
stacked_val = np.vstack((self.guess, stacked_val))
i = 0
for prior in self.priors:
if isinstance(prior, Uniform):
stacked_val[:, i] += np.random.normal(loc=0.0, scale=0.05 *
min((self.guess[i] - prior.low),
(prior.high - self.guess[i])),
size=nwalkers)
i += 1
elif isinstance(prior, Normal):
stacked_val[:, i] += np.random.normal(loc=0.0, scale=prior.sd,
size=nwalkers)
i += 1
elif isinstance(prior, MultiNorm):
for j in range(len(prior.name)):
stacked_val[:, i] += np.random.normal(loc=0.0, scale=np.sqrt(prior.cov[j, j]),
size=nwalkers)
i += 1
return stacked_val
def _run_burnin(self,burnin,initial_pos):
if type(burnin) == int:
if burnin - self.sampler.iterations >0:
initial_pos, lnprob, rstate,blobs0 = self.sampler.run_mcmc(initial_pos, burnin-self.sampler.iterations)
self.sampler.reset()
else:
if burnin[0]-self.sampler.iterations > 0:
initial_pos, lnprob, rstate,blobs0 = self.sampler.run_mcmc(initial_pos, burnin[0]-self.sampler.iterations)
else:
return initial_pos,None,None,None
it_needed = burnin[1]*np.max(self.sampler.acor)
while it_needed > self.sampler.iterations or it_needed<0: # if negative, probably ran fewer samples than lag.
initial_pos, lnprob, rstate, blobs0 = self.sampler.run_mcmc(initial_pos, burnin[0]/2)
it_needed = burnin[1]*np.max(self.sampler.acor)
if self.sampler.iterations > burnin[2]:
warnings.warn("Burnin FAILED... continuing (acor=%s)" % (it_needed))
if self.verbose > 0:
burnin = self.sampler.iterations
print(("Used %s samples for burnin" % self.sampler.iterations))
self.sampler.reset()
return initial_pos, lnprob,rstate,blobs0
#===========================================================
# Minimize Fitting Routine
#===========================================================
class Minimize(Fit):
def __init__(self,*args,**kwargs):
super(Minimize,self).__init__(*args,**kwargs)
self.original_blobs = list(self.blobs) if self.blobs else []  # copy, and tolerate blobs=None
self.blobs = None
def fit(self, h, disp=False, maxiter=50,tol=None,**minimize_kwargs):
"""
Run an optimization procedure to fit a model to data.
Parameters
----------
h : instance of :class:`~hmf.framework.Framework` subclass
This instance will be updated with the variables of the minimization.
Other desired options should have been set upon instantiation.
method : str, default ``"Nelder-Mead"``
The optimizing routine (see `scipy.optimize.minimize` for details).
disp : bool, default False
Whether to display optimization information while running.
maxiter : int, default 50
Maximum number of iterations
tol : float, default None
Tolerance for termination
\*\*minimize_kwargs :
Arguments passed directly to :func:`scipy.optimize.minimize`.
Returns
-------
res : instance of :class:`scipy.optimize.Result`
Contains the results of the minimization. Important attributes are the
solution vector :attr:`x`, the number of iterations :attr:`nit`, whether
the minimization was a success :attr:`success`, and the exit message
:attr:`message`.
"""
# try to set some bounds
bounds = []
for p in self.priors:
if type(p.name) is list:
bounds += p.bounds()
else:
bounds.append(p.bounds())
res = minimize(self.negmod, self.guess, (h,), tol=tol,
options={"disp":disp, "maxiter":maxiter},
**minimize_kwargs)
if hasattr(res,"hess_inv"):
self.cov_matrix = res.hess_inv
return res
def negmod(self, *args):
ll = self.model(*args)
if np.isinf(ll):
return 1e30
else:
return -ll
#===============================================================================
# Classes for different prior models
#===============================================================================
class Prior(object):
def ll(self, param):
"""
Returns the log-likelihood of the given parameter given the Prior
"""
pass
def guess(self, *p):
"""
Returns an "initial guess" for the prior
"""
pass
class Uniform(Prior):
"""
A Uniform prior.
Parameters
----------
param : str
The name of the parameter
low : float
The lower bound of the parameter
high : float
The upper bound of the parameter
"""
def __init__(self, param, low, high):
self.name = param
self.low = low
self.high = high
def ll(self, param):
if param < self.low or param > self.high:
return -np.inf
else:
return 0
def guess(self, *p):
return (self.low + self.high) / 2
def bounds(self):
return (self.low,self.high)
class Log(Uniform):
pass
class Normal(Prior):
"""
A Gaussian prior.
Parameters
----------
param : str
Name of the parameter
mean : float
Mean of the prior distribution
sd : float
The standard deviation of the prior distribution
"""
def __init__(self, param, mean, sd):
self.name = param
self.mean = mean
self.sd = sd
def ll(self, param):
return norm.logpdf(param, loc=self.mean, scale=self.sd)
def guess(self, *p):
return self.mean
def bounds(self):
return (self.mean-5*self.sd,self.mean+5*self.sd)
class MultiNorm(Prior):
"""
A Multivariate Gaussian prior
Parameters
----------
params : list of str
Names of the parameters (in order)
means : list of float
Mean vector of the prior distribution
cov : ndarray
Covariance matrix of the prior distribution
"""
def __init__(self, params, mean, cov):
self.name = params
self.mean = mean
self.cov = cov
def ll(self, params):
"""
Here params should be a dict of key:values
"""
#params = np.array([params[k] for k in self.name])
return _lognormpdf(params, self.mean, self.cov)
def guess(self, *p):
"""
p should be the parameter name
"""
return self.mean[self.name.index(p[0])]
def bounds(self):
return [(m-5*sd, m+5*sd) for m,sd in zip(self.mean,np.sqrt(np.diag(self.cov)))]
def _lognormpdf(x, mu, S):
""" Log of Multinormal PDF at x, up to scale-factors."""
err = x - mu
return -0.5 * np.linalg.solve(S, err).T.dot(err)
#===============================================================================
# COVARIANCE DATA FROM CMB MISSIONS
#===============================================================================
# # Some data from CMB missions.
# # All cov and mean data is in order of ["omegab_h2", "omegac_h2", "n", "sigma_8", "H0"]
class CosmoCovData(object):
def __init__(self, cov, mean, params):
self.cov = cov
self.mean = mean
self.params = params
def get_cov(self, *p):
"""
Return covariance matrix of given parameters *p
"""
if not all([str(pp) in self.params for pp in p]):
raise AttributeError("One or more parameters passed are not in the data")
indices = [self.params.index(str(k)) for k in p]
return self.cov[indices, :][:, indices]
def get_mean(self, *p):
indices = [self.params.index(str(k)) for k in p]
return self.mean[indices]
def get_std(self, *p):
cov = self.get_cov(*p)
return np.sqrt([(cov[i, i]) for i in range(cov.shape[0])])
def get_normal_priors(self, *p):
std = self.get_std(*p)
mean = self.get_mean(*p)
return [Normal("cosmo_params:" + pp, m, s) if pp not in ["sigma_8", "n"]
else Normal(pp, m, s) for pp, m, s in zip(p, mean, std)]
def get_cov_prior(self, *p):
cov = self.get_cov(*p)
mean = self.get_mean(*p)
p = ["cosmo_params:" + pp if pp not in ["sigma_8", "n"] else pp for pp in p]
return MultiNorm(p, mean, cov)
class FlatCovData(CosmoCovData):
def __init__(self, cov, mean):
params = ['Om0', 'Ob0', 'sigma_8', 'n', 'H0']
super(FlatCovData, self).__init__(cov, mean, params)
WMAP3 = FlatCovData(cov=np.array([[ 1.294e-03, 1.298e-04, 1.322e-03, -1.369e-04, -1.153e-01],
[1.298e-04, 1.361e-05, 1.403e-04, -7.666e-06, -1.140e-02],
[1.322e-03, 1.403e-04, 2.558e-03, 2.967e-04, -9.972e-02],
[-1.369e-04, -7.666e-06, 2.967e-04, 2.833e-04, 2.289e-02],
[-1.153e-01, -1.140e-02, -9.972e-02, 2.289e-02, 1.114e+01 ]]),
mean=np.array([ 2.409e-01, 4.182e-02, 7.605e-01, 9.577e-01, 7.321e+01 ]))
WMAP5 = FlatCovData(cov=np.array([[ 9.514e-04, 9.305e-05, 8.462e-04, -1.687e-04, -8.107e-02],
[9.305e-05, 9.517e-06, 8.724e-05, -1.160e-05, -7.810e-03],
[8.462e-04, 8.724e-05, 1.339e-03, 1.032e-04, -6.075e-02],
[-1.687e-04, -1.160e-05, 1.032e-04, 2.182e-04, 2.118e-02],
[-8.107e-02, -7.810e-03, -6.075e-02, 2.118e-02, 7.421e+00 ]]),
mean=np.array([ 2.597e-01, 4.424e-02, 7.980e-01, 9.634e-01, 7.180e+01 ]))
WMAP7 = FlatCovData(cov=np.array([[ 8.862e-04, 8.399e-05, 7.000e-04, -2.060e-04, -7.494e-02],
[8.399e-05, 8.361e-06, 7.000e-05, -1.500e-05, -7.003e-03],
[7.000e-04, 7.000e-05, 1.019e-03, 4.194e-05, -4.987e-02],
[-2.060e-04, -1.500e-05, 4.194e-05, 2.103e-04, 2.300e-02],
[-7.494e-02, -7.003e-03, -4.987e-02, 2.300e-02, 6.770e+00 ]]),
mean=np.array([ 2.675e-01, 4.504e-02, 8.017e-01, 9.634e-01, 7.091e+01 ]))
WMAP9 = FlatCovData(cov=np.array([[ 6.854e-04, 6.232e-05, 4.187e-04, -2.180e-04, -5.713e-02],
[6.232e-05, 5.964e-06, 4.048e-05, -1.643e-05, -5.134e-03],
[4.187e-04, 4.048e-05, 5.644e-04, -1.037e-05, -2.945e-02],
[-2.180e-04, -1.643e-05, -1.037e-05, 1.766e-04, 2.131e-02],
[-5.713e-02, -5.134e-03, -2.945e-02, 2.131e-02, 5.003e+00]]),
mean=np.array([ 2.801e-01, 4.632e-02, 8.212e-01, 9.723e-01, 6.998e+01 ]))
Planck13 = FlatCovData(cov=np.array([[ 3.884e-04, 3.017e-05, -1.508e-04, -1.619e-04, -2.834e-02],
[3.017e-05, 2.459e-06, -9.760e-06, -1.236e-05, -2.172e-03],
[-1.508e-04, -9.760e-06, 7.210e-04, 1.172e-04, 1.203e-02],
[-1.619e-04, -1.236e-05, 1.172e-04, 8.918e-05, 1.196e-02],
[-2.834e-02, -2.172e-03, 1.203e-02, 1.196e-02, 2.093e+00 ]]),
mean=np.array([ 3.138e-01, 4.861e-02, 8.339e-01, 9.617e-01, 6.741e+01 ]))
Planck15 = FlatCovData(cov=np.array([[ 1.021e-04, 8.034e-06, -5.538e-05, -4.492e-05, -7.479e-03],
[8.034e-06, 6.646e-07, -3.924e-06, -3.542e-06, -5.803e-04],
[-5.538e-05, -3.924e-06, 3.308e-04, 4.343e-05, 4.250e-03],
[-4.492e-05, -3.542e-06, 4.343e-05, 2.940e-05, 3.291e-03],
[-7.479e-03, -5.803e-04, 4.250e-03, 3.291e-03, 5.531e-01 ]]),
mean=np.array([ 3.114e-01, 4.888e-02, 8.460e-01, 9.669e-01, 6.758e+01 ]))
```
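The classes above are only exercised indirectly in this dump, so here is a hedged sketch of how they are meant to be wired together. The import paths are inferred from the file layout, the `MassFunction` attributes (`dndm`, `transfer_model`, and the `sigma_8` / `cosmo_params` update parameters) are assumed from the rest of the package, an emcee 2.x-era install is assumed (which is what the `threads` / `lnprob0` keywords above imply), and the "data" is just the model itself with 10% errors:

```python
import numpy as np
from hmf import hmf
from hmf.fitting.fit import MCMC, Uniform, Normal  # import path inferred from the file layout

# Fake data: the default mass function with 10% uncertainties,
# to be re-fit over sigma_8 and H0 (assumes dndm is finite here).
h = hmf.MassFunction()
data = h.dndm.copy()
sigma = 0.1 * data

priors = [Uniform("sigma_8", 0.6, 1.0),
          Normal("cosmo_params:H0", 70.0, 3.0)]

fitter = MCMC(priors=priors, data=data, quantity="dndm",
              constraints={}, sigma=sigma, verbose=0)

# fit() is a generator yielding the emcee sampler every `chunks` samples.
# nthreads=1 avoids multiprocessing/pickling issues in a quick interactive run.
for sampler in fitter.fit(h=h, nwalkers=20, nsamples=50, burnin=0,
                          nthreads=1, chunks=25):
    print(sampler.chain.shape)
```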
#### File: hmf/hmf/growth_factor.py
```python
import numpy as np
from scipy import integrate as intg
from ._framework import Component as Cmpt
from scipy.interpolate import InterpolatedUnivariateSpline as _spline
from ._utils import inherit_docstrings as _inherit
class GrowthFactor(Cmpt):
r"""
General class for a growth factor calculation.
Each of the methods in this class is defined using a numerical
integral, following [1]_.
Parameters
----------
cosmo : ``astropy.cosmology.FLRW`` instance
Cosmological model.
\*\*model_parameters : unpack-dict
Parameters specific to this model. In this case, available
parameters are as follows. To see their default values, check
the :attr:`_defaults` class attribute.
:dlna: Step-size in log-space for scale-factor integration
:amin: Minimum scale-factor (i.e. maximum redshift) to integrate to.
Only used for :meth:`growth_factor_fn`.
References
----------
.. [1] Lukic et. al., ApJ, 2007, http://adsabs.harvard.edu/abs/2007ApJ...671.1160L
"""
_defaults = {"dlna":0.01, "amin":1e-8}
def __init__(self, cosmo, **model_parameters):
self.cosmo = cosmo
super(GrowthFactor, self).__init__(**model_parameters)
def _d_plus(self, z, getvec=False):
"""
Finds the factor :math:`D^+(a)`, from Lukic et. al. 2007, eq. 8.
Parameters
----------
z : float
The redshift
getvec : bool, optional
Whether to treat `z` as a maximum redshift and return a whole vector
of values up to `z`. In this case, the minimum scale factor and the
step size are defined in :attr:`_defaults` and can be over-ridden
at instantiation.
Returns
-------
dplus : float
The un-normalised growth factor.
"""
a_upper = 1.0 / (1.0 + z)
lna = np.arange(np.log(self.params["amin"]), np.log(a_upper), self.params['dlna'])
lna = np.hstack((lna,np.log(a_upper)))
self._zvec = 1.0 / np.exp(lna) - 1.0
integrand = 1.0 / (np.exp(lna) * self.cosmo.efunc(self._zvec)) ** 3
if not getvec:
integral = intg.simps(np.exp(lna) * integrand, x=lna,even="avg")
dplus = 5.0 * self.cosmo.Om0 * self.cosmo.efunc(z) * integral / 2.0
else:
integral = intg.cumtrapz(np.exp(lna) * integrand, x=lna, initial=0.0)
dplus = 5.0 * self.cosmo.Om0 * self.cosmo.efunc(self._zvec) * integral / 2.0
return dplus
def growth_factor(self, z):
"""
Calculate :math:`d(a) = D^+(a)/D^+(a=1)`, from Lukic et. al. 2007, eq. 7.
Parameters
----------
z : float
The redshift
Returns
-------
float
The normalised growth factor.
"""
growth = self._d_plus(z)/self._d_plus(0.0)
return growth
def growth_factor_fn(self, zmin=0.0, inverse=False):
"""
Calculate :math:`d(a) = D^+(a)/D^+(a=1)`, from Lukic et. al. 2007, eq. 7.
Returns a function G(z).
Parameters
----------
zmin : float, optional
The minimum redshift of the function. Default 0.0
inverse: bool, optional
Whether to return the inverse relationship [z(g)]. Default False.
Returns
-------
callable
The normalised growth factor as a function of redshift, or
redshift as a function of growth factor if ``inverse`` is True.
"""
dp = self._d_plus(0.0, True)
growth = dp / dp[-1]
if not inverse:
s = _spline(self._zvec[::-1], growth[::-1])
else:
s = _spline(growth, self._zvec)
return s
def growth_rate(self, z):
"""
Growth rate, dln(d)/dln(a) from Hamilton 2000 eq. 4
Parameters
----------
z : float
The redshift
"""
return (-1 - self.cosmo.Om(z) / 2 + self.cosmo.Ode(z) +
5 * self.cosmo.Om(z) / (2 * self.growth_factor(z)))
def growth_rate_fn(self, zmin=0):
"""
Growth rate, dln(d)/dln(a) from Hamilton 2000 eq. 4, as callable.
Parameters
----------
zmin : float, optional
The minimum redshift of the function. Default 0.0
Returns
-------
callable
The normalised growth rate as a function of redshift.
"""
gfn = self.growth_factor_fn(zmin)
return lambda z: (-1 - self.cosmo.Om(z) / 2 + self.cosmo.Ode(z) +
5 * self.cosmo.Om(z) / (2 * gfn(z)))
@_inherit
class GenMFGrowth(GrowthFactor):
"""
Port of growth factor routines found in the ``genmf`` code.
Parameters
----------
cosmo : ``astropy.cosmology.FLRW`` instance
Cosmological model.
\*\*model_parameters : unpack-dict
Parameters specific to this model. In this case, available
parameters are as follows. To see their default values, check
the :attr:`_defaults` class attribute.
:dz: Step-size for redshift integration
:zmax: Maximum redshift to integrate to. Only used for :meth:`growth_factor_fn`.
"""
_defaults = {"dz":0.01, "zmax":1000.0}
def _d_plus(self, z, getvec=False):
"""
This is not implemented in this class. It is not
required to calculate :meth:`growth_factor`.
"""
raise NotImplementedError()
def _general_case(self, w, x):
x = np.atleast_1d(x)
xn_vec = np.linspace(0, x.max(), 1000)
func = _spline(xn_vec,(xn_vec / (xn_vec ** 3 + 2)) ** 1.5)
g = np.array([func.integral(0,y) for y in x])
return ((x ** 3.0 + 2.0) ** 0.5) * (g / x ** 1.5)
def growth_factor(self, z):
"""
The growth factor, :math:`d(a) = D^+(a)/D^+(a=1)`.
This uses an approximation only valid in closed or
flat cosmologies, ported from ``genmf``.
Parameters
----------
z : array_like
Redshift.
Returns
-------
gf : array_like
The growth factor at `z`.
"""
a = 1 / (1 + z)
w = 1 / self.cosmo.Om0 - 1.0
s = 1 - self.cosmo.Ok0
if (s > 1 or self.cosmo.Om0 < 0 or (s != 1 and self.cosmo.Ode0 > 0)):
if np.abs(s - 1.0) > 1.e-10:
raise ValueError('Cannot cope with this cosmology!')
if self.cosmo.Om0 == 1:
return a
elif self.cosmo.Ode0 > 0:
xn = (2.0 * w) ** (1.0 / 3)
aofxn = self._general_case(w, xn)
x = a * xn
aofx = self._general_case(w, x)
return aofx / aofxn
else:
dn = 1 + 3 / w + (3 * ((1 + w) ** 0.5) / w ** 1.5) * np.log((1 + w) ** 0.5 - w ** 0.5)
x = w * a
return (1 + 3 / x + (3 * ((1 + x) ** 0.5) / x ** 1.5) * np.log((1 + x) ** 0.5 - x ** 0.5)) / dn
def growth_factor_fn(self, zmin=0.0, inverse=False):
"""
Return the growth factor as a callable function.
Parameters
----------
zmin : float, optional
The minimum redshift of the function. Default 0.0
inverse: bool, optional
Whether to return the inverse relationship [z(g)]. Default False.
Returns
-------
callable
The normalised growth factor as a function of redshift, or
redshift as a function of growth factor if ``inverse`` is True.
"""
if not inverse:
return self.growth_factor
else:
self._zvec = np.arange(zmin, self.params['zmax'], self.params['dz'])
gf = self.growth_factor(self._zvec)
return _spline(gf[::-1], self._zvec[::-1])
@_inherit
class Carroll1992(GrowthFactor):
"""
Analytic approximation for the growth factor from Carroll et al. 1992.
Adapted from chomp project.
Parameters
----------
cosmo : ``astropy.cosmology.FLRW`` instance
Cosmological model.
\*\*model_parameters : unpack-dict
Parameters specific to this model. In this case, available
parameters are as follows. To see their default values, check
the :attr:`_defaults` class attribute.
:dz: Step-size for redshift spline
:zmax: Maximum redshift of spline. Only used for :meth:`growth_factor_fn`, when `inverse=True`.
"""
_defaults = {"dz":0.01, "zmax":1000.0}
def _d_plus(self, z, getvec=False):
"""
Calculate un-normalised growth factor as a function
of redshift. Note that the `getvec` argument is not
used in this function.
"""
a = 1 / (1 + z)
om = self.cosmo.Om0/a ** 3
denom = self.cosmo.Ode0 + om
Omega_m = om/denom
Omega_L = self.cosmo.Ode0/denom
coeff = 5.*Omega_m/(2./a)
term1 = Omega_m**(4./7.)
term3 = (1. + 0.5*Omega_m)*(1. + Omega_L/70.)
return coeff/(term1 - Omega_L + term3)
def growth_factor(self, z):
"""
The growth factor, :math:`d(a) = D^+(a)/D^+(a=1)`.
Parameters
----------
z : array_like
Redshift.
Returns
-------
gf : array_like
The growth factor at `z`.
"""
return self._d_plus(z)/self._d_plus(0.0)
def growth_factor_fn(self, zmin=0.0, inverse=False):
"""
Return the growth factor as a callable function.
Parameters
----------
zmin : float, optional
The minimum redshift of the function. Default 0.0
inverse: bool, optional
Whether to return the inverse relationship [z(g)]. Default False.
Returns
-------
callable
The normalised growth factor as a function of redshift, or
redshift as a function of growth factor if ``inverse`` is True.
"""
if not inverse:
return self.growth_factor
else:
self._zvec = np.arange(zmin, self.params['zmax'], self.params['dz'])
gf = self.growth_factor(self._zvec)
return _spline(gf[::-1], self._zvec[::-1])
```
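For orientation, a short sketch comparing the three growth-factor implementations above on the same astropy cosmology. The import path is inferred from the file layout; everything else uses only constructors and methods defined in this file:

```python
import numpy as np
from astropy.cosmology import Planck15
from hmf.growth_factor import GrowthFactor, GenMFGrowth, Carroll1992  # path inferred

z = np.array([0.0, 0.5, 1.0, 2.0, 5.0])

gf_int = GrowthFactor(Planck15)   # direct integral (Lukic et al. 2007)
gf_gen = GenMFGrowth(Planck15)    # genmf port, flat/closed cosmologies only
gf_car = Carroll1992(Planck15)    # Carroll et al. (1992) fitting formula

for zz in z:
    print(zz, gf_int.growth_factor(zz), gf_gen.growth_factor(zz),
          gf_car.growth_factor(zz))

# Inverse relation: redshift as a function of growth factor.
z_of_g = gf_int.growth_factor_fn(inverse=True)
print(z_of_g(0.5))
```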
#### File: hmf/hmf/integrate_hmf.py
```python
from scipy.interpolate import InterpolatedUnivariateSpline as _spline
import numpy as np
import scipy.integrate as intg
class NaNException(Exception):
pass
def hmf_integral_gtm(M, dndm, mass_density=False):
"""
Cumulatively integrate dn/dm.
Parameters
----------
M : array_like
Array of masses.
dndm : array_like
Array of dn/dm (corresponding to M)
mass_density : bool, `False`
Whether to calculate mass density (or number density).
Returns
-------
ngtm : array_like
Cumulative integral of dndm.
Examples
--------
Using a simple power-law mass function:
>>> import numpy as np
>>> m = np.logspace(10,18,500)
>>> dndm = m**-2
>>> ngtm = hmf_integral_gtm(m,dndm)
>>> np.allclose(ngtm,1/m) #1/m is the analytic integral to infinity.
True
The function always integrates to m=1e18, and extrapolates with a spline
if data not provided:
>>> m = np.logspace(10,12,500)
>>> dndm = m**-2
>>> ngtm = hmf_integral_gtm(m,dndm)
>>> np.allclose(ngtm,1/m) #1/m is the analytic integral to infinity.
True
"""
# Eliminate NaN's
m = M[np.logical_not(np.isnan(dndm))]
dndm = dndm[np.logical_not(np.isnan(dndm))]
dndlnm = m * dndm
if len(m) < 4:
raise NaNException("There are too few real numbers in dndm: len(dndm) = %s, #NaN's = %s" % (len(M), len(M) - len(dndm)))
# Calculate the mass function (and its integral) from the highest M up to 10**18
if m[-1] < m[0] * 10 ** 18 / m[3]:
m_upper = np.arange(np.log(m[-1]), np.log(10 ** 18), np.log(m[1]) - np.log(m[0]))
mf_func = _spline(np.log(m), np.log(dndlnm), k=1)
mf = mf_func(m_upper)
if not mass_density:
int_upper = intg.simps(np.exp(mf), dx=m_upper[2] - m_upper[1], even='first')
else:
int_upper = intg.simps(np.exp(m_upper + mf), dx=m_upper[2] - m_upper[1], even='first')
else:
int_upper = 0
# Calculate the cumulative integral (backwards) of [m*]dndlnm
if not mass_density:
ngtm = np.concatenate((intg.cumtrapz(dndlnm[::-1], dx=np.log(m[1]) - np.log(m[0]))[::-1], np.zeros(1)))
else:
ngtm = np.concatenate((intg.cumtrapz(m[::-1] * dndlnm[::-1], dx=np.log(m[1]) - np.log(m[0]))[::-1], np.zeros(1)))
return (ngtm + int_upper)
```
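The doctest above covers the number-density case; a similar sketch (same assumptions, with the import path inferred from the file layout) exercises the `mass_density` switch, using dn/dm = m^-3 so that n(>m) = 1/(2 m^2) and rho(>m) = 1/m analytically:

```python
import numpy as np
from hmf.integrate_hmf import hmf_integral_gtm  # import path inferred from the file layout

# dn/dm = m^-3  =>  n(>m) = 1/(2 m^2)  and  rho(>m) = 1/m  (integrated to infinity).
m = np.logspace(10, 12, 500)
dndm = m ** -3

ngtm = hmf_integral_gtm(m, dndm)
rho_gtm = hmf_integral_gtm(m, dndm, mass_density=True)

# Loose tolerance: the integral is truncated/extrapolated at m = 1e18.
print(np.allclose(2 * m ** 2 * ngtm, 1.0, rtol=1e-2))
print(np.allclose(m * rho_gtm, 1.0, rtol=1e-2))
```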
#### File: hmf/hmf/sample.py
```python
import numpy as np
from . import hmf
from scipy.interpolate import InterpolatedUnivariateSpline as _spline
def _prepare_mf(log_mmin, **mf_kwargs):
h = hmf.MassFunction(Mmin=log_mmin, **mf_kwargs)
mask = h.ngtm>0
icdf = _spline((h.ngtm[mask] / h.ngtm[0])[::-1], np.log10(h.m[mask][::-1]), k=3)
return icdf, h
def _choose_halo_masses_num(N,icdf):
# Generate random variates from 0 to maxcum
x = np.random.random(int(N))
# Generate halo masses from mf distribution
m = 10 ** icdf(x)
return m
def sample_mf(N,log_mmin,
sort=False, **mf_kwargs):
"""
Create a sample of halo masses from a theoretical mass function.
Parameters
----------
N : int
Number of samples to draw
log_mmin : float
Log10 of the minimum mass to sample [Msun/h]
sort : bool, optional
Whether to sort (in descending order of mass) the output masses.
mf_kwargs : keywords
Anything passed to :class:`hmf.MassFunction` to create the mass function
which is sampled.
Returns
-------
m : array_like
The masses
hmf : `hmf.MassFunction` instance
The instance used to define the mass function.
Examples
--------
Simplest example:
>>> m,hmf = sample_mf(1e5,11.0)
Or change the mass function:
>>> m,hmf = sample_mf(1e6,10.0,hmf_model="PS",Mmax=17)
"""
icdf, h = _prepare_mf(log_mmin, **mf_kwargs)
m = _choose_halo_masses_num(N,icdf)
if sort:
m.sort()
return m[::-1], h
def dndm_from_sample(m,V,nm=None, bins=50):
"""
Generate a binned dn/dm from a sample of halo masses.
Parameters
----------
m : array_like
A sample of masses
V : float
Physical volume of the sample
nm : array_like
A multiplicity of each of the masses -- useful for
samples from simulations in which the number of unique masses
is much smaller than the total sample.
bins : int or array
Specifies bins (in log10-space!) for the sample.
See `numpy.histogram` for more details.
Returns
-------
centres : array_like
The centres of the bins.
hist : array_like
The value of dn/dm in each bin.
Notes
-----
The "centres" of the bins are located as the midpoint in log10-space.
If one does not have the volume, it can be calculated as N/n(>mmin).
"""
hist, edges = np.histogram(np.log10(m), bins,weights=nm)
centres = (edges[1:] + edges[:-1]) / 2
dx = centres[1] - centres[0]
hist = hist.astype("float") / (10 ** centres * float(V) * dx * np.log(10))
if hist[0]==0:
try:
hist0 = np.where(hist != 0)[0][0]
hist[:hist0] = np.nan  # mask the leading empty bins, not the first occupied one
except IndexError:
pass
if hist[-1]==0:
try:
histN = np.where(hist != 0)[0][-1]
hist[(histN + 1):] = np.nan  # mask the trailing empty bins, not the last occupied one
except IndexError:
pass
return centres, hist
```
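A round-trip sketch of the two public functions above: draw a sample, re-bin it, and compare with the theoretical dn/dm. The `m`, `dndm` and `ngtm` attributes of the returned `MassFunction` are assumed from the rest of the package, and the volume is recovered as N/n(>m_min), as the notes above suggest:

```python
import numpy as np
from hmf.sample import sample_mf, dndm_from_sample  # import path inferred from the file layout

np.random.seed(1234)

# Draw 1e5 halo masses above 10^11 Msun/h from the default mass function.
m, h = sample_mf(int(1e5), 11.0)

# Effective volume of the sample, as suggested in the notes above.
V = len(m) / h.ngtm[0]

centres, hist = dndm_from_sample(m, V, bins=30)

# Compare to the theoretical dn/dm at the bin centres; only rough agreement
# is expected from a finite Poisson sample.
dndm_theory = np.interp(10 ** centres, h.m, h.dndm)
print(np.nanmedian(hist / dndm_theory))   # should be of order 1
```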
#### File: hmf/tests/test_filters.py
```python
import inspect
import os
LOCATION = os.path.dirname(os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))))
import sys
sys.path.insert(0, LOCATION)
from hmf import filters
import numpy as np
from numpy import sin, cos, pi
import warnings
from scipy.special import gamma
# Need to do the following to catch repeated warnings.
warnings.simplefilter("always",UserWarning)
class TestTopHat(object):
def __init__(self):
k = np.logspace(-6,0,10000)
pk = k**2
self.cls = filters.TopHat(k,pk)
def test_sigma(self):
R = 1.0
true =(9*R**2*sin(R)**2/2 + 9*R**2*cos(R)**2/2 + 9*R*sin(R)*cos(R)/2 - 9*sin(R)**2)/(2*pi**2*R**6)
print(true,self.cls.sigma(R)**2)
assert np.isclose(self.cls.sigma(R)[0]**2,true)
def test_sigma1(self):
R = 1.0
true = (3*R**2*sin(R)**2/2 + 3*R**2*cos(R)**2/2 + 9*R*sin(R)*cos(R)/2 - 9*sin(R)**2/4 + 45*cos(R)**2/4 - 45*sin(R)*cos(R)/(4*R))/(2*pi**2*R**6)
print(true,self.cls.sigma(R,1)**2)
assert np.isclose(self.cls.sigma(R,1)[0]**2,true)
def test_dwdlnkr(self):
x = 1.0
true = x*(3*sin(x)/x**2 - 3*(-3*x*cos(x) + 3*sin(x))/x**4)
assert np.isclose(self.cls.dw_dlnkr(x),true)
def test_dlnssdlnr(self):
R = 1.0
true = 2*R**4*(-45*sin(R)**2/(4*R**2) - 27*cos(R)**2/(4*R**2) - 81*sin(R)*cos(R)/(4*R**3) + 27*sin(R)**2/R**4)/(9*R**2*sin(R)**2/2 + 9*R**2*cos(R)**2/2 + 9*R*sin(R)*cos(R)/2 - 9*sin(R)**2)
print(true, self.cls.dlnss_dlnr(R))
assert np.isclose(self.cls.dlnss_dlnr(R),true)
class TestSharpK(object):
def __init__(self):
k = np.logspace(-6,0,10000)
pk = k**2
self.cls = filters.SharpK(k,pk)
def test_sigma(self):
R = 1.0
t = 2 + 2 + 1
true = 1./(2*pi**2 * t * R**t)
print(true,self.cls.sigma(R)**2)
assert np.isclose(self.cls.sigma(R)[0]**2,true)
def test_sigma1(self):
R = 1.0
t = 4 + 2 + 1
true = 1./(2*pi**2 * t * R**t)
print(true,self.cls.sigma(R,1)**2)
assert np.isclose(self.cls.sigma(R,1)[0]**2,true)
def test_dlnssdlnr(self):
R = 1.0
t = 2 + 2 + 1
sigma2 = 1./(2*pi**2 * t * R**t)
true = -1./(2*pi**2*sigma2 * R**(3+2))
print(true, self.cls.dlnss_dlnr(R))
assert np.isclose(self.cls.dlnss_dlnr(R),true)
def test_sigma_R3(self):
R = 3.0
t = 2 + 2 + 1
true = 1./(2*pi**2 * t * R**t)
print(true,self.cls.sigma(R)**2)
assert np.isclose(self.cls.sigma(R)[0]**2,true)
def test_sigma1_R3(self):
R = 3.0
t = 4 + 2 + 1
true = 1./(2*pi**2 * t * R**t)
print(true,self.cls.sigma(R,1)**2)
assert np.isclose(self.cls.sigma(R,1)[0]**2,true)
def test_dlnssdlnr_R3(self):
R = 3.0
t = 2 + 2 + 1
sigma2 = 1./(2*pi**2 * t * R**t)
true = -1./(2*pi**2*sigma2 * R**(3+2))
print(true, self.cls.dlnss_dlnr(R))
assert np.isclose(self.cls.dlnss_dlnr(R),true)
def test_sigma_Rhalf(self):
# should also raise a warning
R = 0.5
thisr = 1./self.cls.k.max()
t = 2 + 2 + 1
true = 1./(2*pi**2 * t * thisr**t)
with warnings.catch_warnings(record=True) as w:
s2 = self.cls.sigma(R)[0]**2
assert w
print(s2,true)
assert np.isclose(s2,true)
def test_sigma1_Rhalf(self):
# should also raise a warning
R = 0.5
thisr = 1./self.cls.k.max()
t = 4 + 2 + 1
true = 1./(2*pi**2 * t * thisr**t)
with warnings.catch_warnings(record=True) as w:
s2 = self.cls.sigma(R,1)[0]**2
assert w
print(s2,true)
assert np.isclose(s2,true)
def test_dlnssdlnr_Rhalf(self):
R = 3.0
t = 2 + 2 + 1
sigma2 = 1./(2*pi**2 * t * R**t)
true = -1./(2*pi**2*sigma2 * R**(3+2))
print(true, self.cls.dlnss_dlnr(R))
assert np.isclose(self.cls.dlnss_dlnr(R),true)
class TestGaussian(object):
def __init__(self):
k = np.logspace(-6,1,80)
pk = k**2
self.cls = filters.Gaussian(k,pk)
def test_sigma(self):
R = 10.0
true = 3./(16*pi**(3./2.)*R**5)
print(true,self.cls.sigma(R)**2)
assert np.isclose(self.cls.sigma(R)[0]**2,true)
def test_sigma1(self):
R = 10.0
true = 15/(32*pi**(3./2.)*R**7)
print(true,self.cls.sigma(R,1)**2)
assert np.isclose(self.cls.sigma(R,1)[0]**2,true)
def test_dlnssdlnr(self):
R = 10.0
true = -5
print(true, self.cls.dlnss_dlnr(R))
assert np.isclose(self.cls.dlnss_dlnr(R),true)
```
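For reference, the closed forms asserted above follow from the standard mass-variance integral evaluated with P(k) = k^2; assuming the `filters` module uses the usual conventions, the quantity being tested is

```latex
\sigma_j^2(R) \;=\; \frac{1}{2\pi^2}\int_0^\infty k^{2+2j}\,P(k)\,W^2(kR)\,\mathrm{d}k ,
\qquad
W_{\rm TH}(x)=\frac{3\,(\sin x - x\cos x)}{x^{3}},\quad
W_{\rm SK}(x)=\Theta(1-x),\quad
W_{\rm G}(x)=e^{-x^{2}/2}.
```

For example, the sharp-k window reduces the integral to \(\int_0^{1/R} k^{2j+4}\,\mathrm{d}k / 2\pi^2 = 1/\big(2\pi^2(2j+5)R^{2j+5}\big)\), which is exactly the `1./(2*pi**2 * t * R**t)` pattern asserted in `TestSharpK`.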
#### File: hmf/tests/test_framework.py
```python
import inspect
import os
LOCATION = os.path.dirname(os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))))
from nose.tools import raises
import sys
sys.path.insert(0, LOCATION)
from hmf import hmf
@raises(TypeError)
def test_incorrect_argument():
t = hmf.MassFunction(wrong_arg=3)
@raises(ValueError)
def test_incorrect_update_arg():
t = hmf.MassFunction()
t.update(wrong_arg=3)
class TestIntrospection(object):
def __init__(self):
self.cls = hmf.MassFunction
self.inst = self.cls(z=10)
def test_parameter_names(self):
assert "cosmo_model" in self.cls.get_all_parameter_names()
def test_parameter_defaults(self):
assert type(self.cls.get_all_parameter_defaults(recursive=False)) is dict
assert self.cls.get_all_parameter_defaults()['z'] == 0
def test_parameter_default_rec(self):
pd = self.cls.get_all_parameter_defaults(recursive=True)
assert type(pd['cosmo_params']) is dict
def test_param_values(self):
assert type(self.inst.parameter_values) is dict
assert self.inst.parameter_values['z'] == 10
def test_qnt_avail(self):
assert 'dndm' in self.cls.quantities_available()
def test_parameter_info(self):
assert self.cls.parameter_info() is None
assert self.cls.parameter_info(names=['z']) is None
``` |
{
"source": "jlaska/pytest-trello",
"score": 2
} |
#### File: jlaska/pytest-trello/test_pytest_trello.py
```python
import pytest
import inspect
import re
from _pytest.main import EXIT_OK, EXIT_NOTESTSCOLLECTED
pytest_plugins = 'pytester',
OPEN_CARDS = ['https://trello.com/c/open1234', 'https://trello.com/c/open4321']
CLOSED_CARDS = ['https://trello.com/c/closed12', 'https://trello.com/c/closed21']
ALL_CARDS = OPEN_CARDS + CLOSED_CARDS
def assert_outcome(result, passed=0, failed=0, skipped=0, xpassed=0, xfailed=0):
'''This method works around a limitation where pytester assertoutcome()
doesn't support xpassed and xfailed.
'''
actual_count = dict(passed=0, failed=0, skipped=0, xpassed=0, xfailed=0)
reports = filter(lambda x: hasattr(x, 'when'), result.getreports())
for report in reports:
if report.when == 'setup':
if report.skipped:
actual_count['skipped'] += 1
elif report.when == 'call':
if hasattr(report, 'wasxfail'):
if report.failed:
actual_count['xpassed'] += 1
elif report.skipped:
actual_count['xfailed'] += 1
else:
actual_count[report.outcome] += 1
else:
continue
assert passed == actual_count['passed']
assert failed == actual_count['failed']
assert skipped == actual_count['skipped']
assert xfailed == actual_count['xfailed']
assert xpassed == actual_count['xpassed']
class PyTestOption(object):
def __init__(self, config=None):
self.config = config
@property
def args(self):
args = list()
if self.config.getoption('trello_api_key') is not None:
args.append('--trello-api-key')
args.append(self.config.getoption('trello_api_key'))
if self.config.getoption('trello_api_token') is not None:
args.append('--trello-api-token')
args.append(self.config.getoption('trello_api_token'))
for completed in self.config.getoption('trello_completed'):
args.append('--trello-completed')
args.append('"%s"' % completed)
return args
def mock_trello_card_get(self, card_id, **kwargs):
'''Returns JSON representation of an trello card.'''
if card_id.startswith("closed"):
is_closed = True
else:
is_closed = False
return {
"labels": [],
"pos": 33054719,
"manualCoverAttachment": False,
"badges": {},
"id": "550c37c5226dd7241a61372f",
"idBoard": "54aeece5d8b09a1947f34050",
"idShort": 334,
"shortUrl": "https://trello.com/c/%s" % card_id,
"closed": False,
"email": "<EMAIL>",
"dateLastActivity": "2015-03-20T15:12:29.735Z",
"idList": "%s53f20bbd90cfc68effae9544" % (is_closed and 'closed' or 'open'),
"idLabels": [],
"idMembers": [],
"checkItemStates": [],
"name": "mock trello card - %s" % (is_closed and 'closed' or 'open'),
"desc": "mock trello card - %s" % (is_closed and 'closed' or 'open'),
"descData": {},
"url": "https://trello.com/c/%s" % card_id,
"idAttachmentCover": None,
"idChecklists": []
}
def mock_trello_list_get(self, list_id, **kwargs):
'''Returns JSON representation of a trello list containing open cards.'''
if list_id.startswith("closed"):
is_closed = True
else:
is_closed = False
return {
"pos": 124927.75,
"idBoard": "54aeece5d8b09a1947f34050",
"id": list_id,
"closed": False,
"name": is_closed and "Done" or "Not Done"
}
@pytest.fixture()
def option(request):
return PyTestOption(request.config)
@pytest.fixture()
def monkeypatch_trello(request, monkeypatch):
monkeypatch.delattr("requests.get")
monkeypatch.delattr("requests.sessions.Session.request")
monkeypatch.setattr('trello.cards.Cards.get', mock_trello_card_get)
monkeypatch.setattr('trello.lists.Lists.get', mock_trello_list_get)
def test_plugin_markers(testdir):
'''Verifies expected output from of py.test --markers'''
result = testdir.runpytest('--markers')
result.stdout.fnmatch_lines([
'@pytest.mark.trello(*cards): Trello card integration',
])
def test_plugin_help(testdir):
'''Verifies expected output from of py.test --help'''
result = testdir.runpytest('--help')
result.stdout.fnmatch_lines([
'pytest-trello:',
'* --trello-cfg=TRELLO_CFG',
'* --trello-api-key=TRELLO_API_KEY',
'* --trello-api-token=TRELLO_API_TOKEN',
'* --trello-completed=TRELLO_COMPLETED',
'* --show-trello-cards *',
])
def test_param_trello_cfg_without_value(testdir, option, monkeypatch_trello):
'''Verifies failure when not providing a value to the --trello-cfg parameter'''
result = testdir.runpytest(*['--trello-cfg'])
assert result.ret == 2
result.stderr.fnmatch_lines([
'*: error: argument --trello-cfg: expected one argument',
])
def test_param_trello_cfg_with_no_such_file(testdir, option, monkeypatch_trello, capsys):
'''Verifies pytest-trello ignores any bogus files passed to --trello-cfg'''
result = testdir.runpytest(*['--trello-cfg', 'asdfasdf'])
assert result.ret == EXIT_NOTESTSCOLLECTED
# FIXME - assert actual log.warning message
# No trello configuration file found matching:
def test_param_trello_cfg_containing_no_data(testdir, option, monkeypatch_trello, capsys):
'''Verifies pytest-trello ignores --trello-cfg files that contain bogus data'''
# Create bogus config file for testing
cfg_file = testdir.makefile('.txt', '')
# Run with parameter (expect pass)
result = testdir.runpytest(*['--trello-cfg', str(cfg_file)])
assert result.ret == EXIT_OK
# FIXME - assert actual log.warning message
# No trello configuration file found matching:
def test_param_trello_cfg(testdir, option, monkeypatch_trello, capsys):
'''Verifies pytest-trello loads completed info from provided --trello-cfg parameter'''
# Create trello.yml config for testing
contents = '''
trello:
key: ''
token: ''
completed:
- 'Not Done'
'''
cfg_file = testdir.makefile('.yml', contents)
# The following would normally xpass, but when completed=['Not Done'], it
# will just pass
src = """
import pytest
@pytest.mark.trello('%s')
def test_func():
assert True
""" % OPEN_CARDS[0]
result = testdir.inline_runsource(src, *['--trello-cfg', str(cfg_file)])
assert result.ret == EXIT_OK
assert_outcome(result, passed=1)
def test_param_trello_api_key_without_value(testdir, option, monkeypatch_trello, capsys):
'''Verifies failure when not passing --trello-api-key an option'''
# Run without parameter (expect fail)
result = testdir.runpytest(*['--trello-api-key'])
assert result.ret == 2
result.stderr.fnmatch_lines([
'*: error: argument --trello-api-key: expected one argument',
])
def test_param_trello_api_key_with_value(testdir, option, monkeypatch_trello, capsys):
'''Verifies success when passing --trello-api-key an option'''
result = testdir.runpytest(*['--trello-api-key', 'asdf'])
assert result.ret == EXIT_NOTESTSCOLLECTED
# TODO - would be good to assert some output
def test_param_trello_api_token_without_value(testdir, option, monkeypatch_trello, capsys):
'''Verifies failure when not passing --trello-api-token an option'''
result = testdir.runpytest(*['--trello-api-token'])
assert result.ret == 2
result.stderr.fnmatch_lines([
'*: error: argument --trello-api-token: expected one argument',
])
def test_param_trello_api_token_with_value(testdir, option, monkeypatch_trello, capsys):
'''Verifies success when passing --trello-api-token an option'''
result = testdir.runpytest(*['--trello-api-token', 'asdf'])
assert result.ret == EXIT_NOTESTSCOLLECTED
# TODO - would be good to assert some output
def test_pass_without_trello_card(testdir, option):
'''Verifies test success when no trello card is supplied'''
testdir.makepyfile("""
import pytest
def test_func():
assert True
""")
result = testdir.runpytest(*option.args)
assert result.ret == EXIT_OK
assert result.parseoutcomes()['passed'] == 1
def test_fail_without_trello_card(testdir, option):
'''Verifies test failure when no trello card is supplied'''
testdir.makepyfile("""
import pytest
def test_func():
assert False
""")
result = testdir.runpytest(*option.args)
assert result.ret == 1
assert result.parseoutcomes()['failed'] == 1
def test_success_with_open_card(testdir, option, monkeypatch_trello):
'''Verifies when a test succeeds with an open trello card'''
src = """
import pytest
@pytest.mark.trello('%s')
def test_func():
assert True
""" % OPEN_CARDS[0]
# result = testdir.runpytest(*option.args)
# assert result.ret == EXIT_OK
# assert result.parseoutcomes()['xpassed'] == 1
result = testdir.inline_runsource(src, *option.args)
assert_outcome(result, xpassed=1)
def test_success_with_open_cards(testdir, option, monkeypatch_trello):
'''Verifies when a test succeeds with open trello cards'''
src = """
import pytest
@pytest.mark.trello(*%s)
def test_func():
assert True
""" % OPEN_CARDS
# testdir.makepyfile(src)
# result = testdir.runpytest(*option.args)
# assert result.ret == EXIT_OK
# assert result.parseoutcomes()['xpassed'] == 1
result = testdir.inline_runsource(src, *option.args)
assert_outcome(result, xpassed=1)
def test_failure_with_open_card(testdir, option, monkeypatch_trello):
'''Verifies when a test fails with an open trello card'''
src = """
import pytest
@pytest.mark.trello('%s')
def test_func():
assert False
""" % OPEN_CARDS[0]
# testdir.makepyfile(src)
# result = testdir.runpytest(*option.args)
# assert result.ret == EXIT_OK
# assert result.parseoutcomes()['xfailed'] == 1
result = testdir.inline_runsource(src, *option.args)
assert_outcome(result, xfailed=1)
def test_failure_with_open_cards(testdir, option, monkeypatch_trello):
'''Verifies when a test fails with open trello cards'''
src = """
import pytest
@pytest.mark.trello(*%s)
def test_func():
assert False
""" % OPEN_CARDS
# testdir.makepyfile(src)
# result = testdir.runpytest(*option.args)
# assert result.ret == EXIT_OK
# assert result.parseoutcomes()['xfailed'] == 1
result = testdir.inline_runsource(src, *option.args)
assert_outcome(result, xfailed=1)
def test_failure_with_closed_card(testdir, option, monkeypatch_trello):
'''Verifies when a test fails with a closed trello card'''
src = """
import pytest
@pytest.mark.trello('%s')
def test_func():
assert False
""" % CLOSED_CARDS[0]
# testdir.makepyfile(src)
# result = testdir.runpytest(*option.args)
# assert result.ret == 1
# assert result.parseoutcomes()['failed'] == 1
result = testdir.inline_runsource(src, *option.args)
assert_outcome(result, failed=1)
def test_failure_with_closed_cards(testdir, option, monkeypatch_trello):
'''Verifies when a test fails with closed trello cards'''
src = """
import pytest
@pytest.mark.trello(*%s)
def test_func():
assert False
""" % CLOSED_CARDS
# testdir.makepyfile(src)
# result = testdir.runpytest(*option.args)
# assert result.ret == 1
# assert result.parseoutcomes()['failed'] == 1
result = testdir.inline_runsource(src, *option.args)
assert_outcome(result, failed=1)
def test_failure_with_open_and_closed_cards(testdir, option, monkeypatch_trello):
'''Verifies test failure with open and closed trello cards'''
src = """
import pytest
@pytest.mark.trello(*%s)
def test_func():
assert False
""" % ALL_CARDS
# testdir.makepyfile(src)
# result = testdir.runpytest(*option.args)
# assert result.ret == EXIT_OK
# assert result.parseoutcomes()['xfailed'] == 1
result = testdir.inline_runsource(src, *option.args)
assert_outcome(result, xfailed=1)
def test_skip_with_open_card(testdir, option, monkeypatch_trello):
'''Verifies skipping with an open trello card'''
src = """
import pytest
@pytest.mark.trello('%s', skip=True)
def test_func():
assert False
""" % OPEN_CARDS[0]
# testdir.makepyfile(src)
# result = testdir.runpytest(*option.args)
# assert result.ret == EXIT_OK
# assert result.parseoutcomes()['skipped'] == 1
result = testdir.inline_runsource(src, *option.args)
assert_outcome(result, skipped=1)
def test_skip_with_closed_card(testdir, option, monkeypatch_trello):
'''Verifies test failure (skip=True) with a closed trello card'''
src = """
import pytest
@pytest.mark.trello('%s', skip=True)
def test_func():
assert False
""" % CLOSED_CARDS[0]
# testdir.makepyfile(src)
# result = testdir.runpytest(*option.args)
# assert result.ret == 1
# assert result.parseoutcomes()['failed'] == 1
result = testdir.inline_runsource(src, *option.args)
assert_outcome(result, failed=1)
def test_collection_reporter(testdir, option, monkeypatch_trello, capsys):
'''Verifies trello marker collection'''
src = """
import pytest
@pytest.mark.trello(*%s)
def test_foo():
assert True
@pytest.mark.trello(*%s)
def test_bar():
assert False
""" % (CLOSED_CARDS, OPEN_CARDS)
# (items, result) = testdir.inline_genitems(src, *option.args)
result = testdir.inline_runsource(src, *option.args)
assert_outcome(result, passed=1, xfailed=1)
stdout, stderr = capsys.readouterr()
assert 'collected %s trello markers' % (len(CLOSED_CARDS) + len(OPEN_CARDS)) in stdout
def test_show_trello_report_with_no_cards(testdir, option, monkeypatch_trello, capsys):
'''Verifies the trello card report when no trello cards are collected'''
src = """
import pytest
def test_func():
assert True
"""
# Run pytest
args = option.args + ['--show-trello-cards',]
result = testdir.inline_runsource(src, *args)
# Assert exit code
assert result.ret == EXIT_OK
# Assert no tests ran
assert_outcome(result)
# Assert expected trello card report output
stdout, stderr = capsys.readouterr()
assert '= trello card report =' in stdout
assert 'No trello cards collected' in stdout
def test_show_trello_report_with_cards(testdir, option, monkeypatch_trello, capsys):
'''Verifies the trello card report when trello cards are collected'''
# Used for later introspection
cls = 'Test_Foo'
module = inspect.stack()[0][3]
method = 'test_func'
src = """
import pytest
class Test_Class():
@pytest.mark.trello(*%s)
def test_method():
assert True
@pytest.mark.trello(*%s)
def test_func():
assert True
""" % (CLOSED_CARDS, OPEN_CARDS)
# Run pytest
args = option.args + ['--show-trello-cards',]
result = testdir.inline_runsource(src, *args)
# Assert exit code
assert result.ret == EXIT_OK
# Assert no tests ran
assert_outcome(result)
# Assert expected trello card report output
stdout, stderr = capsys.readouterr()
# Assert expected banner
assert re.search(r'^={1,} trello card report ={1,}', stdout, re.MULTILINE)
# Assert expected cards in output
for card in CLOSED_CARDS:
assert re.search(r'^%s \[Done\]' % card, stdout, re.MULTILINE)
for card in OPEN_CARDS:
assert re.search(r'^%s \[Not Done\]' % card, stdout, re.MULTILINE)
# this is weird, oh well
assert ' * {0}0/{0}.py:Test_Class().test_method'.format(module) in stdout
assert ' * {0}0/{0}.py:test_func'.format(module) in stdout
``` |
{
"source": "JlassiSeif/Cave-Exploring-Robot",
"score": 3
} |
#### File: Cave-Exploring-Robot/teleop_twist_keyboard-master/teleop_twist_keyboard.py
```python
from __future__ import print_function
import threading
import roslib; roslib.load_manifest('teleop_twist_keyboard')
import rospy
from geometry_msgs.msg import Twist
import sys, select, termios, tty
msg = """
Reading from the keyboard and Publishing to Twist!
CTRL-C to quit
"""
moveBindings = {
'i':(1,0),
'k':(-1,0),
'j':(0,1),
'l':(0,-1),
}
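# Each binding is a (speed_delta, turn_delta) pair; e.g. 'i' nudges the linear
# speed by +0.5 and 'j' nudges the turn rate by +0.5 once the 0.5 scaling in
# the main loop below is applied.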
class PublishThread(threading.Thread):
def __init__(self, rate):
super(PublishThread, self).__init__()
self.publisher = rospy.Publisher('cmd_vel', Twist, queue_size = 1)
self.speed = 0.0
self.turn = 0.0
self.condition = threading.Condition()
self.done = False
# Set timeout to None if rate is 0 (causes new_message to wait forever
# for new data to publish)
if rate != 0.0:
self.timeout = 1.0 / rate
else:
self.timeout = None
self.start()
def wait_for_subscribers(self):
i = 0
while not rospy.is_shutdown() and self.publisher.get_num_connections() == 0:
if i == 4:
print("Waiting for subscriber to connect to {}".format(self.publisher.name))
rospy.sleep(0.5)
i += 1
i = i % 5
if rospy.is_shutdown():
raise Exception("Got shutdown request before subscribers connected")
def update(self, speed, turn):
self.condition.acquire()
self.speed = speed
self.turn = turn
# Notify publish thread that we have a new message.
self.condition.notify()
self.condition.release()
def stop(self):
self.done = True
self.update(0, 0)
self.join()
def run(self):
twist = Twist()
while not self.done:
self.condition.acquire()
# Wait for a new message or timeout.
self.condition.wait(self.timeout)
# Copy state into twist message.
twist.linear.x = self.speed
twist.angular.z = self.turn
self.condition.release()
# Publish.
self.publisher.publish(twist)
# Publish stop message when thread exits.
twist.linear.x = 0
twist.linear.y = 0
twist.linear.z = 0
twist.angular.x = 0
twist.angular.y = 0
twist.angular.z = 0
self.publisher.publish(twist)
def getKey(key_timeout):
tty.setraw(sys.stdin.fileno())
rlist, _, _ = select.select([sys.stdin], [], [], key_timeout)
if rlist:
key = sys.stdin.read(1)
else:
key = ''
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, settings)
return key
def vels(speed, turn):
return "currently:\tspeed %s\tturn %s " % (speed,turn)
if __name__=="__main__":
settings = termios.tcgetattr(sys.stdin)
rospy.init_node('control')
speed = rospy.get_param("~speed", 0.5)
turn = rospy.get_param("~turn", 1.0)
repeat = rospy.get_param("~repeat_rate", 0.0)
key_timeout = rospy.get_param("~key_timeout", 0.0)
if key_timeout == 0.0:
key_timeout = None
pub_thread = PublishThread(repeat)
speed=0
turn=0
try:
pub_thread.wait_for_subscribers()
print(vels(speed,turn))
while(1):
key = getKey(key_timeout)
if key in moveBindings.keys():
speed=speed+0.5*moveBindings[key][0]
turn=turn+0.5*moveBindings[key][1]
if speed>2 :
speed=2
if turn>2 :
turn=2
print(vels(speed,turn))
else:
# An empty read (timeout) with the robot already stopped needs no new command;
# any other unbound key stops the robot.
if key == '' and speed == 0 and turn == 0:
continue
speed = 0
turn = 0
if (key == '\x03'):
break
pub_thread.update(speed, turn)
except Exception as e:
print(e)
finally:
pub_thread.stop()
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, settings)
``` |
{
"source": "JlassiSeif/Line-follower-with-qlearning",
"score": 2
} |
#### File: robot_q_learning/scripts/robot.py
```python
import rospy
from geometry_msgs.msg import Twist
import cv2
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
class robot :
def __init__(self) :
self.vel = Twist()
self.cmdPub = rospy.Publisher('/cmd_vel',Twist,queue_size=1)
self.imgPub = rospy.Publisher('/image/thresh',Image,queue_size=1)
self.bridge = CvBridge()
self.CameraSub = rospy.Subscriber('/camera/image_raw',Image,self.CameraCallback)
self.yaw = 0.0
self.noContour = True
self.cx = 0
self.array = []
self.distanceFromCenter = 0
def CameraCallback(self, data) :
cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
(height,width,channels) = cv_image.shape
crop_img = cv_image[int(height/2)+100:height-100, 0:width]
gray = cv2.cvtColor(crop_img, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray,(5,5),0)
ret,thresh = cv2.threshold(blur,60,255,cv2.THRESH_BINARY_INV)
contours,hierarchy = cv2.findContours(thresh.copy(), 1, cv2.CHAIN_APPROX_NONE)
if len(contours) > 0 :
c = max(contours, key=cv2.contourArea)
M = cv2.moments(c)
self.noContour = False
if not M['m00']== 0 :
self.cx = int(M['m10']/M['m00'])
else :
self.noContour = True
# pubImage = self.bridge.cv2_to_imgmsg(thresh)
# self.imgPub.publish(pubImage)
self.distanceFromCenter=width/2-self.cx
def move(self, action, speed):
if speed == 0 :
val = 0.2
elif speed == 1 :
val = 0.5
elif speed == 2 :
val = 0.7
else :
val = 0.3
if( action == 0 ):
self.vel.linear.x = 0.25
self.vel.angular.z = 0
self.cmdPub.publish(self.vel)
if( action == 1):
self.vel.linear.x = 0.25
self.vel.angular.z = -val
self.cmdPub.publish(self.vel)
if( action == 2):
self.vel.linear.x = 0.25
self.vel.angular.z = val
self.cmdPub.publish(self.vel)
if(action == 9):
self.vel=Twist()
self.cmdPub.publish(self.vel)
def index_f(self, data) :
if -400 <= data < -280 :
return 0
elif -280 <= data < -200 :
return 1
elif -200 <= data < -120:
return 2
elif -120 <= data < -40 :
return 3
elif -40 <= data < 40 :
return 4
if 40 <= data < 120 :
return 5
elif 120 <= data < 200 :
return 6
elif 200 <= data < 280 :
return 7
else :
return 8
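# Illustrative mapping (hypothetical pixel offsets): a distanceFromCenter of
# -250 falls in state 1, 0 (line centred) in state 4, and values >= 280 or
# below -400 end up in state 8.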
def get_noContour(self):
return self.noContour
def computeState(self):
stateIndex=self.index_f(self.distanceFromCenter)
return stateIndex
``` |
{
"source": "jlat07/PandasDataTypes",
"score": 4
} |
#### File: jlat07/PandasDataTypes/salescleanup.py
```python
import pandas as pd
import numpy as np
def convert_currency(val):
"""
$125,000.00 -> 125000.00
Convert the string number value to a float
- Remove $
- Remove commas
- Convert to float type
"""
new_val = val.replace(',','').replace('$', '')
return float(new_val)
def convert_percent(val):
"""
Convert the percentage string to an actual floating point percent
"""
new_val = val.replace('%', '')
return float(new_val) / 100
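# Quick sanity examples for the converters above (hypothetical values):
#   convert_currency('$1,250.50') -> 1250.5
#   convert_percent('30%')        -> 0.3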
df_2 = pd.read_csv("https://github.com/chris1610/pbpython/blob/master/data/sales_data_types.csv?raw=True",
dtype={'Customer Number':'int'},
converters={'2016':convert_currency,
'2017': convert_currency,
'Percent Growth': convert_percent,
'Jan Units': lambda x: pd.to_numeric(x, errors='coerce'),
'Active': lambda x: np.where(x == "Y", True, False)
})
df_2["Start_Date"] = pd.to_datetime(df_2[['Month', 'Day', 'Year']])
print(df_2)
# Should output something like:
# (base) Aeneid:notebooks kristofer$ python3 ./salescleanup.py
# Customer Number Customer Name 2016 2017 Percent Growth Jan Units Month Day Year Active Start_Date
# 0 10002 Quest Industries 125000.0 162500.0 0.30 500.0 1 10 2015 True 2015-01-10
# 1 552278 Smith Plumbing 920000.0 1012000.0 0.10 700.0 6 15 2014 True 2014-06-15
# 2 23477 ACME Industrial 50000.0 62500.0 0.25 125.0 3 29 2016 True 2016-03-29
# 3 24900 Brekke LTD 350000.0 490000.0 0.04 75.0 10 27 2015 True 2015-10-27
# 4 651029 Harbor Co 15000.0 12750.0 -0.15 NaN 2 2 2014 False 2014-02-02
``` |
{
"source": "jlatko/vdvae",
"score": 2
} |
#### File: vdvae/scripts/gather_latent_covariances.py
```python
import itertools
from time import sleep
import numpy as np
import os
import torch
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm
from vdvae.data.data import set_up_data
from vdvae.train_helpers import set_up_hyperparams, load_vaes
from vdvae.constants import BASE_DIR
def all_finite(stats):
for block_idx, block_stats in enumerate(stats):
qm = block_stats["qm"]
pm = block_stats["pm"]
qv = block_stats["qv"]
pv = torch.exp(block_stats["pv"])
qstd = torch.exp(block_stats["qv"])
pstd = torch.exp(block_stats["pv"])
for x in [qm, pm, qv, pv, qstd, pstd]:
if not torch.all(torch.isfinite(x)):
return False
return True
def update_running_covariance(current_mean, new_value, n):
return current_mean + (new_value - current_mean) / (n + 1)
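# Incremental-mean sketch: the first value is stored directly (n == 0 case in
# update_latent_cov), then feeding 4 and 6 after an initial 2 gives
# 2 + (4-2)/2 = 3 and 3 + (6-3)/3 = 4, the ordinary mean of the three values.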
def get_current_stats(stats, i, cutoff_masks=None, pca=None):
current_stats = {}
for block_idx, block_stats in enumerate(stats):
qm = block_stats["qm"][i].cpu().numpy().reshape(-1)
pm = block_stats["pm"][i].cpu().numpy().reshape(-1)
qstd = torch.exp(block_stats["qv"][i]).cpu().numpy().reshape(-1)
pstd = torch.exp(block_stats["pv"][i]).cpu().numpy().reshape(-1)
if cutoff_masks is not None:
qm = qm[cutoff_masks[f"mask_{block_idx}"]]
pm = pm[cutoff_masks[f"mask_{block_idx}"]]
qstd = qstd[cutoff_masks[f"mask_{block_idx}"]]
pstd = pstd[cutoff_masks[f"mask_{block_idx}"]]
if pca is not None: # TODO: do pca ?
qm = qm @ pca[block_idx]
pm = pm @ pca[block_idx]
qstd = qstd @ pca[block_idx]
pstd = pstd @ pca[block_idx]
current_stats[f"qm_{block_idx}"] = qm
current_stats[f"pm_{block_idx}"] = pm
current_stats[f"qstd_{block_idx}"] = qstd
current_stats[f"pstd_{block_idx}"] = pstd
current_stats[f"qv_{block_idx}"] = np.power(current_stats[f"qstd_{block_idx}"], 2).reshape(-1)
current_stats[f"pv_{block_idx}"] = np.power(current_stats[f"pstd_{block_idx}"], 2).reshape(-1)
return current_stats
def update_latent_cov(means_dict, stat_dict, current_stats, n, block_pairs, keys):
deviations = {}
layers = set(i for i, j in block_pairs) | set(j for i, j in block_pairs)
for l in layers:
for k in keys:
deviations[f"{k}_{l}"] = current_stats[f"{k}_{l}"] - means_dict[f"{k}_{l}"]
for i, j in block_pairs:
for k in keys:
x = np.outer(deviations[f"{k}_{i}"], deviations[f"{k}_{j}"])
if n == 0:
stat_dict[f"{k}_{i}_{j}"] = x
else:
stat_dict[f"{k}_{i}_{j}"] = update_running_covariance(stat_dict[f"{k}_{i}_{j}"], x, n)
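# Net effect: for every block pair (i, j) and key k, stat_dict[f"{k}_{i}_{j}"]
# converges to the running average of outer(k_i - mean_i, k_j - mean_j),
# i.e. an empirical cross-covariance estimate between the two layers.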
def get_kl_cutoff_mask(means_dict, cutoff):
cutoff_masks = {}
for k in means_dict:
if "kl" in k:
i = k.split("_")[-1]
cutoff_masks[f"mask_{i}"] = means_dict[k].reshape(-1) > cutoff
return cutoff_masks
def update_means_dict(means_dict, cutoff_masks):
new_dict = {}
for k in means_dict:
i = k.split("_")[-1]
new_dict[k] = means_dict[k][cutoff_masks[f"mask_{i}"]]
return new_dict
def get_stats(H, ema_vae, data_valid, preprocess_fn):
means_dict = {}
with open(os.path.join(H.means_dir, f"{H.dataset}_latent_means.npz"), 'rb') as fh:
npz = np.load(fh)
for k in npz.keys():
means_dict[k] = npz[k].reshape(-1)
cutoff_masks = None
if H.kl_cutoff is not None:
cutoff_masks = get_kl_cutoff_mask(means_dict, H.kl_cutoff)
means_dict = update_means_dict(means_dict, cutoff_masks)
valid_sampler = DistributedSampler(data_valid, num_replicas=H.mpi_size, rank=H.rank)
stat_dict = {}
n = 0
for x in tqdm(DataLoader(data_valid, batch_size=H.n_batch, drop_last=True, pin_memory=True, sampler=valid_sampler)):
data_input, target = preprocess_fn(x)
with torch.no_grad():
stats = ema_vae.forward_get_latents(data_input, get_mean_var=True)
if not all_finite(stats):
print("encountered nan/inf, skipping")
continue
for i in range(data_input.shape[0]):
current_stats = get_current_stats(stats, i, cutoff_masks)
if H.layers_set == "small":
layers = [1,2, 3, 4, 20, 24]
block_pairs = \
list(itertools.combinations(layers, 2)) \
+ [(i, i) for i in layers]
elif H.layers_set == "mid":
layers = [1, 2, 3, 4, 5, 6, 8, 10, 20, 24, 30, 40]
block_pairs = \
list(itertools.combinations(layers, 2)) \
+ [(i, i) for i in layers]
elif H.layers_set == "in_layer_small":
layers = list(range(20)) + [24, 30, 40]
block_pairs = [(i, i) for i in layers]
elif H.layers_set == "in_layer":
layers = list(range(66))
block_pairs = [(i, i) for i in layers]
else:
raise ValueError(f"layers set {H.layers_set} unknown")
keys = ["<KEY>"]
update_latent_cov(means_dict, stat_dict, current_stats, n, block_pairs, keys)
n += 1
if H.n is not None and n >= H.n:
break
if cutoff_masks is not None:
stat_dict.update(cutoff_masks)
np.savez(os.path.join(H.destination_dir, f"{H.dataset}_{H.file_name}_{H.layers_set}.npz"), **stat_dict)
# all_stats = pd.DataFrame(all_stats)
# all_stats.to_pickle(os.path.join(H.destination_dir, f"{H.dataset}_latent_stats.pkl"))
def add_params(parser):
parser.add_argument('--destination_dir', type=str, default=f'{BASE_DIR}/vdvae/latent_stats/')
parser.add_argument('--pca_path', type=str, default=None)
parser.add_argument('--means_dir', type=str, default=None)
parser.add_argument('--file_name', type=str, default='latent_cov')
parser.add_argument('--use_train', dest='use_train', action='store_true')
parser.add_argument('-n', type=int, default=None)
parser.add_argument('--kl_cutoff', type=float, default=None)
parser.add_argument('--layers_set', type=str, default="small")
return parser
def main():
H, logprint = set_up_hyperparams(extra_args_fn=add_params)
if os.path.exists(H.destination_dir):
if len(os.listdir(H.destination_dir)) > 0:
print("WARNING: destination non-empty")
sleep(5)
print("continuing")
else:
os.makedirs(H.destination_dir)
H, data_train, data_valid_or_test, preprocess_fn = set_up_data(H)
vae, ema_vae = load_vaes(H, logprint)
if H.use_train:
dataset = data_train
else:
dataset = data_valid_or_test
if H.means_dir is None:
H.means_dir = H.destination_dir
get_stats(H, ema_vae, dataset, preprocess_fn)
if __name__ == "__main__":
main()
```
#### File: vdvae/vdvae/latents.py
```python
import logging
import os
from zipfile import BadZipFile
import numpy as np
import pandas as pd
from tqdm import tqdm
from vdvae.constants import BASE_DIR
CELEBAHQ_DIR = f"{BASE_DIR}/celebahq2/CelebAMask-HQ/"
def get_available_latents(latents_dir=f"{BASE_DIR}/vdvae/latents/"):
fname = os.listdir(latents_dir)[0]
keys = list(np.load(os.path.join(latents_dir, fname)))
latent_ids = list(sorted(set(int(k.split("_")[1]) for k in keys)))
return latent_ids
def get_latents(latents_dir, layer_ind, splits=(1,2,3), root_dir=CELEBAHQ_DIR, allow_missing=False, handle_nan=None, key="z"):
metadata = pd.read_csv(os.path.join(root_dir, 'metadata.csv'))
metadata = metadata[metadata.split.isin(splits)]
# z = np.load(os.path.join(latents_dir, f"{metadata.iloc[0].idx}.npz"))[f"{key}_{layer_ind}"]
z = np.load(os.path.join(latents_dir, f"19.npz"))[f"{key}_{layer_ind}"]
shape = [len(metadata)] + list(z.shape)
latents = np.zeros(shape, dtype=np.float32)
rows_found = []
rows_missing = []
i = 0
# for _, row in tqdm(metadata.iterrows(), total=metadata.shape[0]):
for _, row in metadata.iterrows(): # i counts only the rows actually loaded
if i % 10000 == 0 and i > 0:
print(i)
try:
z = np.load(os.path.join(latents_dir, f"{row.idx}.npz"))[f"{key}_{layer_ind}"].astype(np.float32)
if not np.isfinite(z).all():
if handle_nan == "to_num":
logging.warning(f"{row.idx}: {key}_{layer_ind} contains NaN or inf. Converting to num.")
z = np.nan_to_num(z)
elif handle_nan == "skip":
logging.warning(f"{row.idx}: {key}_{layer_ind} contains NaN or inf. Skipping.")
rows_missing.append(row)
continue
else:
raise ValueError(f"{row.idx}: {key}_{layer_ind} contains NaN or inf")
latents[i] = z
rows_found.append(row)
i += 1
except (FileNotFoundError, EOFError, BadZipFile) as e:
if allow_missing:
rows_missing.append(row)
else:
raise e
if len(rows_missing) > 0:
logging.warning(f"Missing/incorrect {len(rows_missing)}/{len(metadata)} files")
metadata = pd.DataFrame(rows_found)
latents = latents[:len(metadata)]
return latents, metadata
```
#### File: vdvae/visualization_scripts/enhance_attr_vis.py
```python
import argparse
import re
import traceback
import matplotlib
import pandas as pd
import numpy as np
import wandb
import matplotlib.pyplot as plt
from PIL import Image
import os
from vdvae.hps import Hyperparams
from vdvae.wandb_utils import _download, WANDB_USER, WANDB_DIR
from vdvae.constants import BASE_DIR
api = wandb.Api()
fig, ax = plt.subplots()
DPI = fig.dpi
plt.close()
project_viz=f"{WANDB_USER}/vae_visualizations"
project_scores = f"{WANDB_USER}/vdvae_analysis"
def get_scores(H, run_scores, attr):
files_scores = run_scores.files()
name2file_scores = {f.name: f for f in files_scores}
if f'{attr}.csv' not in name2file_scores:
print(f"{attr}.csv not found in run {run_scores.id}")
return None
path_score = _download(name2file_scores[f'{attr}.csv'], f"./.data/{run_scores.id}/")
scores = pd.read_csv(path_score)
scores = scores.set_index("layer_ind")
return scores
def get_img(H, run_viz, f):
lv_points = run_viz.config['lv_points']
path = _download(f, f"./.data/{H.run_id_viz}/")
img = Image.open(path)
return img, lv_points
def enhance_attribute_visualization(H, file, runs_scores, run_viz,
temp=0.1, size=64):
attr = re.match(r"([a-zA-Z_]+)_t.*", file.name).group(1)
scores = [get_scores(H, run, attr) for run in runs_scores]
scores = [s for s in scores if s is not None]
if len(scores) == 0:
print(f"No scores for {attr}")
scores = [pd.DataFrame()]
img, lv_points = get_img(H, run_viz, file)
f, (a0, a1) = plt.subplots(1, 2, gridspec_kw={'width_ratios': [1, 8]},
figsize=(1.5 * img.width / DPI, 1.1 * img.height / DPI))
a1.imshow(img, aspect=1)
plt.title(f"{attr} (t={temp})", fontsize=24)
scores_picked = [
[f"{df.loc[i, H.scores_key]:.3f}" if i in df.index else "?" for i in lv_points]
for df in scores
]
res_picked = [scores[0].loc[i, 'resolution'] if i in scores[0].index else "?" for i in lv_points]
yticks = [
f"{i}\n({s})\n{res}x{res}"
for i, s, res in zip(lv_points, scores_picked[0], res_picked)
]
plt.sca(a1)
plt.yticks(size / 2 + size * np.arange(len(lv_points)), yticks)
plt.xticks([size / 2, img.width - size / 2], [f"Less {attr}", f"More {attr}"])
plt.tick_params(axis='both', labelsize=18, length=0)
plt.sca(a0)
a0.set_xlim((0.45, 0.8))
a0.invert_xaxis()
a0.spines['top'].set_visible(False)
# a0.spines['right'].set_visible(False)
# a0.spines['bottom'].set_visible(False)
a0.spines['left'].set_visible(False)
# plt.axis('off')
# sns.barplot(x=lv_points, y=scores_picked, ax=a0, orient='h', width=size/2/DPI)
for j, df in enumerate(scores):
scores_picked = [df.loc[i, H.scores_key] if i in df.index else 0 for i in lv_points]
a0.barh(np.arange(len(lv_points))[::-1] * size / DPI - j * size / 3 / DPI,
scores_picked,
height= size / 4 / DPI,
color=matplotlib.cm.get_cmap("cool")(scores_picked)
)
a0.set_ylim((-size / 2 / DPI, (len(lv_points) * size - size / 2) / DPI))
plt.xlabel(H.scores_key)
plt.title(','.join(run.config['model'] for run in runs_scores), fontsize=24)
# yticks = [
# f"{i}"
# for i, s in zip(lv_points, scores_picked)
# ]
# plt.yticks(np.arange(len(lv_points))[::-1]*size/DPI, yticks);
plt.yticks([], [])
a0.yaxis.tick_right()
# a0.xaxis.set_visible(False)
plt.subplots_adjust(wspace=0.02)
plt.savefig(os.path.join(wandb.run.dir, f"{file.name.split('.')[0]}_{H.scores_key}.jpg"), bbox_inches='tight')
wandb.log({f"{attr}": wandb.Image(plt, caption=f"{file.name.split('.')[0]}_{H.scores_key}")})
return scores
def parse_args(s=None):
H = Hyperparams()
parser = argparse.ArgumentParser()
parser.add_argument('--run_name', type=str, default=None)
# parser.add_argument('--size', type=int, default=128)
# parser.add_argument('--temp', type=float, default=0.1)
parser.add_argument('--run_id_viz', type=str, default=None)
parser.add_argument('--run_id_scores', help='delimited list input',
type=lambda s: [item for item in s.split(',')], default=None)
parser.add_argument('--scores_key', type=str, default="roc_auc_score_avg")
H.update(parser.parse_args(s).__dict__)
return H
def init_wandb(H, run_viz, runs_scores):
tags = []
tags.append(str(len(runs_scores)))
if run_viz.config["grouped"]:
tags.append("grouped")
if run_viz.config["use_group_direction"]:
tags.append("group_direction")
if run_viz.config["has_attr"]:
tags.append("has_attr")
if run_viz.config["fixed"]:
tags.append("fixed")
if "latent_key" in run_viz.config:
tags.append(run_viz.config["latent_key"])
else:
tags.append("z")
# wandb.init(project='vae_visualizations', entity=WANDB_USER, dir=WANDB_DIR, tags=tags)
wandb.init(project='vae_visualizations', entity=WANDB_USER, tags=tags)
wandb.config.update({"script": "enhance"})
if H.run_name is None:
wandb.run.name = "e_" + run_viz.name + "__" + '_'.join([run.config['model'] for run in runs_scores]) + "__" + wandb.run.name.split('-')[-1]
wandb.run.save()
else:
wandb.run.name = "e_" + H.run_name + "-" + wandb.run.name.split('-')[-1]
wandb.run.save()
def main():
H = parse_args()
run_viz = api.run(f"{project_viz}/{H.run_id_viz}")
runs_scores = [
api.run(f"{project_scores}/{run_id}")
for run_id in H.run_id_scores]
init_wandb(H, run_viz, runs_scores)
temp = run_viz.config["temp"]
size = run_viz.config["size"]
print(run_viz.config)
files = run_viz.files()
name2file = {f.name: f for f in files}
for f in name2file:
if f.endswith(".png") and "/" not in f:
print(f)
try:
enhance_attribute_visualization(H, name2file[f], runs_scores, run_viz, temp=temp, size=size)
except Exception as e:
print(f"Caught error for {f}")
print(e)
traceback.print_exc()
print("continuing...")
if __name__ == "__main__":
main()
``` |
{
"source": "JLaTondre/JL-Bot",
"score": 3
} |
#### File: JL-Bot/dois/dois-prior.py
```python
import csv
import os
import re
import sys
import traceback
from mwclient import Site
#
# Configuration
#
if 'WIKI_WORKING_DIR' not in os.environ:
sys.stderr.write('ERROR: WIKI_WORKING_DIR environment variable not set\n')
sys.exit(1)
if 'WIKI_CONFIG_DIR' not in os.environ:
sys.stderr.write('ERROR: WIKI_CONFIG_DIR environment variable not set\n')
sys.exit(1)
STORAGE = os.environ['WIKI_WORKING_DIR'] + '/Dois/doi-registrants-prior'
BOTINFO = os.environ['WIKI_CONFIG_DIR'] + '/bot-info.txt'
#
# Functions
#
def extractRecords(contents):
# extract the doi information from the page contents
# prefix, crossref registrant, wikipedia registrant, crossref target, wikipedia target
records = []
for line in contents.splitlines():
prefix = ''
crossrefRegistrant = ''
wikipediaRegistrant = ''
crossrefTarget = ''
wikipediaTarget = ''
# two possible patterns
match = re.search(r'^{{JCW-DOI-prefix\|(.+?)\|(.+?)\|(.+?)\|4=Crossref = \[\[(.+?)\]\]<br/>Wikipedia = \[\[(.+?)\]\]', line)
if match:
prefix = match.group(1)
crossrefRegistrant = match.group(2)
wikipediaRegistrant = match.group(3)
crossrefTarget = match.group(4)
wikipediaTarget = match.group(5)
else:
match = re.search(r'^{{JCW-DOI-prefix\|(.+?)\|(.+?)\|(.+?)\|(.+?)}}', line)
if match:
prefix = match.group(1)
crossrefRegistrant = match.group(2)
wikipediaRegistrant = match.group(3)
crossrefTarget = 'NONE'
wikipediaTarget = match.group(4)
# if either pattern found
if (prefix):
if crossrefRegistrant == '-':
crossrefRegistrant = 'NONE'
if wikipediaRegistrant == '-':
wikipediaRegistrant = 'NONE'
if crossrefTarget == '-':
crossrefTarget = 'NONE'
if wikipediaTarget == '-':
wikipediaTarget = 'NONE'
if crossrefTarget.startswith(':'):
crossrefTarget = crossrefTarget[1:]
if wikipediaTarget.startswith(':'):
wikipediaTarget = wikipediaTarget[1:]
records.append((prefix, crossrefRegistrant, wikipediaRegistrant, crossrefTarget, wikipediaTarget))
return records
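# Illustrative (hypothetical) wikitext line handled by the second pattern:
#   '{{JCW-DOI-prefix|10.1234|Foo Press|Foo Press|Foo Journal}}'
# would be recorded as ('10.1234', 'Foo Press', 'Foo Press', 'NONE', 'Foo Journal').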
def getPages(site):
# find pages from summary page
title = 'User:JL-Bot/DOI'
page = site.pages[title]
pages = []
for line in page.text().splitlines():
match = re.search(r'^\* \[\[User:JL-Bot/DOI/\d+.\d+\|(\d+.\d+)\]\]$', line)
if match:
pages.append(match.group(1))
return pages
def getUserInfo(filename):
# read in bot userinfo
userinfo = {}
try:
with open(filename, 'r') as file:
for line in file:
match = re.search(r'^USERNAME = (.+?)\s*$', line)
if match:
userinfo['username'] = match.group(1)
match = re.search(r'^PASSWORD = (.+?)\s*$', line)
if match:
userinfo['password'] = match.group(1)
if 'username' not in userinfo:
sys.stderr.write('ERROR: username not found\n')
sys.exit(1)
if 'password' not in userinfo:
sys.stderr.write('ERROR: password not found\n')
sys.exit(1)
except Exception:
traceback.print_exc()
sys.exit(1)
return userinfo
def retrievePage(doi):
# retrieve contents of Wikipedia page
title = 'User:JL-Bot/DOI/' + doi
page = site.pages[title]
return page.text()
def writeRecords(file, records):
# write the records to output file
for record in records:
file.write('\t'.join(record) + '\n')
return
#
# Main
#
# initiate bot
userinfo = getUserInfo(BOTINFO)
try:
site = Site('en.wikipedia.org')
site.login(userinfo['username'], userinfo['password'])
except Exception:
traceback.print_exc()
sys.exit(1)
# find pages and iterate through them
pages = getPages(site)
try:
file = open(STORAGE, 'w')
for page in pages:
print('Processing', page, '...')
contents = retrievePage(page)
records = extractRecords(contents)
writeRecords(file, records)
except Exception:
traceback.print_exc()
sys.exit(1)
``` |
{
"source": "jlau323/oppia",
"score": 2
} |
#### File: jlau323/oppia/constants.py
```python
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import json
import os
import re
import python_utils
def parse_json_from_js(js_file):
"""Extracts JSON object from JS file."""
text = js_file.read()
text_without_comments = remove_comments(text)
first_bracket_index = text_without_comments.find('= {')
last_bracket_index = text_without_comments.rfind('}')
json_text = (
text_without_comments[first_bracket_index + 2:last_bracket_index + 1]
)
return json.loads(json_text)
def remove_comments(text):
"""Removes comments from given text."""
return re.sub(r' //.*\n', r'', text)
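# Illustrative sketch (hypothetical file contents): for the text
#   'var constants = { // all constants\n  "DEV_MODE": true\n};\n'
# remove_comments() strips ' // all constants' together with its newline, the
# slice between '= {' and the final '}' is '{  "DEV_MODE": true\n}', and
# parse_json_from_js() returns {'DEV_MODE': True}.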
class Constants(dict):
"""Transforms dict to object, attributes can be accessed by dot notation."""
__getattr__ = dict.__getitem__
with python_utils.open_file(os.path.join('assets', 'constants.ts'), 'r') as f:
constants = Constants(parse_json_from_js(f)) # pylint:disable=invalid-name
with python_utils.open_file('release_constants.json', 'r') as f:
release_constants = Constants(json.loads(f.read())) # pylint:disable=invalid-name
```
#### File: core/domain/email_services.py
```python
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import re
from core.domain import email_domain
from core.platform import models
import feconf
import python_utils
(email_models,) = models.Registry.import_models([models.NAMES.email])
platform_email_services = models.Registry.import_email_services()
def get_feedback_thread_reply_info_from_model(model):
"""Converts GeneralFeedbackEmailReplyToIdModel to a FeedbackThreadReplyInfo.
Args:
model: GeneralFeedbackEmailReplyToIdModel. The model to be converted.
Returns:
FeedbackThreadReplyInfo. The resulting domain object.
"""
return email_domain.FeedbackThreadReplyInfo(
model.id, model.reply_to_id)
def get_feedback_thread_reply_info_by_reply_to_id(reply_to_id):
"""Gets the domain object corresponding to the model which is fetched by
reply-to-id field.
Args:
reply_to_id: str. The reply_to_id to search for.
Returns:
FeedbackThreadReplyInfo or None. The corresponding domain object.
"""
model = email_models.GeneralFeedbackEmailReplyToIdModel.get_by_reply_to_id(
reply_to_id)
if model is None:
return None
return get_feedback_thread_reply_info_from_model(model)
def get_feedback_thread_reply_info_by_user_and_thread_ids(user_id, thread_id):
"""Gets the domain object corresponding to the model which is fetched by
user_id and thread_id.
Args:
user_id: str. The ID of the user.
thread_id: str. The ID of the thread.
Returns:
FeedbackThreadReplyInfo or None. The corresponding domain object.
"""
model = email_models.GeneralFeedbackEmailReplyToIdModel.get(
user_id, thread_id, strict=False)
if model is None:
return None
return get_feedback_thread_reply_info_from_model(model)
def _get_incoming_email_address(reply_to_id):
"""Gets the incoming email address. The client is responsible for recording
any audit logs.
Args:
reply_to_id: str. The unique id of the sender.
Returns:
str. The email address of the sender.
"""
return 'reply+%s@%s' % (reply_to_id, feconf.INCOMING_EMAILS_DOMAIN_NAME)
def _is_email_valid(email_address):
"""Determines whether an email address is valid.
Args:
email_address: str. Email address to check.
Returns:
bool. Whether the specified email address is valid.
"""
if not isinstance(email_address, python_utils.BASESTRING):
return False
stripped_address = email_address.strip()
if not stripped_address:
return False
# Regex for a valid email.
# Matches any characters before the "@" sign, a series of characters until
# a ".", and then a series of characters after the period.
regex = r'^.+@[a-zA-Z0-9-.]+\.([a-zA-Z]+|[0-9]+)$'
return re.search(regex, email_address)
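# e.g. 'alice@example.org' satisfies the pattern above, while
# 'alice.example.org' (no "@") and 'alice@' (nothing after the "@") do not.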
def _is_sender_email_valid(sender_email):
"""Gets the sender_email address and validates that it is of the form
'SENDER_NAME <SENDER_EMAIL_ADDRESS>' or 'email_address'.
Args:
sender_email: str. The email address of the sender.
Returns:
bool. Whether the sender_email is valid.
"""
split_sender_email = sender_email.split(' ')
if len(split_sender_email) < 2:
return _is_email_valid(sender_email)
email_address = split_sender_email[-1]
if not email_address.startswith('<') or not email_address.endswith('>'):
return False
return _is_email_valid(email_address[1:-1])
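# e.g. both 'Alice Sender <alice@example.org>' and a bare 'alice@example.org'
# are accepted, whereas 'Alice Sender alice@example.org' (address not wrapped
# in angle brackets) is rejected.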
def send_mail(
sender_email, recipient_email, subject, plaintext_body,
html_body, bcc_admin=False, reply_to_id=None):
"""Sends an email.
In general this function should only be called from
email_manager._send_email().
Args:
sender_email: str. The email address of the sender. This should be in
the form 'SENDER_NAME <SENDER_EMAIL_ADDRESS>' or
'SENDER_EMAIL_ADDRESS'. Format must be utf-8.
recipient_email: str. The email address of the recipient. Format must
be utf-8.
subject: str. The subject line of the email. Format must be utf-8.
plaintext_body: str. The plaintext body of the email. Format must be
utf-8.
html_body: str. The HTML body of the email. Must fit in a datastore
entity. Format must be utf-8.
bcc_admin: bool. Whether to bcc feconf.ADMIN_EMAIL_ADDRESS on the email.
reply_to_id: str|None. The unique id of the sender. Format must be
utf-8.
Raises:
Exception. The configuration in feconf.py forbids emails from being
sent.
Exception. Any recipient email address is malformed.
Exception. Any sender email address is malformed.
Exception. The email was not sent correctly. In other words, the
send_email_to_recipients() function returned False
(signifying API returned bad status code).
"""
if not feconf.CAN_SEND_EMAILS:
raise Exception('This app cannot send emails to users.')
if not _is_email_valid(recipient_email):
raise ValueError(
'Malformed recipient email address: %s' % recipient_email)
if not _is_sender_email_valid(sender_email):
raise ValueError(
'Malformed sender email address: %s' % sender_email)
bcc = [feconf.ADMIN_EMAIL_ADDRESS] if bcc_admin else None
reply_to = (
_get_incoming_email_address(reply_to_id)
if reply_to_id else '')
response = platform_email_services.send_email_to_recipients(
sender_email, [recipient_email], subject.encode(encoding='utf-8'),
plaintext_body.encode(encoding='utf-8'),
html_body.encode(encoding='utf-8'), bcc, reply_to, None)
if not response:
raise Exception((
'Email to %s failed to send. Please try again later or ' +
'contact us to report a bug at ' +
'https://www.oppia.org/contact.') % recipient_email)
def send_bulk_mail(
sender_email, recipient_emails, subject, plaintext_body, html_body):
"""Sends emails to all recipients in recipient_emails.
In general this function should only be called from
email_manager._send_bulk_mail().
Args:
sender_email: str. The email address of the sender. This should be in
the form 'SENDER_NAME <SENDER_EMAIL_ADDRESS>' or
'SENDER_EMAIL_ADDRESS'. Format must be utf-8.
recipient_emails: list(str). List of the email addresses of recipients.
Format must be utf-8.
subject: str. The subject line of the email. Format must be utf-8.
plaintext_body: str. The plaintext body of the email. Format must be
utf-8.
html_body: str. The HTML body of the email. Must fit in a datastore
entity. Format must be utf-8.
Raises:
Exception. The configuration in feconf.py forbids emails from being
sent.
Exception. Any recipient email addresses are malformed.
Exception. Any sender email address is malformed.
Exception. The emails were not sent correctly. In other words, the
send_email_to_recipients() function returned False
(signifying API returned bad status code).
"""
if not feconf.CAN_SEND_EMAILS:
raise Exception('This app cannot send emails to users.')
for recipient_email in recipient_emails:
if not _is_email_valid(recipient_email):
raise ValueError(
'Malformed recipient email address: %s' % recipient_email)
if not _is_sender_email_valid(sender_email):
raise ValueError(
'Malformed sender email address: %s' % sender_email)
response = platform_email_services.send_email_to_recipients(
sender_email, recipient_emails, subject.encode(encoding='utf-8'),
plaintext_body.encode(encoding='utf-8'),
html_body.encode(encoding='utf-8'))
if not response:
raise Exception(
'Bulk email failed to send. Please try again later or contact us ' +
'to report a bug at https://www.oppia.org/contact.')
```
#### File: core/domain/search_services_test.py
```python
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
from core.domain import collection_services
from core.domain import exp_fetchers
from core.domain import exp_services
from core.domain import rating_services
from core.domain import rights_manager
from core.domain import search_services
from core.domain import user_services
from core.platform import models
from core.tests import test_utils
import python_utils
gae_search_services = models.Registry.import_search_services()
class SearchServicesUnitTests(test_utils.GenericTestBase):
"""Test the search services module."""
EXP_ID = 'An_exploration_id'
COLLECTION_ID = 'A_collection_id'
def setUp(self):
super(SearchServicesUnitTests, self).setUp()
self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
self.signup(self.VOICE_ARTIST_EMAIL, self.VOICE_ARTIST_USERNAME)
self.signup(self.VIEWER_EMAIL, self.VIEWER_USERNAME)
self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
self.editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL)
self.voice_artist_id = self.get_user_id_from_email(
self.VOICE_ARTIST_EMAIL)
self.viewer_id = self.get_user_id_from_email(self.VIEWER_EMAIL)
self.owner = user_services.UserActionsInfo(self.owner_id)
self.set_admins([self.ADMIN_USERNAME])
self.user_id_admin = self.get_user_id_from_email(self.ADMIN_EMAIL)
def test_get_search_rank(self):
self.save_new_valid_exploration(self.EXP_ID, self.owner_id)
exp_summary = exp_fetchers.get_exploration_summary_by_id(self.EXP_ID)
base_search_rank = 20
self.assertEqual(
search_services.get_search_rank_from_exp_summary(exp_summary),
base_search_rank)
rights_manager.publish_exploration(self.owner, self.EXP_ID)
self.assertEqual(
search_services.get_search_rank_from_exp_summary(exp_summary),
base_search_rank)
rating_services.assign_rating_to_exploration(
self.owner_id, self.EXP_ID, 5)
exp_summary = exp_fetchers.get_exploration_summary_by_id(self.EXP_ID)
self.assertEqual(
search_services.get_search_rank_from_exp_summary(exp_summary),
base_search_rank + 10)
rating_services.assign_rating_to_exploration(
self.user_id_admin, self.EXP_ID, 2)
exp_summary = exp_fetchers.get_exploration_summary_by_id(self.EXP_ID)
self.assertEqual(
search_services.get_search_rank_from_exp_summary(exp_summary),
base_search_rank + 8)
def test_search_ranks_cannot_be_negative(self):
self.save_new_valid_exploration(self.EXP_ID, self.owner_id)
exp_summary = exp_fetchers.get_exploration_summary_by_id(self.EXP_ID)
base_search_rank = 20
self.assertEqual(
search_services.get_search_rank_from_exp_summary(exp_summary),
base_search_rank)
# A user can (down-)rate an exploration at most once.
for i in python_utils.RANGE(50):
rating_services.assign_rating_to_exploration(
'user_id_1', self.EXP_ID, 1)
exp_summary = exp_fetchers.get_exploration_summary_by_id(self.EXP_ID)
self.assertEqual(
search_services.get_search_rank_from_exp_summary(exp_summary),
base_search_rank - 5)
for i in python_utils.RANGE(50):
rating_services.assign_rating_to_exploration(
'user_id_%s' % i, self.EXP_ID, 1)
# The rank will be at least 0.
exp_summary = exp_fetchers.get_exploration_summary_by_id(self.EXP_ID)
self.assertEqual(search_services.get_search_rank_from_exp_summary(
exp_summary), 0)
def test_search_explorations(self):
expected_query_string = 'a query string'
expected_cursor = 'cursor'
expected_sort = 'title'
expected_limit = 30
expected_result_cursor = 'rcursor'
doc_ids = ['id1', 'id2']
def mock_search(
query_string, index, cursor=None, limit=20, sort='',
ids_only=False, retries=3):
self.assertEqual(query_string, expected_query_string)
self.assertEqual(index, search_services.SEARCH_INDEX_EXPLORATIONS)
self.assertEqual(cursor, expected_cursor)
self.assertEqual(limit, expected_limit)
self.assertEqual(sort, expected_sort)
self.assertEqual(ids_only, True)
self.assertEqual(retries, 3)
return doc_ids, expected_result_cursor
with self.swap(gae_search_services, 'search', mock_search):
result, cursor = search_services.search_explorations(
expected_query_string,
expected_limit,
sort=expected_sort,
cursor=expected_cursor,
)
self.assertEqual(cursor, expected_result_cursor)
self.assertEqual(result, doc_ids)
def test_search_collections(self):
expected_query_string = 'a query string'
expected_cursor = 'cursor'
expected_sort = 'title'
expected_limit = 30
expected_result_cursor = 'rcursor'
doc_ids = ['id1', 'id2']
def mock_search(
query_string, index, cursor=None, limit=20, sort='',
ids_only=False, retries=3):
self.assertEqual(query_string, expected_query_string)
self.assertEqual(
index, collection_services.SEARCH_INDEX_COLLECTIONS)
self.assertEqual(cursor, expected_cursor)
self.assertEqual(limit, expected_limit)
self.assertEqual(sort, expected_sort)
self.assertEqual(ids_only, True)
self.assertEqual(retries, 3)
return doc_ids, expected_result_cursor
with self.swap(gae_search_services, 'search', mock_search):
result, cursor = search_services.search_collections(
expected_query_string,
expected_limit,
sort=expected_sort,
cursor=expected_cursor,
)
self.assertEqual(cursor, expected_result_cursor)
self.assertEqual(result, doc_ids)
def test_demo_collections_are_added_to_search_index(self):
results = search_services.search_collections('Welcome', 2)[0]
self.assertEqual(results, [])
collection_services.load_demo('0')
results = search_services.search_collections('Welcome', 2)[0]
self.assertEqual(results, ['0'])
def test_demo_explorations_are_added_to_search_index(self):
results, _ = search_services.search_explorations('Welcome', 2)
self.assertEqual(results, [])
exp_services.load_demo('0')
results, _ = search_services.search_explorations('Welcome', 2)
self.assertEqual(results, ['0'])
def test_clear_exploration_search_index(self):
exp_services.load_demo('0')
result = search_services.search_explorations('Welcome', 2)[0]
self.assertEqual(result, ['0'])
search_services.clear_exploration_search_index()
result = search_services.search_explorations('Welcome', 2)[0]
self.assertEqual(result, [])
def test_clear_collection_search_index(self):
collection_services.load_demo('0')
result = search_services.search_collections('Welcome', 2)[0]
self.assertEqual(result, ['0'])
search_services.clear_collection_search_index()
result = search_services.search_collections('Welcome', 2)[0]
self.assertEqual(result, [])
def test_delete_explorations_from_search_index(self):
def _mock_delete_docs(ids, index):
"""Mocks delete_documents_from_index()."""
self.assertEqual(ids, [self.EXP_ID])
self.assertEqual(index, search_services.SEARCH_INDEX_EXPLORATIONS)
delete_docs_counter = test_utils.CallCounter(_mock_delete_docs)
delete_docs_swap = self.swap(
gae_search_services, 'delete_documents_from_index',
delete_docs_counter)
with delete_docs_swap:
search_services.delete_explorations_from_search_index([self.EXP_ID])
self.assertEqual(delete_docs_counter.times_called, 1)
def test_delete_collections_from_search_index(self):
def _mock_delete_docs(ids, index):
"""Mocks delete_documents_from_index()."""
self.assertEqual(ids, [self.COLLECTION_ID])
self.assertEqual(index, search_services.SEARCH_INDEX_COLLECTIONS)
delete_docs_counter = test_utils.CallCounter(_mock_delete_docs)
delete_docs_swap = self.swap(
gae_search_services, 'delete_documents_from_index',
delete_docs_counter)
with delete_docs_swap:
search_services.delete_collections_from_search_index(
[self.COLLECTION_ID])
self.assertEqual(delete_docs_counter.times_called, 1)
```
#### File: core/domain/wipeout_domain_test.py
```python
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
from core.domain import user_services
from core.domain import wipeout_domain
from core.tests import test_utils
import utils
class PendingDeletionRequestUnitTests(test_utils.GenericTestBase):
"""Tests for pending deletion request domain objects."""
def setUp(self):
super(PendingDeletionRequestUnitTests, self).setUp()
self.signup('<EMAIL>', 'A')
self.signup('<EMAIL>', 'B')
self.user_id_a = self.get_user_id_from_email('<EMAIL>')
self.role = user_services.get_user_settings(self.user_id_a).role
def test_create_default_pending_deletion_request(self):
"""Tests the create_default() function."""
default_pending_deletion = (
wipeout_domain.PendingDeletionRequest.create_default(
self.user_id_a, '<EMAIL>', self.role))
self.assertEqual(default_pending_deletion.user_id, self.user_id_a)
self.assertEqual(default_pending_deletion.email, '<EMAIL>')
self.assertEqual(default_pending_deletion.role, self.role)
self.assertEqual(default_pending_deletion.deletion_complete, False)
self.assertEqual(
default_pending_deletion.pseudonymizable_entity_mappings, {})
def test_validate_fails_for_wrong_key_in_activity_mappings(self):
"""Tests that validate() fails for a wrong key in pseudonymizable_entity_mappings."""
pending_deletion_request = (
wipeout_domain.PendingDeletionRequest.create_default(
self.user_id_a, '<EMAIL>', self.role))
pending_deletion_request.pseudonymizable_entity_mappings = {
'wrong_key': {}
}
with self.assertRaisesRegexp(
utils.ValidationError,
'pseudonymizable_entity_mappings contain wrong key'
):
pending_deletion_request.validate()
``` |
{
"source": "jlauener/Legend-of-Lanea",
"score": 3
} |
#### File: Legend-of-Lanea/src/main.py
```python
import upygame as pygame
import urandom as random
import umachine as machine
import data
import data_type as dt
import gfx
import sfx
import gc
pygame.display.init(False)
pygame.display.set_palette_16bit([
4195,16678,12717,19017,13092,33382,53801,29580,23545,54245,33972,27973,28185,54611,57003,57210
]);
screen = pygame.display.set_mode()
SCREEN_WIDTH = 110
SCREEN_HEIGHT = 88
#SKIP_INTRO = False
#DEBUG_SWORD = True
#WORLD_DATA = data.world
TILE_WIDTH = 16
TILE_HEIGHT = 16
MAP_WIDTH = 32
MAP_HEIGHT = 32
MAP_WIDTH_PX = MAP_WIDTH * TILE_WIDTH
MAP_HEIGHT_PX = MAP_HEIGHT * TILE_HEIGHT
DIR_NONE = -1
DIR_LEFT = 0
DIR_RIGHT = 1
DIR_UP = 2
DIR_DOWN = 3
STATE_IDLE = 0
STATE_WALK = 1
STATE_ATTACK = 2
STATE_DIE = 3
STATE_HURT = 4
STATE_COLLECT = 5
STATE_SLEEP = 6
STATE_DEAD = 7
TEXT_RECT = pygame.Rect(0, 46, 110, 42)
# Temporary rect to prevent filling up GC (most likely a bad practice...)
rect_1 = pygame.Rect(0, 0, 0, 0)
rect_2 = pygame.Rect(0, 0, 0, 0)
class Camera:
def __init__(self, bounds):
self.bounds = bounds
self.x = 0
self.y = 0
self.look_x = 0
self.look_y = 0
self.shake_x = 0
self.shake_y = 0
self.bounce_x = 0
self.bounce_y = 0
def look_at(self, x, y):
self.look_x = x
self.look_y = y
def shake(self, x, y):
self.shake_x = x
if self.shake_x < 0: self.shake_x = -self.shake_x
self.shake_y = y
if self.shake_y < 0: self.shake_y = -self.shake_y
def bounce(self, x, y):
self.bounce_x = x
self.bounce_y = y
def update(self):
self.x = self.look_x
self.y = self.look_y
if self.shake_x > 0:
self.x += rand_range(-self.shake_x, self.shake_x)
self.shake_x -= 1
if self.shake_y > 0:
self.y += rand_range(-self.shake_y, self.shake_y)
self.shake_y -= 1
self.x += self.bounce_x
if self.bounce_x > 0: self.bounce_x -= 1
elif self.bounce_x < 0: self.bounce_x += 1
self.y += self.bounce_y
if self.bounce_y > 0: self.bounce_y -= 1
elif self.bounce_y < 0: self.bounce_y += 1
if self.x < self.bounds.x: self.x = self.bounds.x
elif self.x > self.bounds.x + self.bounds.width - SCREEN_WIDTH:
self.x = self.bounds.x + self.bounds.width - SCREEN_WIDTH
if self.y < self.bounds.x: self.y = self.bounds.y
elif self.y > self.bounds.y + self.bounds.height - SCREEN_HEIGHT:
self.y = self.bounds.y + self.bounds.height - SCREEN_HEIGHT
camera = Camera(pygame.Rect(0, 0, MAP_WIDTH * TILE_WIDTH, MAP_HEIGHT * TILE_HEIGHT))
sound = pygame.mixer.Sound()
def draw_text_centered(x, y, text, color):
machine.draw_text(x - len(text) * 2, y, text, color)
def rand_int(max):
return random.getrandbits(16) % max
def rand_range(min, max):
return min + random.getrandbits(16) % (max - min)
#def rand_dir():
# rnd = rand_int(4)
# if rnd == 0:
# return DIR_LEFT
# elif rnd == 1:
# return DIR_RIGHT
# elif rnd == 2:
# return DIR_UP
# else:
# return DIR_DOWN
def rand_dir_xy():
rnd = rand_int(8)
if rnd == 0: return -1, 0
elif rnd == 1: return 1, 0
elif rnd == 2: return 0, -1
elif rnd == 3: return 0, 1
elif rnd == 4: return -1, -1
elif rnd == 5: return 1, 1
elif rnd == 6: return 1, -1
else: return -1, 1
def get_dir_xy(dir):
if dir == DIR_LEFT:
return -1, 0
elif dir == DIR_RIGHT:
return 1, 0
elif dir == DIR_UP:
return 0, -1
else:
return 0, 1
def get_dir(dx, dy):
if dx < 0: return DIR_LEFT
elif dx > 0 : return DIR_RIGHT
elif dy < 0: return DIR_UP
elif dy > 0: return DIR_DOWN
else: return DIR_NONE
def get_opposite_dir(dir):
if dir == DIR_LEFT:
return DIR_RIGHT
elif dir == DIR_RIGHT:
return DIR_LEFT
elif dir == DIR_UP:
return DIR_DOWN
else:
return DIR_UP
def get_tile_at(map_data, x, y):
if x % 2 == 0:
return (map_data[y * MAP_WIDTH // 2 + x // 2] & 0xF0) >> 4
else:
return map_data[y * MAP_WIDTH // 2 + x // 2] & 0x0F
def set_tile_at(map_data, x, y, tid):
index = y * MAP_WIDTH // 2 + x // 2
if x % 2 == 0:
map_data[index] = (map_data[index] & 0x0F) | tid << 4
else:
map_data[index] = (map_data[index] & 0xF0) | tid
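# Nibble-packing sketch: each byte of map_data stores two 4-bit tile ids, the
# even x column in the high nibble and the odd x column in the low nibble;
# e.g. a byte of 0x3A holds tile 0x3 at the even column and tile 0xA at the
# following odd one.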
class Entity:
def __init__(self, tile_x, tile_y, entity_data):
self.x = tile_x * TILE_WIDTH + TILE_WIDTH // 2
self.y = tile_y * TILE_HEIGHT + TILE_HEIGHT
self.data = entity_data
def move_by(self, dx, dy):
collide = False
if dx != 0:
if not game.map_collide(self.x + dx, self.y, self.data.hitbox):
self.x += dx
else:
collide = True
if dy != 0:
if not game.map_collide(self.x, self.y + dy, self.data.hitbox):
self.y += dy
else:
collide = True
return collide
def collide_with(self, x, y, other):
rect_1.x = x + self.data.hitbox.x
rect_1.y = y + self.data.hitbox.y
rect_1.width = self.data.hitbox.width
rect_1.height = self.data.hitbox.height
rect_2.x = other.x + other.data.hitbox.x
rect_2.y = other.y + other.data.hitbox.y
rect_2.width = other.data.hitbox.width
rect_2.height = other.data.hitbox.height
return rect_1.colliderect(rect_2)
def collide_with_rect(self, rect):
rect_2.x = self.x + self.data.hitbox.x
rect_2.y = self.y + self.data.hitbox.y
rect_2.width = self.data.hitbox.width
rect_2.height = self.data.hitbox.height
return rect.colliderect(rect_2)
def draw_anim(self, x, y, anim, interval, loop):
screen.blit(anim[self.frame_index], x - camera.x, y - camera.y)
if not game.text: # FIXME this is a bit hackish, don't play anims while text is shown
self.anim_counter += 1
if self.anim_counter == interval:
self.anim_counter = 0
self.frame_index += 1
if self.frame_index == len(anim):
if loop: self.frame_index = 0
else:
self.frame_index -=1 # Keep the last frame (FIXME?).
return True # Indicate a one-shot anim is finished.
class Player(Entity):
def __init__(self, tile_x, tile_y, entity_data):
Entity.__init__(self, tile_x, tile_y, entity_data)
self.dir = DIR_DOWN
self.state = STATE_IDLE
self.state_counter = 0
self.anim_counter = 0
self.frame_index = 0
self.immune_counter = 0
def attack(self):
attack_hitbox = self.data.attack_hitbox[self.dir]
rect_1.x = self.x + attack_hitbox.x
rect_1.y = self.y + attack_hitbox.y
rect_1.width = attack_hitbox.width
rect_1.height = attack_hitbox.height
# Special case: signpost (SHOULD BE RENAMED)
for e in game.signposts:
if e.collide_with_rect(rect_1):
e.activate()
return
if not game.has_sword:
# TODO feedback?
return
self.state = STATE_ATTACK
self.state_counter = 8
damaged_enemy = False
for e in game.enemies:
if e.collide_with_rect(rect_1) and e.damage(1, self.dir, 10):
damaged_enemy = True
damaged_any = damaged_enemy
for e in game.bushes:
if e.collide_with_rect(rect_1) and e.damage(1, self.dir, 10):
damaged_any = True
if damaged_enemy:
dx, dy = get_dir_xy(self.dir)
camera.shake(dx * 4, dy * 4)
if damaged_any:
sound.play_sfx(sfx.attack_hit, len(sfx.attack_hit), True)
else:
sound.play_sfx(sfx.attack_miss, len(sfx.attack_miss), True)
def damage(self, value):
if self.immune_counter > 0: return
if self.state == STATE_DEAD: return
game.hp -= value
if game.hp <= 0:
self.state = STATE_DEAD
game.hp = 0
camera.shake(10, 10)
else:
self.state = STATE_HURT
self.state_counter = 6
self.immune_counter = 24
camera.shake(8, 8)
sound.play_sfx(sfx.hurt, len(sfx.hurt), True)
def collect(self, collectible):
if collectible.data.collectible_id == dt.ENTITY_KEY:
game.key += 1
if game.world.text_key: game.show_text(game.world.text_key)
sound.play_sfx(sfx.collect, len(sfx.collect), True)
elif collectible.data.collectible_id == dt.ENTITY_POTION:
game.hp += 1
if game.world.text_potion: game.show_text(game.world.text_potion)
sound.play_sfx(sfx.heal, len(sfx.heal), True)
elif collectible.data.collectible_id == dt.ENTITY_SWORD:
game.has_sword = True
self.collected_sprite = gfx.collectible_sword_collect
if game.world.text_sword: game.show_text(game.world.text_sword)
sound.play_sfx(sfx.collect, len(sfx.collect), True)
game.current_map.remove_collectible_at(collectible.tile_x, collectible.tile_y)
self.collected_sprite = collectible.data.sprite_collect
self.state = STATE_COLLECT
self.state_counter = 4
return True
def use_key(self):
if game.key == 0: return False
game.key -= 1
return True
def update(self):
if self.state == STATE_DEAD: return
if self.immune_counter > 0: self.immune_counter -= 1
if self.state == STATE_ATTACK:
self.state_counter -= 1
if self.state_counter == 0:
self.state = STATE_IDLE
self.anim_counter = 0
self.frame_index = 0
elif self.state == STATE_COLLECT:
self.state_counter -= 1
if self.state_counter == 0:
self.state = STATE_IDLE
self.anim_counter = 0
self.frame_index = 0
else:
if input_x != 0: self.move_by_and_collide(input_x, 0)
if input_y != 0: self.move_by_and_collide(0, input_y)
if self.state == STATE_HURT:
self.state_counter -= 1
if self.state_counter == 0:
self.state = STATE_IDLE
self.anim_counter = 0
self.frame_index = 0
elif input_a:
self.attack()
#elif input_b:
# self.act()
elif input_x < 0:
self.dir = DIR_LEFT
self.state = STATE_WALK
elif input_x > 0:
self.dir = DIR_RIGHT
self.state = STATE_WALK
elif input_y < 0:
self.dir = DIR_UP
self.state = STATE_WALK
elif input_y > 0:
self.dir = DIR_DOWN
self.state = STATE_WALK
else:
self.state = STATE_IDLE
self.anim_counter = 0
self.frame_index = 0
def move_by_and_collide(self, dx, dy):
blocked = False
for e in game.doors:
if self.collide_with(self.x + dx, self.y + dy, e) and e.hit(self): blocked = True
for e in game.bushes:
if self.collide_with(self.x + dx, self.y + dy, e) and e.hit(self): blocked = True
for e in game.signposts:
if self.collide_with(self.x + dx, self.y + dy, e):
self.signpost = e
blocked = True
# Check if the player goes out of the map
if dx < 0 and self.x < 8: game.load_next_map(-1, 0)
elif dx > 0 and self.x > MAP_WIDTH_PX - 8: game.load_next_map(1, 0)
elif dy < 0 and self.y < 16: game.load_next_map(0, -1)
elif dy > 0 and self.y > MAP_HEIGHT_PX - 1: game.load_next_map(0, 1)
elif not blocked: self.move_by(dx, dy)
def draw(self):
if self.immune_counter > 0 and self.state != STATE_HURT:
# Blink while immune.
if (self.immune_counter // 3) % 2 == 0:
return
if self.state == STATE_IDLE:
screen.blit(self.data.anim_idle[self.dir], (self.x - 8) - camera.x, (self.y - 16) - camera.y)
elif self.state == STATE_WALK:
self.draw_anim(self.x - 8, self.y - 16, self.data.anim_walk[self.dir], 8, True)
elif self.state == STATE_COLLECT:
screen.blit(gfx.player_collect, (self.x - 8) - camera.x, (self.y - 16) - camera.y)
screen.blit(self.collected_sprite, (self.x - 9) - camera.x, (self.y - 19) - camera.y)
elif self.state == STATE_ATTACK:
if self.state_counter < 4:
screen.blit(self.data.anim_idle[self.dir], (self.x - 8) - camera.x, (self.y - 16) - camera.y)
else:
if self.dir == DIR_UP:
screen.blit(gfx.item_sword[DIR_UP], (self.x + 1) - camera.x, (self.y - 19) - camera.y)
screen.blit(self.data.anim_attack[self.dir], (self.x - 8) - camera.x, (self.y - 16) - camera.y)
if self.dir == DIR_LEFT:
screen.blit(gfx.item_sword[DIR_LEFT], (self.x - 15) - camera.x, (self.y - 9) - camera.y)
elif self.dir == DIR_RIGHT:
screen.blit(gfx.item_sword[DIR_RIGHT], (self.x + 6) - camera.x, (self.y - 9) - camera.y)
elif self.dir == DIR_DOWN:
screen.blit(gfx.item_sword[DIR_DOWN], (self.x - 6) - camera.x, (self.y - 3) - camera.y)
elif self.state == STATE_HURT:
screen.blit(self.data.anim_hurt[self.dir], (self.x - 8) - camera.x, (self.y - 16) - camera.y)
elif self.state == STATE_SLEEP:
screen.blit(gfx.player_sleep, (self.x - 8) - camera.x, (self.y - 16) - camera.y)
elif self.state == STATE_DEAD:
screen.blit(gfx.player_dead, (self.x - 8) - camera.x, (self.y - 16) - camera.y)
class Enemy(Entity):
def __init__(self, tile_x, tile_y, entity_data):
Entity.__init__(self, tile_x, tile_y, entity_data)
self.hp = entity_data.hp
self.dir_x = 0
self.dir_y = 1
self.dir = DIR_DOWN
self.state = STATE_IDLE
self.state_counter = 1
self.move_counter = 0
self.anim_counter = 0
self.frame_index = 0
def damage(self, value, dir, knockback):
if self.hp == 0: return False
self.hp -= value
if self.hp < 0: self.hp = 0
self.dir_x, self.dir_y = get_dir_xy(dir)
self.dir = get_opposite_dir(dir)
self.state = STATE_HURT
self.state_counter = knockback
return True
def update(self):
dist_x = self.x - game.player.x
if dist_x < 0: dist_x = -dist_x
if dist_x > 60: return
dist_y = self.y - game.player.y
if dist_y < 0: dist_y = -dist_y
if dist_y > 50: return
#if dist_x + dist_y > 128: return
if self.state == STATE_WALK:
self.state_counter -= 1
if self.state_counter == 0:
self.state = STATE_IDLE
self.state_counter = rand_range(self.data.idle_duration_min, self.data.idle_duration_max)
else:
self.move_counter += 1
if self.move_counter == 2: # FIXME add value for that (removed to gain some RAM)
if self.move_by(self.dir_x, self.dir_y):
self.state = STATE_IDLE
self.state_counter = rand_range(self.data.idle_duration_min, self.data.idle_duration_max)
self.move_counter = 0
elif self.state == STATE_IDLE:
self.state_counter -= 1
if self.state_counter == 0:
dir_x, dir_y = rand_dir_xy()
while dir_x == self.dir_x and dir_y == self.dir_y:
dir_x, dir_y = rand_dir_xy()
self.dir_x = dir_x
self.dir_y = dir_y
if self.dir_x < 0: self.dir = DIR_LEFT
elif self.dir_x > 0: self.dir = DIR_RIGHT
elif self.dir_y < 0: self.dir = DIR_UP
elif self.dir_y > 0: self.dir = DIR_DOWN
self.state = STATE_WALK
self.state_counter = rand_range(self.data.walk_duration_min, self.data.walk_duration_max)
self.anim_counter = 0
self.frame_index = 0
elif self.state == STATE_HURT:
self.move_by(self.dir_x * (self.state_counter // 3), self.dir_y * (self.state_counter // 3))
self.state_counter -= 1
if self.state_counter == 0:
if self.hp == 0:
self.state = STATE_DIE
self.state_counter = 12
self.anim_counter = 0
self.frame_index = 0
camera.bounce(0, 3)
sound.play_sfx(sfx.enemy_die, len(sfx.enemy_die), True)
else:
self.state = STATE_IDLE
self.state_counter = 20
elif self.state == STATE_DIE:
self.state_counter -= 1
if self.state_counter == 0:
game.enemies.remove(self)
if self.state == STATE_IDLE or self.state == STATE_WALK:
if self.collide_with(self.x, self.y, game.player):
game.player.damage(1)
def draw(self):
if self.state == STATE_IDLE:
screen.blit(self.data.anim_idle[self.dir], (self.x - 8) - camera.x, (self.y - 13) - camera.y)
elif self.state == STATE_WALK:
self.draw_anim(self.x - 8, self.y - 13, self.data.anim_walk[self.dir], 4, True)
elif self.state == STATE_HURT:
if self.state_counter < 8:
screen.blit(self.data.anim_idle[self.dir], (self.x - 8) - camera.x, (self.y - 13) - camera.y)
else:
screen.blit(self.data.anim_hurt[self.dir], (self.x - 8) - camera.x, (self.y - 13) - camera.y)
elif self.state == STATE_DIE:
self.draw_anim(self.x - 4, self.y - 9, gfx.fx_enemy_die, 3, False)
class Collectible(Entity):
def __init__(self, tile_x, tile_y, entity_data):
Entity.__init__(self, tile_x, tile_y, entity_data)
self.tile_x = tile_x
self.tile_y = tile_y
def update(self):
if self.collide_with(self.x, self.y, game.player) and game.player.collect(self):
# TODO collect anim
game.collectibles.remove(self)
def draw(self):
screen.blit(self.data.sprite_idle, (self.x - 5) - camera.x, (self.y - 14) - camera.y)
# Create a single big entity to gain RAM ...
class GenericEntity(Entity):
def __init__(self, tile_x, tile_y, entity_data):
Entity.__init__(self, tile_x, tile_y, entity_data)
if entity_data == data.bush:
self.alive = True
elif entity_data == data.door:
self.closed = True
self.anim_counter = 0
self.frame_index = 0
elif entity_data == data.switch:
self.on = False
elif entity_data == data.barrier:
self.switch_count = 2 # FIXME
elif entity_data == data.spike:
self.up = (tile_x % 2 == 0 and tile_y % 2 == 0) or (tile_x % 2 == 1 and tile_y % 2 == 1)
if self.up: self.counter = 40
else: self.counter = 55 # Shift start to sync the spikes: (70-40) / 2 = 15
def hit(self, player):
if self.data == data.bush:
return self.alive
elif self.data == data.door:
if self.closed and player.use_key():
self.closed = False
sound.play_sfx(sfx.attack_miss, len(sfx.attack_miss), True)
return self.closed
elif self.data == data.barrier:
return True
def damage(self, value, dir, knockback):
if self.data == data.bush:
if not self.alive: return False
self.alive = False
self.anim_counter = 0
self.frame_index = 0
return True
def activate(self):
if self.data == data.switch:
if not self.on:
self.on = True
game.barrier.activate() # FIXME
sound.play_sfx(sfx.attack_miss, len(sfx.attack_miss), True)
else:
game.show_text([['It seems that', 'it\'s activated.']])
elif self.data == data.barrier:
self.switch_count -= 1
if self.switch_count == 0: game.doors.remove(self)
elif self.data == data.signpost:
game.show_text(self.text)
sound.play_sfx(sfx.select, len(sfx.select), True)
def update(self):
#if self.data == data.spike: # small optimization to make dungeon a bit faster...
if self.up and self.counter > 6 and self.collide_with(self.x, self.y, game.player):
game.player.damage(1)
self.counter -= 1
if self.counter == 0:
self.up = not self.up
if self.up: self.counter = 40
else: self.counter = 70
def draw(self):
if self.data == data.bush:
if self.alive:
screen.blit(gfx.bush_idle, (self.x - 8) - camera.x, (self.y - 15) - camera.y)
else:
if self.draw_anim(self.x - 4, self.y - 13, gfx.fx_enemy_die, 3, False):
game.bushes.remove(self)
elif self.data == data.door:
if self.closed:
screen.blit(gfx.door_closed, (self.x - 8) - camera.x, (self.y - 16) - camera.y)
else:
self.draw_anim(self.x - 8, self.y - 16, self.data.anim_open, 4, False)
elif self.data == data.switch:
if self.on:
screen.blit(gfx.switch_on, (self.x - 7) - camera.x, (self.y - 12) - camera.y)
else:
screen.blit(gfx.switch_off, (self.x - 7) - camera.x, (self.y - 12) - camera.y)
elif self.data == data.barrier:
screen.blit(gfx.barrier_closed, (self.x - 8) - camera.x, (self.y - 16) - camera.y)
elif self.data == data.spike:
if self.counter < 6:
screen.blit(gfx.spike_middle, (self.x - 7) - camera.x, (self.y - 16) - camera.y)
elif self.up:
screen.blit(gfx.spike_up, (self.x - 7) - camera.x, (self.y - 16) - camera.y)
else:
screen.blit(gfx.spike_down, (self.x - 7) - camera.x, (self.y - 16) - camera.y)
elif self.data == data.signpost:
screen.blit(gfx.signpost_idle, (self.x - 5) - camera.x, (self.y - 12) - camera.y)
'''
class Bush(Entity):
def __init__(self, tile_x, tile_y, entity_data):
Entity.__init__(self, tile_x, tile_y, entity_data)
self.alive = True
def hit(self, player):
return self.alive
def damage(self, value, dir, knockback):
if not self.alive: return False
self.alive = False
self.anim_counter = 0
self.frame_index = 0
return True
def draw(self):
if self.alive:
screen.blit(gfx.bush_idle, (self.x - 8) - camera.x, (self.y - 15) - camera.y)
else:
if self.draw_anim(self.x - 4, self.y - 13, gfx.fx_enemy_die, 3, False):
game.bushes.remove(self)
'''
'''
class Door(Entity):
def __init__(self, tile_x, tile_y, entity_data):
Entity.__init__(self, tile_x, tile_y, entity_data)
self.closed = True
self.anim_counter = 0
self.frame_index = 0
def hit(self, player):
if self.closed and player.use_key():
self.closed = False
sound.play_sfx(sfx.attack_miss, len(sfx.attack_miss), True)
return self.closed
def draw(self):
if self.closed:
screen.blit(gfx.door_closed, (self.x - 8) - camera.x, (self.y - 16) - camera.y)
else:
self.draw_anim(self.x - 8, self.y - 16, self.data.anim_open, 4, False)
'''
'''
class Switch(Entity):
def __init__(self, tile_x, tile_y, entity_data):
Entity.__init__(self, tile_x, tile_y, entity_data)
self.on = False
def activate(self):
if not self.on:
self.on = True
game.barrier.activate_switch() # FIXME
sound.play_sfx(sfx.attack_miss, len(sfx.attack_miss), True)
else:
game.show_text([['It seems that', 'it\'s activated.']])
def draw(self):
if self.on:
screen.blit(gfx.switch_on, (self.x - 7) - camera.x, (self.y - 12) - camera.y)
else:
screen.blit(gfx.switch_off, (self.x - 7) - camera.x, (self.y - 12) - camera.y)
'''
'''
class Barrier(Entity):
def __init__(self, tile_x, tile_y, entity_data):
Entity.__init__(self, tile_x, tile_y, entity_data)
self.switch_count = 2 # FIXME
def hit(self, player):
return True
def activate_switch(self):
# FIXME
self.switch_count -= 1
if self.switch_count == 0: game.doors.remove(self)
def draw(self):
screen.blit(gfx.barrier_closed, (self.x - 8) - camera.x, (self.y - 16) - camera.y)
'''
'''
class Spike(Entity):
def __init__(self, tile_x, tile_y, entity_data):
Entity.__init__(self, tile_x, tile_y, entity_data)
self.up = (tile_x % 2 == 0 and tile_y % 2 == 0) or (tile_x % 2 == 1 and tile_y % 2 == 1)
if self.up: self.counter = 40
else: self.counter = 55 # Shift start to sync the spikes: (70-40) / 2 = 15
def update(self):
if self.up and self.counter > 6 and self.collide_with(self.x, self.y, game.player):
game.player.damage(1)
self.counter -= 1
if self.counter == 0:
self.up = not self.up
if self.up: self.counter = 40
else: self.counter = 70
def draw(self):
if self.counter < 6:
screen.blit(gfx.spike_middle, (self.x - 7) - camera.x, (self.y - 16) - camera.y)
elif self.up:
screen.blit(gfx.spike_up, (self.x - 7) - camera.x, (self.y - 16) - camera.y)
else:
screen.blit(gfx.spike_down, (self.x - 7) - camera.x, (self.y - 16) - camera.y)
'''
'''
class Signpost(Entity):
def __init__(self, tile_x, tile_y, entity_data, text):
Entity.__init__(self, tile_x, tile_y, entity_data)
self.text = text
def activate(self):
game.show_text(self.text)
sound.play_sfx(sfx.select, len(sfx.select), True)
def draw(self):
screen.blit(gfx.signpost_idle, (self.x - 5) - camera.x, (self.y - 12) - camera.y)
'''
class Game:
def __init__(self):
self.map_data = bytearray((MAP_WIDTH * MAP_HEIGHT) // 2)
self.tilemap = pygame.tilemap.Tilemap(MAP_WIDTH, MAP_HEIGHT, self.map_data)
self.tilemap.set_tile(0x3, TILE_WIDTH, TILE_HEIGHT, gfx.tile_cliff)
self.tilemap.set_tile(0x4, TILE_WIDTH, TILE_HEIGHT, gfx.tile_stalk_1)
self.tilemap.set_tile(0x5, TILE_WIDTH, TILE_HEIGHT, gfx.tile_tree)
self.tilemap.set_tile(0x8, TILE_WIDTH, TILE_HEIGHT, gfx.tile_water_1)
self.tilemap.set_tile(dt.TILE_GROUND, TILE_WIDTH, TILE_HEIGHT, gfx.tile_path)
self.tilemap.set_tile(dt.TILE_GRASS, TILE_WIDTH, TILE_HEIGHT, gfx.tile_grass)
def new_game(self, world):
self.world = world
world.reset()
self.hp = data.player.hp
self.key = 0
self.has_sword = False #DEBUG_SWORD
self.text = None
self.load_map(world.starting_map)
self.game_over_counter = 50
self.paused = False
self.lanea = True
#if not self.player: print("ERROR no player found!")
camera.look_at(self.player.x - 55, self.player.y - 48)
#if not SKIP_INTRO and world.intro_text:
if world.intro_text:
self.player.state = STATE_SLEEP
self.show_text(world.intro_text)
sound.play_sfx(sfx.select, len(sfx.select), True)
def load_map(self, map_data):
self.current_map = map_data
self.player = None
self.enemies = []
self.collectibles = []
self.doors = []
self.bushes = []
self.spikes = []
self.signposts = []
if map_data.custom_ground: self.tilemap.set_tile(0x6, TILE_WIDTH, TILE_HEIGHT, map_data.custom_ground)
if map_data.custom_solid: self.tilemap.set_tile(0xA, TILE_WIDTH, TILE_HEIGHT, map_data.custom_solid)
if map_data.tile_wall:
self.tilemap.set_tile(0xC, TILE_WIDTH, TILE_HEIGHT, map_data.tile_wall[0])
self.tilemap.set_tile(0xD, TILE_WIDTH, TILE_HEIGHT, map_data.tile_wall[1])
self.tilemap.set_tile(0xE, TILE_WIDTH, TILE_HEIGHT, map_data.tile_wall[2])
self.tilemap.set_tile(0xF, TILE_WIDTH, TILE_HEIGHT, map_data.tile_wall[3])
self.tile_anim_counter = 1
self.tile_anim_index = 0
gc.collect()
#print ("free RAM before map load:", gc.mem_free())
for ix in range(0, MAP_WIDTH):
for iy in range(0, MAP_HEIGHT):
tid = get_tile_at(map_data.tiles, ix, iy)
if tid == 0x0:
self.player = Player(ix, iy, data.player)
tid = dt.TILE_GRASS
elif tid == dt.TILE_ENTITY_1:
self.create_entity(ix, iy, map_data.entity_1)
tid = map_data.entity_1_tid
elif tid == dt.TILE_ENTITY_2:
self.create_entity(ix, iy, map_data.entity_2)
tid = map_data.entity_2_tid
elif tid == dt.TILE_ENTITY_3:
self.create_entity(ix, iy, map_data.entity_3)
tid = map_data.entity_3_tid
elif tid == dt.TILE_ENTITY_4:
self.create_entity(ix, iy, map_data.entity_4)
tid = map_data.entity_4_tid
elif tid == dt.TILE_ENTITY_5:
self.create_entity(ix, iy, map_data.entity_5)
tid = map_data.entity_5_tid
elif tid == dt.TILE_ENTITY_6:
self.create_entity(ix, iy, map_data.entity_6)
tid = map_data.entity_6_tid
elif tid == 0x7:
# Wall resolution
top = False
if iy > 0 and get_tile_at(map_data.tiles, ix, iy - 1) == 0x7: top = True
bottom = False
if iy < MAP_HEIGHT - 1 and get_tile_at(map_data.tiles, ix, iy + 1) == 0x7: bottom = True
if top and bottom:
tid = 0xF
elif top and not bottom:
tid = 0xE
elif not top and bottom:
tid = 0xD
else:
tid = 0xC
set_tile_at(self.map_data, ix, iy, tid)
for sp in map_data.signposts:
signpost = GenericEntity(sp['x'], sp['y'], data.signpost)
signpost.text = sp['text']
self.signposts.append(signpost)
# print ("free RAM after map load:", gc.mem_free())
def create_entity(self, x, y, id):
if id == dt.ENTITY_BUSH:
self.bushes.append(GenericEntity(x, y, data.bush))
elif id == dt.ENTITY_DOOR:
self.doors.append(GenericEntity(x, y, data.door))
elif id == dt.ENTITY_SLIME:
self.enemies.append(Enemy(x, y, data.enemy_slime))
elif id == dt.ENTITY_SLIME_STRONG:
self.enemies.append(Enemy(x, y, data.enemy_slime_strong))
elif id == dt.ENTITY_SPIKE:
self.spikes.append(GenericEntity(x, y, data.spike))
elif id == dt.ENTITY_BARRIER:
self.barrier = GenericEntity(x, y, data.barrier) # FIXME
self.doors.append(self.barrier)
elif id == dt.ENTITY_SWITCH:
self.signposts.append(GenericEntity(x, y, data.switch))
elif id >= dt.ENTITY_KEY:
if not self.current_map.is_collected_at(x, y):
self.collectibles.append(Collectible(x, y, data.collectible[id]))
def load_next_map(self, dx, dy):
next_map = self.world.get_map_at(self.current_map.x + dx, self.current_map.y + dy)
if next_map:
previous_x = self.player.x
previous_y = self.player.y
self.load_map(next_map)
self.player = Player(0, 0, data.player)
dir = get_dir(dx, dy)
if dir == DIR_UP:
self.player.x = previous_x
self.player.y = MAP_HEIGHT_PX - 1
elif dir == DIR_DOWN:
self.player.x = previous_x
self.player.y = 16
elif dir == DIR_LEFT:
self.player.x = MAP_WIDTH_PX - 8
self.player.y = previous_y
elif dir == DIR_RIGHT:
self.player.x = 8
self.player.y = previous_y
self.player.dir = dir
camera.look_at(self.player.x - 55, self.player.y - 48)
else:
show_scene(EndScene())
def is_tile_solid(self, x, y):
# This method inlines get_tile_at() in order to reduce stack usage.
tid = self.map_data[y * MAP_WIDTH // 2 + x // 2]
if x % 2 == 0:
tid = (tid & 0xF0) >> 4
else:
tid = tid & 0x0F
return tid == 0x3 or tid == 0x5 or tid == 0x8 or tid == 0xA or tid >= 0xC
def map_collide(self, x, y, hitbox):
# To avoid two nested for loops, this collision routine only works with hitboxes smaller than a tile.
# This is most likely a premature optimization, but so be it...
x1 = (x + hitbox.x) // TILE_WIDTH
y1 = (y + hitbox.y) // TILE_HEIGHT
if self.is_tile_solid(x1, y1): return True
x2 = (x + hitbox.x + hitbox.width) // TILE_WIDTH
if self.is_tile_solid(x2, y1): return True
y2 = (y + hitbox.y + hitbox.height) // TILE_HEIGHT
if self.is_tile_solid(x1, y2): return True
if self.is_tile_solid(x2, y2): return True
def show_text(self, text):
self.text = text
self.text_index = 0
def update(self):
if not self.text and not self.player.state == STATE_DEAD and input_c:
#if SKIP_INTRO:
# game.new_game(data.world)
# show_scene(game)
#else:
self.paused = not self.paused
if self.paused: return
if self.text:
if input_a:
self.text_index += 1
if self.text_index == len(self.text):
self.text = None
if self.player.state == STATE_SLEEP: self.player.state = STATE_IDLE
sound.play_sfx(sfx.select, len(sfx.select), True)
else:
for e in self.enemies: e.update()
for e in self.collectibles: e.update()
# Updating the spikes seems to be a bottleneck, so it's inlined here...
# The player rect is also calculated only once.
rect_1.x = self.player.x + self.player.data.hitbox.x
rect_1.y = self.player.y + self.player.data.hitbox.y
rect_1.width = self.player.data.hitbox.width
rect_1.height = self.player.data.hitbox.height
for e in self.spikes:
if e.up and e.counter > 6 and e.collide_with_rect(rect_1):
game.player.damage(1)
e.counter -= 1
if e.counter == 0:
e.up = not e.up
if e.up: e.counter = 40
else: e.counter = 70
self.player.update()
camera.look_at(self.player.x - 55, self.player.y - 48)
if self.player.state == STATE_DEAD:
self.game_over_counter -= 1
if self.game_over_counter == 0:
show_scene(GameOverScene())
def draw(self):
if self.paused:
draw_text_centered(SCREEN_WIDTH // 2 + 1, 42, "PAUSED", 0x1)
draw_text_centered(SCREEN_WIDTH // 2, 41, "PAUSED", 0xF)
return
self.tile_anim_counter -= 1
if self.tile_anim_counter == 0:
if self.tile_anim_index == 0:
self.tilemap.set_tile(0x4, TILE_WIDTH, TILE_HEIGHT, gfx.tile_stalk_1)
self.tilemap.set_tile(0x8, TILE_WIDTH, TILE_HEIGHT, gfx.tile_water_1)
self.tile_anim_index = 1
else:
self.tilemap.set_tile(0x4, TILE_WIDTH, TILE_HEIGHT, gfx.tile_stalk_2)
self.tilemap.set_tile(0x8, TILE_WIDTH, TILE_HEIGHT, gfx.tile_water_2)
self.tile_anim_index = 0
self.tile_anim_counter = 30
self.tilemap.draw(-camera.x, -camera.y)
for e in self.spikes: e.draw()
for e in self.collectibles: e.draw()
for e in self.enemies: e.draw()
for e in self.bushes: e.draw()
for e in self.doors: e.draw()
for e in self.signposts: e.draw()
self.player.draw()
for i in range(0, self.hp):
screen.blit(gfx.ui_hearth, SCREEN_WIDTH - 11 * (i + 1), 2)
item_x = 1
if self.has_sword:
screen.blit(gfx.collectible_sword_idle, item_x, 1)
item_x += 11
for i in range(0, self.key):
screen.blit(gfx.collectible_key_idle, item_x, 1)
item_x += 11
if self.text:
screen.fill(0x0, TEXT_RECT)
txt = self.text[self.text_index]
if type(txt) is list:
if len(txt) == 1:
#machine.draw_text(36, 64, txt[0], 0xF)
draw_text_centered(68, 64, txt[0], 0xF)
else:
machine.draw_text(40, 59, txt[0], 0xF)
machine.draw_text(48, 69, txt[1], 0xF)
screen.blit(gfx.portrait_lanea, -12, TEXT_RECT.y)
if self.player.state == STATE_SLEEP: self.player.state = STATE_IDLE
else:
draw_text_centered(SCREEN_WIDTH // 2, 64, txt, 0xF)
camera.look_at(self.player.x - 55, self.player.y - 45)
class TitleScene:
def __init__(self):
self.press_start_counter = 90
self.press_start_visible = False
def update(self):
if input_c:
game.new_game(data.world)
show_scene(game)
def draw(self):
screen.fill(0x0, screen.get_rect())
screen.blit(gfx.title, 17, 8)
self.press_start_counter -= 1
if self.press_start_counter == 0:
self.press_start_visible = not self.press_start_visible
self.press_start_counter = 12
if self.press_start_visible: draw_text_centered(SCREEN_WIDTH // 2, 74, "Press Start", 0xF)
class EndScene:
def update(self):
if input_c:
sound.play_sfx(sfx.select, len(sfx.select), True)
show_scene(TitleScene())
def draw(self):
screen.fill(0x0, screen.get_rect())
draw_text_centered(SCREEN_WIDTH // 2, 41, "To be continued...", 0xF)
class GameOverScene:
def update(self):
if input_c:
sound.play_sfx(sfx.select, len(sfx.select), True)
show_scene(TitleScene())
def draw(self):
screen.fill(0x0, screen.get_rect())
draw_text_centered(SCREEN_WIDTH // 2, 41, "GAME OVER", 0xF)
input_x = 0
input_y = 0
scene = None
def show_scene(s):
global scene
scene = s
gc.collect()
#print ("free RAM:", gc.mem_free())
game = Game()
#if SKIP_INTRO:
# game.new_game(data.world)
# show_scene(game)
#else:
show_scene(TitleScene())
while True:
# Main update loop.
input_a = False
input_b = False
input_c = False
eventtype = pygame.event.poll()
if eventtype != pygame.NOEVENT:
if eventtype.type == pygame.KEYDOWN:
if eventtype.key == pygame.K_RIGHT: input_x = 1
if eventtype.key == pygame.K_LEFT: input_x = -1
if eventtype.key == pygame.K_DOWN: input_y = 1
if eventtype.key == pygame.K_UP: input_y = -1
if eventtype.key == pygame.BUT_A: input_a = True
if eventtype.key == pygame.BUT_B: input_b = True
if eventtype.key == pygame.BUT_C: input_c = True
if eventtype.type == pygame.KEYUP:
if eventtype.key == pygame.K_RIGHT: input_x = 0
if eventtype.key == pygame.K_LEFT: input_x = 0
if eventtype.key == pygame.K_DOWN: input_y = 0
if eventtype.key == pygame.K_UP: input_y = 0
scene.update()
camera.update()
scene.draw()
pygame.display.flip()
``` |
{
"source": "jlauha/FixPointCS",
"score": 2
} |
#### File: FixPointCS/Polyfit/remez.py
```python
import math
import random
import mpmath as mp
import numpy.polynomial as P
import numpy.polynomial.chebyshev as C
# set precision
#dps = 30
dps = 120
mp.mp.dps = dps
mp.mp.pretty = True
def vzeros(size):
return mp.matrix([0.0] * size)
def vones(size):
return mp.matrix([1.0] * size)
def nth(size):
return mp.matrix([0.0] * size + [1.0])
class Remez:
def __init__(self, func, weightFunc, domain, order):
#print('Remez()')
self.func = func
self.weightFunc = weightFunc
self.domain = domain
self.order = order
self.limit = mp.mpf(f'1e-{dps}')
self.errorThreshold = mp.mpf(f'1e-{dps}')
self.initRemez()
def initRemez(self):
#print('Remez.init()')
# constants for domain
(xMin, xMax) = self.domain
self.k1 = (xMax + xMin) * 0.5
self.k2 = (xMax - xMin) * 0.5
# initial estimates for function roots (where error == 0.0)
size = self.order + 1
roots = vzeros(size)
fxn = vzeros(size)
# \todo [petri] use linspace
for i in range(size):
roots[i] = (2.0 * i - self.order) / size
fxn[i] = self.evalFunc(roots[i])
# build matrix of Chebyshev evaluations
system = mp.zeros(size)
for order in range(size):
for i in range(size):
system[i,order] = mp.chebyt(order, roots[i])
# solve system
solved = system ** -1
# compute Chebyshev weights of new approximation
weights = vzeros(size)
for n in range(size):
weights[n] = mp.fdot(solved[n,:], fxn)
#print(f' weights: {weights.T}')
# store roots & weights
self.roots = roots
self.weights = weights
self.maxError = 1000.0
# estimate error
#est = [self.evalEstimate(x) for x in roots]
#print(' est:', est)
#print(' fxn:', fxn.T)
def step(self):
(control, maxErr) = self.findExtrema(self.roots)
#print(f' maxErr: {maxErr} ({-math.log(maxErr, 2.0)} bits)')
# for x in control:
# print(f' ctrl: {x}')
self.weights = self.remezStep(control)
# update error
errDiff = mp.fabs(maxErr - self.maxError)
self.maxError = maxErr
if errDiff < self.errorThreshold:
return True
self.roots = self.findRoots(control)
return False
# for (ax, bx, rx, rerr) in scannedRoots:
# print(f' scanned: {rx} in [{ax, bx}]')
# scannedRoots = self.scanRoots(1000)
# for ndx in range(len(self.roots)):
# x = self.roots[ndx]
# (ax, bx, rx, rerr) = scannedRoots[ndx]
# print(f' root: {x} [{ax}, {bx}] {rx}')
# if x < ax or x > bx:
# print(' ROOT FIND MISMATCH')
def findRoot(self, ax, bx, aerr, berr):
cx = (ax + bx) * 0.5
cerr = self.evalEstimate(cx) - self.evalFunc(cx)
if cerr == 0.0 or (bx - ax) <= self.limit:
return (cx, cerr)
else:
if (aerr < 0.0 and cerr < 0.0) or (aerr > 0.0 and cerr > 0.0):
(ax, aerr) = (cx, cerr)
else:
(bx, berr) = (cx, cerr)
return self.findRoot(ax, bx, aerr, berr)
def findRoots(self, control):
roots = vzeros(self.order + 1)
for n in range(self.order + 1):
ax = control[n]
bx = control[n+1]
aerr = self.evalEstimate(ax) - self.evalFunc(ax)
berr = self.evalEstimate(bx) - self.evalFunc(bx)
(rx, rerr) = self.findRoot(ax, bx, aerr, berr)
assert(rx >= ax and rx <= bx) # root must be inside search range
roots[n] = rx
#print(f' root[{n}]: {rx} <{rerr}> || {ax} {bx}')
return roots
def remezStep(self, control):
# eval at control points
fxn = mp.matrix([self.evalFunc(c) for c in control])
# create linear system with chebyshev polynomials
size = self.order + 2
system = mp.zeros(size)
for n in range(self.order + 1):
for i in range(self.order + 2):
system[i,n] = mp.chebyt(n, control[i])
# last column is oscillating error
for i in range(size):
sign = -1 if ((i & 1) == 0) else +1
scale = 0.0 if i in [0, size-1] else 1.0
system[i,size-1] = sign * scale * mp.fabs(self.evalWeight(control[i]))
#print(system)
# solve the system
solved = system ** -1
#print(solved)
# compute polynomial estimate (as Chebyshev weights)
weights = vzeros(size-1)
for n in range(size-1):
weights[n] = mp.fdot(solved[n,:], fxn)
#print(f' weights: {weights}')
# estimate error
# self.weights = weights
# est = [self.evalEstimate(x) for x in control]
# print(' est:', est)
# print(' fxn:', fxn.T)
return weights
def findExtremum(self, ax, bx, cx, aerr, berr, cerr):
# \todo [petri] implement golden ratio search?
dx = (ax + bx) * 0.5
derr = self.evalError(dx)
# update coords based on error
if derr < cerr:
if dx > cx:
(bx, berr) = (dx, derr)
else:
(ax, aerr) = (dx, derr)
else:
if dx > cx:
(ax, aerr) = (cx, cerr)
else:
(bx, berr) = (cx, cerr)
(cx, cerr) = (dx, derr)
# check if limit reached
if (bx - ax) <= self.limit:
#print(f' fin: {cx} <{cerr}> || {ax} {bx}')
return (cx, cerr)
else:
#print(f' cur: {cx} <{cerr}>')
return self.findExtremum(ax, bx, cx, aerr, berr, cerr)
def findExtrema(self, roots):
control = vzeros(self.order + 2)
control[0] = -1
control[self.order + 1] = 1
maxErr = 0.0
for n in range(self.order):
ax = roots[n]
bx = roots[n+1]
cx = ax + (bx - ax) * random.uniform(0.4, 0.6)
aerr = self.evalError(ax)
berr = self.evalError(bx)
cerr = self.evalError(cx)
#print(f' find[{n}]: {ax}, {bx}')
(rx, rerr) = self.findExtremum(ax, bx, cx, aerr, berr, cerr)
assert(rx >= ax and rx <= bx) # extremum must be inside search range
#print(f' extrema[{n}]: {rx} <{rerr}> || {ax} {bx}')
control[n + 1] = rx
maxErr = max(maxErr, rerr)
return (control, maxErr)
def scanRoots(self, numSteps):
found = []
coords = mp.linspace(-1.0, 1.0, numSteps)
for ndx in range(len(coords)-1):
ax = coords[ndx]
bx = coords[ndx+1]
aerr = self.evalEstimate(ax) - self.evalFunc(ax)
berr = self.evalEstimate(bx) - self.evalFunc(bx)
#print(f'bucket: {ax} <{aerr}>')
#print(f' to: {bx} <{berr}>')
if mp.sign(aerr) != mp.sign(berr):
(rx, rerr) = self.findRoot(ax, bx, aerr, berr)
#print(f' root in range: [{ax}, {bx}]: {rx} <{rerr}>')
found.append((ax, bx, rx, rerr))
return found
def evalFunc(self, x):
return self.func(x * self.k2 + self.k1)
def evalWeight(self, x):
return self.weightFunc(x * self.k2 + self.k1)
def evalEstimate(self, x):
sum = mp.mpf(0.0)
for i in range(len(self.weights)):
sum += self.weights[i] * mp.chebyt(i, x)
return sum
def evalError(self, x):
return mp.fabs(self.evalEstimate(x) - self.evalFunc(x)) / self.evalWeight(x)
# Main
MAX_REMEZ_ITER = 10
# convert polynomials to assume input in [0.0, 1.0] range (instead of [-1.0, 1.0])
rebase = P.Polynomial([-1.0, 2.0])
def remezFit(name, func, weightFunc, domain, order):
remez = Remez(func, weightFunc, domain, order)
for iter in range(MAX_REMEZ_ITER):
#print(f'ITER {iter}:')
if remez.step():
break
return remez
def remezToPoly(remez):
cheby = C.Chebyshev(remez.weights)
p = cheby.convert(kind=P.Polynomial)
p = p(rebase)
# (x0, x1) = remez.domain
# p = p(P.Polynomial([-x0 / (x1 - x0), 1.0 / (x1 - x0)]))
return p
def writeCoefficients(file, name, maxError, order, segments):
file.write('\n')
if len(segments) == 1:
precision = -math.log(maxError, 2.0)
file.write(f'\t\t// Precision: {precision:.2f} bits\n')
file.write('\t\t[MethodImpl(AggressiveInlining)]\n')
file.write(f'\t\tpublic static int {name}Poly{order}(int a)\n')
file.write('\t\t{\n')
# get polynomial
p = remezToPoly(segments[0])
for ndx in reversed(range(order + 1)):
c = p.coef[ndx]
ic = (int)(c * (1 << 30) + 0.5)
# file.write(f'\t\t\tconst int C{ndx} = {ic}; // {c}\n')
if ndx == len(p.coef)-1:
file.write(f'\t\t\tint y = Qmul30(a, {ic}); // {c}\n')
elif ndx > 0:
file.write(f'\t\t\ty = Qmul30(a, y + {ic}); // {c}\n')
else:
file.write(f'\t\t\ty = y + {ic}; // {c}\n')
file.write('\t\t\treturn y;\n')
else:
numSegments = len(segments)
precision = -math.log(maxError, 2.0)
funcName = f'{name}Poly{order}Lut{numSegments}'
tableName = f'{funcName}Table'
# write constant table
file.write(f'\t\tprivate static readonly int[] {tableName} =\n')
file.write('\t\t{\n')
for remez in segments:
# get polynomial
p = remezToPoly(remez)
# map [x0, x1] to [0.0, 1.0]
(x0, x1) = remez.domain
p = p(P.Polynomial([-x0 / (x1 - x0), 1.0 / (x1 - x0)]))
# write coefficients
coefs = ' '.join(f'{int(c * (1<<30) + 0.5)},' for c in reversed(p.coef))
file.write(f'\t\t\t{coefs}\n')
file.write('\t\t};\n')
# write function
file.write('\n')
file.write(f'\t\t// Precision: {precision:.2f} bits\n')
file.write('\t\t[MethodImpl(AggressiveInlining)]\n')
file.write(f'\t\tpublic static int {funcName}(int a)\n')
file.write('\t\t{\n')
segmentBits = int(math.log2(numSegments))
file.write(f'\t\t\tint offset = (a >> {30 - segmentBits}) * {order + 1};\n')
for ndx in reversed(range(order + 1)):
if ndx == order:
file.write(f'\t\t\tint y = Qmul30(a, {tableName}[offset + {0}]);\n')
elif ndx > 0:
file.write(f'\t\t\ty = Qmul30(a, y + {tableName}[offset + {order - ndx}]);\n')
else:
file.write(f'\t\t\ty = y + {tableName}[offset + {order}];\n')
file.write('\t\t\treturn y;\n')
file.write('\t\t}\n')
file.flush()
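# Illustrative note (assumption, not generated verbatim): for a single-segment order-3 fit
# the emitted C# follows Horner's rule in Q30 fixed point, roughly:
#   int y = Qmul30(a, C3);      // highest-order coefficient
#   y = Qmul30(a, y + C2);
#   y = Qmul30(a, y + C1);
#   y = y + C0;
#   return y;
# where each Ck stands for round(coef * 2^30), written as a literal by the loop above.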
def remezFitSegmented(name, func, weightFunc, domain, numSegments, order):
(xMin, xMax) = domain
maxError = 0.0
segments = []
for segment in range(numSegments):
x0 = xMin + (xMax - xMin) * mp.mpf(segment) / numSegments
x1 = xMin + (xMax - xMin) * mp.mpf(segment + 1) / numSegments
remez = remezFit(name, func, weightFunc, (x0, x1), order)
#print(f' segment[{segment}]: {-math.log(remez.maxError, 2.0):.2f} bits [{x0} .. {x1}]')
maxError = max(maxError, remez.maxError)
segments.append(remez)
return (maxError, segments)
epsilon = mp.mpf(f'1e-{dps//3}')
# Implementation of sin() with the assumption that input has been normalized to [-1.0, 1.0] range
# and squared. Also assumes the output will be multiplied by x once more (to make it odd).
# See: https://github.com/samhocevar/lolremez/wiki/Tutorial-3-of-5:-changing-variables-for-simpler-polynomials
def sinSqr(x):
x = x * 0.25 * mp.pi * mp.pi
xx = mp.sqrt(x)
y = mp.sin(xx) / xx
return y * 0.5*mp.pi
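# Sanity-check sketch (added note): with the change of variables above, for t in (0, 1]
#   t * sinSqr(t * t) == mp.sin(t * mp.pi / 2)
# e.g. mp.fabs(0.5 * sinSqr(0.25) - mp.sin(mp.pi / 4)) is ~0, so the polynomial fitted in
# x = t*t only needs one extra multiply by t to recover sin(t*pi/2).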
FUNCS = [
('Exp', lambda x: mp.exp(x), lambda x: mp.exp(x), (0.0, 1.0)),
('Exp2', lambda x: 2.0**x, lambda x: 2.0**x, (0.0, 1.0)),
('Log', lambda x: mp.log(x+1), lambda x: mp.log(x+1) * (mp.log(2.0) - mp.log(x+1)), (epsilon, 1.0-epsilon)),
('Log2', lambda x: mp.log(x+1, 2.0), lambda x: mp.log(x+1, 2.0) * (1 - mp.log(x+1, 2.0)), (epsilon, 1.0-epsilon)),
('Rcp', lambda x: 1.0 / (x+1), lambda x: 1.0 / (x+1), (0.0, 1.0)),
('Sqrt', lambda x: mp.sqrt(x+1), lambda x: mp.sqrt(x+1), (0.0, 1.0)),
('RSqrt', lambda x: 1.0 / mp.sqrt(x+1), lambda x: 1.0 / mp.sqrt(x+1), (0.0, 1.0)),
('Atan', lambda x: mp.atan(x), lambda x: mp.atan(x), (0.0, 1.0)),
('Sin', sinSqr, sinSqr, (epsilon, 1.0)),
]
def generateCode():
with open(f'fitted.txt', 'w', newline='\n') as file:
file.write('\t// AUTO-GENERATED POLYNOMIAL APPROXIMATIONS\n')
file.write('\n')
file.write('\tpublic static class Util\n')
file.write('\t{\n')
for (name, func, weightFunc, domain) in FUNCS:
print()
print(f'{name}():')
orderRange = range(3, 10) if name != 'Sin' else range(1, 5)
# orderRange = range(3, 6) if name != 'sin' else range(1, 4)
for order in orderRange:
remez = remezFit(name, func, weightFunc, domain, order)
print(f' {name}<{order}>(): {-math.log(remez.maxError, 2.0):.2f} bits')
writeCoefficients(file, name, remez.maxError, order, [remez])
for numSegments in [4, 8, 16, 32]:
orders = [2, 3, 4, 5] if name != 'Sin' else [1, 2, 3]
# orders = [3, 4] if name != 'sin' else [1, 2, 3]
for order in orders:
(maxError, segments) = remezFitSegmented(name, func, weightFunc, domain, numSegments, order)
print(f' {name}<{order}>[{numSegments}](): {-math.log(maxError, 2.0):.2f} bits')
writeCoefficients(file, name, maxError, order, segments)
file.write('\t}\n')
def plotError():
print('Plotting error..')
#func = lambda x: 1.0 / (x + 1.0)
func = sinSqr
remez = remezFit('Sin', func, func, (epsilon, 1.0), 4)
# remez = remezFit('Rcp', func, func, (1.0, 2.0), 3)
print('err:', remez.maxError)
est = remezToPoly(remez)
err = lambda x: (func(x) - est(x)) / func(x)
mp.plot([err], [0.0, 1.0])
#mp.plot([func], [0.0, 1.0])
# Main
generateCode()
#plotError()
``` |
{
"source": "jlauman/data_engineering_project_03",
"score": 3
} |
#### File: jlauman/data_engineering_project_03/rs_etl.py
```python
import configparser, os, glob, csv, json, hashlib, time
import pandas as pd
import psycopg2
from pprint import pprint
from rs_sql_queries import staging_events_insert, staging_songs_insert
from rs_sql_queries import insert_table_queries
import boto3
from botocore import UNSIGNED
from botocore.config import Config
DEND_BUCKET='udacity-dend'
# global lookup table
NAME_TO_GENDER = {}
def load_gender_lookup():
"""Load lookup dictionary to find gender given a name.
"""
base_path = os.getcwd() + '/data/names'
for root, dirs, files in os.walk(base_path):
file_paths = glob.glob(os.path.join(root,'*.txt'))
for file_path in file_paths:
print('names: %s' % file_path)
with open(file_path) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
for row in csv_reader:
# pprint(row)
NAME_TO_GENDER[row[0]] = row[1]
# pprint(NAME_TO_GENDER)
True
def get_object_paths(s3, bucket, prefix):
"""List objects in S3 bucket with given prefix.
Uses paginator to ensure a complete list of object paths is returned.
"""
# r1 = s3.list_objects(Bucket=DEND_BUCKET, Prefix=prefix)
# r2 = list(map(lambda obj: obj['Key'], r1['Contents']))
# r3 = list(filter(lambda str: str.endswith('.json'), r2))
# s3 client does not need to be closed
object_paths = []
paginator = s3.get_paginator('list_objects')
pages = paginator.paginate(Bucket=bucket, Prefix=prefix)
for page in pages:
# print("len(page['Contents'])=" + str(len(page['Contents'])))
r1 = list(map(lambda obj: obj['Key'], page['Contents']))
r2 = list(filter(lambda str: str.endswith('.json'), r1))
object_paths.extend(r2)
print('%s/%s total object paths = %d' % (bucket, prefix, len(object_paths)))
time.sleep(2)
return object_paths
def load_staging_log_data(cur, conn):
"""Load song-play event records into s_songplay_event table.
"""
# import pdb; pdb.set_trace()
# load log_data (events) into s_event table
s3 = boto3.client('s3', config=Config(signature_version=UNSIGNED))
file_paths = get_object_paths(s3, DEND_BUCKET, 'log_data')
pprint(file_paths)
for file_path in file_paths:
sql = str(staging_events_insert)
print('log_data: %s' % file_path)
obj1 = s3.get_object(Bucket='udacity-dend', Key=file_path)
str1 = obj1['Body'].read().decode('utf-8').strip()
df = pd.read_json(str1, lines=True)
df = df[df.page == 'NextSong']
df['timestamp'] = pd.to_datetime(df['ts'], unit='ms')
df['year'] = df['timestamp'].dt.year
df['week'] = df['timestamp'].dt.weekofyear
df['month'] = df['timestamp'].dt.month
df['day'] = df['timestamp'].dt.day
df['hour'] = df['timestamp'].dt.hour
df['weekday'] = df['timestamp'].dt.weekday
# pprint(df)
for index, row in df.iterrows():
# create a sha256 hash for event's unique id
event_id = hashlib.sha256((str(row.userId) + ' ' + str(row.sessionId) + ' ' + row.timestamp.strftime('%Y%m%d%H%M') + ' ' + row.song).encode('utf-8')).hexdigest()
str1 = ("(" +
"'" + event_id + "', " +
"'" + row.artist.replace("'", "''") + "', " +
"'" + row.auth + "', " +
"'" + row.firstName.replace("'", "''") + "', " +
"" + str(row.itemInSession) + ", " +
"'" + row.lastName.replace("'", "''") + "', " +
"'" + NAME_TO_GENDER[row.firstName] + "', " +
"" + str(row.length) + ", " +
"'" + row.level + "', " +
"'" + row.location.replace("'", "''") + "', " +
"'" + row.method + "', " +
"'" + row.page + "', " +
"'" + str(row.registration) + "', " +
"'" + str(row.sessionId) + "', " +
"'" + row.song.replace("'", "''") + "', " +
"'" + str(row.status) + "', " +
"'" + row.timestamp.strftime('%Y-%m-%d %H') + "', " +
"" + str(row.year) + ", " +
"" + str(row.week) + ", " +
"" + str(row.month) + ", " +
"" + str(row.day) + ", " +
"" + str(row.hour) + ", " +
"" + str(row.weekday) + ", " +
"'" + row.userAgent.replace("'", "''") + "', " +
"'" + str(row.userId) + "'" +
"),\n")
sql += str1
sql = ''.join(sql).strip()[:-1] + ';'
# print(sql)
# import pdb; pdb.set_trace()
cur.execute(sql)
conn.commit()
def load_staging_song_data(cur, conn):
"""Load song records into s_song staging table.
"""
sql = str(staging_songs_insert)
s3 = boto3.client('s3', config=Config(signature_version=UNSIGNED))
file_paths = get_object_paths(s3, DEND_BUCKET, 'song_data')
pprint(file_paths)
for file_path in file_paths:
print('song_data: %s' % file_path)
obj1 = s3.get_object(Bucket='udacity-dend', Key=file_path)
str1 = obj1['Body'].read().decode('utf-8').strip()
data = json.loads(str1)
if data['year'] == 0: data['year'] = None
# fix link string...
if str(data['artist_location']).startswith('<a'): data['artist_location'] = None
# pprint(data)
str2 = ("(" +
"'" + data['artist_id'] + "', " +
"" + (str(data['artist_latitude']) if not data['artist_latitude'] == None else 'null') + ", " +
"'" + str(data['artist_location']).replace("'", "''") + "', " +
"" + (str(data['artist_longitude']) if not data['artist_longitude'] == None else 'null') + ", " +
"'" + str(data['artist_name']).replace("'", "''") + "', " +
"" + str(data['duration']) + ", " +
"" + str(data['num_songs']) + ", " +
"'" + data['song_id'] + "', " +
"'" + str(data['title']).replace("'", "''") + "', " +
"" + (str(data['year']) if not data['year'] == None else 'null') + "" +
"),\n")
sql += str2
# print(str2)
# batch inserts at 8k character threshold
if len(sql) > 8192:
print(' 8k insert...')
sql = ''.join(sql).strip()[:-1] + ';'
cur.execute(sql)
conn.commit()
sql = str(staging_songs_insert)
print('last insert...')
sql = ''.join(sql).strip()[:-1] + ';'
# print(sql)
# import pdb; pdb.set_trace()
cur.execute(sql)
conn.commit()
def load_staging_tables(cur, conn):
load_staging_song_data(cur, conn)
load_staging_log_data(cur, conn)
def insert_tables(cur, conn):
"""Populate staging, dimension and fact tables.
The fact table must be the last item in the query list.
"""
for query in insert_table_queries:
if query.strip() != "":
pprint(query)
cur.execute(query)
conn.commit()
def main():
"""Run Redshift ETL for staging, dimension and fact tables.
"""
config = configparser.ConfigParser()
config.read('rs_dwh.cfg')
conn = psycopg2.connect("host={} dbname={} user={} password={} port={}".format(*config['CLUSTER'].values()))
cur = conn.cursor()
load_gender_lookup()
load_staging_tables(cur, conn)
insert_tables(cur, conn)
conn.close()
if __name__ == "__main__":
main()
``` |
{
"source": "jlaumonier/mlsurvey",
"score": 3
} |
#### File: mlsurvey/mlsurvey/fairness_utils.py
```python
import operator
from functools import reduce
from operator import eq
class FairnessUtils:
@classmethod
def calculate_cond_probability(cls, data, ofs, givens):
""" calculate conditional probability of 'ofs' given 'givens' """
givens_conditions = []
ofs_conditions = []
for of in ofs:
ofs_conditions.append(eq(data.df[of[0]], of[1]))
for given in givens:
givens_conditions.append(eq(data.df[given[0]], given[1]))
ofs_conditions.append(eq(data.df[given[0]], given[1]))
ofs_logical_conditions = reduce(operator.and_, ofs_conditions)
givens_logical_conditions = reduce(operator.and_, givens_conditions)
set_ofs_inter_givens = data.df.loc[ofs_logical_conditions]
card_ofs_inter_givens = len(set_ofs_inter_givens.index)
set_givens = data.df.loc[givens_logical_conditions]
card_givens = len(set_givens.index)
result = card_ofs_inter_givens / card_givens
return result
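# Illustrative usage sketch (added; the column names 'y' and 'gender' are assumptions, and
# 'data' is expected to expose a pandas DataFrame as data.df):
#   p = FairnessUtils.calculate_cond_probability(data, [('y', 1)], [('gender', 'F')])
# computes P(y == 1 | gender == 'F') as
#   |rows with y == 1 and gender == 'F'| / |rows with gender == 'F'|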
```
#### File: mlsurvey/mlsurvey/fileoperation.py
```python
import json
import os
import pandas as pd
import mlsurvey as mls
class FileOperation:
@classmethod
def save_dict_as_json(cls, filename, directory, data):
"""
Save a dictionary into a json file. Transform the dictionary into mlsurvey json format (for lists).
Create the directory if it does not exist
:param filename: name of the file
:param directory: directory to save the file
:param data: data to save into the file
"""
os.makedirs(directory, exist_ok=True)
full_path = os.path.join(directory, filename)
with open(full_path, 'w') as outfile:
json.dump(mls.Utils.transform_to_json(data), outfile)
outfile.close()
@classmethod
def load_json_as_dict(cls, filename, directory, tuple_to_string=False):
"""
Load a json file (mlsurvey json format) into dictionary
:param filename: name of the file
:param directory: directory to load the file
:param tuple_to_string: if True, the tuples identified with "__type__": "__tuple__" are stored as strings in the
dictionary. If False, they are converted to the tuple type
:return: the dictionary
"""
full_path = os.path.join(directory, filename)
with open(full_path, 'r') as infile:
data = mls.Utils.transform_to_dict(json.load(infile), tuple_to_string)
return data
@classmethod
def save_hdf(cls, filename, directory, data, params=None):
"""
Save a dataframe into a hdf file.
Create the directory if it does not exist
:param filename: name of the file
:param directory: directory to save the file
:param data: data to save into the file
:param params: parameters passed to the to_hdf() method
"""
if params is None:
params = {}
os.makedirs(directory, exist_ok=True)
full_path = os.path.join(directory, filename)
data.to_hdf(full_path, 'key', mode='w', **params)
@classmethod
def read_hdf(cls, filename, directory, df_format):
"""
Load a dataframe from a hdf file.
:param filename: name of the file
:param directory: directory to load the file
:param df_format: 'Pandas'
"""
full_path = os.path.join(directory, filename)
data = None
if df_format == 'Pandas':
data = pd.read_hdf(full_path, 'key')
return data
@classmethod
def save_json(cls, filename, directory, data, params=None):
"""
Save a dataframe into a json file.
Create the directory if it does not exist
:param filename: name of the file
:param directory: directory to save the file
:param data: data to save into the file
:param params: parameters passed to the to_json() method
"""
if params is None:
params = {}
os.makedirs(directory, exist_ok=True)
full_path = os.path.join(directory, filename)
with open(full_path, 'w', encoding='utf-8') as file:
data.to_json(file, force_ascii=False, **params)
@classmethod
def read_json(cls, filename, directory, df_format):
"""
Load a dataframe from a json file.
:param filename: name of the file
:param directory: directory to load the file
:param df_format: 'Pandas'
"""
full_path = os.path.join(directory, filename)
data = None
if df_format == 'Pandas':
data = pd.read_json(full_path)
return data
@classmethod
def save_plotly_figure(cls, filename, directory, figure):
"""
Save a plotly figure into an image file.
Create the directory if it does not exist
:param filename: name of the file
:param directory: directory to save the file
:param figure: data to save into the file
"""
os.makedirs(directory, exist_ok=True)
full_path = os.path.join(directory, filename)
figure.write_image(full_path)
@classmethod
def load_input(cls, filename, directory):
"""
load inputs from a json file. The file may contain multiple inputs {"input1": input1, "input2": input2}
:param filename: the name of the file
:param directory: directory of the files to load
:return: dictionary containing each input in mlsurvey.sl.models.Data format
"""
data = cls.load_json_as_dict(filename=filename, directory=directory)
result = {}
for k, v in data.items():
df = mls.FileOperation.read_hdf(v['data_path'], directory, v['df_format'])
i = mls.sl.models.DataFactory.create_data_from_dict(v['df_format'], v['metadata'], df)
result[k] = i
return result
```
#### File: sl/models/algorithm.py
```python
import mlsurvey as mls
class Algorithm:
def __init__(self, config, base_directory=''):
"""
Initialize the algorithm class using the config
:param config dictionary containing keys 'type' and 'hyperparamters'.
The config 'type' is the name of the class defining the algorithm (e.g. sklearn.svm.SVC)
the config 'hyperparamters' is a dictionary used to initialize the class
:param base_directory if needed by the algorithm to load file (submodels...), '' by default
Raise a mlsurvey.exceptions.ConfigError if keys are not found in config
"""
self.algorithm_family = None
self.hyperparameters = None
self.base_directory = base_directory
try:
self.algorithm_family = config['type']
self.hyperparameters = config['hyperparameters']
except KeyError as e:
raise mls.exceptions.ConfigError(e)
def learn(self, x, y):
"""learn a classifier from input x and y"""
try:
classifier_class = mls.Utils.import_from_dotted_path(self.algorithm_family)
except AttributeError as e:
raise mls.exceptions.ConfigError(e)
classifier = classifier_class(**self.hyperparameters)
classifier.fit(x, y)
return classifier
def to_dict(self):
"""
transform an algorithm to a dictionary {'type': ..., 'hyperparameters': ...}
:return: a dictionary
"""
result = {'type': self.algorithm_family, 'hyperparameters': self.hyperparameters}
return result
@staticmethod
def from_dict(d):
"""
Create an Algorithm from a dictionary
:param d: the dictionary
:return: an instance of Algorithm
"""
return Algorithm(d)
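# Illustrative usage sketch (the configuration values below are examples, not mandated by mlsurvey):
#   algo = Algorithm({'type': 'sklearn.svm.SVC',
#                     'hyperparameters': {'kernel': 'rbf', 'C': 1.0}})
#   classifier = algo.learn(x_train, y_train)
# 'type' is resolved with Utils.import_from_dotted_path(), so any class exposing
# fit(x, y) and accepting the hyperparameters as keyword arguments should work.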
```
#### File: sl/models/datapandas.py
```python
import numpy as np
import pandas as pd
import mlsurvey as mls
from .data import Data
from .data_factory import DataFactory
class DataPandas(Data):
@property
def x(self):
return self._inner_data.iloc[:, 0:self.max_x_column].to_numpy()
@property
def x_df(self):
return self._inner_data.iloc[:, 0:self.max_x_column]
@property
def y(self):
return self._inner_data[self.y_col_name].to_numpy()
@property
def y_pred(self):
return self._inner_data[self.y_pred_col_name].to_numpy()
@staticmethod
def from_dict(dictionary, df):
""" see mls.models.Data.from_dict() """
try:
result = DataPandas(df,
dictionary['df_contains'],
dictionary['y_col_name'],
dictionary['y_pred_col_name']
)
except KeyError as e:
raise mls.exceptions.ModelError(e)
return result
def copy_with_new_data(self, data_array):
""" see mls.models.Data.copy_with_new_data() """
data_array = np.concatenate((data_array[0], np.array([data_array[1]]).T), axis=1)
df = pd.DataFrame(data_array, columns=self.df.columns)
data = mls.sl.models.DataPandas(df,
df_contains=self.df_contains,
y_col_name=self.y_col_name)
return data
def copy_with_new_data_dataframe(self, dataframe):
""" see mls.models.Data.copy_with_new_data_dataframe() """
data = mls.sl.models.DataPandas(dataframe,
df_contains=self.df_contains,
y_col_name=self.y_col_name)
return data
class Factory:
@staticmethod
def create(df, df_contains='xy', y_col_name=None, y_pred_col_name=None):
return DataPandas(df, df_contains, y_col_name, y_pred_col_name)
@staticmethod
def from_dict(d, df):
return DataPandas.from_dict(d, df)
DataFactory.add_factory('Pandas', DataPandas.Factory)
```
#### File: sl/models/evaluation_fairness.py
```python
from .evaluation import Evaluation
from .evaluation_factory import EvaluationFactory
class EvaluationFairness(Evaluation):
def __init__(self):
super().__init__()
self.demographic_parity = 0
self.equal_opportunity = None
self.statistical_parity = None
self.average_equalized_odds = None
self.disparate_impact_rate = None
def to_dict(self):
eval_dict = super().to_dict()
fairness_dict = {'demographic_parity': self.demographic_parity,
'equal_opportunity': self.equal_opportunity,
'statistical_parity': self.statistical_parity,
'average_equalized_odds': self.average_equalized_odds,
'disparate_impact_rate': self.disparate_impact_rate}
result = {**eval_dict, **fairness_dict}
return result
def from_dict(self, d_src):
self.demographic_parity = d_src['demographic_parity']
self.equal_opportunity = d_src['equal_opportunity']
self.statistical_parity = d_src['statistical_parity']
self.average_equalized_odds = d_src['average_equalized_odds']
self.disparate_impact_rate = d_src['disparate_impact_rate']
class Factory:
@staticmethod
def create(): return EvaluationFairness()
EvaluationFactory.add_factory('EvaluationFairness', EvaluationFairness.Factory)
```
#### File: sl/workflows/multiple_learning_workflow.py
```python
from kedro.io import DataCatalog, MemoryDataSet
from kedro.pipeline import Pipeline
from kedro.runner import SequentialRunner
import mlsurvey as mls
from mlsurvey.workflows.learning_workflow import LearningWorkflow
class MultipleLearningWorkflow(LearningWorkflow):
def run(self):
"""
Run the workflow: run each config
"""
# data
data_catalog = DataCatalog({'config': MemoryDataSet(),
'log': MemoryDataSet(),
'base_directory': MemoryDataSet()})
data_catalog.save('config', self.config)
data_catalog.save('log', self.log)
data_catalog.save('base_directory', self.base_directory)
expand_config_node = mls.sl.workflows.tasks.ExpandConfigTask.get_node()
multiple_learning_node = mls.sl.workflows.tasks.MultipleLearningTask.get_node()
# Assemble nodes into a pipeline
pipeline = Pipeline([expand_config_node, multiple_learning_node])
# Create a runner to run the pipeline
runner = SequentialRunner()
# Run the pipeline
result = runner.run(pipeline, data_catalog)
if len(result) == 0:
self.terminate()
```
#### File: workflows/tasks/learn_task.py
```python
import os
from kedro.pipeline import node
import mlsurvey as mls
from mlsurvey.workflows.tasks import BaseTask
class LearnTask(BaseTask):
"""
learn model from train data
"""
@classmethod
def _log_inputs_outputs(cls, log, d):
log.set_sub_dir(str(cls.__name__))
model_fullpathname = os.path.join(log.directory, 'model.joblib')
# Log model metadata
log.save_dict_as_json('model.json', d['model_metadata'])
log.save_dict_as_json('algorithm.json', d['algorithm'].to_dict())
log.save_classifier(d['model'], filename='model.joblib')
log.set_sub_dir('')
return model_fullpathname
@staticmethod
def learn(config, log, train_data):
algorithm_params = config.data['learning_process']['parameters']['algorithm']
algorithm = mls.sl.models.Algorithm(algorithm_params)
classifier = algorithm.learn(train_data.x, train_data.y)
# Logging
metadata = {'type': config.data['learning_process']['parameters']['algorithm']['type']}
d = {'model_metadata': metadata,
'algorithm': algorithm,
'model': classifier}
model_fullpathname = LearnTask._log_inputs_outputs(log, d)
return [model_fullpathname]
@classmethod
def get_node(cls):
return node(LearnTask.learn, inputs=['config', 'log', 'train_data'], outputs=['model_fullpath'])
```
#### File: workflows/tasks/split_data.py
```python
from kedro.pipeline import node
from mlsurvey.workflows.tasks import BaseTask
class SplitDataTask(BaseTask):
"""
split data from prepared data (train/test)
"""
@classmethod
def get_node(cls):
return node(SplitDataTask.split_data,
inputs=['config', 'log', 'raw_data', 'prepared_data'],
outputs=['train_data', 'test_data', 'train_raw_data', 'test_raw_data'])
@staticmethod
def split_data(config, log, raw_data, prepared_data):
"""
Split the data for the training/testing process.
At the moment, only the 'traintest' split (into train and test sets) is supported.
"""
split_params = config.data['learning_process']['parameters']['split']
if split_params['type'] == 'traintest':
# TODO test shuffle False
if split_params['parameters']['shuffle']:
df_test = prepared_data.df.sample(frac=split_params['parameters']['test_size'] / len(prepared_data.df),
random_state=split_params['parameters']['random_state'])
else:
df_test = prepared_data.df.head(len(prepared_data.df) * split_params['parameters']['test_size'])
df_train = prepared_data.df.drop(df_test.index)
data_train = prepared_data.copy_with_new_data_dataframe(df_train)
data_test = prepared_data.copy_with_new_data_dataframe(df_test)
raw_data_train_df = raw_data.df.iloc[data_train.df.index]
raw_data_train = raw_data.copy_with_new_data_dataframe(raw_data_train_df)
raw_data_test_df = raw_data.df.iloc[data_test.df.index]
raw_data_test = raw_data.copy_with_new_data_dataframe(raw_data_test_df)
# reindex
data_train.df.reset_index(drop=True, inplace=True)
data_test.df.reset_index(drop=True, inplace=True)
raw_data_train.df.reset_index(drop=True, inplace=True)
raw_data_test.df.reset_index(drop=True, inplace=True)
data_to_save = {'train': data_train,
'test': data_test,
'raw_train': raw_data_train,
'raw_test': raw_data_test}
SplitDataTask.log_inputs_outputs(log, data_to_save)
return [data_train, data_test, raw_data_train, raw_data_test]
@classmethod
def log_inputs_outputs(cls, log, d):
# Log inside sub directory
log.set_sub_dir(str(cls.__name__))
inputs = {'train': d['train'],
'test': d['test'],
'raw_train': d['raw_train'],
'raw_test': d['raw_test']}
log.save_input(inputs, metadata_filename='split_data.json')
log.set_sub_dir('')
```
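A similarly hedged sketch of the split section consumed by `SplitDataTask.split_data()`; the key names ('traintest', 'test_size', 'shuffle', 'random_state') appear in the code above, while the concrete values are illustrative assumptions.
```python
# Illustrative values only; test_size is assumed to be an absolute row count,
# matching the frac=test_size/len(df) computation above.
split_fragment = {
    'learning_process': {
        'parameters': {
            'split': {
                'type': 'traintest',
                'parameters': {'test_size': 20, 'shuffle': True, 'random_state': 0},
            }
        }
    }
}
```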
#### File: mlsurvey/mlsurvey/utils.py
```python
import ast
import hashlib
import importlib
import numpy as np
import pandas as pd
class Utils:
"""
Utils functions
"""
@classmethod
def md5_file(cls, filename):
"""
Calculate the md5 of a file
thanks <NAME> https://www.pythoncentral.io/hashing-files-with-python/
Raise FileNotFoundError if the file does not exist
"""
blocksize = 65536
hasher = hashlib.md5()
with open(filename, 'rb') as afile:
buf = afile.read(blocksize)
while len(buf) > 0:
hasher.update(buf)
buf = afile.read(blocksize)
return hasher.hexdigest()
@classmethod
def import_from_dotted_path(cls, dotted_names):
""" import_from_dotted_path('foo.bar') -> from foo import bar; return bar
"""
module_name, class_name = dotted_names.rsplit('.', 1)
module = importlib.import_module(module_name)
handler_class = getattr(module, class_name)
return handler_class
@classmethod
def make_meshgrid(cls, x, y, h=.02):
"""
Create a mesh of points to plot in
(src, thanks : https://scikit-learn.org/stable/auto_examples/svm/plot_iris.html)
:param x: data to base x-axis meshgrid on (type numpy.ndarray)
:param y: data to base y-axis meshgrid on (type numpy.ndarray)
:param h: stepsize for meshgrid, optional
:return: xx, yy : ndarray
"""
x_min, x_max = x.min() - 1, x.max() + 1
y_min, y_max = y.min() - 1, y.max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
return xx, yy
@classmethod
def transform_to_dict(cls, dictionary: dict, tuple_to_string=False):
"""
        Transform a dictionary containing sub-dictionaries such as
        { "__type__": "__tuple__", "__value__": "(1, 2, 3)"}
        into a dictionary containing the real type (tuple).
        :param dictionary: dictionary containing __tuple__ values
        :param tuple_to_string: if True, the tuples identified with "__type__": "__tuple__" are stored as strings in
        the dictionary. If False, each value is converted to a real tuple
:return dictionary containing the real type
"""
def change_one_dict_element(value):
if '__type__' in value:
if value['__type__'] == '__tuple__':
if tuple_to_string:
result_element = value['__value__']
else:
result_element = ast.literal_eval(value['__value__'])
if not isinstance(result_element, tuple):
                            raise TypeError(value['__value__'] + " is not a tuple")
else:
result_element = Utils.transform_to_dict(value, tuple_to_string)
else:
result_element = Utils.transform_to_dict(value, tuple_to_string)
return result_element
result = dictionary.copy()
for k, v in result.items():
if isinstance(v, dict):
result[k] = change_one_dict_element(v)
if isinstance(v, list):
result[k] = []
for e in v:
if isinstance(e, dict):
result[k].append(change_one_dict_element(e))
else:
result[k].append(e)
return result
@classmethod
def transform_to_json(cls, dictionary):
"""
        Transform a dictionary containing tuples into a dictionary
        whose tuples are encoded as { "__type__": "__tuple__", "__value__": "(1, 2, 3)"}
        :param dictionary: dictionary containing tuples
:return dictionary containing __tuple__ values
"""
def change_one_dict_element(value):
result_element = {'__type__': '__tuple__', '__value__': value.__str__()}
return result_element
result = dictionary.copy()
for k, v in result.items():
if isinstance(v, tuple):
result[k] = change_one_dict_element(v)
if isinstance(v, dict):
result[k] = Utils.transform_to_json(v)
if isinstance(v, list):
result[k] = []
for e in v:
if isinstance(e, tuple):
result[k].append(change_one_dict_element(e))
else:
if isinstance(e, dict):
result[k].append(Utils.transform_to_json(e))
else:
result[k].append(e)
return result
@classmethod
def check_dict_python_ready(cls, dictionary):
"""
        Check that a dictionary (and any nested ones) does not contain a __type__ key,
        which would mean it is not ready to be handled by Python.
:param dictionary: the dictionary to check
:return: False if the dictionary contains one __type__ key, True otherwise
"""
result = True
for k, v in dictionary.items():
if not isinstance(v, list):
v = [v]
for e in v:
if isinstance(e, dict):
if '__type__' in e:
result = False
else:
result = result & Utils.check_dict_python_ready(e)
return result
@classmethod
def flatten_dict(cls, dictionary, separator='_', prefix=''):
"""SRC : https://www.geeksforgeeks.org/python-convert-nested-dictionary-into-flattened-dictionary/"""
result = {prefix + separator + k if prefix else k: v
for kk, vv in dictionary.items()
for k, v in Utils.flatten_dict(vv, separator, kk).items()
} if isinstance(dictionary, dict) else {prefix: dictionary}
return result
@classmethod
def func_create_dataframe(cls, storage):
""" return the function that create a DataFrame from an array"""
if storage == 'Pandas':
return pd.DataFrame
@classmethod
def is_dataframe_empty(cls, df):
result = True
if isinstance(df, pd.DataFrame):
result = df.empty
return result
@classmethod
def str2bool(cls, v: str):
return v.lower() in ("yes", "true", "t", "1")
```
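A short usage sketch (not part of the original module) of the tuple round-trip implemented by `transform_to_json()` and `transform_to_dict()`; the behaviour shown matches the unit tests further below.
```python
import mlsurvey as mls
d = {'size': (1, 2, 3), 'name': 'dataset'}
as_json = mls.Utils.transform_to_json(d)
# {'size': {'__type__': '__tuple__', '__value__': '(1, 2, 3)'}, 'name': 'dataset'}
assert not mls.Utils.check_dict_python_ready(as_json)
assert mls.Utils.transform_to_dict(as_json) == d
```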
#### File: mlsurvey/visualize/visualize_log_detail.py
```python
import dash_dangerously_set_inner_html as ddsih
import dash_html_components as html
import json2table
import mlsurvey as mls
class VisualizeLogDetail:
""" Generation of the visualization for a log experiment """
def __init__(self, directory):
"""
initialize the workflow reading the files from directory
:param directory: the directory where the results are stored
"""
super().__init__()
self.source_directory = directory
self.config = None
self.configText = None
self.log = mls.Logging(self.source_directory, base_dir='')
def task_load_data(self):
"""
Load config from directory
"""
self.config = mls.Config('config.json', self.source_directory)
def task_display_data(self):
"""
Display with dash.
"""
self.config.compact()
        # This line triggers a "cannot find reference" warning; the cause and a proper fix are unknown.
self.configText = html.Div([ddsih.DangerouslySetInnerHTML(json2table.convert(self.config.data))])
def run(self):
self.task_load_data()
self.task_display_data()
def get_result(self, parameters):
div_children = [html.Div(self.configText,
className='six columns',
style={'display': parameters['display_config']})]
result = html.Div(children=div_children,
className='one_result')
return [result]
```
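A hypothetical driver for the class above; the results directory, the attribute path of the class and the 'display_config' value are assumptions.
```python
import mlsurvey as mls
viz = mls.visualize.VisualizeLogDetail('logs/experiment-1/')  # assumed module path and directory
viz.run()
children = viz.get_result({'display_config': 'block'})
```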
#### File: mlsurvey/test/test_fairness_utils.py
```python
import unittest
import numpy as np
import pandas as pd
import mlsurvey as mls
class TestFairnessUtils(unittest.TestCase):
def test_calculate_cond_probability_calculated_pandas(self):
x = np.array([[1, 2], [3, 4], [3, 2], [3, 3], [1, 3]])
y = np.array([0, 1, 0, 0, 1])
data_array = np.concatenate((x, np.array([y]).T), axis=1)
df = pd.DataFrame(data=data_array)
data = mls.sl.models.DataFactory.create_data('Pandas', df)
proba1 = mls.FairnessUtils.calculate_cond_probability(data, [('target', 0)], [('C0', 1)])
expected_proba1 = 0.5
proba2 = mls.FairnessUtils.calculate_cond_probability(data, [('target', 1)], [('C0', 1)])
expected_proba2 = 0.5
proba3 = mls.FairnessUtils.calculate_cond_probability(data, [('target', 1)], [('C0', 3)])
expected_proba3 = 0.3333333333333333
self.assertEqual(proba1, expected_proba1)
self.assertEqual(proba2, expected_proba2)
self.assertEqual(proba3, expected_proba3)
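    # Worked check: rows with C0 == 1 are indices 0 and 4 with targets [0, 1], so
    # P(target=0 | C0=1) = P(target=1 | C0=1) = 1/2; rows with C0 == 3 are indices
    # 1, 2 and 3 with targets [1, 0, 0], so P(target=1 | C0=3) = 1/3.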
def test_calculate_cond_probability_calculated_two_givens(self):
x = np.array([[1, 2], [3, 4], [3, 2], [3, 3], [1, 3], [1, 2]])
y = np.array([0, 1, 0, 0, 1, 1])
data_array = np.concatenate((x, np.array([y]).T), axis=1)
df = pd.DataFrame(data=data_array)
data = mls.sl.models.DataFactory.create_data('Pandas', df)
proba1 = mls.FairnessUtils.calculate_cond_probability(data, [('target', 0)], [('C0', 1), ('C1', 2)])
expected_proba1 = 0.5
self.assertEqual(proba1, expected_proba1)
def test_calculate_cond_probability_calculated_two_ofs(self):
x = np.array([[1, 2], [3, 4], [3, 2], [3, 3], [1, 3], [1, 2]])
y = np.array([0, 1, 0, 0, 1, 1])
data_array = np.concatenate((x, np.array([y]).T), axis=1)
df = pd.DataFrame(data=data_array)
data = mls.sl.models.DataFactory.create_data('Pandas', df)
proba1 = mls.FairnessUtils.calculate_cond_probability(data, [('target', 0), ('C0', 1)], [('C1', 2)])
expected_proba1 = 0.3333333333333333
self.assertEqual(proba1, expected_proba1)
```
#### File: test/test_import/test_explicit_import.py
```python
import unittest
import mlsurvey.config as config
class TestExplicitImport(unittest.TestCase):
def test_import(self):
c = {'c': 'test'}
var = config.Config(config=c)
self.assertIsInstance(var, config.Config)
```
#### File: test_rl/test_common/test_agent.py
```python
import unittest
import mlsurvey as mls
class TestAgent(unittest.TestCase):
def test_init(self):
"""
:test : mls.rl.common.Agent()
:condition : -
:main_result : Agent is initialized
"""
expected_name = 'AgentName1'
env = mls.rl.common.Environment()
ag = mls.rl.common.Agent(environment=env, name=expected_name)
self.assertIsInstance(ag, mls.rl.common.BaseObject)
self.assertIsInstance(ag, mls.rl.common.Object)
self.assertIsInstance(ag, mls.rl.common.Agent)
self.assertEqual(ag.name, expected_name)
self.assertIsNone(ag.action)
self.assertIsNone(ag.observation)
def test_choose_action(self):
"""
        :test : mls.rl.common.Agent.choose_action()
        :condition : observation is set
        :main_result : the agent's action is set
"""
env = mls.rl.common.Environment()
observation = mls.rl.common.State(environment=env, name='state')
expected_action_type = mls.rl.common.Action.ACTION_TYPE_1
env = mls.rl.common.Environment()
ag = mls.rl.common.Agent(environment=env, name='AgentName1')
ag.observation = observation
ag.choose_action()
self.assertEqual(ag.action.type, expected_action_type)
```
#### File: test_rl/test_common/test_object_state.py
```python
from unittest import TestCase
import mlsurvey as mls
class TestObjectState(TestCase):
def test_init(self):
"""
:test : mls.rl.common.ObjectState()
:condition : -
:main_result : ObjectState is initialized
"""
expected_name = 'state'
env = mls.rl.common.Environment()
object_state = mls.rl.common.ObjectState(environment=env, name='false_name')
self.assertIsInstance(object_state, mls.rl.common.BaseObject)
self.assertIsInstance(object_state, mls.rl.common.ObjectState)
self.assertEqual(env, object_state.environment)
self.assertEqual(expected_name, object_state.name)
self.assertEqual(1, len(object_state.characteristics))
self.assertEqual('Step0', list(object_state.characteristics.keys())[0])
charac = object_state.characteristics['Step0']
self.assertEqual(0, charac.value)
self.assertEqual('state.Step0', charac.get_fullname())
self.assertIn(charac.get_fullname(), env.objects)
```
#### File: test_sl/test_model/test_data.py
```python
import unittest
import numpy as np
import pandas as pd
import mlsurvey as mls
class TestData(unittest.TestCase):
def test_to_dict_dict_should_be_set(self):
"""
:test : mlsurvey.model.Data.to_dict()
:condition : x,y, y_pred data are filled.
:main_result : the dictionary generated is the same as expected
"""
x = np.array([[1, 2, 3], [4, 5, 6]])
y = np.array([0, 1])
y_pred = np.array([1, 0])
data_array = np.concatenate((x, np.array([y]).T, np.array([y_pred]).T), axis=1)
df = pd.DataFrame(data=data_array)
data = mls.sl.models.DataPandas(df, df_contains='xyypred')
expected = {'df_contains': 'xyypred',
'y_col_name': 'target',
'y_pred_col_name': 'target_pred'}
result = data.to_dict()
self.assertDictEqual(expected, result)
def test_from_dict_df_empty(self):
"""
:test : mlsurvey.model.DataPandas.from_dict()
:condition : the input dict is set and an empty dataframe is given.
:main_result : a ModelError occurs
"""
df = pd.DataFrame(data=np.array([]))
d = None
input_dict = {'df_contains': 'xyypred',
'y_col_name': 'target',
'y_pred_col_name': 'target_pred'}
try:
d = mls.sl.models.DataPandas.from_dict(input_dict, df)
self.assertTrue(False)
except mls.exceptions.ModelError:
self.assertIsNone(d)
self.assertTrue(True)
def test_from_dict_dict_empty(self):
"""
:test : mlsurvey.model.Data.from_dict()
        :condition : the input dict does not contain all keys and a full dataframe is given
:main_result : a ModelError occurs
"""
x = np.array([[1, 2], [3, 4]])
y = np.array([0, 1])
y_pred = np.array([1, 0])
data_array = np.concatenate((x, np.array([y]).T, np.array([y_pred]).T), axis=1)
df = pd.DataFrame(data=data_array)
data = None
input_dict = {'df_contains': 'xyypred',
'y_pred_col_name': 'target_pred'}
try:
data = mls.sl.models.DataPandas.from_dict(input_dict, df)
self.assertTrue(False)
except mls.exceptions.ModelError:
self.assertIsNone(data)
self.assertTrue(True)
```
#### File: test_sl/test_model/test_evaluation_factory.py
```python
import unittest
import numpy as np
import mlsurvey as mls
class TestEvaluationFactory(unittest.TestCase):
fac = {}
@classmethod
def setUpClass(cls):
cls.fac = dict(mls.sl.models.EvaluationFactory.factories)
@classmethod
def tearDownClass(cls):
mls.sl.models.EvaluationFactory.factories = dict(cls.fac)
def setUp(self):
mls.sl.models.EvaluationFactory().factories.clear()
def test_init_evaluation_factory_should_be_initialized(self):
evaluation_factory = mls.sl.models.EvaluationFactory()
self.assertIsNotNone(evaluation_factory)
self.assertDictEqual({}, evaluation_factory.factories)
def test_add_factory_should_be_added(self):
mls.sl.models.EvaluationFactory.add_factory('EvaluationSupervised',
mls.sl.models.EvaluationFactory)
self.assertEqual(1, len(mls.sl.models.EvaluationFactory.factories))
def test_create_evaluation_should_be_generated(self):
evaluation_factory = mls.sl.models.EvaluationFactory()
mls.sl.models.EvaluationFactory.add_factory('EvaluationSupervised',
mls.sl.models.EvaluationSupervised.Factory)
evaluation = evaluation_factory.create_instance('EvaluationSupervised')
self.assertIsInstance(evaluation, mls.sl.models.EvaluationSupervised)
self.assertEqual(0.0, evaluation.score)
def test_create_evaluation_from_dict_created(self):
expected_cm = np.array([[1, 2], [3, 4]])
source = {'type': 'EvaluationSupervised', 'score': 0.55, 'precision': 0.7, 'recall': 0.65,
'accuracy': 0.6, 'f1': 0.5, 'confusion_matrix': expected_cm.tolist(),
'per_label': {}}
evaluation_factory = mls.sl.models.EvaluationFactory()
mls.sl.models.EvaluationFactory.add_factory('EvaluationSupervised',
mls.sl.models.EvaluationSupervised.Factory)
evaluation = evaluation_factory.create_instance_from_dict(source)
self.assertIsInstance(evaluation, mls.sl.models.EvaluationSupervised)
self.assertEqual(0.55, evaluation.score)
np.testing.assert_array_equal(expected_cm, evaluation.confusion_matrix)
```
#### File: test_workflows/test_tasks/test_learn_task.py
```python
import os
import shutil
from kedro.io import DataCatalog, MemoryDataSet
from kedro.pipeline import Pipeline
from kedro.runner import SequentialRunner
from kedro.pipeline.node import Node
from sklearn import neighbors
import mlflow
import mlsurvey as mls
class TestLearnTask(mls.testing.TaskTestCase):
config_directory = ''
base_directory = ''
mlflow_client = None
mlflow_experiments = None
@classmethod
def setUpClass(cls):
directory = os.path.dirname(__file__)
cls.base_directory = os.path.join(directory, '../../../')
cls.config_directory = os.path.join(cls.base_directory, 'config/')
cls.mlflow_client = mlflow.tracking.MlflowClient()
cls.mlflow_experiments = cls.mlflow_client.list_experiments()
@classmethod
def tearDownClass(cls):
log = mls.Logging()
shutil.rmtree(os.path.join(cls.base_directory, log.base_dir), ignore_errors=True)
def test_get_node(self):
"""
:test : mlsurvey.sl.workflows.tasks.LearnTask.get_node()
:condition : -
        :main_result : create a kedro node with input and output parameters
"""
prepare_data_node = mls.sl.workflows.tasks.LearnTask.get_node()
self.assertIsInstance(prepare_data_node, Node)
def test_learn(self):
"""
:test : mlsurvey.sl.workflows.tasks.LearnTask.run()
        :condition : data files are split, saved in the HDF database and logged
:main_result : model is trained
"""
config, log = self._init_config_log('complete_config_loaded.json',
self.base_directory,
self.config_directory)
df_train_data = mls.FileOperation.read_hdf('train-content.h5',
os.path.join(self.base_directory, 'files/tasks/split_data'),
'Pandas')
train_data = mls.sl.models.DataFactory.create_data('Pandas', df_train_data)
[model_fullpath] = mls.sl.workflows.tasks.LearnTask.learn(config, log, train_data)
self.assertTrue(os.path.isfile(model_fullpath))
log.set_sub_dir(str(mls.sl.workflows.tasks.LearnTask.__name__))
classifier = log.load_classifier()
self.assertIsInstance(classifier, neighbors.KNeighborsClassifier)
self.assertEqual(30, classifier.get_params()['leaf_size'])
def _run_one_task(self, config_filename):
# create node from Task
load_data_node = mls.workflows.tasks.LoadDataTask.get_node()
prepare_data_node = mls.sl.workflows.tasks.PrepareDataTask.get_node()
split_data_node = mls.sl.workflows.tasks.SplitDataTask.get_node()
learn_data_node = mls.sl.workflows.tasks.LearnTask.get_node()
config, log = self._init_config_log(config_filename, self.base_directory, self.config_directory)
# Prepare a data catalog
data_catalog = DataCatalog({'config': MemoryDataSet(),
'log': MemoryDataSet(),
'base_directory': MemoryDataSet()})
data_catalog.save('config', config)
data_catalog.save('log', log)
data_catalog.save('base_directory', self.base_directory)
# Assemble nodes into a pipeline
pipeline = Pipeline([load_data_node, prepare_data_node, split_data_node, learn_data_node])
# Create a runner to run the pipeline
runner = SequentialRunner()
# Run the pipeline
runner.run(pipeline, data_catalog)
return log, config, data_catalog
def test_run(self):
"""
:test : mlsurvey.sl.workflows.tasks.LearnTask.run()
        :condition : data files are split, saved in the HDF database and logged
:main_result : model is trained
"""
config_filename = 'complete_config_loaded.json'
log, config, data_catalog = self._run_one_task(config_filename)
log.set_sub_dir(str(mls.sl.workflows.tasks.LearnTask.__name__))
self.assertTrue(os.path.isfile(os.path.join(log.directory, 'model.json')))
self.assertTrue(os.path.isfile(os.path.join(log.directory, 'model.joblib')))
self.assertTrue(os.path.isfile(os.path.join(log.directory, 'algorithm.json')))
```
#### File: test_workflows/test_tasks/test_prepare_data_task.py
```python
import os
import shutil
from kedro.pipeline.node import Node
from kedro.io import DataCatalog, MemoryDataSet
from kedro.pipeline import Pipeline
from kedro.runner import SequentialRunner
import mlflow
import mlsurvey as mls
class TestPrepareDataTask(mls.testing.TaskTestCase):
config_directory = ''
base_directory = ''
mlflow_client = None
mlflow_experiments = None
@classmethod
def setUpClass(cls):
directory = os.path.dirname(__file__)
cls.base_directory = os.path.join(directory, '../../../')
cls.config_directory = os.path.join(cls.base_directory, 'config/')
cls.mlflow_client = mlflow.tracking.MlflowClient()
cls.mlflow_experiments = cls.mlflow_client.list_experiments()
@classmethod
def tearDownClass(cls):
log = mls.Logging()
shutil.rmtree(os.path.join(cls.base_directory, log.base_dir), ignore_errors=True)
def test_get_node(self):
"""
:test : mlsurvey.workflows.tasks.PrepareDataTask.get_node()
:condition : -
        :main_result : create a kedro node with input and output parameters
"""
prepare_data_node = mls.sl.workflows.tasks.PrepareDataTask.get_node()
self.assertIsInstance(prepare_data_node, Node)
def test_prepare_data(self):
"""
:test : mlsurvey.workflows.tasks.PrepareDataTask.prepare_data()
:condition : -
:main_result : load the data
"""
config, log = self._init_config_log('complete_config_loaded.json',
self.base_directory,
self.config_directory)
df_raw_data = mls.FileOperation.read_hdf('raw_data-content.h5',
os.path.join(self.base_directory, 'files/tasks/load_data'),
'Pandas')
raw_data = mls.sl.models.DataFactory.create_data('Pandas', df_raw_data)
lx = len(raw_data.x)
ly = len(raw_data.y)
[prepared_data] = mls.sl.workflows.tasks.PrepareDataTask.prepare_data(config, log, raw_data)
self.assertEqual(-0.7655005998158294, prepared_data.x[0][0])
self.assertEqual(lx, len(prepared_data.x))
self.assertEqual(ly, len(prepared_data.y))
def _run_one_task(self, config_filename):
# create node from Task
load_data_node = mls.workflows.tasks.LoadDataTask.get_node()
prepare_data_node = mls.sl.workflows.tasks.PrepareDataTask.get_node()
config, log = self._init_config_log(config_filename, self.base_directory, self.config_directory)
# Prepare a data catalog
data_catalog = DataCatalog({'config': MemoryDataSet(),
'log': MemoryDataSet(),
'base_directory': MemoryDataSet()})
data_catalog.save('config', config)
data_catalog.save('log', log)
data_catalog.save('base_directory', self.base_directory)
# Assemble nodes into a pipeline
pipeline = Pipeline([load_data_node, prepare_data_node])
# Create a runner to run the pipeline
runner = SequentialRunner()
# Run the pipeline
runner.run(pipeline, data_catalog)
return log, config, data_catalog
def test_run(self):
"""
:test : mlsurvey.sl.workflows.tasks.PrepareDataTask.run()
        :condition : data files are loaded, saved in the HDF database and logged
:main_result : data are prepared.
"""
config_filename = 'complete_config_loaded.json'
log, config, data_catalog = self._run_one_task(config_filename)
log.set_sub_dir(str(mls.sl.workflows.tasks.PrepareDataTask.__name__))
self.assertTrue(os.path.isfile(os.path.join(log.directory, 'data-content.h5')))
df_data = mls.FileOperation.read_hdf('data-content.h5', os.path.join(log.directory), 'Pandas')
data = mls.sl.models.DataFactory.create_data('Pandas', df_data)
self.assertEqual(-0.7655005998158294, data.x[0][0])
def test_run_prepare_textual_data(self):
"""
:test : mlsurvey.sl.workflows.tasks.PrepareDataTask.run()
:condition : data is textual
:main_result : data are prepared.
        Warning: this test needs at least 12 GB of free RAM because text stored in a dataframe is not memory-optimized
"""
config_filename = 'config_dataset_text.json'
log, config, data_catalog = self._run_one_task(config_filename)
log.set_sub_dir(str(mls.workflows.tasks.LoadDataTask.__name__))
df_raw_data = mls.FileOperation.read_hdf('raw_data-content.h5',
os.path.join(log.directory),
'Pandas')
raw_data = mls.sl.models.DataFactory.create_data('Pandas', df_raw_data)
lx = len(raw_data.x)
ly = len(raw_data.y)
self.assertEqual('7', raw_data.y[0])
log.set_sub_dir(str(mls.sl.workflows.tasks.PrepareDataTask.__name__))
self.assertTrue(os.path.isfile(os.path.join(log.directory, 'data-content.h5')))
df_data = mls.FileOperation.read_hdf('data-content.h5', os.path.join(log.directory), 'Pandas')
data = mls.sl.models.DataFactory.create_data('Pandas', df_data)
self.assertEqual(0.23989072176612425, data.x[0][0])
self.assertEqual(lx, len(data.x))
self.assertEqual(ly, len(data.y))
```
#### File: mlsurvey/test/test_utils.py
```python
import unittest
import os
import numpy as np
import pandas as pd
import sklearn.neighbors as neighbors
import mlsurvey as mls
class TestUtils(unittest.TestCase):
@classmethod
def setUpClass(cls):
directory = os.path.dirname(__file__)
cls.base_directory = os.path.join(directory, '')
def test_md5_file(self):
md5 = mls.Utils.md5_file(os.path.join(self.base_directory, 'files/test_md5.txt'))
self.assertEqual('70a4b9f4707d258f559f91615297a3ec', md5)
def test_md5_file_not_exists(self):
"""
:test : mlsurvey.Utils.md5_file()
:condition : unknown file
:main_result : raise FileNotFoundError
"""
try:
_ = mls.Utils.md5_file('files/test_md5_unknown.txt')
self.assertTrue(False)
except FileNotFoundError:
self.assertTrue(True)
def test_import_from_dotted_path_class_created(self):
to_import = 'sklearn.neighbors.KNeighborsClassifier'
classdef = mls.Utils.import_from_dotted_path(to_import)
self.assertEqual(neighbors.KNeighborsClassifier, classdef)
def test_import_from_dotted_path_not_exists(self):
"""
:test : mlsurvey.Utils.import_from_dotted_path()
:condition : unknown module and class
:main_result : AttributeError
"""
to_import = 'sklearn.neighbors.UnknownClass'
try:
_ = mls.Utils.import_from_dotted_path(to_import)
self.assertTrue(False)
except AttributeError:
self.assertTrue(True)
def test_make_meshgrid(self):
x = np.array([1, 2])
y = np.array([3, 4])
xx, yy = mls.Utils.make_meshgrid(x, y, h=.5)
self.assertListEqual(xx[0].tolist(), [0, 0.5, 1, 1.5, 2, 2.5])
self.assertListEqual(yy[0].tolist(), [2, 2, 2, 2, 2, 2])
def test_transform_to_dict_tuple_should_transform(self):
"""
        :test : mlsurvey.Utils.transform_to_dict()
:condition : dictionary contains one __tuple__
:main_result : transformation into tuple
"""
base_dictionary = {'test': {'__type__': '__tuple__', '__value__': '(1, 2, 3)'},
'nottuple': {'t': 1},
'nottupleeither': 'string'}
expected_dictionary = {'test': (1, 2, 3),
'nottuple': {'t': 1},
'nottupleeither': 'string'}
result = mls.Utils.transform_to_dict(base_dictionary)
self.assertDictEqual(expected_dictionary, result)
def test_transform_to_dict_tuple_should_transform_to_string(self):
"""
        :test : mlsurvey.Utils.transform_to_dict()
        :condition : dictionary contains one __tuple__
        :main_result : transform the tuple into a string
"""
base_dictionary = {'test': {'__type__': '__tuple__', '__value__': '(1, 2, 3)'},
'nottuple': {'t': 1},
'nottupleeither': 'string'}
expected_dictionary = {'test': '(1, 2, 3)',
'nottuple': {'t': 1},
'nottupleeither': 'string'}
result = mls.Utils.transform_to_dict(base_dictionary, tuple_to_string=True)
self.assertDictEqual(expected_dictionary, result)
def test_transform_to_dict_not_tuple_should_raise_error(self):
"""
        :test : mlsurvey.Utils.transform_to_dict()
        :condition : dictionary contains one __tuple__ whose __value__ is not a string representation of a tuple
:main_result : raise a TypeError
"""
base_dictionary = {'test': {'__type__': '__tuple__', '__value__': '234'}}
try:
_ = mls.Utils.transform_to_dict(base_dictionary)
self.assertTrue(False)
except TypeError:
self.assertTrue(True)
def test_transform_to_dict_tuple_nested_should_transform(self):
"""
        :test : mlsurvey.Utils.transform_to_dict()
        :condition : dictionary contains __tuple__ with nested dictionaries
        :main_result : transformation into tuples
"""
base_dictionary = {'test': {'__type__': '__tuple__', '__value__': '(1, 2, 3)'},
'nottuple': {'a': {'__type__': '__tuple__', '__value__': '(1, 2, 3)'},
'b': {'c': 1,
'd': {'__type__': '__tuple__', '__value__': '(1, 2, 3)'}}},
'nottupleeither': 'string'}
expected_dictionary = {'test': (1, 2, 3),
'nottuple': {'a': (1, 2, 3),
'b': {'c': 1,
'd': (1, 2, 3)}},
'nottupleeither': 'string'}
result = mls.Utils.transform_to_dict(base_dictionary)
self.assertDictEqual(expected_dictionary, result)
def test_transform_to_dict_list_of_tuple_nested_should_transform(self):
"""
:test : mlsurvey.Utils.transform_to_dict()
:condition : dictionary contains lists of __tuple__ with nested dictionaries
:main_result : transformation into tuples
"""
base_dictionary = {'test': [{'__type__': '__tuple__', '__value__': '(1, 2, 3)'},
{'__type__': '__tuple__', '__value__': '(4, 5, 6)'}],
'nottuple': {'a': {'__type__': '__tuple__', '__value__': '(1, 2, 3)'},
'b': {'c': 1,
'd': [{'__type__': '__tuple__', '__value__': '(1, 2, 3)'},
{'__type__': '__tuple__', '__value__': '(4, 5, 6)'}]}},
'nottupleeither': 'string'}
expected_dictionary = {'test': [(1, 2, 3), (4, 5, 6)],
'nottuple': {'a': (1, 2, 3),
'b': {'c': 1,
'd': [(1, 2, 3), (4, 5, 6)]}},
'nottupleeither': 'string'}
result = mls.Utils.transform_to_dict(base_dictionary)
self.assertDictEqual(expected_dictionary, result)
def test_transform_to_json_tuple_should_transform(self):
"""
        :test : mlsurvey.Utils.transform_to_json()
:condition : dictionary contains one tuple
:main_result : transformation into __tuple__
"""
base_dictionary = {'test': (1, 2, 3),
'nottuple': {'t': 1},
'nottupleeither': 'string'}
expected_dictionary = {'test': {'__type__': '__tuple__', '__value__': '(1, 2, 3)'},
'nottuple': {'t': 1},
'nottupleeither': 'string'}
result = mls.Utils.transform_to_json(base_dictionary)
self.assertDictEqual(expected_dictionary, result)
def test_transform_to_json_tuple_nested_should_transform(self):
"""
        :test : mlsurvey.Utils.transform_to_json()
        :condition : dictionary contains tuples with nested dictionaries
        :main_result : transformation into __tuple__ values
"""
base_dictionary = {'test': (1, 2, 3),
'nottuple': {'a': (1, 2, 3),
'b': {'c': 1,
'd': (1, 2, 3)}},
'nottupleeither': 'string'}
expected_dictionary = {'test': {'__type__': '__tuple__', '__value__': '(1, 2, 3)'},
'nottuple': {'a': {'__type__': '__tuple__', '__value__': '(1, 2, 3)'},
'b': {'c': 1,
'd': {'__type__': '__tuple__', '__value__': '(1, 2, 3)'}}},
'nottupleeither': 'string'}
result = mls.Utils.transform_to_json(base_dictionary)
self.assertDictEqual(expected_dictionary, result)
def test_transform_to_json_list_of_tuple_nested_should_transform(self):
"""
:test : mlsurvey.Utils.transform_to_json()
        :condition : dictionary contains lists of tuples with nested dictionaries
        :main_result : transformation into __tuple__ values inside lists
"""
base_dictionary = {'test': [(1, 2, 3), (4, 5, 6)],
'nottuple': {'a': (1, 2, 3),
'b': {'c': 1,
'd': [(1, 2, 3), (4, 5, 6)]}},
'nottupleeither': 'string'}
expected_dictionary = {'test': [{'__type__': '__tuple__', '__value__': '(1, 2, 3)'},
{'__type__': '__tuple__', '__value__': '(4, 5, 6)'}],
'nottuple': {'a': {'__type__': '__tuple__', '__value__': '(1, 2, 3)'},
'b': {'c': 1,
'd': [{'__type__': '__tuple__', '__value__': '(1, 2, 3)'},
{'__type__': '__tuple__', '__value__': '(4, 5, 6)'}]}},
'nottupleeither': 'string'}
result = mls.Utils.transform_to_json(base_dictionary)
self.assertDictEqual(expected_dictionary, result)
def test_check_dict_python_ready_should_be_ready(self):
"""
:test : mlsurvey.Utils.check_dict_python_ready()
:condition : dictionary does not contains '__type__': '__tuple__'
:main result : dictionary is python-ready
"""
base_dictionary = {'test': [(1, 2, 3), (4, 5, 6)],
'nottuple': {'a': (1, 2, 3),
'b': {'c': 1,
'd': [[1, 2, 3], [4, 5, 6]]}},
'nottupleeither': 'string'}
result = mls.Utils.check_dict_python_ready(base_dictionary)
self.assertTrue(result)
def test_check_dict_python_ready_should_not_be_ready(self):
"""
:test : mlsurvey.Utils.check_dict_python_ready()
:condition : dictionary does contains '__type__': '__tuple__'
:main result : dictionary is not python-ready
"""
base_dictionary = {'test': [{'__type__': '__tuple__', '__value__': '(1, 2, 3)'},
{'__type__': '__tuple__', '__value__': '(4, 5, 6)'}],
'nottuple': {'a': {'__type__': '__tuple__', '__value__': '(1, 2, 3)'},
'b': {'c': 1}},
'nottupleeither': 'string'}
result = mls.Utils.check_dict_python_ready(base_dictionary)
self.assertFalse(result)
def test_check_dict_python_ready_should_not_be_ready_with_list_only(self):
"""
:test : mlsurvey.Utils.check_dict_python_ready()
:condition : dictionary does contains '__type__': '__tuple__' into list
:main result : dictionary is not python-ready
"""
base_dictionary = {'test': [{'__type__': '__tuple__', '__value__': '(1, 2, 3)'},
{'__type__': '__tuple__', '__value__': '(4, 5, 6)'}],
'nottupleeither': 'string'}
result = mls.Utils.check_dict_python_ready(base_dictionary)
self.assertFalse(result)
def test_is_dataframe_empty_pandas_is_empty(self):
"""
:test : mlsurvey.Utils.is_dataframe_empty()
:condition : dataframe is pandas and empty
:main_result : True
"""
df = pd.DataFrame()
result = mls.Utils.is_dataframe_empty(df)
self.assertTrue(result)
def test_is_dataframe_empty_pandas_is_not_empty(self):
"""
:test : mlsurvey.Utils.is_dataframe_empty()
:condition : dataframe is pandas and not empty
:main_result : False
"""
x = np.array([[1, 2], [3, 4]])
df = pd.DataFrame(data=x)
result = mls.Utils.is_dataframe_empty(df)
self.assertFalse(result)
``` |
{
"source": "jlaundry/crowdstrike-fdr-azure",
"score": 2
} |
#### File: crowdstrike-fdr-azure/StoreFile/__init__.py
```python
import io
import logging
import os
import azure.functions as func
import boto3
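# Assumed wiring (function.json is not part of this snippet): a queue trigger bound to
# "msg" that carries an S3 object key of the form "bucket/path", and an output binding
# named "outfile" that receives the downloaded file body. FDR_REGION, FDR_AWS_KEY and
# FDR_AWS_SECRET are expected to be set as application settings.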
def main(msg: func.QueueMessage, outfile: func.Out[func.InputStream]) -> None:
filename = msg.get_body().decode('utf-8')
logging.info(f"Working on queue item: {filename}")
s3 = boto3.client('s3',
region_name=os.environ['FDR_REGION'],
aws_access_key_id=os.environ['FDR_AWS_KEY'],
aws_secret_access_key=os.environ['FDR_AWS_SECRET']
)
(bucket, path) = filename.split("/", 1)
content = io.BytesIO()
s3.download_fileobj(bucket, path, content)
content.seek(0)
outfile.set(content)
logging.info("Done")
``` |
{
"source": "jlaundry/pihole-sentinel",
"score": 2
} |
#### File: jlaundry/pihole-sentinel/pihole-sentinel.py
```python
import logging
logging.basicConfig(level=logging.INFO)
from datetime import datetime
import socket
import sqlite3
from azure_log_analytics import LogAnalytics
from local_settings import AZURE_WORKSPACE_ID, AZURE_SECRET_KEY
DEVICE_HOSTNAME = socket.gethostname()
# Discover this device's global IPv6 address with a UDP socket towards a public resolver
# (no packets are sent); getaddrinfo() on a remote hostname would return the remote
# host's address rather than ours.
_probe = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
_probe.connect(("2001:4860:4860::8888", 53))
DEVICE_IP6 = _probe.getsockname()[0]
_probe.close()
def dict_factory(cursor, row):
d = {}
for idx, col in enumerate(cursor.description):
d[col[0]] = row[idx]
return d
QUERY_TYPES = {
1: "A",
2: "AAAA",
3: "ANY",
4: "SRV",
5: "SOA",
6: "PTR",
7: "TXT",
8: "NAPTR",
9: "MX",
10: "DS",
11: "RRSIG",
12: "DNSKEY",
13: "NS",
14: "OTHER",
15: "SVCB",
16: "HTTPS",
}
QUERY_STATUS = {
0: "Failure: Unknown status (not yet known)",
1: "Failure: Domain contained in gravity database",
2: "Success: Forwarded",
3: "Success: Known, replied to from cache",
4: "Failure: Domain matched by a regex blacklist filter",
5: "Failure: Domain contained in exact blacklist",
6: "Failure: By upstream server (known blocking page IP address)",
7: "Failure: By upstream server (0.0.0.0 or ::)",
8: "Failure: By upstream server (NXDOMAIN with RA bit unset)",
9: "Failure: Domain contained in gravity database",
10: "Failure: Domain matched by a regex blacklist filter",
11: "Failure: Domain contained in exact blacklist",
12: "Success: Retried query",
13: "Success: Retried but ignored query (this may happen during ongoing DNSSEC validation)",
14: "Success: Already forwarded, not forwarding again",
}
sentinel = LogAnalytics(AZURE_WORKSPACE_ID, AZURE_SECRET_KEY)
last_filename = '.pihole-latest'
LAST_ID = 0
try:
with open(last_filename, 'r') as of:
LAST_ID = int(of.read())
except FileNotFoundError:
pass
except ValueError:
pass
now = datetime.now().isoformat()
logging.info(f"Starting at {now} from queries.id={LAST_ID}")
def update_latest(rowid, force=False):
global LAST_ID
if rowid < LAST_ID + 100 and not force:
return
logging.info(f"Writing LAST_ID {rowid}")
with open(last_filename, 'w') as of:
of.write(str(rowid))
LAST_ID = rowid
#con = sqlite3.connect('/etc/pihole/pihole-FTL.db')
con = sqlite3.connect('/tmp/pihole-FTL.db')
con.row_factory = dict_factory
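# dict_factory (defined above) turns each result row into a {column_name: value} dict,
# so the loop below can read row['timestamp'], row['domain'], etc.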
cur = con.cursor()
last_row_id = LAST_ID
for row in cur.execute('SELECT * FROM queries WHERE id >:id ORDER BY id', {"id": LAST_ID}):
    record = {
        "TimeGenerated": datetime.utcfromtimestamp(row['timestamp']).isoformat() + "Z",
        "EventCount": 1,
        "EventOriginalUid": str(row['id']),
        "EventType": "lookup",
        "EventResult": QUERY_STATUS.get(row['status'], QUERY_STATUS[0]).split(":")[0],
"EventResultDetails": QUERY_TYPES.get(row['type']),
"EventProduct": "Pi Hole",
"EventVendor": "Pi Hole",
"EventSchemaVersion": "0.1.1",
"Dvc": DEVICE_HOSTNAME,
"DvcIpAddr": DEVICE_IP6,
"DvcHostname": DEVICE_HOSTNAME,
"SrcIpAddr": row['client'],
"DnsQuery": row['domain'],
"DnsQueryTypeName": QUERY_TYPES.get(row['type']),
"DnsResponseCodeName": "NA",
}
logging.debug(record)
sentinel.post(record, "Normalized")
    last_row_id = row['id']
    update_latest(last_row_id)
con.close()
update_latest(last_row_id, force=True)
``` |
{
"source": "jlaunonen/man2qhelp",
"score": 2
} |
#### File: jlaunonen/man2qhelp/man2qhelp.py
```python
import argparse
from collections import OrderedDict
import glob
import os.path
import re
import subprocess
import sys
from html.parser import HTMLParser
from typing import Callable, List, NamedTuple, Optional, Tuple
__version__ = "0.2"
DEFAULT_NAMESPACE = "man.linux.org.1.0"
IN_PATH = "/usr/share/man/man%s"
MAN_LINK = re.compile(r"<b>(\w+)</b>\((\d+p?)\)")
IMAGE_NAME_RE = re.compile(r"(?P<keyword>.+?)-\d+\.\w+")
QHP_TEMPLATE = """<?xml version="1.0" encoding="UTF-8"?>
<QtHelpProject version="1.0">
<namespace>{namespace}</namespace>
<virtualFolder>man-pages</virtualFolder>
<customFilter name="Linux Man 1.0">
<filterAttribute>man</filterAttribute>
</customFilter>
""", """</QtHelpProject>
"""
CATEGORY_TEMPLATE = """<filterSection>
<filterAttribute>man</filterAttribute>
<filterAttribute>{filter_category}</filterAttribute>
<keywords>
""", """\
</keywords>
<files>
""", """\
</files>
</filterSection>
"""
class BasePath(object):
def __init__(self, path: str):
self._path = path
def join(self, *paths: str) -> str:
return os.path.join(self._path, *paths)
Options = NamedTuple("Options", [
("cache_path", BasePath),
("qhp", str),
("force", bool),
("sources", List[str]),
("qhp_namespace", str),
("quiet", bool),
("print", Callable)
])
LevelResult = NamedTuple("LevelResult", [
("keywords", List["Keyword"]),
("cross_references", List[Tuple[str, str]]),
("has_errors", bool),
])
def man_path(level: int, page: Optional[str]=None) -> str:
if page is None:
return IN_PATH % level
return os.path.join(IN_PATH % level, page)
def src_bzip(path: str) -> str:
return subprocess.check_output(["bunzip2", "-c", path]).decode("utf-8", errors="replace")
def src_raw(path: str) -> str:
with open(path, "r") as f:
return f.read()
def remove_extensions(source: str, *extensions: str) -> str:
base, ext = os.path.splitext(source)
if ext in extensions:
return remove_extensions(base, *extensions)
return source
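# Example: remove_extensions("select.2.bz2", ".bz2", ".2") -> "select"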
def result_name(source_name: str, level: str) -> str:
stripped = remove_extensions(os.path.basename(source_name), ".bz2", "." + level)
return stripped + ".html"
def src(path: str) -> Optional[Tuple[Optional[str], str, Optional[str]]]:
if not os.path.exists(path):
print("Does not exist:", path)
return None
base = os.path.basename(path)
if path.endswith(".bz2"):
data = src_bzip(path)
name = os.path.splitext(base)[0]
else:
data = src_raw(path)
name = base
name = os.path.splitext(name)[0]
if data.startswith(".so "):
alias = data.strip().split("\n")
if len(alias) == 1:
alias = alias[0]
alias_info = re.match(r"\.so\s+(?:.*?/)?man(\d+)/([\w_-]+)", alias)
if alias_info is not None:
alias_path = man_path(int(alias_info.group(1)), alias_info.group(2))
else:
alias_info = re.match(r"\.so\s+([\w_-]+\.(\d))", alias)
if alias_info is not None:
alias_path = man_path(int(alias_info.group(2)), alias_info.group(1))
else:
print("not understood alias:", name, data)
return None
candidates = glob.glob(alias_path + ".*")
if len(candidates) == 0:
print("No matching alias source:", alias_path)
return None
elif len(candidates) > 1:
print("Too many candidates:", name, "/", alias)
print("\n".join(candidates))
return None
else:
return None, name, candidates[0]
else:
return data, name, None
class TitleFinder(HTMLParser):
def __init__(self):
super(TitleFinder, self).__init__()
self._in_title = False
self._title = ""
@property
def title(self):
return self._title
def error(self, message):
print(message)
def handle_starttag(self, tag, attrs):
if tag == "title" and not self._in_title:
if len(self._title) == 0:
self._in_title = True
else:
print("Multiple title-elements")
super().handle_starttag(tag, attrs)
def handle_endtag(self, tag):
if tag == "title" and self._in_title:
self._in_title = False
super().handle_endtag(tag)
def handle_data(self, data):
if self._in_title:
self._title += data
super().handle_data(data)
def title_tag(text: str) -> str:
return "<title>" + text + "</title>"
class Keyword(object):
def __init__(self, keyword: str, target: str, is_alias: bool = False):
self.keyword = keyword
"Keyword, such as `select`."
self.target = target
"Output or target filename."
self.is_alias = is_alias
"If `True`, `target` points to the alias target."
def link_replacer(ref_list: List[Tuple[str, str]]):
def fn(match) -> str:
name = match.group(1)
level = match.group(2)
ref_list.append((level, name))
return '<a href="../html.' + level + '/' + name + '.html">' + match.group(0) + '</a>'
return fn
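# Example: MAN_LINK.sub(link_replacer(refs), '<b>select</b>(2)')
# -> '<a href="../html.2/select.html"><b>select</b>(2)</a>'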
def do_level(level: str, options: Options) -> LevelResult:
level_keywords = [] # type: List[Keyword]
cross_references = [] # type: List[Tuple[str, str]]
has_errors = False
out_dir = options.cache_path.join("html.%s" % level)
if not os.path.exists(out_dir):
os.mkdir(out_dir)
images_dir = os.path.join(out_dir, "images")
if not os.path.exists(images_dir):
os.mkdir(images_dir)
in_dir = IN_PATH % level
# Needed for images to work correctly with relative path.
original_dir = os.getcwd()
os.chdir(out_dir)
for f in os.listdir(in_dir):
source_filename = os.path.join(in_dir, f)
source_mtime = os.path.getmtime(source_filename)
src_result = src(source_filename)
if src_result is None:
continue
man_data, name, alias = src_result
if man_data is None:
base_name = result_name(alias, level)
target = options.cache_path.join("html.%s" % level, base_name)
options.print("alias", name, "=", target)
level_keywords.append(Keyword(name, target, is_alias=True))
continue
base_name = result_name(name, level)
target = options.cache_path.join("html.%s" % level, base_name)
out_file = base_name
level_keywords.append(Keyword(name, target))
if not options.force and os.path.exists(out_file) and abs(os.path.getmtime(out_file) - source_mtime) < 1.0:
options.print("keyword", name, "=", out_file, " # UNCHANGED delta %ss" %
str(os.path.getmtime(out_file) - source_mtime))
continue
options.print("keyword", name, "=", target)
# Define path and name for images.
image_args = [
"-P", "-D" + "images",
"-P", "-I" + name + "-",
]
process = subprocess.run("groff -t -m mandoc -mwww -Thtml".split() + image_args,
input=man_data, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding="utf-8")
html_data = process.stdout
error_text = process.stderr
if error_text:
print("entry %s:" % name, error_text, file=sys.stderr)
if process.returncode != 0:
print("error running groff: %d. output not written" % process.returncode)
has_errors = True
continue
parser = TitleFinder()
parser.feed(html_data)
# Replace all caps title to something more informative.
html_data = html_data.replace(title_tag(parser.title), title_tag(parser.title.lower() + " | man" + str(level)))
# Replace all cross-references to other man-pages with links to them, regardless whether they exist or not.
html_data = MAN_LINK.sub(link_replacer(cross_references), html_data)
with open(out_file, "w") as o:
o.write(html_data)
# Set result file modification time to source time to allow checking changes in future.
os.utime(out_file, (source_mtime, source_mtime))
# Restore working directory.
os.chdir(original_dir)
level_files = set(os.path.basename(kw.target) for kw in level_keywords if not kw.is_alias)
for file in os.listdir(out_dir):
        if os.path.isfile(os.path.join(out_dir, file)) and file not in level_files:
to_remove = os.path.join(out_dir, file)
options.print("delete", to_remove)
os.remove(to_remove)
keywords = set(kw.keyword for kw in level_keywords if not kw.is_alias)
for file in os.listdir(images_dir):
match = IMAGE_NAME_RE.match(file)
if match is not None:
kw = match.group(1)
if kw in keywords:
continue
to_remove = os.path.join(images_dir, file)
options.print("delete", to_remove)
os.remove(to_remove)
return LevelResult(level_keywords, cross_references, has_errors)
def do_levels(options: Options):
kws = OrderedDict()
cross_references = []
has_errors = False
for level in options.sources:
options.print("category", level)
lkw, cross, errors = do_level(level, options)
options.print("end category", level)
kws[level] = lkw
cross_references.extend(cross)
has_errors |= errors
# Qt Help requires that the files included and the project file are in same directory.
catalog = options.cache_path.join(options.qhp)
with open(catalog, "w") as o:
o.write(QHP_TEMPLATE[0].format(namespace=options.qhp_namespace))
for level, keywords in kws.items():
o.write(CATEGORY_TEMPLATE[0].format(filter_category="man" + str(level)))
for kw in keywords:
o.write(' <keyword name="{}" ref="{}" />\n'.format(kw.keyword, kw.target))
o.write(CATEGORY_TEMPLATE[1])
o.write(" <file>html." + level + "/*.html</file>\n")
o.write(" <file>html." + level + "/images/*</file>\n")
o.write(CATEGORY_TEMPLATE[2])
o.write(QHP_TEMPLATE[1])
print("Wrote catalog to", catalog)
if has_errors:
print("Processing had errors and some files were skipped.")
else:
print("To actually create the help file, use qhelpgenerator", catalog)
def check_system() -> bool:
def which(name: str, message: str) -> bool:
try:
subprocess.check_output(["which", name], stderr=subprocess.STDOUT)
return True
except subprocess.CalledProcessError:
print("Missing", message)
return False
e = which("groff", "main part, groff, the document formatting system")
e &= which("pnmtopng", "netpbm (or pnmtopng)")
e &= which("psselect", "psutils (or psselect)")
return e
def make_argument_parser():
parser = argparse.ArgumentParser(
description="man-page to Qt Help converter."
)
parser.add_argument("levels", nargs="+", metavar="LEVEL",
help="man-page level to add for conversion, such as 2")
parser.add_argument("--cache-dir", type=str, metavar="DIR", default=".",
help="Use given cache root directory instead of current directory.")
parser.add_argument("-f", "--force", action="store_true", default=False,
help="Re-write all files.")
parser.add_argument("-o", "--output", type=str, default="man.qhp",
help="Write to given file instead of man.qhp."
" Note, the file will be forced into the cache directory!")
parser.add_argument("--ignore-system-check", action="store_true", default=False,
help="Ignore system check results and process anyways.")
parser.add_argument("-q", "--quiet", action="store_true", default=False,
help="Make less noise.")
qhp = parser.add_argument_group("Qt Help Project options")
qhp.add_argument("--namespace", default=DEFAULT_NAMESPACE,
help="Namespace to use instead of %s" % DEFAULT_NAMESPACE)
parser.add_argument("--version", action="version", version="%(prog)s " + __version__)
return parser
def main(*argv):
parser = make_argument_parser()
args = parser.parse_args(args=None if len(argv) == 0 else argv)
if not (check_system() or args.ignore_system_check):
sys.exit(1)
quiet = args.quiet
def q_print(*p_args, **p_kwargs):
if not quiet:
print(*p_args, **p_kwargs)
options = Options(
cache_path=BasePath(args.cache_dir),
qhp=os.path.basename(args.output),
force=args.force,
sources=args.levels,
qhp_namespace=args.namespace,
quiet=args.quiet,
print=q_print,
)
do_levels(options)
if __name__ == "__main__":
main()
``` |
{
"source": "jlaunonen/turska",
"score": 2
} |
#### File: turska/access/helpers.py
```python
from functools import wraps
from django.contrib import messages
from django.shortcuts import get_object_or_404, redirect
def access_admin_required(view_func):
@wraps(view_func)
def wrapper(request, organization_slug, *args, **kwargs):
from core.models import Organization
from core.utils import login_redirect
from core.views.admin_views import organization_admin_menu_items
organization = get_object_or_404(Organization, slug=organization_slug)
meta = organization.access_organization_meta
if not meta:
messages.error(request, u"Tämä organisaatio ei käytä Kompassia pääsyoikeuksien hallintaan.")
return redirect('core_organization_view', organization.slug)
if not organization.access_organization_meta.is_user_admin(request.user):
return login_redirect(request)
vars = dict(
organization=organization,
admin_menu_items=organization_admin_menu_items(request, organization),
admin_title=u'Yhdistyksen ylläpito'
)
return view_func(request, vars, organization, *args, **kwargs)
return wrapper
def access_organization_required(view_func):
@wraps(view_func)
def wrapper(request, organization_slug, *args, **kwargs):
from core.models import Organization
organization = get_object_or_404(Organization, slug=organization_slug)
meta = organization.access_organization_meta
if not meta:
messages.error(request, u"Tämä organisaatio ei käytä Kompassia pääsyoikeuksien hallintaan.")
return redirect('core_organization_view', organization.slug)
return view_func(request, organization, *args, **kwargs)
return wrapper
```
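A hypothetical view sketch showing how `access_admin_required` would be applied; the view name and template are assumptions, but the (request, vars, organization) argument order matches the wrapper above.
```python
from django.shortcuts import render
from access.helpers import access_admin_required
@access_admin_required
def access_admin_dummy_view(request, vars, organization):
    # 'vars' already carries organization, admin_menu_items and admin_title.
    return render(request, 'access_admin_dummy_view.jade', vars)  # template name is an assumption
```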
#### File: turska/access/tests.py
```python
from unittest import TestCase as NonDatabaseTestCase
from django.test import TestCase
from core.models import Person
from labour.models import LabourEventMeta
from .utils import emailify
from .email_aliases import firstname_surname
from .models import EmailAlias, GroupEmailAliasGrant, EmailAliasType
class FakePerson(object):
first_name = 'Santtu'
surname = u'Pajukanta'
class EmailifyTestCase(NonDatabaseTestCase):
def test_emailify(self):
self.assertEqual(emailify(u''), u'')
self.assertEqual(emailify(u'Santtu Pajukanta'), u'santtu.pajukanta')
self.assertEqual(emailify(u'Kalle-Jooseppi Mäki-Kangas-Ketelä'), u'kalle-jooseppi.maki-kangas-ketela')
def test_firstname_surname(self):
self.assertEqual(firstname_surname(FakePerson()), 'santtu.pajukanta')
class EmailAliasesTestCase(TestCase):
def setUp(self):
self.meta, unused = LabourEventMeta.get_or_create_dummy()
self.group = self.meta.get_group('admins')
self.person, unused = Person.get_or_create_dummy()
def test_email_alias_create(self):
email_alias, unused = EmailAlias.get_or_create_dummy()
self.assertEqual(email_alias.email_address, '<EMAIL>')
def test_ensure_aliases(self):
alias_type, unused = EmailAliasType.get_or_create_dummy()
self.group_grant, unused = GroupEmailAliasGrant.objects.get_or_create(group=self.group, type=alias_type)
GroupEmailAliasGrant.ensure_aliases(person=self.person)
self.assertEqual(alias_type.email_aliases.count(), 0)
self.person.user.groups.add(self.group)
GroupEmailAliasGrant.ensure_aliases(person=self.person)
self.assertEqual(alias_type.email_aliases.count(), 1)
def test_account_name_generator_returning_none(self):
alias_type, unused = EmailAliasType.get_or_create_dummy(metavar='nick', defaults=dict(
account_name_code='access.email_aliases:nick',
))
self.person.nick = u''
self.person.save()
self.assertEqual(alias_type.email_aliases.count(), 0)
GroupEmailAliasGrant.ensure_aliases(self.person)
self.assertEqual(alias_type.email_aliases.count(), 0)
```
#### File: badges/models/badge.py
```python
from django.conf import settings
from django.db import models, transaction
from django.utils.html import escape
from django.utils.translation import ugettext_lazy as _
from core.csv_export import CsvExportMixin
from core.utils import time_bool_property
from ..proxies.badge.privacy import BadgePrivacyAdapter
class Badge(models.Model, CsvExportMixin):
person = models.ForeignKey('core.Person',
null=True,
blank=True,
verbose_name=_(u'Person'),
)
personnel_class = models.ForeignKey('labour.PersonnelClass',
verbose_name=_(u'Personnel class'),
)
printed_separately_at = models.DateTimeField(
null=True,
blank=True,
verbose_name=_(u'Printed separately at'),
)
revoked_by = models.ForeignKey(settings.AUTH_USER_MODEL,
null=True,
blank=True,
related_name='badges_revoked',
verbose_name=_(u'Revoked by'),
)
revoked_at = models.DateTimeField(
null=True,
blank=True,
verbose_name=_(u'Revoked at'),
)
first_name = models.CharField(
blank=True,
max_length=1023,
verbose_name=_(u'First name'),
)
is_first_name_visible = models.BooleanField(
default=True,
verbose_name=_(u'Is first name visible'),
)
surname = models.CharField(
blank=True,
max_length=1023,
verbose_name=_(u'Surname'),
)
is_surname_visible = models.BooleanField(
default=True,
verbose_name=_(u'Is surname visible'),
)
nick = models.CharField(
blank=True,
max_length=1023,
verbose_name=_(u'Nick name'),
help_text=_(u'If you only have a single piece of information to print on the badge, use this field.'),
)
is_nick_visible = models.BooleanField(
default=True,
verbose_name=_(u'Is nick visible'),
)
job_title = models.CharField(max_length=63,
blank=True,
default=u'',
verbose_name=_(u'Job title'),
help_text=_(u'Please stay civil with the job title field.'),
)
created_by = models.ForeignKey(settings.AUTH_USER_MODEL,
null=True,
blank=True,
related_name='badges_created',
verbose_name=_(u'Created by'),
)
created_at = models.DateTimeField(
auto_now_add=True,
verbose_name=_(u'Created at'),
)
updated_at = models.DateTimeField(
auto_now=True,
verbose_name=_(u'Updated at'),
)
batch = models.ForeignKey('badges.Batch',
null=True,
blank=True,
db_index=True,
verbose_name=_(u'Printing batch'),
)
is_revoked = time_bool_property('revoked_at')
is_printed = time_bool_property('printed_at')
is_printed_separately = time_bool_property('printed_separately_at')
@property
def printed_at(self):
if self.printed_separately_at:
return self.printed_separately_at
elif self.batch:
return self.batch.printed_at
else:
return None
@property
def formatted_printed_at(self):
# XXX not really "formatted"
return self.printed_at if self.printed_at is not None else u''
@classmethod
def get_or_create_dummy(cls):
from core.models import Person
from labour.models import PersonnelClass
person, unused = Person.get_or_create_dummy()
personnel_class, unused = PersonnelClass.get_or_create_dummy()
return cls.objects.get_or_create(
person=person,
personnel_class=personnel_class,
)
@classmethod
def ensure(cls, event, person):
"""
Makes sure the person has a badge of the correct class and up-to-date information for a given event.
"""
from badges.utils import default_badge_factory
assert person is not None
with transaction.atomic():
try:
existing_badge = cls.objects.get(
personnel_class__event=event,
person=person,
revoked_at__isnull=True,
)
except cls.DoesNotExist:
existing_badge = None
expected_badge_opts = default_badge_factory(event=event, person=person)
if existing_badge:
# There is an existing un-revoked badge. Check that its information is correct.
if any(getattr(existing_badge, key) != value for key, value in expected_badge_opts.iteritems()):
existing_badge.revoke()
else:
return existing_badge, False
if expected_badge_opts.get('personnel_class') is None:
# They should not have a badge.
return None, False
badge_opts = dict(expected_badge_opts, person=person)
badge = cls(**badge_opts)
badge.save()
return badge, True
@classmethod
def get_csv_fields(cls, event):
from labour.models import PersonnelClass
meta = event.badges_event_meta
if meta.badge_layout == 'trad':
# Chief Technology Officer
# <NAME>
# Japsu
return [
(cls, 'personnel_class_name'),
(BadgePrivacyAdapter, 'surname'),
(BadgePrivacyAdapter, 'first_name'),
(BadgePrivacyAdapter, 'nick'),
(cls, 'job_title'),
]
elif meta.badge_layout == 'nick':
# JAPSU
# <NAME>
# Chief Technology Officer
# -OR-
# SANTTU
# Pajukanta
# Chief Technology Officer
return [
(cls, 'personnel_class_name'),
(BadgePrivacyAdapter, 'nick_or_first_name'),
(BadgePrivacyAdapter, 'surname_or_full_name'),
(cls, 'job_title'),
]
else:
raise NotImplementedError(meta.badge_layout)
def get_csv_related(self):
from core.models import Person
return {
BadgePrivacyAdapter: BadgePrivacyAdapter(self),
}
def get_name_fields(self):
return [
(self.surname.strip(), self.is_surname_visible),
(self.first_name.strip(), self.is_first_name_visible),
(self.nick.strip(), self.is_nick_visible),
]
@property
def personnel_class_name(self):
return self.personnel_class.name if self.personnel_class else u''
@property
def event(self):
return self.personnel_class.event
@property
def meta(self):
return self.event.badges_event_meta
@property
def event_name(self):
return self.personnel_class.event.name if self.personnel_class else u''
def to_html_print(self):
def format_name_field(value, is_visible):
if is_visible:
return u"<strong>{value}</strong>".format(value=escape(value))
else:
return escape(value)
vars = dict(
surname=format_name_field(self.surname.strip(), self.is_surname_visible),
first_name=format_name_field(self.first_name.strip(), self.is_first_name_visible),
nick=format_name_field(self.nick.strip(), self.is_nick_visible),
)
if self.nick:
return u"{surname}, {first_name}, {nick}".format(**vars)
else:
return u"{surname}, {first_name}".format(**vars)
def revoke(self, user=None):
"""
Revoke the badge.
When a badge that is not yet assigned to a batch or printed separately is revoked, it is
removed altogether.
When a badge that is already assigned to a batch or printed separately is revoked, it will be
marked as such but not removed, because it needs to be manually removed from distribution.
Note that the batch does not need to be marked as printed yet for a badge to stay around revoked,
because a batch that is already created but not yet printed may have been downloaded as Excel
already. A Batch should never change after being created.
"""
assert not self.is_revoked
if self.is_printed_separately or self.batch:
self.is_revoked = True
self.revoked_by = user
self.save()
return self
else:
self.delete()
return None
def unrevoke(self):
assert self.is_revoked
self.is_revoked = False
self.revoked_by = None
self.save()
return self
def admin_get_full_name(self):
if self.nick:
return u'{self.first_name} "{self.nick}" {self.surname}'.format(self=self)
else:
return u'{self.first_name} {self.surname}'.format(self=self)
admin_get_full_name.short_description = _(u'Name')
admin_get_full_name.admin_order_field = ('surname', 'first_name', 'nick')
def __unicode__(self):
return u"{person_name} ({personnel_class_name}, {event_name})".format(
person_name=self.admin_get_full_name(),
personnel_class_name=self.personnel_class_name,
event_name=self.event_name,
)
```
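A hedged usage sketch (not part of the file above): in a Django shell for this project, `Badge.ensure` keeps a person's badge for an event up to date, and `revoke` either deletes the badge or flags it depending on whether it has already been batched or printed. The `event` and `person` objects, and the `badges.models` import path, are assumptions.

```python
# Illustrative only; assumes a configured Django project with the badges app
# installed and an existing Event/Person pair bound to `event` and `person`.
from badges.models import Badge

badge, created = Badge.ensure(event=event, person=person)  # create or refresh the badge
if badge is not None:
    # Revoking deletes the badge outright unless it is already in a batch or
    # printed separately, in which case it is only flagged as revoked.
    badge = badge.revoke()
```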
#### File: management/commands/setup_concon9.py
```python
from datetime import datetime, timedelta
from django.core.management.base import BaseCommand
from dateutil.tz import tzlocal
from core.models import Venue, Event
from programme.models import (
Category,
ProgrammeEventMeta,
Room,
SpecialStartTime,
TimeBlock,
View,
)
class Command(BaseCommand):
    def handle(self, *args, **options):
tz = tzlocal()
venue, unused = Venue.objects.get_or_create(
name=u'Metropolia AMK Hämeentie',
defaults=dict(
name_inessive=u'Metropolia AMK:n Hämeentien toimipisteessä'
)
)
room_order = 0
for room_name in [
u'Zeus',
u'Athene',
]:
room_order += 100
Room.objects.get_or_create(
venue=venue,
name=room_name,
defaults=dict(
order=room_order,
)
)
event, unused = Event.objects.get_or_create(slug='concon9', defaults=dict(
name='Concon 9',
name_genitive='Concon 9 -seminaarin',
name_illative='Concon 9 -seminaariin',
name_inessive='Concon 9 -seminaarissa',
homepage_url='http://concon.nakkikone.org',
organization_name='Yliopiston anime ja manga ry',
organization_url='http://yama.animeunioni.org',
start_time=datetime(2014, 5, 24, 10, 0, tzinfo=tz),
end_time=datetime(2014, 5, 24, 18, 0, tzinfo=tz),
venue=venue,
))
admin_group, = ProgrammeEventMeta.get_or_create_groups(event, ['admins'])
programme_event_meta, unused = ProgrammeEventMeta.objects.get_or_create(event=event, defaults=dict(
public=False,
admin_group=admin_group,
))
view, unused = View.objects.get_or_create(
event=event,
name='Ohjelmakartta',
)
if not view.rooms.exists():
view.rooms = Room.objects.filter(venue=venue)
view.save()
for category_name, category_style in [
(u'Ohjelma', u'anime'),
(u'Tauko', u'muu'),
]:
Category.objects.get_or_create(
event=event,
title=category_name,
defaults=dict(
style=category_style,
)
)
for start_time, end_time in [
(
event.start_time,
event.end_time,
),
]:
TimeBlock.objects.get_or_create(
event=event,
start_time=start_time,
defaults=dict(
end_time=end_time
)
)
# half_hour = event.start_time + timedelta(minutes=30)
# while half_hour < end_time:
# SpecialStartTime.objects.create(
# event=event,
# start_time=half_hour,
# )
# half_hour += timedelta(minutes=60)
```
#### File: features/steps/programme_condb_105_steps.py
```python
from behave import when, then
from programme.models import Programme, ProgrammeRole
@when(u'I assign it a schedule slot')
def step_impl(context):
assert False
@when(u'I publish it')
def step_impl(context):
assert False
@when(u'I create another programme')
def create_another_programme(context):
context.another_programme, unused = Programme.get_or_create_dummy(title=u'Another dummy programme')
ProgrammeRole.get_or_create_dummy(programme=context.another_programme)
@when(u'I do not assign it a schedule slot')
def step_impl(context):
assert False
@then(u'I should see the first programme in the schedule')
def step_impl(context):
assert False
@then(u'I should not see the second programme in the schedule')
def step_impl(context):
assert False
@then(u'I should see the second programme on the non-schedule programme page')
def step_impl(context):
assert False
@then(u'I should not see the first programme on the non-schedule programme page')
def step_impl(context):
assert False
```
#### File: turska/labour_common_qualifications/models.py
```python
from django.conf import settings
from django.core.validators import RegexValidator
from django.db import models
from django.utils.dateformat import format as format_date
from labour.models import QualificationExtraBase
validate_jv_card_number = RegexValidator(
regex=r'.+/.+/.+',
message=u"Tarkista JV-kortin numero"
)
class JVKortti(QualificationExtraBase):
card_number = models.CharField(
max_length=13,
validators=[validate_jv_card_number,],
verbose_name=u"JV-kortin numero",
help_text=u"Muoto: 0000/J0000/00 tai XX/0000/00"
)
expiration_date = models.DateField(verbose_name=u"Viimeinen voimassaolopäivä")
def __unicode__(self):
n = self.card_number
d = format_date(self.expiration_date, settings.DATE_FORMAT)
return u"{n}, voimassa {d} asti".format(**locals())
@classmethod
def get_form_class(cls):
from .forms import JVKorttiForm
return JVKorttiForm
class Meta:
verbose_name = u"JV-kortti"
verbose_name_plural = u"JV-kortit"
```
#### File: labour/migrations/0003_populate_pclasses.py
```python
from __future__ import unicode_literals
from django.db import models, migrations
def forwards_func(apps, schema_editor):
Badge = apps.get_model('badges', 'Badge')
Signup = apps.get_model('labour', 'Signup')
PersonnelClass = apps.get_model('labour', 'PersonnelClass')
for signup in Signup.objects.all():
event = signup.event
person = signup.person
try:
badge = Badge.objects.get(template__event=event, person=person)
except Badge.DoesNotExist:
personnel_class, unused = PersonnelClass.objects.get_or_create(
event=event,
slug='tyovoima',
defaults=dict(
app_label='labour',
name=u'Työvoima',
)
)
else:
personnel_class, unused = PersonnelClass.objects.get_or_create(
event=event,
slug=badge.template.slug,
defaults=dict(
app_label='labour',
name=badge.template.name,
)
)
signup.personnel_classes = [personnel_class]
signup.save()
class Migration(migrations.Migration):
dependencies = [
('labour', '0002_auto_20141115_1102'),
('badges', '0001_initial'),
]
operations = [
migrations.RunPython(forwards_func),
]
```
#### File: labour/models/info_link.py
```python
from django.db import models
from django.utils.translation import ugettext_lazy as _
class InfoLink(models.Model):
event = models.ForeignKey('core.Event', verbose_name=u'Tapahtuma')
group = models.ForeignKey('auth.Group',
verbose_name=u'Ryhmä',
help_text=u'Linkki näytetään vain tämän ryhmän jäsenille.',
)
url = models.CharField(
max_length=255,
verbose_name=u'Osoite',
help_text=u'Muista aloittaa ulkoiset linkit <i>http://</i> tai <i>https://</i>.'
)
title = models.CharField(max_length=255, verbose_name=u'Teksti')
class Meta:
verbose_name = _(u'info link')
verbose_name_plural = _(u'info links')
def __unicode__(self):
return self.title
```
#### File: labour/models/labour_event_meta.py
```python
from datetime import datetime, timedelta
from django.conf import settings
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.utils.timezone import now
from core.models import EventMetaBase
from core.utils import full_hours_between, is_within_period
from .constants import GROUP_VERBOSE_NAMES_BY_SUFFIX, SIGNUP_STATE_GROUPS
class LabourEventMeta(EventMetaBase):
signup_extra_content_type = models.ForeignKey('contenttypes.ContentType')
registration_opens = models.DateTimeField(
null=True,
blank=True,
verbose_name=u'työvoimahaku alkaa'
)
registration_closes = models.DateTimeField(
null=True,
blank=True,
verbose_name=u'työvoimahaku päättyy'
)
work_begins = models.DateTimeField(verbose_name=u'Ensimmäiset työvuorot alkavat')
work_ends = models.DateTimeField(verbose_name=u'Viimeiset työvuorot päättyvät')
monitor_email = models.CharField(
max_length=255,
blank=True,
verbose_name=u'tarkkailusähköposti',
help_text=u'Kaikki työvoimajärjestelmän lähettämät sähköpostiviestit lähetetään myös '
u'tähän osoitteeseen.',
)
contact_email = models.CharField(
max_length=255,
blank=True,
verbose_name=u'yhteysosoite',
help_text=u'Kaikki työvoimajärjestelmän lähettämät sähköpostiviestit lähetetään tästä '
u'osoitteesta, ja tämä osoite näytetään työvoimalle yhteysosoitteena. Muoto: Selite <<EMAIL>>.',
)
signup_message = models.TextField(
null=True,
blank=True,
default=u'',
verbose_name=u'Ilmoittautumisen huomautusviesti',
help_text=u'Tämä viesti näytetään kaikille työvoimailmoittautumisen alussa. Käytettiin '
u'esimerkiksi Tracon 9:ssä kertomaan, että työvoimahaku on avoinna enää JV:ille ja '
u'erikoistehtäville.',
)
work_certificate_signer = models.TextField(
null=True,
blank=True,
default=u'',
verbose_name=u'Työtodistuksen allekirjoittaja',
help_text=u'Tämän kentän sisältö näkyy työtodistuksen allekirjoittajan nimenselvennyksenä. '
u'On suositeltavaa sisällyttää tähän omalle rivilleen allekirjoittajan tehtävänimike.'
)
class Meta:
verbose_name = _(u'labour event meta')
verbose_name_plural = _(u'labour event metas')
def __unicode__(self):
return self.event.name if self.event else 'None'
@property
def signup_extra_model(self):
return self.signup_extra_content_type.model_class()
@classmethod
def events_registration_open(cls):
from core.models import Event
t = now()
return Event.objects.filter(
laboureventmeta__registration_opens__isnull=False,
laboureventmeta__registration_opens__lte=t,
).exclude(
laboureventmeta__registration_closes__isnull=False,
laboureventmeta__registration_closes__lte=t,
)
@classmethod
def get_or_create_dummy(cls):
from django.contrib.contenttypes.models import ContentType
from core.models import Event
from .signup_extras import EmptySignupExtra
event, unused = Event.get_or_create_dummy()
content_type = ContentType.objects.get_for_model(EmptySignupExtra)
admin_group, = LabourEventMeta.get_or_create_groups(event, ['admins'])
t = now()
labour_event_meta, created = cls.objects.get_or_create(
event=event,
defaults=dict(
admin_group=admin_group,
signup_extra_content_type=content_type,
registration_opens=t - timedelta(days=60),
registration_closes=t + timedelta(days=60),
work_begins=event.start_time - timedelta(days=1),
work_ends=event.end_time + timedelta(days=1),
contact_email='<EMAIL>',
monitor_email='<EMAIL>',
)
)
labour_event_meta.create_groups()
return labour_event_meta, created
@classmethod
def get_or_create_groups(cls, event, job_categories_or_suffixes):
suffixes = [
jc_or_suffix if isinstance(jc_or_suffix, basestring) else jc_or_suffix.slug
for jc_or_suffix in job_categories_or_suffixes
]
groups = super(LabourEventMeta, cls).get_or_create_groups(event, suffixes)
if 'mailings' in settings.INSTALLED_APPS:
from mailings.models import RecipientGroup
for jc_or_suffix, group in zip(job_categories_or_suffixes, groups):
if isinstance(jc_or_suffix, basestring):
verbose_name = GROUP_VERBOSE_NAMES_BY_SUFFIX[jc_or_suffix]
else:
verbose_name = jc_or_suffix.name
RecipientGroup.objects.get_or_create(
event=event,
app_label='labour',
group=group,
defaults=dict(
verbose_name=verbose_name,
),
)
return groups
def is_user_supervisor(self, user):
supervisor_group, = LabourEventMeta.get_or_create_groups(self.event, ['supervisors'])
return self.is_user_admin(user) or self.is_user_in_group(user, supervisor_group)
def create_groups(self):
from .job_category import JobCategory
job_categories_or_suffixes = list(SIGNUP_STATE_GROUPS)
job_categories_or_suffixes.extend(JobCategory.objects.filter(event=self.event))
job_categories_or_suffixes.append('supervisors')
return LabourEventMeta.get_or_create_groups(self.event, job_categories_or_suffixes)
@property
def is_registration_open(self):
return is_within_period(self.registration_opens, self.registration_closes)
def is_person_signed_up(self, person):
return self.event.signup_set.filter(person=person).exists()
def get_signup_for_person(self, person):
from .signup import Signup
try:
return self.event.signup_set.get(person=person)
except Signup.DoesNotExist:
return Signup(person=person, event=self.event)
@property
def work_hours(self):
return full_hours_between(self.work_begins, self.work_ends)
@property
def applicants_group(self):
return self.get_group('applicants')
@property
def accepted_group(self):
return self.get_group('accepted')
@property
def finished_group(self):
return self.get_group('finished')
@property
def rejected_group(self):
return self.get_group('rejected')
```
#### File: labour/models/qualifications.py
```python
from django.db import models
from django.utils.translation import ugettext_lazy as _
from core.utils import SLUG_FIELD_PARAMS
class Qualification(models.Model):
slug = models.CharField(**SLUG_FIELD_PARAMS)
name = models.CharField(max_length=63, verbose_name=u'pätevyyden nimi')
description = models.TextField(blank=True, verbose_name=u'kuvaus')
qualification_extra_content_type = models.ForeignKey('contenttypes.ContentType', null=True, blank=True)
class Meta:
verbose_name = _(u'qualification')
verbose_name_plural = _(u'qualifications')
def __unicode__(self):
return self.name
@property
def qualification_extra_model(self):
if self.qualification_extra_content_type:
return self.qualification_extra_content_type.model_class()
else:
return None
@classmethod
def create_dummy(cls):
return cls.objects.create(
name='Dummy qualification'
)
@classmethod
def get_or_create_dummies(cls):
qual1, unused = Qualification.objects.get_or_create(slug='dummy1', defaults=dict(
name='Dummy qualification 1'
))
qual2, unused = Qualification.objects.get_or_create(slug='dummy2', defaults=dict(
name='Dummy qualification 2'
))
return [qual1, qual2]
class PersonQualification(models.Model):
person = models.ForeignKey('core.Person', verbose_name=u'henkilö')
qualification = models.ForeignKey(Qualification, verbose_name=u'pätevyys')
class Meta:
verbose_name = _(u'qualification holder')
verbose_name_plural = _(u'qualification holders')
def __unicode__(self):
return self.qualification.name if self.qualification else 'None'
@property
def qualification_extra(self):
if not self.qualification:
return None
QualificationExtra = self.qualification.qualification_extra_model
if not QualificationExtra:
return None
try:
return QualificationExtra.objects.get(personqualification=self)
except QualificationExtra.DoesNotExist:
return QualificationExtra(personqualification=self)
class QualificationExtraBase(models.Model):
personqualification = models.OneToOneField(PersonQualification,
related_name="+",
primary_key=True)
@classmethod
def get_form_class(cls):
        raise NotImplementedError(
'Remember to override get_form_class in your QualificationExtra model'
)
class Meta:
abstract = True
```
#### File: programme/migrations/0020_make_role_event_specific.py
```python
from __future__ import unicode_literals
from django.db import migrations
def make_role_event_specific(apps, schema_editor):
ProgrammeRole = apps.get_model('programme', 'programmerole')
Role = apps.get_model('programme', 'role')
PersonnelClass = apps.get_model('labour', 'personnelclass')
PersonnelClass.objects.filter(name__icontains='ohjelma').update(app_label='programme')
for programme_role in ProgrammeRole.objects.all():
if programme_role.role.personnel_class is None:
try:
personnel_class = PersonnelClass.objects.get(
event=programme_role.programme.category.event,
name__in=[u'Ohjelma', u'Ohjelmanjärjestäjä']
)
except PersonnelClass.DoesNotExist:
personnel_class = PersonnelClass(
event=programme_role.programme.category.event,
app_label='programme',
name=u'Ohjelmanjärjestäjä',
slug=u'ohjelma',
priority=40, # values of 0, 30 and 40 present; 40 most prevalent
)
personnel_class.save()
old_role = programme_role.role
programme_role.role, unused = Role.objects.get_or_create(
personnel_class=personnel_class,
title=old_role.title,
defaults=dict(
require_contact_info=old_role.require_contact_info,
is_default=old_role.is_default,
is_public=old_role.is_public,
)
)
programme_role.save()
Role.objects.filter(personnel_class__isnull=True).delete()
class Migration(migrations.Migration):
dependencies = [
('labour', '0016_auto_20160128_1805'),
('programme', '0019_auto_20160201_0003'),
]
operations = [
migrations.RunPython(make_role_event_specific),
]
```
#### File: programme/migrations/0025_auto_20160202_2237.py
```python
from __future__ import unicode_literals
import re
from django.db import migrations
SLUGIFY_CHAR_MAP = {
u'ä': u'a',
u'å': u'a',
u'ö': u'o',
u'ü': u'u',
u' ': u'-',
u'_': u'-',
u'.': u'-',
}
SLUGIFY_FORBANNAD_RE = re.compile(ur'[^a-z0-9-]', re.UNICODE)
SLUGIFY_MULTIDASH_RE = re.compile(ur'-+', re.UNICODE)
def slugify(ustr):
ustr = ustr.lower()
ustr = u''.join(SLUGIFY_CHAR_MAP.get(c, c) for c in ustr)
ustr = SLUGIFY_FORBANNAD_RE.sub(u'', ustr)
ustr = SLUGIFY_MULTIDASH_RE.sub(u'-', ustr)
return ustr
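# Illustrative behaviour of slugify above (not part of the migration):
# >>> slugify(u'Yön Ääni')
# u'yon-aani'
# Scandinavian characters are transliterated, spaces/underscores/dots become
# dashes, any other character outside [a-z0-9-] is dropped, and runs of
# dashes collapse to a single dash.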
def populate_slug(apps, schema_editor):
Programme = apps.get_model('programme', 'programme')
for programme in Programme.objects.all():
if not programme.slug:
programme.slug = slugify(programme.title)
programme.save()
class Migration(migrations.Migration):
dependencies = [
('programme', '0024_auto_20160202_2236'),
]
operations = [
migrations.RunPython(populate_slug)
]
```
#### File: turska/sms/models.py
```python
from datetime import datetime
from django.conf import settings
from django.db import models
from django.db.models import F
from nexmo.models import InboundMessage, OutboundMessage, RetryError
import regex
from core.models import EventMetaBase
MAX_TRIES = 20
RETRY_DELAY_SECONDS = 0.4
class Hotword(models.Model):
hotword = models.CharField(
max_length=255,
verbose_name=u"Avainsanan kuvaus",
help_text=u"Tällä nimellä erotat avainsanan muista, esim. toisen tapahtuman AMV-äänestyksestä"
)
slug = models.SlugField(
verbose_name=u"Avainsana",
help_text=u"Tämä tekstinpätkä on varsinainen avainsana, joka tulee löytyä tekstiviestistä. Kirjoita pienillä!"
)
valid_from = models.DateTimeField()
valid_to = models.DateTimeField()
assigned_event = models.ForeignKey('core.Event')
def __unicode__(self):
return u'%s' % (self.hotword)
class Meta:
verbose_name = u'Avainsana'
verbose_name_plural = u'Avainsanat'
class VoteCategory(models.Model):
category = models.CharField(
max_length=255,
verbose_name=u'Kategorian kuvaus'
)
slug = models.SlugField(
max_length=20,
verbose_name=u'Avainsana'
)
hotword = models.ForeignKey(Hotword)
primary = models.BooleanField(default=False)
def __unicode__(self):
return u'%s' % (self.category)
class Meta:
verbose_name = u'Kategoria'
verbose_name_plural = u'Kategoriat'
class Nominee(models.Model):
category = models.ManyToManyField(VoteCategory)
number = models.IntegerField()
name = models.CharField(
max_length=255,
blank=True,
null=True
)
def __unicode__(self):
return u'%s - %s' % (self.number, self.name)
class Meta:
verbose_name = u'Osallistuja'
verbose_name_plural = u'Osallistujat'
class Vote(models.Model):
category = models.ForeignKey(VoteCategory)
vote = models.ForeignKey(Nominee)
message = models.ForeignKey('nexmo.InboundMessage')
class Meta:
verbose_name = u'Ääni'
verbose_name_plural = u'Äänet'
class SMSEventMeta(EventMetaBase):
sms_enabled = models.BooleanField(default=False)
current = models.BooleanField(default=False)
used_credit = models.IntegerField(default=0)
def save(self, *args, **kwargs):
if self.current:
try:
temp = SMSEventMeta.objects.get(current=True)
if self != temp:
temp.current = False
temp.save()
except SMSEventMeta.DoesNotExist:
pass
return super(SMSEventMeta, self).save(*args, **kwargs)
@property
def is_sms_enabled(self):
        return self.sms_enabled
@property
def is_current(self):
        return self.current
def __unicode__(self):
return self.event.name
@classmethod
def get_or_create_dummy(cls):
from core.models import Event
event, unused = Event.get_or_create_dummy()
group, unused = cls.get_or_create_groups(event, ['admins'])
return cls.objects.get_or_create(event=event, defaults=dict(admin_group=group))
class Meta:
verbose_name = u'Tekstiviestejä käyttävä tapahtuma'
verbose_name_plural = u'Tekstiviestejä käyttävät tapahtumat'
class SMSMessageIn(models.Model):
message = models.ForeignKey('nexmo.InboundMessage')
SMSEventMeta = models.ForeignKey(SMSEventMeta)
def __unicode__(self):
return self.message.message
class Meta:
verbose_name = u'Vastaanotettu viesti'
verbose_name_plural = u'Vastaanotetut viestit'
class SMSMessageOut(models.Model):
message = models.TextField()
to = models.CharField(max_length=20)
event = models.ForeignKey(SMSEventMeta)
ref = models.ForeignKey('nexmo.OutboundMessage', blank=True, null=True)
@classmethod
def send(cls, *args, **kwargs):
message = SMSMessageOut(*args, **kwargs)
message.save()
return message._send()
def _send(self, *args, **kwargs):
from time import sleep
if not self.event.sms_enabled:
return False
# TODO replace this with a generic phone number normalization code (perhaps a library)
to = regex.match(r'\d{9,15}', self.to.replace(' ','').replace('-','').replace('+',''))
if to is None:
return False
if to[0].startswith('0'):
actual_to = u'+358' + to[0][1:]
else:
actual_to = u'+' + to[0]
nexmo_message = OutboundMessage(message=self.message, to=actual_to)
nexmo_message.save()
self.to = actual_to
self.ref = nexmo_message
self.save()
succeeded = False
for i in xrange(MAX_TRIES):
try:
sent_message = self.ref._send()
except RetryError:
# Back off! Stop everything for a while.
sleep(RETRY_DELAY_SECONDS)
else:
succeeded = True
break
if not succeeded:
raise RuntimeError('Max retries exceeded for SMSMessageOut(id={})'.format(self.id))
used_credit = sum(
float(sent['message-price']) * 100
for sent in sent_message['messages']
if int(sent['status']) == 0
)
meta = SMSEventMeta.objects.get(event=self.event.event)
meta.used_credit = F('used_credit') + int(used_credit)
meta.save()
if 'background_tasks' in settings.INSTALLED_APPS:
# This assumes that if background_tasks is installed, it will be used in sms sending.
# Otherwise you will be hitting RetryError constantly.
pass
else:
sleep(0.25 * float(sent_message['message-count']))
return True
class Meta:
verbose_name = u'Lähetetty viesti'
verbose_name_plural = u'Lähetetyt viestit'
from . import signal_handlers
```
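The phone-number handling in `SMSMessageOut._send` above reduces to stripping separators and forcing an international prefix. A standalone sketch of that normalization (using the stdlib `re` module instead of `regex`; `normalize_msisdn` is a hypothetical helper, not part of the app):

```python
import re

def normalize_msisdn(raw, default_country=u'358'):
    # Mirrors SMSMessageOut._send: strip separators, then add the country
    # code when the number is written in national (leading zero) format.
    digits = re.match(r'\d{9,15}', raw.replace(' ', '').replace('-', '').replace('+', ''))
    if digits is None:
        return None
    number = digits.group(0)
    if number.startswith('0'):
        return u'+' + default_country + number[1:]
    return u'+' + number

assert normalize_msisdn(u'040 1234 567') == u'+358401234567'
assert normalize_msisdn(u'+358 40 1234567') == u'+358401234567'
```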
#### File: turska/sms/tasks.py
```python
from __future__ import absolute_import
from celery import shared_task
@shared_task(ignore_result=True)
def message_send(message_id):
from .models import SMSMessageOut
smsmessage = SMSMessageOut.objects.get(pk=message_id)
smsmessage._send()
```
#### File: tickets/migrations/0009_accom_limit_group_refactor.py
```python
from __future__ import unicode_literals
from django.db import migrations, models
def populate_accommodation_limit_group(apps, schema_editor):
AccommodationInformation = apps.get_model('tickets', 'accommodationinformation')
for info in AccommodationInformation.objects.all():
info.limit_groups = info.order_product.product.limit_groups.all()
info.save()
class Migration(migrations.Migration):
dependencies = [
('tickets', '0008_auto_20151108_1905'),
]
operations = [
migrations.AddField(
model_name='accommodationinformation',
name='limit_groups',
field=models.ManyToManyField(related_name='accommodation_information_set', to='tickets.LimitGroup', blank=True),
),
migrations.AlterField(
model_name='accommodationinformation',
name='order_product',
field=models.ForeignKey(related_name='accommodation_information_set', blank=True, to='tickets.OrderProduct', null=True),
),
migrations.RunPython(populate_accommodation_limit_group),
]
```
#### File: tickets/views/public_views.py
```python
from collections import defaultdict
import datetime
from time import mktime
from django.conf import settings
from django.http import HttpResponseRedirect, HttpResponseNotAllowed, HttpResponse
from django.shortcuts import render, get_object_or_404, redirect
from django.template import RequestContext
from django.contrib.auth.decorators import login_required, permission_required
from django.contrib import messages
from django.views.decorators.http import require_POST, require_safe, require_http_methods
from django.core.paginator import Paginator, InvalidPage, EmptyPage
from django.db.models import Sum
try:
from reportlab.pdfgen import canvas
except ImportError:
from warnings import warn
warn('Failed to import ReportLab. Generating receipts will fail.')
from core.utils import initialize_form, url
# XXX * imports
from ..models import Order, Product, OrderProduct, Customer, AccommodationInformation
from ..forms import *
from ..helpers import *
from ..utils import *
__all__ = [
"ALL_PHASES",
"tickets_accommodation_view",
"tickets_address_view",
"tickets_confirm_view",
"tickets_thanks_view",
"tickets_tickets_view",
"tickets_welcome_view",
]
def multiform_validate(forms):
return ["syntax"] if not all(
i.is_valid() and (i.instance.target.available or i.cleaned_data["count"] == 0)
for i in forms
) else []
def multiform_save(forms):
return [i.save() for i in forms]
def decorate(view_obj):
"""
Applying decorators to our makeshift class based views seems a bit tricky.
Let's do the decorator dance in this helper instead.
NB. can't use functools.wraps due to Phase not having a __name__.
Usage:
realized_phase = ClassBasedView()
realized_view = decorate(realized_phase)
"""
@tickets_event_required
def wrapper(request, event, *args, **kwargs):
return view_obj(request, event, *args, **kwargs)
return wrapper
class Phase(object):
name = "XXX_fill_me_in"
friendly_name = "XXX Fill Me In"
methods = ["GET", "POST"]
template = "tickets_dummy_phase.html"
prev_phase = None
next_phase = None
payment_phase = None
next_text = "Seuraava »"
can_cancel = True
index = None
delay_complete = False
def __call__(self, request, event):
if request.method not in self.methods:
return HttpResponseNotAllowed(self.methods)
order = get_order(request, event)
if not self.available(request, event):
if order.is_confirmed:
if order.is_paid:
return redirect('tickets_thanks_view', event.slug)
else:
return redirect('tickets_confirm_view', event.slug)
else:
return redirect('tickets_welcome_view', event.slug)
form = self.make_form(request, event)
errors = []
if request.method == "POST":
# Which button was clicked?
action = request.POST.get("action", "cancel")
# On "Cancel" there's no need to do form validation, just bail out
# right away.
if action == "cancel":
return self.cancel(request, event)
if action not in ("next", "prev"):
# TODO the user is manipulating the POST data
raise NotImplementedError("evil user")
# Data validity is checked before even attempting save.
errors = self.validate(request, event, form)
if not errors:
self.save(request, event, form)
# The "Next" button should only proceed with valid data.
if action == "next":
if not self.delay_complete:
complete_phase(request, event, self.name)
return self.next(request, event)
# The "Previous" button should work regardless of form validity.
if action == "prev":
# Clear any nastygrams left behind by validate
for message in messages.get_messages(request):
pass
return self.prev(request, event)
# POST with invalid data and GET are handled the same.
return self.get(request, event, form, errors)
def available(self, request, event):
order = get_order(request, event)
return is_phase_completed(request, event, self.prev_phase) and not order.is_confirmed
def validate(self, request, event, form):
if not form.is_valid():
messages.error(request, 'Tarkista lomakkeen sisältö.')
return ["syntax"]
else:
return []
def get(self, request, event, form, errors):
order = get_order(request, event)
phases = []
for phase in ALL_PHASES:
phases.append(dict(
url=url(phase.name, event.slug),
friendly_name=phase.friendly_name,
current=phase is self
))
phase = dict(
url=url(self.name, event.slug),
next_phase=bool(self.next_phase),
prev_phase=bool(self.prev_phase),
can_cancel=self.can_cancel,
next_text=self.next_text,
payment_phase=self.payment_phase,
name=self.name
)
vars = dict(self.vars(request, event, form),
event=event,
form=form,
errors=errors,
order=order,
phase=phase,
phases=phases,
# XXX hack to hide the login form
login_page=True
)
return render(request, self.template, vars)
def make_form(self, request, event):
return initialize_form(NullForm, request)
def save(self, request, event, form):
form.save()
def next(self, request, event):
return redirect(self.next_phase, event.slug)
def prev(self, request, event):
return redirect(self.prev_phase, event.slug)
def cancel(self, request, event):
destroy_order(request, event)
return HttpResponseRedirect(event.homepage_url)
def vars(self, request, event, form):
return {}
class WelcomePhase(Phase):
name = "tickets_welcome_view"
friendly_name = "Tervetuloa"
template = "tickets_welcome_phase.jade"
prev_phase = None
next_phase = "tickets_tickets_view"
permit_new = True
def save(self, request, event, form):
order = get_order(request, event)
order.save()
set_order(request, event, order)
def available(self, request, event):
order = get_order(request, event)
return not order.is_confirmed
tickets_welcome_phase = WelcomePhase()
tickets_welcome_view = decorate(tickets_welcome_phase)
class TicketsPhase(Phase):
name = "tickets_tickets_view"
friendly_name = "Liput"
template = "tickets_tickets_phase.jade"
prev_phase = "tickets_welcome_view"
next_phase = "tickets_address_view"
def make_form(self, request, event):
order = get_order(request, event)
return OrderProductForm.get_for_order(request, order)
def validate(self, request, event, form):
errors = multiform_validate(form)
# If the above step failed, not all forms have cleaned_data.
if errors:
messages.error(request, u'Tarkista lomakkeen sisältö.')
return errors
if sum(i.cleaned_data["count"] for i in form) <= 0:
messages.info(request, u'Valitse vähintään yksi tuote.')
errors.append("zero")
return errors
if any(i.instance.product.amount_available < i.cleaned_data["count"] for i in form):
messages.error(request, u'Valitsemasi tuote on valitettavasti juuri myyty loppuun.')
errors.append("soldout")
return errors
return []
def save(self, request, event, form):
multiform_save(form)
def next(self, request, event):
order = get_order(request, event)
if order.requires_accommodation_information:
return redirect('tickets_accommodation_view', event.slug)
else:
return redirect(self.next_phase, event.slug)
tickets_tickets_phase = TicketsPhase()
tickets_tickets_view = decorate(tickets_tickets_phase)
class AccommodationPhase(Phase):
name = "tickets_accommodation_view"
friendly_name = u"Lisätiedot"
template = "tickets_accommodation_phase.jade"
prev_phase = "tickets_tickets_view"
next_phase = "tickets_address_view"
def available(self, request, event):
order = get_order(request, event)
return order.requires_accommodation_information and not order.is_confirmed
def validate(self, request, event, form):
errors = ['syntax'] if not all(i.is_valid() for i in form) else []
# If the above step failed, not all forms have cleaned_data.
if errors:
messages.error(request, u'Tarkista lomakkeen sisältö.')
return errors
def make_form(self, request, event):
order = get_order(request, event)
return AccommodationInformationForm.get_for_order(request, order)
def save(self, request, event, form):
forms = form
for form in forms:
info = form.save()
info.limit_groups = info.order_product.product.limit_groups.all()
info.save()
tickets_accommodation_phase = AccommodationPhase()
tickets_accommodation_view = decorate(tickets_accommodation_phase)
class AddressPhase(Phase):
name = "tickets_address_view"
friendly_name = "Toimitusosoite"
template = "tickets_address_phase.jade"
prev_phase = "tickets_tickets_view"
next_phase = "tickets_confirm_view"
def make_form(self, request, event):
order = get_order(request, event)
return initialize_form(CustomerForm, request, instance=order.customer)
def save(self, request, event, form):
order = get_order(request, event)
cust = form.save()
order.customer = cust
order.save()
def prev(self, request, event):
order = get_order(request, event)
if order.requires_accommodation_information:
return redirect('tickets_accommodation_view', event.slug)
else:
return redirect(self.prev_phase, event.slug)
tickets_address_phase = AddressPhase()
tickets_address_view = decorate(tickets_address_phase)
class ConfirmPhase(Phase):
name = "tickets_confirm_view"
friendly_name = "Vahvistaminen"
template = "tickets_confirm_phase.jade"
prev_phase = "tickets_address_view"
next_phase = "payments_redirect_view"
next_text = "<NAME> ✓"
payment_phase = True
delay_complete = True
def validate(self, request, event, form):
errors = multiform_validate(form)
order = get_order(request, event)
order_products = order.order_product_set.filter(count__gt=0)
if any(i.product.amount_available < i.count for i in order_products):
messages.error(request, u'Valitsemasi tuote on valitettavasti juuri myyty loppuun.')
errors.append("soldout_confirm")
return errors
return []
def vars(self, request, event, form):
order = get_order(request, event)
products = order.order_product_set.filter(order=order, count__gt=0)
return dict(
products=products,
)
def available(self, request, event):
order = get_order(request, event)
return is_phase_completed(request, event, self.prev_phase) and not order.is_paid
def prev(self, request, event):
order = get_order(request, event)
if order.is_confirmed:
order.deconfirm_order()
return super(ConfirmPhase, self).prev(request, event)
def save(self, request, event, form):
order = get_order(request, event)
if not order.is_confirmed:
order.confirm_order()
tickets_confirm_phase = ConfirmPhase()
tickets_confirm_view = decorate(tickets_confirm_phase)
class ThanksPhase(Phase):
name = "tickets_thanks_view"
friendly_name = "Kiitos!"
template = "tickets_thanks_phase.jade"
prev_phase = None
next_phase = "tickets_welcome_view"
next_text = "Uusi tilaus"
can_cancel = False
def available(self, request, event):
order = get_order(request, event)
return order.is_confirmed and order.is_paid
def vars(self, request, event, form):
order = get_order(request, event)
products = OrderProduct.objects.filter(order=order)
return dict(products=products)
def save(self, request, event, form):
# Start a new order
clear_order(request, event)
tickets_thanks_phase = ThanksPhase()
tickets_thanks_view = decorate(tickets_thanks_phase)
ALL_PHASES = [
tickets_welcome_phase,
tickets_tickets_phase,
tickets_accommodation_phase,
tickets_address_phase,
tickets_confirm_phase,
tickets_thanks_phase,
]
for num, phase in enumerate(ALL_PHASES):
phase.index = num
def tickets_event_box_context(request, event):
if event.tickets_event_meta:
is_tickets_admin = event.tickets_event_meta.is_user_admin(request.user)
else:
is_tickets_admin = False
return dict(
is_tickets_admin=is_tickets_admin
)
``` |
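A hedged sketch (not in the file above) of how a new checkout step would plug into this phase machinery: subclass `Phase`, point `prev_phase`/`next_phase` at existing view names, override only the hooks that differ, and let `decorate` wire in the event lookup. `ExamplePhase`, its URL name and its template are hypothetical.

```python
class ExamplePhase(Phase):
    name = "tickets_example_view"            # hypothetical URL name
    friendly_name = "Esimerkki"
    template = "tickets_example_phase.jade"  # hypothetical template
    prev_phase = "tickets_address_view"
    next_phase = "tickets_confirm_view"

    def vars(self, request, event, form):
        # Extra template context on top of what Phase.get already supplies.
        order = get_order(request, event)
        return dict(products=order.order_product_set.filter(count__gt=0))

tickets_example_phase = ExamplePhase()
tickets_example_view = decorate(tickets_example_phase)
```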
{
"source": "jlaura/ale",
"score": 3
} |
#### File: ale/base/label_isis.py
```python
import pvl
class IsisLabel():
@property
def label(self):
if not hasattr(self, "_label"):
if isinstance(self._file, pvl.PVLModule):
self._label = self._file
            try:
                self._label = pvl.loads(self._file)
            except Exception:
                try:
                    self._label = pvl.load(self._file)
                except Exception:
                    raise ValueError("{} is not a valid label".format(self._file))
return self._label
@property
def image_lines(self):
"""
Returns
-------
: int
Number of lines in image
"""
return self.label['IsisCube']['Core']['Dimensions']['Lines']
@property
def image_samples(self):
"""
Returns
-------
: int
Number of samples in image
"""
return self.label['IsisCube']['Core']['Dimensions']['Samples']
@property
def sample_summing(self):
"""
Returns
-------
: int
Sample summing
"""
try:
summing = self.label['IsisCube']['Instrument']['SummingMode']
except:
summing = 1
return summing
@property
def line_summing(self):
"""
Returns
-------
: int
Line summing
"""
try:
summing = self.label['IsisCube']['Instrument']['SummingMode']
except:
summing = 1
return summing
@property
def target_name(self):
"""
Target body name used in various Spice calls to acquire
target specific ephemeris data.
Returns
-------
: str
Target name
"""
return self.label['IsisCube']['Instrument']['TargetName']
@property
def spacecraft_clock_start_count(self):
"""
The spacecraft clock start count, frequently used to determine the start time
of the image.
Returns
-------
: str
Spacecraft clock start count
"""
try:
start_count = self.label['IsisCube']['Instrument']['SpacecraftClockStartCount']
except:
start_count = self.label['IsisCube']['Archive']['SpacecraftClockStartCount']
return start_count
@property
def exposure_duration(self):
"""
The exposure duration of the image, in seconds
Returns
-------
: float
Exposure duration in seconds
"""
try:
units = self.label['IsisCube']['Instrument']['ExposureDuration'].units
if "ms" in units.lower():
exposure_duration = self.label['IsisCube']['Instrument']['ExposureDuration'].value * 0.001
else:
# if not milliseconds, the units are probably seconds
exposure_duration = self.label['IsisCube']['Instrument']['ExposureDuration'].value
except:
# if no units are available, assume the exposure duration is given in milliseconds
exposure_duration = self.label['IsisCube']['Instrument']['ExposureDuration'].value * 0.001
return exposure_duration
```
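The unit handling in `exposure_duration` above boils down to a millisecond-versus-second decision. A minimal standalone sketch of that rule (`to_seconds` is illustrative and not part of ale):

```python
import math

def to_seconds(value, units):
    # Mirrors IsisLabel.exposure_duration: values labelled in milliseconds are
    # scaled to seconds; any other unit is assumed to already be seconds.
    if "ms" in units.lower():
        return value * 0.001
    return value

assert math.isclose(to_seconds(0.3376, "ms"), 0.0003376)
assert to_seconds(0.5, "seconds") == 0.5
```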
#### File: tests/pytests/test_lro_drivers.py
```python
from collections import namedtuple
from unittest import mock
import pytest
import ale
from ale.drivers import lro_drivers
from ale.base import data_naif
from ale.drivers.lro_drivers import LroLrocPds3LabelNaifSpiceDriver
from ale import util
# 'Mock' the spice module where it is imported
from conftest import SimpleSpice, get_mockkernels
simplespice = SimpleSpice()
data_naif.spice = simplespice
lro_drivers.spice = simplespice
LroLrocPds3LabelNaifSpiceDriver.metakernel = get_mockkernels
@pytest.fixture
def lro_lroclabel():
return """
PDS_VERSION_ID = PDS3
/*FILE CHARACTERISTICS*/
RECORD_TYPE = FIXED_LENGTH
RECORD_BYTES = 5064
FILE_RECORDS = 13313
LABEL_RECORDS = 1
^IMAGE = 2
/*DATA IDENTIFICATION*/
DATA_SET_ID = "LRO-L-LROC-2-EDR-V1.0"
ORIGINAL_PRODUCT_ID = nacl0002fc60
PRODUCT_ID = M128963531LE
MISSION_NAME = "LUNAR RECONNAISSANCE ORBITER"
MISSION_PHASE_NAME = "NOMINAL MISSION"
INSTRUMENT_HOST_NAME = "LUNAR RECONNAISSANCE ORBITER"
INSTRUMENT_HOST_ID = LRO
INSTRUMENT_NAME = "LUNAR RECONNAISSANCE ORBITER CAMERA"
INSTRUMENT_ID = LROC
LRO:PREROLL_TIME = 2010-05-20T02:57:44.373
START_TIME = 2010-05-20T02:57:44.720
STOP_TIME = 2010-05-20T02:57:49.235
LRO:SPACECRAFT_CLOCK_PREROLL_COUNT = "1/296017064:22937"
SPACECRAFT_CLOCK_START_COUNT = "1/296017064:45694"
SPACECRAFT_CLOCK_STOP_COUNT = "1/296017069:13866"
ORBIT_NUMBER = 4138
PRODUCER_ID = LRO_LROC_TEAM
PRODUCT_CREATION_TIME = 2013-09-16T19:57:12
PRODUCER_INSTITUTION_NAME = "ARIZONA STATE UNIVERSITY"
PRODUCT_TYPE = EDR
PRODUCT_VERSION_ID = "v1.8"
UPLOAD_ID = "SC_2010140_0000_A_V01.txt"
/*DATA DESCRIPTION*/
TARGET_NAME = "MOON"
RATIONALE_DESC = "TARGET OF OPPORTUNITY"
FRAME_ID = LEFT
DATA_QUALITY_ID = "0"
DATA_QUALITY_DESC = "The DATA_QUALITY_ID is set to an 8-bit
value that encodes the following data quality information for the
observation. For each bit a value of 0 means FALSE and a value of 1 means
TRUE. More information about the data quality ID can be found in the LROC
EDR/CDR SIS, section 3.3 'Label and Header Descriptions'.
Bit 1: Temperature of focal plane array is out of bounds.
Bit 2: Threshold for saturated pixels is reached.
Bit 3: Threshold for under-saturated pixels is reached.
Bit 4: Observation is missing telemetry packets.
Bit 5: SPICE information is bad or missing.
Bit 6: Observation or housekeeping information is bad or missing.
Bit 7: Spare.
Bit 8: Spare."
/*ENVIRONMENT*/
LRO:TEMPERATURE_SCS = 4.51 <degC>
LRO:TEMPERATURE_FPA = 17.88 <degC>
LRO:TEMPERATURE_FPGA = -12.33 <degC>
LRO:TEMPERATURE_TELESCOPE = 5.91 <degC>
LRO:TEMPERATURE_SCS_RAW = 2740
LRO:TEMPERATURE_FPA_RAW = 2107
LRO:TEMPERATURE_FPGA_RAW = 3418
LRO:TEMPERATURE_TELESCOPE_RAW = 2675
/*IMAGING PARAMETERS*/
CROSSTRACK_SUMMING = 1
BANDWIDTH = 300 <nm>
CENTER_FILTER_WAVELENGTH = 600 <nm>
LINE_EXPOSURE_DURATION = 0.337600 <ms>
LRO:LINE_EXPOSURE_CODE = 0
LRO:DAC_RESET_LEVEL = 198
LRO:CHANNEL_A_OFFSET = 60
LRO:CHANNEL_B_OFFSET = 123
LRO:COMPAND_CODE = 3
LRO:LINE_CODE = 13
LRO:BTERM = (0,16,69,103,128)
LRO:MTERM = (0.5,0.25,0.125,0.0625,0.03125)
LRO:XTERM = (0,64,424,536,800)
LRO:COMPRESSION_FLAG = 1
LRO:MODE = 7
/*DATA OBJECT*/
OBJECT = IMAGE
LINES = 13312
LINE_SAMPLES = 5064
SAMPLE_BITS = 8
SAMPLE_TYPE = LSB_INTEGER
UNIT = "RAW_INSTRUMENT_COUNT"
MD5_CHECKSUM = "0fe91f4b2e93083ee0093e7c8d05f3bc"
END_OBJECT = IMAGE
END
"""
def test_lro_creation(lro_lroclabel):
#with LroLrocPds3LabelNaifSpiceDriver(lro_lroclabel) as m:
# d = m.to_dict()
# assert isinstance(d, dict)
# Need to insert new tests here, one for each property unique to this driver
assert True
``` |
{
"source": "jlaura/autocnet",
"score": 3
} |
#### File: autocnet/transformation/fundamental_matrix.py
```python
import warnings
import numpy as np
import pandas as pd
from scipy import optimize
from autocnet.camera import camera
from autocnet.camera import utils as camera_utils
from autocnet.utils.utils import make_homogeneous, normalize_vector
try:
import cv2
cv2_avail = True
except: # pragma: no cover
    cv2_avail = False
def compute_epipolar_lines(F, x, index=None):
"""
    Given a fundamental matrix and a set of homogeneous points, compute the
    corresponding epipolar lines.
Parameters
----------
F : ndarray
of shape (3,3) that represents the fundamental matrix
x : ndarray
of shape (n, 3) of homogeneous coordinates
Returns
-------
lines : ndarray
of shape (n,3) of epipolar lines in standard form
"""
if isinstance(x, pd.DataFrame):
x = x.values
if not x.shape[1] == 3:
raise ValueError('The input points must be homogenous with shape (n,3)')
# Compute the unnormalized epipolar lines
lines = np.inner(F, x)
# Normalize the lines
nu = lines[0] ** 2 + lines[1] ** 2
try:
nu = 1 / np.sqrt(nu)
except:
nu = 1
lines *= nu
lines = lines.T
if index is not None:
lines = pd.DataFrame(lines, columns=['a', 'b', 'c'], index=index)
# Inner transposes the result, so transpose back into the 3 column form
return lines
def epipolar_distance(lines, pts):
"""
Given a set of epipolar lines and a set of points, compute the euclidean
distance between each point and the corresponding epipolar line
Parameters
----------
lines : ndarray
of shape (n,3) of epipolar lines in standard form
pts : ndarray
of shape (n, 3) of homogeneous coordinates
"""
num = np.abs(lines[:,0] * pts[:,0] + lines[:,1] * pts[:,1] + lines[:,2])
denom = np.sqrt(lines[:,0] ** 2 + lines[:,1] ** 2)
return num / denom
def compute_reprojection_error(F, x, x1, index=None):
"""
Given a set of matches and a known fundamental matrix,
compute distance between match points and the associated
epipolar lines.
The distance between a point and the associated epipolar
line is computed as: $d = \frac{\lvert ax_{0} + by_{0} + c \rvert}{\sqrt{a^{2} + b^{2}}}$.
Parameters
----------
F : ndarray
(3,3) Fundamental matrix
x : arraylike
(n,2) or (n,3) array of homogeneous coordinates
x1 : arraylike
(n,2) or (n,3) array of homogeneous coordinates with the same
length as argument x
Returns
-------
F_error : ndarray
n,1 vector of reprojection errors
"""
if isinstance(x, (pd.Series, pd.DataFrame)):
x = x.values
if isinstance(x1, (pd.Series, pd.DataFrame)):
x1 = x1.values
if x.shape[1] != 3:
x = make_homogeneous(x)
if x1.shape[1] != 3:
x1 = make_homogeneous(x1)
# Compute the epipolar lines
lines1 = compute_epipolar_lines(F,x)
lines2 = compute_epipolar_lines(F.T, x1)
# Compute the euclidean distance from the pt to the line
d1 = epipolar_distance(lines2, x)
d2 = epipolar_distance(lines1, x1)
# Grab the max err from either reprojection
err = np.max(np.column_stack((d1,d2)), axis=1)
if index is not None:
err = pd.Series(err, index=index)
return err
def compute_fundamental_error(F, x, x1):
"""
Compute the fundamental error using the idealized error metric.
Ideal error is defined by $x^{\intercal}Fx = 0$,
where $x$ are all matchpoints in a given image and
$x^{\intercal}F$ defines the standard form of the
epipolar line in the second image.
This method assumes that x and x1 are ordered such that x[0]
    corresponds to x1[0].
Parameters
----------
F : ndarray
(3,3) Fundamental matrix
x : arraylike
(n,2) or (n,3) array of homogeneous coordinates
x1 : arraylike
(n,2) or (n,3) array of homogeneous coordinates with the same
length as argument x
Returns
-------
F_error : ndarray
n,1 vector of reprojection errors
"""
# TODO: Can this be vectorized for performance?
if x.shape[1] != 3:
x = make_homogeneous(x)
if x1.shape[1] != 3:
x1 = make_homogeneous(x1)
if isinstance(x, pd.DataFrame):
x = x.values
if isinstance(x1, pd.DataFrame):
x1 = x1.values
err = np.empty(len(x))
for i in range(len(x)):
err[i] = x1[i].T.dot(F).dot(x[i])
return err
def update_fundamental_mask(F, x1, x2, threshold=1.0, index=None, method='reprojection'):
"""
Given a Fundamental matrix and two sets of points, compute the
reprojection error between x1 and x2. A mask is returned with all
repojection errors greater than the error set to false.
Parameters
----------
F : ndarray
(3,3) Fundamental matrix
x1 : arraylike
(n,2) or (n,3) array of homogeneous coordinates
x2 : arraylike
(n,2) or (n,3) array of homogeneous coordinates
threshold : float
The new upper limit for error. If using
reprojection this is measured in pixels (the default). If
using fundamental, the idealized error is 0. Values +- 0.05
should be good.
index : ndarray
Optional index for mapping between reprojective error
and an associated dataframe (e.g., an indexed matches dataframe).
Returns
-------
mask : dataframe
"""
if method == 'reprojection':
error = compute_reprojection_error(F, x1, x2)
elif method == 'fundamental':
error = compute_fundamental_error(F, x1, x2)
else:
warnings.warn('Unknown error method. Options are "reprojection" or "fundamental".')
mask = pd.DataFrame(np.abs(error) <= threshold, index=index, columns=['fundamental'])
if index is not None:
mask.index = index
return mask
def enforce_singularity_constraint(F):
"""
The fundamental matrix should be rank 2. In instances when it is not,
the singularity constraint should be enforced. This is forces epipolar lines
to be conincident.
Parameters
----------
F : ndarray
(3,3) Fundamental Matrix
Returns
-------
F : ndarray
(3,3) Singular Fundamental Matrix
References
----------
.. [Hartley2003]
"""
if np.linalg.matrix_rank(F) != 2:
u, d, vt = np.linalg.svd(F)
F = u.dot(np.diag([d[0], d[1], 0])).dot(vt)
return F
def compute_fundamental_matrix(kp1, kp2, method='mle', reproj_threshold=2.0,
confidence=0.99, mle_reproj_threshold=0.5):
"""
Given two arrays of keypoints compute the fundamental matrix. This function
    accepts two dataframes (or arrays) of keypoints in which row i of kp1
    corresponds to row i of kp2.
Parameters
----------
kp1 : arraylike
(n, 2) of coordinates from the source image
kp2 : ndarray
(n, 2) of coordinates from the destination image
method : {'ransac', 'lmeds', 'normal', '8point'}
The openCV algorithm to use for outlier detection
reproj_threshold : float
The maximum distances in pixels a reprojected points
can be from the epipolar line to be considered an inlier
confidence : float
[0, 1] that the estimated matrix is correct
Returns
-------
F : ndarray
A 3x3 fundamental matrix
mask : pd.Series
A boolean mask identifying those points that are valid.
Notes
-----
While the method is user definable, if the number of input points
    is < 7, normal outlier detection is automatically used, if 7 < n < 15,
    least medians is used, and if n > 15, ransac can be used.
"""
if method == 'mle':
# Grab an initial estimate using RANSAC, then apply MLE
method_ = cv2.FM_RANSAC
elif method == 'ransac':
method_ = cv2.FM_RANSAC
elif method == 'lmeds':
method_ = cv2.FM_LMEDS
elif method == 'normal':
method_ = cv2.FM_7POINT
elif method == '8point':
method_ = cv2.FM_8POINT
else:
raise ValueError("Unknown estimation method. Choices are: 'lme', 'ransac', 'lmeds', '8point', or 'normal'.")
if len(kp1) == 0 or len(kp2) == 0:
warnings.warn("F-matix computation failed. One of the keypoint args is empty. kp1:{}, kp2:{}.".format(len(kp1), len(kp2)))
return None, None
# OpenCV wants arrays
try: # OpenCV < 3.4.1
F, mask = cv2.findFundamentalMat(np.asarray(kp1),
np.asarray(kp2),
method_,
param1=reproj_threshold,
param2=confidence)
except: # OpenCV >= 3.4.1
F, mask = cv2.findFundamentalMat(np.asarray(kp1),
np.asarray(kp2),
method_,
ransacReprojThreshold=reproj_threshold,
confidence=confidence)
if F is None:
warnings.warn("F computation failed with no result. Returning None.")
return None, None
if F.shape != (3,3):
warnings.warn('F computation fell back to 7-point algorithm, not setting F.')
return None, None
# Ensure that the singularity constraint is met
F = enforce_singularity_constraint(F)
try:
mask = mask.astype(bool).ravel() # Enforce dimensionality
except:
return # pragma: no cover
if method == 'mle':
# Now apply the gold standard algorithm to refine F
if kp1.shape[1] != 3:
kp1 = make_homogeneous(kp1)
if kp2.shape[1] != 3:
kp2 = make_homogeneous(kp2)
# Generate an idealized and to be updated camera model
p1 = camera.camera_from_f(F)
p = camera.idealized_camera()
if kp1[mask].shape[0] <=12 or kp2[mask].shape[0] <=12:
warnings.warn("Unable to apply MLE. Not enough correspondences. Returning with a RANSAC computed F matrix.")
return F, mask
# Apply Levenber-Marquardt to perform a non-linear lst. squares fit
# to minimize triangulation error (this is a local bundle)
result = optimize.least_squares(camera.projection_error, p1.ravel(),
args=(p, kp1[mask].T, kp2[mask].T),
method='lm')
gold_standard_p = result.x.reshape(3, 4) # SciPy Lst. Sq. requires a vector, camera is 3x4
optimality = result.optimality
gold_standard_f = camera_utils.crossform(gold_standard_p[:,3]).dot(gold_standard_p[:,:3])
F = gold_standard_f
mask = update_fundamental_mask(F, kp1, kp2,
threshold=mle_reproj_threshold).values
return F, mask
``` |
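A worked sketch (not part of the module) of the epipolar helpers above, using the fundamental matrix of a pure horizontal translation so the geometry can be checked by hand; the `autocnet.transformation.fundamental_matrix` import path is assumed from the file location.

```python
import numpy as np
from autocnet.transformation import fundamental_matrix as fm  # assumed import path

# F for a pure horizontal translation: matching points share an image row,
# so the epipolar line of a point (u, v, 1) is simply y = v.
F = np.array([[0.,  0.,  0.],
              [0.,  0., -1.],
              [0.,  1.,  0.]])
x = np.array([[10., 20., 1.],
              [30., 45., 1.]])   # homogeneous points in the first image
x1 = np.array([[12., 20., 1.],
               [33., 47., 1.]])  # their candidate matches in the second image

lines = fm.compute_epipolar_lines(F, x)  # (n, 3) lines in standard form
d = fm.epipolar_distance(lines, x1)      # pixel distance of each match to its line
print(d)                                 # -> [0. 2.]
```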
{
"source": "jlaura/knoten",
"score": 2
} |
#### File: knoten/knoten/bundle.py
```python
import numpy as np
import pandas as pd
import pvl
import os
import csmapi
import itertools
from math import floor
from plio.io.isis_serial_number import generate_serial_number
from ale.drivers import loads
from collections import OrderedDict
from knoten.csm import create_csm
def generate_sensors(cubes, directory=None, clean=False):
"""
Generate a set of USGSCSM sensor models from a list of ISIS cube files
Parameters
----------
cubes : str
Directory/filename of a file containing ISIS cube file paths
directory : str
Output directory to save resulting json files. Defaults to the
same directory as cube list file
    clean : bool
        If True, delete the intermediate ISD json files after the sensors
        have been created
Returns
-------
sensors : dictionary
Dictionary mapping ISIS serial numbers to USGSCSM sensor models
"""
if directory is None:
directory = os.path.dirname(cubes)
isd_files = []
sensors = {}
for line in open(cubes):
basename = os.path.splitext(os.path.basename(line.strip()))[0]
isd = os.path.join(directory, basename+'.json')
isd_files.append(isd)
with open(isd, 'w+') as f:
f.write(loads(line.strip(), formatter='usgscsm'))
sn = generate_serial_number(line.strip())
sensors[sn] = create_csm(isd)
if clean:
for isd in isd_files:
os.remove(isd)
return sensors
def closest_approach(points, direction):
"""
Compute the point of closest approach between lines.
Parameters
----------
points : ndarray
An n x 3 array of points on each line
direction : ndarray
An n x 3 array of vectors that defines the direction of each line
Returns
-------
: array
The (x, y, z) point that is closest to all of the lines
: ndarray
        The (x, y, z) covariance matrix that describes the uncertainty of the
point
"""
num_lines = points.shape[0]
design_mat = np.zeros((num_lines * 3, 3))
rhs = np.zeros(num_lines * 3)
for i in range(num_lines):
point = points[i]
line = direction[i] / np.linalg.norm(direction[i])
design_mat[3*i:3*i+3] = np.identity(3) - np.outer(line, line)
rhs[3*i:3*i+3] = np.dot(point,line) * line + point
N_inv = np.linalg.inv(design_mat.T.dot(design_mat))
closest_point = N_inv.dot(design_mat.T).dot(rhs)
return closest_point, N_inv
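# Illustrative check of closest_approach (not part of the module): the x-axis
# and the y-axis intersect at the origin, so their closest-approach point is
# (0, 0, 0).
# >>> pts = np.array([[1., 0., 0.], [0., 2., 0.]])
# >>> dirs = np.array([[1., 0., 0.], [0., 1., 0.]])
# >>> point, covar = closest_approach(pts, dirs)
# >>> np.allclose(point, 0.0)
# True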
def compute_apriori_ground_points(network, sensors):
"""
Compute a priori ground points for all of the free points in a control network.
Parameters
----------
network : DataFrame
The control network as a dataframe generated by plio
sensors : dict
A dictionary that maps ISIS serial numbers to CSM sensors
Returns
-------
: DataFrame
The control network dataframe with updated ground points
"""
for point_id, group in network.groupby('id'):
# Free points are type 2 for V2 and V5 control networks
if group.iloc[0]["pointType"] != 2:
continue
positions = []
look_vecs = []
for measure_id, row in group.iterrows():
measure = csmapi.ImageCoord(row["line"], row["sample"])
locus = sensors[row["serialnumber"]].imageToRemoteImagingLocus(measure)
positions.append([locus.point.x, locus.point.y, locus.point.z])
look_vecs.append([locus.direction.x, locus.direction.y, locus.direction.z])
ground_pt, covar_mat = closest_approach(np.array(positions), np.array(look_vecs))
covar_vec = [covar_mat[0,0], covar_mat[0,1], covar_mat[0,2],
covar_mat[1,1], covar_mat[1,2], covar_mat[2,2]]
network.loc[network.id == point_id, ["aprioriX", "aprioriY", "aprioriZ"]] = ground_pt
network.loc[network.id == point_id, ["adjustedX", "adjustedY", "adjustedZ"]] = ground_pt
network.loc[network.id == point_id, ["adjustedX", "adjustedY", "adjustedZ"]] = ground_pt
# We have to do a separate loop to assign a list to a single cell
for measure_id, row in group.iterrows():
network.at[measure_id, 'aprioriCovar'] = covar_vec
return network
class CsmParameter:
"""
Container class that describes a parameter for a CSM sensor model
"""
def __init__(self, sensor, index):
self.index = index
self.name = sensor.getParameterName(index)
self.type = sensor.getParameterType(index)
self.units = sensor.getParameterUnits(index)
self.value = sensor.getParameterValue(index)
def __repr__(self):
return f'{self.index} {self.name.strip()} ({self.type}): {self.value} {self.units}'
def get_sensor_parameters(sensor, set="adjustable"):
"""
Get a set of the CSM parameters for a CSM sensor
Parameters
----------
sensor : CSM sensor
The CSM sensor model
set : str
The CSM parameter set to get. Either valid, adjustable, or non_adjustable
Returns
-------
: List
A list of CsmParameters
"""
if (set.upper() == "VALID"):
param_set = csmapi.VALID
elif (set.upper() == "ADJUSTABLE"):
param_set = csmapi.ADJUSTABLE
elif (set.upper() == "NON_ADJUSTABLE"):
param_set = csmapi.NON_ADJUSTABLE
else:
raise ValueError(f"Invalid parameter set \"{set}\".")
return [CsmParameter(sensor, index) for index in sensor.getParameterSetIndices(param_set)]
def compute_sensor_partials(sensor, parameters, ground_pt):
"""
Compute the partial derivatives of the line and sample with respect to a set
of parameters.
Parameters
----------
sensor : CSM sensor
The CSM sensor model
parameters : list
The list of CsmParameter to compute the partials W.R.T.
ground_pt : array
The (x, y, z) ground point to compute the partial derivatives at
Returns
-------
: ndarray
The 2 x n array of partial derivatives. The first array is the line
partials and the second array is the sample partials. The partial
derivatives are in the same order as the parameter list passed in.
"""
partials = np.zeros((2, len(parameters)))
csm_ground = csmapi.EcefCoord(ground_pt[0], ground_pt[1], ground_pt[2])
for i in range(len(parameters)):
partials[:, i] = sensor.computeSensorPartials(parameters[i].index, csm_ground)
return partials
def compute_ground_partials(sensor, ground_pt):
"""
Compute the partial derivatives of the line and sample with respect to a
ground point.
Parameters
----------
sensor : CSM sensor
The CSM sensor model
ground_pt : array
The (x, y, z) ground point to compute the partial derivatives W.R.T.
Returns
-------
: ndarray
The 2 x 3 array of partial derivatives. The first array is the line
partials and the second array is the sample partials. The partial
derivatives are in (x, y, z) order.
"""
csm_ground = csmapi.EcefCoord(ground_pt[0], ground_pt[1], ground_pt[2])
partials = np.array(sensor.computeGroundPartials(csm_ground))
return np.reshape(partials, (2, 3))
def compute_coefficient_columns(network, sensors, parameters):
"""
    Compute the column range in the Jacobian matrix occupied by each image's
    sensor parameters and each free point's ground coordinates.
Parameters
----------
network : DataFrame
The control network as a dataframe generated by plio.
sensors : dict
Dictionary that maps ISIS serial numbers to CSM sensor models
parameters : dict
Dictionary that maps serial numbers to lists of parameters to
solve for.
Returns
-------
: OrderedDict
Dictionary that maps serial numbers and point IDs to the column range
their parameters are in the Jacobian matrix.
"""
num_columns = 0
coefficient_columns = OrderedDict()
for serial in network["serialnumber"].unique():
coefficient_columns[serial] = num_columns
num_columns += len(parameters[serial])
coefficient_columns[serial] = (coefficient_columns[serial], num_columns)
for point_id in network["id"].unique():
# Skip fixed points
if network.loc[network.id == point_id].iloc[0]["pointType"] == 4:
continue
coefficient_columns[point_id] = num_columns
num_columns += 3
coefficient_columns[point_id] = (coefficient_columns[point_id], num_columns)
return coefficient_columns
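# Illustrative layout (hypothetical serial numbers and point id): with two
# images solving for 6 parameters each and a single free point, the returned
# mapping looks like
#     {'IMG_1': (0, 6), 'IMG_2': (6, 12), 'pt_001': (12, 15)}
# compute_jacobian and compute_parameter_weights index their blocks with
# these column ranges.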
def compute_jacobian(network, sensors, parameters, coefficient_columns):
"""
Compute the Jacobian matrix.
Parameters
----------
network : DataFrame
The control network as a dataframe generated by plio.
sensors : dict
Dictionary that maps ISIS serial numbers to CSM sensor models
parameters : dict
Dictionary that maps serial numbers to lists of parameters to
solve for.
coefficient_columns : OrderedDict
Dictionary that maps serial numbers and point IDs to
the column range their parameters are in the Jacobian
matrix.
Returns
-------
: ndarray
The Jacobian matrix
"""
num_columns = max([col_range[1] for col_range in coefficient_columns.values()])
num_rows = len(network) * 2
jacobian = np.zeros((num_rows, num_columns))
for i in range(len(network)):
row = network.iloc[i]
serial = row["serialnumber"]
ground_pt = row[["adjustedX", "adjustedY", "adjustedZ"]]
sensor = sensors[serial]
params = parameters[serial]
image_range = coefficient_columns[serial]
point_range = coefficient_columns[row["id"]]
jacobian[2*i : 2*i+2, image_range[0] : image_range[1]] = compute_sensor_partials(sensor, params, ground_pt)
jacobian[2*i : 2*i+2, point_range[0] : point_range[1]] = compute_ground_partials(sensor, ground_pt)
return jacobian
def compute_parameter_weights(network, sensors, parameters, coefficient_columns):
"""
Compute the parameter weight matrix
Parameters
----------
network : DataFrame
The control network as a dataframe generated by plio.
sensors : dict
Dictionary that maps ISIS serial numbers to CSM sensor models
parameters : dict
Dictionary that maps serial numbers to lists of parameters to
solve for.
coefficient_columns : OrderedDict
Dictionary that maps serial numbers and point IDs to
the column range their parameters are in the Jacobian
matrix. Their parameters weights will have the same
ordering in the weight matrix.
Returns
-------
: ndarray
The parameter weight matrix
"""
num_params = max([col_range[1] for col_range in coefficient_columns.values()])
weight_mat = np.zeros((num_params, num_params))
# Image parameters
for sn, params in parameters.items():
col_range = coefficient_columns[sn]
weight_mat[col_range[0]:col_range[1], col_range[0]:col_range[1]] = compute_image_weight(sensors[sn], params)
# Point parameters
for point_id, group in network.groupby('id'):
        # Skip points that do not have a full apriori covariance matrix
        point_covar = list(group.iloc[0]["aprioriCovar"])
        if len(point_covar) != 6:
            continue
        # The covariance matrix is stored as the upper triangle of a symmetric
        # 3 x 3 matrix, so unpack it into the full matrix.
        covar_mat = np.array(
            [[point_covar[0], point_covar[1], point_covar[2]],
             [point_covar[1], point_covar[3], point_covar[4]],
             [point_covar[2], point_covar[4], point_covar[5]]]
        )
col_range = coefficient_columns[point_id]
weight_mat[col_range[0]:col_range[1], col_range[0]:col_range[1]] = np.linalg.inv(covar_mat)
return weight_mat
def compute_image_weight(sensor, parameters):
"""
Compute the weight matrix for the sensor parameters of a single image.
Parameters
----------
sensor : csmapi.RasterGm
The CSM sensor model for the image
parameters : list
The list of parameters to solve for
"""
param_count = len(parameters)
covar_mat = np.zeros((param_count, param_count))
for a, b in itertools.product(range(param_count), range(param_count)):
covar_mat[a, b] = sensor.getParameterCovariance(parameters[a].index, parameters[b].index)
return np.linalg.inv(covar_mat)
def compute_point_weight(cnet, point_id):
"""
Compute the weight matrix for a single point.
Parameters
----------
cnet : DataFrame
The control network dataframe
point_id : str
The point ID string of the point to compute the weight matrix for
"""
covar_vec = cnet.loc[(cnet['id'] == point_id).idxmax(), 'aprioriCovar']
if hasattr(covar_vec, '__len__') and len(covar_vec) == 6:
return np.linalg.inv(np.array(
[[covar_vec[0], covar_vec[1], covar_vec[2]],
[covar_vec[1], covar_vec[3], covar_vec[4]],
[covar_vec[2], covar_vec[4], covar_vec[5]]]))
return np.identity(3)
def compute_residuals(network, sensors):
"""
Compute the error in the observations by taking the difference between the
ground points groundToImage projections and measure values.
Parameters
----------
network : DataFrame
The control network as a dataframe generated by plio
sensors : dict
A dictionary that maps ISIS serial numbers to CSM sensors
Returns
-------
    V : np.ndarray
        A flattened array of (line, sample) residuals for every measure in the network
"""
num_meas = len(network)
V = np.zeros((num_meas, 2))
for i in range(num_meas):
row = network.iloc[i]
serial = row["serialnumber"]
ground_pt = row[["adjustedX", "adjustedY", "adjustedZ"]].values
ground_pt = csmapi.EcefCoord(ground_pt[0], ground_pt[1], ground_pt[2])
sensor = sensors[serial]
img_coord = sensor.groundToImage(ground_pt)
V[i,:] = [row['line'] - img_coord.line, row['sample'] - img_coord.samp]
V = V.reshape(num_meas*2)
return V
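# Hedged usage sketch (added): the residual vector pairs with the Jacobian for
# a normal-equations solve. The variable names below are assumptions, not part
# of this module.
#
#   V = compute_residuals(network, sensors)
#   J = compute_jacobian(network, sensors, parameters, coefficient_columns)
#   # V is ordered (line, sample) per measure, matching the two rows per
#   # measure in J.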
def update_parameters(sensors, parameters, network, updates, coefficient_columns):
"""
    Updates the sensor objects' parameter values and the ground point values in the
    network's DataFrame. The updates are applied in place, so nothing is returned.
Parameters
----------
sensors : dict
A dictionary that maps ISIS serial numbers to CSM sensors
parameters : list
The list of CsmParameter to compute the partials W.R.T.
network : DataFrame
The control network as a dataframe generated by plio.
updates : np.ndarray
An array of updated parameter values
coefficient_columns: OrderedDict
Dictionary that maps serial numbers and point IDs to
the column range their parameters are in the Jacobian
matrix.
Returns
-------
"""
# update the sensor partials
for sn, sensor in sensors.items():
for i, param in enumerate(parameters[sn]):
if i > coefficient_columns[sn][1]:
raise IndexError(f'parameter [{param.name}] at index [{i}] is beyond the max index [{coefficient_columns[sn][1]}].')
current_value = sensor.getParameterValue(param.index)
sensor.setParameterValue(param.index, current_value+updates[coefficient_columns[sn][0]+i])
# update ground points
for _, row in network.iterrows():
point_id = row['id']
ground_pt = row[['adjustedX', 'adjustedY', 'adjustedZ']].values
adj = updates[coefficient_columns[point_id][0]:coefficient_columns[point_id][1]]
network.loc[network.id == point_id, ["adjustedX", "adjustedY", "adjustedZ"]] = ground_pt + adj
def compute_sigma0(V, dX, W_parameters, W_observations):
"""
Computes the resulting standard deviation of the residuals for the current state of the bundle network.
Parameters
----------
V : ndarray
An array of residuals of the difference between registered measure
and back projected ground points in image space.
dX : ndarray
The array of parameter updates
W_parameters: ndarray
The parameter weight matrix (i.e.: sensor parameters and point weights)
W_observations : ndarray
The observation weight matrix (i.e.: measure weights)
Returns
-------
: float64
Standard deviation of the residuals
"""
num_parameters = W_parameters.shape[0]
num_observations = W_observations.shape[0]
dof = num_observations - num_parameters
VTPV = V.dot(W_observations).dot(V) + dX.dot(W_parameters).dot(dX)
sigma0 = np.sqrt(VTPV/dof)
return sigma0
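# Added note: compute_sigma0 implements the reference variance
#
#   sigma_0^2 = (V^T W_obs V + dX^T W_param dX) / (n_obs - n_params)
#
# For example, with VTPV = 50.0, n_obs = 200 and n_params = 150, the degrees of
# freedom are 50 and sigma0 = sqrt(50.0 / 50) = 1.0, i.e. the residuals are
# consistent with the assigned weights.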
def compute_sigma0_sparse(V, dX, W_sensors, W_points, W_observations, column_dict):
"""
Computes the resulting standard deviation of the residuals for the current state of the bundle network.
Parameters
----------
V : ndarray
An array of residuals of the difference between registered measure
and back projected ground points in image space.
dX : ndarray
The array of parameter updates ordered according to column_dict
W_sensors : scipy.sparse.matrix
The sensor weight matrix
W_points : dict
Dictionary that maps point IDs to their weight matrices.
W_observations : ndarray
The observation weight matrix (i.e.: measure weights)
column_dict : dict
Dictionary that maps serial numbers and point IDs to index ranges in dX
Returns
-------
: float64
Standard deviation of the residuals
"""
num_image_parameters = W_sensors.shape[0]
num_observations = W_observations.shape[0]
VTPV = V.dot(W_observations).dot(V)
VTPV += dX[:num_image_parameters].dot(W_sensors.dot(dX[:num_image_parameters]))
for point_id, W_p in W_points.items():
point_update = dX[column_dict[point_id][0]:column_dict[point_id][1]]
VTPV += point_update.dot(W_p.dot(point_update))
dof = num_observations - num_image_parameters - 3 * len(W_points)
return np.sqrt(VTPV/dof)
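# Hedged sketch (not part of the original module): one dense bundle-adjustment
# iteration assembled from the helpers above. The weighted normal equations
# (J^T W J + W_p) dX = J^T W V are solved directly here for illustration only,
# and the observation weight matrix is assumed to be identity.
def _example_bundle_iteration(network, sensors, parameters):
    coefficient_columns = compute_coefficient_columns(network, sensors, parameters)
    W_parameters = compute_parameter_weights(network, sensors, parameters, coefficient_columns)
    W_observations = np.eye(2 * len(network))
    V = compute_residuals(network, sensors)
    J = compute_jacobian(network, sensors, parameters, coefficient_columns)
    # Assemble and solve the normal equations for the parameter updates
    N = J.T.dot(W_observations).dot(J) + W_parameters
    C = J.T.dot(W_observations).dot(V)
    dX = np.linalg.solve(N, C)
    # Apply the updates in place and report the reference standard deviation
    update_parameters(sensors, parameters, network, dX, coefficient_columns)
    return compute_sigma0(V, dX, W_parameters, W_observations)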
``` |
{
"source": "jlaura/krispy",
"score": 2
} |
#### File: krispy/krispy/flaskapp.py
```python
import ast
import cPickle
import glob
import inspect
import json
import logging
import os
import sqlite3
import subprocess
import tempfile
import zipfile
import config
import numpy as np
from pandas.io.json import read_json
import pysal as ps
from flask import Flask, jsonify, request, g, render_template, session, redirect, url_for, escape
from flask.ext.login import LoginManager
from werkzeug.utils import secure_filename
import fiona #Yeah - I punked out...
from api import funcs, CustomJsonEncoder
from pmd import pmdwrapper
import amdparser
'''
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
handler = logging.FileHandler(config.LOGLEVEL)
handler.setLevel(logging.DEBUG)
logger.addHandler(handler)
logger.debug('Starting...')
'''
#Make the Flask App
app = Flask(__name__)
app.config.from_object('config')
#Login items
lm = LoginManager()
lm.init_app(app)
#Setup a cache to store transient python objects
#Upload Setup
UPLOAD_FOLDER = '/var/www/pysalREST/uploads'
ALLOWED_EXTENSIONS = set(['shp', 'dbf', 'shx', 'prj', 'zip', 'amd', 'pmd'])
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
@app.route('/index.html/', methods=['GET', 'POST'])
def showtest():
return render_template('index.html')
def update_file_list(UPLOADED_FOLDER):
"""
    Globs the upload directory and gets a listing of the available files.
Parameters
-----------
    UPLOADED_FOLDER (str) Path to the upload directory supplied on launch.
    """
    return set([os.path.basename(i) for i in glob.glob(UPLOADED_FOLDER + '/*')])
UPLOADED_FILES = update_file_list(UPLOAD_FOLDER)
def get_db():
db = getattr(g, '_database', None)
if db is None:
db = g._database = sqlite3.connect('/var/www/pysalREST/test.db')
return db
def allowed_file(filename):
"""
Check that the uploaded file extensions is in the approved list.
Parameters
-----------
filename (str) The filename, with extension
Returns
--------
Boolean (bool) True if accepted, else false
"""
return '.' in filename and \
filename.rsplit('.',1)[1] in ALLOWED_EXTENSIONS
def unzip(filename, path):
"""
Safe file unzip function.
"""
with zipfile.ZipFile(filename) as zf:
for m in zf.infolist():
words = m.filename.split('/')
destination = path
for w in words[:-1]:
drive, w = os.path.splitdrive(w)
head, w = os.path.split(w)
if w in (os.curdir, os.pardir, ''):
continue
destination = os.path.join(path, w)
zf.extract(m, destination)
return
#Three routes for static items.
@app.route('/js/<path>/', methods=['GET'])
def static_proxy(path):
"""
    When using Flask, this serves static files.
"""
# send_static_file will guess the correct MIME type
return app.send_static_file(os.path.join('js', path))
@app.route('/css/<path>/', methods=['GET'])
def static_css_proxy(path):
return app.send_static_file(os.path.join('css', path))
@app.route('/fonts/<path>', methods=['GET'])
def static_font_proxy(path):
return app.send_static_file(os.path.join('fonts', path))
@app.route('/images/<path>', methods=['GET'])
def static_image_proxy(path):
return app.send_static_file(os.path.join('images', path))
@app.route('/login', methods=['GET', 'POST'])
def login():
if request.method == 'POST':
session['username'] = request.form['username']
return redirect(url_for('/'))
return '''
<form action="" method="post">
<p><input type=text name=username>
<p><input type=submit value=Login>
</form>
'''
@app.route('/', methods=['GET'])
def home():
if 'username' not in session:
        return redirect(url_for('login'))
response = {'status':'success','data':{}}
response['data']['links'] = [{'id':'api', 'href':'/api/'},
{'id':'listdata', 'href':'/listdata/'},
{'id':'upload', 'href':'/upload/'},
{'id':'cached', 'href':'/cached/'}]
return jsonify(response)
@app.route('/cached/', methods=['GET'])
def get_cached():
response = {'status':'success','data':{'cacheditems':{}}}
cacheditems = response['data']['cacheditems']
cur = get_db().cursor()
cur.execute("select * from WObj")
result = cur.fetchall()
for row in result:
cacheditems[row[0]] = {'id':row[0],
'source':row[3],
'type':row[1],
'href':'/cached/{}/'.format(row[0])}
return jsonify(response)
@app.route('/cached/<cachedid>/', methods=['GET'])
def get_cached_entry(cachedid):
"""
Using the database Id (Unique) add a URL endpoint for all cached
items. This queries the DB for the item, reconstructs the
binary blob into a python object, and introspects for available
methods and attributes.
"""
    response = {'status':'success','data':{'message':'Unable to parse available methods.'}}
query = "SELECT Obj FROM WObj WHERE ID = {}".format(cachedid)
cur = get_db().cursor().execute(query)
result = cur.fetchone()[0]
obj = cPickle.loads(str(result))
#Check if the OBJ is a W Object
methodlist = {}
if isinstance(obj, ps.W):
response['data'] = {'methods': {}, 'attrs': []}
#Parse the method list
methods = inspect.getmembers(obj, predicate=inspect.ismethod)
for m in methods:
if m[0][0] != '_':
methodlist[m[0]] = inspect.getargspec(m[1])
response['data']['methods'] = methodlist
#Parse the available attributes
attrs = inspect.getmembers(obj,lambda i : not(inspect.ismethod(i)))
for a in attrs:
if not a[0].startswith('__'):
response['data']['attrs'].append(a[0])
return jsonify(response)
@app.route('/cached/<cachedid>/<method>/', methods=['POST'])
def update_db(cachedid, method):
"""
Load an object from the DB and call one of its methods.
Example
-------
Assuming that a shapefile W Obj occupies a DB row with ID 3, row
standardization can be applied and the object updated in the database
using a POST request:
curl -i -H "Content-Type: application/json" -X POST -d '{"args":["r"]}' http://localhost:5000/cached/3/set_transform/
This example uses the flask development server on port 5000.
Then to confirm that this worked, inspect the weights attribute via a browser:
http://localhost:5000/cached/3/weights/
To revert to binary:
curl -i -H "Content-Type: application/json" -X POST -d '{"args":["b"]}' http://localhost:5000/cached/3/set_transform/
"""
if request.json:
response = {'status':'success','data':{}}
query = "SELECT Obj FROM WObj WHERE ID = {}".format(cachedid)
cur = get_db().cursor().execute(query)
result = cur.fetchone()[0]
obj = cPickle.loads(str(result))
#Parse the method list
methods = inspect.getmembers(obj, predicate=inspect.ismethod)
for m in methods:
if m[0] == method:
call = m[1]
break
#Duplicated in the method POST - move to a helper module
#Parse the args
keys = request.json.keys()
req = request.json
#Setup the python arg / kwarg containers
args = []
kwargs = {}
#Parse the request args and call the method
if 'args' in keys:
for a in req['args']:
try:
args.append(ast.literal_eval(a))
except:
args.append(a)
if 'kwargs' in keys:
for k, v in req['kwargs'].iteritems():
try:
kwargs[k] = ast.literal_eval(v)
except:
kwargs[k] = v
#Check args / kwargs to see if they are python objects
for i, a in enumerate(args):
if a in UPLOADED_FILES:
args[i] = os.path.join(UPLOAD_FOLDER, a)
#elif a in db.keys():
#args[i] = db[a]
for k, v in kwargs.iteritems():
if v in UPLOADED_FILES:
kwargs[k] = os.path.join(UPLOAD_FOLDER, v)
#elif v in db.keys():
#kwargs[k] = db[k]
#Make the call and get the return items
funcreturn = call(*args, **kwargs)
#Update the database since the object might have been changed
pObj = cPickle.dumps(obj)
cur.execute("UPDATE WObj SET Obj=? WHERE Id=?", (sqlite3.Binary(pObj), cachedid))
get_db().commit()
cur.close()
return jsonify(response)
@app.route('/cached/<cachedid>/<attr>/', methods=['GET'])
def get_cached_entry_attr(cachedid, attr):
"""
Load an object from the DB and return the requested
attribute as a json object.
"""
response = {'status':'success','data':{}}
query = "SELECT Obj FROM WObj WHERE ID = {}".format(cachedid)
cur = get_db().cursor().execute(query)
result = cur.fetchone()[0]
obj = cPickle.loads(str(result))
#Could be cached - here it is not, we reinspect with each call
attrs = inspect.getmembers(obj,lambda i : not(inspect.ismethod(i)))
for a in attrs:
if a[0] == attr:
ret = a[1]
break
response['data'] = {attr : ret}
return jsonify(response)
@app.route('/amd/', methods=['POST'])
def executeamd():
response = {'status':'success','data':{}}
amd = request.json
wspecs = amd['input']['weights']
wobj = amdparser.generateW(wspecs['uri'], wspecs['type'], uploaddir=UPLOAD_FOLDER)
attribute = amd['input']['attribute']
y = amdparser.gety(attribute, uploaddir=UPLOAD_FOLDER)
kwargs = amd['parameters']
args = [y, wobj]
#A hack to handle mismatches between the AMD keywords and the PySAL keywords
for k, v in kwargs.iteritems():
if k == 'transform':
kwargs['transformation'] = v
kwargs.pop(k, None)
callpath, call = amdparser.parse_analysis(funcs, amd['analysis_type'])
#Setup the call, the args and the kwargs
call = pmdwrapper(call)
for k, v in kwargs.iteritems():
try:
kwargs[k] = ast.literal_eval(v)
except: pass
funcreturn = call(*args, **kwargs)
funcreturn = recursedict(vars(funcreturn))
response['data'] = funcreturn
return jsonify(response)
@app.route('/api/<module>/', methods=['GET'])
def get_modules(module):
methods = funcs[module].keys()
response = {'status':'success','data':{}}
response['data']['links'] = []
for i in methods:
response['data']['links'].append({'id':'{}'.format(i),
'href':'/api/{}/{}/'.format(module,i)})
return jsonify(response)
@app.route('/api/<module>/<method>/', methods=['GET'])
def get_single_depth_method(module, method):
if isinstance(funcs[module][method], dict):
methods = funcs[module][method].keys()
response = {'status':'success','data':{}}
response['data']['links'] = []
for i in methods:
response['data']['links'].append({'id':'{}'.format(i),
'href':'/api/{}/{}/{}'.format(module,method,i)})
return jsonify(response)
else:
return get_method(module, method)
@app.route('/api/<module>/<module2>/<method>/', methods=['GET'])
def get_nested_method(module, module2, method):
return get_method(module, method, module2=module2)
def get_method(module, method, module2 = None):
"""
Query the API to get the POST parameters.
"""
#Setup the response strings
response = {'status':'success','data':{}}
response['data']['post_template'] = {}
mname = method
#Extract the method from the method dict
if module2 == None:
method = funcs[module][method]
else:
method = funcs[module][module2][method]
#Inspect the method to get the arguments
try:
reqargs = inspect.getargspec(method)
except:
reqargs = inspect.getargspec(method.__init__)
args = reqargs.args
defaults = list(reqargs.defaults)
try:
args.remove('self')
except:
pass
#Pack the arguments into the pos_template
response['data']['post_template'] = {'args':[], 'kwargs':{}}
diff = len(defaults) - len(args)
for i, arg in enumerate(args):
if diff < 0:
diff += 1
response['data']['post_template']['args'].append(arg)
else:
response['data']['post_template']['kwargs'][arg] = defaults[diff]
response['data']['links'] = {'id':'docs',
'href':'{}/{}/docs/'.format(module, mname)}
return jsonify(response)
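#Added illustrative comment: for a call such as /api/esda/fisher_jenks/ the
#post_template built above would resemble
#   {"args": ["y"], "kwargs": {"k": 5}}
#i.e. required arguments (no defaults) land in "args" and defaulted arguments
#land in "kwargs". The exact names depend on the wrapped PySAL callable.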
@app.route('/api/<module>/<method>/docs/', methods=['GET'])
def get_single_docs(module, method):
return get_docs(module, method)
@app.route('/api/<module>/<module2>/<method>/docs/', methods=['GET'])
def get_nested_docs(module, module2, method):
return get_docs(module, method, module2=module2)
def get_docs(module, method, module2=None):
"""
Query the API to get the doc string of the method
"""
response = {'status':'success','data':{}}
response['data']['docstring'] = []
#Extract the method from the method dict
if module2 == None:
method = funcs[module][method]
else:
method = funcs[module][module2][method]
#Introspect the docs
docs = inspect.getdoc(method)
for l in docs.split('\n'):
response['data']['docstring'].append(l)
return jsonify(response)
@app.route('/api/<module>/<method>/', methods=['POST'])
def single_post(module, method):
return post(module, method)
@app.route('/api/<module>/<module2>/<method>/', methods=['POST'])
def nested_post(module, module2, method):
return post(module, method, module2=module2)
def post(module,method, module2=None):
"""
To make a POST using CURL to the flask dev server:
Fisher-Jenks using the Hartigan Olympic time example
    curl -i -H "Content-Type: application/json" -X POST -d '{"args":["[12, 10.8, 11, 10.8, 10.8, 10.6, 10.8, 10.3, 10.3,10.3,10.4,10.5,10.2,10.0,9.9]"], "kwargs":{"k":5}}' http://localhost:5000/api/esda/fisher_jenks/
or
Sample Jenks Caspall using the same example - note that sample
percentage is not passed.
    curl -i -H "Content-Type: application/json" -X POST -d '{"args":["[12, 10.8, 11, 10.8, 10.8, 10.6, 10.8, 10.3, 10.3,10.3,10.4,10.5,10.2,10.0,9.9]"], "kwargs":{"k":5}}' http://localhost:5000/api/esda/jenks_caspall_sampled/
or
Using the CherryPy server on port 8080
Queen from shapefile - NOTE: The file must be uploaded already
    curl -i -H "Content-Type: application/json" -X POST -d '{"args":["NAT.shp"]}' http://localhost:8080/api/weights/queen_from_shapefile/
"""
if not request.json:
response = {'status':'error','data':{}}
response['data'] = 'Post datatype was not json'
return jsonify(response), 400
else:
response = {'status':'success','data':{}}
#Setup the call, the args and the kwargs
if module2 == None:
call = pmdwrapper(funcs[module][method])
else:
call = pmdwrapper(funcs[module][module2][method])
#Parse the args
keys = request.json.keys()
req = request.json
#Setup the python arg / kwarg containers
args = []
kwargs = {}
if 'args' in keys:
for a in req['args']:
try:
args.append(ast.literal_eval(a))
except:
args.append(a)
if 'kwargs' in keys:
for k, v in req['kwargs'].iteritems():
try:
kwargs[k] = ast.literal_eval(v)
except:
kwargs[k] = v
# or if they are uploaded shapefiles
for i, a in enumerate(args):
try:
if a in UPLOADED_FILES:
args[i] = UPLOAD_FOLDER + '/' + a
shpname = a.split('.')[0]
except: pass
try:
if a.split('_')[0] == 'cached':
cacheid = a.split('_')[1]
query = "SELECT Obj FROM WObj WHERE ID = {}".format(cacheid)
cur = get_db().cursor().execute(query)
result = cur.fetchone()[0]
obj = cPickle.loads(str(result))
args[i] = obj
cur.close()
except: pass
for k, v in kwargs.iteritems():
try:
if v in UPLOADED_FILES:
kwargs[k] = os.path.join(UPLOAD_FOLDER, v)
except: pass
try:
if v.split('_')[0] == 'cached':
cacheid = v.split('_')[1]
query = "SELECT Obj FROM WObj WHERE ID = {}".format(cacheid)
cur = get_db().cursor().execute(query)
result = cur.fetchone()[0]
obj = cPickle.loads(str(result))
kwargs[k] = obj
cur.close()
except: pass
#This is a hack until I get the vector/list checking going on
if module == 'esda':
args[0] = np.array(args[0])
#Make the call and get the return items
funcreturn = call(*args, **kwargs)
#Write the W Object to the database
if isinstance(funcreturn, ps.W):
pdata = cPickle.dumps(funcreturn, cPickle.HIGHEST_PROTOCOL)
cur = get_db().cursor()
if method == 'queen_from_shapefile':
m = 'Queen'
else:
m = 'Rook'
indb = False
#Query the db to see if the name / type is in the db
query = "SELECT Type, Shapefile FROM WObj"
cur = get_db().cursor().execute(query)
result = cur.fetchall()
for r in result:
if r[0] == m and r[1] == shpname:
indb = True
break
if indb == False:
obj = (m, sqlite3.Binary(pdata), funcreturn._shpName)
cur.execute("INSERT INTO WObj values (NULL, ?, ?, ?)",obj)
get_db().commit()
cur.close()
response['data'] = {'Shapefile':funcreturn._shpName,
'Weight Type':method}
else:
funcreturn = recursedict(vars(funcreturn))
response['data'] = funcreturn
return jsonify(response)
def recursedict(inputdict):
"""
TODO: Make recursive - once I understand the PMD structure
"""
for k, v in inputdict.iteritems():
if k == 'meta_data':
newpositionalvalues = []
oldpositionalvalues = v['positional_values']
for i in oldpositionalvalues:
if isinstance(i, np.ndarray):
newpositionalvalues.append(i.tolist())
elif isinstance(i, ps.W):
newpositionalvalues.append(i.__repr__())
else:
newpositionalvalues.append(i)
v['positional_values'] = newpositionalvalues
elif isinstance(v, np.ndarray):
inputdict[k] = v.tolist()
elif isinstance(v, ps.W):
inputdict[k] = v.__repr__()
return inputdict
#This is not API - can I abstract away and have this in the front-end?
@app.route('/upload/', methods=['POST'])
def upload_file():
"""
POST - Upload a file to the server (a directory)
Examples:
curl -X POST -F [email protected] http://localhost:8081/upload/
curl -X POST -F [email protected] -F [email protected] -F [email protected] http:/localhost:8081/upload/
"""
if request.method == 'POST':
files = request.files
#Create a temporary directory to store the uploaded data
tmpdir = tempfile.mkdtemp()
logger.debug(tmpdir)
for k, f in request.files.iteritems():
#Discard the keys - are they ever important since the user
# has named the file prior to upload?
if f and allowed_file(f.filename):
logger.debug(f)
filename = secure_filename(f.filename)
savepath = os.path.join(tmpdir, filename)
f.save(savepath)
if filename.split('.')[1] == 'zip':
unzip(savepath, tmpdir)
#Iterate through the tmp directory, glob all the shapefiles, and load into postgreSQL
shps = glob.glob(os.path.join(tmpdir, '*.shp'))
for shp in shps:
logger.debug(shp)
cmd = ['/usr/bin/ogr2ogr', '-f', 'PostgreSQL', 'PG:host=10.0.23.5 user=pysal password=<PASSWORD> dbname=cybergis']
cmd.append(shp)
logger.debug(cmd)
logger.debug(subprocess.call(cmd))
                #Ideally we store metadata about the upload, but for now just
                #return the paths of the files saved from this request.
                response = {'status':'success','data':{}}
                for u in request.files.values():
                    response['data'][u.filename] = '{}/{}'.format(tmpdir, u.filename)
return jsonify(response)
else:
response = {'status':'error','data':{'message':'Either "." not in filename or extensions not in approved list.'}}
return jsonify(response)
#Clean up the temporary directory
#os.removedirs(tmpdir)
return jsonify(response)
@app.route('/api/', methods=['GET'])
def get_api():
"""
The api start page.
"""
response = {'status':'success','data':{}}
response['data']['links'] = []
toplevel = funcs.keys()
for i in toplevel:
response['data']['links'].append({'id':'{}'.format(i), 'href':'/api/{}'.format( i)})
return jsonify(response)
@app.route('/listdata/', methods=['GET'])
def get_listdata():
"""
List the data that is in the upload directory
"""
response = {'status':'success','data':{}}
shapefiles = {}
pmd = {}
for f in os.listdir(UPLOAD_FOLDER):
basename = f.split('.')
if f[0] == '.':
continue
if basename[1] in ['zip']:
continue
if basename[1] in ['wmd']:
continue
if basename[1] in ['amd', 'pmd']:
pmd[basename[0]] = basename[0]
continue
if basename[0] not in shapefiles.keys():
shapefiles[basename[0]] = []
shapefiles[basename[0]].append(os.path.join(UPLOAD_FOLDER, f))
else:
shapefiles[basename[0]].append(os.path.join(UPLOAD_FOLDER, f))
response['data']['shapefiles'] = shapefiles
response['data']['pmd'] = pmd
return jsonify(response)
@app.route('/listdata/<filename>/', methods=['GET'])
def get_shpinfo(filename):
#Wrap in a try/except
files = os.path.join(UPLOAD_FOLDER, filename)
#Info about the shapefile
try:
response = {'status':'success','data':{'attributes':{}}}
fhandler = ps.open(files + '.shp', 'r')
response['data']['geomheader'] = fhandler.header
fhandler = ps.open(files + '.dbf', 'r')
response['data']['fields'] = fhandler.header
response['data']['fields'] += ['thegeom']
except:
response = {'status':'success','data':{}}
jsondata = open(files + '.amd')
data = json.load(jsondata)
response['data'] = data
return jsonify(response)
@app.route('/listdata/<filename>/<field>/', methods=['GET'])
def get_shpdbf(filename, field):
"""
Extract a column from a shapefile (geom) or dbf (attribute)
"""
files = (os.path.join(UPLOAD_FOLDER, filename))
if field == 'thegeom':
geoms = []
with fiona.collection(files + '.shp', "r") as source:
for feat in source:
geoms.append(feat)
geojson = {
"type": "FeatureCollection",
"features": geoms
}
response = {'status':'success','data':{'geojson':geojson}}
else:
dbf = ps.open(files + '.dbf', 'r')
attr = dbf.by_col(field)
response = {'status':'success','data':{field:attr}}
return jsonify(response)
@app.teardown_appcontext
def close_connection(exception):
"""
    Gracefully close the DB connection
"""
db = getattr(g, '_database', None)
if db is not None:
db.close()
if __name__ == '__main__':
app.config.update(DEBUG=True)
app.run()
```
#### File: krispy/mod_api/api_helpers.py
```python
import cPickle
import inspect
from urlparse import urlparse
import requests
from pmd import pmdwrapper
def getargorder(call):
"""
Get the order of required args from a function.
Does not support *args currently.
"""
try:
args = inspect.getargspec(call)
try:
nargs = len(args.args)
except:
nargs = 0
try:
ndef = len(args.defaults)
except:
ndef = 0
nreqargs = nargs - ndef
argorder = args.args[:nreqargs]
except:
args = inspect.getargspec(call.__init__)
try:
nargs = len(args.args)
except:
nargs = 0
try:
ndef = len(args.defaults)
except:
ndef = 0
nreqargs = nargs - ndef
argorder = args.args[1:nreqargs] #Ignore self
return argorder
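#Hedged usage sketch (added): for a callable such as
#   def queen_from_shapefile(shapefile, idVariable=None, sparse=False): ...
#getargorder would return ['shapefile'], i.e. only the arguments without
#defaults, in the order they must be supplied.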
def gettoken(a):
url = False
try:
o = urlparse(a)
if o.scheme in ['http', 'https', 'ftp']:
url = True
except:pass
#If 'a' is a url, get the json data
if url:
if a[-1] == '/':
url = a + 'raw'
else:
url = a + '/raw'
r = requests.get(url, verify=False)
try:
#This is dangerous, get load working without guessing at encoding, e.g. utf-8
response = cPickle.loads(str(r.text))
except:
response = r.json()
return response
'''
if 'raw' in o.path.split('/'):
r = requests.get(a, verify=False)
try:
a = cPickle.loads(r.content)
return a
except:
print "ERROR: Can not load RAW data as a Python Object"
else:
r = requests.get(a, verify=False).json()
try:
a = r['data']
return a
except: pass
return False
'''
```
#### File: krispy/mod_data/controllers.py
```python
import ast
import cPickle
import decimal
import glob
import hashlib
import json
import os
import shutil
import subprocess
import tempfile
import time
import numpy as np
from flask import Blueprint, request, jsonify, g, current_app
from flask.ext.login import login_required, current_user
from werkzeug.utils import secure_filename
import geoalchemy2.functions as geofuncs
from app import db, seen_classes, cachedobjs
from app.mod_data.models import UserData, UserPyObj, GeoPoly, GEOMLOOKUP
from app.mod_data import upload_helpers as uph
import config
mod_data = Blueprint('mod_data', __name__)
def getdatalist(cuid, tabular = True):
cuid = int(cuid)
if tabular:
availabledata = UserData.query.filter_by(userid = cuid).all()
else:
availabledata = UserPyObj.query.filter_by(userid = cuid).all()
entries = {}
for a in availabledata:
dataname = a.datahash.split('_')
entry = {'name':a.dataname,
'href':config.baseurl + '/data/{}/'.format(a.datahash)}
#Expiration time also goes here.
entries[a.id] = entry
return entries
@mod_data.route('/', methods=['GET'])
@login_required
def listdata():
"""
List the available datasets by querying the DB and
returning metadata about the available user uploaded data.
"""
cuid = current_user.get_id()
response = {'status':'success','links':[]}
links = response['links']
for i in getdatalist(cuid).itervalues():
response['links'].append(i)
for i in getdatalist(cuid, tabular=False).itervalues():
response['links'].append(i)
response['links'].append({'name':'upload', 'href':config.baseurl + '/data/upload/'})
return jsonify(response)
@mod_data.route('/nontabular/', methods=['GET'])
@login_required
def list_nontabular_data():
cuid = current_user.get_id()
    response = {'status':'success', 'data':{}}
response['data'] = getdatalist(cuid, tabular=False)
return jsonify(response)
@mod_data.route('/tabular/', methods=['GET'])
@login_required
def list_tabular_data():
cuid = current_user.get_id()
    response = {'status':'success', 'data':{}}
response['data'] = getdatalist(cuid)
return jsonify(response)
def parse_nontabular(response, row):
"""
Parse a row containing a nontabular data entry, e.g. a PySAL object,
and return a listing of available methods and attributes
"""
#response['data']['name'] = row.dataname
#response['data']['date_created'] = row.date_created
#response['data']['date_last_accessed'] = row.date_accessed
#Expiration goes here as maxstorage time - row.data_accessed
row.get_pyobj()
response['links']['fields']['methods'] = row.methods
response['links']['fields']['attributes'] = row.attributes
response['links']['fields']['attributes'].append('full_result')
response['links']['fields']['provenance'] = {}
return response
def parse_tabular(response, tablename, tablehash):
"""
Open a table containing tabular data and return a listing of fields
"""
if tablehash in seen_classes:
cls = current_app.class_references[tablehash]
else:
db.metadata.reflect(bind=db.engine)
seen_classes.add(tablehash)
#Dynamic class creation using metaclasses
geomtype = "Polygon"
basegeomcls = GEOMLOOKUP[geomtype]
cls = type(str(tablehash), (basegeomcls, db.Model,), {'__tablename__':tablehash,
'__table_args__' : {'extend_existing': True}})
current_app.class_references[tablehash] = cls
#response['data']['name'] = tablename
response['links'][2] = {'fields': [c.name for c in cls.__table__.columns]}
#response['data']['fields'].append('geojson')
#TODO: Add topojson support if the db is postgresql
return response
@mod_data.route('/<objhash>/', methods=['GET'])
#@login_required
def get_cached_entry(objhash):
response = {'status':'success',
'methods': ['GET', 'POST','PUT', 'DELETE'],
'links':[{'name':'raw', 'href':'{}/data/{}/raw/'.format(config.baseurl, objhash)},
{'name':'geojson', 'href':'{}/data/{}/geojson/'.format(config.baseurl, objhash)},
{'name':'fields','href':'{}/data/{}/fields/'.format(config.baseurl, objhash)}]}
'''
row = UserPyObj.query.filter_by(datahash = objhash).first()
if row != None:
response = parse_nontabular(response, row)
else:
row = UserData.query.filter_by(datahash = objhash).first()
tablehash = row.datahash
tablename = row.dataname
response = parse_tabular(response, tablename, tablehash)
'''
return jsonify(response)
@mod_data.route('/<objhash>/<value>/', methods=['GET'])
#@login_required
def get_stored_entry(objhash, value):
"""
This is a dispatcher function which dispatches the request to either a function
to return an value of an object or a field of a table.
"""
response = {'status':'success','data':{}}
row = UserPyObj.query.filter_by(datahash = objhash).first()
if row != None:
row.get_pyobj()
if value != 'full_result' and value != 'raw':
try:
responsedata = getattr(row.liveobj, value)
if isinstance(responsedata, np.ndarray):
responsedata = responsedata.tolist()
response['data'] = responsedata
return jsonify(response)
except:
return jsonify({'status':'failure', 'data':'Unable to find value'})
elif value == 'raw':
try:
data = row.pyobj
if isinstance(data, np.ndarray):
return json.dumps(data.tolist())
else:
return cPickle.dumps(row.pyobj)
except:
response = {'status':'error', 'message':'The object has no raw representation.'}
return jsonify(response)
else:
#serialize the full row
pass
else:
response = get_dataset_field(objhash, value)
return jsonify(response)
@mod_data.route('/cached/<uid>/<objhash>/<method>', methods=['POST'])
@login_required
def call_cached_entry_method(uid, objhash, method):
raise NotImplementedError
def get_dataset_field(tablename, field):
response = {'status':'success','data':{}}
if tablename in seen_classes:
cls = current_app.class_references[tablename]
else:
db.metadata.reflect(bind=db.engine)
seen_classes.add(tablename)
cls = type(str(tablename), (GeoPoly, db.Model,), {'__tablename__':tablename,
'__table_args__' : {'extend_existing': True}})
current_app.class_references[tablename] = cls
if field == config.geom_column:
vector = cls.query.with_entities(geofuncs.ST_AsGeoJSON(getattr(cls, field))).all()
response['data'] = [v[0] for v in vector]
elif field == 'geojson':
#TODO: How can this be cleaner? Do I need 2 queries go get geojson?
#rows = cls.query.all()
geoms = cls.query.with_entities(geofuncs.ST_AsGeoJSON(getattr(cls, config.geom_column))).all()
features = []
for i, row in enumerate(geoms):
#attributes = row.as_dict()
#attributes.pop('wkb_geometry', None)
#for k, v in attributes.iteritems():
#if isinstance(v, decimal.Decimal):
#attributes[k] = float(v)
current_feature = {'type':'Feature',
'geometry':ast.literal_eval(geoms[i][0])}
#'properties':attributes}
features.append(current_feature)
geojson = {"type": "FeatureCollection","features": features}
#geojson = {"type":"FeatureCollection", "features": geoms}
response['data']['geojson'] = geojson
elif field == 'topojson':
#TODO: Add topojson support if the DB is postgresql
pass
elif field == 'raw':
return {'status':'error', 'message':'Tabular data does not have a raw representation, yet.'}
else:
vector = cls.query.with_entities(getattr(cls, field)).all()
responsevector = [v[0] for v in vector]
if isinstance(responsevector[0], decimal.Decimal):
for i, v in enumerate(responsevector):
responsevector[i] = float(v)
response['data'] = responsevector
return response
@mod_data.route('/upload/', methods=['GET'])
@login_required
def upload_get():
"""
The upload homepage.
"""
response = {'status':'success'}
response['methods'] = ['GET', 'POST']
response['arguments'] = {}
    response['arguments']['required_arguments'] = {'filename':{'description':'.zip file containing ogr2ogr-convertible spatial data',
'default': 'None',
'type': 'file object'}}
response['description'] = "Endpoint to upload data to this data service using a POST request."
return jsonify(response)
@mod_data.route('/upload/', methods=['POST'])
@login_required
def upload():
"""
Upload to a temporary directory, validate, call ogr2ogr and write to the DB
Using curl via the command line.
---------------------------------
Example 1 is from pysal examples (from that dir)
Example 2 is a subset of NAT, zipped.
Example 3 is columbus via the webpool and a sample user.
curl -X POST -F [email protected] -F [email protected] -F [email protected] http://localhost:8080/mydata/upload/
curl -X POST -F filename=@NAT_Subset.zip http://localhost:8080/mydata/upload/
curl -i -k -u <EMAIL>:jay -X POST -F [email protected] https://webpool.csf.asu.edu/pysalrest/mydata/upload/
"""
cuid = current_user.get_id()
tmpdir = tempfile.mkdtemp()
for f in request.files.values():
if f and uph.allowed_file(f.filename):
filename = secure_filename(f.filename)
savepath = os.path.join(tmpdir, filename)
f.save(savepath)
basename, ext = filename.split('.')
if ext == 'zip':
uph.unzip(savepath, tmpdir)
#Now iterate over all the shapefiles and call ogr2ogr
shps = glob.glob(os.path.join(tmpdir, '*.shp'))
for shp in shps:
shptablename = os.path.splitext(os.path.basename(shp))[0]
#datahashvalue = '{}_{}_{}'.format(cuid, shptablename, time.time())
datahash = hashlib.sha1(shp).hexdigest()
host, port = config.dbhost.split(':')
cmd = [config.ogr2ogr, '-f', "{}".format(config.dbtypename),
"{}:host={} port={} user={} password={} dbname={}".format(config.dbabbrev,
host,
port,
config.dbusername,
config.dbpass,
config.dbname),
shp,
'-nlt', 'PROMOTE_TO_MULTI',
'-nln', datahash,
'-lco', 'GEOMETRY_NAME={}'.format(config.geom_column),
'-skipfailures']
response = subprocess.call(cmd)
uploadeddata = UserData(cuid, datahash, shptablename)
db.session.add(uploadeddata)
db.session.commit()
#Cleanup
shutil.rmtree(tmpdir)
return jsonify({'status':'success', 'data':{'href': config.baseurl + '/data/{}'.format(datahash)}})
```
#### File: krispy/mod_data/models.py
```python
import cPickle
import inspect
from app import db
from geoalchemy2 import Geometry
#Base DB Model - All other tables subclass this class
class Base(db.Model):
__abstract__ = True
id = db.Column(db.Integer, primary_key=True)
date_created = db.Column(db.DateTime, default=db.func.current_timestamp())
date_accessed = db.Column(db.DateTime, default=db.func.current_timestamp(),
onupdate=db.func.current_timestamp())
#User to Data Lookup Table
class UserData(Base):
__tablename__ = 'userdata'
userid = db.Column(db.Integer, nullable=False)
datahash = db.Column(db.String(256), nullable=False)
dataname = db.Column(db.String(256), nullable=False)
def __init__(self, userid, datahash, dataname):
self.userid = userid
self.datahash = datahash
self.dataname = dataname
def __repr__(self):
return '<User: {} | Data: {}>'.format(self.userid,
self.datahash)
class UserPyObj(Base):
__tablename__ = 'userpyobj'
userid = db.Column(db.Integer, nullable=False)
dataname = db.Column(db.String(256), nullable=False)
pyobj = db.Column(db.PickleType, nullable=True)
datahash = db.Column(db.String(256), nullable=False)
'''
def __init__(self, userid, pyobj, dataname, datahash=None):
self.userid = userid
self.pyobj = pyobj
self.datahash = datahash
self.dataname = dataname
'''
def get_pyobj(self):
"""
Return a python object, loaded via cPickle, from the DB and parse
out the methods and attributes.
"""
#self.liveobj = cPickle.loads(str(self.pyobj))
self.liveobj = self.pyobj
self.methods = {}
methods = inspect.getmembers(self.liveobj, predicate=inspect.ismethod)
for m in methods:
if m[0][0] != '_':
self.methods[m[0]] = inspect.getargspec(m[1])
#Parse the available attributes
self.attributes = []
attrs = inspect.getmembers(self.liveobj,lambda i : not(inspect.ismethod(i)))
for a in attrs:
if not a[0].startswith('__'):
self.attributes.append(a[0])
def __repr__(self):
        return '<User: {} has a pyobj named {}.>'.format(self.userid, self.datahash)
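#Hedged usage sketch (added): a row fetched through SQLAlchemy can expose its
#pickled object like this; the query below is illustrative only.
#
#   row = UserPyObj.query.filter_by(datahash=objhash).first()
#   row.get_pyobj()
#   row.methods      # dict of public method names -> argspecs
#   row.attributes   # list of public attribute names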
class GeoPoly():
wkb_geometry = db.Column(Geometry("POLYGON"))
def as_dict(self):
return {c.name: getattr(self, c.name) for c in self.__table__.columns}
class GeoMultiPoly():
wkb_geometry = db.Column(Geometry("MULTIPOLYGON"))
def as_dict(self):
return {c.name: getattr(self, c.name) for c in self.__table__.columns}
GEOMLOOKUP = {'Polygon':GeoPoly,
'MultiPolygon':GeoMultiPoly}
``` |
{
"source": "jlaura/plio",
"score": 2
} |
#### File: plio/io/io_controlnetwork.py
```python
from enum import IntEnum
from time import gmtime, strftime
import warnings
import pandas as pd
import numpy as np
import pvl
import struct
from plio.io import ControlNetFileV0002_pb2 as cnf
from plio.io import ControlNetFileHeaderV0005_pb2 as cnh5
from plio.io import ControlPointFileEntryV0005_pb2 as cnp5
from plio.utils.utils import xstr, find_in_dict
HEADERSTARTBYTE = 65536
DEFAULTUSERNAME = 'None'
def write_filelist(lst, path="fromlist.lis"):
"""
Writes a filelist to a file so it can be used in ISIS3.
Parameters
----------
lst : list
A list containing full paths to the images used, as strings.
path : str
The name of the file to write out. Default: fromlist.lis
"""
handle = open(path, 'w')
for filename in lst:
handle.write(filename)
handle.write('\n')
return
class MeasureMessageType(IntEnum):
"""
An enum to mirror the ISIS3 MeasureLogData enum.
"""
GoodnessOfFit = 2
MinimumPixelZScore = 3
MaximumPixelZScore = 4
PixelShift = 5
WholePixelCorrelation = 6
SubPixelCorrelation = 7
class MeasureLog():
def __init__(self, messagetype, value):
"""
A protobuf compliant measure log object.
Parameters
----------
messagetype : int or str
Either the integer or string representation from the MeasureMessageType enum
value : int or float
The value to be stored in the message log
"""
if isinstance(messagetype, int):
# by value
self.messagetype = MeasureMessageType(messagetype)
else:
# by name
self.messagetype = MeasureMessageType[messagetype]
if not isinstance(value, (float, int)):
raise TypeError(f'{value} is not a numeric type')
self.value = value
def __repr__(self):
return f'{self.messagetype.name}: {self.value}'
def to_protobuf(self, version=2):
"""
Return protobuf compliant measure log object representation
of this class.
Returns
-------
log_message : obj
MeasureLogData object suitable to append to a MeasureLog
repeated field.
"""
# I do not see a better way to get to the inner MeasureLogData obj than this
# imports were not working because it looks like these need to instantiate off
# an object
if version == 2:
log_message = cnf.ControlPointFileEntryV0002().Measure().MeasureLogData()
elif version == 5:
log_message = cnp5.ControlPointFileEntryV0005().Measure().MeasureLogData()
log_message.doubleDataValue = self.value
log_message.doubleDataType = self.messagetype
return log_message
@classmethod
def from_protobuf(cls, protobuf):
return cls(protobuf.doubleDataType, protobuf.doubleDataValue)
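# Hedged usage sketch (added): MeasureLog round-trips between Python and the
# protobuf representation. The values below are illustrative.
#
#   log = MeasureLog('GoodnessOfFit', 0.95)
#   message = log.to_protobuf()          # protobuf MeasureLogData object
#   MeasureLog.from_protobuf(message)    # back to a MeasureLog instance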
class IsisControlNetwork(pd.DataFrame):
# normal properties
_metadata = ['header']
@property
def _constructor(self):
return IsisControlNetwork
def from_isis(path, remove_empty=True):
# Now get ready to work with the binary
with IsisStore(path, mode='rb') as store:
df = store.read()
return df
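# Hedged usage sketch (added): a typical round trip with this module. The file
# names are placeholders.
#
#   df = from_isis('input.net')              # IsisControlNetwork (a DataFrame)
#   df.header                                # original PVL header
#   to_isis(df, 'output.net', targetname='Moon')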
def to_isis(obj, path, mode='wb', version=2,
headerstartbyte=HEADERSTARTBYTE,
networkid='None', targetname='None',
description='None', username=DEFAULTUSERNAME,
creation_date=None, modified_date=None,
pointid_prefix=None, pointid_suffix=None):
if targetname == 'None':
warnings.warn("Users should provide a targetname to this function such as 'Moon' or 'Mars' in order to generate a valid ISIS control network.")
with IsisStore(path, mode) as store:
if not creation_date:
creation_date = strftime("%Y-%m-%d %H:%M:%S", gmtime())
if not modified_date:
modified_date = strftime("%Y-%m-%d %H:%M:%S", gmtime())
point_messages, point_sizes = store.create_points(obj, pointid_prefix, pointid_suffix)
points_bytes = sum(point_sizes)
buffer_header, buffer_header_size = store.create_buffer_header(networkid,
targetname,
description,
username,
point_sizes,
creation_date,
modified_date)
# Write the buffer header
store.write(buffer_header, HEADERSTARTBYTE)
# Then write the points, so we know where to start writing, + 1 to avoid overwrite
point_start_offset = HEADERSTARTBYTE + buffer_header_size
for i, point in enumerate(point_messages):
store.write(point, point_start_offset)
point_start_offset += point_sizes[i]
header = store.create_pvl_header(version, headerstartbyte, networkid,
targetname, description, username,
buffer_header_size, points_bytes,
creation_date, modified_date)
store.write(header.encode('utf-8'))
class IsisStore(object):
"""
Class to manage IO of an ISIS control network (version 2).
Attributes
----------
pointid : int
The current index to be assigned to newly added points
"""
point_field_map = {
'type' : 'pointType',
'chooserName' : 'pointChoosername',
'datetime' : 'pointDatetime',
'editLock' : 'pointEditLock',
'ignore' : 'pointIgnore',
'jigsawRejected' : 'pointJigsawRejected',
'log' : 'pointLog'
}
measure_field_map = {
'type' : 'measureType',
'choosername' : 'measureChoosername',
'datetime' : 'measureDatetime',
'editLock' : 'measureEditLock',
'ignore' : 'measureIgnore',
'jigsawRejected' : 'measureJigsawRejected',
'log' : 'measureLog'
}
def __init__(self, path, mode=None, **kwargs):
self.nmeasures = 0
self.npoints = 0
# Conversion from buffer types to Python types
bt = {1: float,
5: int,
8: bool,
9: str,
11: list,
14: int}
self.header_attrs = [(i.name, bt[i.type]) for i in cnf._CONTROLNETFILEHEADERV0002.fields]
self.point_attrs = [(i.name, bt[i.type]) for i in cnf._CONTROLPOINTFILEENTRYV0002.fields]
self.measure_attrs = [(i.name, bt[i.type]) for i in cnf._CONTROLPOINTFILEENTRYV0002_MEASURE.fields]
self._path = path
if not mode:
mode = 'a' # pragma: no cover
self._mode = mode
self._handle = None
self._open()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, traceback):
self.close()
def close(self):
if self._handle is not None:
self._handle.close()
self._handle = None
def _open(self):
self._handle = open(self._path, self._mode)
def read(self):
"""
Given an ISIS store, read the underlying ISIS3 compatible control network and
return an IsisControlNetwork dataframe.
"""
pvl_header = pvl.load(self._path, grammar=pvl.grammar.ISISGrammar())
header_start_byte = find_in_dict(pvl_header, 'HeaderStartByte')
header_bytes = find_in_dict(pvl_header, 'HeaderBytes')
point_start_byte = find_in_dict(pvl_header, 'PointsStartByte')
version = find_in_dict(pvl_header, 'Version')
if version == 2:
self.point_attrs = [i for i in cnf._CONTROLPOINTFILEENTRYV0002.fields_by_name if i != 'measures']
self.measure_attrs = [i for i in cnf._CONTROLPOINTFILEENTRYV0002_MEASURE.fields_by_name]
cp = cnf.ControlPointFileEntryV0002()
self._handle.seek(header_start_byte)
pbuf_header = cnf.ControlNetFileHeaderV0002()
pbuf_header.ParseFromString(self._handle.read(header_bytes))
self._handle.seek(point_start_byte)
cp = cnf.ControlPointFileEntryV0002()
pts = []
for s in pbuf_header.pointMessageSizes:
cp.ParseFromString(self._handle.read(s))
pt = [getattr(cp, i) for i in self.point_attrs if i != 'measures']
for measure in cp.measures:
meas = pt + [getattr(measure, j) for j in self.measure_attrs]
pts.append(meas)
elif version == 5:
self.point_attrs = [i for i in cnp5._CONTROLPOINTFILEENTRYV0005.fields_by_name if i != 'measures']
self.measure_attrs = [i for i in cnp5._CONTROLPOINTFILEENTRYV0005_MEASURE.fields_by_name]
cp = cnp5.ControlPointFileEntryV0005()
self._handle.seek(header_start_byte)
pbuf_header = cnh5.ControlNetFileHeaderV0005()
pbuf_header.ParseFromString(self._handle.read(header_bytes))
self._handle.seek(point_start_byte)
cp = cnp5.ControlPointFileEntryV0005()
pts = []
byte_count = 0
while byte_count < find_in_dict(pvl_header, 'PointsBytes'):
message_size = struct.unpack('I', self._handle.read(4))[0]
cp.ParseFromString(self._handle.read(message_size))
pt = [getattr(cp, i) for i in self.point_attrs if i != 'measures']
for measure in cp.measures:
meas = pt + [getattr(measure, j) for j in self.measure_attrs]
pts.append(meas)
byte_count += 4 + message_size
# Some point and measure fields have the same name, so mangle them as point_ and measure_
point_cols = [self.point_field_map[attr] if attr in self.point_field_map else attr for attr in self.point_attrs]
measure_cols = [self.measure_field_map[attr] if attr in self.measure_field_map else attr for attr in self.measure_attrs]
cols = point_cols + measure_cols
df = IsisControlNetwork(pts, columns=cols)
# Convert the (0.5, 0.5) origin pixels back to (0,0) pixels
df['line'] -= 0.5
df['sample'] -= 0.5
if 'aprioriline' in df.columns:
df['aprioriline'] -= 0.5
df['apriorisample'] -= 0.5
# Munge the MeasureLogData into Python objs
df['measureLog'] = df['measureLog'].apply(lambda x: [MeasureLog.from_protobuf(i) for i in x])
df.header = pvl_header
return df
def write(self, data, offset=0):
"""
Parameters
----------
data : bytes
Encoded header to be written to the file
offset : int
The byte offset into the output binary
"""
self._handle.seek(offset)
self._handle.write(data)
def create_points(self, df, pointid_prefix, pointid_suffix):
"""
Step through a control network (C) and return protocol buffer point objects
Parameters
----------
df : DataFrame
with the appropriate attributes: point_id, point_type, serial,
measure_type, x, y required.
The entries in the list must support grouping by the point_id attribute.
Returns
-------
point_messages : list
of serialized points buffers
point_sizes : list
of integer point sizes
"""
def _set_pid(pointid):
return '{}{}{}'.format(xstr(pointid_prefix),
pointid,
xstr(pointid_suffix))
# TODO: Rewrite using apply syntax for performance
point_sizes = []
point_messages = []
for i, g in df.groupby('id'):
# Get the point specification from the protobuf
point_spec = cnf.ControlPointFileEntryV0002()
# Set the ID and then loop over all of the attributes that the
# point has and check for corresponding columns in the group and
# set with the correct type
#point_spec.id = _set_pid(i)
point_spec.id = _set_pid(i)
point_spec.type = g.iloc[0].pointType
try:
point_spec.referenceIndex = g.iloc[0].referenceIndex
except:
warnings.warn(f'Unable to identify referenceIndex for point {point_spec.id}. Defaulting to index 0.')
point_spec.referenceIndex = 0
for attr, attrtype in self.point_attrs:
# Un-mangle common attribute names between points and measures
df_attr = self.point_field_map.get(attr, attr)
if df_attr in g.columns:
if df_attr == 'pointLog':
# Currently pointLog is not supported.
warnings.warn('The pointLog field is currently unsupported. Any pointLog data will not be saved.')
continue
# As per protobuf docs for assigning to a repeated field.
if df_attr == 'aprioriCovar' or df_attr == 'adjustedCovar':
arr = g.iloc[0][df_attr]
if isinstance(arr, np.ndarray):
arr = arr.ravel().tolist()
if arr:
point_spec.aprioriCovar.extend(arr)
# If field is repeated you must extend instead of assign
elif cnf._CONTROLPOINTFILEENTRYV0002.fields_by_name[attr].label == 3:
getattr(point_spec, attr).extend(g.iloc[0][df_attr])
else:
setattr(point_spec, attr, attrtype(g.iloc[0][df_attr]))
# A single extend call is cheaper than many add calls to pack points
measure_iterable = []
for node_id, m in g.iterrows():
measure_spec = point_spec.Measure()
# For all of the attributes, set if they are an dict accessible attr of the obj.
for attr, attrtype in self.measure_attrs:
# Un-mangle common attribute names between points and measures
df_attr = self.measure_field_map.get(attr, attr)
if df_attr in g.columns:
if df_attr == 'measureLog':
[getattr(measure_spec, attr).extend([i.to_protobuf()]) for i in m[df_attr]]
# If field is repeated you must extend instead of assign
elif cnf._CONTROLPOINTFILEENTRYV0002_MEASURE.fields_by_name[attr].label == 3:
getattr(measure_spec, attr).extend(m[df_attr])
else:
setattr(measure_spec, attr, attrtype(m[df_attr]))
# ISIS pixels are centered on (0.5, 0.5). NDArrays are (0,0) based.
measure_spec.sample = m['sample'] + 0.5
measure_spec.line = m['line'] + 0.5
if 'apriorisample' in g.columns:
measure_spec.apriorisample = m['apriorisample'] + 0.5
measure_spec.aprioriline = m['aprioriline'] + 0.5
measure_iterable.append(measure_spec)
self.nmeasures += 1
self.npoints += 1
point_spec.measures.extend(measure_iterable)
point_message = point_spec.SerializeToString()
point_sizes.append(point_spec.ByteSize())
point_messages.append(point_message)
return point_messages, point_sizes
def create_buffer_header(self, networkid, targetname,
description, username, point_sizes,
creation_date,
modified_date):
"""
Create the Google Protocol Buffer header using the
protobuf spec.
Parameters
----------
networkid : str
The user defined identifier of this control network
targetname : str
The name of the target, e.g. Moon
description : str
A description for the network.
username : str
The name of the user / application that created the control network
point_sizes : list
of the point sizes for each point message
Returns
-------
header_message : str
The serialized message to write
header_message_size : int
The size of the serialized header, in bytes
"""
raw_header_message = cnf.ControlNetFileHeaderV0002()
raw_header_message.created = creation_date
raw_header_message.lastModified = modified_date
raw_header_message.networkId = networkid
raw_header_message.description = description
raw_header_message.targetName = targetname
raw_header_message.userName = username
raw_header_message.pointMessageSizes.extend(point_sizes)
header_message_size = raw_header_message.ByteSize()
header_message = raw_header_message.SerializeToString()
return header_message, header_message_size
def create_pvl_header(self, version, headerstartbyte,
networkid, targetname, description, username,
buffer_header_size, points_bytes,
creation_date, modified_date):
"""
Create the PVL header object
Parameters
----------
version : int
The current ISIS version to write, defaults to 2
headerstartbyte : int
The seek offset that the protocol buffer header starts at
networkid : str
The name of the network
targetname : str
The name of the target, e.g. Moon
description : str
A description for the network.
username : str
The name of the user / application that created the control network
buffer_header_size : int
Total size of the header in bytes
points_bytes : int
The total number of bytes all points require
Returns
-------
: object
An ISIS compliant PVL header object
"""
encoder = pvl.encoder.ISISEncoder(end_delimiter=False)
header_bytes = buffer_header_size
points_start_byte = HEADERSTARTBYTE + buffer_header_size
header = pvl.PVLModule([
('ProtoBuffer',
({'Core':{'HeaderStartByte': headerstartbyte,
'HeaderBytes': header_bytes,
'PointsStartByte': points_start_byte,
'PointsBytes': points_bytes},
'ControlNetworkInfo': pvl.PVLGroup([
('NetworkId', networkid),
('TargetName', targetname),
('UserName', username),
('Created', creation_date),
('LastModified', modified_date),
('Description', description),
('NumberOfPoints', self.npoints),
('NumberOfMeasures', self.nmeasures),
('Version', version)
])
}),
)
])
return pvl.dumps(header, encoder=encoder)
```
#### File: jlaura/plio/setup.py
```python
import os
from setuptools import setup, find_packages
#Grab the README.md for the long description
with open('README.md', 'r') as f:
long_description = f.read()
def setup_package():
setup(
name = "plio",
version = '1.5.0',
author = "USGS Astrogeology",
author_email = "<EMAIL>",
description = ("I/O API to support planetary data formats."),
long_description = long_description,
license = "Public Domain",
keywords = "planetary io",
url = "http://packages.python.org/plio",
packages=find_packages(),
include_package_data=True,
package_data={'plio' : ['sqlalchemy_json/*.py', 'sqlalchemy_json/LICENSE']},
zip_safe=True,
scripts=['bin/socetnet2isis', 'bin/isisnet2socet'],
install_requires=[
'gdal',
'numpy',
'pyproj',
'pvl',
'protobuf',
'h5py',
'pandas',
'sqlalchemy',
'pyyaml',
'networkx',
'affine',
'scipy'],
classifiers=[
"Development Status :: 3 - Alpha",
"Topic :: Utilities",
"License :: Public Domain",
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
)
if __name__ == '__main__':
setup_package()
``` |
{
"source": "jlaura/socet_gxp_dev",
"score": 3
} |
#### File: socet_gxp_dev/Workflow_Scripts/ode_csv2shapefile.py
```python
import os
import sys
import csv
import glob
import math
import argparse
from osgeo import ogr
#ocentric to ographic latitudes
def oc2og(dlat, dMajorRadius, dMinorRadius):
try:
dlat = math.radians(dlat)
dlat = math.atan(((dMajorRadius / dMinorRadius)**2) * (math.tan(dlat)))
dlat = math.degrees(dlat)
except:
print ("Error in oc2og conversion")
return dlat
#ographic to ocentric latitudes
def og2oc(dlat, dMajorRadius, dMinorRadius):
try:
dlat = math.radians(dlat)
dlat = math.atan((math.tan(dlat) / ((dMajorRadius / dMinorRadius)**2)))
dlat = math.degrees(dlat)
except:
print ("Error in oc2og conversion")
return dlat
# Convert longitudes to -180 to 180 degrees
def LonTo180(dlon):
if (dlon > 180.0):
dlon = dlon - 360.0
return dlon
def parse_arguments():
## Parse commandline args with argparse
parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter,
description="""Convert ODE created LOLA, MOLA, or MLA shot data to an Esri pointZ Shapefile.
The CSV is expected to be generated from the script ode_get_laser_alt.py""",
epilog="""EXAMPLES:
%(prog)s Mars --input ode_lolardr.csv
%(prog)s Moon --input ode_molapedr.csv
%(prog)s Mercury --pattern "*_pts_csv.csv"
""")
#parser.add_argument('product', choices=["lolardr","molapedr","mla"],
# help="Specify desired product type: LOLA RDR or MOLA PEDR")
parser.add_argument('target', choices=["mars","moon","mercury"],
type = str.lower,
help="Specify which target: Mars, Moon, Mercury")
## User must specify exactly one of --coords or --raster
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--input', nargs=1, metavar="file.csv",
help='an input ODE csv file as downloaded using ode_get_laser_alt.py.')
group.add_argument('--pattern', nargs=1, metavar='"*_pts_csv.csv"',
default='"*_pts_csv.csv"', dest='pattern',
                       help='pattern to find and run on many files. Default pattern is "*_pts_csv.csv".')
args = parser.parse_args()
return args
args = parse_arguments()
target = args.target
if args.input is not None:
files = args.input
elif args.pattern is not None:
pattern = args.pattern[0]
files = glob.glob(pattern)
else:
    print("Error: specify either --input or --pattern")
    sys.exit(1)
if target == "mars":
targetWKT = "Mars"
majorRadius = 3396190.0
minorRadius = 3376200.0
year = 2000
#From ode Mars run the fields should be:
#LONG_EAST, LAT_NORTH, TOPOGRAPHY, MOLA_RANGE, PLANET_RAD, C, A, EMPHEMERIS_TIME, UTC, ORBIT
longField = 'LONG_EAST'
latField = 'LAT_NORTH'
elevField = 'TOPOGRAPHY'
radiusField = 'PLANET_RAD'
utcField = 'UTC'
orbitField = 'ORBIT'
elif target == "mercury":
targetWKT = "Mercury"
majorRadius = minorRadius = 2439400.0
year = 2015
#From ode Mercury run the fields should be:
#longitude,latitude,altitude,radius,EphemerisTime,MET,frm,chn,Pulswd,thrsh,gain,1way_range,Emiss,TXmJ,UTC,TOF_ns_ET,Sat_long,Sat_lat,Sat_alt,Offnad,Phase,Sol_inc,SCRNGE,seqid,Product_id
longField = 'longitude'
latField = 'latitude'
elevField = 'altitude'
radiusField = 'radius'
utcField = 'UTC'
orbitField = 'chn'
elif target == "moon":
targetWKT = "Moon"
majorRadius = minorRadius = 1737400.0
year = 2000
#From ode Moon run the fields should be:
#Coordinated_Universal_Time,Pt_Longitude,Pt_Latitude,Pt_Radius,Pt_Range,Pt_PulseW,Pt_Energy,Pt_noi,Pt_Thr,Pt_Gn,Flg,S,Frm,Mission_ET,Subseconds,Terrestrial_Dyn_Time,TX_Energy_mJ,TX_PulseW,SC_Longitude,SC_Latitude,SC_radius,Geoid,Offnadir,Emission,Sol_INC,Sol_Phs,Earth_Centr.,Earth_PW,Earth_E.
longField = 'Pt_Longitude'
latField = 'Pt_Latitude'
elevField = 'Pt_Radius'
radiusField = 'Pt_Radius'
utcField = 'Coordinated_Universal_Time'
orbitField = 'S' #not named correctly in the CSV, should be PRODUCT_SHOT_NUMBER
else:
print("Error: " + target + " currently not supported.")
argparse.parser.print_usage()
sys.exit(1)
#based on target and radius write out projection
#
#New prj for GXP
#GEOGCS["GCS_Mars_2000",DATUM["D_Mars_2000",SPHEROID["Mars_2000_IAU_IAG",3396190.0,169.8944472]],PRIMEM["Reference_Meridian",0.0],UNIT["Degree",0.0174532925199433]],VERTCS["Mars_2000",DATUM["D_Mars_2000",SPHEROID["Mars_2000_IAU_IAG",3396190.0,169.8944472]],PARAMETER["Vertical_Shift",0.0],PARAMETER["Direction",1.0],UNIT["Meter",1.0]]
#
if majorRadius - minorRadius > 0.00001:
ecc = majorRadius / (majorRadius - minorRadius)
else:
ecc = 0.0
thePrj = 'GEOGCS["GCS_{0}_{1}",DATUM["D_{0}_{1}",SPHEROID["{0}_{1}_IAU",{2:.1f},{3:.14f}]],PRIMEM["Reference_Meridian",0.0],UNIT["Degree",0.0174532925199433]],VERTCS["Mars_2000",DATUM["D_{0}_{1}",SPHEROID["{0}_{1}_IAU",{2:.1f},{3:.14f}]],PARAMETER["Vertical_Shift",0.0],PARAMETER["Direction",1.0],UNIT["Meter",1.0]]' \
.format(targetWKT,year,majorRadius,ecc)
#loop over files, if the user passed --input then just one file
for input in files:
filename = os.path.basename(input)
nameList = os.path.splitext(filename)[0].split("_")
    #VRT didn't like a long "name", so create a shorter name
if (len(nameList) > 3):
shortName = nameList[0]+"_"+nameList[1]+"_"+nameList[2]
else:
shortName = os.path.splitext(filename)[0]
#create output shapefile and temporary csv
outcsv = shortName+"_tmp.csv"
outvrt = shortName+"_tmp.vrt"
outprj = shortName+"_Z.prj"
outshp = shortName+"_Z.shp"
    #open ODE csv file for reformatting, Lons -180 to 180, and for Mars oc2og lats
outCSV = open(outcsv,'w')
fieldnames = 'Longitude,Latitude,Elev_m,Radius_m,UTC,Orbit\n'
outCSV.write(fieldnames)
with open(input) as csvfile:
reader = csv.DictReader(csvfile, skipinitialspace=True)
header = reader.fieldnames
for row in reader:
#convert to -180 to 180 Longitude domain
lon180 = LonTo180(float(row[longField]))
latOG = float(row[latField])
#if Mars convert to ographic Latitudes
if target == "mars":
latOG = oc2og(latOG, majorRadius, minorRadius)
newl = '{0:.5f},{1:.5f},'.format(lon180, latOG)
newl = newl + row[elevField]+','+row[radiusField]+','+row[utcField]+','+row[orbitField]
if target == "moon":
#convert radius from km to meters
radius = float(row[radiusField]) * 1000.0
#subtract radius from LOLA radius to get 'elevation' in meters
elev = radius - majorRadius
newl = '{0:.5f},{1:.5f},{2:.5f},{3:.2f},'.format(lon180, latOG, elev, radius)
newl = newl + row[utcField] +','+ row[orbitField]
if target == "mercury":
#convert radius from km to meters
radius = float(row[radiusField]) * 1000.0
#subtract radius from LOLA radius to get 'elevation' in meters
#elev = radius - majorRadius
#OR
#convert elevation from km to meters
elev = float(row[elevField]) * 1000.0
newl = '{0:.5f},{1:.5f},{2:.5f},{3:.2f},'.format(lon180, latOG, elev, radius)
newl = newl + row[utcField] +','+ row[orbitField]
outCSV.write(newl+'\n')
outCSV.close()
# Create ogr2ogr virtual header (*.vrt)
outVRT = open(outvrt , 'w')
outVRT.write('<OGRVRTDataSource>\n')
outVRT.write(' <OGRVRTLayer name="'+shortName+'_tmp">\n')
outVRT.write(' <SrcDataSource>'+outcsv+'</SrcDataSource>\n')
outVRT.write(' <LayerSRS>'+thePrj+'</LayerSRS>\n')
outVRT.write(' <GeometryType>wkbPoint</GeometryType>\n')
outVRT.write(' <GeometryField encoding="PointFromColumns" x="Longitude" y="Latitude" z="Elev_m"/>\n')
outVRT.write(' <Field name=\"Longitude\" src=\"Longitude\" type=\"Real\"/>\n')
outVRT.write(' <Field name=\"Latitude\" src=\"Latitude\" type=\"Real\"/>\n')
outVRT.write(' <Field name=\"Elev_m\" src=\"Elev_m\" type=\"Real\"/>\n')
outVRT.write(' <Field name=\"Radius_m\" src=\"Radius_m\" type=\"Real\"/>\n')
outVRT.write(' <Field name=\"UTC\" src=\"UTC\" type=\"String\"/>\n')
outVRT.write(' <Field name=\"Orbit\" src=\"Orbit\" type=\"Integer\"/>\n')
outVRT.write(' </OGRVRTLayer>\n')
outVRT.write('</OGRVRTDataSource>\n')
outVRT.close()
#convert to shapefile using GDAL's ogr
in_ds = ogr.Open(outvrt)
ogr.GetDriverByName("ESRI Shapefile").CopyDataSource(in_ds, outshp)
in_ds = None
outPRJ = open(outprj , 'w')
outPRJ.write(thePrj)
outPRJ.close()
#clean up temporary files
if os.path.exists(outshp):
print (" -deleting temporary csv and vrt files")
os.remove(outcsv)
os.remove(outvrt)
print (" -output shapefile file generated: "+ outshp)
else:
print ("\n Shapefile not generated...something's wrong\n\n")
``` |
{
"source": "jlausuch/pcw",
"score": 2
} |
#### File: ocw/lib/cleanup.py
```python
from webui.settings import PCWConfig
from ocw.lib.azure import Azure
from ocw.lib.EC2 import EC2
from ocw.lib.gce import GCE
from ocw.lib.emailnotify import send_mail
from ocw.lib.emailnotify import send_cluster_notification
import logging
import traceback
from ocw.apps import getScheduler
logger = logging.getLogger(__name__)
def cleanup_run():
for namespace in PCWConfig.get_namespaces_for('cleanup'):
try:
providers = PCWConfig.get_providers_for('cleanup', namespace)
logger.debug("[{}] Run cleanup for {}".format(namespace, ','.join(providers)))
if 'azure' in providers:
Azure(namespace).cleanup_all()
if 'ec2' in providers:
EC2(namespace).cleanup_all()
if 'gce' in providers:
GCE(namespace).cleanup_all()
except Exception as e:
logger.exception("[{}] Cleanup failed!".format(namespace))
send_mail('{} on Cleanup in [{}]'.format(type(e).__name__, namespace), traceback.format_exc())
def list_clusters():
for namespace in PCWConfig.get_namespaces_for('clusters'):
try:
clusters = EC2(namespace).all_clusters()
logger.info("%d clusters found", len(clusters))
send_cluster_notification(namespace, clusters)
except Exception as e:
logger.exception("[{}] List clusters failed!".format(namespace))
send_mail('{} on List clusters in [{}]'.format(type(e).__name__, namespace), traceback.format_exc())
def init_cron():
getScheduler().add_job(cleanup_run, trigger='interval', minutes=60, id='cleanup_all')
getScheduler().add_job(list_clusters, trigger='interval', hours=18, id='list_clusters')
```
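`init_cron` above registers two periodic jobs on whatever `getScheduler()` returns. Assuming that helper wraps an APScheduler scheduler (an assumption; the wrapper itself is not shown here), the same interval-scheduling pattern looks like this in isolation:
```python
# Standalone sketch of the interval-job pattern used by init_cron; assumes
# APScheduler, which getScheduler() appears to wrap but is not shown above.
from apscheduler.schedulers.background import BackgroundScheduler

def tick():
    print("cleanup tick")

scheduler = BackgroundScheduler()
scheduler.add_job(tick, trigger='interval', minutes=60, id='cleanup_all')
scheduler.start()
```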
#### File: ocw/lib/provider.py
```python
from webui.settings import PCWConfig
import re
from datetime import datetime
from datetime import timedelta
from datetime import timezone
from distutils.version import LooseVersion
import logging
class Provider:
def __init__(self, namespace: str):
self._namespace = namespace
self.dry_run = PCWConfig.getBoolean('default/dry_run')
self.logger = logging.getLogger(self.__module__)
def older_than_min_age(self, age):
return datetime.now(timezone.utc) > age + timedelta(
hours=PCWConfig.get_feature_property('cleanup', 'min-image-age-hours', self._namespace))
def needs_to_delete_image(self, order_number, image_date):
if self.older_than_min_age(image_date):
max_images_per_flavor = PCWConfig.get_feature_property('cleanup', 'max-images-per-flavor',
self._namespace)
max_image_age = image_date + timedelta(
hours=PCWConfig.get_feature_property('cleanup', 'max-image-age-hours', self._namespace))
return order_number >= max_images_per_flavor or max_image_age < datetime.now(timezone.utc)
else:
return False
def parse_image_name_helper(self, img_name, regex_s, group_key=['version', 'flavor', 'type', 'arch'],
group_build=['kiwi', 'build']):
for regex in regex_s:
m = re.match(regex, img_name)
if m:
gdict = m.groupdict()
return {
'key': '-'.join([gdict[k] for k in group_key if k in gdict and gdict[k] is not None]),
'build': "-".join([gdict[k] for k in group_build if k in gdict and gdict[k] is not None]),
}
return None
def get_keeping_image_names(self, images):
images_by_flavor = dict()
for img in images:
if (img.flavor not in images_by_flavor):
images_by_flavor[img.flavor] = list()
images_by_flavor[img.flavor].append(img)
keep_images = list()
for img_list in [images_by_flavor[x] for x in sorted(images_by_flavor)]:
img_list.sort(key=lambda x: LooseVersion(x.build), reverse=True)
for i in range(0, len(img_list)):
img = img_list[i]
if (not self.needs_to_delete_image(i, img.date)):
keep_images.append(img.name)
return keep_images
def log_info(self, message: str, *args: object):
if args:
message = message.format(*args)
self.logger.info("[{}] {}".format(self._namespace, message))
def log_warn(self, message: str, *args: object):
if args:
message = message.format(*args)
self.logger.warning("[{}] {}".format(self._namespace, message))
def log_err(self, message: str, *args: object):
if args:
message = message.format(*args)
self.logger.error("[{}] {}".format(self._namespace, message))
def log_dbg(self, message: str, *args: object):
if args:
message = message.format(*args)
self.logger.debug("[{}] {}".format(self._namespace, message))
class Image:
def __init__(self, name, flavor, build, date, img_id=None):
self.name = name
self.flavor = flavor
self.build = build
self.date = date
self.id = img_id if img_id else name
def __str__(self):
return "[{} {} {} {}]".format(self.name, self.flavor, self.build, self.date)
```
#### File: pcw/tests/generators.py
```python
from faker import Faker
from datetime import datetime
fake = Faker()
min_image_age_hours = 7
max_images_per_flavor = 1
max_image_age_hours = 20
azure_storage_resourcegroup = 'openqa'
ec2_max_snapshot_age_days = 1
ec2_max_volumes_age_days = 5
class MockImage:
def __init__(self, name, last_modified=None):
self.name = name
self.last_modified = last_modified
def mock_get_feature_property(feature: str, property: str, namespace: str = None):
if property == 'min-image-age-hours':
return min_image_age_hours
elif property == 'max-images-per-flavor':
return max_images_per_flavor
elif property == 'max-image-age-hours':
return max_image_age_hours
elif property == 'azure-storage-resourcegroup':
return azure_storage_resourcegroup
elif property == 'ec2-max-snapshot-age-days':
return ec2_max_snapshot_age_days
elif property == 'ec2-max-volumes-age-days':
return ec2_max_volumes_age_days
class ec2_meta_mock:
def __init__(self):
self.data = fake.uuid4()
class ec2_image_mock:
def __init__(self):
self.image_id = fake.uuid4()
self.meta = ec2_meta_mock()
self.name = fake.uuid4()
def ec2_tags_mock(tags={fake.uuid4(): fake.uuid4()}):
return [ {'Key': key, 'Value': tags[key]} for key in tags]
class ec2_instance_mock:
def __init__(self, **kwargs):
self.state = {'Name': fake.uuid4()}
self.instance_id = fake.uuid4()
self.image_id = fake.uuid4()
self.instance_lifecycle = fake.uuid4()
self.instance_type = fake.uuid4()
self.kernel_id = fake.uuid4()
self.launch_time = datetime.now()
self.public_ip_address = fake.uuid4()
self.security_groups = [{'GroupName': fake.uuid4()}, {'GroupName': fake.uuid4()}]
self.sriov_net_support = fake.uuid4()
self.tags = ec2_tags_mock(**kwargs)
self.state_reason = {'Message': fake.uuid4()}
self.image = ec2_image_mock()
class azure_instance_mock:
def __init__(self):
self.tags = fake.uuid4()
self.name = fake.uuid4()
self.id = fake.uuid4()
self.type = fake.uuid4()
self.location = fake.uuid4()
def gce_instance_mock():
return {
'name': fake.uuid4(),
'id': fake.uuid4(),
'machineType': fake.uuid4() + '/qq',
'zone': fake.uuid4() + '/qq',
'status': fake.uuid4(),
'creationTimestamp': datetime.now(),
'metadata': fake.uuid4(),
'tags': {'sshKeys': fake.uuid4()}
}
```
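For reference, `ec2_tags_mock` mirrors the boto3-style list of `Key`/`Value` dictionaries. A quick illustration, assuming the definitions above are importable (the tag names and values are arbitrary):
```python
# Example only; tag keys and values are made up.
tags = ec2_tags_mock({'Name': 'openqa-worker', 'team': 'qa'})
assert tags == [{'Key': 'Name', 'Value': 'openqa-worker'},
                {'Key': 'team', 'Value': 'qa'}]
```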
#### File: pcw/tests/test_provider.py
```python
from ocw.lib.provider import Provider, Image
from datetime import datetime
from datetime import timezone
from datetime import timedelta
from tests import generators
from webui.settings import PCWConfig
from .generators import mock_get_feature_property
from .generators import max_images_per_flavor
from .generators import min_image_age_hours
from .generators import max_image_age_hours
def test_older_than_min_age_older(monkeypatch):
monkeypatch.setattr(PCWConfig, 'get_feature_property', lambda *args, **kwargs: 24)
provider = Provider('testolderminage')
assert provider.older_than_min_age(datetime.now(timezone.utc) - timedelta(hours=25)) == True
def test_older_than_min_age_younger(monkeypatch):
monkeypatch.setattr(PCWConfig, 'get_feature_property', lambda *args, **kwargs: 24)
provider = Provider('testolderminage')
assert provider.older_than_min_age(datetime.now(timezone.utc) - timedelta(hours=23)) == False
def test_needs_to_delete_image(monkeypatch):
monkeypatch.setattr(PCWConfig, 'get_feature_property', mock_get_feature_property)
provider = Provider('testneedstodelete')
too_many_images = max_images_per_flavor+1
not_enough_images = max_images_per_flavor-3
older_than_min_age = datetime.now(timezone.utc) - timedelta(hours=min_image_age_hours+1)
assert provider.needs_to_delete_image(too_many_images, datetime.now(timezone.utc)) == False
assert provider.needs_to_delete_image(too_many_images, older_than_min_age) == True
assert provider.needs_to_delete_image(not_enough_images, older_than_min_age) == False
def test_get_keeping_image_names(monkeypatch):
monkeypatch.setattr(PCWConfig, 'get_feature_property', mock_get_feature_property)
provider = Provider('testneedstodelete')
newer_then_min_age = datetime.now(timezone.utc)
older_then_min_age = datetime.now(timezone.utc) - timedelta(hours=min_image_age_hours+1)
older_then_max_age = datetime.now(timezone.utc) - timedelta(hours=max_image_age_hours+1)
generators.max_images_per_flavor = 1
images = [
Image('foo-A-0.0.1-0.1', 'A', '0.0.1-0.1', older_then_min_age),
Image('foo-A-0.0.1-0.2', 'A', '0.0.1-0.2', older_then_min_age),
]
assert provider.get_keeping_image_names(images) == ['foo-A-0.0.1-0.2']
images = [
Image('foo-A-0.0.1-0.1', 'A', '0.0.1-0.1', older_then_min_age),
Image('foo-A-0.0.1-0.2', 'A', '0.0.1-0.2', older_then_max_age),
]
assert provider.get_keeping_image_names(images) == []
images = [
Image('foo-A-0.0.1-0.1', 'A', '0.0.1-0.1', newer_then_min_age),
Image('foo-A-0.0.1-0.2', 'A', '0.0.1-0.2', older_then_min_age),
]
assert provider.get_keeping_image_names(images) == ['foo-A-0.0.1-0.2', 'foo-A-0.0.1-0.1']
images = [
Image('foo-A-0.0.1-0.1', 'A', '0.0.1-0.1', older_then_min_age),
Image('foo-A-0.0.1-0.2', 'A', '0.0.1-0.2', older_then_min_age),
Image('foo-B-0.0.1-0.1', 'B', '0.0.1-0.1', older_then_min_age),
Image('foo-B-0.1.1-0.1', 'B', '0.1.1-0.1', older_then_min_age)
]
assert provider.get_keeping_image_names(images) == ['foo-A-0.0.1-0.2', 'foo-B-0.1.1-0.1']
generators.max_images_per_flavor = 2
images = [
Image('foo-A-0.0.1-0.1', 'A', '0.0.1-0.1', older_then_min_age),
Image('foo-A-0.0.1-0.2', 'A', '0.0.1-0.2', older_then_min_age),
]
assert provider.get_keeping_image_names(images) == ['foo-A-0.0.1-0.2', 'foo-A-0.0.1-0.1']
``` |
{
"source": "jlavallee/HotGator",
"score": 3
} |
#### File: HotGator/heatmap/process.py
```python
import os
try:
import simplejson
except:
import json as simplejson
class Point:
def __init__(self,x,y):
#assert (x > -180) and (x < 180)
#assert (y > -90) and (y < 90)
self.x = x
self.y = y
def __str__(self):
return self.wkt
@property
def wkt(self):
return 'POINT( %f %F )' % (self.x,self.y)
@property
def json(self):
return simplejson.dumps({"type": "Point", "coordinates": [self.x,self.y]})
def intersects(self,bbox):
assert isinstance(bbox,tuple)
minx,miny,maxx,maxy = bbox
return not (self.x>maxx or self.x<minx or self.y>maxy or self.y<miny)
def read_caligator_json(filename):
# get data from:
#http://calagator.org/events.json?date%5Bstart%5D=2010-01-01&date%5Bend%5D=2010-12-31&commit=Filter
return simplejson.loads(open(filename,'r').read())
def stream_csv(header,events_list):
e_list = []
ids = {}
csv_items = ['%s,%s,%s' % header]
for e in events_list:
venue_id = e.get('venue_id')
if venue_id:
if venue_id in ids.keys():
ids[venue_id] += 1
else:
ids[venue_id] = 1
for e in events_list:
venue = e.get('venue')
if venue:
pnt = Point(venue['longitude'],venue['latitude'])
v_id = e['venue_id']
if v_id not in e_list:
if pnt.x and pnt.y and pnt.intersects(bbox):
e_list.append(v_id)
csv_items.append('%s,%s,%s' % (pnt.x, pnt.y, ids[v_id]))
return '\n'.join(csv_items)
def stream_vrt(name,csv,x,y,z):
vrt = '''<OGRVRTDataSource>
<OGRVRTLayer name="%(name)s">
<SrcDataSource>%(csv)s</SrcDataSource>
<GeometryType>wkbPoint</GeometryType>
<LayerSRS>WGS84</LayerSRS>
<GeometryField encoding="PointFromColumns" x="%(x)s" y="%(y)s" z="%(z)s"/>
</OGRVRTLayer>
</OGRVRTDataSource>''' % locals()
return vrt
def interpolate(vrt_file,bbox,filename,radius=0,smooth=0,width=256,height=256):
xmin, ymin, xmax, ymax = bbox#-123.115196,45.351904,-122.237663,45.69395
layer = vrt_file.replace('.vrt','')
cmd = 'gdal_grid -a invdist:power=2.0:smoothing=%(smooth)f -txe %(xmin)f %(xmax)f -tye %(ymin)f %(ymax)f -outsize %(width)d %(height)d -l %(layer)s -of GTiff -ot Float32 %(vrt_file)s %(filename)s' % locals()
os.system(cmd)
if __name__ == '__main__':
# lets restrict points to this area (roughly PDX and neighborhoods)
# bounding coordinates are in degrees
bbox = (-123.115196,45.351904,-122.237663,45.69395)
x,y,z = 'lon','lat','event_count'
csv_header = x,y,z
json_file = 'testdata/events.json'
csv_file = 'heat.csv'
vrt_file = 'heat.vrt'
events_list = read_caligator_json(json_file)
open(csv_file,'w').write(stream_csv(csv_header,events_list))
open(vrt_file,'w').write(stream_vrt('heat',csv_file,x,y,z))
# then open to demo in QGIS
# it will be greyscale until you display values with color range
interpolate(vrt_file,bbox,'pdx.tif',radius=.5,smooth=.001)
``` |
{
"source": "jlaw9/rlmolecule",
"score": 2
} |
#### File: examples/hallway/hallway_config.py
```python
class HallwayConfig:
def __init__(self,
size: int = 5,
max_steps: int = 32):
self.size = size
self.max_steps = max_steps
```
#### File: old/tf_serving_example/layers.py
```python
import numpy as np
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.python.keras.layers.pooling import GlobalPooling1D
from tensorflow.python.keras import backend
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import array_ops
import nfp
class ConcatDense(layers.Layer):
""" Layer to combine the concatenation and two dense layers """
def build(self, input_shape):
num_features = input_shape[0][-1]
self.concat = layers.Concatenate()
self.dense1 = layers.Dense(2 * num_features, activation='relu')
self.dense2 = layers.Dense(num_features)
def call(self, inputs, mask=None):
output = self.concat(inputs)
output = self.dense1(output)
output = self.dense2(output)
return output
class GraphLayer(layers.Layer):
""" Base class for all GNN layers """
def __init__(self, dropout: float=0.0, **kwargs):
super().__init__(**kwargs)
self.dropout = dropout
def build(self, input_shape):
if len(input_shape) == 4:
self.use_global = True
self.tile = Tile()
elif len(input_shape) == 3:
self.use_global = False
else:
raise RuntimeError("wrong input shape")
if self.dropout > 0.:
self.dropout_layer = layers.Dropout(self.dropout)
def get_config(self):
return {"dropout": self.dropout}
class EdgeUpdate(GraphLayer):
def build(self, input_shape):
""" inputs = [atom_state, bond_state, connectivity]
shape(bond_state) = [batch, num_bonds, bond_features]
"""
super().build(input_shape)
bond_features = input_shape[1][-1]
self.gather = nfp.Gather()
self.slice1 = nfp.Slice(np.s_[:, :, 1])
self.slice0 = nfp.Slice(np.s_[:, :, 0])
self.concat = ConcatDense()
self.add = layers.Add()
def call(self, inputs, mask=None):
""" Inputs: [atom_state, bond_state, connectivity]
Outputs: bond_state
"""
if not self.use_global:
atom_state, bond_state, connectivity = inputs
else:
atom_state, bond_state, connectivity, global_state = inputs
global_state = self.tile([global_state, bond_state])
# Get nodes at start and end of edge
source_atom = self.gather([atom_state, self.slice1(connectivity)])
target_atom = self.gather([atom_state, self.slice0(connectivity)])
if not self.use_global:
new_bond_state = self.concat([bond_state, source_atom, target_atom])
else:
new_bond_state = self.concat([bond_state, source_atom, target_atom, global_state])
if self.dropout > 0.:
new_bond_state = self.dropout_layer(new_bond_state)
new_bond_state = self.add([bond_state, new_bond_state])
return new_bond_state
def compute_output_shape(self, input_shape):
return input_shape[1]
class NodeUpdate(GraphLayer):
def build(self, input_shape):
super().build(input_shape)
num_features = input_shape[1][-1]
self.gather = nfp.Gather()
self.slice0 = nfp.Slice(np.s_[:, :, 0])
self.slice1 = nfp.Slice(np.s_[:, :, 1])
self.concat = ConcatDense()
self.reduce = nfp.Reduce(reduction='sum')
self.dense1 = layers.Dense(2 * num_features, activation='relu')
self.dense2 = layers.Dense(num_features)
self.add = layers.Add()
def call(self, inputs, mask=None):
""" Inputs: [atom_state, bond_state, connectivity]
Outputs: atom_state
"""
if not self.use_global:
atom_state, bond_state, connectivity = inputs
else:
atom_state, bond_state, connectivity, global_state = inputs
global_state = self.tile([global_state, bond_state])
source_atom = self.gather([atom_state, self.slice1(connectivity)])
if not self.use_global:
messages = self.concat([source_atom, bond_state])
else:
messages = self.concat([source_atom, bond_state, global_state])
new_atom_state = self.reduce([messages, self.slice0(connectivity), atom_state])
# Dense net after message reduction
new_atom_state = self.dense1(new_atom_state)
new_atom_state = self.dense2(new_atom_state)
if self.dropout > 0.:
new_atom_state = self.dropout_layer(new_atom_state)
new_atom_state = self.add([atom_state, new_atom_state])
return new_atom_state
def compute_output_shape(self, input_shape):
return input_shape[0]
class Tile(layers.Layer):
def call(self, inputs):
global_state, target = inputs
target_shape = tf.shape(target)[1] # number of edges or nodes
expanded = tf.expand_dims(global_state, 1)
return tf.tile(expanded, tf.stack([1, target_shape, 1]))
class GlobalUpdate(GraphLayer):
def __init__(self, units, num_heads, **kwargs):
super().__init__(**kwargs)
self.units = units # H
self.num_heads = num_heads # N
def build(self, input_shape):
super().build(input_shape)
dense_units = self.units * self.num_heads # N*H
self.query_layer = layers.Dense(self.num_heads, name='query')
self.value_layer = layers.Dense(dense_units, name='value')
self.add = layers.Add()
def transpose_scores(self, input_tensor):
input_shape = tf.shape(input_tensor)
output_shape = [input_shape[0], input_shape[1], self.num_heads, self.units]
output_tensor = tf.reshape(input_tensor, output_shape)
return tf.transpose(a=output_tensor, perm=[0, 2, 1, 3]) # [B,N,S,H]
def call(self, inputs, mask=None):
if not self.use_global:
atom_state, bond_state, connectivity = inputs
else:
atom_state, bond_state, connectivity, global_state = inputs
batch_size = tf.shape(atom_state)[0]
graph_elements = tf.concat([atom_state, bond_state], axis=1)
query = self.query_layer(graph_elements) # [B,N,S,H]
query = tf.transpose(query, perm=[0, 2, 1])
value = self.transpose_scores(self.value_layer(graph_elements)) # [B,N,S,H]
attention_probs = tf.nn.softmax(query)
context = tf.matmul(tf.expand_dims(attention_probs, 2), value)
context = tf.reshape(context, [batch_size, self.num_heads*self.units])
if self.dropout > 0.:
context = self.dropout_layer(context)
if self.use_global:
global_state = self.add([global_state, context])
else:
global_state = context
return global_state
def get_config(self):
config = super(GlobalUpdate, self).get_config()
config.update(
{"units": self.units,
"num_heads": self.num_heads})
return config
class GlobalSumPooling1D(GlobalPooling1D):
def __init__(self, data_format='channels_last', **kwargs):
super().__init__(data_format=data_format, **kwargs)
self.supports_masking = True
def call(self, inputs, mask=None):
steps_axis = 1 if self.data_format == 'channels_last' else 2
if mask is not None:
mask = math_ops.cast(mask, backend.floatx())
mask = array_ops.expand_dims(
mask, 2 if self.data_format == 'channels_last' else 1)
inputs *= mask
return backend.sum(inputs, axis=steps_axis)
def compute_mask(self, inputs, mask=None):
return None
```
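The layers above are meant to be chained into a message-passing block: a `GlobalUpdate` summarizes atoms and bonds into a global vector, which `EdgeUpdate` and `NodeUpdate` then consume alongside the connectivity indices. Below is a hypothetical, untested wiring sketch; the vocabulary sizes, the feature width of 32, the attention sizes, and the single message-passing step are all assumptions, not values taken from this repository:
```python
# Hypothetical wiring sketch for the layers above (all sizes are assumptions).
import tensorflow as tf
from tensorflow.keras import layers

atom = layers.Input(shape=[None], dtype=tf.int64, name='atom')
bond = layers.Input(shape=[None], dtype=tf.int64, name='bond')
connectivity = layers.Input(shape=[None, 2], dtype=tf.int64, name='connectivity')

atom_state = layers.Embedding(64, 32, name='atom_embedding')(atom)
bond_state = layers.Embedding(64, 32, name='bond_embedding')(bond)

global_state = GlobalUpdate(units=8, num_heads=4)([atom_state, bond_state, connectivity])
bond_state = EdgeUpdate()([atom_state, bond_state, connectivity, global_state])
atom_state = NodeUpdate()([atom_state, bond_state, connectivity, global_state])

model = tf.keras.Model([atom, bond, connectivity], [atom_state, bond_state, global_state])
```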
#### File: rlmolecule/molecule_game/policy_data.py
```python
import io
import nfp
import numpy as np
import tensorflow as tf
import molecule_game.config as config
def parse_binary_data(binary_data, reward):
""" Use io and numpy to parse the binary data from postgresQL
"""
with io.BytesIO(binary_data.numpy()) as f:
parsed_data = dict(np.load(f, allow_pickle=True).items())
# This is something we could talk about; but I'm wondering if the best
# loss function for a boolean reward is a binary crossentropy
if reward == -1:
reward = 0
visit_probs = parsed_data.pop('visit_probs')
return (parsed_data['atom'], parsed_data['bond'],
parsed_data['connectivity'], int(reward), visit_probs)
def parse_data_tf(binary_data, reward):
"""tf.py_func wants a flat list of outputs, but here we restructure to
keras's desired (inputs, outputs) format"""
atom, bond, connectivity, reward, visit_probs = tf.py_function(
parse_binary_data, inp=[binary_data, reward],
Tout=[tf.int64, tf.int64, tf.int64, tf.int64, tf.float32])
# The py_func doesn't provide tensor shapes, and we'll need these for the
# padded batch operation
atom.set_shape([None, None])
bond.set_shape([None, None])
connectivity.set_shape([None, None, 2])
reward.set_shape([])
visit_probs.set_shape([None])
return ({'atom': atom, 'bond': bond, 'connectivity': connectivity},
(reward, visit_probs))
def create_dataset(sql_generator):
""" Given a generator that yields data (bytes), ranked_rewards (float) pairs,
zip these together into a tensorflow dataset.
"""
dataset = tf.data.Dataset.from_generator(sql_generator, output_types=(tf.string, tf.float32)) \
.repeat() \
.shuffle(config.policy_buffer_max_size) \
.map(parse_data_tf, num_parallel_calls=tf.data.experimental.AUTOTUNE) \
.padded_batch(config.batch_size,
padding_values=({'atom': nfp.zero, 'bond': nfp.zero, 'connectivity': nfp.zero}, (nfp.zero, 0.))) \
.prefetch(tf.data.experimental.AUTOTUNE)
return dataset
```
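`parse_binary_data` above expects the raw bytes of an `.npz` archive holding the per-position arrays. A hypothetical sketch of producing such a payload; the array shapes and key names are inferred from the parser, not from the actual database contents:
```python
# Sketch of a payload parse_binary_data could consume; shapes are illustrative.
import io
import numpy as np

with io.BytesIO() as f:
    np.savez(f,
             atom=np.ones((1, 4), dtype=np.int64),
             bond=np.ones((1, 6), dtype=np.int64),
             connectivity=np.zeros((1, 6, 2), dtype=np.int64),
             visit_probs=np.full((3,), 1 / 3, dtype=np.float32))
    binary_data = f.getvalue()
```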
#### File: rlmolecule/alphazero/alphazero_problem.py
```python
import os
import random
from abc import abstractmethod
from typing import Optional
import sqlalchemy
from rlmolecule.alphazero.alphazero_vertex import AlphaZeroVertex
from rlmolecule.tree_search.reward import Reward
from rlmolecule.mcts.mcts_problem import MCTSProblem
from rlmolecule.sql import Base, Session
from rlmolecule.sql.tables import GameStore, RewardStore
from rlmolecule.tree_search.graph_search_state import GraphSearchState
class AlphaZeroProblem(MCTSProblem):
def __init__(self,
engine: sqlalchemy.engine.Engine,
run_id: Optional[str] = None,
max_buffer_size: int = 200,
min_buffer_size: int = 50,
batch_size: int = 32,
**kwargs):
super(AlphaZeroProblem, self).__init__(**kwargs)
Base.metadata.create_all(engine, checkfirst=True)
Session.configure(bind=engine)
self._session = Session()
self.run_id = run_id if run_id is not None else os.environ.get('AZ_RUNID', 'not specified')
self.batch_size = batch_size
self.max_buffer_size = max_buffer_size
self.min_buffer_size = min_buffer_size
@abstractmethod
def get_value_and_policy(self, parent: AlphaZeroVertex) -> (float, {AlphaZeroVertex: float}):
"""
A user-provided function to get value and child prior estimates for the given vertex.
:return: (value_of_current_vertex, {child_vertex: child_prior for child_vertex in children})
"""
pass
@property
def session(self) -> 'sqlalchemy.orm.session.Session':
return self._session
def reward_wrapper(self, state: GraphSearchState) -> Reward:
"""A wrapper that caches reward calculations in a SQL database, and calls self.get_scaled_reward
:param state: The state for which rewards are cached
:return: the scaled reward
"""
existing_record = self.session.query(RewardStore).get((hash(state), self.run_id, state.serialize()))
if existing_record is not None:
reward = existing_record.reward
else:
reward, data = self.get_reward(state)
record = RewardStore(hash=hash(state),
run_id=self.run_id,
state=state.serialize(),
reward=reward,
data=data)
self.session.merge(record)
self.session.commit()
return self.reward_class(reward)
def _store_search_statistics(self, path: [], reward: Reward) -> None:
"""Store the game data in the replay buffer
:param path: The path data collected by AlphaZero._accumulate_path_data
:param reward: The final state's unscaled reward
"""
# path[-1] is the terminal state with no children
search_statistics = [
(vertex.state.serialize(), visit_probabilities)
for (vertex, visit_probabilities) in path[:-1]]
record = GameStore(id=str(self.id),
run_id=self.run_id,
raw_reward=reward.raw_reward,
scaled_reward=reward.scaled_reward,
search_statistics=search_statistics)
self.session.add(record)
self.session.commit()
    def iter_recent_games(self) -> (str, float, 'np.ndarray'):
        """Iterate over randomly chosen positions in games from the replay buffer
        :returns: a generator of (serialized_parent, scaled_reward, visit_probabilities) triples
"""
recent_games = self.session.query(GameStore).filter_by(run_id=self.run_id) \
.order_by(GameStore.index.desc()).limit(self.max_buffer_size)
for game in recent_games:
parent_state_string, visit_probabilities = random.choice(game.search_statistics)
yield parent_state_string, game.scaled_reward, visit_probabilities
```
#### File: rlmolecule/mcts/mcts.py
```python
import logging
import math
import random
from typing import Callable, List, Optional, Type
import numpy as np
from rlmolecule.tree_search.reward import Reward
from rlmolecule.mcts.mcts_problem import MCTSProblem
from rlmolecule.mcts.mcts_vertex import MCTSVertex
from rlmolecule.tree_search.graph_search import GraphSearch
from rlmolecule.tree_search.graph_search_state import GraphSearchState
logger = logging.getLogger(__name__)
class MCTS(GraphSearch[MCTSVertex]):
def __init__(
self,
problem: MCTSProblem,
ucb_constant: float = math.sqrt(2),
vertex_class: Optional[Type[MCTSVertex]] = None,
) -> None:
super().__init__(MCTSVertex if vertex_class is None else vertex_class)
self._problem: MCTSProblem = problem
self.ucb_constant: float = ucb_constant
@property
def problem(self) -> MCTSProblem:
return self._problem
def run(
self,
state: Optional[GraphSearchState] = None,
num_mcts_samples: int = 256,
max_depth: int = 1000000,
action_selection_function: Optional[Callable[[MCTSVertex], MCTSVertex]] = None,
reset_canonicalizer: bool = True,
) -> ([], float):
"""
Run the MCTS search from the given starting state (or the root node if not provided). This function runs a
given number of MCTS iterations per step, and then descends the action space according to the
provided `action_selection_function` (softmax sampling of visit counts if not provided).
:param num_mcts_samples: number of samples to perform at each level of the MCTS search
:param max_depth: the maximum search depth.
:param state: the starting state, or if not provided, the state returned from _get_root()
:param action_selection_function: a function used to select among the possible next actions. Defaults to
softmax sampling by visit counts.
:param reset_canonicalizer: whether to reset the graph canonicalizer in advance of the run
:return: The search path (as a list of vertexes) and the reward from this search.
"""
self.problem.initialize_run()
if reset_canonicalizer:
self.canonicalizer.reset()
vertex = self._get_root() if state is None else self.get_vertex_for_state(state)
action_selection_function = action_selection_function if action_selection_function is not None \
else self.softmax_selection
path: [] = []
for _ in range(max_depth):
# todo: this loop is odd, we're sampling terminal nodes a whole bunch of extra times
self.sample(vertex, num_mcts_samples)
self._accumulate_path_data(vertex, path)
if len(vertex.children) == 0:
return path, self.problem.reward_wrapper(vertex.state)
logger.debug(f'{vertex} has children { {child: (round(child.value, 2), child.visit_count) for child in vertex.children} }')
vertex = action_selection_function(vertex)
logger.warning(f"{self} reached max_depth.")
return path, math.nan # todo: make sure this returns a reward class
def sample(
self,
vertex: MCTSVertex,
num_mcts_samples: int = 1,
) -> None:
"""
Perform MCTS sampling from the given vertex.
"""
for _ in range(num_mcts_samples):
search_path = self._select(vertex)
value = self._evaluate(search_path)
self._backpropagate(search_path, value)
# noinspection PyMethodMayBeStatic
def _accumulate_path_data(self, vertex: MCTSVertex, path: []):
path.append(vertex)
def _select(
self,
root: MCTSVertex,
) -> [MCTSVertex]:
"""
Selection step of MCTS
From Wikipedia (https://en.wikipedia.org/wiki/Monte_Carlo_tree_search):
Selection: Start from root R and select successive child vertices until a leaf vertex L is reached.
The root is the current game state and a leaf is any vertex that has a potential child from which no simulation
(playout) has yet been initiated. The section below says more about a way of biasing choice of child vertices that
lets the game tree expand towards the most promising moves, which is the essence of Monte Carlo tree search.
"""
search_path = [root]
while True:
current = search_path[-1]
children = current.children
if children is None or len(children) == 0:
return search_path
search_path.append(max(children, key=lambda child: self._ucb_score(current, child)))
def _expand(self, leaf: MCTSVertex) -> None:
"""
Expansion step of MCTS
From Wikipedia (https://en.wikipedia.org/wiki/Monte_Carlo_tree_search):
Expansion: Unless L ends the game decisively (e.g. win/loss/draw) for either player, create one (or more) child
vertices and choose vertex C from one of them. Child vertices are any valid moves from the game position defined by L.
"""
if leaf.children is None:
leaf.children = [self.get_vertex_for_state(state) for state in leaf.state.get_next_actions()]
def _evaluate(
self,
search_path: [MCTSVertex],
) -> Reward:
"""
Estimates the value of a leaf vertex.
Simulation step of MCTS.
From Wikipedia (https://en.wikipedia.org/wiki/Monte_Carlo_tree_search):
Simulation: Complete one random playout from vertex C. This step is sometimes also called playout or rollout.
A playout may be as simple as choosing uniform random moves until the game is decided (for example in chess,
the game is won, lost, or drawn).
:return: value estimate of the given leaf vertex
"""
assert len(search_path) > 0, 'Invalid attempt to evaluate an empty search path.'
leaf = search_path[-1]
# This `expand` call sets up further visits for this node, but visits to children
# aren't tracked below the given leaf node
self._expand(leaf)
state = leaf.state
while True:
children = state.get_next_actions()
if len(children) == 0:
return self.problem.reward_wrapper(state)
state = random.choice(children)
@staticmethod
def _backpropagate(search_path: [MCTSVertex], value: Reward):
"""
Backpropagation step of MCTS
From Wikipedia (https://en.wikipedia.org/wiki/Monte_Carlo_tree_search):
Backpropagation: Use the result of the playout to update information in the vertices on the search_path from C to R.
"""
for vertex in reversed(search_path):
vertex.update(value.scaled_reward)
@staticmethod
def visit_selection(parent: MCTSVertex) -> MCTSVertex:
return max(parent.children, key=lambda child: child.visit_count)
@staticmethod
def softmax_selection(parent: MCTSVertex) -> MCTSVertex:
children: List[MCTSVertex] = parent.children
visit_counts = np.array([child.visit_count for child in children])
visit_counts -= visit_counts.max()
visit_softmax = np.exp(visit_counts) / sum(np.exp(visit_counts))
return children[np.random.choice(range(len(children)), size=1, p=visit_softmax)[0]]
def _get_root(self) -> MCTSVertex:
return self.get_vertex_for_state(self.problem.get_initial_state())
def _ucb_score(self, parent: MCTSVertex, child: MCTSVertex) -> float:
"""Calculates the UCB1 score for the given child vertex. From <NAME>., <NAME>., & <NAME>. (2002).
Machine Learning, 47(2/3), 235–256. doi:10.1023/a:1013689704352
:param child: Vertex for which the UCB score is desired
:return: UCB1 score.
"""
if parent.visit_count == 0:
raise RuntimeError("Child {} of parent {} with zero visits".format(child, self))
if child.visit_count == 0:
return math.inf
return child.value + self.ucb_constant * math.sqrt(math.log(parent.visit_count) / child.visit_count)
```
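The selection step above is driven entirely by `_ucb_score`, which implements the standard UCB1 trade-off between a child's mean value and an exploration bonus. A standalone arithmetic illustration with made-up statistics:
```python
# UCB1 as computed by _ucb_score, with made-up visit counts and value.
import math

parent_visits, child_visits = 20, 5
child_value, ucb_constant = 0.6, math.sqrt(2)
score = child_value + ucb_constant * math.sqrt(math.log(parent_visits) / child_visits)
print(round(score, 3))  # exploitation term plus exploration bonus
```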
#### File: rlmolecule/molecule/molecule_problem.py
```python
import logging
from abc import ABC
from typing import Dict, Optional
import rdkit
import sqlalchemy
from rlmolecule.alphazero.tfalphazero_problem import TFAlphaZeroProblem
from rlmolecule.mcts.mcts_problem import MCTSProblem
from rlmolecule.molecule.molecule_config import MoleculeConfig
from rlmolecule.molecule.molecule_state import MoleculeState
from rlmolecule.molecule.policy.model import policy_model
from rlmolecule.molecule.policy.preprocessor import MolPreprocessor, load_preprocessor
logger = logging.getLogger(__name__)
class MoleculeProblem(MCTSProblem, ABC):
def __init__(self,
config: MoleculeConfig,
*args,
**kwargs):
self._config = config
super(MoleculeProblem, self).__init__(*args, **kwargs)
def get_initial_state(self) -> MoleculeState:
return MoleculeState(rdkit.Chem.MolFromSmiles('C'), self._config)
class MoleculeTFAlphaZeroProblem(MoleculeProblem, TFAlphaZeroProblem, ABC):
def __init__(self,
engine: sqlalchemy.engine.Engine,
config: MoleculeConfig,
preprocessor: Optional[MolPreprocessor] = None,
preprocessor_data: Optional[str] = None,
features: int = 64,
num_heads: int = 4,
num_messages: int = 3,
**kwargs) -> None:
self.num_messages = num_messages
self.num_heads = num_heads
self.features = features
self.preprocessor = preprocessor if preprocessor else load_preprocessor(preprocessor_data)
super(MoleculeTFAlphaZeroProblem, self).__init__(config=config, engine=engine, **kwargs)
def policy_model(self) -> 'tf.keras.Model':
return policy_model(
self.preprocessor,
features=self.features,
num_heads=self.num_heads,
num_messages=self.num_messages)
def get_policy_inputs(self, state: MoleculeState) -> Dict:
return self.preprocessor.construct_feature_matrices(state.molecule)
```
#### File: molecule/policy/preprocessor.py
```python
import os
from typing import Dict, Optional
import nfp
import numpy as np
import rdkit
def atom_featurizer(atom: rdkit.Chem.Atom) -> str:
""" Return an string representing the atom type
:param atom: the rdkit.Atom object
:return: a string representation for embedding
"""
return str((
atom.GetSymbol(),
atom.GetNumRadicalElectrons(),
atom.GetFormalCharge(),
atom.GetChiralTag().name,
atom.GetIsAromatic(),
nfp.get_ring_size(atom, max_size=6),
atom.GetDegree(),
atom.GetTotalNumHs(includeNeighbors=True)
))
def bond_featurizer(bond: rdkit.Chem.Bond, flipped: bool = False) -> str:
"""Return a string representation of the given bond
:param bond: The rdkit bond object
:param flipped: Whether the bond is considered in the forward or reverse direction
:return: a string representation of the bond type
"""
if not flipped:
atoms = "{}-{}".format(
*tuple((bond.GetBeginAtom().GetSymbol(),
bond.GetEndAtom().GetSymbol())))
else:
atoms = "{}-{}".format(
*tuple((bond.GetEndAtom().GetSymbol(),
bond.GetBeginAtom().GetSymbol())))
bstereo = bond.GetStereo().name
btype = str(bond.GetBondType())
ring = 'R{}'.format(nfp.get_ring_size(bond, max_size=6)) if bond.IsInRing() else ''
return " ".join([atoms, btype, ring, bstereo]).strip()
def filter_keys(attribute: Dict) -> Dict:
"""Remove unnecessary model inputs from nfp.SmilesPreprocessor outputs
:param attribute: A dictionary containing unnecessary keys
:return: The same dictionary with only 'atom', 'bond', and 'connectivity' arrays
"""
return {key: value for key, value in attribute.items()
if key in {'atom', 'bond', 'connectivity'}}
class MolPreprocessor(nfp.preprocessing.SmilesPreprocessor):
output_types = filter_keys(nfp.preprocessing.SmilesPreprocessor.output_types)
output_shapes = filter_keys(nfp.preprocessing.SmilesPreprocessor.output_shapes)
padding_values = filter_keys(nfp.preprocessing.SmilesPreprocessor.padding_values)
def padded_shapes(self, *args, **kwargs):
return filter_keys(super().padded_shapes(*args, **kwargs))
def construct_feature_matrices(self, mol: rdkit.Chem.Mol, train: bool = False) -> {}:
""" Convert an rdkit Mol to a list of tensors
'atom' : (n_atom,) length list of atom classes
'bond' : (n_bond,) list of bond classes
'connectivity' : (n_bond, 2) array of source atom, target atom pairs.
"""
self.atom_tokenizer.train = train
self.bond_tokenizer.train = train
if self.explicit_hs:
mol = rdkit.Chem.AddHs(mol)
n_atom = mol.GetNumAtoms()
n_bond = 2 * mol.GetNumBonds()
# If its an isolated atom, add a self-link
if n_bond == 0:
n_bond = 1
atom_feature_matrix = np.zeros(n_atom, dtype='int')
bond_feature_matrix = np.zeros(n_bond, dtype='int')
connectivity = np.zeros((n_bond, 2), dtype='int')
if n_bond == 1:
bond_feature_matrix[0] = self.bond_tokenizer('self-link')
bond_index = 0
for n, atom in enumerate(mol.GetAtoms()):
# Atom Classes
atom_feature_matrix[n] = self.atom_tokenizer(
self.atom_features(atom))
start_index = atom.GetIdx()
for bond in atom.GetBonds():
# Is the bond pointing at the target atom
rev = bond.GetBeginAtomIdx() != start_index
# Bond Classes
bond_feature_matrix[bond_index] = self.bond_tokenizer(
self.bond_features(bond, flipped=rev))
# Connectivity
if not rev: # Original direction
connectivity[bond_index, 0] = bond.GetBeginAtomIdx()
connectivity[bond_index, 1] = bond.GetEndAtomIdx()
else: # Reversed
connectivity[bond_index, 0] = bond.GetEndAtomIdx()
connectivity[bond_index, 1] = bond.GetBeginAtomIdx()
bond_index += 1
return {
'atom': atom_feature_matrix,
'bond': bond_feature_matrix,
'connectivity': connectivity,
}
def load_preprocessor(saved_preprocessor_file: Optional[str] = None) -> MolPreprocessor:
"""Load the MolPreprocessor object from either the default json file or a provided data file
:param saved_preprocessor_file: directory of the saved nfp.Preprocessor json data
:return: a MolPreprocessor instance for the molecule policy network
"""
preprocessor = MolPreprocessor(atom_features=atom_featurizer,
bond_features=bond_featurizer,
explicit_hs=False)
if not saved_preprocessor_file:
saved_preprocessor_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'preprocessor.json')
preprocessor.from_json(saved_preprocessor_file)
return preprocessor
```
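A hypothetical usage sketch for the preprocessor above; it assumes rdkit is installed and that the bundled `preprocessor.json` referenced by `load_preprocessor` is present next to the module:
```python
# Sketch only: featurize a small molecule with the MolPreprocessor above.
import rdkit.Chem

preprocessor = load_preprocessor()
features = preprocessor.construct_feature_matrices(rdkit.Chem.MolFromSmiles('CCO'))
print(features['atom'].shape, features['bond'].shape, features['connectivity'].shape)
```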
#### File: tests/mcts/test_hash_canonicalization.py
```python
import random
from rlmolecule.mcts.mcts import MCTS
from rlmolecule.mcts.mcts_vertex import MCTSVertex
from rlmolecule.molecule.molecule_config import MoleculeConfig
from tests.qed_optimization_problem import QEDWithRandomPolicy
def test_get_successors(engine):
config = MoleculeConfig(max_atoms=4,
min_atoms=1,
tryEmbedding=False,
sa_score_threshold=None,
stereoisomers=False)
problem = QEDWithRandomPolicy(config=config, engine=engine)
game = MCTS(problem)
root: MCTSVertex = game._get_root()
game._expand(root)
root.update(1.0)
successor0: MCTSVertex = root.children[0]
game._expand(successor0)
successor0.update(1.0)
successor1: MCTSVertex = root.children[1]
game._expand(successor1)
successor1.update(1.0)
random.seed(42)
game.sample(root, 5)
child1 = root.children[1].children[0] # CCN
child2 = root.children[0].children[1] # CCN
assert (child1 == child2)
assert (child1.value == child2.value)
assert (child1.visit_count == child2.visit_count)
child1.update(1.)
assert (child1.value == child2.value)
assert (child1.visit_count == child2.visit_count)
``` |
{
"source": "jlawcordova/coordinatesystem",
"score": 4
} |
#### File: coordinatesystem/coordinatesystem/component.py
```python
import sys
from math import cos, sin, atan, pow, sqrt
class Point2D:
"""
Represents a point in a 2-dimensional plane.
    :param x: X-coordinate of the point.
    :param y: Y-coordinate of the point.
"""
def __init__(self, x = 0.0, y = 0.0):
self.x = x
self.y = y
def __str__(self):
return '(%f, %f)' % (self.x, self.y)
@classmethod
def from_polar_coordinate(cls, magnitude, angle):
"""
Creates a point in a 2-dimensional plane
from a given polar coordinate.
:param magnitude: Magnitude of the polar coodinate.
:param angle: Angle of the polar coordinate.
:returns: Point2D object.
"""
x = magnitude * cos(angle)
y = magnitude * sin(angle)
point2d = cls(x, y)
return point2d
def rotate(self, angle):
"""
Rotates the point by an angle in radians.
:param angle: Angle in radians.
"""
cosang = cos(angle)
sinang = sin(angle)
temp = self.x
self.x = (self.x * cosang) + (self.y * sinang)
self.y = (-temp * sinang) + (self.y * cosang)
def offset(self, xoffset, yoffset):
"""
Offsets the point by an x and y displacement.
:param xoffset: Offset in the x-axis.
:param yoffset: Offset in the y-axis
"""
self.x += xoffset
self.y += yoffset
def get_distance(self, point):
"""
Gets the distance from the point and another point.
:param point: Point to calculate the distance with.
:returns: Distance between the two points.
"""
xdistance = self.x - point.x
ydistance = self.y - point.y
distance = sqrt(pow(xdistance, 2) + pow(ydistance, 2))
return distance
def get_slope(self, point):
"""
Gets the slope of the line formed from the point
and another point.
:param point: Point to calculate the slope with.
:returns: Slope of the line formed from the two points.
"""
xdistance = self.x - point.x
ydistance = self.y - point.y
if xdistance != 0:
slope = ydistance/xdistance
else:
# Avoid a divide by zero error. Use the smallest possible float instead.
slope = ydistance/sys.float_info.min
return slope
class Line2D:
"""
Represents an infinite-lengthed line in a 2-dimensional plane.
:param slope: Slope of the line.
:param yintercept: Y-intercept of the line.
"""
def __init__(self, slope = 1.0, yintercept = 0.0):
self.slope = slope
self.yintercept = yintercept
def __str__(self):
return 'y = %2fx + %f' % (self.slope, self.yintercept)
@classmethod
def from_two_points(cls, point1, point2):
"""
Creates an infinite-lengthed line in a 2-dimensional plane given
two points on the line.
:param point1: First point on the line.
:param point2: Second point on the line.
:returns: Line2D object.
"""
slope = point1.get_slope(point2)
# Since the equation of a line in slope-intercept form is y = mx - m(x1) + (y1),
# where - m(x1) + (y1) is the y-intercept.
yintercept = (-(slope * point1.x) + point1.y)
line2d = cls(slope, yintercept)
return line2d
@classmethod
def from_point_slope(cls, slope, point):
"""
Creates an infinite-lengthed line in a 2-dimensional plane given
a point on the line and the line's slope.
:param slope: Slope of the line.
:param point: Point on the line.
:returns: Line2D object.
"""
# Since the equation of a line in slope-intercept form is y = mx - m(x1) + (y1),
# where - m(x1) + (y1) is the y-intercept.
yintercept = (-(slope * point.x) + point.y)
line2d = cls(slope, yintercept)
return line2d
def includes_point(self, point):
"""
Determines if a point is on or included in the line.
:param point: Point to be determine if on the line.
:returns: Boolean stating if the point is on the line.
"""
# Given a value x-coordinate, determine the corresponding y-coordinate
# on the line (y = mx + b).
        liney = (self.slope * point.x) + self.yintercept
return (liney == point.y)
def is_above_point(self, point):
"""
Determines if the line is above a given point.
.. note::
If the line is vertical with a positive infinite slope,
the result of this function will be true if the point is on
the left side of the line.
:param point: Point to determine the position with.
:returns: Boolean stating if the line is above the given point.
"""
# Given a value x-coordinate, determine the corresponding y-coordinate
# on the line (y = mx + b).
        liney = (self.slope * point.x) + self.yintercept
return (liney > point.y)
def get_angle_between(self, line):
"""
Gets the angle between the line and another line.
:param line: Line used to calculate the angle in between.
:returns: Angle in between the two line in radians.
"""
angle = atan((self.slope - line.slope) / (1 + (self.slope * line.slope)))
return angle
``` |
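A brief usage sketch for `Point2D` and `Line2D` above; the coordinates are chosen arbitrarily:
```python
# Sketch only: manipulate a point and build a line through two points.
from math import pi

point = Point2D(3.0, 4.0)
point.rotate(pi / 2)      # rotate about the origin
point.offset(1.0, -1.0)

line = Line2D.from_two_points(Point2D(0.0, 0.0), Point2D(1.0, 2.0))
print(point, line, line.includes_point(Point2D(2.0, 4.0)))
```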
{
"source": "JLawrence-SADA/memegen",
"score": 2
} |
#### File: app/utils/meta.py
```python
from urllib.parse import unquote
import aiohttp
from sanic.log import logger
from .. import settings
def get_watermark(request, watermark: str) -> tuple[str, bool]:
updated = False
if watermark == "none":
watermark = ""
elif watermark:
if watermark == settings.DEFAULT_WATERMARK:
logger.warning(f"Redundant watermark: {watermark}")
updated = True
elif watermark not in settings.ALLOWED_WATERMARKS:
logger.warning(f"Unknown watermark: {watermark}")
watermark = settings.DEFAULT_WATERMARK
updated = True
else:
watermark = settings.DEFAULT_WATERMARK
return watermark, updated
async def track(request, lines: list[str]):
text = " ".join(lines).strip()
trackable = not any(
name in request.args for name in ["height", "width", "watermark"]
)
if text and trackable and settings.REMOTE_TRACKING_URL:
async with aiohttp.ClientSession() as session:
params = dict(
text=text,
source="memegen.link",
context=unquote(request.url),
)
logger.info(f"Tracking request: {params}")
response = await session.get(settings.REMOTE_TRACKING_URL, params=params)
if response.status != 200:
try:
message = await response.json()
except aiohttp.client_exceptions.ContentTypeError:
message = response.text
logger.error(f"Tracker response: {message}")
``` |
{
"source": "JLazarte/notebook_convert",
"score": 2
} |
#### File: notebook_convert/nb_lib/formatter.py
```python
import os
import nbformat
from nbconvert import MarkdownExporter, PDFExporter, RSTExporter
from nbconvert.writers import FilesWriter
SUPPORTED_FORMATS = {"md", "pdf", "rst"}
SUPPORTED_DESTINATIONS_MODES = {"same_place", "mirror_folder"}
class Formatter:
def __init__(self, output_format, destination_mode):
assert output_format in SUPPORTED_FORMATS, f"supported formats are {SUPPORTED_FORMATS}"
assert (
destination_mode in SUPPORTED_DESTINATIONS_MODES
), f"supported destination modes are {SUPPORTED_DESTINATIONS_MODES}"
self.read_encoding = "utf-8"
self.write_encoding = "utf-8"
self.format = output_format
self.destination_mode = destination_mode
if self.format == "pdf":
pdf = PDFExporter()
pdf.exclude_output_prompt = True
pdf.exclude_input = True
self.exporter = pdf
elif self.format == "rst":
self.exporter = RSTExporter()
else:
self.exporter = MarkdownExporter()
def get_output_dir_base(self, file):
isMirrorFolder = self.destination_mode == "mirror_folder"
return f"converted/to_{self.format}/" if isMirrorFolder else ""
def get_output_file_path(self, file):
filePath = file.split("/")[0:-1]
isMultiFilesFormat = self.format in ["rst", "md"]
if isMultiFilesFormat:
filePath.append(self.dest_file(file, withFormat=False))
return "/".join(filePath) + "/" if len(filePath) > 0 else ""
def dst_folder(self, file):
return self.get_output_dir_base(file) + self.get_output_file_path(file)
def dest_file(self, file, withFormat=True):
return file.split("/")[-1].replace(".ipynb", "." + self.format if withFormat else "")
def dst_path(self, file):
return self.dst_folder(file) + self.dest_file(file)
def convert(self, file):
assert os.path.exists(file), f"this should not happen, path {file} must exist"
body, resources = self.export(file)
fw = FilesWriter()
fw._makedir(self.dst_folder(file))
fw.build_directory = self.dst_folder(file)
fw.write(body, resources, notebook_name=self.dest_file(file, withFormat=False))
def export(self, file):
with open(file, "r", encoding=self.read_encoding) as f:
nb = nbformat.read(f, as_version=4)
body, resources = self.exporter.from_notebook_node(nb)
return body, resources
def needs_format(self, file):
f_path = self.dst_path(file)
if not os.path.exists(f_path):
return True
notebook_modified = os.stat(file).st_mtime
formatted_modified = os.stat(f_path).st_mtime
return notebook_modified > formatted_modified
def save_figures(self, resources):
if "outputs" not in resources:
return
        for name, bytes_ in resources["outputs"].items():
print(f"name = {name}, bytes = {len(bytes_)}")
for key, value in resources.items():
pass
```
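A hypothetical end-to-end sketch of the `Formatter` above; the notebook path is made up, and nbconvert plus its Markdown exporter must be installed:
```python
# Sketch only: convert a notebook to Markdown into a mirrored output folder.
formatter = Formatter("md", "mirror_folder")
notebook = "notebooks/analysis.ipynb"    # hypothetical path
if formatter.needs_format(notebook):
    formatter.convert(notebook)          # writes under converted/to_md/...
```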
#### File: notebook_convert/nb_lib/git.py
```python
import pygit2
from pygit2 import GIT_STATUS_INDEX_NEW, GIT_STATUS_INDEX_MODIFIED
STAGED_STATUSES = {GIT_STATUS_INDEX_NEW, GIT_STATUS_INDEX_MODIFIED}
def is_staged(file):
repo = pygit2.Repository(".")
staged_files = [f for f, flag in repo.status().items() if flag in STAGED_STATUSES]
return file in staged_files
``` |
{
"source": "jlaz/BISTRO-Starter-Kit",
"score": 3
} |
#### File: BISTRO-Starter-Kit/utilities/input_sampler.py
```python
from collections import Counter
from pathlib import Path
import numpy as np
import pandas as pd
from utils import lazyprop
MASS_TRANSIT_FARE_FILE = "MassTransitFares.csv"
AGE_RANGE_LOWER = np.array([i for i in range(1, 116, 5)])
AGE_RANGE_UPPER = np.array([i for i in range(5, 120, 5)])
INCOME_RANGE_LOWER = np.array([0] + [i for i in range(5000, 150000, 5000)])
INCOME_RANGE_UPPER = np.array([i for i in range(4999, 149999, 5000)] + [150000])
def scenario_agencies(data_dir, scenario_name):
"""Given root data directory and scenario name, computes a mapping
of agency names to their respective paths.
Parameters
----------
data_dir : Path
Absolute path to root of data directory
scenario_name : str
Name of scenario with GTFS data
Returns
-------
dict
Dictionary of agency names mapped to directories containing files comprising their GTFS
data.
"""
gtfs_root = (data_dir / scenario_name).absolute()
return {p.stem: p for p in
gtfs_root.iterdir()}
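# A minimal usage sketch; the directory and scenario names are placeholders taken from
# the Starter-Kit layout referenced elsewhere in this repository.
def _example_list_agencies():
    agency_paths = scenario_agencies(Path("reference-data"), "sioux_faux")
    # e.g. {"sioux_faux_bus_lines": Path("reference-data/sioux_faux/sioux_faux_bus_lines")}
    return agency_paths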
class AgencyGtfsDataManager(object):
def __init__(self, agency_gtfs_path):
"""Used to cache an agency's GTFS data for sampling purposes
Parameters
----------
agency_gtfs_path : pathlib.Path object
Directory containing the agency's gtfs data
"""
self.agency_gtfs_path = agency_gtfs_path
@lazyprop
def routes(self):
return pd.read_csv(self.agency_gtfs_path / "gtfs_data/routes.txt", header=0, index_col=1,
na_values=None,
delimiter=',')
@lazyprop
def vehicle_types(self):
return pd.read_csv(self.agency_gtfs_path / "availableVehicleTypes.csv", header=0,
index_col=0, na_values=None,
delimiter=',')
@lazyprop
def trips(self):
return pd.read_csv(self.agency_gtfs_path / "gtfs_data/trips.txt", header=0, index_col=2,
na_values=None,
delimiter=',')
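# A minimal usage sketch, assuming the agency folder contains gtfs_data/routes.txt,
# gtfs_data/trips.txt and availableVehicleTypes.csv as the lazy properties above expect;
# the path below is a hypothetical placeholder.
def _example_gtfs_manager(agency_path="reference-data/sioux_faux/sioux_faux_bus_lines"):
    manager = AgencyGtfsDataManager(Path(agency_path))
    return manager.routes.shape[0], manager.trips.shape[0], manager.vehicle_types.shape[0]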
def sample_vehicle_fleet_mix_input(num_records, gtfs_manager, bus_set=None):
"""Generate random `VehicleFleetMix` input according to possible substitute
vehicle trip ids available for an agency.
Parameters
----------
num_records : int
Number of randomly sampled records to create.
gtfs_manager : `AgencyGtfsDataManager`
An instance of the `AgencyGtfsDataManager` for the target agency.
bus_set : list of strings
A list of possible bus types to sample from, if we don't want to sample from all
bus types. If bus_set is None, samples from all bus types.
Returns
-------
`pd.DataFrame`
`num_records` `VehicleFleetMix` records. These are unique by `routeId`
for the `agencyId` specified on the `gtfs_manager`
Raises
------
`ValueError`
If the `num_records` is in excess of the number of routes that an agency schedules buses on.
"""
df_columns = ["agencyId", "routeId", "vehicleTypeId"]
if num_records == 0:
return pd.DataFrame({k: [] for k in df_columns})
max_num_routes = gtfs_manager.routes.shape[0]
if num_records > max_num_routes:
raise ValueError(
"More samples requested than the number of routes available in agency; please enter a "
"number less than {}".format(
max_num_routes))
route_agency_sample = gtfs_manager.routes.sample(num_records)
routes = pd.Series(route_agency_sample.index.values)
agency = pd.Series(route_agency_sample.agency_id.values)
if bus_set is None:
vehicles = pd.Series((gtfs_manager.vehicle_types.filter(like="BUS", axis=0))
.sample(num_records, replace=True).index)
else:
vehicles = pd.Series(bus_set).sample(num_records, replace=True).reset_index(drop=True)
df = pd.concat([agency, routes, vehicles], axis=1, ignore_index=True)
df.columns = df_columns
return df
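# A minimal usage sketch: sample a fleet mix restricted to two bus types. The bus type
# ids below are hypothetical examples, and the agency must have at least 5 routes.
def _example_fleet_mix(gtfs_manager):
    return sample_vehicle_fleet_mix_input(5, gtfs_manager, bus_set=["BUS-DEFAULT", "BUS-SMALL-HD"])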
def _sample_census_interval(census_type):
if census_type == "age":
eps = AGE_RANGE_LOWER, AGE_RANGE_UPPER
elif census_type == "income":
eps = INCOME_RANGE_LOWER, INCOME_RANGE_UPPER
else:
raise ValueError("Undefined census type!")
right_ep = np.random.choice(eps[1])
left_ep = np.random.choice(eps[0][eps[0] < right_ep])
return "[{}:{}]".format(left_ep, right_ep)
def sample_frequency_adjustment_input(num_service_periods, gtfs_manager):
"""Generate random `FrequencyAdjustment` inputs according to trips run by
an agency.
Creates `num_records` frequency adjustment records where fields for each record where the
`headway_secs` field is randomly chosen from a range of between `min_headway_seconds`
(per the route.txt file in the corresponding gtfs data for the trip) and 7200
seconds at intervals of 60 seconds, and the `min_time` and `max_time` fields
are sampled between 0 and 86340 seconds, respectively (i.e., the possible
minimum and maximum number of seconds in a day, given the headway interval).
Note that a frequency adjustment is really tied to a route based on a particular trip.
The trip serves as a template for the frequency adjustment. See the documentation
for further details.
Parameters
----------
num_service_periods : int
Number of service periods with a new headway that can be added to a route.
gtfs_manager : `AgencyGtfsDataManager`
An instance of the `AgencyGtfsDataManager` for the target agency.
Returns
-------
`pd.DataFrame`
`num_records` `FrequencyAdjustmentInput` records.
"""
if num_service_periods > 5:
raise ValueError(
"The maximum number of service periods per route is equal to {0} although it should not exceed 5.".format(
num_service_periods))
df_columns = ['route_id', 'start_time', 'end_time', 'headway_secs']
grouped = [np.random.randint(num_service_periods) * [route_id] for route_id in
gtfs_manager.routes.index.values.astype(int)]
route_id_list = [i for j in grouped for i in j]
min_secs = 0
max_secs = 86399
min_headway_seconds = 180
max_headway_seconds = 7199
frequency_data = []
route_frequency = Counter(route_id_list).items()
for route_id, route_num_service_periods in route_frequency:
st_et_flat = np.sort(
np.random.choice(np.arange(min_secs, max_secs, 60), route_num_service_periods, replace=False))
for st, et in zip(st_et_flat, st_et_flat[1:]):
headway = np.random.choice(np.arange(min_headway_seconds, max_headway_seconds, 60))
frequency_data.append([route_id, st, et, headway])
frequency_adjustment_df = pd.DataFrame(frequency_data, columns=df_columns)
frequency_adjustment_df['exact_times'] = 0
return frequency_adjustment_df
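# A minimal usage sketch: sample frequency adjustments allowing up to 3 service periods
# per route for a previously built AgencyGtfsDataManager.
def _example_frequency_adjustments(gtfs_manager):
    return sample_frequency_adjustment_input(3, gtfs_manager)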
def sample_mode_incentives_input(num_records, gtfs_manager=None, min_incentive=0.1, max_incentive=50):
"""Generate random mode incentives inputs based on modes available for
subsidies.
Creates `num_records` ModeIncentivesInput records where fields for each record
are randomly sampled as follows:
* `age` : an interval with age values sampled uniformly from [1 .. 116]\cup[120] inclusive, in steps of 5.
* `mode` : uniformly from list of available modes for scenario.
* `income` : an interval with income values sampled uniformly from integers values
[0 to 144,999]\cup[150,000] in steps of 5,000.
* `amount` : uniformly from `range(0.1,50)`.
The amount of subsidy is rounded to the nearest $0.10.
Parameters
----------
num_records : int
Number of randomly sampled records to create.
gtfs_manager : `AgencyGtfsDataManager`, optional
An instance of the `AgencyGtfsDataManager` for the target agency.
min_incentive : float
Minimum amount accepted for an incentive according to the inputs specifications of the Starter-Kit
max_incentive : float
Maximum amount accepted for an incentive according to the inputs specifications of the Starter-Kit
Notes
-----
`gtfs_manager` added to support duck-typing this field.
Returns
-------
`DataFrame`
`num_records` `ModeIncentivesInput` records.
"""
df_columns = ['mode', 'age', 'income', 'amount']
if num_records == 0:
return pd.DataFrame({k: [] for k in df_columns})
possible_modes = ['OnDemand_ride', 'walk_transit', 'drive_transit']
modes = np.random.choice(possible_modes, num_records).tolist()
ages = [_sample_census_interval("age") for _ in range(num_records)]
incomes = [_sample_census_interval("income") for _ in range(num_records)]
amounts = [np.round(np.random.uniform(min_incentive, max_incentive), 1) for _ in range(num_records)]
return pd.DataFrame(np.array([modes, ages, incomes, amounts]).T,
columns=df_columns)
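# A minimal usage sketch: ten random incentive records. No GTFS data is needed, so
# gtfs_manager is left at its default of None (it is only kept for duck-typing).
def _example_mode_incentives():
    return sample_mode_incentives_input(10)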
def sample_mass_transit_fares_input(num_records, gtfs_manager, max_fare_amount=10.0):
"""Generate `num_records` random `PtFares` for an
agency (specified within `gtfs_manager`) by randomly sampling age and fare amount.
The fare amount will not exceed the maximum fare amount and cannot be less than $0.10 (else,
there shouldn't have been a fare assigned in the first place).
The age will be sampled from 1 to 120 inclusive (maximum age in scenario).
Parameters
----------
num_records : int
Number of randomly sampled records to create.
gtfs_manager : `AgencyGtfsDataManager`
An instance of the `AgencyGtfsDataManager` for the target agency.
max_fare_amount : float
The maximum fare amount that should be charged.
Returns
-------
`pd.DataFrame`
`num_records` `PtFares` records. These are unique by `routeId`
for the `agencyId` specified on the `gtfs_manager`
Raises
------
`ValueError`
If the `num_records` is in excess of the number of routes that an agency schedules
buses on.
"""
df_columns = ['agencyId', 'routeId', 'age', 'amount']
if num_records == 0:
return pd.read_csv('../submission-inputs/{0}'.format(MASS_TRANSIT_FARE_FILE))
max_num_routes = gtfs_manager.routes.shape[0]
if num_records > max_num_routes:
raise ValueError(
"More samples requested than the number of routes available in agency; please enter a "
"number less than {}".format(max_num_routes))
route_agency_sample = gtfs_manager.routes.sample(num_records)
routes = pd.Series(route_agency_sample.index.values)
agency = pd.Series(route_agency_sample.agency_id.values)
amounts = [np.round(np.random.uniform(0.1, max_fare_amount), 1) for _ in range(num_records)]
ages = [_sample_census_interval("age") for _ in range(num_records)]
return pd.DataFrame(np.array([agency, routes, ages, amounts]).T,
columns=df_columns)
```
#### File: BISTRO-Starter-Kit/utilities/plans_parser.py
```python
import pandas as pd
import numpy as np
from pathlib import Path
from data_parsing import extract_dataframe, open_xml
import gzip
from collections import defaultdict
# ########### 1. INTERMEDIARY FUNCTIONS ###########
def unzip_file(path: Path):
""" Unzips a file ending with .gz
Parameters
----------
path: pathlib.Path object or os.path object
Returns
-------
file object or path
An open gzip file object if the path ends with ".gz", otherwise the original path unchanged
"""
if Path(path).suffix == ".gz":
return gzip.open(path)
else:
return path
def parse_bus_fare_input(bus_fare_data_df, route_ids):
"""Processes the `MassTransitFares.csv` input file into a dataframe with rows = ages and columns = routes
Parameters
----------
bus_fare_data_df: pandas DataFrame
Bus fares extracted from the "submission-inputs/MassTransitFares.csv"
route_ids: list of strings
All routes ids where buses operate (from `routes.txt` file in the GTFS data)
Returns
-------
bus_fare_per_route_df: pandas DataFrame
Dataframe with rows = ages and columns = routes
"""
bus_fare_per_route_df = pd.DataFrame(np.zeros((120, len(route_ids))), columns=route_ids)
routes = bus_fare_data_df['routeId'].unique()
for r in routes:
if np.isnan(r):
cols = route_ids
else:
cols = int(r)
# get all fare rows for this route:
r_fares = bus_fare_data_df.loc[np.isnan(bus_fare_data_df['routeId']) if np.isnan(r) else bus_fare_data_df['routeId'] == r, :]
for i, row in r_fares.iterrows():
age_group = row['age']
left = age_group[0]
ages = age_group[1:-1].split(':')
right = age_group[-1]
if left == '(':
min_a = int(ages[0]) + 1
else:
min_a = int(ages[0])
if right == ')':
max_a = int(ages[1]) - 1
else:
max_a = int(ages[1])
bus_fare_per_route_df.loc[min_a:max_a, cols] = float(row['amount'])
return bus_fare_per_route_df
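# A minimal usage sketch, assuming a fares CSV with 'routeId', 'age' and 'amount' columns
# and a list of route ids taken from routes.txt; both arguments below are placeholders.
def _example_bus_fare_table(fares_csv, route_ids):
    bus_fare_data_df = pd.read_csv(fares_csv)
    return parse_bus_fare_input(bus_fare_data_df, route_ids)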
def calc_fuel_costs(legs_df, fuel_cost_dict):
# legs_df: legs_dataframe
# fuel_cost_dict: {fuel_type: $/MJoule}
# returns: legs_df augmented with an additional column of estimated fuel costs
legs_df.loc[:, "FuelCost"] = np.zeros(legs_df.shape[0])
for f in fuel_cost_dict.keys():
legs_df.loc[legs_df["fuelType"] == f.capitalize(), "FuelCost"] = (pd.to_numeric(
legs_df.loc[legs_df["fuelType"] == f.capitalize(), "fuel"]) * float(fuel_cost_dict[f])) / 1000000
return legs_df
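# A minimal usage sketch: fuel_cost_dict maps fuel type to $/MJ. The prices below are
# placeholder numbers for illustration, not values from the Starter Kit.
def _example_fuel_costs(legs_df):
    fuel_cost_dict = {"gasoline": 0.03, "diesel": 0.02, "food": 0.0, "electricity": 0.01}
    return calc_fuel_costs(legs_df, fuel_cost_dict)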
def calc_transit_fares(row, bus_fare_dict, person_df, trip_to_route):
pid = row['PID']
age = person_df.loc[pid,'Age']
vehicle = row['Veh']
route = trip_to_route[vehicle.split(':')[1]]
fare = bus_fare_dict.loc[age,route]
return fare
def calc_fares(legs_df, ride_hail_fares, bus_fare_dict, person_df, trip_to_route):
# legs_df: legs_dataframe
# ride_hail_fares: {'base': $, 'duration': $ per minute, 'distance': $ per mile} (as implemented below; distances are converted from km to miles)
# transit_fares isn't being used currently - would need to be updated to compute fare based on age
# returns: legs_df augmented with an additional column of estimated transit and on-demand ride fares
legs_df["Fare"] = np.zeros(legs_df.shape[0])
legs_df.loc[legs_df["Mode"] == 'bus', "Fare"] = legs_df.loc[legs_df["Mode"] == 'bus'].apply(
lambda row: calc_transit_fares(row, bus_fare_dict, person_df, trip_to_route), axis=1)
legs_df.loc[legs_df["Mode"] == 'OnDemand_ride', "Fare"] = ride_hail_fares['base'] + (
pd.to_timedelta(legs_df['Duration_sec']).dt.seconds / 60) * float(ride_hail_fares['duration']) + (
pd.to_numeric(
legs_df['Distance_m']) / 1000) * (
0.621371) * float(ride_hail_fares['distance'])
return legs_df
def one_path(path_trav, leg_id, pid, trip_id):
# extracts leg data from path traversal
# returns a leg_dataframe row
l_row = path_trav
leg_id_this = leg_id + l_row.name
leg_id_full = trip_id + "_l-" + str(leg_id_this)
veh_id = l_row["vehicle"]
leg_start_time = l_row['departureTime']
veh_type = l_row['vehicleType']
distance = l_row['length']
leg_end_time = l_row['arrivalTime']
leg_duration = int(leg_end_time) - int(leg_start_time)
leg_path = l_row['links']
leg_mode = l_row['mode']
leg_fuel = l_row['fuel']
leg_fuel_type = l_row['fuelType']
# return the leg record
return [pid, trip_id, leg_id_full, leg_mode, veh_id, veh_type, leg_start_time, leg_end_time, leg_duration, distance,
leg_path, leg_fuel, leg_fuel_type]
def parse_transit_trips(row, non_bus_path_traversal_events, bus_path_traversal_events, enter_veh_events):
# inputs:
# row: a row from transit_trips_df
# path_traversal_events: non-transit path traversal df
# bus_path_traversal_events: transit path traversal df
# enter_veh_events: enter vehicle events
leg_array = []
pid = row['PID']
trip_id = row['Trip_ID']
start_time = row['Start_time'].total_seconds()
duration = row['Duration_sec']
end_time = row['End_time']
mode = row['Mode']
# initiate the leg ID counter
leg_id = 0
# get all path traversals occurring within the time frame of this trip in which the driver is the person making this trip
path_trav = non_bus_path_traversal_events[
(non_bus_path_traversal_events['driver'] == pid) & (non_bus_path_traversal_events['arrivalTime'] <= end_time) & (
non_bus_path_traversal_events['departureTime'] >= start_time)]
path_trav = path_trav.reset_index(drop=True)
# get the vehicle entry events corresponding to this person during the time frame of this trip
veh_entries = enter_veh_events[
(enter_veh_events['person'] == pid) & (enter_veh_events['time'] >= start_time) & (
enter_veh_events['time'] <= end_time)]
# get bus entry events for this person & trip
bus_entries = veh_entries[veh_entries['vehicle'].str.startswith('siouxareametro-sd-us:', na=False)]
bus_entries = bus_entries.reset_index(drop=True)
if len(bus_entries) > 0:
prev_entry_time = start_time
for idx, bus_entry in bus_entries.iterrows():
if idx < len(bus_entries)-1:
next_entry = bus_entries.loc[idx+1]
next_entry_time = next_entry['time']
else:
next_entry_time = end_time
# get all path traversals occurring before the bus entry
prev_path_trav = path_trav[
(path_trav['arrivalTime'] <= bus_entry['time']) & (path_trav['arrivalTime'] >= prev_entry_time)]
# get all path traversals occurring after the bus entry
post_path_trav = path_trav[(path_trav['arrivalTime'] > bus_entry['time']) & (path_trav['arrivalTime'] <= next_entry_time)]
prev_path_trav = prev_path_trav.reset_index(drop=True)
post_path_trav = post_path_trav.reset_index(drop=True)
prev_entry_time = bus_entry['time']
# iterate through the path traversals prior to the bus entry
if len(prev_path_trav)>0:
these_legs = prev_path_trav.apply(lambda row1: one_path(row1, leg_id, pid, trip_id), axis=1)
leg_array.extend(these_legs)
# record transit leg
leg_id += 1
leg_id_full = trip_id + "_l-" + str(leg_id)
veh_id = bus_entry['vehicle']
leg_start_time = int(bus_entry['time'])
if len(post_path_trav)> 0:
leg_end_time = int(post_path_trav['departureTime'].values[0])
bus_path_trav = bus_path_traversal_events[(bus_path_traversal_events['vehicle'] == veh_id) & (
bus_path_traversal_events['arrivalTime'] <= leg_end_time) & (bus_path_traversal_events[
'departureTime']>=leg_start_time)]
else:
leg_end_time = next_entry_time
bus_path_trav = bus_path_traversal_events[(bus_path_traversal_events['vehicle'] == veh_id) & (
bus_path_traversal_events['arrivalTime'] < leg_end_time) & (bus_path_traversal_events[
'departureTime']>=leg_start_time)]
leg_duration = int(leg_end_time - bus_entry['time'])
# find the path traversals of the bus corresponding to the bus entry for this trip, occurring between the last prev_path_traversal and the first post_path_traversal
if len(bus_path_trav) > 0:
veh_type = bus_path_trav['vehicleType'].values[0]
distance = bus_path_trav['length'].sum()
leg_path = [path['links'] for p, path in bus_path_trav.iterrows()]
leg_mode = bus_path_trav['mode'].values[0]
leg_fuel = 0
leg_fuel_type = 'Diesel'
leg_array.append(
[pid, trip_id, leg_id_full, leg_mode, veh_id, veh_type, leg_start_time, leg_end_time,
leg_duration,
distance, leg_path, leg_fuel, leg_fuel_type])
# iterate through the path traversals after the bus entry
if len(post_path_trav) > 0:
these_legs = post_path_trav.apply(lambda row1: one_path(row1, leg_id, pid, trip_id), axis=1)
leg_array.extend(these_legs)
# if the agent underwent replanning, there will be no bus entry
else:
leg_array = parse_walk_car_trips(row, non_bus_path_traversal_events, enter_veh_events)
return leg_array
def parse_walk_car_trips(row, path_traversal_events, enter_veh_events):
# inputs:
# row: a row from transit_trips_df
# path_traversal_events: non-transit path traversal df
# person_costs: person cost events df
# enter_veh_events: enter vehicle events
leg_array = []
pid = row['PID']
trip_id = row['Trip_ID']
start_time = row['Start_time']
duration = row['Duration_sec']
end_time = row['End_time']
mode = row['Mode']
# initiate the leg ID counter
leg_id = 0
# get all path traversals occurring within the time frame of this trip in which the driver is the person making this trip
path_trav = path_traversal_events.loc[
(path_traversal_events['driver'] == pid) & (path_traversal_events['arrivalTime'] <= end_time) & (
path_traversal_events['departureTime'] >= start_time.total_seconds()),]
path_trav.reset_index(drop=True, inplace=True)
# iterate through the path traversals
if len(path_trav) > 0:
these_legs = path_trav.apply(lambda row: one_path(row, leg_id, pid, trip_id), axis=1)
leg_array.extend(these_legs)
return leg_array
def parse_ridehail_trips(row, path_traversal_events, enter_veh_events):
# inputs:
# row: a row from transit_trips_df
# path_traversal_events: non-transit path traversal df
# person_costs: person cost events df
# enter_veh_events: enter vehicle events
leg_array = []
pid = row['PID']
trip_id = row['Trip_ID']
start_time = row['Start_time']
duration = row['Duration_sec']
end_time = row['End_time']
mode = row['Mode']
# initiate the leg ID counter
leg_id = 0
# get all vehicle entry events corresponding to this person during the time frame of this trip, not including those corresponding to a walking leg
veh_entry = enter_veh_events.loc[
(enter_veh_events['person'] == pid) & (enter_veh_events['time'] >= start_time.total_seconds()) & (
enter_veh_events['time'] <= end_time),]
veh_entry2 = veh_entry.loc[(veh_entry['vehicle'] != 'body-' + pid),]
try:
veh_id = veh_entry2['vehicle'].item()
leg_start_time = veh_entry2['time'].item()
# get the path traversal corresponding to this ridehail trip
path_trav = path_traversal_events.loc[(path_traversal_events['vehicle'] == veh_id) & (
path_traversal_events['departureTime'] == int(leg_start_time)) & (path_traversal_events['numPassengers'] > 0),]
except:
path_trav = []
print(row)
leg_id += 1
# create leg ID
leg_id_full = trip_id + "_l-" + str(leg_id)
if len(path_trav) > 0:
veh_type = path_trav['vehicleType'].values[0]
distance = path_trav['length'].item()
leg_end_time = path_trav['arrivalTime'].item()
leg_duration = int(leg_end_time) - int(leg_start_time)
leg_path = path_trav['links'].item()
leg_mode = 'OnDemand_ride'
leg_fuel = path_trav['fuel'].item()
leg_fuel_type = path_trav['fuelType'].item()
leg_array.append(
[pid, trip_id, leg_id_full, leg_mode, veh_id, veh_type, leg_start_time, leg_end_time, leg_duration,
distance, leg_path, leg_fuel, leg_fuel_type])
return leg_array
def label_trip_mode(modes):
if ('walk' in modes) and ('car' in modes) and ('bus' in modes):
return 'drive_transit'
elif ('car' in modes) and ('bus' in modes):
return 'drive_transit'
elif ('walk' in modes) and ('bus' in modes):
return 'walk_transit'
elif ('walk' in modes) and ('car' in modes):
return 'car'
elif ('car' == modes):
return 'car'
elif ('OnDemand_ride' in modes):
return 'OnDemand_ride'
elif ('walk' == modes):
return 'walk'
else:
print(modes)
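# A minimal usage sketch: label_trip_mode maps the set of leg modes observed for a trip
# to a single realized trip mode.
def _example_trip_mode_labels():
    return {
        "drive_transit": label_trip_mode(['walk', 'car', 'bus']),
        "walk_transit": label_trip_mode(['walk', 'bus']),
        "on_demand": label_trip_mode(['walk', 'OnDemand_ride']),
    }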
def merge_legs_trips(legs_df, trips_df):
trips_df = trips_df[ ['PID', 'Trip_ID', 'Origin_Activity_ID', 'Destination_activity_ID', 'Trip_Purpose',
'Mode']]
trips_df.columns = ['PID', 'Trip_ID', 'Origin_Activity_ID', 'Destination_activity_ID', 'Trip_Purpose',
'plannedTripMode']
legs_grouped = legs_df.groupby("Trip_ID")
unique_modes = legs_grouped['Mode'].unique()
unique_modes_df = pd.DataFrame(unique_modes)
unique_modes_df.columns = ['legModes']
merged_trips = trips_df.merge(legs_grouped[['Duration_sec', 'Distance_m', 'fuel', 'FuelCost', 'Fare']].sum(), on='Trip_ID')
merged_trips.set_index('Trip_ID',inplace=True)
legs_transit = legs_df.loc[legs_df['Mode']=='bus',]
legs_transit_grouped = legs_transit.groupby("Trip_ID")
count_modes = legs_transit_grouped['Mode'].count()
merged_trips.loc[count_modes.loc[count_modes.values >1].index.values,'Fare'] = merged_trips.loc[count_modes.loc[count_modes.values >1].index.values,'Fare']/count_modes.loc[count_modes.values >1].values
merged_trips = merged_trips.merge(unique_modes_df,on='Trip_ID')
legs_grouped_start_min = pd.DataFrame(legs_grouped['Start_time'].min())
legs_grouped_end_max = pd.DataFrame(legs_grouped['End_time'].max())
merged_trips= merged_trips.merge(legs_grouped_start_min,on='Trip_ID')
merged_trips= merged_trips.merge(legs_grouped_end_max,on='Trip_ID')
merged_trips['realizedTripMode'] = merged_trips['legModes'].apply(lambda row: label_trip_mode(row))
return merged_trips
# ########### 2. PARSING AND PROCESSING THE XML FILES INTO PANDAS DATA FRAMES ###############
def get_person_output_from_households_xml(households_xml, output_folder_path):
"""
- Parses the outputHouseholds file to create the households_dataframe gathering each person's household attributes
(person id, household id, number of vehicles in the household, overall income of the household)
- Saves the household dataframe to csv
Parameters
----------
households_xml: ElementTree object
Output of the open_xml() function for the `outputHouseholds.xml` file
output_folder_path: pathlib.Path object
Absolute path of the output folder of the simulation
(format of the output folder name: `<scenario_name>-<sample_size>__<date and time>`)
Returns
-------
households_df: pandas Dataframe
Record of each person's household attributes
(person id, household id, number of vehicles in the household, overall income of the household)
"""
# get root of the `outputHouseholds.xml` file
households_root = households_xml.getroot()
hhd_array = []
for hhd in households_root.getchildren():
hhd_id = hhd.get('id').strip()
hhd_children = hhd.getchildren()
# check for vehicles; record household attributes
if len(hhd_children) == 3:
members = hhd_children[0]
vehicles = hhd_children[1]
income = hhd_children[2]
vehs = vehicles.getchildren()
hdd_num_veh = len(vehs)
else:
members = hhd_children[0]
vehicles = []
income = hhd_children[1]
hdd_num_veh = 0
hhd_income = income.text.strip()
# get list of persons in household and make a record of each person
list_members = members.getchildren()
for person in list_members:
pid = person.attrib['refId'].strip()
hhd_array.append([pid, hhd_id, hdd_num_veh, hhd_income])
# convert array to dataframe and save
households_df = pd.DataFrame(hhd_array, columns=['PID', 'Household_ID', 'Household_num_vehicles', 'Household_income [$]'])
households_df.to_csv(str(output_folder_path) + "/households_dataframe.csv")
return households_df
def get_person_output_from_output_plans_xml(output_plans_xml):
""" Parses the outputPlans file to create the person_dataframe gathering individual attributes of each person
(person id, age, sex, home location)
Parameters
----------
output_plans_xml: ElementTree object
Output of the open_xml() function for the `outputPlans.xml` file
Returns
-------
person_df: pandas DataFrame
Record of some of each person's individual attributes (person id, age, sex, home location)
"""
# get root of the `outputPlans.xml` file
output_plans_root = output_plans_xml.getroot()
person_array = []
for person in output_plans_root.findall('./person'):
pid = person.get('id')
attributes = person.findall('./attributes')[0]
age = int(attributes.findall('./attribute[@name="age"]')[0].text)
sex = attributes.findall('./attribute[@name="sex"]')[0].text
plan = person.findall('./plan')[0]
home = plan.findall('./activity')[0]
home_x = home.get('x')
home_y = home.get('y')
person_array.append([pid, age, sex, home_x, home_y])
# convert person array to dataframe
person_df = pd.DataFrame(person_array, columns=['PID', 'Age', 'Sex', 'Home_X', 'Home_Y'])
return person_df
def get_person_output_from_output_person_attributes_xml(persons_xml):
""" Parses outputPersonAttributes.xml file to create population_attributes_dataframe gathering individual attributes
of the population (person id, excluded modes (i.e. transportation modes that the person is not allowed to use),
income, rank, value of time).
Parameters
----------
persons_xml: ElementTree object
Output of the open_xml() function for the `outputPersonAttributes.xml` file
Returns
-------
person_df_2: pandas DataFrame
Record of some of each person's individual attributes (person id, excluded modes (i.e. transportation modes
that the person is not allowed to use), income, rank, value of time)
"""
# get root of the `outputPersonAttributes.xml` file
persons_root = persons_xml.getroot()
population_attributes = []
population = persons_root.getchildren()
for person in population:
pid = person.get('id')
attributes = person.findall("./attribute")
population_attributes_dict = {}
population_attributes_dict['PID'] = pid
for attribute in attributes:
population_attributes_dict[attribute.attrib['name']] = attribute.text
population_attributes.append(population_attributes_dict)
# convert attribute array to dataframe
person_df_2 = pd.DataFrame(population_attributes)
return person_df_2
import time
def get_persons_attributes_output(output_plans_xml, persons_xml, households_xml, output_folder_path):
"""Outputs the augmented persons dataframe, including all individual and household attributes for each person
Parameters
----------
output_plans_xml: ElementTree object
Output of the open_xml() function for the `outputPlans.xml` file
persons_xml: ElementTree object
Output of the open_xml() function for the `outputPersonAttributes.xml` file
households_xml: ElementTree object
Output of the open_xml() function for the `outputHouseholds.xml` file
output_folder_path: pathlib.Path object
Absolute path of the output folder of the simulation (format of the output folder name: `<scenario_name>-<sample_size>__<date and time>`)
Returns
-------
persons_attributes_df: pandas DataFrame
Record of all individual and household attributes for each person
"""
# get the person attributes dataframes
households_df = get_person_output_from_households_xml(households_xml, output_folder_path)
person_df = get_person_output_from_output_plans_xml(output_plans_xml)
person_df_2 = get_person_output_from_output_person_attributes_xml(persons_xml)
# set the index of all dataframes to PID (person ID)
person_df.set_index('PID', inplace=True)
person_df_2.set_index('PID', inplace=True)
households_df.set_index('PID', inplace=True)
# join the three dataframes together
persons_attributes_df = person_df.join(person_df_2)
persons_attributes_df = persons_attributes_df.join(households_df)
return persons_attributes_df
def get_activities_output(experienced_plans_xml):
""" Parses the experiencedPlans.xml file to create the activities_dataframe, gathering each person's activities' attributes
(person id, activity id, activity type, activity start time, activity end time)
Parameters
----------
experienced_plans_xml: ElementTree object
Output of the open_xml() function for the `<num_iterations>.experiencedPlans.xml` file located in
the `/ITERS/it.<num_iterations> folder
Returns
-------
activities_df: pandas DataFrame
Record of each person's activities' attributes
trip_purposes: list of string
purpose of each trip, e.g., "Work", "Home", etc.
"""
# get root of experiencedPlans xml file
plans_root = experienced_plans_xml.getroot()
acts_array = []
# iterate through persons, recording activities and trips for each person
for person in plans_root.findall('./person'):
# we use the person ID from the raw output
pid = person.get('id')
plan = person.getchildren()[0]
activities = plan.findall('./activity')
# initialize activity ID counters (we create activity IDs using these)
act_id = 0
trip_purposes = []
# iterate through activities and make record of each activity
for activity in activities:
act_id += 1
# create activity ID
activity_id = pid + "_a-" + str(act_id)
act_type = activity.get('type')
if activity.get('start_time') is None:
act_start_time = None
else:
act_start_time = activity.get('start_time')
if activity.get('end_time') is None:
act_end_time = None
else:
act_end_time = activity.get('end_time')
# record all activity types to determine trip purposes
trip_purposes.append([act_type])
acts_array.append([pid, activity_id, act_type, act_start_time, act_end_time])
# convert the activity_array to a dataframe
activities_df = pd.DataFrame(acts_array, columns=['PID', 'Activity_ID', 'Activity_Type', 'Start_time', 'End_time'])
return activities_df, trip_purposes
def get_trips_output(experienced_plans_xml_path):
""" Parses the experiencedPlans.xml file to create the trips dataframe, gathering each person's trips' attributes
(person id, trip id, id of the origin activity of the trip, id of the destination activity of the trip, trip purpose,
mode used, start time of the trip, duration of the trip, distance of the trip, path of the trip)
Parameters
----------
experienced_plans_xml_path: str
Output of the open_xml() function for the `<num_iterations>.experiencedPlans.xml` file located in
the `/ITERS/it.<num_iterations> folder
Returns
-------
trips_df: pandas DataFrame
Record of each person's trips' attributes
"""
# get root of experiencedPlans xml file
experienced_plans_xml = open_xml(experienced_plans_xml_path)
plans_root = experienced_plans_xml.getroot()
trip_array = []
# Getting the activities dataframe
_, trip_purposes = get_activities_output(experienced_plans_xml)
# iterate through persons, recording activities and trips for each person
for person in plans_root.findall('./person'):
# we use the person ID from the raw output
pid = person.get('id')
plan = person.getchildren()[0]
legs = plan.findall('./leg')
# initialize trip ID counters (we create trip IDs using these)
trip_id = 0
# iterate through trips (called legs in the `experiencedPlans.xml` file) and make record of each trip
for trip in legs:
trip_id += 1
# create trip ID
trip_id_full = pid + "_t-" + str(trip_id)
# record activity IDs for origin and destination activities of the trip
o_act_id = pid + "_a-" + str(trip_id)
d_act_id = pid + "_a-" + str(trip_id + 1)
# identify the activity type of the trip destination to record as the trip purpose
trip_purpose = trip_purposes[trip_id][0]
mode = trip.get('mode')
dep_time = trip.get('dep_time')
duration = trip.get('trav_time')
route = trip.find('./route')
distance = route.get('distance')
path = route.text
trip_array.append(
[pid, trip_id_full, o_act_id, d_act_id, trip_purpose, mode, dep_time, duration, distance, path])
# convert the trip_array to a dataframe
trips_df = pd.DataFrame(trip_array,
columns=['PID', 'Trip_ID', 'Origin_Activity_ID', 'Destination_activity_ID', 'Trip_Purpose',
'Mode', 'Start_time', 'Duration_sec', 'Distance_m', 'Path_linkIds'])
return trips_df
# def get_events_output(events):
# """ Parses the outputEvents.xml to gather the event types into a pandas DataFrame
#
# Parameters
# ----------
# events: xml.etree.ElementTree.ElementTree
# Element tree instance of the xml file of interest
#
# Returns
# -------
# :pandas Dataframe
#
# """
# event_data = {}
# root = events.getroot()
# for event in root.getchildren():
# add_event_type_data_to_library(event, event_data)
# return pd.DataFrame(event_data)
#
#
# def add_event_type_data_to_library(event, event_data):
# """For each child element in the tree, creates a dictionary with the "type" attribute.
#
# Parameters
# ----------
# event: xml.etree.ElementTree.Element
# Child of the element tree instance
#
# event_data: dictionary
# Dictionary where the "type" attribute of the child element will be stored
#
# """
# attrib = event.attrib
# event_type = attrib['type']
# if event_type not in event_data:
# dd = defaultdict(list)
# event_data[event_type] = dd
# else:
# dd = event_data[event_type]
# for k, v in attrib.items():
# dd[k].append(v)
def get_path_traversal_output(events_df):
""" Parses the experiencedPlans.xml file to create the trips dataframe, gathering each person's trips' attributes
(person id, trip id, id of the origin activity of the trip, id of the destination activity of the trip, trip purpose,
mode used, start time of the trip, duration of the trip, distance of the trip, path of the trip)
Parameters
----------
events_df: pandas DataFrame
DataFrame extracted from the outputEvents.xml` file: output of the extract_dataframe() function
trips_df: pandas DataFrame
Record of each person's trips' attributes: output of the get_trips_output() function
Returns
-------
path_traversal_events_df: pandas DataFrame
"""
# builds the path traversal dataframe from the events dataframe
# Selecting the columns of interest
events_df = events_df[['time', 'type', 'person', 'vehicle', 'driver', 'vehicleType', 'length',
'numPassengers', 'departureTime', 'arrivalTime', 'mode', 'links',
'fuelType', 'fuel']]
# get all path traversal events (all vehicle movements, and all person walk movements)
path_traversal_events_df = events_df[(events_df['type'] == 'PathTraversal') & (events_df['length'] > 0)]
path_traversal_events_df = path_traversal_events_df.reset_index(drop=True)
path_traversal_events_df = path_traversal_events_df
return path_traversal_events_df
def get_legs_output(events_df, trips_df):
""" Parses the outputEvents.xml and trips_df file to create the legs dataframe, gathering each person's trips' legs' attributes
(PID, Trip_ID, Leg_ID, Mode, Veh, Veh_type, Start_time, End_time,
Duration, Distance, Path, fuel, fuelType)
Parameters
----------
events_df: pandas DataFrame
DataFrame extracted from the `outputEvents.xml` file: output of the extract_dataframe() function
trips_df: pandas DataFrame
Record of each person's trips' attributes: output of the get_trips_output() function
Returns
-------
legs_df: pandas DataFrame
Records the legs attributes for each person's trip
"""
# convert trip times to timedelta; calculate end time of trips
trips_df['Start_time'] = pd.to_timedelta(trips_df['Start_time'])
trips_df['Duration_sec'] = pd.to_timedelta(trips_df['Duration_sec'])
trips_df['End_time'] = trips_df['Start_time'].dt.seconds + trips_df['Duration_sec'].dt.seconds + (
3600 * 24 * trips_df['Start_time'].dt.days)
path_traversal_events_full = get_path_traversal_output(events_df)
# get all relevant personEntersVehicle events (those occurring at time ==0 are all ridehail/bus drivers)
enter_veh_events = events_df[(events_df['type'] == 'PersonEntersVehicle') & (events_df['time'] > 0)]
# filter for bus path traversals only
bus_path_traversal_events = path_traversal_events_full[path_traversal_events_full['mode'] == "bus"]
# filter for car & body path traversals only
non_bus_path_traversal_events = path_traversal_events_full[path_traversal_events_full['mode'] != "bus"]
# get all PersonCost events (record the expenditures of persons during a trip)
# person_costs = events_df.loc[events_df['type']=='PersonCost',]
legs_array = []
# record all legs corresponding to OnDemand_ride trips
on_demand_ride_trips = trips_df.loc[((trips_df['Mode'] == 'OnDemand_ride') | (trips_df['Mode'] == 'ride_hail')),]
on_demand_ride_legs_array = on_demand_ride_trips.apply(
lambda row: parse_ridehail_trips(row, non_bus_path_traversal_events, enter_veh_events), axis=1)
for bit in on_demand_ride_legs_array.tolist():
legs_array.extend(tid for tid in bit)
# record all legs corresponding to transit trips
transit_trips_df = trips_df[(trips_df['Mode'] == 'drive_transit') | (trips_df['Mode'] == 'walk_transit')]
transit_legs_array = transit_trips_df.apply(
lambda row: parse_transit_trips(row, non_bus_path_traversal_events, bus_path_traversal_events, enter_veh_events),
axis=1)
for bit in transit_legs_array.tolist():
legs_array.extend(tid for tid in bit)
# record all legs corresponding to walk and car trips
walk_car_trips_df = trips_df.loc[(trips_df['Mode'] == 'car') | (trips_df['Mode'] == 'walk'),]
walk_car_legs_array = walk_car_trips_df.apply(
lambda row: parse_walk_car_trips(row, non_bus_path_traversal_events, enter_veh_events), axis=1)
for bit in walk_car_legs_array.tolist():
legs_array.extend(tid for tid in bit)
# convert the leg array to a dataframe
legs_df = pd.DataFrame(legs_array,
columns=['PID', 'Trip_ID', 'Leg_ID', 'Mode', 'Veh', 'Veh_type', 'Start_time', 'End_time', 'Duration_sec', 'Distance_m', 'Path', 'fuel', 'fuelType'])
return legs_df, path_traversal_events_full
# ############ 3. GENERATE THE CSV FILES ###########
def extract_person_dataframes(output_plans_path, persons_path, households_path, output_folder_path):
""" Create a csv file from the processed person dataframe
Parameters
----------
output_plans_path: pathlib.Path object
Absolute path of the `outputPlans.xml` file
persons_path: pathlib.Path object
Absolute path of the `outputPersonAttributes.xml` file
households_path: pathlib.Path object
Absolute path of the `outputHouseholds.xml` file
output_folder_path: pathlib.Path object
Absolute path of the output folder of the simulation (format of the output folder name: `<scenario_name>-<sample_size>__<date and time>`)
Returns
-------
persons_attributes_df: pandas DataFrame
Record of all individual and household attributes for each person
"""
# opens the xml files
output_plans_xml = open_xml(output_plans_path)
persons_xml = open_xml(persons_path)
households_xml = open_xml(households_path)
persons_attributes_df = get_persons_attributes_output(output_plans_xml, persons_xml, households_xml, output_folder_path)
persons_attributes_df.to_csv(str(output_folder_path) + "/persons_dataframe.csv")
print("person_dataframe.csv generated")
return persons_attributes_df
def extract_activities_dataframes(experienced_plans_path, output_folder):
""" Create a csv file from the processed activities dataframe
Parameters
----------
experienced_plans_path: pathlib.Path object
output_folder_path: pathlib.Path object
Absolute path of the output folder of the simulation
(format of the output folder name: `<scenario_name>-<sample_size>__<date and time>`)
Returns
-------
activities_df
"""
# opens the experiencedPlans and passes the xml file to get_activity_trip_output
# returns the actitivities_dataframe and trips_dataframe
experienced_plans_xml = open_xml(experienced_plans_path)
activities_df, _ = get_activities_output(experienced_plans_xml)
# convert dataframes into csv files
activities_df.to_csv(str(output_folder) + "/activities_dataframe.csv")
print("activities_dataframe.csv generated")
return activities_df
def extract_legs_dataframes(events_path, trips_df, person_df, bus_fares_df, trip_to_route, fuel_costs, output_folder_path):
""" Create a csv file from the processes legs dataframe
Parameters
----------
events_path: pathlib.Path object
Absolute path of the `ITERS/<num_iterations>.events.csv.gz` file
trips_df: pandas DataFrame
Record of each person's trips' attributes: output of the get_trips_output() function
person_df: pandas DataFrame
Record of each person's trips' attributes: output of the get_persons_attributes_output() function
bus_fares_df: pandas DataFrame
Dataframe with rows = ages and columns = routes: output of the parse_bus_fare_input() function
trip_to_route: dictionary
route_id / trip_id correspondence extracted from the `trips.csv` file in the
`/reference-data/sioux_faux/sioux_faux_bus_lines/gtfs_data` folder of the Starter Kit
fuel_costs: dictionary
fuel type / fuel price correspondence extracted from the `beamFuelTypes.csv` file in the
`/reference-data/sioux_faux/config/<SAMPLE_SIZE>` folder of the Starter Kit
output_folder_path: pathlib.Path object
Absolute path of the output folder of the simulation
(format of the output folder name: `<scenario_name>-<sample_size>__<date and time>`)
Returns
-------
legs_df: pandas DataFrame
Records the legs attributes for each person's trips
"""
# opens the outputevents and passes the xml file to get_legs_output
# augments the legs dataframe with estimates of the fuelcosts and fares for each leg
# extract a dataframe from the `outputEvents.xml` file
#all_events_df = extract_dataframe(str(events_path))
all_events_df = pd.read_csv(events_path)
legs_df, path_traversal_df = get_legs_output(all_events_df, trips_df)
path_traversal_df_new = calc_fuel_costs(path_traversal_df, fuel_costs)
path_traversal_df_new.to_csv(str(output_folder_path) + '/path_traversals_dataframe.csv')
print("path_traversals_dataframe.csv generated")
legs_df_new = calc_fuel_costs(legs_df, fuel_costs)
ride_hail_fares = {'base': 0.0, 'distance': 1.0, 'duration': 0.5}
legs_df_new_new = calc_fares(legs_df_new, ride_hail_fares, bus_fares_df, person_df, trip_to_route)
legs_df.to_csv(str(output_folder_path) + '/legs_dataframe.csv')
print("legs_dataframe.csv generated")
return legs_df_new_new
def output_parse(events_path, output_plans_path, persons_path, households_path, experienced_plans_path,
bus_fares_data_df, route_ids, trip_to_route, fuel_costs, output_folder_path):
persons_attributes_df = extract_person_dataframes(output_plans_path, persons_path, households_path, output_folder_path)
activities_df = extract_activities_dataframes(experienced_plans_path, output_folder_path)
trips_df = get_trips_output(experienced_plans_path)
bus_fares_df = parse_bus_fare_input(bus_fares_data_df, route_ids)
legs_df = extract_legs_dataframes(events_path, trips_df, persons_attributes_df, bus_fares_df, trip_to_route, fuel_costs, output_folder_path)
final_trips_df = merge_legs_trips(legs_df, trips_df)
final_trips_df.to_csv(str(output_folder_path) +'/trips_dataframe.csv')
print("trips_dataframe.csv generated")
``` |
{
"source": "jlazic/glog-sms-gateway",
"score": 2
} |
#### File: glog-sms-gateway/sms/views.py
```python
__author__ = '<EMAIL>'
import json
from django.http import HttpResponse, HttpResponseNotAllowed
from sms.models import StatusLog, Phone, Message
from sms.forms import RequestForm
from django.views.decorators.csrf import csrf_exempt
from django.shortcuts import get_object_or_404
from jsonview.decorators import json_view
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django.views.generic.edit import CreateView
from django.views.generic.detail import DetailView
from django.views.generic.base import RedirectView
from django.views.generic.list import ListView
from forms import MessageForm
from django.conf import settings
from django.core.exceptions import PermissionDenied
class Redirect(RedirectView):
url = '/sms/user/send/'
class Index(CreateView):
template_name = 'sms/user_send_message.html'
form_class = MessageForm
def get_context_data(self, **kwargs):
context = super(Index, self).get_context_data(**kwargs)
context['request'] = self.request
return context
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(Index, self).dispatch(*args, **kwargs)
class MessageDetail(DetailView):
model = Message
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(MessageDetail, self).dispatch(*args, **kwargs)
def get_context_data(self, **kwargs):
context = super(MessageDetail, self).get_context_data(**kwargs)
context['request'] = self.request
return context
class MessageList(ListView):
model = Message
def get_queryset(self):
return Message.objects.filter(user=self.request.user)
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(MessageList, self).dispatch(*args, **kwargs)
def get_context_data(self, **kwargs):
context = super(MessageList, self).get_context_data(**kwargs)
context['request'] = self.request
return context
@csrf_exempt
@json_view
def pool(request):
"""
Implementation of EnvayaSMS API http://sms.envaya.org/serverapi/
:param request:
:return:
"""
# We must allow the HEAD verb so that phones are able to ping the server
if request.method == 'HEAD':
return HttpResponse('OK')
# Deny any other verb except POST
if request.method != 'POST':
return HttpResponseNotAllowed('Only POST requests allowed here')
"""
First, save incoming request to database via RequestForm. We save all incoming requests without discrimination
based on wrong passwords, missing settings,... yeah, this could bite me in the ass later on.
"""
rf = RequestForm(request.POST)
r = rf.save()
"""
After we have saved request to database, check if we have this phone in database
If there is no such phone this will fail with error 403, and it will be written to log file
"""
try:
phone = Phone.objects.get(number=request.POST['phone_number'])
except Phone.DoesNotExist:
raise PermissionDenied('We have no phone with number {} configured'.format(request.POST['phone_number']))
"""
If we use authentication check phones password against X-Request-Signature sent by phone
"""
if settings.SMS_USE_AUTH:
url = request.build_absolute_uri()
request_signature = phone.calculate_signature(url, request.POST)
if request_signature != request.META['HTTP_X_REQUEST_SIGNATURE']:
raise PermissionDenied('You have an invalid password. What is your first dogs name?')
events = None
json_response = '{}' # Default empty JSON response
"""Sada idemo lijepo redom po svim mogucim opcijama, pocevsi sa outgoing, send_status, incoming,..."""
if request.POST['action'] == 'outgoing':
"""Additional parameters sent in POST requests with action=outgoing: (None)"""
# Sending max 5 messages at once, poor man's throttling
messages = Message.objects.filter(status='queued')[:5] # status=queued, Server queue
events = {'events': [{'event': 'send', 'messages': []}]} # Initialize empty events
for message in messages:
# It's ugly how Python handles nested lists and dicts, but what can you do.
events['events'][0]['messages'].append(
{'id': message.id, 'to': message.phone_number(), 'message': message.message}
)
message.status = 'sent'
message.save()
json_response = json.dumps(events)
# The server sends send_status for messages that have a message.id
elif request.POST['action'] == 'send_status':
m = get_object_or_404(Message, pk=request.POST['id']) # Find the message, or return a 404 error
s = StatusLog(status=request.POST['status'], error=request.POST['error'], log=request.POST['log'],
phone_number=request.POST['phone_number'], message=m)
s.save()
m.update_status(request.POST['status'])
return HttpResponse(json_response, content_type='application/json')
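# A minimal sketch of the payload shape the view above produces for action=outgoing;
# the id, phone number and message text below are hypothetical. For action=send_status
# the view records the reported delivery status and returns an empty JSON object.
_EXAMPLE_OUTGOING_RESPONSE = {
    'events': [{'event': 'send', 'messages': [
        {'id': 42, 'to': '+385911234567', 'message': 'Hello'},
    ]}],
}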
"""
DRF API related views
"""
from sms.models import Message
from rest_framework import viewsets, mixins
from sms.serializers import MessageSerializer
class MessageAPIList(mixins.CreateModelMixin, mixins.RetrieveModelMixin, mixins.ListModelMixin, viewsets.GenericViewSet):
"""
Messages list, filtered by current user
"""
serializer_class = MessageSerializer
def get_queryset(self):
"""
Filter only messages for currently authenticated user
"""
user = self.request.user
return Message.objects.filter(user=user)
def perform_create(self, serializer):
serializer.save(user=self.request.user)
``` |
{
"source": "j-lazo/image_processing",
"score": 3
} |
#### File: scripts/general_functions/annotate_ROI_dataset.py
```python
import argparse
import cv2
coords = []
drawing = False
drawing_lines = True
import os
import pandas as pd
def click_and_select_roi(event, x, y, flag, image):
"""
Callback function, called by OpenCV when the user interacts
with the window using the mouse. This function will be called
repeatedly as the user interacts.
(https://www.timpoulsen.com/2018/handling-mouse-events-in-opencv.html)
"""
# get access to a couple of global variables we'll need
global coords, drawing, drawing_lines, image_name, imgs_rois
if imgs_rois[image_name]:
cords_frame = imgs_rois[image_name]
clone = image.copy()
cv2.rectangle(clone, cords_frame[0], cords_frame[1], (0, 255, 0), 2)
cv2.imshow(image_name, clone)
if event == cv2.EVENT_RBUTTONDOWN:
imgs_rois[image_name] = []
cv2.imshow(image_name, image)
else:
clone = image.copy()
if event == cv2.EVENT_LBUTTONDOWN:
# user has clicked the mouse's left button
drawing = True
# save those starting coordinates
coords = [(x, y)]
elif event == cv2.EVENT_MOUSEMOVE and drawing is True:
# user is moving the mouse within the window
if drawing is True:
# if we're in drawing mode, we'll draw a green rectangle
# from the starting x,y coords to our current coords
cv2.rectangle(clone, coords[0], (x, y), (0, 255, 0), 2)
cv2.imshow(image_name, clone)
elif event == cv2.EVENT_MOUSEMOVE and drawing_lines is True:
clone = image.copy()
cv2.line(clone, (x, 0), (x, 511), (0, 255, 0), 2)
cv2.line(clone, (0, y), (511, y), (0, 255, 0), 2)
cv2.imshow(image_name, clone)
elif event == cv2.EVENT_LBUTTONUP:
# user has released the mouse button, leave drawing mode
# and crop the photo
drawing = False
# save our ending coordinates
coords.append((x, y))
cv2.rectangle(clone, coords[0], (x, y), (0, 255, 0), 2)
cv2.imshow(image_name, clone)
drawing_lines = False
imgs_rois[image_name] = coords
def UI_annotate_roi(dir_dataset, output_dir=''):
"""
Creates a dictionary of the ROI of the images in a dataset
Parameters
----------
dir_dataset : (str) Directory of the image dataset
Returns
-------
"""
global imgs_rois
list_imgs = [f for f in os.listdir(dir_dataset) if f.endswith('.png')]
imgs_rois = dict.fromkeys(list_imgs, [])
# wait for Esc or q key and then exit
i = 0
total_frames = len(list_imgs)
while True:
if i < 0:
i = 0
if i > total_frames - 1:
i = total_frames - 1
global image_name
image_name = list_imgs[i]
img_dir = dir_dataset + image_name
image = cv2.imread(img_dir)
# show the captured image in a window
cv2.namedWindow(image_name, cv2.WINDOW_NORMAL)
if imgs_rois[image_name]:
coords_frame = imgs_rois[image_name]
clone = image.copy()
cv2.rectangle(clone, coords_frame[0], coords_frame[1], (0, 255, 0), 2)
cv2.imshow(image_name, clone)
else:
cv2.imshow(image_name, image)
cv2.setMouseCallback(image_name, click_and_select_roi, image)
new_key = cv2.waitKey(0) & 0xFF
# wait for Esc or q key and then exit
# previous frame
if new_key == ord('a'):
cv2.destroyWindow(image_name)
i -= 1
# next frame
if new_key == ord('s'):
cv2.destroyWindow(image_name)
i += 1
if new_key == 27 or new_key == ord("k"):
print(imgs_rois)
df_rois = pd.DataFrame.from_dict(imgs_rois, orient='index')
#out_put_file_name = '.json'
#df_rois.to_json(out_put_file_name)
cv2.destroyAllWindows()
break
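# A minimal usage sketch; the folder path is a hypothetical placeholder (it must end with
# a slash, since image names are appended directly). Controls implemented above:
# left-drag draws a ROI, right-click clears it, 'a'/'s' move to the previous/next frame,
# Esc or 'k' prints the collected ROIs and exits.
def _example_annotate(folder="/path/to/frames/"):
    UI_annotate_roi(folder)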
def main():
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--path_dataset", help="Path to the dataset")
args = vars(ap.parse_args())
dir_dataset = args["path_dataset"] if args["path_dataset"] else 0
# now let's label the ROI of the dataset
UI_annotate_roi(dir_dataset)
if __name__ == "__main__":
main()
``` |
{
"source": "j-lazo/lumen_segmentation",
"score": 3
} |
#### File: lumen_segmentation/general/calculate_area_circunference.py
```python
from matplotlib import pyplot as plt
import numpy as np
import cv2
import os
def calculate_area_and_circunference(dir_folder):
mask_list = sorted(os.listdir(dir_folder))
list_areas = []
list_circuference = []
list_circularities = []
size_x = []
size_y = []
for mask in mask_list[:]:
name_mask = ''.join([dir_folder, mask])
arc_len, area = findArc(name_mask)
if area != 0:
circularity = 1.0*(arc_len**2)/(4*np.pi*area)
list_circularities.append(circularity)
list_areas.append(area)
list_circuference.append(arc_len)
#size_x.append(np.amax(list_x_pixels) - np.amin(list_x_pixels))
#size_y.append(np.amax(list_y_pixels) - np.amin(list_y_pixels))
return list_areas, list_circuference, list_circularities
def calculateDistance(x1, y1, X, Y):
dist_vector = []
for index, x2, in enumerate(X):
y2 = Y[index]
dist = np.sqrt((x2 - x1)**2 + (y2 - y1)**2)
dist_vector.append(dist)
return dist_vector
def findArc(image, th=200):
img = cv2.imread(image)
res = img.copy()
## convert to gray
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
## threshold the gray
th, threshed = cv2.threshold(gray, th, 255, cv2.THRESH_BINARY)
## Find contours on the binary threshed image
cnts = cv2.findContours(threshed,
cv2.RETR_LIST,
cv2.CHAIN_APPROX_SIMPLE)[-2]
## calcualte
for cnt in cnts:
arclen = cv2.arcLength(cnt, True)
area = cv2.contourArea(cnt)
cv2.drawContours(res, [cnt], -1, (0,255,0), 3, cv2.LINE_AA)
#print("Length: {:.3f}\nArea: {:.3f}".format(arclen, area))
cnt = cnts[0]
pnts_x = [point[0][0] for point in cnt]
pnts_y = [point[0][1] for point in cnt]
moments = cv2.moments(cnt)
print(np.shape(moments))
print(moments)
cx = int(moments['m10'] / moments['m00'])
cy = int(moments['m01'] / moments['m00'])
c1 = int(moments['m20'] / moments['m00'])
c2 = int(moments['m11'] / moments['m00'])
c3 = int(moments['m02'] / moments['m00'])
c4 = int(moments['m30'] / moments['m00'])
c5 = int(moments['m21'] / moments['m00'])
c6 = int(moments['m12'] / moments['m00'])
c7 = int(moments['m03'] / moments['m00'])
distances = calculateDistance(cx, cy, pnts_x, pnts_y)
print('cx:', cx, 'cy:', cy)
print('c1:', c1, 'c3:', c3)
print('c2:', c2, 'c4:', c4)
print('c5:', c5, 'c6:', c6)
print(1.0*(arclen**2)/(4*np.pi*area))
print(arclen, area)
print(np.min(distances), np.amax(distances))
fig, ax = plt.subplots()
ax.plot(cx, cy, 'ro')
ax.plot(c1, c3, 'go')
ax.plot(c5, c6, 'bo')
ax.add_artist(plt.Circle((cx, cy), np.min(distances), color='g', fill=False))
ax.add_artist(plt.Circle((cx, cy), np.max(distances), color='b', fill=False))
plt.imshow(img)
plt.show()
return arclen, area
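# Note: the quantity printed above, P**2 / (4*pi*A), is the reciprocal of the usual
# circularity 4*pi*A / P**2; both equal 1 for a perfect circle and diverge as the contour
# becomes less circular. A small sketch of the conventional form (helper name is new):
def circularity_from_contour(arc_len, area):
    return 4.0 * np.pi * area / (arc_len ** 2) if area > 0 else 0.0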
def other():
#directory = '/home/nearlab/Jorge/current_work/lumen_segmentation/data/lumen_data/samples_masks/'
directory = '/home/nearlab/Jorge/current_work/lumen_segmentation/data/lumen_data/all_data/patient_cases/p_002_pt1/label/'
calculate_area_and_circunference(directory)
def main():
directory = '/home/nearlab/Jorge/DATASETS/lumen_ureteroscopy/p_001_pt1/label/'
size_x, size_y, circ_1 = calculate_area_and_circunference(directory)
plt.figure()
plt.plot(size_x, size_y, 'ro')
directory = '/home/nearlab/Jorge/DATASETS/lumen_ureteroscopy/p_001_pt2/label/'
size_x, size_y, circ_2 = calculate_area_and_circunference(directory)
plt.plot(size_x, size_y, 'ro', label='patient 1')
directory = '/home/nearlab/Jorge/DATASETS/lumen_ureteroscopy/p_006_pt1/label/'
size_x, size_y, circ_3 = calculate_area_and_circunference(directory)
plt.plot(size_x, size_y, 'g*', label='patient 6')
directory = '/home/nearlab/Jorge/DATASETS/lumen_ureteroscopy/p_003_pt1/label/'
size_x, size_y, circ_4 = calculate_area_and_circunference(directory)
plt.plot(size_x, size_y, 'bo')
directory = '/home/nearlab/Jorge/DATASETS/lumen_ureteroscopy/p_003_pt2/label/'
size_x, size_y, circ_5 = calculate_area_and_circunference(directory)
plt.plot(size_x, size_y, 'bo')
directory = '/home/nearlab/Jorge/DATASETS/lumen_ureteroscopy/p_003_pt3/label/'
size_x, size_y, circ_6 = calculate_area_and_circunference(directory)
plt.plot(size_x, size_y, 'bo', label='patient 3')
plt.legend(loc='best')
plt.xlabel('Contour Perimeter ')
plt.ylabel('Area')
data_patient_1 = circ_1 + circ_2
data_patient_2 = circ_3
data_patient_3 = circ_4 + circ_5 + circ_6
plt.figure()
directory = '/home/nearlab/Jorge/DATASETS/lumen_ureteroscopy/p_004_pt1/label/'
size_x, size_y, circ_7 = calculate_area_and_circunference(directory)
plt.plot(size_x, size_y, 'r*', label='patient 4')
directory = '/home/nearlab/Jorge/DATASETS/lumen_ureteroscopy/p_002_pt1/label/'
size_x, size_y, circ_8 = calculate_area_and_circunference(directory)
plt.plot(size_x, size_y, 'bo', label='patient 2')
directory = '/home/nearlab/Jorge/DATASETS/lumen_ureteroscopy/p_005_pt1/label/'
size_x, size_y, circ_9 = calculate_area_and_circunference(directory)
plt.plot(size_x, size_y, 'g*')
directory = '/home/nearlab/Jorge/DATASETS/lumen_ureteroscopy/p_005_pt2/label/'
size_x, size_y, circ_10 = calculate_area_and_circunference(directory)
plt.plot(size_x, size_y, 'g*', label='patient 5')
directory = '/home/nearlab/Jorge/DATASETS/lumen_ureteroscopy/p_007_pt1/label/'
size_x, size_y, circ_11 = calculate_area_and_circunference(directory)
plt.plot(size_x, size_y, 'yo')
directory = '/home/nearlab/Jorge/DATASETS/lumen_ureteroscopy/p_007_pt2/label/'
size_x, size_y, circ_12 = calculate_area_and_circunference(directory)
plt.plot(size_x, size_y, 'yo')
directory = '/home/nearlab/Jorge/DATASETS/lumen_ureteroscopy/p_007_pt3/label/'
size_x, size_y, circ_13 = calculate_area_and_circunference(directory)
plt.plot(size_x, size_y, 'yo')
directory = '/home/nearlab/Jorge/DATASETS/lumen_ureteroscopy/p_007_pt4/label/'
size_x, size_y, circ_14 = calculate_area_and_circunference(directory)
plt.plot(size_x, size_y, 'yo', label='patient 7')
plt.legend(loc='best')
plt.xlabel('Contour Perimeter ')
plt.ylabel('Area')
plt.figure(3)
fig, axs = plt.subplots(1, 3)
axs[0].boxplot(data_patient_1, 1, 'gD')
axs[0].set_title('patient 1')
axs[0].set_ylim(0.8, 8.9)
axs[1].boxplot(data_patient_2, 1, 'gD')
axs[1].set_title('patient 6')
axs[1].set_ylim(0.8, 8.9)
axs[2].boxplot(data_patient_3, 1, 'gD')
axs[2].set_title('patient 3')
axs[2].set_ylim(0.8, 8.9)
data_patient_7 = circ_11 + circ_12 + circ_13 + circ_14
plt.figure(4)
fig, axs = plt.subplots(1, 4)
axs[0].boxplot(circ_7, 1, 'gD')
axs[0].set_title('patient 4')
axs[0].set_ylim(0.8, 11)
axs[1].boxplot(circ_8, 1, 'gD')
axs[1].set_title('patient 1')
axs[1].set_ylim(0.8, 11)
axs[2].boxplot(circ_9 + circ_10, 1, 'gD')
axs[2].set_title('patient 5')
axs[2].set_ylim(0.8, 11)
axs[3].boxplot(data_patient_7, 1, 'gD')
axs[3].set_title('patient 7')
axs[3].set_ylim(0.8, 11)
plt.show()
if __name__ == '__main__':
#main()
other()
```
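The quantity printed above, `arclen**2 / (4 * np.pi * area)`, is the reciprocal of the isoperimetric quotient, so it equals 1 for a perfect circle and grows as the contour becomes less circular. A minimal sketch of computing the same centroid and circularity for a single binary mask with OpenCV; the file name is a hypothetical placeholder, not a path from this repository.

```python
import cv2
import numpy as np

# Sketch (hypothetical path): circularity of the largest blob in a binary mask.
# 4*pi*A / P**2 is 1 for a perfect circle; the script above prints its reciprocal.
mask = cv2.imread('example_label.png', cv2.IMREAD_GRAYSCALE)
_, binary = cv2.threshold(mask, 127, 255, cv2.THRESH_BINARY)
contours, _ = cv2.findContours(binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
largest = max(contours, key=cv2.contourArea)
area = cv2.contourArea(largest)
perimeter = cv2.arcLength(largest, True)
moments = cv2.moments(largest)
cx, cy = moments['m10'] / moments['m00'], moments['m01'] / moments['m00']
circularity = 4 * np.pi * area / perimeter ** 2
print('centroid:', (cx, cy), 'circularity:', circularity)
```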
#### File: lumen_segmentation/general/calculate_p_values.py
```python
import scipy.stats as stats
import matplotlib.pyplot as plt
import csv
from scipy.stats import norm
from statsmodels.stats.contingency_tables import mcnemar
from scipy.stats import kruskal
def contingency_table(real_val, file_1, file_2):
# TODO: to build this properly, what is needed for every dataset is the
# per-pixel (or per-image) correctness of each classifier, so the 2x2 table reads:
#                         Classifier2 Correct   Classifier2 Incorrect
# Classifier1 Correct          Yes/Yes                Yes/No
# Classifier1 Incorrect        No/Yes                 No/No
# See:
# https://machinelearningmastery.com/mcnemars-test-for-machine-learning/
return [[4, 2], [1, 3]]
csv_path_1 = '/home/nearlab/Jorge/current_work/' \
'lumen_segmentation/data/lumen_data/' \
'results/compare_3Dvs2D/' \
'results_evaluation_test_02_ResUnet_lr_0.001_bs_8_grayscale_03_11_2020_20_08_.csv'
csv_path_2 = '/home/nearlab/Jorge/current_work/' \
'lumen_segmentation/data/lumen_data/' \
'results/compare_3Dvs2D/' \
'results_evaluation_test_02_3D_ResUnet_lr_0.0001_bs_8_rgb_29_11_2020_20_15_new.csv'
csv_path_3= '/home/nearlab/Jorge/current_work/' \
'lumen_segmentation/data/lumen_data/' \
'results/compare_3Dvs2D/' \
'results_evaluation_test_02_3D_ResUnet_lr_0.0001_bs_16_grayscale_16_11_2020_20_17_.csv'
csv_path_4 = '/home/nearlab/Jorge/current_work/' \
'lumen_segmentation/data/lumen_data/' \
'results/compare_3Dvs2D/' \
'results_evaluation_test_02_3DMaskRCNN_2_.csv'
csv_path_5= '/home/nearlab/Jorge/current_work/' \
'lumen_segmentation/data/lumen_data/' \
'results/compare_3Dvs2D/' \
'results_evaluation_test_02_ResUnet_lr_0.001_bs_8_grayscale_03_11_2020_20_08_.csv'
csv_path_6 = '/home/nearlab/Jorge/current_work/' \
'lumen_segmentation/data/lumen_data/' \
'results/compare_3Dvs2D/' \
'results_evaluation_test_02_ensemble_all_data_average.csv'
def read_results_csv(file_path, row_id=0):
dice_values = []
with open(file_path, 'r') as file:
reader = csv.reader(file)
for row in reader:
dice_values.append(float(row[row_id]))
return dice_values
def read_results_csv_str(file_path, row_id=0):
dice_values = []
with open(file_path, 'r') as file:
reader = csv.reader(file)
for row in reader:
dice_values.append(row[row_id])
return dice_values
pre_image_list = read_results_csv_str(csv_path_1, 1)
parameter_data_1 = read_results_csv(csv_path_4, 2)
parameter_data_2 = read_results_csv(csv_path_6, 2)
parameter_data_3 = read_results_csv(csv_path_3, 2)
parameter_data_4 = read_results_csv(csv_path_4, 2)
parameter_data_5 = read_results_csv(csv_path_5, 2)
parameter_data_6 = read_results_csv(csv_path_6, 2)
#maharashtra_ages=np.concatenate((maharashtra_ages1,maharashtra_ages2))
# Welch's two-sample t-test (ttest_ind with equal_var=False is an unpaired test)
result = stats.ttest_ind(a=parameter_data_1,
b=parameter_data_2,
equal_var=False)
print('T-test result')
print(result)
# compare samples
stat, p = kruskal(parameter_data_1,
parameter_data_2)
print('Statistics=%.9f, p=%.9f' % (stat, p))
# interpret
print('again')
print(stat, p)
alpha = 0.05
if p > alpha:
print('Same distributions (fail to reject H0)')
else:
print('Different distributions (reject H0)')
"""plt.figure()
plt.subplot(121)
plt.hist(parameter_data_1, bins = 10)
plt.subplot(122)
plt.hist(parameter_data_2, bins = 10)
plt.show()"""
```
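`mcnemar` is imported above but never called, and `contingency_table` currently returns a placeholder 2x2 table. A minimal sketch of how that table could be fed to the statsmodels McNemar test once it is built from real correctness counts; the numbers below are just the placeholder values from the stub.

```python
from statsmodels.stats.contingency_tables import mcnemar

# Sketch only: the placeholder table returned by contingency_table above.
# Rows: classifier 1 correct/incorrect; columns: classifier 2 correct/incorrect.
table = [[4, 2], [1, 3]]
# exact=True uses the binomial test, recommended when the discordant cells
# (here 2 and 1) are small.
result = mcnemar(table, exact=True)
print('statistic=%.3f, p-value=%.3f' % (result.statistic, result.pvalue))
if result.pvalue > 0.05:
    print('Same proportions of errors (fail to reject H0)')
else:
    print('Different proportions of errors (reject H0)')
```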
#### File: lumen_segmentation/general/erode_and_dilate.py
```python
import os
import numpy as np
import cv2
def dilate(folder, output_folder, kernel_size=3):
kernel = np.ones((kernel_size, kernel_size), np.uint8)
img_list = sorted(os.listdir(folder))
if not(os.path.isdir(output_folder)):
os.mkdir(output_folder)
for j, image in enumerate(img_list[:]):
print(j, image)
img = cv2.imread(os.path.join(folder, image), 1)
dilation = cv2.dilate(img, kernel, iterations=1)
new_name = ''.join([output_folder, image])
cv2.imwrite(new_name, dilation)
def erode(folder, output_folder, kernel_size=3):
kernel = np.ones((kernel_size, kernel_size), np.uint8)
img_list = sorted(os.listdir(folder))
if not(os.path.isdir(output_folder)):
os.mkdir(output_folder)
for j, image in enumerate(img_list[:]):
print(j, image)
img = cv2.imread(os.path.join(folder, image), 1)
erosion = cv2.erode(img, kernel, iterations=1)
new_name = ''.join([output_folder, image])
cv2.imwrite(new_name, erosion)
def main():
path_directory = '/home/nearlab/Jorge/current_work/' \
'lumen_segmentation/data/lumen_data/' \
'test/test_02/label/'
output_folder = '/home/nearlab/Jorge/current_work/' \
'lumen_segmentation/data/' \
'lumen_data/test/test_02/label_dilate_3/'
dilate(path_directory, output_folder, kernel_size=3)
if __name__ == "__main__":
main()
```
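Running erosion followed by dilation with the same kernel is morphological opening, which OpenCV exposes directly and which avoids writing intermediate images to disk. A sketch of that variant under the same folder conventions as above; the function name is illustrative, not from the repository.

```python
import os
import cv2
import numpy as np

def open_labels(folder, output_folder, kernel_size=3):
    # Morphological opening = erosion followed by dilation with the same kernel;
    # removes small speckles without shrinking the surviving regions.
    kernel = np.ones((kernel_size, kernel_size), np.uint8)
    if not os.path.isdir(output_folder):
        os.mkdir(output_folder)
    for name in sorted(os.listdir(folder)):
        img = cv2.imread(os.path.join(folder, name), 1)
        opened = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel, iterations=1)
        cv2.imwrite(os.path.join(output_folder, name), opened)
```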
#### File: lumen_segmentation/general/gather_all_data.py
```python
import os
import random
import shutil
def gather_all_data(source_folder, destination_folder, exceptions):
"""
This function gathers all data from different folders and put it all together in a single folder called "all"
:param source_folders:
:param destination_folder:
:param exceptions:
:return:
"""
folder_list = set(os.listdir(source_folder)) - set(exceptions)
folder_list = sorted([element for element in folder_list if
os.path.isdir(''.join([source_folder, element]))])
for folder in folder_list[:]:
print(folder)
files_path_images = "".join([source_folder, folder, '/image/'])
files_path_labels = "".join([source_folder, folder, '/label/'])
images_list = os.listdir(files_path_images)
labels_list = os.listdir(files_path_labels)
#image_subfolder = sorted([element for element in images_list if os.path.isdir(''.join([source_folder, files_path_images]))])
labels_subfolder = sorted([element for element in labels_list if
os.path.isdir(''.join([source_folder, files_path_labels]))])
if not(labels_subfolder):
destination_image_folder = "".join([destination_folder, 'image/'])
destination_label_folder = "".join([destination_folder, 'label/'])
if not (os.path.isdir(destination_image_folder)):
os.mkdir(destination_image_folder)
if not (os.path.isdir(destination_label_folder)):
os.mkdir(destination_label_folder)
for counter, image in enumerate(images_list[:]):
shutil.copy(files_path_images + image, destination_image_folder + image)
shutil.copy(files_path_labels + image[:-4] + '.png', destination_label_folder + image[:-4] + '.png')
else:
for sub_folder in labels_subfolder:
# TODO: complete this option and the function copy_images_and_label
copy_images_and_label(source_folder, destination_folder, sub_folder)
def copy_images_and_label(source_folder, destination_folder, folder=''):
"""
Copy image/label pairs in one step.
:param source_folder:
:param destination_folder:
:param folder:
:return:
"""
source_folder = "".join([source_folder, '/', folder, '/'])
destination_folder = "".join([destination_folder, '/', folder, '/'])
files_path_images = "".join([source_folder, '/image/'])
files_path_labels = "".join([source_folder, '/label/'])
images_list = os.listdir(files_path_images)
labels_list = os.listdir(files_path_labels)
destination_image_folder = "".join([destination_folder, 'image/'])
destination_label_folder = "".join([destination_folder, 'label/'])
if not (os.path.isdir(destination_image_folder)):
os.mkdir(destination_image_folder)
if not (os.path.isdir(destination_label_folder)):
os.mkdir(destination_label_folder)
for counter, image in enumerate(images_list):
shutil.copy(files_path_images + image, destination_image_folder + image)
shutil.copy(files_path_labels + image, destination_label_folder + image)
return 0
def main():
source_folders = '/home/nearlab/Jorge/current_work/lumen_segmentation/data/3x3_rgb_dataset/all/patients_cases/'
destination_folder = '/home/nearlab/Jorge/current_work/lumen_segmentation/data/3x3_rgb_dataset/all/all/'
exceptions = ['all']
gather_all_data(source_folders, destination_folder, exceptions)
if __name__ == '__main__':
main()
```
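Note that `gather_all_data` silently assumes every image has a label with the same base name and a `.png` extension; a missing label raises an exception mid-copy. A small hedged check (illustrative only) that could be run before copying:

```python
import os

def find_unpaired_images(image_dir, label_dir):
    # Report images whose expected .png label is missing, before any copying.
    images = {f[:-4] for f in os.listdir(image_dir)}
    labels = {f[:-4] for f in os.listdir(label_dir) if f.endswith('.png')}
    return sorted(images - labels)
```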
#### File: lumen_segmentation/general/general_comparison.py
```python
import sys
import os
import numpy as np
import cv2
from glob import glob
from sklearn.model_selection import train_test_split
from sklearn.metrics import average_precision_score
from sklearn.metrics import recall_score
import os.path
from os import path
from PIL import Image
from os import listdir
from os.path import isfile, join
from datetime import datetime
import csv
import matplotlib.pyplot as plt
def get_mcc(groundtruth_list, predicted_list):
"""Return mcc covering edge cases"""
tn, fp, fn, tp = get_confusion_matrix_elements(groundtruth_list, predicted_list)
if _all_class_0_predicted_as_class_0(groundtruth_list, predicted_list) is True:
mcc = 1
elif _all_class_1_predicted_as_class_1(groundtruth_list, predicted_list) is True:
mcc = 1
elif _all_class_1_predicted_as_class_0(groundtruth_list, predicted_list) is True:
mcc = -1
elif _all_class_0_predicted_as_class_1(groundtruth_list, predicted_list) is True :
mcc = -1
elif _mcc_denominator_zero(tn, fp, fn, tp) is True:
mcc = -1
# Finally calculate MCC
else:
mcc = ((tp * tn) - (fp * fn)) / (
np.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn)))
return mcc
def get_confusion_matrix_intersection_mats(groundtruth, predicted):
""" Returns dict of 4 boolean numpy arrays with True at TP, FP, FN, TN
"""
confusion_matrix_arrs = {}
groundtruth_inverse = np.logical_not(groundtruth)
predicted_inverse = np.logical_not(predicted)
confusion_matrix_arrs['tp'] = np.logical_and(groundtruth, predicted)
confusion_matrix_arrs['tn'] = np.logical_and(groundtruth_inverse, predicted_inverse)
confusion_matrix_arrs['fp'] = np.logical_and(groundtruth_inverse, predicted)
confusion_matrix_arrs['fn'] = np.logical_and(groundtruth, predicted_inverse)
return confusion_matrix_arrs
def get_confusion_matrix_overlaid_mask(image, groundtruth, predicted, alpha, colors):
"""
Overlay the 'image' with a color mask where TP, FP, FN and TN are
each shown in the color given by the 'colors' dictionary.
"""
#image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
masks = get_confusion_matrix_intersection_mats(groundtruth, predicted)
color_mask = np.zeros_like(image, dtype=np.float32)
for label, mask in masks.items():
color = colors[label]
mask_rgb = np.zeros_like(image, dtype=np.float32)
#mask_rgb = mask_rgb.astype(int)
size_x, size_y, channels = np.shape(mask)
plt.figure()
plt.title(label)
plt.imshow(mask.astype(np.float32))
for x_index in range(size_x):
for y_index in range(size_y):
if mask[x_index, y_index, 0] != 0: #and mask[x_index, y_index, 1] == 0 and mask[x_index, y_index, 2] == 0:
mask_rgb[x_index, y_index, :] = color
#print(mask_rgb[x_index, y_index, :])
color_mask += mask_rgb
plt.close()
"""for label, mask in masks.items():
color = colors[label]
mask_rgb = np.zeros_like(image)
mask_rgb[mask != 0] = color
color_mask += mask_rgb
return cv2.addWeighted(image, alpha, color_mask, 1 - alpha, 0)"""
return color_mask.astype(np.float32)#cv2.addWeighted(image, 0.1, color_mask, 0.5, 0)
def calculae_rates(image_1, image_2):
image_1 = np.asarray(image_1).astype(np.bool)
image_2 = np.asarray(image_2).astype(np.bool)
image_1 = image_1.flatten()
image_2 = image_2.flatten()
if image_1.shape != image_2.shape:
raise ValueError("Shape mismatch: im1 and im2 must have the same shape.")
precision_value = average_precision_score(image_1, image_2)
recall_value = recall_score(image_1, image_2)
print(precision_value, recall_value)
return precision_value, recall_value
def dice(im1, im2):
im1 = np.asarray(im1).astype(np.bool)
im2 = np.asarray(im2).astype(np.bool)
if im1.shape != im2.shape:
raise ValueError("Shape mismatch: im1 and im2 must have the same shape.")
# Compute Dice coefficient
intersection = np.logical_and(im1, im2)
return 2. * intersection.sum() / (im1.sum() + im2.sum())
def read_img(dir_image):
original_img = cv2.imread(dir_image)
height, width, depth = original_img.shape
img = cv2.resize(original_img, (256, 256))
img = (img > 0.9) * 1.0
return img
# read the results of the validation dataset from a CSV file
def read_results_csv(file_path, row_id=0):
dice_values = []
with open(file_path, 'r') as file:
reader = csv.reader(file)
for row in reader:
dice_values.append(float(row[row_id]))
return dice_values
def read_img_results(dir_image):
#original_img = cv2.imread(dir_image, cv2.COLOR_BGRA2RGBA)
#original_img = cv2.imread(dir_image, cv2.COLOR_BGR2RGB)
original_img = cv2.imread(dir_image)
new_im = cv2.cvtColor(original_img, cv2.COLOR_BGR2RGB)
if original_img is None:
print('Could not open or find the image:', dir_image)
exit(0)
img = cv2.resize(new_im, (256, 256))
return img
def read_mask(dir_image):
original_img = cv2.imread(dir_image)
if original_img is None:
print('Could not open or find the image:', dir_image)
exit(0)
height, width, depth = original_img.shape
img = cv2.resize(original_img, (256, 256))
img = img / 255
img = (img > 0.9) * 1.0
return img
def read_results_csv_plot(file_path):
dice_values = []
with open(file_path, 'r') as file:
reader = csv.reader(file)
for row in reader:
dice_values.append([row[1], row[2]])
return dice_values
def print_box_plots(name_test_csv_file, name_validation_csv_file, save_directory):
path_file_1= name_test_csv_file
path_file_2 = name_validation_csv_file
list_dice_values_file_1 = read_results_csv(path_file_1, 2)
list_dice_values_file_2 = read_results_csv(path_file_2, 2)
data_dice = [list_dice_values_file_1, list_dice_values_file_2]
list_precision_values_file_1 = read_results_csv(path_file_1, 3)
list_precision_values_file_2 = read_results_csv(path_file_2, 3)
data_precision_values = [list_precision_values_file_1, list_precision_values_file_2]
list_recall_file_1 = read_results_csv(path_file_1, 4)
list_recall_file_2 = read_results_csv(path_file_2, 4)
data_recall = [list_recall_file_1, list_recall_file_2]
fig1 = plt.figure(1)
ax1 = fig1.add_subplot(131)
ax1.boxplot(data_dice[0], 1, 'gD')
ax2 = fig1.add_subplot(132)
ax2.boxplot(data_precision_values[0], 1, 'gD')
ax3 = fig1.add_subplot(133)
ax3.boxplot(data_recall[0], 1, 'gD')
ax1.title.set_text('Dice Coeff')
ax2.title.set_text('Precision')
ax3.title.set_text('Recall')
ax1.set_ylim(0, 1)
ax2.set_ylim(0, 1)
ax3.set_ylim(0, 1)
plt.savefig(save_directory + 'results_test.svg')
plt.close()
fig2 = plt.figure(2)
ax1 = fig2.add_subplot(131)
ax1.boxplot(data_dice[1], 1, 'gD')
ax2 = fig2.add_subplot(132)
ax2.boxplot(data_precision_values[1], 1, 'gD')
ax3 = fig2.add_subplot(133)
ax3.boxplot(data_recall[1], 1, 'gD')
ax1.title.set_text('Dice Coeff')
ax2.title.set_text('Precision')
ax3.title.set_text('Recall')
ax1.set_ylim(0, 1)
ax2.set_ylim(0, 1)
ax3.set_ylim(0, 1)
plt.savefig(save_directory + 'results_val.svg')
plt.close()
def compare_results(dir_groundtruth, directories_prediction_1,
dir_csv_file_1,
directories_prediction_2,
dir_csv_file_2,
directories_prediction_3,
dir_csv_file_3,
directories_prediction_4,
dir_csv_file_4,
directories_prediction_5,
dir_csv_file_5,
save_directory):
alpha=0.5
confusion_matrix_colors = {
'tp': (50, 100, 0), # dark green
'fp': (125, 0, 125), # purple
'fn': (0, 100, 50), # teal
'tn': (0, 0, 0) # black
}
path_images_folder = dir_groundtruth + 'image/rgb/'
path_masks_folder = dir_groundtruth + 'label/'
list_dice_values_1 = read_results_csv_plot(dir_csv_file_1)
list_dice_values_2 = read_results_csv_plot(dir_csv_file_2)
list_dice_values_3 = read_results_csv_plot(dir_csv_file_3)
list_dice_values_4 = read_results_csv_plot(dir_csv_file_4)
list_dice_values_5 = read_results_csv_plot(dir_csv_file_5)
image_list = [f for f in listdir(path_images_folder) if isfile(join(path_images_folder, f))]
mask_list = sorted([f for f in listdir(path_masks_folder) if isfile(join(path_masks_folder, f))])
predicted_masks_1 = [f for f in listdir(directories_prediction_1) if isfile(join(directories_prediction_1, f))]
predicted_masks_2 = [f for f in listdir(directories_prediction_2) if isfile(join(directories_prediction_2, f))]
predicted_masks_3 = [f for f in listdir(directories_prediction_3) if isfile(join(directories_prediction_3, f))]
predicted_masks_4 = [f for f in listdir(directories_prediction_4) if isfile(join(directories_prediction_4, f))]
predicted_masks_5 = [f for f in listdir(directories_prediction_5) if isfile(join(directories_prediction_5, f))]
for image in predicted_masks_1[:]:
result_image = [name for name in mask_list if (image[:] == name[:])][0]
if result_image is not None:
path_image = ''.join([path_images_folder, image])
path_mask = ''.join([path_masks_folder, image])
path_predicted_1 = ''.join([directories_prediction_1, image])
path_predicted_2 = ''.join([directories_prediction_2, image])
path_predicted_3 = ''.join([directories_prediction_3, image])
path_predicted_4 = ''.join([directories_prediction_4, image])
path_predicted_5 = ''.join([directories_prediction_5, image])
image_frame = read_img_results(path_image)
mask_image = read_mask(path_mask)
for counter, element in enumerate(list_dice_values_1):
if image == element[0]:
dice_value_1 = float(element[1])
predicted_mask_1 = read_mask(path_predicted_1)
dice_value_1 = float("{:.3f}".format(dice_value_1))
overlay_1 = get_confusion_matrix_overlaid_mask(image_frame, mask_image, predicted_mask_1, alpha,
confusion_matrix_colors)
for counter, element in enumerate(list_dice_values_2):
if image == element[0]:
dice_value_2 = float(element[1])
predicted_mask_2 = read_mask(path_predicted_2)
dice_value_2 = float("{:.3f}".format(dice_value_2))
overlay_2 = get_confusion_matrix_overlaid_mask(image_frame, mask_image, predicted_mask_2, alpha,
confusion_matrix_colors)
for counter, element in enumerate(list_dice_values_3):
if image == element[0]:
dice_value_3 = float(element[1])
predicted_mask_3 = read_mask(path_predicted_3)
dice_value_3 = float("{:.3f}".format(dice_value_3))
overlay_3 = get_confusion_matrix_overlaid_mask(image_frame, mask_image, predicted_mask_3, alpha,
confusion_matrix_colors)
for counter, element in enumerate(list_dice_values_4):
if image == element[0]:
dice_value_4 = float(element[1])
predicted_mask_4 = read_mask(path_predicted_4)
dice_value_4 = float("{:.3f}".format(dice_value_4))
overlay_4 = get_confusion_matrix_overlaid_mask(image_frame, mask_image, predicted_mask_4, alpha,
confusion_matrix_colors)
for counter, element in enumerate(list_dice_values_5):
if image == element[0]:
dice_value_5 = float(element[1])
predicted_mask_5 = read_mask(path_predicted_5)
dice_value_5 = float("{:.3f}".format(dice_value_5))
overlay_5 = get_confusion_matrix_overlaid_mask(image_frame, mask_image,
predicted_mask_5, alpha,
confusion_matrix_colors)
my_dpi = 96
plt.figure(3, figsize=(640/my_dpi, 480/my_dpi), dpi=my_dpi)
print('img:', image[12:-4])
plt.subplot(171)
#plt.title('img:', image[12:-4])
#plt.title('Frame')
plt.imshow(image_frame)
plt.axis('off')
plt.subplot(172)
#plt.title('Mask')
plt.imshow(mask_image)
plt.axis('off')
plt.subplot(173)
title_1 = 'DSC: ' + str(dice_value_1)
#title_1 = str(dice_value_1)
plt.title(title_1)
plt.imshow(overlay_1)
plt.axis('off')
plt.subplot(174)
title_2 = str(dice_value_2)
plt.title(title_2)
plt.imshow(overlay_2)
plt.axis('off')
plt.subplot(175)
title_3 = str(dice_value_3)
plt.title(title_3)
plt.imshow(overlay_3)
plt.axis('off')
plt.subplot(176)
title_3 = str(dice_value_4)
plt.title(title_3)
plt.imshow(overlay_4)
plt.axis('off')
plt.subplot(177)
title_3 = str(dice_value_5)
plt.title(title_3)
plt.imshow(overlay_5)
plt.axis('off')
plt.savefig(''.join([save_directory, image]))
plt.close()
""" for image in predicted_masks_2[:]:
result_image = [name for name in mask_list if (image[:] == name[:])][0]
if result_image is not None:
path_image = ''.join([path_images_folder, image])
path_mask = ''.join([path_masks_folder, image])
path_predicted_2 = ''.join([directories_prediction_2, image])
for counter, element in enumerate(list_dice_values_2):
if image == element[0]:
dice_value_2 = float(element[1])
predicted_mask_2 = read_mask(path_predicted_2)
dice_value_2 = float("{:.3f}".format(dice_value_2))
for image in predicted_masks_3[:]:
result_image = [name for name in mask_list if (image[:] == name[:])][0]
if result_image is not None:
path_predicted_3 = ''.join([directories_prediction_3, image])
for counter, element in enumerate(list_dice_values_3):
if image == element[0]:
dice_value_3 = float(element[1])
predicted_mask_3 = read_mask(path_predicted_3)
dice_value_3 = float("{:.3f}".format(dice_value_3))
my_dpi = 96
plt.figure(3, figsize=(640/my_dpi, 480/my_dpi), dpi=my_dpi)
plt.subplot(151)
plt.title(image)
plt.imshow(image_frame)
plt.axis('off')
plt.subplot(152)
plt.title('Mask')
plt.imshow(mask_image)
plt.axis('off')
plt.subplot(153)
title_1 = 'DSC: ' + str(dice_value_1)
plt.title(title_1)
plt.imshow(predicted_mask_1)
plt.axis('off')
plt.subplot(154)
title_2 = 'DSC: ' + str(dice_value_2)
plt.title(title_2)
plt.imshow(predicted_mask_2)
plt.axis('off')
plt.subplot(155)
title_3 = 'DSC: ' + str(dice_value_3)
plt.title(title_3)
plt.imshow(predicted_mask_3)
plt.axis('off')
plt.savefig(''.join([save_directory, image, '_',str(counter),'_.png']))
plt.close()"""
def crop_images(image_directory, roi, string_to_add):
image_list = [f for f in listdir(image_directory) if isfile(join(image_directory, f))]
for image in image_list:
print('cropping', image)
path_image = ''.join([image_directory, image])
original_img = cv2.imread(path_image)
cropped_img = original_img[roi[1]:roi[3], roi[0]:roi[2]]
new_name = ''.join([image_directory, string_to_add, image])
cv2.imwrite(new_name, cropped_img)
def main():
"""
test_dataset = 'test_03'
project_folder = '/home/nearlab/Jorge/current_work/' \
'lumen_segmentation/data/lumen_data/'
test_directory = ''.join([project_folder, 'test/', test_dataset, '/'])
predictions_test_directory_1 = '/home/nearlab/Jorge/current_work/lumen_segmentation/data/' \
'old_lumen_data/results/' \
'ResUnet_lr_0.001_bs_16_rgb_05_11_2020_16_32/' \
'predictions/' + test_dataset + '/'
name_test_csv_file_1 = '/home/nearlab/Jorge/current_work/lumen_segmentation/data/' \
'old_lumen_data/results/' \
'ResUnet_lr_0.001_bs_16_rgb_05_11_2020_16_32/' \
'results_evaluation_' + test_dataset + '_ResUnet_lr_0.001_bs_16_rgb_05_11_2020_16_32_.csv'
predictions_test_directory_2 = '/home/nearlab/Jorge/current_work/lumen_segmentation/data/' \
'old_lumen_data/results/' \
'MaskRCNN_threshold_0.8_grayscale/' \
'predictions/' + test_dataset + '/'
name_test_csv_file_2 = '/home/nearlab/Jorge/current_work/lumen_segmentation/data/' \
'old_lumen_data/results/' \
'MaskRCNN_threshold_0.8_rgb/' \
'results_evaluation_' + test_dataset + '_MaskRCNN_thershold_0.8_rgb_.csv'
save_directory = '/home/nearlab/Jorge/current_work/lumen_segmentation/data/' \
'old_lumen_data/results/' \
'comparison_MaskRCNN_vs_ResUnet_color/' + test_dataset + '/'"""
"""test_directory = project_folder + 'test_03/'
predictions_test_directory_1 = project_folder + 'predictions/new_data/'
predictions_test_directory_2 = project_folder + 'predictions/old_data/'
name_test_csv_file_1 = project_folder + 'predictions/results_new_data.csv'
name_test_csv_file_2 = project_folder + 'predictions/results_old_data.csv'
save_directory = project_folder + 'results_comparison/'"""
test_directory = '/home/nearlab/Jorge/current_work/lumen_segmentation/data/lumen_data/test/test_02/'
predictions_test_directory_1 = '/home/nearlab/Jorge/current_work/lumen_segmentation/data/lumen_data/results/compare_3Dvs2D/ResUnet_lr_0.001_bs_8_grayscale_03_11_2020_20_08/'
predictions_test_directory_2 = '/home/nearlab/Jorge/current_work/lumen_segmentation/data/lumen_data/results/compare_3Dvs2D/MaskRCNN_thershold_0.8_grayscale_/'
predictions_test_directory_3 = '/home/nearlab/Jorge/current_work/lumen_segmentation/data/lumen_data/results/compare_3Dvs2D/3D_ResUnet_lr_0.001_bs_16_grayscale_16_11_2020_19_37/'
predictions_test_directory_4 = '/home/nearlab/Jorge/current_work/lumen_segmentation/data/lumen_data/results/compare_3Dvs2D/3DMaskRCNN_2/'
predictions_test_directory_5 = '/home/nearlab/Jorge/current_work/lumen_segmentation/data/lumen_data/results/compare_3Dvs2D/ensemble_all_data/average/'
name_test_csv_file_1 = '/home/nearlab/Jorge/current_work/lumen_segmentation/data/lumen_data/results/compare_3Dvs2D/results_evaluation_test_02_ResUnet_lr_0.001_bs_8_grayscale_03_11_2020_20_08_.csv'
name_test_csv_file_2 = '/home/nearlab/Jorge/current_work/lumen_segmentation/data/lumen_data/results/compare_3Dvs2D/results_evaluation_test_02_MaskRCNN_thershold_0.8_grayscale_.csv'
name_test_csv_file_3 = '/home/nearlab/Jorge/current_work/lumen_segmentation/data/lumen_data/results/compare_3Dvs2D/results_evaluation_test_02_3D_ResUnet_lr_0.001_bs_16_grayscale_16_11_2020_19_37_.csv'
name_test_csv_file_4 = '/home/nearlab/Jorge/current_work/lumen_segmentation/data/lumen_data/results/compare_3Dvs2D/results_evaluation_test_02_3DMaskRCNN_2_.csv'
name_test_csv_file_5 = '/home/nearlab/Jorge/current_work/lumen_segmentation/data/lumen_data/results/compare_3Dvs2D/results_evaluation_test_02_ensemble_all_data_average.csv'
save_directory = '/home/nearlab/Jorge/current_work/lumen_segmentation/data/lumen_data/results/compare_3Dvs2D/comparisons/'
compare_results(test_directory,
predictions_test_directory_1, name_test_csv_file_1,
predictions_test_directory_2, name_test_csv_file_2,
predictions_test_directory_3, name_test_csv_file_3,
predictions_test_directory_4, name_test_csv_file_4,
predictions_test_directory_5, name_test_csv_file_5,
save_directory)
roi = [76, 160, 580, 300]
crop_images(save_directory, roi, 'test_')
#crop_images(save_directory_val, roi,'val_')
if __name__ == "__main__":
main()
```
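`get_mcc` calls `get_confusion_matrix_elements` and several `_all_class_*` / `_mcc_denominator_zero` predicates that are not defined anywhere in this file. A minimal reconstruction of what they would need to do for `get_mcc` to run, inferred only from how they are called; these are assumptions, not the repository's original helpers.

```python
import numpy as np

def get_confusion_matrix_elements(groundtruth_list, predicted_list):
    # Counts for two flat binary arrays, returned in the order get_mcc expects.
    gt = np.asarray(groundtruth_list).astype(bool)
    pred = np.asarray(predicted_list).astype(bool)
    tp = int(np.sum(gt & pred))
    tn = int(np.sum(~gt & ~pred))
    fp = int(np.sum(~gt & pred))
    fn = int(np.sum(gt & ~pred))
    return tn, fp, fn, tp

def _all_class_0_predicted_as_class_0(groundtruth_list, predicted_list):
    return not np.any(groundtruth_list) and not np.any(predicted_list)

def _all_class_1_predicted_as_class_1(groundtruth_list, predicted_list):
    return bool(np.all(groundtruth_list)) and bool(np.all(predicted_list))

def _all_class_1_predicted_as_class_0(groundtruth_list, predicted_list):
    return bool(np.all(groundtruth_list)) and not np.any(predicted_list)

def _all_class_0_predicted_as_class_1(groundtruth_list, predicted_list):
    return not np.any(groundtruth_list) and bool(np.all(predicted_list))

def _mcc_denominator_zero(tn, fp, fn, tp):
    # True when any factor of the MCC denominator is zero.
    return (tp + fp) * (tp + fn) * (tn + fp) * (tn + fn) == 0
```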
#### File: lumen_segmentation/general/generate_training_and_validation.py
```python
import os
import random
import shutil
def generate_training_and_validation_sets(current_directory, output_directory, training_percentage=0.5):
"""
:param current_directory:
:param output_directory:
:param training_percentage:
:return:
"""
files_path_images = "".join([current_directory, 'image/'])
files_path_labels = "".join([current_directory, 'label/'])
original_images = os.listdir(files_path_images)
label_images = os.listdir(files_path_labels)
training_dir = output_directory + 'train/'
validation_dir = output_directory + 'val/'
original_images = [image[:-4] for image in original_images]
label_images = [image[:-4] for image in label_images]
for count_i, image in enumerate(original_images):
if random.random() <= training_percentage:
if image in label_images:
print(image, 'image and label exist')
shutil.copy(files_path_images + image + '.png', "".join([training_dir, 'image/', image, '.png']))
shutil.copy(files_path_labels + image + '.png', "".join([training_dir, 'label/', image, '.png']))
else:
print(image, 'the pair does not exist')
else:
if image in label_images:
print(image, 'image and label exist')
shutil.copy(files_path_images + image + '.png', "".join([validation_dir, 'image/', image, '.png']))
shutil.copy(files_path_labels + image + '.png', "".join([validation_dir, 'label/', image, '.png']))
else:
print(image, 'the pair does not exist')
def main():
data_directory = '/home/nearlab/Jorge/current_work/lumen_segmentation/data/' \
'lumen_data/test/phantom_001_pt1/original_data/'
output_directory = '/home/nearlab/Jorge/current_work/lumen_segmentation/data/lumen_data/'
generate_training_and_validation_sets(data_directory,
output_directory,
training_percentage=0.65)
if __name__ == '__main__':
main()
```
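Because the split above draws a fresh `random.random()` per image, train/validation membership changes on every run and the realized ratio only approximates `training_percentage`. A sketch of a reproducible alternative using scikit-learn; the helper function and seed are illustrative assumptions.

```python
import os
from sklearn.model_selection import train_test_split

def split_image_names(image_folder, training_percentage=0.65, seed=42):
    # Deterministic split: the same train/validation membership on every run.
    names = sorted(f[:-4] for f in os.listdir(image_folder) if f.endswith('.png'))
    train_names, val_names = train_test_split(names,
                                              train_size=training_percentage,
                                              random_state=seed,
                                              shuffle=True)
    return train_names, val_names
```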
#### File: lumen_segmentation/general/list_artifacts.py
```python
import csv
import cv2
import numpy as np
import matplotlib.patches as patches
from matplotlib import pyplot as plt
import os
def read_file_txt(file_path, row_id=0):
color = []
point_1 = []
point_2 = []
point_3 = []
point_4 = []
with open(file_path, 'r') as file:
reader = csv.reader(file)
for row in reader:
line = row[0].split()
color.append(float(line[0]))
point_1.append(float(line[1]))
point_2.append(float(line[2]))
point_3.append(float(line[3]))
point_4.append(float(line[4]))
return color, point_1, point_2, point_3, point_4
def build_rectangle(center_x, center_y, width, height):
x_min = center_x - int(width/2)
x_max = center_x + int(width / 2)
y_min = center_y - int(height / 2)
y_max = center_y + int(height / 2)
points_x = [x_min, x_max, x_max, x_min, x_min]
points_y = [y_min, y_min, y_max, y_max, y_min]
return points_x, points_y
def remove_duplicates(duplicate):
final_list = []
for num in duplicate:
if num not in final_list:
final_list.append(num)
print(final_list)
return final_list
def prepare_data(dir_images, dir_csv_files, plot=False):
name_test_csv_file = 'name_file.csv'
list_csv_files = os.listdir(dir_csv_files)
list_csv_files = sorted([file[:-4] for file in list_csv_files if file.endswith('.txt')])
list_imgs =sorted([file for file in os.listdir(dir_images) if file.endswith('.png')])
unique_colors = []
unique_imgs = []
for image in list_imgs:
print(image)
if image[:-4] in list_csv_files:
img = cv2.imread(dir_images + image)
w, h, d = np.shape(img)
colours, pnts_xmax, pnts_ymax, pnts_xmin, pnts_ymin = read_file_txt(dir_csv_files + image[:-4] + '.txt')
unique_imgs.append(image)
unique_colors.append(remove_duplicates(colours))
pnts_ymax = [int(point*w) for point in pnts_ymax]
pnts_xmax = [int(point*h) for point in pnts_xmax]
pnts_ymin = [int(point*w) for point in pnts_ymin]
pnts_xmin = [int(point*h) for point in pnts_xmin]
with open(name_test_csv_file, mode='w') as results_file:
results_file_writer = csv.writer(results_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
results_file_writer.writerow(['num', 'name',
'specularity', 'saturation',
'artifact', 'blur', 'bubbles'])
for i, file in enumerate(unique_imgs):
if 0.0 in unique_colors[i]:
specularity = 1
else:
specularity = 0
if 1.0 in unique_colors[i]:
saturation = 1
else:
saturation = 0
if 2.0 in unique_colors[i]:
artifact = 1
else:
artifact = 0
if 3.0 in unique_colors[i]:
blur = 1
else:
blur = 0
if 5.0 in unique_colors[i]:
bubbles = 1
else:
bubbles = 0
results_file_writer.writerow([str(i), file,
specularity,
saturation,
artifact,
blur,
bubbles])
if plot is True:
plt.figure()
plt.imshow(img)
for j, color in enumerate(colours):
print(color)
if color == 0.0:
# specularity
col = 'red'
elif color == 1.0:
# saturation
col = 'green'
elif color == 2.0:
# artifact
col = 'purple'
elif color == 3.0:
# blur
col = 'pink'
elif color == 4.0:
# contrast
col = 'yellow'
elif color == 5.0:
# bubbles
col = 'orange'
elif color == 6.0:
# instrument
col = 'white'
else:
# blood
col = 'black'
pnts_x, pnts_y = build_rectangle(pnts_xmax[j],
pnts_ymax[j],
pnts_xmin[j],
pnts_ymin[j])
plt.plot(pnts_xmax[j], pnts_ymax[j], '*')
plt.plot(pnts_x, pnts_y, color=col)
plt.show()
#print(list_csv_files)
#print(list_imgs)
def main():
dir_csv_files = '/home/nearlab/Jorge/data/EAD_2019/trainingData_detection/trainingData_detection/'
dir_images = '/home/nearlab/Jorge/DATASETS/EAD_2019/image/'
prepare_data(dir_images, dir_csv_files)
if __name__ == '__main__':
main()
```
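The two if/elif ladders above hard-code the same class codes twice (0 = specularity, 1 = saturation, 2 = artifact, 3 = blur, 4 = contrast, 5 = bubbles, 6 = instrument, anything else blood). A small sketch of a single lookup table that could drive both the CSV flags and the plot colors; the mapping is copied from the comments above and the helper is illustrative.

```python
# Class codes as used above: (name, plotting color) per code.
ARTIFACT_CLASSES = {
    0.0: ('specularity', 'red'),
    1.0: ('saturation', 'green'),
    2.0: ('artifact', 'purple'),
    3.0: ('blur', 'pink'),
    4.0: ('contrast', 'yellow'),
    5.0: ('bubbles', 'orange'),
    6.0: ('instrument', 'white'),
}

def class_info(code):
    # Anything outside the table is treated as blood, matching the else branch.
    return ARTIFACT_CLASSES.get(code, ('blood', 'black'))
```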
#### File: lumen_segmentation/general/load_npy.py
```python
import os
import random
import cv2
from tqdm import tqdm, trange
import numpy as np
from matplotlib import pyplot as plt
from os import listdir
def load_npy_data(X_test_path, Y_test_path):
print('-' * 30)
print('load test images...')
print('-' * 30)
test_dir = listdir(X_test_path)
test_dir.sort()
test_mask_dir = listdir(Y_test_path)
test_mask_dir.sort()
img_size = 256
volume_size = 3
imgs_test = np.empty([0, volume_size, img_size, img_size, 3], dtype=np.float32)
mask_test = np.empty([0, volume_size, img_size, img_size, 3], dtype=np.float32)
for t in tqdm(test_dir[:]):
X_vol = np.load(os.path.join(X_test_path, t), allow_pickle=True)
print(np.shape(X_vol))
#X_vol = np.moveaxis(X_vol, -2, 0)
#print(np.shape(X_vol))
#imgs_test = np.append(imgs_test, [X_vol / 255], axis=0)
imgs_test = np.append(imgs_test, [X_vol/255], axis=0)
for tm in tqdm(test_mask_dir[:]):
Y_vol = np.load(os.path.join(Y_test_path, tm), allow_pickle=True)
#Y_vol = np.moveaxis(Y_vol, -2, 0)
#print('Y_vol', np.shape(Y_vol))
#print(np.amin(Y_vol), np.amax(Y_vol))
mask_test = np.append(mask_test, [Y_vol / 255], axis=0)
# y_new = np.empty([256, 256, 1], dtype=np.float32)
# Y_vol[:, :, 0] = cv2.cvtColor(Y_vol[:, :, :], cv2.COLOR_RGB2GRAY)
# y_new[:, :, 0] = cv2.threshold(Y_vol[:, :, 0], 127, 255, cv2.THRESH_BINARY)[1] / 255
# mask_test = np.append(mask_test, [y_new], axis=0)
return imgs_test, mask_test
def main():
path = '/home/nearlab/Jorge/current_work/lumen_segmentation/data/' \
'3D_volume_data/train/'
path_images = path + 'image/'
path_labels = path + 'label/'
images, labels = load_npy_data(path_images, path_labels)
print(np.shape(images), np.shape(labels))
index = 25
image_tes_1 = images[index][0]
mask_test_1 = labels[index][0]
image_tes_2 = images[index][1]
mask_test_2 = labels[index][1]
image_tes_3 = images[index][2]
mask_test_3 = labels[index][2]
plt.figure()
plt.subplot(421)
plt.imshow(image_tes_1)
plt.subplot(422)
plt.imshow(mask_test_1)
plt.subplot(423)
plt.imshow(image_tes_2)
plt.subplot(424)
plt.imshow(mask_test_2)
plt.subplot(425)
plt.imshow(image_tes_3)
plt.subplot(426)
plt.imshow(mask_test_3)
plt.show()
if __name__ == "__main__":
main()
```
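`np.append` inside the loops above reallocates and copies the whole array on every iteration, which becomes slow for long volume lists. A sketch of the usual list-then-stack pattern, assuming the same (3, 256, 256, 3) volumes; the function name is illustrative.

```python
import os
import numpy as np
from tqdm import tqdm

def load_npy_volumes(volume_dir):
    # Collect into a Python list and stack once instead of np.append in a loop.
    volumes = []
    for name in tqdm(sorted(os.listdir(volume_dir))):
        vol = np.load(os.path.join(volume_dir, name), allow_pickle=True)
        volumes.append(vol.astype(np.float32) / 255.0)
    return np.stack(volumes, axis=0)  # shape: (N, 3, 256, 256, 3)
```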
#### File: lumen_segmentation/general/test_hough.py
```python
import numpy as np
import cv2
from matplotlib import pyplot as plt
import os
from skimage import data, color, img_as_ubyte
from skimage.feature import canny
from skimage.transform import hough_ellipse
from skimage.draw import ellipse_perimeter
import time
import csv
import imutils
import copy
def detect_circle(image_name):
img = cv2.imread(image_name, 0)
img = cv2.medianBlur(img, 5)
cimg = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
circles = cv2.HoughCircles(img, cv2.HOUGH_GRADIENT, 1, 20,
param1=50, param2=30, minRadius=45, maxRadius=80)
circles = np.uint16(np.around(circles))
for i in circles[0, :]:
# draw the outer circle
cv2.circle(cimg, (i[0], i[1]), i[2], (0, 255, 0), 2)
# draw the center of the circle
cv2.circle(cimg, (i[0], i[1]), 2, (0, 0, 255), 3)
plt.figure()
plt.imshow(cimg)
plt.show()
def detect_square(image_name):
print(image_name)
img = cv2.imread(image_name)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray, 90, 200, apertureSize=3)
lines = cv2.HoughLines(edges, 1, np.pi / 360, 60)
if lines is not None:
list_1 = [value[0][0] for value in lines]
list_2 = [value[0][1] for value in lines]
for element in lines[:4]:
rho = element[0][0]
theta = element[0][1]
print('rho', rho, 'theta', theta)
draw = False
if theta>-5*np.pi/180 and theta<5*np.pi/180:
#if theta in range(-5, 5) or theta in range(-85, 55):
draw = True
if theta>85*np.pi/180 and theta<95*np.pi/180:
draw = True
if draw is True:
a = np.cos(theta)
b = np.sin(theta)
x0 = a * rho
y0 = b * rho
x1 = int(x0 + 1000 * (-b))
y1 = int(y0 + 1000 * (a))
x2 = int(x0 - 1000 * (-b))
y2 = int(y0 - 1000 * (a))
cv2.line(img, (x1, y1), (x2, y2), (0, 0, 255), 2)
plt.figure()
plt.subplot(121)
plt.plot(list_1, list_2, '*')
plt.subplot(122)
plt.imshow(img)
plt.show()
def calculate_elipse_hough(image_gray, sigma, high_threshold):
edges = canny(image_gray, sigma=sigma,
low_threshold=0.01, high_threshold=high_threshold)
plt.figure()
plt.imshow(edges)
plt.show()
init_time = time.time()
shape = detect_shape(edges)
print(shape)
# Perform a Hough Transform
# The accuracy corresponds to the bin size of a major axis.
# The value is chosen in order to get a single high accumulator.
# The threshold eliminates low accumulators
result = hough_ellipse(edges, accuracy=20, threshold=250, min_size=100)
delta = time.time() - init_time
print(type(result))
print(np.shape(result))
print(result[0])
print(result)
print(delta)
return result, delta
def detect_shape(img_array_edges):
#blurred = cv2.GaussianBlur(img_array_edges, (5, 5), 0)
print(np.unique(img_array_edges))
th, thresh = cv2.threshold(np.float32(img_array_edges), 10,
255, cv2.THRESH_BINARY)
print(np.shape(thresh))
print(np.unique(thresh))
## Find contours on the binary threshed image
black_and_white = img_array_edges*255
print(np.unique(black_and_white))
plt.figure()
plt.imshow(black_and_white)
plt.show()
# findContours expects an 8-bit single-channel image; keep the full return
# value so imutils.grab_contours can handle both OpenCV 3 and 4 signatures.
cnts = cv2.findContours(np.uint8(black_and_white),
cv2.RETR_LIST,
cv2.CHAIN_APPROX_SIMPLE)
print(np.unique(thresh))
plt.figure()
plt.imshow(thresh)
plt.show()
cnts = imutils.grab_contours(cnts)
shape = "unidentified"
# approximate the largest contour rather than the whole contour list
largest_contour = max(cnts, key=cv2.contourArea)
peri = cv2.arcLength(largest_contour, True)
approx = cv2.approxPolyDP(largest_contour, 0.04 * peri, True)
# if the shape is a triangle, it will have 3 vertices
if len(approx) == 3:
shape = "triangle"
# if the shape has 4 vertices, it is either a square or
# a rectangle
elif len(approx) == 4:
# compute the bounding box of the contour and use the
# bounding box to compute the aspect ratio
(x, y, w, h) = cv2.boundingRect(approx)
ar = w / float(h)
# a square will have an aspect ratio that is approximately
# equal to one, otherwise, the shape is a rectangle
shape = "square" if ar >= 0.95 and ar <= 1.05 else "rectangle"
# if the shape is a pentagon, it will have 5 vertices
elif len(approx) == 5:
shape = "pentagon"
elif len(approx) > 6 and len(approx) < 12:
shape = "octagon"
# otherwise, we assume the shape is a circle
else:
shape = "circle"
# return the name of the shape
return shape
def detect_edge(path_image):
image_rgb = cv2.imread(path_image)
image_gray = cv2.cvtColor(image_rgb, cv2.COLOR_BGR2GRAY)
high_threshold = 100
sigma = 3
result, delta = calculate_elipse_hough(image_gray, sigma, high_threshold)
counter = 0
"""while len(result) == 0 or counter == 3:
sigma = sigma - 0.1
high_threshold = high_threshold - 0.1
result, delta = calculate_elipse_hough(image_gray, sigma, high_threshold)
counter = counter + 1"""
# Estimated parameters for the ellipse
if counter <= 3:
best = list(result[-1])
yc, xc, a, b = [int(round(x)) for x in best[1:5]]
orientation = best[5]
# Draw the ellipse on the original image
cy, cx = ellipse_perimeter(yc, xc, a, b, orientation)
image_rgb[:, np.min(cx)-15] = (255, 0, 0)
image_rgb[:, np.max(cx)+15] = (255, 0, 0)
image_rgb[np.min(cy)-15, :] = (255, 0, 0)
image_rgb[np.max(cy)+15, :] = (255, 0, 0)
image_rgb[cy, cx] = (0, 255, 0)
return image_rgb, delta
def prepare_data(path_images_folder, output_dir=''):
image_list = sorted([f for f in os.listdir(path_images_folder)
if os.path.isfile(os.path.join(path_images_folder, f))])
time_stats = []
for image in image_list:
print(image)
detect_circle(path_images_folder + image)
"""path_image = ''.join([path_images_folder, image])
edge_detected, time = detect_edge(path_image)
cv2.imwrite(''.join([path_images_folder, 'test/',
image]), edge_detected)
time_stats.append(time)"""
# modified_image = crop_image(path_image)
csv_file_name = path_images_folder + 'time_test.csv'
with open(csv_file_name, mode='w') as results_file:
results_file_writer = csv.writer(results_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
results_file_writer.writerow(time_stats)
def main():
list_images_dir = '/home/nearlab/Desktop/test_shapes/'
prepare_data(list_images_dir)
if __name__ == '__main__':
main()
```
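`detect_edge` takes `result[-1]` as the best ellipse, but `hough_ellipse` does not guarantee any ordering, so the accumulator should be sorted first (as in the scikit-image example). A sketch of that step reusing the thresholds from above; treat the helper as an assumption about intent rather than the original code.

```python
from skimage.feature import canny
from skimage.transform import hough_ellipse

def best_ellipse(image_gray, sigma=3, high_threshold=100):
    # Sort by accumulator score so result[-1] really is the strongest ellipse.
    edges = canny(image_gray, sigma=sigma,
                  low_threshold=0.01, high_threshold=high_threshold)
    result = hough_ellipse(edges, accuracy=20, threshold=250, min_size=100)
    result.sort(order='accumulator')
    best = list(result[-1])
    yc, xc, a, b = (int(round(x)) for x in best[1:5])
    orientation = best[5]
    return yc, xc, a, b, orientation
```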
#### File: lumen_segmentation/general/test_shape_detection.py
```python
import cv2
import numpy as np
import imutils
from matplotlib import pyplot as plt
from skimage.feature import canny
def detect(c):
# initialize the shape name and approximate the contour
shape = "unidentified"
peri = cv2.arcLength(c, True)
approx = cv2.approxPolyDP(c, 0.04 * peri, True)
# if the shape is a triangle, it will have 3 vertices
if len(approx) == 3:
shape = "triangle"
# if the shape has 4 vertices, it is either a square or
# a rectangle
elif len(approx) == 4:
# compute the bounding box of the contour and use the
# bounding box to compute the aspect ratio
(x, y, w, h) = cv2.boundingRect(approx)
ar = w / float(h)
# a square will have an aspect ratio that is approximately
# equal to one, otherwise, the shape is a rectangle
shape = "square" if ar >= 0.95 and ar <= 1.05 else "rectangle"
# if the shape is a pentagon, it will have 5 vertices
elif len(approx) == 5:
shape = "pentagon"
# otherwise, we assume the shape is a circle
else:
shape = "circle"
# return the name of the shape
return shape
image_dir = '/home/nearlab/Downloads/test_shapes/example_2.png'
# load the image and resize it to a smaller factor so that
# the shapes can be approximated better
image = cv2.imread(image_dir)
resized = imutils.resize(image, width=300)
ratio = image.shape[0] / float(resized.shape[0])
# convert the resized image to grayscale, blur it slightly,
# and threshold it
gray = cv2.cvtColor(resized, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(gray, (5, 5), 0)
print(np.unique(gray))
edges = canny(gray, sigma=3.5,
low_threshold=0.01, high_threshold=0.6)
x_lim = np.shape(edges)[0]
y_lim = np.shape(edges)[1]
print(x_lim, y_lim)
for x in range(0, x_lim):
for y in range(0, y_lim):
if edges[x, y] == True:
print(x, y, edges[x, y])
edges[x, y] = 255
else:
edges[x, y] = 0
print('edges', np.unique(edges))
thresh = cv2.threshold(gray, 150, 255, cv2.THRESH_BINARY)[1]
print(np.unique(thresh))
# find contours in the thresholded image and initialize the
# shape detector
cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
print('edges', type(edges), np.shape(edges), np.amin(edges), np.amax(edges))
print('thres, ', type(thresh), np.shape(thresh), np.amin(thresh), np.amax(thresh))
for c in cnts:
#print(cnts[0][1][0])
#plt.figure()
#cnts_0 = [numbers for numbers in cnts[0][0]]
#print(cnts_0)
#plt.plot(cnts[0])
#plt.show()
# compute the center of the contour, then detect the name of the
# shape using only the contour
#M = cv2.moments(cv2.UMat(cnts))
#cX = int((M["m10"] / M["m00"]) * ratio)
#cY = int((M["m01"] / M["m00"]) * ratio)
shape = detect(c)
print(shape)
# multiply the contour (x, y)-coordinates by the resize ratio,
# then draw the contours and the name of the shape on the image
c = c.astype("float")
c *= ratio
c = c.astype("int")
cv2.drawContours(image, [c], -1, (0, 255, 0), 2)
#cv2.putText(image, shape, (cX, cY), cv2.FONT_HERSHEY_SIMPLEX,
# 0.5, (255, 255, 255), 2)
# show the output image
plt.figure()
plt.subplot(221)
plt.imshow(thresh)
plt.subplot(222)
plt.imshow(edges)
plt.subplot(223)
plt.imshow(gray)
plt.subplot(224)
plt.imshow(image)
plt.figure()
plt.imshow(image)
plt.show()
```
#### File: lumen_segmentation/general/violin_plots.py
```python
import matplotlib.pyplot as plt
import numpy as np
import csv
def adjacent_values(vals, q1, q3):
upper_adjacent_value = q3 + (q3 - q1) * 1.5
upper_adjacent_value = np.clip(upper_adjacent_value, q3, vals[-1])
lower_adjacent_value = q1 - (q3 - q1) * 1.5
lower_adjacent_value = np.clip(lower_adjacent_value, vals[0], q1)
return lower_adjacent_value, upper_adjacent_value
def set_axis_style(ax, labels):
ax.get_xaxis().set_tick_params(direction='out')
ax.xaxis.set_ticks_position('bottom')
ax.set_xticks(np.arange(1, len(labels) + 1))
ax.set_xticklabels(labels)
ax.set_xlim(0.25, len(labels) + 0.75)
ax.set_xlabel('Sample name')
def read_results_csv(file_path, row_id=0):
dice_values = []
with open(file_path, 'r') as file:
reader = csv.reader(file)
for row in reader:
dice_values.append(float(row[row_id]))
return dice_values
# create test data
np.random.seed(19680801)
path_file_1 = '/home/nearlab/Jorge/current_work/lumen_segmentation/data/' \
'3x3_grayscale_dataset/results/' \
'ResUnet_lr_0.001_bs_16_grayscale_16_11_2020_19_37/' \
'results_evaluationtest_01_ResUnet_lr_0.001_bs_16_grayscale_16_11_2020_19_37_new.csv'
path_file_2 = '/home/nearlab/Jorge/current_work/lumen_segmentation/data/' \
'3x3_grayscale_dataset/results/' \
'ResUnet_lr_1e-05_bs_16_grayscale_16_11_2020_19_32/' \
'results_evaluationtest_01_ResUnet_lr_1e-05_bs_16_grayscale_16_11_2020_19_32_new.csv'
path_file_3 = '/home/nearlab/Jorge/current_work/lumen_segmentation/data/' \
'lumen_data/results/' \
'ResUnet_lr_0.001_bs_16_hsv_14_11_2020_20_06/' \
'results_evaluation_test_02_ResUnet_lr_0.001_bs_16_hsv_14_11_2020_20_06_.csv'
path_file_4 = '/home/nearlab/Jorge/current_work/lumen_segmentation/data/' \
'lumen_data/results/' \
'ResUnet_lr_0.001_bs_16_rgb_06_11_2020_00_51/' \
'results_evaluation_test_02_ResUnet_lr_0.001_bs_16_rgb_06_11_2020_00_51_.csv'
path_file_5 = '/home/nearlab/Jorge/current_work/lumen_segmentation/' \
'data/' \
'3x3_grayscale_dataset/results/MaskRCNN_2/' \
'results_evaluationtest_02_MaskRCNN_2_new.csv'
path_file_6 = '/home/nearlab/Jorge/current_work/lumen_segmentation/' \
'data/3x3_grayscale_dataset/' \
'results/MaskRCNN_2/' \
'results_evaluationtest_02_MaskRCNN_2_new.csv'
data_experiment_1 = sorted(read_results_csv(path_file_1, 2))
data_experiment_2 = read_results_csv(path_file_2, 2)
data_experiment_3 = read_results_csv(path_file_3, 2)
data_experiment_4 = sorted(read_results_csv(path_file_4, 2))
data_experiment_5 = sorted(read_results_csv(path_file_5, 2))
data_experiment_6 = sorted(read_results_csv(path_file_6, 2))
#data = [data_experiment_1, data_experiment_4, data_experiment_5, data_experiment_6]
#data = [sorted(np.random.normal(0, std, 100)) for std in range(1, 5)]
data = [data_experiment_1, data_experiment_2,
data_experiment_3, 0, 0, 0]
data_2 = [0,0,0, data_experiment_4,
data_experiment_5, data_experiment_6]
print(np.shape(data))
fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2,
figsize=(9, 5), sharey=True)
ax1.set_title('Default violin plot')
ax1.set_ylabel('Observed values')
ax1.violinplot(data)
ax1.violinplot(data_2)
ax2.set_title('Customized violin plot')
parts = ax2.violinplot(
data, showmeans=True, showmedians=True,
showextrema=True)
"""
for pc in parts['bodies']:
pc.set_facecolor('#D43F3A')
pc.set_edgecolor('black')
pc.set_alpha(1)
quartile1, medians, quartile3 = np.percentile(data, [25, 50, 100], axis=1)
print(quartile1, medians, quartile3)
whiskers = np.array([
adjacent_values(sorted_array, q1, q3)
for sorted_array, q1, q3 in zip(data, quartile1, quartile3)])
whiskers_min, whiskers_max = whiskers[:, 0], whiskers[:, 1]
inds = np.arange(1, len(medians) + 1)
ax2.scatter(inds, medians, marker='o', color='white', s=30, zorder=3)
ax2.vlines(inds, quartile1, quartile3, color='k', linestyle='-', lw=5)
ax2.vlines(inds, whiskers_min, whiskers_max, color='k', linestyle='-', lw=1)
"""
# set style for the axes
labels = ['A', 'B', 'C', 'D', 'E', 'F']
for ax in [ax1, ax2]:
set_axis_style(ax, labels)
plt.subplots_adjust(bottom=0.15, wspace=0.05)
plt.show()
```
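The scalar zeros padding `data` and `data_2` are there to push the two groups into different x slots, but `violinplot` expects a sequence of samples per slot and will fail on a bare 0. The `positions` argument achieves the same layout without placeholders; a sketch assuming the six `data_experiment_*` lists loaded above.

```python
import matplotlib.pyplot as plt

# Sketch: two groups of three distributions at x = 1..3 and x = 4..6.
fig, ax = plt.subplots(figsize=(9, 5))
ax.violinplot([data_experiment_1, data_experiment_2, data_experiment_3],
              positions=[1, 2, 3])
ax.violinplot([data_experiment_4, data_experiment_5, data_experiment_6],
              positions=[4, 5, 6])
ax.set_xticks(range(1, 7))
ax.set_xticklabels(['A', 'B', 'C', 'D', 'E', 'F'])
plt.show()
```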
#### File: lumen_segmentation/models/res_unet.py
```python
project_folder = '/home/nearlab/Jorge/current_work/lumen_segmentation/data/lumen_data/'
image_modality = 'rgb'
augmented = True
if augmented is True:
amount_data = '/augmented_data/'
else:
amount_data = '/original/'
analyze_validation_set = False
evaluate_train_dir = False
import sys
sys.path.append(project_folder)
import numpy as np
import cv2
from glob import glob
from sklearn.model_selection import train_test_split
import tensorflow as tf
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau, CSVLogger, TensorBoard
from tqdm import tqdm
import tensorflow as tf
import keras.backend as K
from tensorflow.keras.backend import sum as suma
from tensorflow.keras.backend import mean
from tensorflow.keras.layers import *
from tensorflow.keras.models import Model
from keras.utils import CustomObjectScope
import os.path
from os import path
from PIL import Image
from os import listdir
from os.path import isfile, join
from datetime import datetime
import csv
import matplotlib.pyplot as plt
from sklearn.metrics import average_precision_score
from sklearn.metrics import recall_score
def load_data(path):
path_images = ''.join([path, 'image/', image_modality, "/*"])
path_labels = ''.join([path, "label/*"])
images = sorted(glob(path_images))
masks = sorted(glob(path_labels))
total_size_images = len(images)
total_size_labels = len(masks)
print('total size images:', total_size_images, path_images)
print('total size labels:', total_size_labels, path_labels)
return (images, masks)
def read_image(path):
path = path.decode()
x = cv2.imread(path, 1)
x = cv2.resize(x, (256, 256))
x = x / 255.0
return x
def read_mask(path):
path = path.decode()
x = cv2.imread(path)
x = cv2.cvtColor(x, cv2.COLOR_BGR2GRAY)
x = cv2.resize(x, (256, 256))
x = x / 255.0
x = np.expand_dims(x, axis=-1)
return x
def tf_parse(x, y):
def _parse(x, y):
x = read_image(x)
y = read_mask(y)
return x, y
x, y = tf.numpy_function(_parse, [x, y], [tf.float64, tf.float64])
x.set_shape([256, 256, 3])
y.set_shape([256, 256, 1])
return x, y
def tf_dataset(x, y, batch=8):
dataset = tf.data.Dataset.from_tensor_slices((x, y))
dataset = dataset.map(tf_parse)
dataset = dataset.batch(batch)
dataset = dataset.repeat()
return dataset
def iou(y_true, y_pred):
def f(y_true, y_pred):
intersection = (y_true * y_pred).sum()
union = y_true.sum() + y_pred.sum() - intersection
x = (intersection + 1e-15) / (union + 1e-15)
x = x.astype(np.float32)
return x
return tf.numpy_function(f, [y_true, y_pred], tf.float32)
"""def dice_coef(y_true, y_pred, smooth=1):
def f (y_true, y_pred):
intersection = suma(y_true * y_pred, axis=[1,2,3])
union = suma(y_true, axis=[1,2,3]) + suma(y_pred, axis=[1,2,3])
x = mean( (2. * intersection + smooth) / (union + smooth), axis=0)
#x = x.astype(np.float32)
return x
return tf.numpy_function(f, [y_true, y_pred], tf.float32)"""
def dice_coef(y_true, y_pred, smooth=0.00001):
intersection = K.sum(y_true * y_pred, axis=[1, 2, 3])
union = K.sum(y_true, axis=[1, 2, 3]) + K.sum(y_pred, axis=[1, 2, 3])
return K.mean((2. * intersection + smooth) / (union + smooth), axis=0)
def dice_coef_loss(y_true, y_pred):
return 1 - dice_coef(y_true, y_pred)
def conv_block(x, num_filters):
x = Conv2D(num_filters, (3, 3), padding="same")(x)
x = BatchNormalization()(x)
skip = Conv2D(num_filters, (3, 3), padding="same")(x)
#skip = Activation("relu")(skip)
skip = BatchNormalization()(skip)
x = Conv2D(num_filters, (3, 3), padding="same")(x)
x = BatchNormalization()(x)
x = Activation("relu")(x)
x = tf.math.add_n([x, skip])
x = Activation("relu")(x)
return x
def build_model():
size = 256
num_filters = [16, 32, 48, 64]
inputs = Input((size, size, 3))
skip_x = []
x = inputs
## Encoder
for f in num_filters:
x = conv_block(x, f)
skip_x.append(x)
x = MaxPool2D((2, 2))(x)
## Bridge
x = conv_block(x, num_filters[-1])
num_filters.reverse()
skip_x.reverse()
## Decoder
for i, f in enumerate(num_filters):
x = UpSampling2D((2, 2))(x)
xs = skip_x[i]
x = Concatenate()([x, xs])
x = conv_block(x, f)
## Output
x = Conv2D(1, (1, 1), padding="same")(x)
x = Activation("sigmoid")(x)
return Model(inputs, x)
def mask_parse(mask):
mask = np.squeeze(mask)
mask = [mask, mask, mask]
mask = np.transpose(mask, (1, 2, 0))
return mask
def read_image_test(path):
x = cv2.imread(path, cv2.IMREAD_COLOR)
x = cv2.resize(x, (256, 256))
x = x / 255.0
return x
def read_mask_test(path):
x = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
x = cv2.resize(x, (256, 256))
x = np.expand_dims(x, axis=-1)
return x
def calculae_rates(image_1, image_2):
image_1 = np.asarray(image_1).astype(np.bool)
image_2 = np.asarray(image_2).astype(np.bool)
image_1 = image_1.flatten()
image_2 = image_2.flatten()
if image_1.shape != image_2.shape:
raise ValueError("Shape mismatch: im1 and im2 must have the same shape.")
precision_value = average_precision_score(image_1, image_2)
recall_value = recall_score(image_1, image_2)
return precision_value, recall_value
def dice(im1, im2, smooth=1):
im1 = np.asarray(im1).astype(np.bool)
im2 = np.asarray(im2).astype(np.bool)
if im1.shape != im2.shape:
raise ValueError("Shape mismatch: im1 and im2 must have the same shape.")
# Compute Dice coefficient
intersection = np.logical_and(im1, im2)
return 2. * (intersection.sum() + smooth) / (im1.sum() + im2.sum() + smooth)
def read_img(dir_image):
original_img = cv2.imread(dir_image)
height, width, depth = original_img.shape
img = cv2.resize(original_img, (256, 256))
return img
def make_predictions():
return 0
def read_results_csv(file_path, row_id=0):
dice_values = []
with open(file_path, 'r') as file:
reader = csv.reader(file)
for row in reader:
dice_values.append(float(row[row_id]))
return dice_values
def evaluate_and_predict(model, directory_to_evaluate, results_directory, output_name):
output_directory = 'predictions/' + output_name + '/'
batch_size = 16
(test_x, test_y) = load_data(directory_to_evaluate)
test_dataset = tf_dataset(test_x, test_y, batch=batch_size)
test_steps = (len(test_x)//batch_size)
if len(test_x) % batch_size != 0:
test_steps += 1
# evaluate the model in the test dataset
model.evaluate(test_dataset, steps=test_steps)
test_steps = (len(test_x)//batch_size)
if len(test_x) % batch_size != 0:
test_steps += 1
for i, (x, y) in tqdm(enumerate(zip(test_x, test_y)), total=len(test_x)):
print(i, x)
directory_image = x
x = read_image_test(x)
y = read_mask_test(y)
y_pred = model.predict(np.expand_dims(x, axis=0))[0] > 0.5
print(directory_to_evaluate + image_modality + '/')
name_original_file = directory_image.replace(''.join([directory_to_evaluate, 'image/', image_modality, '/']), '')
print(name_original_file)
results_name = ''.join([results_directory, output_directory, name_original_file])
print(results_name)
cv2.imwrite(results_name, y_pred * 255.0)
# save the results of the test dataset in a CSV file
ground_truth_imgs_dir = directory_to_evaluate + 'image/' + image_modality + '/'
result_mask_dir = results_directory + output_directory
ground_truth_image_list = [file for file in listdir(ground_truth_imgs_dir) if
isfile(join(ground_truth_imgs_dir, file))]
results_image_list = [file for file in listdir(result_mask_dir) if isfile(join(result_mask_dir, file))]
results_dice = []
results_sensitivity = []
results_specificity = []
for image in ground_truth_image_list[:]:
result_image = [name for name in results_image_list if image[-12:] == name[-12:]][0]
if result_image is not None:
original_mask = read_img(''.join([ground_truth_imgs_dir, image]))
predicted_mask = read_img(''.join([result_mask_dir, result_image]))
dice_val = dice(original_mask, predicted_mask)
results_dice.append(dice_val)
sensitivity, specificity = calculae_rates(original_mask, predicted_mask)
results_sensitivity.append(sensitivity)
results_specificity.append(specificity)
else:
print(image, 'not found in results list')
name_test_csv_file = ''.join([results_directory, 'results_evaluation_',
output_name,
'_',
new_results_id,
'_.csv'])
with open(name_test_csv_file, mode='w') as results_file:
results_file_writer = csv.writer(results_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
for i, file in enumerate(ground_truth_image_list):
results_file_writer.writerow(
[str(i), file, results_dice[i], results_sensitivity[i], results_specificity[i]])
return name_test_csv_file
filepath_models = '/home/nearlab/Jorge/current_work/lumen_segmentation/data/old_lumen_data/results' \
'/ResUnet_lr_0.001_bs_16_grayscale_23_10_2020_13_31' \
'ResUnet_lr_0.001_bs_16_grayscale_23_10_2020_13_31_model.h5'
# ------------------- Hyperparameters -----------------------------------
batch = 16
lr = 1e-3
epochs = 10
opt = tf.keras.optimizers.Adam(lr)
metrics = ["acc", tf.keras.metrics.Recall(),
tf.keras.metrics.Precision(), dice_coef, iou]
model = build_model()
model.compile(optimizer=opt, loss=dice_coef_loss, metrics=metrics)
model.load_weights('/home/nearlab/Jorge/current_work/lumen_segmentation/'
'data/old_lumen_data/results/'
'ResUnet_lr_0.001_bs_16_grayscale_23_10_2020_15_04/'
'ResUnet_lr_0.001_bs_16_grayscale_23_10_2020_15_04_model.h5')
model.summary()
training_time = datetime.now()
new_results_id = ''.join(['ResUnet',
'_lr_',
str(lr),
'_bs_',
str(batch),
'_', image_modality, '_',
training_time.strftime("%d_%m_%Y_%H_%M"),
])
results_directory = ''.join([project_folder, 'results/', new_results_id, '/'])
os.mkdir(results_directory)
os.mkdir(results_directory + 'predictions/')
os.mkdir(results_directory + 'predictions/test_01/')
os.mkdir(results_directory + 'predictions/test_02/')
os.mkdir(results_directory + 'predictions/test_03/')
evaluation_directory_01 = project_folder + "test/test_01/"
evaluation_directory_02 = project_folder + "test/test_02/"
evaluation_directory_03 = project_folder + "test/test_03/"
name_test_csv_file_1 = evaluate_and_predict(model, evaluation_directory_01, results_directory, 'test_01')
name_test_csv_file_2 = evaluate_and_predict(model, evaluation_directory_02, results_directory, 'test_02')
name_test_csv_file_3 = evaluate_and_predict(model, evaluation_directory_03, results_directory, 'test_03')
``` |
{
"source": "jlazzaridean/mScarlet_lifetime_reports_pH",
"score": 2
} |
#### File: fig1_Scarlet_pH_sensitivity/in_vitro_PBS_35C/plot_PBS_35C.py
```python
from pathlib import Path
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import Divider, Size
import pandas as pd
from scipy.optimize import curve_fit
from scipy.stats import linregress
# This script plots the in vitro data taken for purified Scarlet-His.
# Data were analyzed with SPT (mean arrival time). Median and stdev of
# the mean arrival time for each image was calculated by
# generate_in_vitro_summary_table.py.
def logistic4(pH, min_val, hill, pKa, max_val):
return ((min_val - max_val) / (1.0 + ((pH/pKa)**hill))) + max_val
# generate some paths
current_dir = Path.cwd()
manuscript_path = current_dir.parents[2]
data_path_1 = manuscript_path / 'source_data' / 'in_vitro_characterization'
data_path = data_path_1 / 'combined_in_vitro_mean_arrival_time.csv'
# basic plot setup
plt.style.use(manuscript_path / 'figures' / 'default.mplstyle')
cycle = plt.rcParams['axes.prop_cycle'].by_key()['color']
# load in the results
results = pd.read_csv(data_path)
#########################################################################
### FIGURE 5. In vitro in PBS at 35C (this alone for main text)
fig5 = plt.figure(figsize=(3, 3))
# generate fixed size axes, 1.5 inches
h = [Size.Fixed(1.0), Size.Fixed(1.5)]
v = [Size.Fixed(0.7), Size.Fixed(1.5)]
divider = Divider(fig5, (0, 0, 1, 1), h, v, aspect=False)
axs5 = fig5.add_axes(divider.get_position(),
axes_locator=divider.new_locator(nx=1, ny=1))
# now let's find the data we want to plot
pH_temp = results.loc[results['series'] == 'pH_temp']
summary = pH_temp.groupby(['buffer', 'temp_C', 'pH'])['median_tau_ns'].mean()
PBS_35 = summary.at['PBS', 35]
init_params = [1.7, 15, 5, 3.5]
# generate the fit again for plotting
popt, pcov = curve_fit(logistic4,
xdata = PBS_35.index.tolist(),
ydata = PBS_35.tolist(),
p0 = init_params,
maxfev=10000)
min_val, hill, pKa, max_val = popt
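# Added note (illustrative): 1-sigma uncertainties for [min_val, hill, pKa, max_val]
# are available from the fit covariance, e.g. perr = np.sqrt(np.diag(pcov)),
# as done in the companion nigericin/monensin script.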
# now plot the means and the fit curves
axs5.plot(PBS_35.index.tolist(), PBS_35.tolist(), linewidth=0)
pH_plotting = np.linspace(4, 7.5, num=500)
axs5.plot(pH_plotting, logistic4(pH_plotting, min_val, hill, pKa, max_val),
label='', marker=None, markersize=0, color=cycle[0])
# some plot formatting
axs5.set_title('In Vitro')
axs5.set_ylim(1.8, 3.8)
axs5.set_yticks(np.linspace(1.8, 3.8, 6))
for ax in fig5.axes:
ax.set_xlabel('pH')
ax.set_ylabel('Lifetime (ns)')
# save the results
fig5.savefig('PBS_35C_for_main_text.pdf', bbox_inches='tight',
transparent=True)
plt.show()
```
#### File: fig1_Scarlet_pH_sensitivity/nigericin_monensin/plot_nigericin_monensin_by_cell.py
```python
from pathlib import Path
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import Divider, Size
from scipy.optimize import curve_fit
import pandas as pd
# This script reads in the output of
# 'nigericin_monensin_combo_by_cell.py', which processes the lifetime
# image data for the nigericin and monensin calibration. This script
# then generates a pH titration curve and fits it to a 4-parameter
# logistic function.
# generate some paths
current_dir = Path.cwd()
manuscript_path = current_dir.parents[2]
data_path = manuscript_path / 'source_data' / 'nigericin_monensin_U2OS' / 'nigericin_monensin_cell_means.csv'
# basic plot setup
plt.style.use(manuscript_path / 'figures' / 'default.mplstyle')
cycle = plt.rcParams['axes.prop_cycle'].by_key()['color']
# load in the results
results = pd.read_csv(data_path)
# Calculate some rudimentary stats
gb_pH = results.groupby(['buffer_pH'])
tau_means = gb_pH['mean_tau_ns'].mean().tolist()
tau_stds = gb_pH['mean_tau_ns'].std().tolist()
fig1 = plt.figure(figsize=(3,3), dpi=300)
# generate fixed size axes, 1.5 inches
h = [Size.Fixed(1.0), Size.Fixed(1.5)]
v = [Size.Fixed(0.7), Size.Fixed(1.5)]
divider = Divider(fig1, (0, 0, 1, 1), h, v, aspect=False)
axs1 = fig1.add_axes(divider.get_position(),
axes_locator=divider.new_locator(nx=1, ny=1))
# fit to a 4 parameter logistic function
def logistic4(pH, min_val, hill, pKa, max_val):
return ((min_val - max_val) / (1.0 + ((pH/pKa)**hill))) + max_val
# set up a dataframe to store the fit outputs
fit_output = pd.DataFrame(columns=['condition', 'model', 'temperature', 'min_val',
                                   'min_val_SD', 'max_val', 'max_val_SD', 'hill',
                                   'hill_SD', 'pKa', 'pKa_SD'])
# perform the fitting
pH_range = gb_pH['mean_tau_ns'].mean().index.tolist()
init_params = [1.7, 15, 5, 3.5]
popt, pcov = curve_fit(logistic4,
xdata = pH_range,
ydata = tau_means,
p0 = init_params,
maxfev=10000)
fit_output.at[0, 'condition'] = 'U2OS'
fit_output.at[0, 'temperature'] = 35
fit_output.at[0, 'model'] = 'mean_arrival'
min_val, hill, pKa, max_val = popt
fit_output.at[0, 'min_val'] = min_val
fit_output.at[0, 'max_val'] = max_val
fit_output.at[0, 'hill'] = hill
fit_output.at[0, 'pKa'] = pKa
perr = np.sqrt(np.diag(pcov))
fit_output.at[0, 'min_val_SD'] = perr[0]
fit_output.at[0, 'max_val_SD'] = perr[3]
fit_output.at[0, 'hill_SD'] = perr[1]
fit_output.at[0, 'pKa_SD'] = perr[2]
# now plot the means and the fit curves
pH_plotting = np.linspace(4, 7.5, num=500)
axs1.plot(pH_plotting, logistic4(pH_plotting, min_val, hill, pKa, max_val),
label='', marker=None, markersize=0, color=cycle[0])
# means +/- stdev
axs1.errorbar(pH_range, tau_means, tau_stds, linewidth=0, elinewidth=1,
markersize=4, marker='.', capthick=1, color=cycle[0])
axs1.spines['right'].set_visible(False)
axs1.spines['top'].set_visible(False)
axs1.set_ylabel('Lifetime (ns)')
axs1.set_xlabel('Buffer pH')
axs1.set_ylim(1.6, 3.6)
axs1.set_yticks(np.linspace(1.6, 3.6, 6))
axs1.set_title('U2OS Cells')
fig1.savefig('nigericin_monensin_means_whole_cell.pdf',
bbox_inches='tight', transparent=True)
fit_output.to_csv('U2OS_4PL_fits.csv')
# print the standard deviation by pH for inclusion in main text
print('standard deviation by pH')
print(results.groupby('buffer_pH')['mean_tau_ns'].std())
plt.show()
``` |
{
"source": "jlbbj111/2019-Software-Engineering-Curriculum-Design",
"score": 3
} |
#### File: EMS/backstage/models.py
```python
from django.db import models
from django.contrib.auth.models import User
# College
class College(models.Model):
    # College name
    name = models.CharField(unique=True, max_length=128)
    # College abbreviation
    short_name = models.CharField(unique=True, max_length=128)
def __str__(self):
return self.name
class Meta:
db_table = 'college'
# Major
class Major(models.Model):
    # Major code, e.g. 0403.
    # 04 is the college's position in the ordering, 03 is the major's position within that college
    mno = models.CharField(unique=True, max_length=20)
    # Major name, unique, e.g. 计算机科学与技术 (Computer Science and Technology), 化学工程与工艺 (Chemical Engineering and Technology)
    mname = models.CharField(max_length=128, default="")
    # Major abbreviation, e.g. 计算机科学与技术 -> 计科, 化学工程与工艺 -> 化工
    short_name = models.CharField(max_length=20)
    # College the major belongs to, foreign key
    in_college = models.ForeignKey(to=College, on_delete=models.CASCADE)
def __str__(self):
return "-".join([self.mno, self.mname])
class Meta:
db_table = 'major'
unique_together = (
'mno',
'mname'
)
# Major plan (per-cohort information for a major)
class MajorPlan(models.Model):
    # Academic year: the student cohort of this major
    year = models.IntegerField()
    # The major this plan corresponds to
    major = models.ForeignKey(to=Major, on_delete=models.CASCADE)
    # Planned number of administrative classes for the major
    cls_num = models.IntegerField() # administrative classes
    # Planned number of students
    people_num = models.IntegerField()
    # Minimum credits required to graduate
    score_grad = models.IntegerField()
    # Length of study: number of years needed to graduate, e.g. 4
    stu_years = models.IntegerField()
    # Number of courses this cohort of the major must take, e.g. 80, 88...
    course_num = models.IntegerField()
def __str__(self):
return str(self.year) + '-' + str(self.major)
class Meta:
db_table = 'major_plan'
unique_together = (
'year', 'major'
)
# Administrative class
class AdmClass(models.Model):
    # Administrative class name, e.g. 计科1605
    name = models.CharField(max_length=10, unique=True)
    # Major plan the class belongs to; the class name is built from
    # major.short_name + in_year + a sequence number, e.g. 计科1605, 化工1606
    major = models.ForeignKey(to=MajorPlan, on_delete=models.CASCADE)
def __str__(self):
return self.name
class Meta:
db_table = 'adm_class'
# Student
class Student(User):
    # Student name
    name = models.CharField(max_length=128)
    # Student number: must be 10 digits, e.g. 2016011186
    # username = models.CharField()
    # Login password: the initial password is set to the student number
    # password = models.CharField()
    # Student gender
    sex = models.BooleanField(default=True)
    # Credits earned so far
    score_got = models.IntegerField()
    # Administrative class the student belongs to, foreign key
    in_cls = models.ForeignKey(to=AdmClass, on_delete=models.CASCADE)
    # Enrollment year as a plain int: the year this student entered the school;
    # because of possible grade retention, it may differ from the major cohort year
    in_year = models.IntegerField()
def __str__(self):
return "-".join([self.username, self.name])
class Meta:
db_table = 'student'
# Teacher
class Teacher(User):
    # Teacher name
    name = models.CharField(max_length=128)
    # username: built-in field, also serves as the teacher's staff number,
    # set to 9 digits to distinguish it from student numbers
    # password: built-in field, defaults to the staff number
    sex = models.BooleanField(default=True)
    # College the teacher belongs to
    college = models.ForeignKey(to=College, on_delete=models.CASCADE)
    # Year the teacher joined the school
    in_year = models.IntegerField()
    # Educational background, shown in the profile, e.g. PhD, postdoc...
    edu_background = models.CharField(null=True, max_length=128)
    # Position at the school, e.g. professor, associate professor, lecturer
    title = models.CharField(default='讲师', max_length=128)
    # Teacher's personal introduction, may be empty
    description = models.TextField(null=True)
def __str__(self):
return "-".join([self.username, self.name])
class Meta:
db_table = 'teacher'
# Classroom
class ClassRoom(models.Model):
    # Classroom number, e.g. A-302, B阶-202
    crno = models.CharField(primary_key=True, max_length=128)
    # Classroom type: lecture hall 180 seats, medium room 120 seats, small room 50 seats;
    # the type must match the classroom number
    crtype = models.CharField(null=False, max_length=10)
    # Number of students the room can hold, must match the type
    contain_num = models.IntegerField()
def __str__(self):
return self.crno
class Meta:
db_table = 'class_room'
class Announcement(models.Model):
dep = (
('全体成员', '全体成员'),
('信息科学与技术学院', '信息科学与技术学院'),
('化学工程学院', '化学工程学院'),
('材料科学与工程学院', '材料科学与工程学院'),
('机电工程学院', '机电工程学院'),
('经济管理学院', '经济管理学院'),
('理学院', '理学院'),
('文法学院', '文法学院'),
('生命科学与技术学院', '生命科学与技术学院'),
('继续教育学院', '继续教育学院'),
('马克思主义学院', '马克思主义学院'),
('国际教育学院', '国际教育学院'),
('侯德榜工程师学院', '侯德榜工程师学院'),
('能源学院', '能源学院'),
('巴黎居里工程师学院', '巴黎居里工程师学院'),
('个人', '个人'),
)
title = models.TextField(max_length=150, default='通知')
messages = models.TextField(max_length=150)
author = models.CharField(max_length=128)
receiver = models.CharField(max_length=32, choices=dep, default='全体成员')
year = models.CharField(max_length=32, default='2016')
    receiver_grade = models.CharField(max_length=32, default="2016")
visible = models.BooleanField(default=True)
time = models.DateTimeField(auto_now_add=True)
class Meta:
db_table = 'announcement'
class UploadFiles(models.Model):
file = models.FileField(upload_to='files', default='../backstage/media/image/default.png', blank=True, null=True)
author = models.CharField(max_length=128)
visible = models.BooleanField(default=True)
time = models.DateTimeField(auto_now_add=True)
class Meta:
db_table = 'upload_files'
```
#### File: utils/database_utils/add_stu_tchs.py
```python
import os
import re
import math
from random import choice, randint, choices
from backstage.models import College, Major, AdmClass, Student,\
Teacher, ClassRoom, MajorPlan
from scoreManagement.models import Course, Teaching
from django.db.utils import IntegrityError
from .make_encoding import make_encode
class CollegeInit:
college_names = [
['信息科学与技术学院', '信息学院'],
['化学工程学院', '化工学院'],
['材料科学与工程学院', '材料学院'],
['机电工程学院', '机电学院'],
['文法学院', '文法学院'],
['经济管理学院', '经管学院'],
['理学院', '理学院'],
['生命科学与技术学院', '生命学院'],
['巴黎居里工程师学院', '巴黎居里工程师学院'],
]
def __init__(self):
for i in self.college_names:
college = College.objects.create(name=i[0], short_name=i[1])
try:
college.save()
except:
print("Except happen: " + str(len(College.objects.all())))
class MajorInit:
major_names = {
'信息科学与技术学院': [
['计算机科学与技术', '计科', 192],
['自动化控制', '自控', 192],
['测控技术与仪器', '测控', 193],
['电子信息工程', '信工', 192],
['数字媒体艺术', '数媒', 173],
],
'化学工程学院': [
['化学工程与工艺', '化工'],
['能源工程', '能源'],
['环境工程', '环工'],
],
'材料科学与工程学院': [
['高分子材料与技术', '高材'],
['材料科学与技术', '材料'],
['功能材料', '功材'],
],
'机电工程学院': [
['']
],
'文法学院': [
['英语']
],
'经济管理学院': [
[]
],
'理学院': [
[]
],
'生命科学与技术学院': [
[]
],
'巴黎居里工程师学院': [
[]
]
}
def information_init(self):
college = College.objects.get(name='信息科学与技术学院')
for m in self.major_names['信息科学与技术学院']:
try:
maj = Major.objects.create(in_college=college, mname=m[0], short_name=m[1])
maj.save()
except IntegrityError:
print(len(Major.objects.all()))
def information_major_plan_init(self):
years = [2015, 2016, 2017, 2018]
for year in years:
for maj in Major.objects.all():
cls_num=choice([3, 4, 5, 6])
try:
majp = MajorPlan.objects.create(
year=year,
major=maj,
cls_num=cls_num,
score_grad=choice([192, 193]),
people_num=35*cls_num,
stu_years=4
)
majp.save()
except IntegrityError:
print(len(MajorPlan.objects.all()))
def information_class_init(self):
major_plan_set = MajorPlan.objects.all()
cnt = 1
for maj in major_plan_set:
cls_base_name = maj.major.short_name + str(maj.year)[2:]
for j in range(1, maj.cls_num+1):
cls_name = cls_base_name + "%02d" % j
print(cls_name)
try:
cls = AdmClass.objects.create(name=cls_name, major=maj, people_num=maj.people_num)
cls.save()
except IntegrityError:
print(len(AdmClass.objects.all()))
def information_student_init(self):
last_name = '赵钱孙李周吴郑王冯陈褚卫蒋沈韩杨朱秦尤许何吕施张孔曹严华金魏陶姜'
first_name = '豫章故郡洪都新府星分翼轸地接衡庐襟三江而带五湖'
score_gots = {
2015: 190,
2016: 180,
2017: 100,
2018: 56
}
adm_class_set = AdmClass.objects.all()
cnt = 1
for cls in adm_class_set:
for j in range(cls.people_num + 1):
year = cls.major.year
prefix = str(year)
subfix = "%06d" % cnt
sno = prefix + subfix
cnt += 1
name = choice(last_name) + "".join(choice(first_name) for i in range(2))
print({
'name': name,
'sno': sno
})
# try:
# stu = Student.objects.create(
# sno=sno,
# username=name,
# password=<PASSWORD>,
# sex=choice([True, False]),
# in_cls=cls,
# in_year=cls.major.year,
# score_got=score_gots[year]
# )
# stu.save()
# except IntegrityError:
# print(len(Student.objects.all()))
class TeacherInit:
last_name = '赵钱孙李周吴郑王冯陈褚卫蒋沈韩杨朱秦尤许何吕施张孔曹严华金魏陶姜'
first_name = '豫章故郡洪都新府星分翼轸地接衡庐襟三江而带五湖'
sex = choice([True, False])
college = College.objects.get(name='信息科学与技术学院')
in_year = randint(1980, 2018)
pass
if __name__ == '__main__':
# college_init = CollegeInit()
major_init = MajorInit()
# major_init.information_init()
# major_init.information_major_plan_init()
# major_init.information_class_init()
major_init.information_student_init()
pass
```
#### File: utils/score_utils/query_set2excel.py
```python
import os
import pandas as pd
import MySQLdb
import xlrd
import xlwt
from xlwt import Workbook, Worksheet, Row, Column
from openpyxl.writer.excel import save_virtual_workbook, save_workbook, ExcelWriter
base_dir = '../others/'
xls_file = '2018-2019-1semester.xls'
def write_to_excel(n, head_data, records, download_url):
pass
``` |
{
"source": "jlbcontrols/ignition-vcs-tools",
"score": 2
} |
#### File: tagfiles/exporternode/code.py
```python
import json
import os
import shutil
from com.inductiveautomation.ignition.common.tags import TagUtilities
from com.inductiveautomation.ignition.client.gateway_interface import GatewayConnectionManager
from com.inductiveautomation.ignition.common.tags.paths.parser import TagPathParser
class ExporterNode():
def __init__(self, parentDirPath,fullConfig={},tagPath=""):
self.parentDirPath = parentDirPath
if (not fullConfig) and (not tagPath):
raise ValueError("Must provide a fullConfig or tagPath")
elif not fullConfig:
self.fullConfig = getFullConfigFromGateway(tagPath)
else:
self.fullConfig = fullConfig
def __str__(self):
return "%s(name=%s,dirPath=%s)" % (self.__class__.__name__,self.fullConfig["name"],self.getDirPath())
def getDirPath(self):
return os.path.join(self.parentDirPath, self.fullConfig['name'])
def getNodeFilePath(self):
return os.path.join(self.getDirPath(), "nodeconfig.json")
# tag config json without "name" or "tags" items. name and tags information becomes part of the folder structure.
def getNodeConfig(self):
nodeConfig = dict(self.fullConfig)
del nodeConfig["name"]
if "tags" in nodeConfig:
del nodeConfig["tags"]
return nodeConfig
# create a list of nodes for direct children (not recursive).
def getChildNodes(self):
nodes = []
if "tags" in self.fullConfig:
for childConfig in self.fullConfig["tags"]:
nodes.append(ExporterNode(self.getDirPath(),fullConfig=childConfig))
return nodes
# save all tag information as a directory of files (recursive).
def exportToFiles(self):
self.exportNodeConfig()
for node in self.getChildNodes():
node.exportToFiles()
# export the json for this node's top level tag.
def exportNodeConfig(self):
self.clearDirectory()
with open(self.getNodeFilePath(),'w') as outfile:
json.dump(self.getNodeConfig(),outfile,indent=4,sort_keys=True)
# Delete any existing directory, and create new
def clearDirectory(self):
dirPath = self.getDirPath()
if os.path.exists(dirPath):
shutil.rmtree(dirPath)
os.makedirs(dirPath)
# Get the full tag configuration dictionary for a tag path. The dict from "system.tag.getConfiguration" does not dump to json, so "TagProviderRpc.getTagExport" is used instead.
def getFullConfigFromGateway(tagPath):
configString = GatewayConnectionManager.getInstance().getGatewayInterface().invoke("TagProviderRpc.getTagExport", [[TagPathParser.parse(tagPath)],True,"json"])
fullConfig = json.loads(configString)
if fullConfig["tagType"]=="Unknown":
raise ValueError(fullConfig["name"] + " tagType is Unknown. Check if tag exists.")
return fullConfig
# Export a list of tags, with prompts to select export directory and confirm.
def exportWithPrompts(tagPaths):
if not tagPaths:
raise ValueError("At least one tag required for export.")
parentDirPath = openFolderDialog("Select Parent Directory")
if parentDirPath:
nodes = []
for tagPath in tagPaths:
nodes.append(ExporterNode(parentDirPath,tagPath=tagPath))
nodeNamesLower = getNodeNamesLower(nodes)
if containsDuplicate(nodeNamesLower):
raise ValueError("Duplicate node names are not permitted.")
confirmMessage = "Are you sure you want to create or overwrite the following folder(s)?"
for node in nodes:
confirmMessage += "\n" + node.getDirPath()
parentDirName = os.path.basename(parentDirPath)
if (parentDirName.lower() in nodeNamesLower):
confirmMessage += "\n\n WARNING: Parent directory name matches tag name."
if system.gui.confirm(confirmMessage):
for node in nodes:
node.exportToFiles()
system.gui.messageBox("Export Complete")
# Get lowercase list of node names from list of nodes
def getNodeNamesLower(nodes):
names = []
for node in nodes:
names.append(node.fullConfig["name"].lower())
return names
def openFolderDialog(dialogTitle):
from javax.swing import JFileChooser
chooser = JFileChooser()
chooser.setFileSelectionMode(JFileChooser.DIRECTORIES_ONLY)
chooser.setDialogTitle(dialogTitle)
if chooser.showOpenDialog(None) == JFileChooser.APPROVE_OPTION:
return str(chooser.getSelectedFile())
# Check for duplicates in a list
def containsDuplicate(list):
return len(list) != len(set(list))
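# Added usage sketch (illustrative; the tag paths below are assumptions). Because this
# module calls system.gui, it is intended to run in a Vision client/Designer scope:
# exportWithPrompts(["[default]Folder1/MyUdtInstance", "[default]Folder1/MyFolder"])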
``` |
{
"source": "jlbcontrols/pidbot-manager",
"score": 2
} |
#### File: opc/opc_drop_listener/code.py
```python
logger = system.util.getLogger('opc_drop_listener')
from com.jlbcontrols.vcob import OpcBrowseNodeListTransferable
from java.awt.dnd import DropTargetListener, DropTarget
from java.awt.datatransfer import DataFlavor
from com.inductiveautomation.ignition.client.tags.dnd import ListOfNodeBrowseInfo
import pidbotscripts.pb_mgr.pidtags.pidbot_instance_generator.PidbotInstanceGenerator as PidbotInstanceGenerator
import pidbotscripts.utils.component_tools as component_tools
def addOpcDropListener(opcDroppableComponent,instanceGenerator):
# Check if component has required custom properties to add the OpcDropListener
requiredProps = ['dragEntered']
propsNotFound = component_tools.componentMissingProps(opcDroppableComponent,requiredProps)
if propsNotFound:
errorMessage = "Component %s does not have custom properties required to add an OpcDropListener: %s" % (opcDroppableComponent.getName(),propsNotFound)
system.gui.errorBox(errorMessage)
return
opcDropListener = OpcDropListener(opcDroppableComponent,instanceGenerator)
DropTarget(opcDroppableComponent, opcDropListener)
class OpcDropListener(DropTargetListener):
def __init__(self, opcDroppableComponent,instanceGenerator):
DropTargetListener.__init__(self)
self.opcDroppableComponent = opcDroppableComponent
self.instanceGenerator = instanceGenerator
def drop(self, e):
self.opcDroppableComponent.dragEntered=False
browseNodes = getOpcBrowseNodesFromDropEvent(e)
logger.debug("Dropped browseNodes="+str(browseNodes))
self.instanceGenerator.createFromOpcBrowseNodes(browseNodes)
self.opcDroppableComponent.dropComplete()
def dragOver(self,e):
return
def dragEnter(self,e):
self.opcDroppableComponent.dragEntered=True
def dragExit(self,e):
self.opcDroppableComponent.dragEntered=False
def getOpcBrowseNodesFromDropEvent(e):
if e.isDataFlavorSupported(OpcBrowseNodeListTransferable.FLAVOR):
browseNodes = e.getTransferable().getTransferData(OpcBrowseNodeListTransferable.FLAVOR)
else:
raise Exception("Drop data types: %s not supported" % e.getCurrentDataFlavorsAsList())
if len(browseNodes)>20:
if not system.gui.confirm("Create %s new tags?" % len(browseNodes)):
return []
return browseNodes
```
#### File: pb_historian_dash/tag_history/code.py
```python
def queryTagHistoryFillRange(tagPath,startDate,endDate):
# Query Raw data for time range
dsRaw = system.tag.queryTagHistory(
paths=[tagPath],
startDate=startDate,
endDate=endDate,
returnSize=-1,
returnFormat="Wide",
includeBoundingValues=True
)
#If there is no data recorded, return the empty dataset
if dsRaw.getRowCount() == 0:
return dsRaw
#Move start bounding value to the chart start time
dsWithStart = system.dataset.setValue(dsRaw,0,0,startDate)
#Duplicate end value at chart end time
lastValue = dsRaw.getValueAt(dsRaw.getRowCount()-1,1)
dsWithEnd = system.dataset.addRow(dsWithStart,[endDate,lastValue])
return dsWithEnd
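# Added usage sketch (illustrative; the tag path is an assumption):
# end = system.date.now()
# start = system.date.addHours(end, -8)
# ds = queryTagHistoryFillRange("[default]PidbotInst/PidTags/plc1/LC1/PV", start, end)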
```
#### File: pb_mgr/pb_mgr_config/code.py
```python
import pidbotscripts.utils.data_mapping as data_mapping
### Tag Provider Settings ###
def getPrimaryTagProvider():
return str(system.tag.read("[System]Client/System/DefaultTagProvider").value)
def getPidGroupTagProvider():
return getPrimaryTagProvider()
# Add more providers here to allow user to create/delete tags in additional providers.
def getManageableTagProviders(asDataset=False):
providerList = [
getPrimaryTagProvider(),
"testProv"
]
deduplicatedList = list(set(providerList))
if asDataset:
return data_mapping.listToColumnDs(deduplicatedList)
return deduplicatedList
### PID Group Settings (aka Displays) ###
def getPidGroupFolderPath(tagProvider=getPrimaryTagProvider()):
return "["+tagProvider+"]PidbotInst/PidGroups"
def getPidGroupUdtPath(tagProvider=getPrimaryTagProvider()):
return "["+tagProvider+"]_types_/PidbotTypes/PidGroup"
### OPC Tag Generator Settings ###
def getPidTagRootPath(tagProvider):
return "["+tagProvider+"]PidbotInst/PidTags"
def getPidTagUdtPaths(tagProvider,asDataset=False):
paths = [
"_types_/PidbotTypes/Pid/Studio5000/PIDE",
"_types_/PidbotTypes/Pid/Studio5000/P_PIDE",
"_types_/PidbotTypes/Pid/Studio5000/PID"
]
fullPaths = ["["+tagProvider+"]"+path for path in paths]
if asDataset:
return data_mapping.listToColumnDs(fullPaths)
return fullPaths
```
#### File: members/member_pids/code.py
```python
logger = system.util.getLogger(__name__)
def getMembershipDict(pidTagPaths,pidGroupPaths):
pidTagDict = getPidTagDict(pidGroupPaths)
membershipDict = {}
for pidTagPath in pidTagPaths:
membershipDict[pidTagPath]=[]
for pidGroupPath,groupMembers in pidTagDict.items():
if pidTagPath in groupMembers:
membershipDict[pidTagPath].append(pidGroupPath)
return membershipDict
def getPidTagDict(pidGroupPaths):
if isinstance(pidGroupPaths,str):
pidGroupPaths = [pidGroupPaths]
if pidGroupPaths:
pidTagListPaths = [pidGroupPath + "/pidTagList" for pidGroupPath in pidGroupPaths]
logger.debug("reading pidTagLists...\n" + str(pidTagListPaths))
pidTagListQvs = system.tag.readBlocking(pidTagListPaths)
pidTagLists = [qv.value for qv in pidTagListQvs]
pidTagDict = {}
for pidGroupPath,pidTagList in zip(pidGroupPaths,pidTagLists):
pidTagDict[pidGroupPath]=pidTagList
return pidTagDict
return {}
def updatePidTagLists(pidTagDict):
if logger.isDebugEnabled():
import pprint
logger.debug(
"updating pid group tag lists...\n"+
pprint.pformat(pidTagDict)
)
pidTagListPaths = []
pidTagLists = []
for pidGroupPath,pidTagList in pidTagDict.items():
pidTagListPaths.append(pidGroupPath+"/pidTagList")
pidTagLists.append(pidTagList)
system.tag.writeBlocking(pidTagListPaths,pidTagLists)
return
### Testing ###
def getPidTagDictTest(pidGroupPaths=["[default]PidbotInst/PidGroups/PidGroup0","[default]PidbotInst/PidGroups/PidGroup1"]):
pidTagDict = getPidTagDict(pidGroupPaths)
for pidGroupPath in pidGroupPaths:
assert pidTagDict[pidGroupPath]==system.tag.read(pidGroupPath+"/pidTagList").value
```
#### File: members/remove_pid/code.py
```python
import pidbotscripts.pb_mgr.pidgroups.members.member_pids as member_pids
import pidbotscripts.utils.list_tools as list_tools
logger = system.util.getLogger(__name__)
def removePidTagsFromGroups(pathsToRemove,pidGroupPaths,removeChildren=False):
pidTagDict = member_pids.getPidTagDict(pidGroupPaths)
for pidGroupPath in pidTagDict.keys():
if removeChildren:
pidTagDict[pidGroupPath] = removeTagsAndChildrenFromList(pathsToRemove, pidTagDict[pidGroupPath])
else:
pidTagDict[pidGroupPath] = list_tools.listExcludingItems(pidTagDict[pidGroupPath],pathsToRemove)
member_pids.updatePidTagLists(pidTagDict)
return
def removeTagsAndChildrenFromList(pathsToRemove,pathList):
filteredList = []
for path in pathList:
remove = False
for pathToRemove in pathsToRemove:
if path.startswith(pathToRemove):
remove = True
break
if not remove:
filteredList.append(path)
return filteredList
```
#### File: pidgroups/pid_drop_listener/code.py
```python
from java.awt.dnd import DropTargetListener, DropTarget
from java.awt.datatransfer import DataFlavor
from com.inductiveautomation.ignition.client.tags.dnd import ListOfNodeBrowseInfo
import pidbotscripts.utils.component_tools as component_tools
import pidbotscripts.pb_mgr.pidgroups.members.add_pid as add_pid
def addPidDropListener(pidDroppableComponent):
# Check if component has required custom properties to add the PidDropListener
requiredProps = ['pidGroupPath','dropBeforeTagPath','dragEntered']
propsNotFound = component_tools.componentMissingProps(pidDroppableComponent,requiredProps)
if propsNotFound:
errorMessage = "Component %s does not have custom properties required to use addPidDropListener(component): %s" % (pidDroppableComponent.getName(),propsNotFound)
system.gui.errorBox(errorMessage)
return
pidDropListener = PidDropListener(pidDroppableComponent)
DropTarget(pidDroppableComponent, pidDropListener)
class PidDropListener(DropTargetListener):
def __init__(self, pidDroppableComponent):
DropTargetListener.__init__(self)
self.pidDroppableComponent = pidDroppableComponent
def drop(self, e):
self.pidDroppableComponent.dragEntered=False
newTagPaths = getTagPathListFromDropEvent(e)
pidGroupPath = self.pidDroppableComponent.pidGroupPath
dropBeforeTagPath = self.pidDroppableComponent.dropBeforeTagPath
add_pid.addTagsToGroup(newTagPaths,pidGroupPath,dropBeforeTagPath)
def dragOver(self,e):
return
def dragEnter(self,e):
self.pidDroppableComponent.dragEntered=True
def dragExit(self,e):
self.pidDroppableComponent.dragEntered=False
def getTagPathListFromDropEvent(e):
if e.isDataFlavorSupported(ListOfNodeBrowseInfo.FLAVOR):
transferData = e.getTransferable().getTransferData(ListOfNodeBrowseInfo.FLAVOR)
tagPathList = [str(tag.getFullPath()) for tag in transferData]
elif e.isDataFlavorSupported(DataFlavor.stringFlavor):
transferData = e.getTransferable().getTransferData(DataFlavor.stringFlavor)
tagPathList = [str(transferData)]
else:
raise Exception("Drop data types: %s not supported" % e.getCurrentDataFlavorsAsList())
if len(tagPathList)>20:
if not system.gui.confirm("Add %s tags to Tag Group?" % len(transferData)):
return []
return tagPathList
```
#### File: pidtags/pidbot_instance_generator/code.py
```python
import pidbotscripts.opc.udt_instance_generator as udt_instance_generator
import pidbotscripts.opc.opc_type_recognizer as opc_type_recognizer
import pidbotscripts.utils.list_tools as list_tools
import pidbotscripts.utils.tag_tools as tag_tools
class PidbotInstanceGenerator(udt_instance_generator.AbstractUdtInstanceGenerator):
def __init__(self,instanceRootFolderPath,opcRecognizedUdtPaths):
self.instanceRootFolderPath = instanceRootFolderPath
opcTypeRecognizer = opc_type_recognizer.OpcMultiTypeRecognizer(opcRecognizedUdtPaths)
udt_instance_generator.AbstractUdtInstanceGenerator.__init__(self,opcTypeRecognizer)
def getInstanceName(self,udtPath,opcNodeId,opcServerName):
return opcNodeId.split(']')[1].replace(".","_")
def getInstanceParams(self,udtPath,opcNodeId,opcServerName):
params = {}
params['opcPrefix'] = opcNodeId.split('[')[0]
params['plcName'] = opcNodeId.split('[')[1].split(']')[0]
params['plcTag'] = opcNodeId.split(']')[1]
params['opcServName'] = opcServerName
return params
def getInstanceFolderPath(self,udtPath,opcNodeId,opcServerName):
params = self.getInstanceParams(udtPath,opcNodeId,opcServerName)
return self.instanceRootFolderPath + "/" + params['opcServName'] + "/" + params['plcName']
def onInstanceCreated(self,folderPath,tagConfig):
loopNamePath = folderPath + "/" + tagConfig['name'] + "/loopName"
system.tag.write(loopNamePath, tagConfig['name'])
def handleUnrecognizedOpcBrowseNodes(self,unrecognizedOpcBrowseNodes):
if unrecognizedOpcBrowseNodes:
            errorMessage = self.getUnrecognizedOpcBrowseNodesMessage(unrecognizedOpcBrowseNodes)
system.gui.messageBox(errorMessage, "Error recognizing some requested tags.")
# Testing
def createInstanceTest(plcTag = "LC1", udtPath = "[default]_types_/PidbotTypes/Pid/Studio5000/P_PIDE",instanceRootFolderPath="[default]PidbotInst/PidTags"):
opcPrefix = "ns=1;s="
plcName = "plc1"
opcNodeId = opcPrefix+"["+plcName+"]"+plcTag
opcServerName = "Ignition OPC UA Server"
pidInstanceGenerator = PidbotInstanceGenerator(instanceRootFolderPath,[udtPath])
tagName = pidInstanceGenerator.getInstanceName(udtPath,opcNodeId,opcServerName)
instancePath = pidInstanceGenerator.getInstanceFolderPath(udtPath,opcNodeId,opcServerName)+"/"+tagName
if system.tag.exists(instancePath):
raise Exception("createInstanceTest not valid, tag already exists")
pidInstanceGenerator.createInstance(udtPath,opcNodeId,opcServerName)
newTagConfig = system.tag.getConfiguration(instancePath)[0]
assert str(newTagConfig['tagType']) == 'UdtInstance'
assert str(newTagConfig['typeId']) == tag_tools.shortenUdtPath(udtPath)
params = newTagConfig['parameters']
# .value needed for 8.0.16, not 8.0.12
assert params['opcPrefix'].value==opcPrefix
assert params['opcServName'].value==opcServerName
assert params['plcTag'].value==plcTag
assert params['plcName'].value==plcName
loopName = system.tag.read(instancePath+"/loopName").value
print loopName
assert loopName == plcTag
def getInstanceParamsTest(plcTag = "LC1", opcPrefix = "ns=1;s=", plcName = "plc1", opcServerName = "Ignition OPC UA Server"):
opcNodeId = opcPrefix+"["+plcName+"]"+plcTag
udtPath = "[default]_types_/PidbotTypes/Pid/Studio5000/P_PIDE"
instanceRootFolderPath="some/path/for/test"
pidInstanceGenerator = PidbotInstanceGenerator(instanceRootFolderPath,[udtPath])
params = pidInstanceGenerator.getInstanceParams("",opcNodeId,opcServerName)
assert params['opcPrefix'] == opcPrefix
assert params['plcName'] == plcName
assert params['plcTag'] == plcTag
```
#### File: tagnode/node_checker/code.py
```python
import pidbotscripts.utils.list_tools as list_tools
class AbstractListRequiredChecker():
def __init__(self,failKey,requiredList,listDescription):
self.requiredList = requiredList
self.listDescription = listDescription
self.failKey = failKey
def check(self,tagConfigNode):
return not self.missingItemsFrom(tagConfigNode)
def getFailureMessage(self,tagConfigNode):
missingItems = self.missingItemsFrom(tagConfigNode)
if missingItems:
path = str(tagConfigNode.tagConfig['path'])
header = "Tag is not valid:\n%s\nMissing %s:" % (path,self.listDescription)
return list_tools.createListMessage(missingItems,header)
return ""
def missingItemsFrom(self,tagConfigNode):
actualList = self.itemsFoundForNode(tagConfigNode)
return list_tools.itemsNotInList(self.requiredList,actualList)
def itemsFoundForNode(self,tagConfigNode):
raise NotImplementedError
class ChildrenRequiredChecker(AbstractListRequiredChecker):
def __init__(self,requiredChildNames,failKey="missingChildren"):
AbstractListRequiredChecker.__init__(self,failKey,requiredChildNames,"required child tags")
def itemsFoundForNode(self,tagConfigNode):
return [childNode.getName() for childNode in tagConfigNode.childNodes]
class ParamsRequiredChecker(AbstractListRequiredChecker):
def __init__(self,requiredParamNames,failKey="missingParams"):
AbstractListRequiredChecker.__init__(self,failKey,requiredParamNames,"required parameters")
def itemsFoundForNode(self,tagConfigNode):
return tagConfigNode.tagConfig.get('parameters',{}).keys()
class UdtMemberExcluder():
def __init__(self,failKey="udtMember"):
self.failKey = failKey
def check(self,tagConfigNode):
for ancestorNode in tagConfigNode.getAncestorNodes():
if str(ancestorNode.tagConfig.get('tagType',"")) in ["UdtInstance","UdtType"]:
return False
return True
def getFailureMessage(self,tagConfigNode):
if not self.check(tagConfigNode):
path = str(tagConfigNode.tagConfig['path'])
return "Tag is not valid:\n%s\nCannot be a member of another tag." % path
return ""
class DirectoryChecker():
def __init__(self,allowedDirectoryPaths,failKey="directoryNotAllowed"):
self.allowedDirectoryPaths = allowedDirectoryPaths
self.failKey = failKey
def check(self,tagConfigNode):
for allowedDirectoryPath in self.allowedDirectoryPaths:
if tagConfigNode.isDescendentOf(allowedDirectoryPath):
return True
return False
def getFailureMessage(self,tagConfigNode):
if not self.check(tagConfigNode):
path = str(tagConfigNode.tagConfig['path'])
header = "Tag is not valid:\n%s\nMust be located in directory:%s\n" % (path)
return list_tools.createListMessage(self.allowedDirectoryPaths,header)
return ""
class TagTypeChecker():
def __init__(self,allowedTypes,failKey="typeNotAllowed"):
self.allowedTypes = allowedTypes
self.failKey = failKey
def check(self,tagConfigNode):
tagType = str(tagConfigNode.tagConfig['tagType'])
return tagType in self.allowedTypes
def getFailureMessage(self,tagConfigNode):
if not self.check(tagConfigNode):
path = str(tagConfigNode.tagConfig['path'])
header = "Tag is not valid:\n%s\nType must be:" % path
return list_tools.createListMessage(self.allowedTypes,header)
return ""
class NodeExistsChecker():
def __init__(self,failKey="doesNotExist"):
self.failKey = failKey
def check(self,tagConfigNode):
return tagConfigNode.exists
def getFailureMessage(self,tagConfigNode):
if not self.check(tagConfigNode):
path = str(tagConfigNode.tagConfig['path'])
return "Tag not found or does not exist:\n%s" % path
return ""
```
#### File: tagnode/tag_config_node/code.py
```python
from com.inductiveautomation.ignition.common.tags.model import TagPath
from com.inductiveautomation.ignition.common.tags.paths.parser import TagPathParser
import pidbotscripts.utils.tag_tools as tag_tools
class TagConfigNode:
def __init__(self,tagConfig,parentNode=None,exists=True):
self.tagConfig = tagConfig
self.parentNode = parentNode
self.exists = exists
self.childNodes = self.createChildNodes()
def createChildNodes(self):
childNodes = []
for tagConfig in self.tagConfig.get('tags',[]):
childNodes.append(TagConfigNode(tagConfig,self))
return childNodes
    def getAncestorNodes(self,nodes=None):
        # Use None instead of a mutable default list, which would be shared across calls.
        if nodes is None:
            nodes = []
        if self.parentNode:
            nodes.append(self.parentNode)
            self.parentNode.getAncestorNodes(nodes)
        return nodes
def getRootNode(self):
ancestorNodes = self.getAncestorNodes()
if ancestorNodes:
return ancestorNodes.pop()
def getChildNode(self,childName):
for childNode in self.childNodes:
if childNode.getName()==childName:
return childNode
return createImaginaryNode(childName,parentNode=self)
def getName(self):
return str(self.tagConfig.get('name',""))
def getFullPath(self):
tagPath = self.tagConfig['path']
if self.parentNode:
return self.parentNode.getFullPath().getChildPath(tagPath.toStringFull())
return tagPath
def getNodeForPath(self,path):
tagPath = tag_tools.coerceToTagPathObject(path)
if self.pathEquals(tagPath):
return self
if self.isAncestorOf(tagPath):
childName = tagPath.getPathComponent(self.getFullPath().getPathLength())
childNode = self.getChildNode(childName)
return childNode.getNodeForPath(tagPath)
if self.parentNode:
return self.parentNode.getNodeForPath(tagPath)
return createImaginaryNode(path)
def pathEquals(self,path):
tagPath = tag_tools.coerceToTagPathObject(path)
return self.getFullPath().toStringFull().lower() == tagPath.toStringFull().lower()
# Check if a path would be a descendent of this node. A node for this path does not need to exist.
def isAncestorOf(self,path):
tagPath = tag_tools.coerceToTagPathObject(path)
return self.getFullPath().isAncestorOf(tagPath)
# Check if a path would be an ancestor of this node. A node for this path does not need to exist.
def isDescendentOf(self,path):
tagPath = tag_tools.coerceToTagPathObject(path)
return tagPath.isAncestorOf(self.getFullPath())
def createImaginaryNode(path,parentNode=None):
tagPath = tag_tools.coerceToTagPathObject(path)
tagConfig = {
'name':tagPath.getItemName(),
'path':tagPath
}
return TagConfigNode(tagConfig,parentNode,exists=False)
def createRootNodeForPath(path):
rootConfig = system.tag.getConfiguration(path,True)[0]
return TagConfigNode(rootConfig,None)
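# Added usage sketch (illustrative; the tag paths are assumptions):
# root = createRootNodeForPath("[default]PidbotInst/PidTags")
# node = root.getNodeForPath("[default]PidbotInst/PidTags/plc1/LC1")
# if node.exists:
#     print node.getFullPath().toStringFull()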
```
#### File: utils/list_tools/code.py
```python
logger = system.util.getLogger('pidbotscripts_util')
# Return a list of required items not found in another list
def itemsNotInList(requiredItems,items):
itemsNotFound = []
for requiredItem in requiredItems:
if not requiredItem in items:
itemsNotFound.append(requiredItem)
return itemsNotFound
def listExcludingItems(fullList,itemsToExclude):
return [item for item in fullList if item not in itemsToExclude]
# Generate a titled message (i.e. for use in message box) for a list of objects. Returns "" if list is empty.
def createListMessage(items,title):
if items:
message = str(title) + "\n"
for item in items:
message += "- " + str(item) + "\n"
return message
return ""
```
#### File: utils/tag_tools/code.py
```python
from com.inductiveautomation.ignition.common.tags.model import TagPath
from com.inductiveautomation.ignition.common.tags.paths.parser import TagPathParser
import pidbotscripts.utils.list_tools as list_tools
import sys
logger = system.util.getLogger(__name__)
def coerceToTagPathObject(path):
if isinstance(path,TagPath):
return path
return TagPathParser.parseSafe(path)
def commonAncestorOf(paths,tagProvider):
shortestPathLength = sys.maxsize #sys.maxint
shortestPath = None
tagPaths = []
for path in paths:
tagPath = coerceToTagPathObject(path)
tagPaths.append(tagPath)
if tagPath.getPathLength()<shortestPathLength:
shortestPathLength=tagPath.getPathLength()
shortestPath = tagPath
ancestorPath = "["+tagProvider+"]"
for i in range(0,shortestPathLength):
pathSegment = shortestPath.getPathComponent(i).lower()
for tagPath in tagPaths:
if tagPath.getPathComponent(i).lower() != pathSegment:
return ancestorPath
ancestorPath += pathSegment + '/'
return ancestorPath
def sortTagsByProvider(paths):
tagPaths = [coerceToTagPathObject(path) for path in paths]
tagDict = {}
for tagPath in tagPaths:
provider = tagPath.getSource()
if provider not in tagDict.keys():
tagDict[provider]=[tagPath.toStringFull()]
else:
tagDict[provider].append(tagPath.toStringFull())
return tagDict
def shortenUdtPath(udtPath):
splitPath = udtPath.split("_types_/")
if len(splitPath)==2:
return splitPath[1]
return udtPath
def getUdtInstancePathsInFolder(udtPath,folderPath):
children = system.tag.browse(folderPath).getResults()
pathList = []
if children!=None:
shortUdtPath = shortenUdtPath(udtPath)
for child in children:
if str(child['tagType']) == "UdtInstance" and str(child['typeId']).endswith(shortUdtPath):
childPath = str(child['fullPath'])
pathList.append(childPath)
return pathList
def createFolderAndParentFolders(folderPath):
if system.tag.exists(folderPath):
return
pathSegments = folderPath.split("/")
if pathSegments[0].startswith("["):
parentPath = pathSegments.pop(0)
else:
parentPath = ""
collisionPolicy = "a"
for pathSegment in pathSegments:
folderConfig = {
"name": pathSegment,
"tagType" : "Folder",
}
qc = system.tag.configure(parentPath, [folderConfig], collisionPolicy)
if logger.isDebugEnabled():
logger.debug(str(qc[0]))
parentPath+= "/" + pathSegment
if not system.tag.exists(folderPath):
raise Exception("Folder '%s' could not be created." % folderPath)
class TagQualityException(Exception):
def __init__(self, tagPaths, qualitycodes, baseMessage="Error updating tags."):
self.qualDict = dict(zip(tagPaths,qualitycodes))
self.baseMessage = baseMessage
Exception.__init__(self,self.getTagQualMessage(includeGood=False))
def getTagsByQuality(self):
tagsByQual = {
"good":[],
"notGood":{}
}
for tagPath,qc in self.qualDict.items():
if qc.good:
tagsByQual["good"].append(tagPath)
else:
tagsByQual["notGood"].setdefault(qc, []).append(tagPath)
return tagsByQual
def getTagQualMessage(self,includeGood=True):
tagsByQual = self.getTagsByQuality()
message = self.baseMessage + "\n"
for qc,tagPaths in tagsByQual["notGood"].items():
message += list_tools.createListMessage(tagPaths,"Quality Code=%s" % str(qc)) + "\n"
if includeGood:
message += list_tools.createListMessage(tagsByQual["good"],"The following tags were updated successfully:") + "\n"
return message
class TagConfigQualityException(TagQualityException):
def __init__(self, tagPaths, qualitycodes, baseMessage="Error configuring tags."):
TagQualityException.__init__(self, tagPaths, qualitycodes, baseMessage)
class TagWriteQualityException(TagQualityException):
def __init__(self, tagPaths, qualitycodes, baseMessage="Error writing to tags."):
TagQualityException.__init__(self, tagPaths, qualitycodes, baseMessage)
def writeBlockingRaiseException(tagPaths,values,timeout=45000):
qualitycodes = system.tag.writeBlocking(tagPaths,values,timeout)
for qc in qualitycodes:
if not qc.good:
raise TagWriteQualityException(tagPaths,qualitycodes)
def configureRaiseException(basePath, tags, collisionPolicy="a"):
qualitycodes = system.tag.configure(basePath, tags, collisionPolicy)
for qc in qualitycodes:
if not qc.good:
tagPaths = [basePath+"/"+tag['name'] for tag in tags]
raise TagConfigQualityException(tagPaths,qualitycodes)
### Testing ####
def createFolderAndParentFoldersTest():
folderPath = "[default]test22341/test352341/test41/test5253"
createFolderAndParentFolders(folderPath)
assert system.tag.exists(folderPath)
folderPath = "test2478/test33568/test43568/test53568"
createFolderAndParentFolders(folderPath)
assert system.tag.exists(folderPath)
``` |
{
"source": "JLBicho/rssi_module",
"score": 3
} |
#### File: lib.linux-x86_64-2.7/rssi/__init__.py
```python
name = "rssi"
from subprocess import Popen, PIPE # Used to run native OS commands in a python-wrapped subprocess
import numpy # Used for matrix operations in localization algorithm
# RSSI_Scan
# Use:
# from rssi import RSSI_Scan
# rssi_scan_instance = RSSI_Scan('network_interface_name)
# -------------------------------------------------------
# Description:
# Allows a user to query all available accesspoints available.
# User has the option of define a specific set of access
# points to query.
# -------------------------------------------------------
# Input: interface name
# [ie. network interface names: wlp1s0m, docker0, wlan0]
class RSSI_Scan(object):
# Allows us to declare a network interface externally.
def __init__(self, interface):
self.interface = interface
# getRawNetworkScan
# Description:
# Runs the Ubuntu command 'iwlist' to scan for available networks.
# Returns the raw console window output (unparsed).
# ----------------------------------------------------------------
# Input: (optional)
    #     sudo: bool; defaults to False. False will not refresh the
    #           network interface upon query. sudo=True requires the
    #           user to enter a sudo password at runtime.
# ----------------------------------------------------------------
# Returns: Raw terminal output
# {
# 'output':'''wlp1s0 Scan completed :
# Cell 01 - Address: A0:3D:6F:26:77:8E
# Channel:144
# Frequency:5.72 GHz
# Quality=43/70 Signal level=-67 dBm
# Encryption key:on
# ESSID:"ucrwpa"
# Bit Rates:24 Mb/s; 36 Mb/s; 48 Mb/s; 54 Mb/s
# Mode:Master
# Cell 02 - Address: A0:3D:6F:26:77:82
# Channel:1
# Frequency:2.412 GHz (Channel 1)
# Quality=43/70 Signal level=-67 dBm
# Encryption key:on
# ESSID:"eduroam"
# Bit Rates:18 Mb/s; 24 Mb/s; 36 Mb/s; 48 Mb/s; 54 Mb/s
# Mode:Master''',
# 'error':''
# }
def getRawNetworkScan(self, sudo=False):
# Scan command 'iwlist interface scan' needs to be fed as an array.
if sudo:
scan_command = ['sudo','iwlist',self.interface,'scan']
else:
scan_command = ['iwlist',self.interface,'scan']
# Open a subprocess running the scan command.
scan_process = Popen(scan_command, stdout=PIPE, stderr=PIPE)
# Returns the 'success' and 'error' output.
(raw_output, raw_error) = scan_process.communicate()
# Block all execution, until the scanning completes.
scan_process.wait()
# Returns all output in a dictionary for easy retrieval.
return {'output':raw_output,'error':raw_error}
# getSSID
# Description:
# Parses the 'SSID' for a given cell.
# -----------------------------------------------
# Input: (Raw string)
# 01 - Address: A0:3D:6F:26:77:8E
# Channel:144
# Frequency:5.72 GHz
# Quality=43/70 Signal level=-67 dBm
# Encryption key:on
# ESSID:"ucrwpa"
# Bit Rates:24 Mb/s; 36 Mb/s; 48 Mb/s; 54 Mb/s
# Mode:Master
# -----------------------------------------------
# Returns:
# 'ucrwpa'
@staticmethod
def getSSID(raw_cell):
ssid = raw_cell.split('ESSID:"')[1]
ssid = ssid.split('"')[0]
return ssid
# getQuality
# Description:
# Parses 'Quality level' for a given cell.
# -----------------------------------------------
# Input: (Raw string)
# 01 - Address: A0:3D:6F:26:77:8E
# Channel:144
# Frequency:5.72 GHz
# Quality=43/70 Signal level=-67 dBm
# Encryption key:on
# ESSID:"ucrwpa"
# Bit Rates:24 Mb/s; 36 Mb/s; 48 Mb/s; 54 Mb/s
# Mode:Master
# -----------------------------------------------
# Returns:
# '43/70'
@staticmethod
def getQuality(raw_cell):
quality = raw_cell.split('Quality=')[1]
quality = quality.split(' ')[0]
return quality
# getSignalLevel
# Description:
# Parses 'Signal level' for a given cell.
# Measurement is in 'dBm'.
# -----------------------------------------------
# Input: (Raw string)
# 01 - Address: A0:3D:6F:26:77:8E
# Channel:144
# Frequency:5.72 GHz
# Quality=43/70 Signal level=-67 dBm
# Encryption key:on
# ESSID:"ucrwpa"
# Bit Rates:24 Mb/s; 36 Mb/s; 48 Mb/s; 54 Mb/s
# Mode:Master
# -----------------------------------------------
# Returns: (string)
# '-67'
@staticmethod
def getSignalLevel(raw_cell):
signal = raw_cell.split('Signal level=')[1]
signal = int(signal.split(' ')[0])
return signal
# parseCell
# Description:
# Takes a raw cell string and parses it into a dictionary.
# -----------------------------------------------
# Input: (Raw string)
# '''01 - Address: A0:3D:6F:26:77:8E
# Channel:144
# Frequency:5.72 GHz
# Quality=43/70 Signal level=-67 dBm
# Encryption key:on
# ESSID:"ucrwpa"
# Bit Rates:24 Mb/s; 36 Mb/s; 48 Mb/s; 54 Mb/s
# Mode:Master'''
# -----------------------------------------------
# Returns:
# {
# 'ssid':'ucrwpa',
# 'quality':'43/70',
# 'signal':'-67'
# }
def parseCell(self, raw_cell):
cell = {
'ssid': self.getSSID(raw_cell),
'quality': self.getQuality(raw_cell),
'signal': self.getSignalLevel(raw_cell)
}
return cell
# formatCells
# Description:
    #     Every network listed is considered a 'cell'.
    #     This function parses each cell into a dictionary.
    #     Returns a list of dictionaries. Makes use of 'parseCell'.
    #     If no networks were detected, returns False.
# -----------------------------------------------
# Input: (Raw terminal string)
# '''01 - Address: A0:3D:6F:26:77:8E
# Channel:144
# Frequency:5.72 GHz
# Quality=43/70 Signal level=-67 dBm
# Encryption key:on
# ESSID:"ucrwpa"
# Bit Rates:24 Mb/s; 36 Mb/s; 48 Mb/s; 54 Mb/s
# Mode:Master
# 02 - Address: A0:3D:6F:26:77:8E
# Channel:144
# Frequency:5.72 GHz
# Quality=30/70 Signal level=-42 dBm
# Encryption key:on
# ESSID:"dd-wrt"
# Bit Rates:24 Mb/s; 36 Mb/s; 48 Mb/s; 54 Mb/s
# Mode:Master'''
# -----------------------------------------------
# Returns: (Array of dictionaries)
# [
# {
# 'ssid':'ucrwpa',
# 'quality':'43/70',
# 'signal':'-67'
# },
# {
# 'ssid':'dd-wrt',
# 'quality':'30/70',
# 'signal':'-42'
# }
# ]
def formatCells(self, raw_cell_string):
raw_cells = raw_cell_string.split('Cell') # Divide raw string into raw cells.
        raw_cells.pop(0) # Remove unnecessary "Scan Completed" message.
if(len(raw_cells) > 0): # Continue execution, if atleast one network is detected.
# Iterate through raw cells for parsing.
# Array will hold all parsed cells as dictionaries.
formatted_cells = [self.parseCell(cell) for cell in raw_cells]
# Return array of dictionaries, containing cells.
return formatted_cells
else:
print("Networks not detected.")
return False
            # TODO implement function in node to process this boolean (False)
# filterAccessPoints
# Description:
# If the 'networks' parameter is passed to the 'getAPinfo'
# function, then this method will filter out all irrelevant
# access-points. Access points specified in 'networks' array
# will be returned (if available).
# -----------------------------------------------
# Input: (Parsed array of cell dictionaries)
# all_access_points =
# [
# {
# 'ssid':'ucrwpa',
# 'quality':'43/70',
# 'signal':'-67'
# },
# {
# 'ssid':'dd-wrt',
# 'quality':'30/70',
# 'signal':'-42'
# },
# {
# 'ssid':'linksys',
# 'quality':'58/70',
# 'signal':'-24'
# }
# ]
# network_names = (array of network names)
# ['ucrwpa','dd-wrt']
# -----------------------------------------------
# Returns: (Array of dictionaries)
# [
# {
# 'ssid':'ucrwpa',
# 'quality':'43/70',
# 'signal':'-67'
# },
# {
# 'ssid':'dd-wrt',
# 'quality':'30/70',
# 'signal':'-42'
# }
# ]
@staticmethod
def filterAccessPoints(all_access_points, network_names):
focus_points = [] # Array holding the access-points of concern.
        # Iterate through all access-points found.
for point in all_access_points:
# Check if current AP is in our desired list.
if point['ssid'] in network_names:
focus_points.append(point)
return focus_points
        # TODO implement something in case our desired ones were not found
# getAPinfo
# Description:
# Method returns all (or chosen) available access points (in range).
# Takes 2 optional parameters:
# 'networks' (array):
# Lists all ssid's of concern. Will return only the available access
# points listed here. If not provided, will return ALL access-points in range.
# 'sudo' (bool):
    #         Whether or not the method should use sudo privileges. If the user uses sudo
# privileges, the network manager will be refreshed and will return
# a fresh list of access-points available. If sudo is not provided,
# a cached list will be returned. Cached list gets updated periodically.
# -----------------------------------------------
# Input: (Parsed array of cell dictionaries)
# networks = (array of network names)
# ['ucrwpa','dd-wrt']
# sudo = True || False
# -----------------------------------------------
# Returns: (Array of dictionaries)
# [
# {
# 'ssid':'ucrwpa',
# 'quality':'43/70',
# 'signal':'-67'
# },
# {
# 'ssid':'dd-wrt',
# 'quality':'30/70',
# 'signal':'-42'
# }
# ]
def getAPinfo(self, networks=False, sudo=False):
# TODO implement error callback if error is raise in subprocess
# Unparsed access-point listing. AccessPoints are strings.
raw_scan_output = self.getRawNetworkScan(sudo)['output']
# Parsed access-point listing. Access-points are dictionaries.
all_access_points = self.formatCells(raw_scan_output)
# Checks if access-points were found.
if all_access_points:
# Checks if specific networks were declared.
if networks:
# Return specific access-points found.
return self.filterAccessPoints(all_access_points, networks)
else:
# Return ALL access-points found.
return all_access_points
else:
# No access-points were found.
return False
# RSSI_Localizer
# Use:
# from rssi import RSSI_Localizer
# rssi_localizer_instance = RSSI_Localizer()
# -------------------------------------------------------
# Description:
# This class helps a user implement rssi-based localization.
#     The algorithm assumes the logarithmic distance path-loss model
#     and a minimum of 3 (or more) access points.
# -------------------------------------------------------
# Input:
# accessPoints: Array holding accessPoint dictionaries.
# The order of the arrays supplied will retain
# its order, throughout the entire execution.
# [{
# 'signalAttenuation': 3,
# 'location': {
# 'y': 1,
# 'x': 1
# },
# 'reference': {
# 'distance': 4,
# 'signal': -50
# },
# 'name': 'dd-wrt'
# },
# {
# 'signalAttenuation': 4,
# 'location': {
# 'y': 1,
# 'x': 7
# },
# 'reference': {
# 'distance': 3,
# 'signal': -41
# },
# 'name': 'ucrwpa'
# }]
class RSSI_Localizer(object):
# Allows us to fetch for networks/accessPoints externally.
# Array of access points must be formatted.
# 'self.count' parameter is computed internally to aid in
# scaling of the algorithm.
def __init__(self,accessPoints):
self.accessPoints = accessPoints
self.count = len(accessPoints)
# getDistanceFromAP
# Description:
    #     Uses the log model to compute an estimated distance (di) from node (i)
# -------------------------------------------------------
# Input:
# accessPoint: dicitonary holding accesspoint info.
# {
# 'signalAttenuation': 3,
# 'location': {
# 'y': 1,
# 'x': 1
# },
# 'reference': {
# 'distance': 4,
# 'signal': -50
# },
# 'name': 'dd-wrt'
# }
# signalStrength: -69
# -------------------------------------------------------
# output:
# accessPoint: dicitonary holding accesspoint info.
# {
# 'signalAttenuation': 3,
# 'location': {
# 'y': 1,
# 'x': 1
# },
# 'reference': {
# 'distance': 4,
# 'signal': -50
# },
# 'name': 'dd-wrt',
# 'distance': 2
# }
@staticmethod
def getDistanceFromAP(accessPoint, signalStrength):
beta_numerator = float(accessPoint['reference']['signal']-signalStrength)
beta_denominator = float(10*accessPoint['signalAttenuation'])
beta = beta_numerator/beta_denominator
distanceFromAP = round(((10**beta)*accessPoint['reference']['distance']),4)
accessPoint.update({'distance':distanceFromAP})
return accessPoint
    # TODO fix this because there are two consecutive for loops:
    # one that runs to feed signal strengths to this function,
    # and a second loop inside the function.
# getDistancesForAllAPs
# Description:
# Makes use of 'getDistanceFromAP' to iterate through all
# accesspoints being used in localization and obtains the
# distance from each one of them.
# ------------------------------------------------
# Input:
# signalStrengths:
# [siganl1, siganl2, siganl3]
# [-42, -53, -77]
# ------------------------------------------------
# Output:
# [
# {
# 'distance': 4,
# 'x': 2,
# 'y': 3
# },
# {
# 'distance': 7,
# 'x': 2,
# 'y': 5
# },
# {
# 'distance': 9,
# 'x': 7,
# 'y': 3
# }
# ]
def getDistancesForAllAPs(self, signalStrengths):
apNodes = []
for i in range(len(self.accessPoints)):
ap = self.accessPoints[i]
distanceFromAP = self.getDistanceFromAP(
ap,
signalStrengths[i]
)
apNodes.append({
'distance': distanceFromAP['distance'],
'x': ap['location']['x'],
'y': ap['location']['y']
})
return apNodes
# createMatrices
# Description:
    #     Creates the matrices necessary to use the least squares method
    #     in order to minimize the error (error=|realDistance-estimatedDistance|).
    #     Assuming 'n' nodes, d(m) is the distance (d) from node (m).
# AX = B, where X is our estimated location.
# A = [
# 2(x(i)-xn) 2(y(i)-yn)
# 2(x(i+1)-xn) 2(y(i+1)-yn)
# ... ...
# 2(x(n-1)-xn) 2(y(n-1)-yn)
# ]
# B = [
# x(i)^2 + y(i)^2 - x(n)^2 + y(n)^2 - d(i)^2 + d(n)^2
# x(i+1)^2 + y(i+1)^2 - x(n)^2 + y(n)^2 - d(i+1)^2 + d(n)^2
# ...
# x(n-1)^2 + y(n-1)^2 - x(n)^2 + y(n)^2 - d(n-1)^2 + d(n)^2
# ]
# ----------------------------------------
# Input:
# accessPoints
# [
# {
# 'distance': 4,
# 'x': 2,
# 'y': 3
# },
# {
# 'distance': 7,
# 'x': 2,
# 'y': 5
# },
# {
# 'distance': 9,
# 'x': 7,
# 'y': 3
# }
# ]
# ----------------------------------------
# Output:
# A = [
# 2(2-7) 2(3-3)
# 2(2-7) 2(5-3)
# ]
# B = [
# 2^2 + 3^2 - 7^2 + 3^2 - 4^2 + 9^2
# 2^2 + 5^2 - 7^2 + 3^2 - 7^2 + 9^2
# ]
def createMatrices(self, accessPoints):
        # The matrices only go as far as 'n-1' rows,
        # with 'n' being the number of access points being used.
        n_count = self.count-1
        # initialize 'A' matrix with 'n-1' uninitialized rows.
        a = numpy.empty((n_count,2))
        # initialize 'B' matrix with 'n-1' uninitialized rows.
        b = numpy.empty((n_count,1))
# Define 'x(n)' (x of last accesspoint)
x_n = accessPoints[n_count]['x']
# Define 'y(n)' (y of last accesspoint)
y_n = accessPoints[n_count]['y']
# Define 'd(n)' (distance from of last accesspoint)
d_n = accessPoints[n_count]['distance']
# Iteration through accesspoints is done upto 'n-1' only
for i in range(n_count):
ap = accessPoints[i]
x, y, d = ap['x'], ap['y'], ap['distance']
a[i] = [2*(x-x_n), 2*(y-y_n)]
b[i] = [(x**2)+(y**2)-(x_n**2)-(y_n**2)-(d**2)+(d_n**2)]
return a, b
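# Derivation sketch for one row of A and B (restating the formulas above):
# subtracting the n-th circle equation (X - x_n)^2 + (Y - y_n)^2 = d_n^2 from
# the i-th one (X - x_i)^2 + (Y - y_i)^2 = d_i^2 cancels X^2 and Y^2 and, after
# rearranging, leaves the linear row
#   2*(x_i - x_n)*X + 2*(y_i - y_n)*Y = x_i^2 + y_i^2 - x_n^2 - y_n^2 - d_i^2 + d_n^2
# which is exactly how a[i] (left-hand side) and b[i] (right-hand side) are
# filled in the loop above.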
# computePosition
# Description:
# Performs the 'least squares method' matrix operations
# necessary to get the 'x' and 'y' of the unknown
# beacon's position.
# X = [(A_transposed*A)^-1]*[A_transposed*B]
# ----------------------------------------
# Input:
# A = [
# -10  0
# -10  4
# ]
# B = [
# 4 + 9 - 49 - 9 - 16 + 81 => 20
# 4 + 25 - 49 - 9 - 49 + 81 => 3
# ]
# ----------------------------------------
# Output:
# x
# [
# -2,
# -4.25
# ]
@staticmethod
def computePosition(a, b):
# Get 'A_transposed' matrix
at = numpy.transpose(a)
# Get 'A_transposed*A' matrix
at_a = numpy.matmul(at,a)
# Get '[(A_transposed*A)^-1]' matrix
inv_at_a = numpy.linalg.inv(at_a)
# Get '[A_transposed*B]'
at_b = numpy.matmul(at,b)
# Get '[(A_transposed*A)^-1]*[A_transposed*B]'
# This holds our position (xn,yn)
x = numpy.matmul(inv_at_a,at_b)
return x
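# Side note (an alternative, not the code path used above): when A^T*A is
# singular (for example, collinear access points) numpy.linalg.inv raises
# LinAlgError; numpy.linalg.lstsq(a, b, rcond=None)[0] computes the same
# least-squares solution when A has full column rank and still returns a
# minimum-norm answer otherwise.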
# getNodePosition
# Description:
# Combines 'getDistancesForAllAPs', 'createMatrices',
# and 'computePosition' to get the 'X' vector that
# contains our unknown (x,y) position.
# ----------------------------------------
# Input:
# signalStrengths
# [4, 2, 3]
# ----------------------------------------
# Output:
# x
# [2, 3]
def getNodePosition(self, signalStrengths):
apNodes = self.getDistancesForAllAPs(signalStrengths)
a, b = self.createMatrices(apNodes)
position = self.computePosition(a, b)
# print(a)
# print(b)
return position
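# Illustrative usage sketch (the enclosing class and its constructor are
# defined earlier in this file; the name 'Localization' and the constructor
# argument shown here are assumptions made only for this example):
# localizer = Localization(accessPoints=[...])  # access point dicts as documented above
# position = localizer.getNodePosition([-42, -53, -77])
# 'position' is a 2x1 numpy array holding the estimated [[x], [y]].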
``` |
{
"source": "jlboat/BayesASE",
"score": 3
} |
#### File: src/scripts/gen_headers_after_merge_priors_one_comparate.py
```python
import re
import argparse
import os
import sys
import numpy as np
from functools import reduce
from collections import OrderedDict
import pandas as pd
DEBUG = False
def getOptions():
parser = argparse.ArgumentParser(
description='Create headers for the model for a single comparate.'
)
parser.add_argument(
"-output",
"--output",
dest="output",
action="store",
required=True,
help="Output directory for complete merged comparate files ready for Bayesian"
)
parser.add_argument(
"-collection_identifiers",
"--collection_identifiers",
dest="collection_identifiers",
action="store",
required=True,
help="ASE count table collection identifiers"
)
parser.add_argument(
"-collection_filenames",
"--collection_filenames",
dest="collection_filenames",
action='store',
required=True,
help="ASE count table collection filenames"
)
parser.add_argument(
"-design",
"--design",
dest="design",
action='store',
required=True,
help="Design file"
)
parser.add_argument(
"--debug",
action="store_true",
default=False,
help="Print debugging output"
)
args = parser.parse_args()
return args
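# Example invocation (a sketch; every path and comparate name below is
# hypothetical):
# python gen_headers_after_merge_priors_one_comparate.py \
#     --output /path/to/output_dir \
#     --collection_identifiers "bayesian_input_comparate_1,bayesian_input_comparate_2" \
#     --collection_filenames "/path/to/comparate_1.tsv,/path/to/comparate_2.tsv" \
#     --design /path/to/design_file.tsv \
#     --debug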
def main():
args = getOptions()
global DEBUG
if args.debug:
DEBUG=True
identifiers = [i.strip() for i in args.collection_identifiers.split(",")]
filenames = [i.strip() for i in args.collection_filenames.split(",")]
input_dict_comp = dict(zip(identifiers, filenames))
if DEBUG:
print(f"DEBUG: input dict:\n{input_dict_comp}")
### Read in design file as dataframe
# Make sure the design file is read as a TSV
df_design = pd.read_csv(args.design, sep ='\t')
if DEBUG:
print(f"DEBUG: design:\n{df_design}")
### Subset design file and create comparate specification columns
### Store compID to name output file
c1_list = df_design['Comparate_1'].tolist()
# Use 'sample_dict' rather than 'dict', since the built-in 'dict' is already used above to read the collections
sample_dict = {}
#col_list = list(df_design.columns.values)
row_list = []
comparison_list = df_design['compID'].tolist()
del df_design['compID']
### Create dictionaries per design file row to store the row's comparate files
for index, sample in df_design.iterrows():
sample_dict[index] = list(sample)
## Create a dictionary containing the comparisons between each parental genome and the comparate
for key in sample_dict:
row_list = sample_dict[key]
file_list = []
comp_dict = {}
comparison = comparison_list[key]
c1 = c1_list[key]
for i, comp in enumerate(row_list):
comp_dict[i + 1] = comp
# Assign filename so that Galaxy can correctly call and recognize the collection
comp_name = 'bayesian_input_' + comp
row_list[i] = input_dict_comp[comp_name]
#Use pd.read_table to read file into dataframe
file = pd.read_table(row_list[i], index_col=None, header=0)
file_list.append(file)
df_merged = reduce(lambda x, y: pd.merge(x, y, on = ['FEATURE_ID']), file_list)
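# Merge-step sketch (illustrative only; the column names here are
# hypothetical): with df1 = [FEATURE_ID, counts_a] and df2 = [FEATURE_ID, counts_b],
# reduce(lambda x, y: pd.merge(x, y, on=['FEATURE_ID']), [df1, df2]) yields a
# single frame keyed by FEATURE_ID that carries every comparate file's columns
# side by side for this comparison.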
### drop columns you don't want before merge
df_merged = df_merged[df_merged.columns.drop(list(df_merged.filter(regex='comp')))]
df_merged.set_index('FEATURE_ID', inplace=True)
## AMM: fixed the line below; get_values() is deprecated
## merged_headers = list(df_merged.columns.get_values())
merged_headers = list(df_merged.columns.to_numpy())
### For the Stan model, headers must use general comparate input names;
### this reassigns comparate names to 'c1'.
for x in comp_dict:
for i in range(len(merged_headers)):
if c1 in merged_headers[i]:
merged_headers[i] = merged_headers[i].replace(c1, 'c1')
df_merged.columns=merged_headers
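# Renaming sketch (the comparate and header names here are hypothetical): if
# c1 is 'W55_M', a column such as 'counts_W55_M_rep1' becomes 'counts_c1_rep1',
# so downstream Bayesian/Stan code can address every comparate through the
# generic 'c1' label regardless of its real name.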
df_filtered = df_merged
#Change the output name from bayesian_input_comp to bayesian_input
outfile = args.output + '/bayesian_input_' + comparison
df_filtered.to_csv(outfile, sep='\t')
if __name__=='__main__':
main()
``` |