metadata (dict) | text (stringlengths 60 to 3.49M) |
---|---|
{
"source": "jmencisom/wikastro",
"score": 3
} |
#### File: wikastro/src/BasicInfo.py
```python
import os
from urllib import request as url_op
from astropy.coordinates import SkyCoord, get_constellation
from astropy import units as u
from astropy.cosmology import WMAP9 as cosmo
from astropy.coordinates import Distance
class BasicInfo:
def __init__(self, attributes):
"""
        Receives the attributes as a string and then assigns these values to
        the class variables.
:Param: String that contains basic info separated by '|'
:Return: Nothing.
"""
self.__attributes = attributes
self.__objectType = ""
self.__nombre=""
self.__epoch = "J2000"
self.__rightAscension = ""
self.__declination = ""
self.__helioRadialVelocity = ""
self.__redShift = ""
self.__morphologicalType = ""
self.__apparentMagnitude = ""
self.__otherNames = ""
self.__namesReferences = [[" UGC", " [[Uppsala General Catalogue|UGC]]"],
[" PGC", " [[Principal Galaxies Catalogue|PGC]]"],
[" MCG", " [[Morphological Catalogue of Galaxies|MCG]]"],
[" GC", " [[Catalogue of Nebulae and Clusters of Stars|GC]]"],
[" CGCG", " [[Catalogue of Galaxies and Clusters of Galaxies|CGCG]]"],
[" NGC", " [[New General Catalogue|NGC]]"],
[" ESO", " [[ESO]]"],
[" Tuc", " [[Bayer designation|ξ Tuc]]"],
[" 1RXS", " [[1RXS]]"],
[" 2MASX", " [[2MASX]]"]]
self.__obtainValues()
def __obtainValues(self):
"""
        Using the variable 'self.__attributes', a string array is obtained
        and then each individual string is assigned to a different
        class variable.
:Param: Nothing.
:Returns: Nothing.
"""
separatedText = self.__attributes.split("|")
self.__nombre = separatedText[0]
self.__objectType = separatedText[3]
self.__rightAscension = separatedText[1]
self.__declination = separatedText[2]
self.__helioRadialVelocity = separatedText[4]
self.__redShift = separatedText[5]
self.__morphologicalType = separatedText[7]
self.__apparentMagnitude = separatedText[6]
self.__otherNames = self.formatOtherNames(separatedText[8])
def setAttributes(self, attributes):
"""
Assign the attributes received in the parameters into the
attributes from the class and then calls the method obtainValues()
in order to get a better organization.
:Param: attributes obtained from the simbad database.
:Returns: nothing.
"""
self.__attributes = attributes
self.__obtainValues()
def getAttributes(self):
"""
        Obtains the attributes from the class and returns them.
        The attributes variable is the text received from the SIMBAD
        database.
:Param: nothing.
:Returns: attributes from the class.
"""
return self.__attributes
def getObjectType(self):
"""
        If the object type is not empty, returns its content. Otherwise,
        it returns an empty string.
:Param: nothing.
:Returns: object type.
"""
if(self.testEmptiness(self.__objectType, "Object Type")):
return ""
else:
return self.__objectType
def getEpoch(self):
"""
        Returns the epoch of the NGC object; this variable usually contains
        the J2000 epoch.
:Param: nothing.
:Returns: epoch.
"""
return self.__epoch
def getRightAscension(self):
"""
        Returns the right ascension of the coordinates of the NGC object.
        If the variable is empty, an empty string is returned.
:Param: nothing.
:Returns: right ascension.
"""
if(self.testEmptiness(self.__rightAscension, "Right Ascension")):
return ""
else:
return self.__rightAscension
def getDeclination(self):
"""
        Returns the declination of the coordinates of the NGC object.
        If the variable is empty, an empty string is returned.
:Param: nothing.
:Returns: declination.
"""
if(self.testEmptiness(self.__declination, "Declination")):
return ""
else:
return self.__declination
def getHelioRadialVelocity(self):
"""
        Returns the helio radial velocity (also known as HRV) of the NGC object.
        If the variable is empty, an empty string is returned.
:Param: nothing.
:Returns: helio radial velocity.
"""
if(self.testEmptiness(self.__helioRadialVelocity,
"Helio Radial Velocity")):
return ""
else:
return self.__helioRadialVelocity
def getRedShift(self):
"""
        Returns the red shift (also known as z) of the NGC object.
        If the variable is empty, an empty string is returned.
:Param: nothing.
:Returns: red shift.
"""
if(self.testEmptiness(self.__redShift, "Red Shift")):
return ""
else:
return self.__redShift
def getMorphologicalType(self):
"""
        Returns the morphological type of the NGC object, which is usually
        abbreviated by the SIMBAD database for common objects such as
        galaxies, stars, etc.
:Param: nothing.
:Returns: morphological type.
"""
if(self.testEmptiness(self.__morphologicalType, "Morphological Type")):
return ""
else:
return self.__morphologicalType
def getApparentMagnitude(self):
"""
        Returns the apparent magnitude (also known as B) of the NGC object.
        If the variable is empty, an empty string is returned.
        :Param: nothing.
        :Returns: apparent magnitude.
"""
if(self.testEmptiness(self.__apparentMagnitude, "Apparent Magnitude")):
return ""
else:
return self.__apparentMagnitude
def getOtherNames(self):
"""
        Returns the other names of an NGC object. This variable may contain
        several lesser-known alternative names or just a single name.
:Param: nothing.
:Returns: other names of the ngc object as a string.
:rtype: String
"""
if(self.testEmptiness(self.__otherNames, "Other Names")):
return ""
else:
return self.__otherNames
def getCoordinates(self):
"""
        This method concatenates the right ascension and the declination
        and splits the result on whitespace.
        :Param: Nothing.
        :Returns: List with the coordinate components, right ascension
        first and declination last.
"""
return (self.getRightAscension() + " " + self.getDeclination()).split()
def getConstellation(self):
"""
Coming soon...
"""
ra=self.getRightAscension()
dec=self.getDeclination()
ra2=ra.split(" ")
dec2=dec.split(" ")
ra=ra2[0]+"h"+ra2[1]+"m"+ra2[2]+"s"
dec=dec2[0]+"d"+dec2[1]+"m"+dec2[2]+"s"
constellation = SkyCoord(ra, dec, frame='icrs')
return get_constellation(constellation)
def getDistance(self):
"""
        This method returns the comoving distance of the object, derived
        from its redshift.
        :Param: Nothing.
        :Returns: Formatted string with the distance in lightyears (with an
        'M' suffix for millions), or an empty string if no redshift is known.
        :rtype: String
"""
if (self.getRedShift() != ""):
r = float(self.getRedShift())
d2 = Distance(cosmo.comoving_distance(r), u.lightyear)
value = float(str(d2).split()[0])
divided = value / 1000000 #1 million
if divided > 1:
return "{:.2f} M".format(divided)
else:
return "{:.2f} ".format(value)
else:
return ""
def getDiscovererAndYear(self,ngc_num):
"""
This method returns the discoverer of the object and the
year it was discovered.
:Param: the number of the object required.
        :Returns: List of two strings containing the discoverer and
        the year, respectively.
        :rtype: list
"""
discoverer=""
year=""
try:
if(os.path.exists('info.txt')):
pass
else:
page = 'http://ngcicproject.org/public_HCNGC/DPublic_HCNGC.txt'
url_op.urlretrieve(page, "info.txt")
self.ngc_num = int(ngc_num)+9
archivo = open("info.txt")
cont=0
while cont != self.ngc_num:
linea=archivo.readline()
cont+=1
arr = linea.split("|")
discoverer = arr[9]
year = arr[10]
        except Exception:
            print("Error found")
return [discoverer, year]
def formatOtherNames(self, names):
"""
This method checks that the main name is not included and also
adds the known references.
:Param: A string that contains the list of names returned by simbad.
:type: String
:Returns: A String that contains the referenced names.
:rtype: String
"""
        format_names = names[names.find(",")+1:]  # removes the first name (NGC)
for ref in self.__namesReferences:
format_names=format_names.replace(ref[0], ref[1])
return format_names
def testEmptiness(self, variable, name):
"""
        This method tests whether a variable is empty. Emptiness is
        represented by an empty string '' or by the string '~'. If
        the variable is empty, True is returned.
:Param: Variable to test and name of the variable to print.
:Returns: True if variable is empty.
"""
if(variable == "" or variable == "~"):
print("There is no info about " + name + ".")
return True
return False
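

# --- Hedged usage sketch (not part of the original module) ---
# Shows how BasicInfo might be driven once a '|'-separated SIMBAD record has
# been fetched. The attribute string below is illustrative only, not real
# SIMBAD output; the field order follows __obtainValues() above.
if __name__ == "__main__":
    sample = ("NGC 1|00 07 15.8|+27 42 29|G|4536|0.015|"
              "13.6|Sbc|NGC 1, UGC 57, PGC 564")
    info = BasicInfo(sample)
    print(info.getObjectType())
    print(info.getConstellation())  # requires astropy
    print(info.getDistance())       # derived from the redshift field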
``` |
{
"source": "jmendeth/protobuf-parser",
"score": 2
} |
#### File: protobuf-parser/protobuf_inspector/core.py
```python
import io, struct
# Core parsing. This handles the most low-level deserialization.
# No guessing going on here. These functions return None on EOF.
def read_varint(file):
result = 0; pos = 0
while True:
b = file.read(1)
if not len(b):
assert(not pos)
return None
b = b[0]
result |= ((b & 0x7F) << pos)
pos += 7
if not (b & 0x80):
assert(b != 0 or pos == 7)
return result
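
# Worked example (illustrative, not part of the original module): the byte
# sequence b"\xac\x02" decodes as the varint 300, since 0xAC contributes
# 0x2C (44) at bit offset 0 and 0x02 contributes 2 << 7 = 256.
#
#     import io
#     assert read_varint(io.BytesIO(b"\xac\x02")) == 300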
def read_identifier(file):
id = read_varint(file)
if id is None: return (None, None)
return (id >> 3, id & 0x07)
def read_value(file, wire_type):
if wire_type == 0:
return read_varint(file)
if wire_type == 1:
c = file.read(8)
if not len(c): return None
assert(len(c) == 8)
return c
if wire_type == 2:
length = read_varint(file)
if length is None: return None
c = file.read(length)
assert(len(c) == length)
return io.BytesIO(c)
if wire_type == 3 or wire_type == 4:
return wire_type == 3
if wire_type == 5:
c = file.read(4)
if not len(c): return None
assert(len(c) == 4)
return c
raise Exception("Unknown wire type %d" % wire_type)
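

# --- Hedged usage sketch (not part of the original module) ---
# Minimal demo of the low-level reader on a hand-built message containing a
# varint field (field 1 = 150) and a length-delimited field (field 2 = b"hi").
if __name__ == "__main__":
    buf = io.BytesIO(b"\x08\x96\x01\x12\x02hi")
    while True:
        field, wire_type = read_identifier(buf)
        if field is None:
            break
        value = read_value(buf, wire_type)
        if wire_type == 2:
            value = value.read()  # unwrap the BytesIO chunk
        print(field, wire_type, value)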
``` |
{
"source": "jmendozais/SDSSDepth",
"score": 2
} |
#### File: SDSSDepth/data/kitti.py
```python
import torch
import torch.utils.data as data
import os
import glob
import numpy as np
import numpy.random as rng
from skimage import io, transform
from torchvision.transforms import functional as func
from PIL import Image
from data.dataset import Dataset
from eval import kitti_depth_eval_utils as kitti_utils
class Kitti(Dataset):
def __init__(
self,
data_dir,
frames_file,
height=128,
width=416,
num_scales=4,
seq_len=3,
is_training=True,
load_depth=False,
load_intrinsics=True,
):
super(Kitti, self).__init__(
data_dir,
frames_file,
height,
width,
num_scales,
seq_len,
is_training,
load_depth,
load_intrinsics=load_intrinsics,
)
self.full_res = (375, 1242)
(
self.gt_files,
self.gt_calib,
self.im_sizes,
self.im_files,
self.cams_ids,
) = kitti_utils.read_file_data(self.filenames, data_dir)
def _get_color(self, idx):
abs_path = os.path.join(self.data_dir, self.filenames[idx])
assert abs_path[-3:] == "jpg"
with open(abs_path, "rb") as f:
img = self.reader.decode(f.read(), pixel_format=0)
return Image.fromarray(img)
def _get_depth(self, idx):
camera_id = self.cams_ids[idx] # 2 is left, 3 is right
depth = kitti_utils.generate_depth_map(
self.gt_calib[idx],
self.gt_files[idx],
self.im_sizes[idx],
camera_id,
False,
True,
)
return depth
def read_calib_file(self, filepath):
# Borrowed from
# https://github.com/utiasSTARS/pykitti/blob/master/pykitti/utils.py
data = {}
with open(filepath, "r") as f:
for line in f.readlines():
key, value = line.split(":", 1)
# The only non-float values in these files are dates, which
# we don't care about anyway
try:
data[key] = np.array([float(x) for x in value.split()])
except ValueError:
pass
return data
def _get_intrinsics(self, idx):
calib_path = os.path.join(self.gt_calib[idx], "calib_cam_to_cam.txt")
calib_data = self.read_calib_file(calib_path)
raw_K = np.reshape(calib_data["P_rect_0" + self.cams_ids[idx]], (3, 4))
K = np.zeros(shape=(4, 4), dtype=np.float32)
K[:3, :3] = raw_K[:3, :3]
K[0, :] *= self.width / self.im_sizes[idx][1]
K[1, :] *= self.height / self.im_sizes[idx][0]
K[3, 3] = 1
"""
K = np.array([[241.2800, 0.0000, 208.0000, 0.0000],
[ 0.0000, 245.7600, 64.0000, 0.0000],
[ 0.0000, 0.0000, 1.0000, 0.0000],
[ 0.0000, 0.0000, 0.0000, 1.0000]]).astype(np.float32)
"""
return K, None
def test_kitti_dataset():
dataset_dir = "/data/ra153646/datasets/KITTI/raw_data/"
train_file = "./data/kitti/train.txt"
height = 128
width = 416
num_scales = 4
load_intrinsics = False
seq_len = 3
dataset = Kitti(
dataset_dir,
train_file,
height,
width,
num_scales,
seq_len,
True,
True,
load_intrinsics=load_intrinsics,
)
for i in range(10):
snippet, _ = dataset[i]
print(snippet["K"])
print(len(snippet))
if __name__ == "__main__":
test_kitti_dataset()
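
# --- Hedged note (not part of the original module) ---
# Sketch of how the dataset is typically consumed with a DataLoader; the
# dataset_dir and train_file paths above are machine-specific placeholders.
#
#     from torch.utils.data import DataLoader
#     loader = DataLoader(
#         Kitti(dataset_dir, train_file, load_intrinsics=False),
#         batch_size=4, shuffle=True, num_workers=2)
#     snippet, _ = next(iter(loader))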
```
#### File: SDSSDepth/data/lanczos.py
```python
import math
import typing
import torch
from torch.nn import functional as F
__all__ = ['resize_image']
_I = typing.Optional[int]
_D = typing.Optional[torch.dtype]
def cubic_contribution(x: torch.Tensor, a: float = -0.5) -> torch.Tensor:
ax = x.abs()
ax2 = ax * ax
ax3 = ax * ax2
range_01 = ax.le(1)
range_12 = torch.logical_and(ax.gt(1), ax.le(2))
cont_01 = (a + 2) * ax3 - (a + 3) * ax2 + 1
cont_01 = cont_01 * range_01.to(dtype=x.dtype)
cont_12 = (a * ax3) - (5 * a * ax2) + (8 * a * ax) - (4 * a)
cont_12 = cont_12 * range_12.to(dtype=x.dtype)
cont = cont_01 + cont_12
return cont
def _normalized_sinc(x):
ones = torch.ones_like(x)
x_pi = x * math.pi
return torch.where(x != 0, torch.sin(x_pi) / x_pi, ones)
def lanczos_contribution(x: torch.Tensor, a: float = 3.0) -> torch.Tensor:
    # 'a' is the Lanczos window size (3 for the 'lanczos3' kernel used below).
    sinc_x = _normalized_sinc(x)
    sinc_x_a = _normalized_sinc(x / a)
    return sinc_x * sinc_x_a
def reflect_padding(
x: torch.Tensor,
dim: int,
pad_pre: int,
pad_post: int) -> torch.Tensor:
'''
Apply reflect padding to the given Tensor.
Note that it is slightly different from the PyTorch functional.pad,
where boundary elements are used only once.
Instead, we follow the MATLAB implementation
which uses boundary elements twice.
For example,
[a, b, c, d] would become [b, a, b, c, d, c] with the PyTorch implementation,
while our implementation yields [a, a, b, c, d, d].
'''
b, c, h, w = x.size()
if dim == 2 or dim == -2:
padding_buffer = x.new_zeros(b, c, h + pad_pre + pad_post, w)
padding_buffer[..., pad_pre:(h + pad_pre), :].copy_(x)
for p in range(pad_pre):
padding_buffer[..., pad_pre - p - 1, :].copy_(x[..., p, :])
for p in range(pad_post):
padding_buffer[..., h + pad_pre + p, :].copy_(x[..., -(p + 1), :])
else:
padding_buffer = x.new_zeros(b, c, h, w + pad_pre + pad_post)
padding_buffer[..., pad_pre:(w + pad_pre)].copy_(x)
for p in range(pad_pre):
padding_buffer[..., pad_pre - p - 1].copy_(x[..., p])
for p in range(pad_post):
padding_buffer[..., w + pad_pre + p].copy_(x[..., -(p + 1)])
return padding_buffer
def padding(
x: torch.Tensor,
dim: int,
pad_pre: int,
pad_post: int,
padding_type: typing.Optional[str] = 'reflect') -> torch.Tensor:
if padding_type is None:
return x
elif padding_type == 'reflect':
x_pad = reflect_padding(x, dim, pad_pre, pad_post)
else:
raise ValueError('{} padding is not supported!'.format(padding_type))
return x_pad
def get_padding(
base: torch.Tensor,
kernel_size: int,
x_size: int) -> typing.Tuple[int, int, torch.Tensor]:
base = base.long()
r_min = base.min()
r_max = base.max() + kernel_size - 1
if r_min <= 0:
pad_pre = -r_min
pad_pre = pad_pre.item()
base += pad_pre
else:
pad_pre = 0
if r_max >= x_size:
pad_post = r_max - x_size + 1
pad_post = pad_post.item()
else:
pad_post = 0
return pad_pre, pad_post, base
def get_weight(
dist: torch.Tensor,
kernel_size: int,
kernel: str = 'cubic',
antialiasing_factor: float = 1) -> torch.Tensor:
buffer_pos = dist.new_zeros(kernel_size, len(dist))
for idx, buffer_sub in enumerate(buffer_pos):
buffer_sub.copy_(dist - idx)
buffer_pos *= antialiasing_factor
    if kernel == 'cubic':
        weight = cubic_contribution(buffer_pos)
    elif kernel == 'lanczos3':
        weight = lanczos_contribution(buffer_pos)
    else:
        raise ValueError('{} kernel is not supported!'.format(kernel))
weight /= weight.sum(dim=0, keepdim=True)
return weight
def reshape_tensor(x: torch.Tensor, dim: int,
kernel_size: int) -> torch.Tensor:
# Resize height
if dim == 2 or dim == -2:
k = (kernel_size, 1)
h_out = x.size(-2) - kernel_size + 1
w_out = x.size(-1)
# Resize width
else:
k = (1, kernel_size)
h_out = x.size(-2)
w_out = x.size(-1) - kernel_size + 1
unfold = F.unfold(x, k)
unfold = unfold.view(unfold.size(0), -1, h_out, w_out)
return unfold
# TODO:
# 1. how to define a proper kernel size for images
# 2. Analyze antialiasing factor. is antialising == scale(<1) ok?
def resize_1d(
x: torch.Tensor,
dim: int,
size: typing.Optional[int],
scale: typing.Optional[float],
kernel: str = 'cubic',
padding_type: str = 'reflect',
antialiasing: bool = True) -> torch.Tensor:
# Identity case
if scale == 1:
return x
# Default bicubic kernel with antialiasing (only when downsampling)
    if kernel == 'cubic':
        kernel_size = 4
    elif kernel == 'lanczos3':
        kernel_size = 2 * 3
    else:
        sigma = 2.0
        kernel_size = math.floor(6 * sigma)
if antialiasing and (scale < 1):
antialiasing_factor = scale
kernel_size = math.ceil(kernel_size / antialiasing_factor)
else:
antialiasing_factor = 1
# We allow margin to both sizes
kernel_size += 2
# Weights only depend on the shape of input and output,
# so we do not calculate gradients here.
with torch.no_grad():
pos = torch.linspace(
0, size - 1, steps=size, dtype=x.dtype, device=x.device,
)
pos = (pos + 0.5) / scale - 0.5
base = pos.floor() - (kernel_size // 2) + 1
dist = pos - base
weight = get_weight(
dist,
kernel_size,
kernel=kernel,
antialiasing_factor=antialiasing_factor,
)
pad_pre, pad_post, base = get_padding(base, kernel_size, x.size(dim))
# To backpropagate through x
x_pad = padding(x, dim, pad_pre, pad_post, padding_type=padding_type)
unfold = reshape_tensor(x_pad, dim, kernel_size)
# Subsampling first
if dim == 2 or dim == -2:
sample = unfold[..., base, :]
weight = weight.view(1, kernel_size, sample.size(2), 1)
else:
sample = unfold[..., base]
weight = weight.view(1, kernel_size, 1, sample.size(3))
# Apply the kernel
x = sample * weight
x = x.sum(dim=1, keepdim=True)
return x
def resize_image(
x: torch.Tensor,
sizes: typing.Optional[typing.Tuple[int, int]] = None,
kernel: str = 'lanczos3',
padding_type: str = 'reflect',
antialiasing: bool = True) -> torch.Tensor:
assert len(x.size()) == 4
    assert x.dtype in (torch.float32, torch.float64)
b, c, h, w = x.size()
x = x.view(-1, 1, h, w)
scales = (sizes[0] / h, sizes[1] / w)
kwargs = {
'kernel': kernel,
'padding_type': padding_type,
'antialiasing': antialiasing,
}
# Core resizing module
x = resize_1d(x, -2, size=sizes[0], scale=scales[0], **kwargs)
x = resize_1d(x, -1, size=sizes[1], scale=scales[1], **kwargs)
rh, rw = x.size(-2), x.size(-1)
x = x.view(b, c, rh, rw)
return x
def _lanczos_kernel_integer_positions(a, support):
x = torch.linspace(-a, a, int(2 * support + 1))
sinc_x = _normalized_sinc(x)
sinc_x_a = _normalized_sinc(x / a)
return sinc_x * sinc_x_a
def resize_lanczos_exact(x, size):
b, c, h, w = x.size()
nh, nw = size
# scale factors
assert h % nh == 0 and w % nw == 0
sfh = h / nh
sfw = w / nw
LANCZOS_A = 3.0
support = int(sfh * LANCZOS_A)
kernel = _lanczos_kernel_integer_positions(
LANCZOS_A, support=support).to(x.device)
kernel = kernel / kernel.sum()
kernel = kernel.unsqueeze(0).unsqueeze(0).repeat(c, 1, 1)
x_wview = x.permute(0, 2, 1, 3).reshape(b * h, c, w)
x_wview_padded = F.pad(x_wview, (support, support), mode='reflect')
out = F.conv1d(x_wview_padded, kernel, stride=int(sfw), groups=c)
x_hview = out.view(b, h, c, nw).permute(0, 3, 2, 1).reshape(b * nw, c, h)
x_hview_padded = F.pad(x_hview, (support, support), mode='reflect')
out2 = F.conv1d(x_hview_padded, kernel, stride=int(sfh), groups=c)
out2 = out2.view(b, nw, c, nh).permute(0, 2, 3, 1)
return out2
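

# --- Hedged usage sketch (not part of the original module) ---
# Downsamples a random batch with the default Lanczos-3 kernel; the input
# tensor and target size are illustrative only.
if __name__ == "__main__":
    x = torch.rand(2, 3, 256, 832)
    y = resize_image(x, sizes=(128, 416))
    print(y.shape)  # expected: torch.Size([2, 3, 128, 416])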
```
#### File: jmendozais/SDSSDepth/eval_depth.py
```python
import os
import time
import argparse
import configargparse
import numpy as np
import torch
import model
import data
from data import transform as DT
from data import create_dataset
from eval.kitti_depth_eval_utils import *
from eval.depth_eval_utils import *
import opts
import util
def eval_depth(model, test_loader, args):
preds = []
depth_metrics = dict()
for i, data in enumerate(test_loader, 0):
with torch.no_grad():
# [b, sl, 3, h, w] dim[1] = {tgt, src, src ...}
data['color'] = data['color'].to(args.device)
data = DT.normalize(data)
depths_or_disp_pyr, _, _ = model.depth_net(
data['color'][:, 0]) # just for tgt images
if model.depthnet_out == 'disp':
depths = 1 / depths_or_disp_pyr[0]
else:
depths = depths_or_disp_pyr[0]
if args.batchwise:
batch_metrics = compute_metrics_batch(depths, data['depth'].to(
args.device), min_depth=args.min_depth, crop_eigen=args.crop_eigen)
util.accumulate_metrics(depth_metrics, batch_metrics)
else:
preds.append(depths[0].cpu().numpy())
if args.batchwise:
for k, v in depth_metrics.items():
depth_metrics[k] = np.mean(v)
print("Ambiguous scale factor (batchwise)")
print(' '.join([k for k in depth_metrics.keys()]))
print(' '.join(['{:.4f}'.format(depth_metrics[k])
for k in depth_metrics.keys()]))
else:
assert len(test_loader) == len(preds)
assert args.gt_file is not None
preds = np.concatenate(preds, axis=0).squeeze(1)
gt = np.load(args.gt_file, allow_pickle=True)
preds = resize_like(preds, gt)
metrics = compute_metrics(
preds, gt, args.min_depth, args.max_depth, crop_eigen=args.crop_eigen)
print("Ambiguous scale factor")
print(' '.join([k for k in metrics.keys()]))
print(' '.join(['{:.4f}'.format(metrics[k]) for k in metrics.keys()]))
if args.single_scalor:
scale_factor = compute_scale_factor(
preds, gt, args.min_depth, args.max_depth)
metrics = compute_metrics(
preds, gt, args.min_depth, args.max_depth, scale_factor, crop_eigen=args.crop_eigen)
print("Consistent scale factor")
print(' '.join([k for k in metrics.keys()]))
print(' '.join(['{:.4f}'.format(metrics[k])
for k in metrics.keys()]))
print('time: ', time.perf_counter() - start)
if __name__ == '__main__':
seq_len = 1
args = opts.parse_args()
print(args)
start = time.perf_counter()
test_set = create_dataset(args.dataset,
args.dataset_dir,
args.test_file,
height=args.height,
width=args.width,
num_scales=args.num_scales,
seq_len=args.seq_len,
load_depth=args.batchwise,
is_training=False)
test_loader = torch.utils.data.DataLoader(test_set,
args.batch_size,
shuffle=False,
num_workers=args.workers,
pin_memory=True,
drop_last=False)
checkpoint = torch.load(args.checkpoint)
model = checkpoint['model']
model.to(args.device)
model.eval()
print("Student")
eval_depth(model, test_loader, args)
if 'teacher' in checkpoint.keys():
teacher = checkpoint['teacher'].ema_model
teacher.to(args.device)
teacher.eval()
print("Teacher")
eval_depth(teacher, test_loader, args)
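
# --- Hedged note (not part of the original script) ---
# Hypothetical invocation; the actual flag names are defined in the opts
# module imported above and may differ:
#
#     python eval_depth.py --dataset kitti --dataset_dir /path/to/kitti \
#         --test_file splits/test_files.txt --checkpoint model.ckpt --batchwise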
```
#### File: jmendozais/SDSSDepth/normalization.py
```python
import numpy as np
import torch
from torch import nn, Tensor, Size
from typing import Union, List
_shape_t = Union[int, List[int], Size]
def parameterized_truncated_normal(uniform, mu, sigma, a, b):
normal = torch.distributions.normal.Normal(0, 1)
alpha = (a - mu) / sigma
beta = (b - mu) / sigma
alpha_normal_cdf = normal.cdf(alpha)
p = alpha_normal_cdf + (normal.cdf(beta) - alpha_normal_cdf) * uniform
p = p.numpy().astype(np.float32)
one = np.array(1, dtype=p.dtype)
epsilon = np.array(np.finfo(p.dtype).eps, dtype=p.dtype)
v = np.clip(2 * p - 1, -one + epsilon, one - epsilon)
x = mu + sigma * np.sqrt(2) * torch.erfinv(torch.from_numpy(v))
x = torch.clamp(x, a, b)
return x
def truncated_normal(uniform):
return parameterized_truncated_normal(
uniform, mu=0.0, sigma=1.0, a=-2, b=2)
def sample_truncated_normal(shape=()):
return truncated_normal(torch.from_numpy(np.random.uniform(0, 1, shape)))
class Identity(nn.Module):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def forward(self, x):
return x
class RandomLayerNorm(nn.Module):
def __init__(self, noise_std: float = 0.5, eps: float = 1e-5,
elementwise_affine: bool = True) -> None:
super(RandomLayerNorm, self).__init__()
self.elementwise_affine = elementwise_affine
self.register_parameter('weight', None)
self.register_parameter('bias', None)
self.noise_std = noise_std
self.normalized_shape = None
self.eps = eps
def reset_parameters(self) -> None:
if self.elementwise_affine:
nn.init.ones_(self.weight)
nn.init.zeros_(self.bias)
def forward(self, x):
if self.normalized_shape is None:
self.normalized_shape = (1,) + x.shape[1:]
self.weight = nn.Parameter(torch.ones(
*self.normalized_shape).to(x.device))
self.bias = nn.Parameter(torch.zeros(
*self.normalized_shape).to(x.device))
var, mean = torch.var_mean(x, [2, 3], keepdim=True)
if self.training:
mean = mean * \
(1.0 + sample_truncated_normal(mean.shape).to(x.device)
* self.noise_std).detach()
var = var * \
(1.0 + sample_truncated_normal(var.shape).to(x.device)
* self.noise_std).detach()
b, c, h, w = var.size()
x_norm = (x - mean) / (torch.sqrt(var) + 1e-6)
res = x_norm * self.weight + self.bias
return res
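

# --- Hedged usage sketch (not part of the original module) ---
# RandomLayerNorm normalizes each sample per channel over the spatial
# dimensions and, in training mode, perturbs the computed mean and variance
# with truncated-normal noise. The shapes below are illustrative only.
if __name__ == "__main__":
    layer = RandomLayerNorm(noise_std=0.5)
    x = torch.randn(4, 16, 8, 8)
    layer.train()
    print(layer(x).shape)  # torch.Size([4, 16, 8, 8])
    layer.eval()
    print(layer(x).shape)  # same shape, without the statistics noise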
``` |
{
"source": "jmenges/yolov1_maxim",
"score": 2
} |
#### File: jmenges/yolov1_maxim/map.py
```python
import numpy as np
def calculate_map_main(gt_results, pred_results, iou_gt_thr=0.5, class_num=5):
    '''
    Description: computes the mAP for object detection.
    Inputs:
        gt_results: list, each element holds the ground-truth boxes of one sample,
            e.g. gt_results[0] = [
                [xmin, ymin, xmax, ymax, label],
                [xmin, ymin, xmax, ymax, label],
                ...
            ]
        pred_results: list, each element holds the predicted boxes of one sample,
            e.g. pred_results[0] = [
                [xmin, ymin, xmax, ymax, label, score],
                [xmin, ymin, xmax, ymax, label, score],
                ...
            ]
        iou_gt_thr: float, IoU threshold used to separate positives from negatives,
            e.g. iou_gt_thr=0.5 yields mAP@0.5
        class_num: int, number of classes
    Outputs:
        all_map: array, the AP of each class
        mean_ap: float, mean of the per-class APs (mAP)
    '''
    all_tp = [ [] for i in range(class_num) ]  # stores all TPs of each class
    all_fp = [ [] for i in range(class_num) ]  # stores all FPs of each class
    all_score = [ [] for i in range(class_num) ]  # stores the scores of the predicted bboxes
    all_gt_num = np.zeros([class_num])  # number of ground-truth objects per class, used later for recall
    data_num = len(gt_results)  # total number of samples
    # For every sample, compute tp and fp, and collect the predicted bbox scores and the ground-truth counts
for i in range(data_num):
gt_result = gt_results[i]
pred_result = pred_results[i]
# print(f'computing {i} tp and fp')
tp, fp, score, gt_num = calculate_tpfp_single(gt_result, pred_result, iou_gt_thr, class_num)
        # Accumulate the per-class results into the totals
for n in range(class_num):
all_tp[n].extend(tp[n])
all_fp[n].extend(fp[n])
all_score[n].extend(score[n])
all_gt_num[n] += gt_num[n]
    # Compute the AP of each class and then the mAP
all_map = calculate_map(all_tp, all_fp, all_score, all_gt_num, class_num)
mean_ap = np.mean(all_map[1:])
print('mAP', mean_ap)
return all_map, mean_ap
def calculate_tpfp_single(gt_result, pred_result, iou_gt_thr, class_num):
    '''
    Description: computes tp and fp for a single sample, stores the
    corresponding scores, and counts the ground-truth objects.
    Inputs:
        gt_result: list, all ground-truth objects of one sample,
            e.g. gt_result = [
                [xmin, ymin, xmax, ymax, label],
                [xmin, ymin, xmax, ymax, label],
                ...
            ]
        pred_result: list, all predicted objects of one sample,
            e.g. pred_result = [
                [xmin, ymin, xmax, ymax, label, score],
                [xmin, ymin, xmax, ymax, label, score],
                ...
            ]
        iou_gt_thr: float, IoU threshold used to separate positives from negatives
        class_num: number of classes
    Outputs:
        all_tp: list, each element holds the tp values of the corresponding class for this sample
        all_fp: list, each element holds the fp values of the corresponding class for this sample
        all_score: list, each element holds the bbox scores of the corresponding class for this sample
        gt_num: list, each element holds the number of ground-truth objects of the corresponding class
    '''
all_tp = [[] for i in range(class_num)]
all_fp = [[] for i in range(class_num)]
all_score = [[] for i in range(class_num)]
gt_num = np.zeros([class_num])
    # Extract the ground-truth and predicted bboxes class by class
for i in range(class_num):
tp = []
fp = []
score = []
match_gt_bbox = [obj[0:4] for obj in gt_result if int(obj[4]) == i]
match_pred_bbox = [obj[0:4] for obj in pred_result if int(obj[4]) == i]
match_pred_score = [obj[5] for obj in pred_result if int(obj[4]) == i]
len_gt = len(match_gt_bbox)
len_pred = len(match_pred_bbox)
if len_gt == 0 and len_pred != 0:
            # No ground-truth object of this class, but predictions exist: false detections
score.extend(match_pred_score)
for k in range(len_pred):
tp.extend([0])
fp.extend([1])
if len_gt != 0 and len_pred != 0:
            # Objects of this class exist and were detected: compute the IoUs between the gt and predicted boxes
score.extend(match_pred_score)
ious = calculate_iou(match_gt_bbox, match_pred_bbox)
            max_iou = np.max(ious, axis=0)  # [x, x, x, ...] the largest IoU of each predicted box over the gt boxes
# if any(s > 0.65 for s in max_iou):
# print(f"The image has good IOU: {max_iou}")
# if any(s < 0.1 for s in max_iou):
# print(f"The image has bad IOU: {max_iou}")
            # Use iou_gt_thr to decide positives and negatives: tp if the threshold is met, otherwise fp
for k in range(len_pred):
if max_iou[k] >= iou_gt_thr:
tp.extend([1])
fp.extend([0])
if max_iou[k] < iou_gt_thr:
tp.extend([0])
fp.extend([1])
all_tp[i].extend(tp)
all_fp[i].extend(fp)
all_score[i].extend(score)
gt_num[i] += len_gt
return all_tp, all_fp, all_score, gt_num
def calculate_area(bbox):
    # Compute the area of a bbox
w = max(bbox[2] - bbox[0], 0)
h = max(bbox[3] - bbox[1], 0)
w = max(0, w)
h = max(0, h)
return w * h
def calculate_inter(bbox1, bbox2):
    # Compute the intersection area of two bboxes
xmin = max(bbox1[0], bbox2[0])
ymin = max(bbox1[1], bbox2[1])
xmax = min(bbox1[2], bbox2[2])
ymax = min(bbox1[3], bbox2[3])
return calculate_area([xmin, ymin, xmax, ymax])
def calculate_union(bbox1, bbox2):
    # Compute the union area of two bboxes
area1 = calculate_area(bbox1)
area2 = calculate_area(bbox2)
inter = calculate_inter(bbox1, bbox2)
union = area1 + area2 - inter
return union
def IOU(bbox1, bbox2):
    # Compute the IoU of two bboxes
inter = calculate_inter(bbox1, bbox2)
union = calculate_union(bbox1, bbox2)
iou = inter / union
return iou
def calculate_iou(bbox1, bbox2):
    '''
    Description: computes the IoU between M bboxes and N bboxes.
    Inputs:
        bbox1: list, each element is a bbox, e.g. bbox1 = [
            [xmin, ymin, xmax, ymax],
            [xmin, ymin, xmax, ymax],
            ...
        ]
        bbox2: list, each element is a bbox, e.g. bbox2 = [
            [xmin, ymin, xmax, ymax],
            [xmin, ymin, xmax, ymax],
            ...
        ]
    Outputs:
        ans: array, size=[M, N], the computed IoU matrix
    '''
len_1 = len(bbox1)
len_2 = len(bbox2)
ans = np.zeros([len_1, len_2])
for i in range(len_1):
for j in range(len_2):
            # Compute the IoU of bbox1[i] and bbox2[j]
ans[i, j] = IOU(bbox1[i], bbox2[j])
return ans
def calculate_map(all_tp, all_fp, all_score, all_gt_num, class_num):
    '''
    Description: takes the tp, fp, scores and ground-truth counts of all
    classes and computes the AP of each class.
    Inputs:
        all_tp: list, each element holds the tp values of one class
        all_fp: list, each element holds the fp values of one class
        all_score: list, each element holds the scores of the predicted bboxes of one class
        all_gt_num: list, each element holds the number of ground-truth objects of one class
        class_num: int, number of classes
    Outputs:
        all_map: array, each element is the AP of one class
    '''
all_map = np.zeros([class_num])
for i in range(class_num):
        # First extract the information of this class
        class_tp = all_tp[i]
        class_fp = all_fp[i]
        class_score = all_score[i]
        class_gt_num = all_gt_num[i]
        # Compute the PR curve of this class
        class_P, class_R = calculate_PR(class_tp, class_fp, class_score, class_gt_num)
        # Compute the area under the PR curve, i.e. the AP
        class_map = calculate_map_single(class_P, class_R)
        # Store the AP of this class
all_map[i] = class_map
return all_map
def calculate_PR(class_tp, class_fp, class_score, class_gt_num):
    '''
    Description: computes the PR curve of one class.
    Inputs:
        class_tp: list, the tp flags of this class; each element is 0 or 1 and marks a true positive
        class_fp: list, the fp flags of this class; each element is 0 or 1 and marks a false positive
        class_score: list, the scores of the predicted bboxes of this class
        class_gt_num: int, number of ground-truth objects of this class
    Outputs:
        P: list, the precision curve of this class
        R: list, the recall curve of this class
    '''
    # Sort by score in descending order
    sort_inds = np.argsort(class_score)[::-1].tolist()
    tp = [class_tp[i] for i in sort_inds]
    fp = [class_fp[i] for i in sort_inds]
    # Cumulative sums
    tp = np.cumsum(tp).tolist()
    fp = np.cumsum(fp).tolist()
    # Compute precision and recall
P = [tp[i] / (tp[i] + fp[i]) for i in range(len(tp))]
R = [tp[i] / class_gt_num for i in range(len(tp))]
return P, R
def calculate_map_single(P, R):
    '''
    Description: computes the area under the PR curve, i.e. the AP.
    Inputs:
        P: list, the precision curve
        R: list, the recall curve
    Outputs:
        single_map: float, the area under the curve, i.e. the AP
    '''
mpre = np.concatenate(([0.], P, [0.]))
mrec = np.concatenate(([0.], R, [1.]))
for i in range(np.size(mpre) - 1, 0, -1):
        # Flatten mpre into a monotonically non-increasing envelope
mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
    # Find the indices where mrec changes
i = np.where(mrec[1:] != mrec[:-1])[0]
    # Compute the area
single_map = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
return single_map
def NMS(bounding_boxes, S=7, img_size=224, confidence_threshold=0.5, iou_threshold=0.5):
"""Compute non max suppressing to reduce overlapping bounding box.
Args:
bounding_boxes (list): a list of bounding box.
S (int): the number of grid cells.
img_size (int): image size
confidence_threshold (float): the threshold to select a box on has_obj_prob (has_object_probability).
iou_threshold (float): the threshold of IOU to remove bounding boxes.
Returns:
a list of bounding boxes.
bounding box format = [center_x (int), center_y (int), width (int), height (int), has_obj_prob (float),
class_probs (float), confident_score (float), class_id (int)].
class_probs is a list of class probability. confident_score = has_obj_prob * max(class_prob).
class_id is argmax(class_probs).
"""
bounding_boxes = bounding_boxes.cpu().detach().numpy().tolist()
nms_boxes_buf = []
grid_size = img_size / S
for batch in range(len(bounding_boxes)):
predict_boxes = []
nms_boxes = []
for i in range(S):
for j in range(S):
gridX = grid_size * j
gridY = grid_size * i
if bounding_boxes[batch][i][j][4] < bounding_boxes[batch][i][j][9]:
bounding_box = bounding_boxes[batch][i][j][5:10]
else:
bounding_box = bounding_boxes[batch][i][j][0:5]
bounding_box.extend(bounding_boxes[batch][i][j][10:])
if bounding_box[4] >= confidence_threshold:
predict_boxes.append(bounding_box)
centerX = (int)(gridX + bounding_box[0] * grid_size)
centerY = (int)(gridY + bounding_box[1] * grid_size)
width = (int)(bounding_box[2] * img_size)
height = (int)(bounding_box[3] * img_size)
bounding_box[0] = max(0, (int)(centerX - width / 2))
bounding_box[1] = max(0, (int)(centerY - height / 2))
bounding_box[2] = min(img_size - 1, (int)(centerX + width / 2))
bounding_box[3] = min(img_size - 1, (int)(centerY + height / 2))
class_idx = np.argmax(bounding_box[5:])
confident_score = bounding_box[4] * bounding_box[5 + class_idx] # has_obj_prob * class_prob
bounding_box.append(confident_score)
bounding_box.append(class_idx)
while len(predict_boxes) != 0:
predict_boxes.sort(key=lambda box: box[4])
assured_box = predict_boxes[0]
curr_class = assured_box[-1]
temp = []
nms_boxes.append(assured_box)
i = 1
while i < len(predict_boxes):
compared_box = predict_boxes[i]
if compared_box[-1] != curr_class or IOU(assured_box, predict_boxes[i]) <= iou_threshold:
temp.append(predict_boxes[i])
i = i + 1
predict_boxes = temp
nms_boxes_buf.append(nms_boxes)
return nms_boxes_buf
def gt_std(gt_results, S=7, B=2, img_size=224):
gt_results_all = []
grid_size = img_size / S
for instance_index in range(gt_results.shape[0]): # N
gt_results_instance = []
for index_i in range(gt_results.shape[1]): # 7
for index_j in range(gt_results.shape[2]): # 7
gridX = grid_size * index_j
gridY = grid_size * index_i
area = gt_results[instance_index, index_i, index_j, 9]
if area > 0:
gt_results_patch = gt_results[instance_index, index_i, index_j].tolist()
centerX = (int)(gridX + gt_results_patch[0] * grid_size)
centerY = (int)(gridY + gt_results_patch[1] * grid_size)
width = (int)(gt_results_patch[2] * img_size)
height = (int)(gt_results_patch[3] * img_size)
class_idx = int(gt_results[instance_index, index_i, index_j, 10:].argmax())
gt_results_patch[0] = max(0, (int)(centerX - width / 2))
gt_results_patch[1] = max(0, (int)(centerY - height / 2))
gt_results_patch[2] = min(img_size - 1, (int)(centerX + width / 2))
gt_results_patch[3] = min(img_size - 1, (int)(centerY + height / 2))
gt_results_patch[4] = class_idx
gt_results_instance.append(gt_results_patch[0:5])
gt_results_all.append(gt_results_instance)
return gt_results_all
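

# --- Hedged usage sketch (not part of the original module) ---
# Toy example with one image and two classes; each ground-truth box is
# [xmin, ymin, xmax, ymax, label] and each prediction adds a score. Both
# detections overlap their ground truth well, so every AP comes out as 1.0.
if __name__ == "__main__":
    gt = [[[10, 10, 50, 50, 1], [60, 60, 90, 90, 0]]]
    pred = [[[12, 12, 48, 52, 1, 0.9], [61, 59, 88, 91, 0, 0.8]]]
    class_ap, mean_ap = calculate_map_main(gt, pred, iou_gt_thr=0.5, class_num=2)
    print(class_ap, mean_ap)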
```
#### File: yolov1_maxim/tiny_yolo_v2/train_voc.py
```python
import logging
import os
import random
import argparse
import sys
PROJECT_ROOT = os.path.join(os.path.dirname(__file__), '..')
sys.path.insert(0, os.path.join(PROJECT_ROOT, "ai8x-training"))
import numpy as np
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
from loss import YoloLoss
from tiny_yolov2 import TinyYoloV2
from utils import custom_collate_fn, create_exp_dir, get_logger, get_time_str
from voc_dataset import VOCDataset
import ai8x
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('--num_epochs', type=int, default=400, help='Maximum training epoch.')
parser.add_argument('--lr', type=float, default=3e-5, help='Learning rate.')
parser.add_argument('--batch_size', type=int, default=16, help='Minibatch size.')
parser.add_argument('--gpu', type=int, default=1, help='Use which gpu to train the model.')
parser.add_argument('--exp', type=str, default="tiny-yolo-v2", help='Experiment name.')
parser.add_argument('--seed', type=int, default=7, help='Random seed.')
args = parser.parse_args()
return args
def setup_seed(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
torch.backends.cudnn.deterministic = True
def log_init():
import glob
fdir0 = os.path.join("logs", args.exp + '-' + get_time_str())
create_exp_dir(fdir0, scripts_to_save=glob.glob('*.py'))
args.output_dir = fdir0
logger = get_logger(logdir=fdir0, tag=args.exp, log_level=logging.INFO)
logger.info("args = %s", args)
return logger
# Initialize the dataset and dataloader
def dataset_init():
dataset = VOCDataset(root_path="/data/yiwei/VOCdevkit", image_size=224)
data_loader = DataLoader(dataset, batch_size=args.batch_size, shuffle=True, num_workers=4,
collate_fn=custom_collate_fn)
return dataset, data_loader
# Train
def train(logger):
dataset, data_loader = dataset_init()
# Set ai8x device
ai8x.set_device(device=85, simulate=False, round_avg=False)
model = TinyYoloV2(num_classes=dataset.num_classes)
model = model.cuda()
logger.info("NUMBER OF PARAMETERS {}".format(sum(p.numel() for p in model.parameters())))
# Initialize the loss function
criterion = YoloLoss(dataset.num_classes, model.anchors)
optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=0.9, weight_decay=0.0005)
    scheduler = torch.optim.lr_scheduler.MultiStepLR(
        optimizer,
        milestones=[50, 100, 200, 300, 400, 500, 600, 700, 800, 900, 1000,
                    2000, 3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000,
                    20000, 30000, 40000],
        gamma=0.8)
# Initialize the quantization policy
num_epochs = args.num_epochs
# Main training
num_iter_per_epoch = len(data_loader)
for epoch in range(0, num_epochs):
for batch_index, batch_train in enumerate(data_loader):
train_data = batch_train[0].float().cuda()
train_data.requires_grad = True
label_data = batch_train[1]
optimizer.zero_grad()
logits = model(train_data)
loss, loss_coord, loss_conf, loss_cls = criterion(logits, label_data)
loss.backward()
optimizer.step()
logger.info("Epoch: {}/{}, Iteration: {}/{}, Lr: {}, Loss:{:.2f} (Coord:{:.2f} Conf:{:.2f} Cls:{:.2f})".format(
epoch + 1,
num_epochs,
batch_index + 1,
num_iter_per_epoch,
optimizer.param_groups[0]['lr'],
loss,
loss_coord,
loss_conf,
loss_cls))
if epoch % 50 == 0:
torch.save(model.state_dict(), os.path.join(args.output_dir, "tiny_yolo_v2_ep{}.pth".format(epoch)))
scheduler.step()
def main():
# Set GPU
setup_seed(args.seed)
logger = log_init()
# args.device = torch.device('cuda:' + str(args.gpu) if torch.cuda.is_available() else 'cpu')
os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu)
logger.info('Running on device: {}'.format(args.gpu))
train(logger)
if __name__ == "__main__":
args = get_args()
main()
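
# --- Hedged note (not part of the original script) ---
# Typical invocation using the flags defined in get_args(); the VOC root is
# hard-coded in dataset_init() and may need to be adapted:
#
#     python train_voc.py --num_epochs 400 --lr 3e-5 --batch_size 16 --gpu 0 --exp tiny-yolo-v2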
``` |
{
"source": "jmenglund/pandas-charm",
"score": 3
} |
#### File: jmenglund/pandas-charm/test_pandascharm.py
```python
import pytest
import numpy
import pandas
import dendropy
import Bio.Alphabet
from Bio.AlignIO import MultipleSeqAlignment
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from pandas.util.testing import (
assert_categorical_equal,
assert_dict_equal,
assert_frame_equal,
assert_index_equal,
assert_produces_warning,
assert_series_equal)
from pandascharm import (
frame_as_categorical,
frame_as_object,
from_charmatrix,
to_charmatrix,
from_bioalignment,
to_bioalignment,
from_sequence_dict,
to_sequence_dict)
class TestAsCategorical():
frame = pandas.DataFrame({
't1': ['T', 'G', 'C', 'A', '?'],
't2': ['T', 'G', 'C', 'A', 'A'],
't3': ['T', 'G', 'C', 'A', 'A'],
't4': ['T', 'G', 'C', 'A', 'A']}, dtype='category')
def test_unaltered_categories(self):
assert (
set(frame_as_categorical(self.frame)['t1'].cat.categories) ==
set(self.frame['t1'].cat.categories))
def test_altered_categories(self):
assert (
set(frame_as_categorical(self.frame)['t2'].cat.categories) !=
set(self.frame['t2'].cat.categories))
def test_add_category(self):
assert(
set(
frame_as_categorical(self.frame, ['-'])['t1'].cat.categories
) == {'T', 'G', 'C', 'A', '?', '-'})
class TestAsObject():
frame_cat = pandas.DataFrame({
't1': ['T', 'G', 'C', 'A', '?'],
't2': ['T', 'G', 'C', 'A', 'A'],
't3': ['T', 'G', 'C', 'A', 'A'],
't4': ['T', 'G', 'C', 'A', 'A']}, dtype='category')
frame_obj = pandas.DataFrame({
't1': ['T', 'G', 'C', 'A', '?'],
't2': ['T', 'G', 'C', 'A', 'A'],
't3': ['T', 'G', 'C', 'A', 'A'],
't4': ['T', 'G', 'C', 'A', 'A']}, dtype='object')
def test_conversion(self):
assert_frame_equal(frame_as_object(self.frame_cat), self.frame_obj)
class TestCharmatrixConversion():
dna_charmatrix_string = '3 5\nt1 TCCAA\nt2 TGCAA\nt3 TG-AA\n'
dna_charmatrix = dendropy.DnaCharacterMatrix.get(
data=dna_charmatrix_string, schema='phylip')
dna_frame = pandas.DataFrame({
't1': ['T', 'C', 'C', 'A', 'A'],
't2': ['T', 'G', 'C', 'A', 'A'],
't3': ['T', 'G', '-', 'A', 'A']}, dtype='category')
rna_charmatrix_string = '3 5\nt1 UCCAA\nt2 UGCAA\nt3 UG-AA\n'
rna_charmatrix = dendropy.RnaCharacterMatrix.get(
data=rna_charmatrix_string, schema='phylip')
rna_frame = pandas.DataFrame({
't1': ['U', 'C', 'C', 'A', 'A'],
't2': ['U', 'G', 'C', 'A', 'A'],
't3': ['U', 'G', '-', 'A', 'A']}, dtype='category')
protein_charmatrix_string = '3 5\nt1 VKYPN\nt2 VLYPN\nt3 VL-PN\n'
protein_charmatrix = dendropy.ProteinCharacterMatrix.get(
data=protein_charmatrix_string, schema='phylip')
protein_frame = pandas.DataFrame({
't1': ['V', 'K', 'Y', 'P', 'N'],
't2': ['V', 'L', 'Y', 'P', 'N'],
't3': ['V', 'L', '-', 'P', 'N']}, dtype='category')
standard_charmatrix_string = '3 5\nt1 01010\nt2 02010\nt3 02-10\n'
standard_charmatrix = dendropy.StandardCharacterMatrix.get(
data=standard_charmatrix_string, schema='phylip')
standard_frame = pandas.DataFrame({
't1': ['0', '1', '0', '1', '0'],
't2': ['0', '2', '0', '1', '0'],
't3': ['0', '2', '-', '1', '0']}, dtype='category')
def test_from_charmatrix_dna(self):
assert_frame_equal(
from_charmatrix(self.dna_charmatrix), self.dna_frame,
check_categorical=False)
def test_from_charmatrix_dna_object(self):
assert_frame_equal(
from_charmatrix(self.dna_charmatrix, categorical=False),
frame_as_object(self.dna_frame))
def test_to_charmatrix_dna(self):
assert (
to_charmatrix(self.dna_frame, data_type='dna')
.as_string('phylip') == self.dna_charmatrix.as_string('phylip'))
def test_from_charmatrix_rna(self):
assert_frame_equal(
from_charmatrix(self.rna_charmatrix), self.rna_frame,
check_categorical=False)
def test_to_charmatrix_rna(self):
assert (
to_charmatrix(self.rna_frame, data_type='rna')
.as_string('phylip') == self.rna_charmatrix.as_string('phylip'))
def test_from_charmatrix_protein(self):
assert_frame_equal(
from_charmatrix(self.protein_charmatrix), self.protein_frame,
check_categorical=False)
def test_to_charmatrix_protein(self):
assert (
to_charmatrix(self.protein_frame, data_type='protein')
.as_string('phylip') == self.protein_charmatrix
.as_string('phylip'))
def test_from_charmatrix_standard(self):
assert_frame_equal(
from_charmatrix(self.standard_charmatrix), self.standard_frame,
check_categorical=False)
def test_to_charmatrix_standard(self):
assert (
to_charmatrix(self.standard_frame, data_type='standard')
.as_string('phylip') == self.standard_charmatrix
.as_string('phylip'))
def test_invalid_data_type(self):
with pytest.raises(ValueError):
to_charmatrix(self.standard_frame, data_type='unknown')
class TestBioalignmentConversion():
def dict_to_bioalignment(d, alphabet='generic_alphabet', sorted=True):
"""
Create a BioPython MultipleSequenceAlignment
from a dict with pairs consisting of id and sequence.
"""
alignment = MultipleSeqAlignment([])
bio_alphabet = getattr(Bio.Alphabet, alphabet)
for id, seq in d.items():
seq_record = SeqRecord(Seq(seq, alphabet=bio_alphabet), id=id)
alignment.append(seq_record)
if sorted:
alignment.sort()
return alignment
dna_alignment_dict = {'t1': 'TCCAA', 't2': 'TGCAA', 't3': 'TG-AA'}
dna_bioalignment = dict_to_bioalignment(
dna_alignment_dict, alphabet='generic_dna')
dna_frame = pandas.DataFrame({
't1': ['T', 'C', 'C', 'A', 'A'],
't2': ['T', 'G', 'C', 'A', 'A'],
't3': ['T', 'G', '-', 'A', 'A']}, dtype='category')
def test_from_bioalignment_dna(self):
assert_frame_equal(
from_bioalignment(self.dna_bioalignment), self.dna_frame)
def test_to_bioalignment_dna(self):
assert (
to_bioalignment(self.dna_frame, alphabet='generic_dna')
.format('phylip') == self.dna_bioalignment.format('phylip'))
def test_invalid_alphabet(self):
with pytest.raises(ValueError):
to_bioalignment(self.dna_frame, alphabet='dna')
class TestSequenceDictConversion():
dna_frame = pandas.DataFrame({
't1': ['T', 'C', 'C', 'A', 'A'],
't2': ['T', 'G', 'C', 'A', 'A'],
't3': ['T', 'G', '-', 'A', 'A']}, dtype='object')
dna_frame_nan = pandas.DataFrame({
't1': ['T', 'C', 'C', 'A', 'A'],
't2': ['T', 'G', 'C', 'A', 'A'],
't3': ['T', 'G', '-', 'A', numpy.nan]}, dtype='object')
dna_dict = {'t1': 'TCCAA', 't2': 'TGCAA', 't3': 'TG-AA'}
def test_from_sequence_dict(self):
assert_frame_equal(
from_sequence_dict(self.dna_dict, categorical=False),
self.dna_frame)
def test_to_sequence_dict(self):
assert(to_sequence_dict(self.dna_frame) == self.dna_dict)
def test_do_sequence_dict_nan(self):
with pytest.raises(TypeError):
to_sequence_dict(self.dna_frame_nan)
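

# --- Hedged note (not part of the original test module) ---
# The classes above are collected by pytest (e.g. `pytest test_pandascharm.py -v`).
# A minimal round-trip outside pytest, reusing the toy alignment from the tests:
if __name__ == "__main__":
    frame = from_sequence_dict({'t1': 'TCCAA', 't2': 'TGCAA', 't3': 'TG-AA'})
    assert to_sequence_dict(frame_as_object(frame)) == {
        't1': 'TCCAA', 't2': 'TGCAA', 't3': 'TG-AA'}
    print(frame)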
``` |
{
"source": "jmenglund/pandas-validation",
"score": 3
} |
#### File: jmenglund/pandas-validation/pandasvalidation.py
```python
import os
import warnings
import datetime
import numpy
import pandas
__author__ = '<NAME>'
__license__ = 'MIT'
__version__ = '0.5.0'
warnings.filterwarnings('default', category=DeprecationWarning)
class ValidationWarning(Warning):
pass
def _datetime_to_string(series, format='%Y-%m-%d'):
"""
Convert datetime values in a pandas Series to strings.
Other values are left as they are.
Parameters
----------
series : pandas.Series
Values to convert.
format : str
Format string for datetime type. Default: '%Y-%m-%d'.
Returns
-------
converted : pandas.Series
"""
converted = series.copy()
datetime_mask = series.apply(type).isin(
[datetime.datetime, pandas.Timestamp])
if datetime_mask.any():
converted[datetime_mask] = (
series[datetime_mask].apply(lambda x: x.strftime(format)))
return converted.where(datetime_mask, series)
def _numeric_to_string(series, float_format='%g'):
"""
Convert numeric values in a pandas Series to strings.
Other values are left as they are.
Parameters
----------
series : pandas.Series
Values to convert.
float_format : str
Format string for floating point number. Default: '%g'.
Returns
-------
converted : pandas.Series
"""
converted = series.copy()
numeric_mask = (
series.apply(lambda x: numpy.issubdtype(type(x), numpy.number)) &
series.notnull())
if numeric_mask.any():
converted[numeric_mask] = (
series[numeric_mask].apply(lambda x: float_format % x))
return converted.where(numeric_mask, series)
def _get_error_messages(masks, error_info):
"""
Get list of error messages.
Parameters
----------
masks : list
List of pandas.Series with masked errors.
error_info : dict
Dictionary with error messages corresponding to different
validation errors.
"""
msg_list = []
for key, value in masks.items():
if value.any():
msg_list.append(error_info[key])
return msg_list
def _get_return_object(masks, values, return_type):
mask_frame = pandas.concat(masks, axis='columns')
if return_type == 'mask_frame':
return mask_frame
elif return_type == 'mask_series':
return mask_frame.any(axis=1)
elif return_type == 'values':
return values.where(~mask_frame.any(axis=1))
else:
raise ValueError('Invalid return_type')
def mask_nonconvertible(
series, to_datatype, datetime_format=None, exact_date=True):
"""
Return a boolean same-sized object indicating whether values
cannot be converted.
Parameters
----------
series : pandas.Series
Values to check.
to_datatype : str
Datatype to which values should be converted. Available values
are 'numeric' and 'datetime'.
datetime_format : str
        strftime to parse time, e.g. '%d/%m/%Y'; note that '%f' will parse
all the way up to nanoseconds. Optional.
exact_date : bool
- If True (default), require an exact format match.
- If False, allow the format to match anywhere in the target string.
"""
if to_datatype == 'numeric':
converted = pandas.to_numeric(series, errors='coerce')
elif to_datatype == 'datetime':
converted = pandas.to_datetime(
series, errors='coerce', format=datetime_format, exact=exact_date)
else:
raise ValueError(
'Invalid \'to_datatype\': {}'
.format(to_datatype)) # pragma: no cover
notnull = series.copy().notnull()
mask = notnull & converted.isnull()
return mask
def to_datetime(
arg, dayfirst=False, yearfirst=False, utc=None, box=True,
format=None, exact=True, coerce=None, unit='ns',
infer_datetime_format=False):
"""
Convert argument to datetime and set nonconvertible values to NaT.
This function calls :func:`~pandas.to_datetime` with ``errors='coerce'``
and issues a warning if values cannot be converted.
"""
try:
converted = pandas.to_datetime(
arg, errors='raise', dayfirst=dayfirst, yearfirst=yearfirst,
utc=utc, box=box, format=format, exact=exact)
except ValueError:
converted = pandas.to_datetime(
arg, errors='coerce', dayfirst=dayfirst, yearfirst=yearfirst,
utc=utc, box=box, format=format, exact=exact)
if isinstance(arg, pandas.Series):
warnings.warn(
'{}: value(s) not converted to datetime set as NaT'
.format(repr(arg.name)), ValidationWarning, stacklevel=2)
else: # pragma: no cover
warnings.warn(
'Value(s) not converted to datetime set as NaT',
ValidationWarning, stacklevel=2)
return converted
def to_numeric(arg):
"""
Convert argument to numeric type and set nonconvertible values
to NaN.
This function calls :func:`~pandas.to_numeric` with ``errors='coerce'``
and issues a warning if values cannot be converted.
"""
try:
converted = pandas.to_numeric(arg, errors='raise')
except ValueError:
converted = pandas.to_numeric(arg, errors='coerce')
if isinstance(arg, pandas.Series):
warnings.warn(
'{}: value(s) not converted to numeric set as NaN'
.format(repr(arg.name)), ValidationWarning, stacklevel=2)
else: # pragma: no cover
warnings.warn(
'Value(s) not converted to numeric set as NaN',
ValidationWarning, stacklevel=2)
return converted
def to_string(series, float_format='%g', datetime_format='%Y-%m-%d'):
"""
Convert values in a pandas Series to strings.
Parameters
----------
series : pandas.Series
Values to convert.
float_format : str
Format string for floating point number. Default: '%g'.
datetime_format : str
Format string for datetime type. Default: '%Y-%m-%d'
Returns
-------
converted : pandas.Series
"""
converted = _numeric_to_string(series, float_format)
converted = _datetime_to_string(converted, format=datetime_format)
converted = converted.astype(str)
converted = converted.where(series.notnull(), numpy.nan) # missing as NaN
return converted
def validate_date(
series, nullable=True, unique=False, min_date=None,
max_date=None, return_type=None):
"""
Validate a pandas Series with values of type `datetime.date`.
Values of a different data type will be replaced with NaN prior to
    the validation.
Parameters
----------
series : pandas.Series
Values to validate.
nullable : bool
If False, check for NaN values. Default: True.
unique : bool
If True, check that values are unique. Default: False
min_date : datetime.date
If defined, check for values before min_date. Optional.
max_date : datetime.date
If defined, check for value later than max_date. Optional.
return_type : str
Kind of data object to return; 'mask_series', 'mask_frame'
or 'values'. Default: None.
"""
error_info = {
'invalid_type': 'Value(s) not of type datetime.date set as NaT',
'isnull': 'NaT value(s)',
'nonunique': 'duplicates',
'too_low': 'date(s) too early',
'too_high': 'date(s) too late'}
is_date = series.apply(lambda x: isinstance(x, datetime.date))
masks = {}
masks['invalid_type'] = ~is_date & series.notnull()
to_validate = series.where(is_date)
if nullable is not True:
masks['isnull'] = to_validate.isnull()
if unique:
masks['nonunique'] = to_validate.duplicated() & to_validate.notnull()
if min_date is not None:
masks['too_low'] = to_validate.dropna() < min_date
if max_date is not None:
masks['too_high'] = to_validate.dropna() > max_date
msg_list = _get_error_messages(masks, error_info)
if len(msg_list) > 0:
msg = repr(series.name) + ': ' + '; '.join(msg_list) + '.'
warnings.warn(msg, ValidationWarning, stacklevel=2)
if return_type is not None:
return _get_return_object(masks, to_validate, return_type)
def validate_timestamp(
series, nullable=True, unique=False, min_timestamp=None,
max_timestamp=None, return_type=None):
"""
Validate a pandas Series with values of type `pandas.Timestamp`.
Values of a different data type will be replaced with NaT prior to
    the validation.
Parameters
----------
series : pandas.Series
Values to validate.
nullable : bool
If False, check for NaN values. Default: True.
unique : bool
If True, check that values are unique. Default: False
min_timestamp : pandas.Timestamp
If defined, check for values before min_timestamp. Optional.
max_timestamp : pandas.Timestamp
If defined, check for value later than max_timestamp. Optional.
return_type : str
Kind of data object to return; 'mask_series', 'mask_frame'
or 'values'. Default: None.
"""
error_info = {
'invalid_type': 'Value(s) not of type pandas.Timestamp set as NaT',
'isnull': 'NaT value(s)',
'nonunique': 'duplicates',
'too_low': 'timestamp(s) too early',
'too_high': 'timestamp(s) too late'}
is_timestamp = series.apply(lambda x: isinstance(x, pandas.Timestamp))
masks = {}
masks['invalid_type'] = ~is_timestamp & series.notnull()
to_validate = pandas.to_datetime(series.where(is_timestamp, pandas.NaT))
if nullable is not True:
masks['isnull'] = to_validate.isnull()
if unique:
masks['nonunique'] = to_validate.duplicated() & to_validate.notnull()
if min_timestamp is not None:
masks['too_low'] = to_validate.dropna() < min_timestamp
if max_timestamp is not None:
masks['too_high'] = to_validate.dropna() > max_timestamp
msg_list = _get_error_messages(masks, error_info)
if len(msg_list) > 0:
msg = repr(series.name) + ': ' + '; '.join(msg_list) + '.'
warnings.warn(msg, ValidationWarning, stacklevel=2)
if return_type is not None:
return _get_return_object(masks, to_validate, return_type)
def validate_datetime(
series, nullable=True, unique=False, min_datetime=None,
max_datetime=None, return_type=None):
"""
Validate a pandas Series containing datetimes.
.. deprecated:: 0.5.0
`validate_datetime()` will be removed in version 0.7.0.
Use `validate_date()` or `validate_timestamp()` instead.
Parameters
----------
series : pandas.Series
Values to validate.
nullable : bool
If False, check for NaN values. Default: True.
unique : bool
If True, check that values are unique. Default: False
min_datetime : str
If defined, check for values before min_datetime. Optional.
max_datetime : str
If defined, check for value later than max_datetime. Optional.
return_type : str
Kind of data object to return; 'mask_series', 'mask_frame'
or 'values'. Default: None.
"""
warnings.warn(
'validate_datetime() is deprecated, use validate_date() or '
'validate_timestamp() instead.', DeprecationWarning)
error_info = {
'nonconvertible': 'Value(s) not converted to datetime set as NaT',
'isnull': 'NaT value(s)',
'nonunique': 'duplicates',
'too_low': 'date(s) too early',
'too_high': 'date(s) too late'}
if not series.dtype.type == numpy.datetime64:
converted = pandas.to_datetime(series, errors='coerce')
else:
converted = series.copy()
masks = {}
masks['nonconvertible'] = series.notnull() & converted.isnull()
if not nullable:
masks['isnull'] = converted.isnull()
if unique:
masks['nonunique'] = converted.duplicated() & converted.notnull()
if min_datetime is not None:
masks['too_low'] = converted.dropna() < min_datetime
if max_datetime is not None:
masks['too_high'] = converted.dropna() > max_datetime
msg_list = _get_error_messages(masks, error_info)
if len(msg_list) > 0:
msg = repr(series.name) + ': ' + '; '.join(msg_list) + '.'
warnings.warn(msg, ValidationWarning, stacklevel=2)
if return_type is not None:
return _get_return_object(masks, converted, return_type)
def validate_numeric(
series, nullable=True, unique=False, integer=False,
min_value=None, max_value=None, return_type=None):
"""
Validate a pandas Series containing numeric values.
Parameters
----------
series : pandas.Series
Values to validate.
nullable : bool
If False, check for NaN values. Default: True
unique : bool
If True, check that values are unique. Default: False
integer : bool
If True, check that values are integers. Default: False
min_value : int
If defined, check for values below minimum. Optional.
max_value : int
If defined, check for value above maximum. Optional.
return_type : str
Kind of data object to return; 'mask_series', 'mask_frame'
or 'values'. Default: None.
"""
error_info = {
'invalid_type': 'Non-numeric value(s) set as NaN',
'isnull': 'NaN value(s)',
'nonunique': 'duplicates',
'noninteger': 'non-integer(s)',
'too_low': 'value(s) too low',
        'too_high': 'value(s) too high'}
is_numeric = series.apply(pandas.api.types.is_number)
masks = {}
masks['invalid_type'] = ~is_numeric & series.notnull()
to_validate = pandas.to_numeric(series.where(is_numeric))
if not nullable:
masks['isnull'] = to_validate.isnull()
if unique:
masks['nonunique'] = to_validate.duplicated() & to_validate.notnull()
if integer:
noninteger_dropped = (
to_validate.dropna() != to_validate.dropna().apply(int))
masks['noninteger'] = pandas.Series(noninteger_dropped, series.index)
if min_value is not None:
masks['too_low'] = to_validate.dropna() < min_value
if max_value is not None:
masks['too_high'] = to_validate.dropna() > max_value
msg_list = _get_error_messages(masks, error_info)
if len(msg_list) > 0:
msg = repr(series.name) + ': ' + '; '.join(msg_list) + '.'
warnings.warn(msg, ValidationWarning, stacklevel=2)
if return_type is not None:
return _get_return_object(masks, to_validate, return_type)
def validate_string(
series, nullable=True, unique=False,
min_length=None, max_length=None, case=None, newlines=True,
trailing_whitespace=True, whitespace=True, matching_regex=None,
non_matching_regex=None, whitelist=None, blacklist=None,
return_type=None):
"""
    Validate a pandas Series containing strings. Non-string values
    are flagged and set as NaN prior to validation.
Parameters
----------
series : pandas.Series
Values to validate.
nullable : bool
If False, check for NaN values. Default: True.
unique : bool
If True, check that values are unique. Default: False.
min_length : int
If defined, check for strings shorter than
minimum length. Optional.
max_length : int
If defined, check for strings longer than
maximum length. Optional.
case : str
Check for a character case constraint. Available values
are 'lower', 'upper' and 'title'. Optional.
newlines : bool
If False, check for newline characters. Default: True.
trailing_whitespace : bool
If False, check for trailing whitespace. Default: True.
whitespace : bool
If False, check for whitespace. Default: True.
matching_regex : str
        Check that strings match the given regular expression. Optional.
non_matching_regex : str
Check that strings do not match some regular expression. Optional.
whitelist : list
Check that values are in `whitelist`. Optional.
blacklist : list
Check that values are not in `blacklist`. Optional.
return_type : str
Kind of data object to return; 'mask_series', 'mask_frame'
or 'values'. Default: None.
"""
error_info = {
'invalid_type': 'Non-string value(s) set as NaN',
'isnull': 'NaN value(s)',
'nonunique': 'duplicates',
'too_short': 'string(s) too short',
'too_long': 'string(s) too long',
'wrong_case': 'wrong case letter(s)',
'newlines': 'newline character(s)',
'trailing_space': 'trailing whitespace',
'whitespace': 'whitespace',
'regex_mismatch': 'mismatch(es) for "matching regular expression"',
'regex_match': 'match(es) for "non-matching regular expression"',
'not_in_whitelist': 'string(s) not in whitelist',
'in_blacklist': 'string(s) in blacklist'}
is_string = series.apply(lambda x: isinstance(x, str))
masks = {}
masks['invalid_type'] = ~is_string & series.notnull()
to_validate = series.where(is_string)
if not nullable:
masks['isnull'] = to_validate.isnull()
if unique:
masks['nonunique'] = to_validate.duplicated() & to_validate.notnull()
if min_length is not None:
too_short_dropped = to_validate.dropna().apply(len) < min_length
masks['too_short'] = pandas.Series(too_short_dropped, series.index)
if max_length is not None:
too_long_dropped = to_validate.dropna().apply(len) > max_length
masks['too_long'] = pandas.Series(too_long_dropped, series.index)
if case:
altered_case = getattr(to_validate.str, case)()
wrong_case_dropped = (
altered_case.dropna() != to_validate[altered_case.notnull()])
masks['wrong_case'] = pandas.Series(wrong_case_dropped, series.index)
if newlines is False:
masks['newlines'] = to_validate.str.contains(os.linesep)
if trailing_whitespace is False:
masks['trailing_space'] = to_validate.str.contains(
r'^\s|\s$', regex=True)
if whitespace is False:
masks['whitespace'] = to_validate.str.contains(r'\s', regex=True)
if matching_regex:
masks['regex_mismatch'] = (
to_validate.str.contains(matching_regex, regex=True)
.apply(lambda x: x is False) & to_validate.notnull())
if non_matching_regex:
masks['regex_match'] = to_validate.str.contains(
non_matching_regex, regex=True)
if whitelist is not None:
masks['not_in_whitelist'] = (
to_validate.notnull() & ~to_validate.isin(whitelist))
if blacklist is not None:
masks['in_blacklist'] = to_validate.isin(blacklist)
msg_list = _get_error_messages(masks, error_info)
if len(msg_list) > 0:
msg = repr(series.name) + ': ' + '; '.join(msg_list) + '.'
warnings.warn(msg, ValidationWarning, stacklevel=2)
if return_type is not None:
return _get_return_object(masks, to_validate, return_type)
```
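A minimal usage sketch of the validators above (not part of the original module; the column values below are invented, and warnings are emitted as `ValidationWarning` via the `warnings` module):

```python
import numpy
import pandas
from pandasvalidation import validate_numeric, validate_string

df = pandas.DataFrame({
    'price': [9.99, 'n/a', -5, numpy.nan],          # mixed types, one negative
    'code': ['AB12', 'ab12', 'AB 34', numpy.nan]})  # mixed case, one with a space

# Warns about the non-numeric 'n/a' and the value below 0; with
# return_type='values' the flagged entries come back as NaN.
prices = validate_numeric(df['price'], min_value=0, return_type='values')

# Warns about the lowercase string and the embedded whitespace and
# returns one boolean mask column per performed check.
code_masks = validate_string(df['code'], case='upper', whitespace=False,
                             return_type='mask_frame')
```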
#### File: jmenglund/pandas-validation/test_pandasvalidation.py
```python
import datetime
import warnings
import pytest
import numpy
import pandas
from pandas.util.testing import assert_series_equal, assert_frame_equal
from pandasvalidation import (
ValidationWarning,
_datetime_to_string,
_numeric_to_string,
_get_return_object,
mask_nonconvertible,
to_datetime,
to_numeric,
to_string,
validate_datetime,
validate_date,
validate_timestamp,
validate_numeric,
validate_string)
class TestReturnTypes():
strings = pandas.Series(['1', '1', 'ab\n', 'a b', 'Ab', 'AB', numpy.nan])
masks = [
pandas.Series([False, False, False, True, True, False, False]),
pandas.Series([True, True, False, True, True, False, True])]
def test_return_mask_series(self):
assert_series_equal(
_get_return_object(self.masks, self.strings, 'mask_series'),
pandas.Series([True, True, False, True, True, False, True]))
def test_return_mask_frame(self):
assert_frame_equal(
_get_return_object(self.masks, self.strings, 'mask_frame'),
pandas.concat(self.masks, axis='columns'))
def test_return_values(self):
assert_series_equal(
_get_return_object(self.masks, self.strings, 'values'),
pandas.Series([
numpy.nan, numpy.nan, 'ab\n', numpy.nan,
numpy.nan, 'AB', numpy.nan]))
def test_wrong_return_type(self):
with pytest.raises(ValueError):
_get_return_object(self.masks, self.strings, 'wrong return type')
class TestMaskNonconvertible():
mixed = pandas.Series([
1, 2.3, numpy.nan, 'abc', pandas.datetime(2014, 1, 7), '2014'])
inconvertible_numeric = pandas.Series(
[False, False, False, True, True, False])
inconvertible_exact_dates = pandas.Series(
[True, True, False, True, True, False])
inconvertible_inexact_dates = pandas.Series(
[True, True, False, True, False, False])
def test_numeric(self):
assert_series_equal(
mask_nonconvertible(self.mixed, 'numeric'),
self.inconvertible_numeric)
def test_datetime_exact_date(self):
assert_series_equal(
mask_nonconvertible(
self.mixed, 'datetime', datetime_format='%Y', exact_date=True),
self.inconvertible_exact_dates)
assert_series_equal(
mask_nonconvertible(
self.mixed, 'datetime', datetime_format='%Y',
exact_date=False), self.inconvertible_inexact_dates)
class TestToDatetime():
mixed = pandas.Series([
1, 2.3, numpy.nan,
'abc', pandas.datetime(2014, 1, 7), '2014'])
def test_exact(self):
assert (
to_datetime(self.mixed, format='%Y', exact=True).tolist() == [
pandas.NaT, pandas.NaT, pandas.NaT, pandas.NaT,
pandas.NaT, pandas.Timestamp('2014-01-01 00:00:00')])
assert (
to_datetime(self.mixed, format='%Y', exact=False).tolist() == [
pandas.NaT, pandas.NaT, pandas.NaT, pandas.NaT,
pandas.Timestamp('2014-01-01 00:00:00'),
pandas.Timestamp('2014-01-01 00:00:00')])
class TestToNumeric():
mixed = pandas.Series([
1, 2.3, numpy.nan, 'abc', pandas.datetime(2014, 1, 7), '2014'])
def test_conversion(self):
assert (
to_numeric(self.mixed).sum() == 2017.3)
pytest.warns(ValidationWarning, to_numeric, self.mixed)
class TestToString():
mixed = pandas.Series(
[1, 2.3, numpy.nan, 'abc', pandas.datetime(2014, 1, 7)])
numeric_as_strings = pandas.Series(
['1', '2.3', numpy.nan, 'abc', pandas.datetime(2014, 1, 7)])
datetimes_as_strings = pandas.Series(
[1, 2.3, numpy.nan, 'abc', '2014-01-07'])
all_values_as_strings = pandas.Series(
['1', '2.3', numpy.nan, 'abc', '2014-01-07'])
def test_numeric_to_string(self):
assert_series_equal(
_numeric_to_string(self.mixed), self.numeric_as_strings)
def test_datetime_to_string(self):
assert_series_equal(
_datetime_to_string(self.mixed, format='%Y-%m-%d'),
self.datetimes_as_strings)
def test_to_string(self):
assert_series_equal(
to_string(
self.mixed, float_format='%g', datetime_format='%Y-%m-%d'),
self.all_values_as_strings)
class TestValidateDatetime():
dates_as_strings = pandas.Series([
'2014-01-07', '2014-01-07', '2014-02-28', numpy.nan])
dates = pandas.Series([
datetime.datetime(2014, 1, 7), datetime.datetime(2014, 1, 7),
datetime.datetime(2014, 2, 28), numpy.nan])
def test_validation(self):
assert_series_equal(
validate_datetime(self.dates_as_strings, return_type='values'),
validate_datetime(self.dates, return_type='values'))
pytest.warns(
ValidationWarning, validate_datetime, self.dates, nullable=False)
pytest.warns(
ValidationWarning, validate_datetime, self.dates, unique=True)
pytest.warns(
ValidationWarning, validate_datetime, self.dates,
min_datetime='2014-01-08')
pytest.warns(
ValidationWarning, validate_datetime, self.dates,
max_datetime='2014-01-08')
class TestValidateDate():
dates = pandas.Series([
datetime.datetime(2014, 1, 7),
datetime.datetime(2014, 1, 7),
datetime.datetime(2014, 2, 28),
pandas.NaT])
def test_validation(self):
assert_series_equal(
validate_date(self.dates, return_type='values'),
self.dates)
pytest.warns(
ValidationWarning, validate_date, self.dates, nullable=False)
pytest.warns(
ValidationWarning, validate_date, self.dates, unique=True)
pytest.warns(
ValidationWarning, validate_date, self.dates,
min_date=datetime.date(2014, 1, 8))
pytest.warns(
ValidationWarning, validate_date, self.dates,
max_date=datetime.date(2014, 1, 8))
class TestValidateTimestamp():
timestamps = pandas.Series([
pandas.Timestamp(2014, 1, 7, 12, 0, 5),
pandas.Timestamp(2014, 1, 7, 12, 0, 5),
pandas.Timestamp(2014, 2, 28, 0, 0, 0),
pandas.NaT])
def test_validation(self):
assert_series_equal(
validate_timestamp(self.timestamps, return_type='values'),
self.timestamps)
pytest.warns(
ValidationWarning, validate_timestamp, self.timestamps,
nullable=False)
pytest.warns(
ValidationWarning, validate_timestamp, self.timestamps,
unique=True)
pytest.warns(
ValidationWarning, validate_timestamp, self.timestamps,
min_timestamp=pandas.Timestamp(2014, 1, 8))
pytest.warns(
ValidationWarning, validate_timestamp, self.timestamps,
max_timestamp=pandas.Timestamp(2014, 1, 8))
class TestValidateNumber():
numeric_with_string = pandas.Series([-1, -1, 2.3, '1'])
numeric = pandas.Series([-1, -1, 2.3, numpy.nan])
def test_validation(self):
assert_series_equal(
validate_numeric(self.numeric_with_string, return_type='values'),
self.numeric)
pytest.warns(
ValidationWarning, validate_numeric, self.numeric, nullable=False)
pytest.warns(
ValidationWarning, validate_numeric, self.numeric, unique=True)
pytest.warns(
ValidationWarning, validate_numeric, self.numeric, integer=True)
pytest.warns(
ValidationWarning, validate_numeric, self.numeric, min_value=0)
pytest.warns(
ValidationWarning, validate_numeric, self.numeric, max_value=0)
class TestValidateString():
mixed = pandas.Series(['ab\n', 'a b', 'Ab', 'Ab', 'AB', 1, numpy.nan])
strings = pandas.Series(
['ab\n', 'a b', 'Ab', 'Ab', 'AB', numpy.nan, numpy.nan])
def test_validation(self):
assert_series_equal(
validate_string(self.mixed, return_type='values'),
self.strings)
pytest.warns(
ValidationWarning, validate_string, self.strings, nullable=False)
pytest.warns(
ValidationWarning, validate_string, self.strings, unique=True)
pytest.warns(
ValidationWarning, validate_string, self.strings, min_length=3)
pytest.warns(
ValidationWarning, validate_string, self.strings, max_length=2)
pytest.warns(
ValidationWarning, validate_string, self.strings[3:], case='lower')
pytest.warns(
ValidationWarning, validate_string, self.strings[3:], case='upper')
pytest.warns(
ValidationWarning, validate_string, self.strings[3:], case='title')
pytest.warns(
ValidationWarning, validate_string, self.strings, newlines=False)
pytest.warns(
ValidationWarning, validate_string, self.strings,
trailing_whitespace=False)
pytest.warns(
ValidationWarning, validate_string, self.strings, whitespace=False)
pytest.warns(
ValidationWarning, validate_string, self.strings,
matching_regex=r'\d')
pytest.warns(
ValidationWarning, validate_string, self.strings,
non_matching_regex=r'[\d\s\w]')
pytest.warns(
ValidationWarning, validate_string, self.strings,
whitelist=self.strings[:4])
pytest.warns(
ValidationWarning, validate_string, self.strings,
blacklist=['a', 'Ab'])
``` |
{
"source": "JMensch/reddit-watch",
"score": 3
} |
#### File: JMensch/reddit-watch/bot.py
```python
import praw
import time
import datetime
from multiprocessing import Process
def controller(keywords, subreddits, username):
"""
"""
# instantiate praw and request reddit login
r = praw.Reddit('Reddit-Watch RedditBot')
r.login()
# the process queue
jobs = []
for subreddit in subreddits:
p = Process(target=run, args=(r, keywords, subreddit, username))
jobs.append(p)
p.start()
def run(reddit, keywords, subreddit, username):
"""
"""
# list of searched posts
already_done = []
    # the subreddit watcher: look up the subreddit object once, then poll it
    subreddit = reddit.get_subreddit(subreddit)
    while True:
        print "watching %s..." % subreddit
# for newest 100 posts
for post in subreddit.get_new(limit=100):
# get post body and title
op_body = post.selftext.lower()
op_title = post.title.lower()
op = op_body + " " + op_title
# check if post title or body has a search value
if post.id not in already_done:
for keyword in keywords:
if keyword in op:
# create and send message
msg = '[%s](%s)' % (op_title, post.short_link)
title = '[Reddit-Watch]['+datetime.datetime.now().strftime("%Y-%m-%d")+'] ' + keyword
reddit.send_message(username, title, msg)
already_done.append(post.id)
# wait for 15mins
time.sleep(900)
```
#### File: JMensch/reddit-watch/main.py
```python
import optparse
from bot import controller
def main():
desc = "Reddit-Watch: a subreddit search and notification bot"
# the cli
p = optparse.OptionParser(description=desc)
p.add_option('-s', '--subs', default=[], help="subreddits to search", action="append", dest="subs")
p.add_option('-k', '--keywords', default=[], help="keywords to search for", action="append", dest="keys")
    p.add_option('-u', '--username', default=None, help="username to send notification", action="store", dest="username")
opts, args = p.parse_args()
# check required params
if not opts.subs:
p.error('Please enter a subreddit to watch.')
if not opts.keys:
        p.error('Please enter a keyword to watch for.')
if not opts.username:
p.error('Please enter a username to send the notification.')
controller(opts.keys, opts.subs, opts.username)
if __name__ == '__main__':
main()
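# Example invocation (subreddit, keyword and username values are hypothetical):
#   python main.py -s learnpython -s forhire -k hiring -k "remote ok" -u my_reddit_name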
``` |
{
"source": "jmenzelupb/MSMP-xmas",
"score": 2
} |
#### File: 2019/fourier_xmas/xmas.py
```python
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.animation as animation
Box = [0.0, 0.0 + 3.5j, 11.0 + 3.5j, 11.0, 0.0]
X=[0.0, -1.0 + -1.0j, 1.0 + 1.0j, 0.0, -1.0 + 1.0j, 1.0 + -1.0j]
M=[-1.0 + -1.0j, -1.0 + 1.0j, 0.0, 1.0 + 1.0j, 1.0 + -1.0j]
A=[-0.6 + -1.0j, 1.0j,0.6 + -1.0j,-0.8 + -0.1j, 0.8 + -0.1j]
S=[0.0 + 1.0j,0.5 + 0.7j, 0.0 + 1.0j, -0.5 + 0.7j, 0.5 + -0.7j, 0.0 - 1.0j, -0.5 + -0.7j]
print(A)
X_offset=1.75 + 1.75j
M_offset=X_offset + 2.5
A_offset=M_offset + 2.5
S_offset=A_offset + 2.5
print(len(A))
for i in range(len(X)):
X[i] += X_offset
for i in range(len(M)):
M[i] += M_offset
for i in range(len(A)):
A[i] += A_offset
for i in range(len(S)):
S[i] += S_offset
underline=[1.0 +- 0.2j, -9.0 + -0.2j, 2.0 + -0.4j]
for i in range(len(underline)):
underline[i] += S[-2]
endpoints = [11.0 + 0.4j, 11.0, 0.0]
func_points = Box + X + M + A + S + underline + endpoints
intervals=[50,100,50,100,20,50,100,50,50,100,20,70,50,50,70,20,90,90,50,40,30,20,20,20,40,20,20,20,100,90,20,20,50]
print(func_points)
T=sum(intervals)
print(len(func_points), len(intervals))
x = np.linspace(func_points[0],func_points[1],intervals[0])
for i in range(1,len(func_points)-1):
tmp = np.linspace(func_points[i],func_points[i+1],intervals[i])
x = np.concatenate((x,tmp))
t = np.arange(0,T)
w0 = 2 * np.pi / T
coeff = []
e_func = []
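# The loop below estimates the complex Fourier series coefficients of the
# sampled contour x(t):
#   c_k = (1/T) * integral_0^T x(t) * exp(-1j * k * w0 * t) dt,  for k = -60 .. 59
# with numpy.trapz standing in for the integral over one period.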
for i in range(-60,60):
x_i = 1/T * np.trapz(x*(np.e ** (-1j * w0 * i * t)))
e_i = np.e ** (1j * w0 * i * t)
print(x_i)
coeff.append(x_i)
e_func.append(e_i)
coeff = np.array(coeff)
x_hat = coeff[0] * e_func[0]
for i in range(1,len(coeff)):
x_hat += coeff[i] * e_func[i]
coeff_anim = []
for j in range(T):
points = [0]
for i in range(len(coeff)):
new_point = points[-1] + coeff[i] * e_func[i][j]
points.append(new_point)
points = np.array(points)
coeff_anim.append(points)
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
def animate(i):
if(i < T):
x_1 = np.real(x_hat[:i])
y_1 = np.imag(x_hat[:i])
x_2 = np.real(coeff_anim[i])
y_2 = np.imag(coeff_anim[i])
ax.clear()
#ax.set_xlim([-1.5,7.5])
#ax.set_ylim([-2.2,1.5])
ax.plot(x_1,y_1)
ax.plot(x_2,y_2)
else:
ax.clear()
#ax.set_xlim([-1.5,7.5])
#ax.set_ylim([-2.2,1.5])
ax.plot(np.real(x_hat),np.imag(x_hat))
anim = animation.FuncAnimation(fig,animate,interval=20)
plt.show()
``` |
{
"source": "jmeppley/jupy_tools",
"score": 3
} |
#### File: jme/jupy_tools/filesystem.py
```python
import os, re, glob, pandas, numpy
from datetime import datetime
from collections import namedtuple, defaultdict
def find(path, filters=None):
""" recursively find files that match executable filters """
for root, dirs, files in os.walk(path, topdown=False):
for name in files:
if _check_path(root, name, filters):
yield os.path.join(root, name)
skips = []
for name in dirs:
if _check_path(root, name, filters):
yield os.path.join(root, name)
else:
skips.append(name)
for name in skips:
dirs.remove(name)
def get_rexp_filter(rexp):
if isinstance(rexp, str):
rexp = re.compile(rexp)
def check_path_against_rexp(root, name):
return rexp.search(os.path.join(root, name)) is not None
return check_path_against_rexp
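# Usage sketch (directory and pattern are hypothetical):
#   for path in find('/data/projects', filters=[get_rexp_filter(r'\.fastq$')]):
#       print(path)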
def _check_path(root, name, filters=None):
if filters:
for test in filters:
if not test(root, name):
return False
return True
# matches the bracketed names in a wildcard glob string
TEMPLATE_REXP = re.compile(r"(?<!{)\{\s*(\w+)\s*\}(?!})")
def glob_wildcards(template, constraints=None, as_tuple=False, debug=False):
""" should work like the snakemake function:
* given a template like path/{variable}.ext
* find the values for variable that match files
Except the return value is different.
Generates for each found file:
(file_path, dict(wildcard values))
If as_tuple is True, generates named tuple instead of dict.
"""
if constraints is None:
constraints = {}
# simple glob for finding files {xxx} -> *
glob_string = TEMPLATE_REXP.sub("*", template)
named_patterns = set()
def wc_repl(match):
""" replace {xxx} with named regex pattern using any constraints """
wc_name = match.group(1)
if wc_name in named_patterns:
return f"(?P={wc_name})"
named_patterns.add(wc_name)
wc_patt = constraints.get(wc_name, r".+")
return f"(?P<{wc_name}>{wc_patt})"
# regex for getting wildcards from path
wildcard_pattern = TEMPLATE_REXP.sub(wc_repl, _hide_dots(template))
if debug:
print(f"Wildcard regexp: '{wildcard_pattern}'")
wildcard_rexp = re.compile(wildcard_pattern)
# create named tuple class for returned data
if as_tuple:
Wildcards = namedtuple(
"Wildcards", list(set(m.group(1) for m in TEMPLATE_REXP.finditer(template)))
)
# loop over matched files
for glob_file in glob.glob(glob_string):
m = wildcard_rexp.match(glob_file)
if m:
# transform dict of names->matches to named tuple, if asked
wildcards = Wildcards(**m.groupdict()) if as_tuple else m.groupdict()
yield glob_file, wildcards
elif debug:
print("WARNING: {} doesn match {}".format(glob_file, wildcard_rexp.pattern))
def _hide_dots(path):
return re.sub(r"\.", "\.", path)
from numpy import log, power, abs
LN_BASE = log(power(1024, 1 / 3))
def human_readable_bytes(byt):
""" fixed version of https://stackoverflow.com/a/17754143/663466
hybrid of https://stackoverflow.com/a/10171475/2595465
with https://stackoverflow.com/a/5414105/2595465 """
# return bytes if small
if byt <= 99:
return str(int(byt))
magnitude = int(log(abs(byt)) / LN_BASE)
if magnitude > 19:
float_fmt = "%i"
illion = 20 // 3
else:
mag3 = (magnitude + 1) % 3
float_fmt = "%" + str(mag3) + "." + str(3 - mag3) + "f"
illion = (magnitude + 1) // 3
format_str = float_fmt + ["", "K", "M", "G", "T", "P", "E"][illion]
return (format_str % (byt * 1.0 / (1024 ** illion))).lstrip("0")
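# e.g. human_readable_bytes(50) -> '50' and human_readable_bytes(1536) -> '1.50K'
# (as produced by the format-string logic above; larger inputs scale to M, G, T, ...)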
def get_file_sizes_and_dates_by_uid(volume, users=None, min_age=0, follow_links=False, time_stat='mtime'):
""" Collect date and size by user id
Params:
* users: only consider these users (ids or names)
* min_age: only save files at least this old (in seconds)
* follow_links: passed to os.walk (default is False)
* time_stat: one of 'mtime', 'ctime', 'atime', or 'max'
"""
# translate user ids to names
userid_map = get_user_lookup_table().to_dict()
# translate userids to names in include list
if users is not None:
users = set(userid_map.get(u, u) for u in users)
usage_data = defaultdict(lambda: [])
min_date = int(datetime.now().timestamp())
now = datetime.now().timestamp()
for root_path, folder_list, file_list in os.walk(volume, followlinks=follow_links):
for file_name in file_list:
try:
file_path = os.path.join(root_path, file_name)
if not(os.path.isfile(file_path)):
# skip broken links
continue
file_stats = os.stat(file_path)
# filter by owner if user list given
ownerid = file_stats.st_uid
owner = userid_map.get(ownerid, ownerid)
if users is not None and owner not in users:
continue
# get the user selected time stat
mtime = file_stats.st_ctime if time_stat == 'ctime' \
else file_stats.st_mtime if time_stat == 'mtime' \
else file_stats.st_atime if time_stat == 'atime' \
else max(file_stats.st_mtime, file_stats.st_ctime, file_stats.st_atime)
# keep track of oldest file
min_date = min(mtime, min_date)
# filter by age
file_age = now - mtime
if file_age < min_age:
continue
usage_data[owner].append((file_stats.st_size,
mtime,
file_path,
))
except:
pass
return usage_data, min_date
TIME_SPANS = {
'minutes': 60,
'hours': 3600,
'days': 3600*24,
'weeks': 3600*24*7,
'months': 3600*24*30,
'years': 3600*24*365,
}
def get_file_size_table(usage_data, min_date,
age_bin_size=2,
age_bin_type='weeks', min_age=0):
""" translate files sizes and dates into table """
now = datetime.now().timestamp()
if age_bin_type not in TIME_SPANS:
raise Exception("I don't know the time span {}. Please specify one of: {}".format(
age_bin_type,
", ".join(TIME_SPANS.keys()),
))
age_bins_step = age_bin_size * TIME_SPANS[age_bin_type]
oldest_age = now - min_date
age_bin_bounds = numpy.arange(0, oldest_age + age_bins_step, age_bins_step)
counts = {}
now = datetime.now().timestamp()
for owner, file_data_list in usage_data.items():
owner_counts = counts.setdefault(owner, {})
for file_data in file_data_list:
size = file_data[0]
file_age = now - file_data[1]
if file_age < min_age:
continue
age_bin = int(file_age/age_bins_step)
owner_counts[age_bin] = owner_counts.get(age_bin, 0) + size
# make into a data frame
file_size_table = pandas.DataFrame(counts)
# headers...
#users = get_user_lookup_table()
#file_size_table.columns = [users.get(c,c) for c in file_size_table.columns]
file_size_table.index = \
[get_bin_bounds_string(i,
age_bin_bounds,
lambda b: \
str(int(b/TIME_SPANS[age_bin_type])),
"{} old".format(age_bin_type)) \
for i in file_size_table.index]
return file_size_table
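# Sketch of combined usage (the volume path is hypothetical):
#   usage, oldest = get_file_sizes_and_dates_by_uid('/export/data', time_stat='mtime')
#   table = get_file_size_table(usage, oldest, age_bin_size=1, age_bin_type='months')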
def get_bin_bounds_string(bin_index, bin_bounds, to_str=repr, suffix=""):
""" retuns, for example: '15 to 20 months' given:
bin_index: the location in bin_bounds to find the start value
bin_bounds: a list of bounding values
suffix: the class of the bounds. EG 'months' or 'days'
"""
return "{} to {} {}".format(to_str(bin_bounds[bin_index]), to_str(bin_bounds[bin_index + 1]), suffix)
def get_user_lookup_table():
""" returns series mapping user id to user name """
return pandas.read_table('/etc/passwd', sep=':', names=['user','code','id','group','home','shell'], index_col=2)['user']
``` |
{
"source": "jmeppley/np_read_clustering",
"score": 2
} |
#### File: np_read_clustering/scripts/choose_mcl_all_clusters.py
```python
import pandas, numpy, os
from collections import deque
from itertools import cycle
from scipy import stats
from Bio import SeqIO
# load the read lengths from the summary file
read_lens = pandas.read_csv(snakemake.input.read_lens,
sep='\t',
names=['read_id','sequence_length_template'],
index_col='read_id',
header=None).sequence_length_template.to_dict()
# process clusters to choose keepers
cluster_data = []
read_clusters = {}
sigma_cutoff = snakemake.params.sigma_cutoff
count_cutoff = snakemake.params.min_cl_size
# loop over clusters in mcl_file
with open(str(snakemake.input.mcl)) as mcl_lines:
for i,line in enumerate(mcl_lines):
# get cluster read names
reads = set(line.strip().split())
count = len(reads)
# get cluster read length dist
cluster_lens = numpy.array([read_lens[r] for r in reads])
counts, bins = numpy.histogram(cluster_lens, bins=100)
X = numpy.array([numpy.mean((bins[j], bins[j-1])) for j in range(1,len(bins))])
mu, sigma = stats.norm.fit(cluster_lens)
keep = (sigma <= sigma_cutoff and count >= count_cutoff)
cluster_data.append(dict(num=i, count=count, sigma=sigma, mu=mu,
keep=keep))
if keep:
"""
# write read list
if not os.path.exists(str(snakemake.output.reads)):
os.makedirs(str(snakemake.output.reads), exist_ok=True)
with open(f"{output.reads}/cluster.{i}.reads", 'wt') as reads_out:
reads_out.write("\n".join(reads) + "\n")
"""
# save cluster id
for read in reads:
read_clusters[read] = i
cluster_table = pandas.DataFrame(cluster_data)
## assign groups
# this serves 2 purposes:
# 1) we limit the number of files in each folder (too many files can slow
# down snakemake)
# 2) we enable running the workflow in chunks (can perform better in some
# cases)
keepers = cluster_table.query('keep')
num_keepers = keepers.shape[0]
# we want the number of groups, but we can get it from group_size
if 'group_size' in snakemake.config and 'num_groups' not in snakemake.config:
group_size = snakemake.config['group_size']
n_groups = int(numpy.ceil(num_keepers/group_size))
else:
n_groups = snakemake.config.get('num_groups', 100)
# assign a group to each cluster (round-robin)
groups = cycle(range(n_groups))
cluster_groups = {c:next(groups) for c in keepers['num']}
cluster_table['group'] = [cluster_groups.get(c,None) if k else None
for c,k in cluster_table[['num','keep']].values]
# write fasta files
if not os.path.exists(str(snakemake.output.reads)):
os.makedirs(str(snakemake.output.reads), exist_ok=True)
# limit number of open files with
n_open = 250
open_handle_ids = deque([])
handles = {}
def open_cluster_fasta(i):
"""
    checks for an open handle for this cluster and returns it if found;
    otherwise closes the oldest handle and replaces it with a new handle for this cluster
"""
# return open handle if it exists
try:
return handles[i]
except KeyError:
pass
# close handle(s) if we have too many
while len(handles) > n_open - 1:
# drop oldest
drop_id = open_handle_ids.popleft()
# close and delete
handles[drop_id].close()
del handles[drop_id]
group = cluster_groups[i]
fasta_file = f"{snakemake.output.reads}/group.{group}/cluster.{i}.fasta"
fd = os.path.dirname(fasta_file)
if not os.path.exists(fd):
os.makedirs(fd)
handle = open(fasta_file, 'at')
handles[i] = handle
open_handle_ids.append(i)
return handle
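# Note: the handles dict plus the open_handle_ids deque act as a simple FIFO cache,
# so at most n_open files are ever open at once even with thousands of clusters.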
# loop over all reads and write out
skipped_read_count = 0
for read in SeqIO.parse(snakemake.input.fasta, 'fasta'):
try:
cluster = read_clusters[read.id]
except KeyError:
# skip if no cluster
skipped_read_count += 1
continue
open_cluster_fasta(cluster).write(read.format('fasta'))
# add row for unclustered reads
for k,v in dict(num=-1, count=skipped_read_count, keep=False).items():
cluster_table.loc[-1,k] = v
# save cluster table
cluster_table.to_csv(str(snakemake.output.stats), sep='\t',
index=False)
``` |
{
"source": "jmeppley/py-metagenomics",
"score": 2
} |
#### File: py-metagenomics/databases/buildRefSeqAccToTaxidMap.py
```python
from optparse import OptionParser
import sys, logging, re
from edl.taxon import readTaxonomy, getNodeFromHit
def main():
usage='%prog [OPTIONS] TAXDUMP_PATH'
    description='reduce full RefSeq catalog (from STDIN) to acc->taxid map using TAXDUMP to verify taxids'
parser = OptionParser(usage, description=description)
parser.add_option("-g", "--genomic", default=False, action="store_true", help="output genoic accessions instead of proteins")
parser.add_option("-v", "--verbose",
action="count", dest="verbose", default=1,
help="Print log messages. Use twice for debugging")
parser.add_option("-q", '--quiet', dest='verbose',
action="store_const", const=0,
help="Suppress warnings. Only print fatal messages")
parser.add_option("-A", "--about",
action="store_true", dest="about", default=False,
help="Print description")
(options, args) = parser.parse_args()
if options.about:
print (description)
exit(0)
# check args
if options.verbose==0:
loglevel=logging.ERROR
elif options.verbose==1:
loglevel=logging.WARN
elif options.verbose==2:
loglevel=logging.INFO
elif options.verbose>=3:
loglevel=logging.DEBUG
logging.basicConfig(stream=sys.stderr, level=loglevel)
logging.info("Log level set to %r(%d)" % (loglevel,options.verbose))
if len(args) != 1:
parser.error("Please supply TAXDUMP path in command line")
# parse catalog
accToOrg={}
accRE=re.compile(r'\b([A-Z]{2}_[A-Z]*\d+)(\.\b)?\b')
protRE=re.compile(r'^([ANXWYZ]P_[A-Z]*\d+)$')
logging.info("reading catalog from STDIN")
for line in sys.stdin:
cells=line.rstrip('\r\n').split('\t')
(taxid,name,acc)=cells[0:3]
try:
taxid=int(taxid)
except:
pass
logging.debug(acc)
acc=accRE.match(acc).group(1)
logging.debug("'%s'" % acc)
m=protRE.match(acc)
if (m==None)==options.genomic:
# will match if acc matches exp and genomic is false OR
# if acc doesn't match and genomic is true
accToOrg[acc]=(taxid,name)
logging.debug("USing acc: %s" % (acc))
else:
logging.debug("Skipping acc: %s" % (acc))
# load taxonomy
logging.info("loading taxonomy from %s" % (args[0]))
taxonomy = readTaxonomy(args[0],namesMap=True)
# print table
changes=0
    for (acc,(taxid,name)) in accToOrg.items():
if taxid not in taxonomy.idMap:
node = getNodeFromHit(name, taxonomy.nameMap)
logging.debug("Changing %s to %s" % (taxid,node.id))
taxid = node.id
changes+=1
print ('\t'.join([acc,str(taxid)]))
logging.info("Changed %d taxon ids" % changes)
if __name__ =='__main__':
main()
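# Typical invocation (paths are hypothetical); the RefSeq catalog is read from STDIN:
#   zcat RefSeq-release.catalog.gz | python buildRefSeqAccToTaxidMap.py /path/to/taxdump > acc.to.taxid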
```
#### File: py-metagenomics/edl/gtdb.py
```python
import os
import re
import sys
import logging
from edl.taxon import TaxNode, Taxonomy
from edl.silva import writeDumpFiles
from edl.util import treeGenerator
logger = logging.getLogger(__name__)
GTDB = 'gtdb'
GTDBTAB = 'gtdb_table'
PHYLODB = 'phylodb'
def generate_taxdump(fasta=None, table=None, dump=".", **kwargs):
""" convert a GTDB faa file to ncbi style taxdumps """
if fasta is not None:
tax_file = fasta
fmt = 'fasta'
elif table is not None:
tax_file = table
fmt = 'table'
else:
raise Exception("Please supply 'fasta' or 'table' file")
tax_args = {k: v for k, v in kwargs.items() if k in ['style']}
taxonomy = parse_lineages(tax_file, fmt, **tax_args)
dump_args = {k: v for k, v in kwargs.items() if k in ['map_file_name']}
dump_taxonomy(taxonomy, dump, **dump_args)
def generate_gtdb_lineages_from_table(tax_file):
""" return acc,lineage tuple from file """
with open(tax_file) as table_h:
# skip header
try:
next(table_h)
except StopIteration:
raise Exception("Table is empty!\n" + tax_file)
for line in table_h:
org, _species, lineage = \
[x.strip()
for x in line.split('\t', 4)[:3]]
yield (org, lineage)
def generate_gtdb_lineages(fasta_file):
""" return acc,lineage tuple from file """
with open(fasta_file) as fasta_h:
for line in fasta_h:
if line.startswith(">"):
# in GTDB headers, lineage is second chunk
acc, lineage = line[1:].split(None, 2)[:2]
yield (acc, lineage)
def generate_phylodb_lineages(fasta_file):
""" return acc,lineage tuple from file """
with open(fasta_file) as fasta_h:
for line in fasta_h:
if line.startswith(">"):
# in GTDB headers, lineage is second chunk
acc, lineage = line[1:].split("\t", 2)[:2]
yield (acc, lineage)
def parse_lineages(tax_file, fmt='fasta', style=GTDB):
""" returns taxonomy object """
id_map = {}
root = TaxNode('root', None, None)
tree = {'root': root}
logger.debug("Parsing %s", tax_file)
if style == GTDB:
add_lineage_to_tree = add_gtdb_lineage_to_tree
generate_lineages = generate_gtdb_lineages
else:
add_lineage_to_tree = add_phylodb_lineage_to_tree
generate_lineages = generate_phylodb_lineages
# generate taxonomy tree
for acc, lineage in generate_lineages(tax_file):
# create TaxNode
node = add_lineage_to_tree(lineage, tree)
id_map[acc] = node
logger.debug("Adding id numbers to %d nodes", len(tree))
# assign numeric IDs
i = 0
for node in treeGenerator(root):
i += 1
node.id = i
logger.debug("Added %d id numbers", i)
return Taxonomy(id_map, None, None, tax_file, root)
RANK_LIST = ['domain', 'phylum', 'class',
'order', 'family', 'genus', 'species']
def add_phylodb_lineage_to_tree(lineage, tree):
""" parse given lineage
create new TaxNode objects as needed
assumes that there are 7 elements in lineage, one for each rank
return leaf node """
last_node = tree['root']
sub_lineage = []
if lineage.startswith('Euk'):
# There is an extra entr in the PhyloDB Euk lineages
ranks = [RANK_LIST[0], None] + RANK_LIST[1:]
else:
ranks = RANK_LIST
for rank, taxon_string in zip(ranks, lineage.split(';')):
sub_lineage.append(taxon_string)
taxon = ';'.join(sub_lineage)
try:
last_node = tree[taxon]
except KeyError:
new_node = TaxNode(taxon, last_node.id, rank)
new_node.name = taxon_string
new_node.setParent(last_node)
tree[taxon] = new_node
last_node = new_node
return last_node
RANK_DICT = {'d': 'domain', 'p': 'phylum', 'c': 'class',
'o': 'order', 'f': 'family', 'g': 'genus', 's': 'species'}
def add_gtdb_lineage_to_tree(lineage, tree):
""" parse given lineage
create new TaxNode objects as needed
assumes lineage names atart with x__ where x is a rank abbreviation
return leaf node """
last_node = tree['root']
sub_lineage = []
for taxon_string in lineage.split(';'):
rank_char, taxon_name = taxon_string.split('__')
rank_char = re.sub(r'^_', '', rank_char)
sub_lineage.append(taxon_string)
taxon = ';'.join(sub_lineage)
try:
last_node = tree[taxon]
except KeyError:
try:
rank = RANK_DICT[rank_char]
except KeyError:
print(lineage)
print(rank_char)
exit(-1)
new_node = TaxNode(taxon, last_node.id, rank)
new_node.name = taxon_name
new_node.setParent(last_node)
tree[taxon] = new_node
last_node = new_node
return last_node
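# Example of the GTDB-style lineage string this function expects (taxon names are
# illustrative only):
#   d__Bacteria;p__Proteobacteria;c__Gammaproteobacteria;o__Pseudomonadales;...
# Each x__ prefix is mapped to a rank via RANK_DICT, and intermediate nodes are
# created once and then reused by every lineage that shares the prefix.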
def dump_taxonomy(taxonomy, dump_path, map_file_name='gtdb.acc.to.taxid'):
""" generate nodes.dmp and names.dmp """
# Write dump files
if not os.path.exists(dump_path):
os.makedirs(dump_path)
with open(os.path.sep.join((dump_path, 'nodes.dmp')), 'w') as nodes_h:
with open(os.path.sep.join((dump_path, 'names.dmp')), 'w') as names_h:
writeDumpFiles(taxonomy.root, nodes_h, names_h)
# Write hit->tax mapping file
if map_file_name is None:
return
with open(os.path.sep.join((dump_path, map_file_name)),
'w') as acc_map_h:
for (hitid, tax_node) in taxonomy.idMap.items():
acc_map_h.write("%s\t%d\n" % (hitid, tax_node.id))
if __name__ == '__main__':
""" convert a GTDB faa file to ncbi style taxdumps
kw arguments to generate_taxdump passed as args like:
python edl/grdb.py fasta=/path/to/x.faa dump=/path/to/dump
for reference:
generate_taxdump(fasta=None, table=None, dump="."):
"""
kwargs = dict(w.split("=", 1) for w in sys.argv[1:])
logging.basicConfig(level=logging.DEBUG)
logger.debug("args are: %r from:\n%s", kwargs, sys.argv)
# do the work:
generate_taxdump(**kwargs)
```
#### File: py-metagenomics/edl/hits.py
```python
from edl.util import parseMapFile
from edl.taxon import getNodeFromHit, \
getAncestorClosestToRank, \
readTaxonomy, \
add_taxonomy_dir_argument
from edl.blastm8 import filterM8Stream, \
FilterParams, \
formatsWithNoDescription, \
add_hit_table_arguments
from edl.expressions import accessionRE, nrOrgRE, koRE, giRE, pfamRE
import logging
logger = logging.getLogger(__name__)
#############
# Constants #
#############
ACCS = 'accs'
ORGS = 'orgs'
KEGG = 'kegg'
PFAM = 'pfam'
GIS = 'gis'
HITID = 'hitid'
HITDESC = 'hitdesc'
parsingREs = {
ORGS: nrOrgRE,
ACCS: accessionRE,
KEGG: koRE,
GIS: giRE,
PFAM: pfamRE}
ALLEQ = 'all'
FIRST = 'first'
PORTION = 'portion'
def translateHits(hitMap, hitTranslation):
for (read, hit) in hitMap.items():
if isinstance(hit, type([])):
newHits = []
for h in hit:
t = hitTranslation.get(h, None)
if t is not None:
if isinstance(t, type([])):
newHits.extend(t)
else:
newHits.append(t)
else:
newHits.append(h)
hitMap[read] = list(set(newHits))
else:
t = hitTranslation.get(hit, None)
if t is not None:
hitMap[read] = t
def translateCounts(counts, translation):
for key in counts.keys():
newKey = translation.get(key, None)
if newKey is not None:
count = counts.pop(key)
counts[newKey] = counts.setdefault(newKey, 0) + count
def binHits(hitMap):
"""
return map of assignments to list of reads
"""
hits = {}
for (read, hit) in hitMap.items():
if isinstance(hit, list):
for h in hit:
hits.setdefault(h, []).append(read)
else:
hits.setdefault(hit, []).append(read)
return hits
def binAndMapHits(hitIter):
"""
return map of assignments to list of reads
"""
hits = {}
hitMap = {}
for (read, hit) in hitIter:
hitMap[read] = hit
if isinstance(hit, list):
for h in hit:
hits.setdefault(h, []).append(read)
else:
hits.setdefault(hit, []).append(read)
return (hits, hitMap)
def loadSequenceWeights(weightFiles):
"""
Load and merge list of sequence weight maps.
"""
if len(weightFiles) > 0:
sequenceWeights = {}
for weightFile in weightFiles:
sequenceWeights.update(parseMapFile(weightFiles, valueType=int))
else:
sequenceWeights = None
return sequenceWeights
def add_weight_arguments(parser, multiple=False):
action = 'store'
default = None
helpText = "File listing counting weights by sequence id. This is \
used for clustered or assembled data where each read (or contig) could \
represent any number of raw reads. The file should be a simple two-column \
tab-separated table with sequence-ids in the first column and integer \
weights in the second. "
if multiple:
action = 'append'
default = []
helpText += "For multiple files, supply the flag (-w or \
--sequenceWeights) for each file name. Concatenating all tables into \
one file will have the same net result."
parser.add_argument("-w", "--sequenceWeights", dest='weights',
action=action, default=default, help=helpText)
def add_count_arguments(parser, defaults={}):
default = defaults.get('cutoff', 0.01)
parser.add_argument(
"-c",
"--cutoff",
dest="cutoff",
type=float,
default=default,
help="Cutoff for showing taxa. If a fractional count for a taxa "
"is below this value, it will be folded up into its parent "
"domain. Defaults to: %s" % default,
metavar="CUTOFF")
default = defaults.get('allMethod', ALLEQ)
parser.add_argument(
"-a",
"--allMethod",
dest="allMethod",
default=default,
choices=(
FIRST,
ALLEQ,
PORTION),
help="%r means +1 for every hit found for each read. %r means"
" +1 to the first hit for each read. %r means +1/(nhits) for all"
" hits of each read. Defaults to %r" % (ALLEQ,
FIRST,
PORTION,
default))
def getAllMethod(allMethod):
return allMethods[allMethod]
def applyFractionalCutoff(counts, threshold=None, cutoff=None, label='Other'):
"""
For any value in the dict below cutoff, remove and add to 'other' value
"""
if threshold is None:
if cutoff is None:
logger.warn("Nothing to do for applyFractionalCutoff")
return
threshold = float(cutoff) * sum(counts.values())
osum = 0
for key in list(counts.keys()):
if key == label:
continue
count = counts[key]
if count < threshold:
osum += count
del counts[key]
counts[label] = osum + counts.get(label, 0)
return counts
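# e.g. applyFractionalCutoff({'A': 90, 'B': 6, 'C': 4}, cutoff=0.05)
#   -> {'A': 90, 'B': 6, 'Other': 4}   (C falls below 5% of the total of 100)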
def countIterHits(hitIter, allMethod=ALLEQ, weights=None, returnMap=True):
"""
bin counts by hit and find total
return map from assignments to number of reads
and dict of original mappings
"""
countHitsForRead = getAllMethod(allMethod)
total = 0
counts = {}
if returnMap:
hitMap = {}
multiplier = 1
for (read, hit) in hitIter:
total += 1
if returnMap:
hitMap[read] = hit
if weights is not None:
multiplier = weights.get(read, 1)
if isinstance(hit, type([])):
countHitsForRead(hit, counts, multiplier=multiplier)
else:
counts[hit] = multiplier + counts.get(hit, 0)
if returnMap:
return (total, counts, hitMap)
return (total, counts)
def _oneCountPerHit(hits, counts, multiplier=1):
for hit in hits:
counts[hit] = multiplier + counts.get(hit, 0)
def _portionHitCount(hits, counts, multiplier=1):
multiplier = multiplier / float(len(hits))
_oneCountPerHit(hits, counts, multiplier=multiplier)
def _countFirstHit(hits, counts, multiplier=1):
counts[hits[0]] = multiplier + counts.get(hits[0], 0)
def countHits(hitMap):
"""
bin counts by hit and find total
return map from assignments to number of reads
"""
total = 0
counts = {}
if isinstance(hitMap, dict):
hitIter = hitMap.items()
else:
hitIter = hitMap
for (read, hit) in hitIter:
total += 1
if isinstance(hit, type([])):
for h in hit:
counts[h] = 1 + counts.get(h, 0)
else:
counts[hit] = 1 + counts.get(hit, 0)
return (total, counts)
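# e.g. countHits({'read1': ['orgA', 'orgB'], 'read2': 'orgA'})
#   -> (2, {'orgA': 2, 'orgB': 1})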
def parseAndFilterM8Stream(inhandle, options):
"""
runs the input stream through m8 filtering
and then through parseM8Hits to get map from each read to all hits
"""
inhandle = filterM8Stream(inhandle, options, return_lines=False)
logger.info("Parsing hits")
# since filter already parses hits, use that info
infoInDescription = options.parseStyle in [KEGG, ORGS, PFAM]
return parseM8Hits(inhandle, infoInDescription)
def parseM8File(inhandle,
hitStringMap,
options,
parsingStyle,
countMethod,
taxonomy=None,
rank=None,
ignoreEmptyHits=True,
):
"""
Wrapper method that combines filterM8, parseHits, and process hits to:
filter hits using format and scorePct
map reads to hits using parseHits
translate hits using processHits
If taxonomy is not None, hits will be TaxNode objects
    countMethod can only be LCA if taxonomy given
Return a dict from read to hits
"""
hitIter = parseM8FileIter(inhandle,
hitStringMap,
options,
parsingStyle,
countMethod,
taxonomy=taxonomy,
rank=rank,
ignoreEmptyHits=ignoreEmptyHits,
)
hitMap = {}
for (read, hits) in hitIter:
hitMap[read] = hits
logger.info("Done counting %d hits" % (len(hitMap)))
return hitMap
def parseM8FileIter(inhandle,
hitStringMap,
options,
parsingStyle,
countMethod,
taxonomy=None,
rank=None,
ignoreEmptyHits=True,
):
"""
Wrapper method that combines filterM8, parseHits, and process hits to:
filter hits using format and scorePct
map reads to hits using parseHits
translate hits using processHits
If taxonomy is not None, hits will be TaxNode objects
    countMethod can only be LCA if taxonomy given
Return an iterator over (read,hits) tuples.
"""
# get map from reads to lists of hit strings
logger.info("Parsing hits")
# filters and parses
# options.parseStyle = parsingStyle
hitIter = filterM8Stream(inhandle, options, return_lines=False)
# apply org or acc translation
    # apply map of hit names if given
# look up taxon node
hitIter = processHits(
hitIter,
hitStringMap=hitStringMap,
parseStyle=parsingStyle,
taxonomy=taxonomy,
rank=rank)
# apply count method
hitIter = applyCountMethod(hitIter, countMethod, ignoreEmptyHits)
return hitIter
def parseHitsIter(
hitIter,
hitStringMap,
parsingStyle,
countMethod,
taxonomy=None,
rank=None,
ignoreEmptyHits=None):
"""
Same as parseM8FileIter, but takes in an iterator over Hit objects
Simply runs processHits and applyCountMethod
"""
# apply org or acc translation
    # apply map of hit names if given
# look up taxon node
hitIter = processHits(
hitIter,
hitStringMap=hitStringMap,
parseStyle=parsingStyle,
taxonomy=taxonomy,
rank=rank)
# debugKey="<KEY>"
# logger.debug("Hits for %s: %r" % (debugKey,hitMap[debugKey]))
# apply count method
hitIter = applyCountMethod(hitIter, countMethod, ignoreEmptyHits)
return hitIter
def sortedHitIterator(hitMap):
"""
Given a dictionary of reads to hits, return in order
"""
for read in sorted(hitMap.keys()):
yield (read, hitMap[read])
def applyCountMethod(hitIter, method, ignoreEmpty=True):
# chose function that applies method
if method == 'LCA' or method == 'rLCA':
getBestHit = _findLeastCommonAncestor
elif method == 'first':
getBestHit = _takeFirstHit
elif method == 'all':
getBestHit = _returnAllHits
elif method == 'consensus':
getBestHit = _returnConsensus
elif method == 'most':
getBestHit = _returnMostCommon
if ignoreEmpty:
removeEmptyFunc = _removeEmpty
else:
removeEmptyFunc = _return_value
# apply method to hit map
hitsIn = 0
hitsOut = 0
reads = 0
for (read, hits) in hitIter:
reads += 1
hitsIn += len(hits)
hits = getBestHit(hits)
hits = removeEmptyFunc(hits)
if hits is not None:
hitsOut += len(hits)
yield (read, hits)
logger.debug("%s=>%r" % (read, hits))
logger.info(
"Collected %d hits into %d hits for %d reads" %
(hitsIn, hitsOut, reads))
def _findLeastCommonAncestor(hits):
"""
Given a list of hits as TaxNode objects, find the least common ancestor.
Hits that are not TaxNodes are ignored.
"""
# check for hits not translated to TaxNode objects
i = 0
while i < len(hits):
if hits[i] is None:
hits.pop(i)
elif isinstance(hits[i], type("")):
logger.info(
"Skipping hit: %s (cannot translate to taxon)" %
(hits.pop(i)))
else:
i += 1
# make sure there are some hits to process
if len(hits) == 0:
# sys.exit("No hits given!")
return None
# get LCA for these hits
hit = hits[0]
for i in range(1, len(hits)):
hit = hit.getLCA(hits[i])
return [hit, ]
def _returnMostCommon(hits):
counts = {}
for hit in hits:
count = counts.get(hit, 0)
count += 1
counts[hit] = count
logger.debug(repr(counts))
bestCount = 0
bestHit = None
for (hit, count) in counts.items():
if count > bestCount:
bestHit = [hit, ]
bestCount = count
elif count == bestCount:
bestHit.append(hit)
return bestHit
def _takeFirstHit(hits):
if len(hits) > 0:
return hits[0:1]
else:
logger.debug("No hits!")
return None
def _returnAllHits(hits):
return list(set(hits))
def _returnConsensus(hits):
hits = _returnAllHits(hits)
if len(hits) == 1:
return hits
else:
return None
def _return_value(value):
return value
def _removeEmpty(hits):
if hits is None:
return hits
while True:
try:
hits.remove(None)
except ValueError:
break
while True:
try:
hits.remove('')
except ValueError:
break
if len(hits) > 0:
return hits
else:
return []
def parseHits(inhandle, readCol, hitCol, skipFirst, hitSep):
"""
read over lines and pull out (read,[hits]) pairs given:
inhandle: iterable set of strings (ie lines in a file)
readCol: index of column with read name
hitCol: index of column with hit name (-1 => every non-read column)
skipFirst: skip first line if True
hitSep: if not None, split data in hit column with this separator
"""
logger.debug("BEGIN parseHits(in, %r, %r, %r, %r)" %
(readCol, hitCol, skipFirst, hitSep))
# get line parsing function
if hitSep == 'eval':
extractReadHits = _getReadHitsEval
else:
hitCol = int(hitCol)
if hitCol < 0:
extractReadHits = _getReadHitsAll
elif hitSep is not None:
extractReadHits = _getReadHitsSep
else:
extractReadHits = _getReadHitsSimple
if skipFirst:
next(inhandle)
hitCount = 0
lineCount = 0
lastRead = None
for line in inhandle:
lineCount += 1
cells = line.rstrip('\n\r').split('\t')
(read, hits) = extractReadHits(cells, readCol, hitCol, hitSep)
if read != lastRead:
if lastRead is not None:
yield (lastRead, readHits)
readHits = list(hits)
lastRead = read
else:
readHits.extend(hits)
hitCount += len(hits)
if lastRead is not None:
yield (lastRead, readHits)
logger.info("Read %d hits from %d lines" % (hitCount, lineCount))
def parseM8Hits(hitIter, returnHitDescriptions):
logger.debug("BEGIN parseM8Hits()")
lastRead = None
hitCount = 0
readCount = 0
for read, hits in hitIter:
readCount += 1
fields = []
for hit in hits:
hitCount += 1
if returnHitDescriptions:
fields.append(hit.hitDesc)
else:
fields.append(hit.hit)
yield (read, fields)
logger.info("Read %d hits from %d reads" % (hitCount, readCount))
# -- helpers for parseHits -- #
# the following functions take a line from a table and return a read name
# and an iterable collection of hits
def _getReadHitsEval(cells, readCol, hitCol, hitSep):
"""
use eval to evaluate contents of hit cell. If resulting object is
not iterable, put it into a tuple
"""
read = cells[readCol]
hit = cells[hitCol]
# try to evaluate expression
try:
hit = eval(hit)
except Exception:
logger.warn("exception from 'eval(%r)'" % (hit))
# make it iterable if it's not
try:
getattr(hit, '__iter__')
except AttributeError:
hit = (hit,)
return (read, hit)
def _getReadHitsAll(cells, readCol, hitCol, hitSep):
"""
every entry in cells (other than read) is a hit
"""
read = cells.pop(readCol)
return(read, cells)
def _getReadHitsSep(cells, readCol, hitCol, hitSep):
"""
    use hitSep to split the hit cell into multiple hits
"""
read = cells[readCol]
hitCell = cells[hitCol]
hits = hitCell.strip().split(hitSep)
return (read, hits)
def _getReadHitsSimple(cells, readCol, hitCol, hitSep):
read = cells[readCol]
hit = cells[hitCol]
return (read, (hit,))
# -- end helpers for parseHits -- #
class HitTranslator:
"""
Given a list of (function,data,returnType) tuples ("mappings")
Return an object with translateHit method that will apply the
mappings to a hit
"""
def __init__(self, mappings, useDesc=False, hitsAreObjects=True):
self.mappings = mappings
if mappings is None or len(mappings) == 0:
self.applyMappings = self.returnSame
if hitsAreObjects:
if useDesc:
self.getId = self.getDescription
else:
self.getId = self.returnSame
def getId(self, hit):
return hit.hit
def getDescription(self, hit):
return hit.hitDesc
def returnSame(self, hit):
return hit
def translateHit(self, hit):
return self.applyMappings([self.getId(hit), ])
def applyMappings(self, hits):
for (mapFunc, mapping, retType) in self.mappings:
newHits = []
for hit in hits:
mapped = mapFunc(hit, mapping)
if retType is list:
newHits.extend(mapped)
elif retType is str:
newHits.append(mapped)
else:
if isinstance(mapped, list) or isinstance(mapped, tuple):
newHits.extend(mapped)
else:
newHits.append(mapped)
hits = newHits
return hits
def getHitTranslator(
hitStringMap=None,
parseStyle=ORGS,
taxonomy=None,
rank=None,
defaultToNone=True,
hitsAreObjects=True):
"""
Return a function that will return a list of organsims from a single hit.
hitStringMap (None): dictionary mapping hit IDs to something else
parseStyle (ORGS): how to process hit data into an identifying string
taxonomy (None): An edl.taxon.Taxonomy object or directory
conatining taxdmp
rank (None): Maximum rank to resolve hits
hitsAreObjects: True if hits are edl.blastm8.Hit objects, else strings
"""
parseRE = parsingREs.get(parseStyle, None)
if logger.getEffectiveLevel() <= logging.INFO:
if hitStringMap is None:
mapstr = 'None'
else:
mapstr = '%d keys' % (len(hitStringMap))
if parseRE is None:
exprstr = 'None'
else:
exprstr = parseRE.pattern
if taxonomy is None:
taxstr = 'None'
else:
taxstr = '%d ids' % (len(taxonomy.idMap))
logger.info(
"Creating hit translator:\n default to None: %r\n map: %s\n "
"parsing %s: %s\n taxa: %s\n rank: %s" %
(defaultToNone, mapstr, parseStyle, exprstr, taxstr, rank))
# set up variables
infoInDescription = parseStyle in [KEGG, ORGS, PFAM]
mappings = []
if defaultToNone:
mapFunction = _simpleMapNoneFunction
else:
mapFunction = _simpleMapFunction
# initial parsing of hit id or description via regular expression
if parseRE is not None:
mappings.append((_findAllREfunctionSimpler, parseRE, list))
# optional look up table
if hitStringMap is not None:
mappings.append((mapFunction, hitStringMap, None))
# optional conversion to Taxon objects
if taxonomy is not None:
if parseStyle == ORGS:
if defaultToNone:
mappings.append((getNodeFromHit, taxonomy.nameMap, str))
else:
mappings.append((_getNodeHitFunction, taxonomy.nameMap, str))
else:
mappings.append((mapFunction, taxonomy.idMap, str))
if rank is not None:
mappings.append((getAncestorClosestToRank, rank, str))
return HitTranslator(
mappings,
useDesc=infoInDescription,
hitsAreObjects=hitsAreObjects)
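# Minimal sketch (the hit string is made up; with hitsAreObjects=False the inputs
# are plain strings rather than edl.blastm8.Hit objects):
#   translator = getHitTranslator(parseStyle=ACCS, hitsAreObjects=False)
#   translator.applyMappings(['ref|WP_012345678.1| hypothetical protein'])
#   # -> list of accession strings captured by accessionRE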
# turn hit lines into organisms or KOs or anything else
def processHits(hitIter, **kwargs):
"""
    Take an iterator over (read, hits) tuples and apply mappings using
a HitTranslator
"""
translator = getHitTranslator(**kwargs)
# translate hits
for (key, hits) in hitIter:
logger.debug("%s => %s" % (key, hits))
newHits = []
for h in hits:
newHits.extend(translator.translateHit(h))
logger.debug(str(newHits))
yield (key, newHits)
def processHitsOld(
hitIter,
mapping=None,
expr=None,
taxIdMap=None,
taxNameMap=None,
defaultToNone=True,
rank=None):
"""
Take a map of reads (or other keys) to lists of hits and translate hits.
Can use the following steps in this order with any steps omitted:
simpile dictionary translation using 'mapping'
regular expression (where every captured group is returned as a hit)
a translation to taxNode objects by one of:
simple dictionary translation using taxIdMap
name based look up using edl.taxon.getNodeFromHit() and taxNameMap
    if defaultToNone is changed to False, anything not found in one of
    the mappings (mapping, taxIdMap, or taxNameMap) is passed through
    unchanged instead of being replaced with None
"""
if logger.getEffectiveLevel() <= logging.DEBUG:
if mapping is None:
mapstr = 'None'
else:
mapstr = '%d keys' % (len(mapping))
if expr is None:
exprstr = 'None'
else:
exprstr = expr.pattern
if taxIdMap is None:
if taxNameMap is None:
taxstr = 'None'
else:
taxstr = '%d names' % (len(taxNameMap))
else:
taxstr = '%d ids' % (len(taxIdMap))
logger.debug(
"Starting processHits:\n default to None: %r\n map: %s\n "
"exp: %s\n taxa: %s\n rank: %s" %
(defaultToNone, mapstr, exprstr, taxstr, rank))
# set the functions to use:
if mapping is None:
mapFunction = _passFunction
elif defaultToNone:
mapFunction = _simpleMapNoneFunction
else:
mapFunction = _simpleMapFunction
exprFunction = _findAllREfunction
if taxIdMap is not None:
taxMap = taxIdMap
if defaultToNone:
taxFunction = _simpleMapNoneFunction
else:
taxFunction = _simpleMapFunction
elif taxNameMap is not None:
taxMap = taxNameMap
if defaultToNone:
taxFunction = getNodeFromHit
else:
taxFunction = _getNodeHitFunction
else:
taxMap = None
taxFunction = _passFunction
if taxMap is None or rank is None:
rankFunction = _passFunction
else:
rankFunction = getAncestorClosestToRank
# translate hits
for (key, hits) in hitIter:
logger.debug("%s => %s" % (key, hits))
newHits = []
for h in hits:
# find all matches to expr, may be more than one
hs = exprFunction(h, expr)
logger.debug("%s => %s" % (h, hs))
for hit in hs:
hts = mapFunction(hit, mapping)
if not (isinstance(hts, list) or isinstance(hts, tuple)):
hts = [hts]
for hit in hts:
hit = taxFunction(hit, taxMap)
hit = rankFunction(hit, rank)
newHits.append(hit)
logger.debug(str(newHits))
yield (key, newHits)
# helper functions for processHits
# each function takes a hit and something else, and then reutrns a
# translated hit
def _passFunction(hit, mapping):
return hit
def _simpleMapFunction(hit, mapping):
newHit = mapping.get(hit, hit)
logger.debug("%s --> %r" % (hit, newHit))
return newHit
def _simpleMapNoneFunction(hit, mapping):
newHit = mapping.get(hit, None)
logger.debug("%s --> %r" % (hit, newHit))
return newHit
def _getNodeHitFunction(hit, taxMap):
newHit = getNodeFromHit(hit, taxMap)
if newHit is None:
return hit
else:
return newHit
def _findAllREfunctionSimpler(hit, expr):
hits = expr.findall(hit)
if len(hits) == 0:
return [hit, ]
else:
return hits
def _findAllREfunction(hit, expr):
if expr is None:
return (hit,)
hits = expr.findall(hit)
if len(hits) == 0:
return [hit, ]
else:
return hits
# end helper functions for processHits
def add_taxon_arguments(parser, defaults={}, choices={}):
# get format and filter_top_pct options from blastm8
add_hit_table_arguments(parser, defaults,
flags=['format', 'filter_top_pct'])
# specific to taxon parsing:
parser.add_argument(
"-m",
"--mapFile",
dest="mapFile",
default=defaults.get(
"mapFile",
None),
metavar="MAPFILE",
help="Location of file containing table of with db hit name "
"as first column and taxa or taxonids in second column. "
"Defaults to '%s'" % (defaults.get("mapFile", None)))
parser.add_argument(
"-p",
"--parseStyle",
default=defaults.get(
"parseStyle",
ACCS),
choices=[
ACCS,
GIS,
ORGS,
HITID,
HITDESC],
help="What should be parsed from the hit table: accessions('accs'), "
"'gis', organsim names in brackets ('orgs'), the full hit "
"name('hitid'), or the full hit description('hitdesc'). "
"(defaults to '%s')" % (defaults.get("parseStyles", ACCS)))
parser.add_argument(
"-C",
"--countMethod",
dest="countMethod",
default=defaults.get(
"countMethod",
"first"),
choices=choices.get(
'countMethod',
('first',
'most',
'all',
'LCA',
'consensus')),
help="How to deal with counts from multiple hits. (first, most: "
"can return multiple hits in case of a tie, LCA: MEGAN-like, "
"all: return every hit, consensus: return None unless all "
"the same). Default is %s" % (defaults.get("countMethod",
"first")),
metavar="COUNTMETHOD")
add_taxonomy_dir_argument(parser, defaults)
def readMaps(options, namesMap=False):
"""
Load the taxonomy and id->to->taxid maps requested by user
"""
return (readTaxonomyFiles(options, namesMap=namesMap), readIDMap(options))
def readTaxonomyFiles(options, namesMap=False):
"""
load the taxonomy specififed by the user. Create a name lookup map if
parseStyle is 'orgs'
"""
# read taxonomy
if options.taxdir is not None:
getTaxNames = namesMap or options.parseStyle == ORGS
taxonomy = readTaxonomy(options.taxdir, namesMap=getTaxNames)
logging.info("Read %d nodes from tax dump" % (len(taxonomy.idMap)))
else:
taxonomy = None
if options.countMethod == 'LCA' or options.countMethod == 'rLCA':
raise Exception('Cannot use LCA without providng a taxonomy (-n)')
logging.info("No taxonomy needed")
return taxonomy
def readIDMap(options):
"""
Load the specififed lookup table for hit IDs. If the parseStyle
requested is 'gis', convert keys to integers. The values are always
convereted to integeres since they are assumed to be taxids
"""
# map reads to hits
if options.parseStyle == GIS:
keyType = int
else:
keyType = None
if options.taxdir is not None:
valueType = int
else:
valueType = None
return parseMapFile(options.mapFile, valueType=valueType, keyType=keyType)
allMethods = {ALLEQ: _oneCountPerHit,
FIRST: _countFirstHit,
PORTION: _portionHitCount}
############
# Tests
############
def test():
import sys
global myAssertEq, myAssertIs
from test import myAssertEq, myAssertIs
if len(sys.argv) > 2:
loglevel = logging.DEBUG
else:
        loglevel = logging.WARN
logging.basicConfig(stream=sys.stderr, level=loglevel)
logger.setLevel(loglevel)
hits = testParseHits(sys.argv[1])
testTranslateAndCountHits(hits)
def testParseHits(testFile):
# test line parsing methods
cells = [1, 2, 3, 4, "(4,5)", "6,7"]
(read, hitIter) = _getReadHitsSimple(cells, 0, 2, None)
hits = []
for h in hitIter:
hits.append(h)
myAssertEq(read, 1)
myAssertEq(len(hits), 1)
myAssertEq(hits[0], 3)
(read, hitIter) = _getReadHitsSep(cells, 1, 5, ',')
hits = []
for h in hitIter:
hits.append(h)
myAssertEq(read, 2)
myAssertEq(hits, ['6', '7'])
(read, hitIter) = _getReadHitsAll(list(cells), 3, -1, None)
hits = []
for h in hitIter:
hits.append(h)
myAssertEq(read, 4)
myAssertEq(len(hits), 5)
myAssertEq(hits, [1, 2, 3, "(4,5)", "6,7"])
# give it a test file
hitIter = parseHits(open(testFile), 0, -1, True, None)
hits = {}
for r, h in hitIter:
hits[r] = h
logging.debug(repr(hits))
myAssertEq(len(hits), 29)
myAssertEq(hits['000023_2435_2174'], ['Prochlorococcus'])
myAssertEq(hits['000178_2410_1152'], ['Bacteria <prokaryote>'])
myAssertEq(hits['000093_2435_2228'], ['Candidatus Pelagibacter'])
return hits
def testTranslateAndCountHits(hits):
(total, counts) = countHits(hits)
myAssertEq(total, 29)
myAssertEq(counts["Prochlorococcus"], 10)
myAssertEq(counts['root'], 7)
translateHits(hits,
{'Bacteria <prokaryote>': 'other',
'root': 'other',
'Candidatus Pelagibacter': 'Pelagibacter'})
myAssertEq(hits['000178_2410_1152'], ['other'])
myAssertEq(hits['000093_2435_2228'], ['Pelagibacter'])
if __name__ == '__main__':
test()
```
#### File: py-metagenomics/edl/kegg.py
```python
import logging
import os
import re
import sys
logger = logging.getLogger(__name__)
##############
# Classes #
##############
################
# compiled REs #
################
koRE = re.compile(r'\b(K\d{5})\b')
kokoRE = re.compile(r'^ENTRY\s+(K\d{5})\b')
endSectionRE = re.compile(r'^\S')
definitionRE = re.compile(r'^DEFINITION\s+(\S.*\S)\s*$')
classRE = re.compile(r'^CLASS\s+(\S.*)$')
ecRE = re.compile(r'^DEFINITION.*\[(EC:[-0-9\.]+)\]')
genesRE = re.compile(r'^GENES\s+(\S.*\S)\s*$')
trailingBracketRE = re.compile(r'\s*\[[^\[\]]+\]\s*$')
kegkoRE = re.compile(r'^D\s+.+\b(K\d{5})(?:<\/a>)?\s*(.*)$')
britekoRE = re.compile(r'^[A-Z]\s+(K\d{5})\s*(.*)$')
geneListRE = re.compile(r'(?<=\s)([a-zA-Z0-9_.-]+)\b')
orgRE = re.compile('^([A-Z]{3,4}):')
cogGroupRE = re.compile(r'(.+)\[(.+)\]')
cogMapRE = re.compile(r'^\[(\S+)\]\s+(\S+)\s+(\S.+)$')
#############
# Functions #
#############
def readSEEDTree(treeFile):
"""
Return nested dictionary where first dict is map from levels (1,2,3)
and next dict is map from role to name.
    This is a simple tab-separated file with 4 columns:
    "role\tsubsystem\tlevel 2\tlevel 1"
"""
seedTree = {'1': {}, '2': {}, '3': {}}
with open(treeFile) as f:
for line in f:
(role, l3, l2, l1) = line.rstrip().split('\t')
seedTree['1'][role] = l1
seedTree['3'][role] = l3
seedTree['2'][role] = l2
return seedTree
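# Illustrative usage sketch (hypothetical file and role names, not part of
# the original module): the returned dict is keyed by hierarchy level, then
# by role.
#
#     seed_tree = readSEEDTree("subsys_tree.tsv")
#     seed_tree['1']['Some role']   # -> level-1 (broadest) subsystem name
#     seed_tree['3']['Some role']   # -> level-3 (finest) subsystem name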
def readCogTree(mapFile):
"""
return maps from CDD id to COGID, COG description, and COG category
"""
cogMap = {'gene': {}, 'description': {}, 'group': {}}
with open(mapFile) as f:
for line in f:
cdd, cog, gene, description, count = line.rstrip().split('\t')
description, group = cogGroupRE.match(description).groups()
cogMap['gene'][cdd] = cog
cogMap['description'][cdd] = description
groups = [re.sub(' +', ' ', g.strip()) for g in group.split("/")]
cogMap['group'][cdd] = groups
# hack to make things work with the previous methods (ie SEED)
cogMap['3'] = cogMap['group']
return cogMap
def readCogTreeFromWhog(mapFile):
"""
Return a map from COG id to category
"""
cogMap = {'gene': {}, 'group': {}}
with open(mapFile) as f:
for line in f:
            line = line.rstrip()
            m = cogMapRE.match(line)
if m:
category = m.group(1)
cog = m.group(2)
description = m.group(3)
cogMap['gene'][cog] = description
cogMap['group'][cog] = category
return cogMap
def parseSeedMap(mapFile):
"""
Return a dictionary mapping from accession to subsystem.
The SEED map (refseq2md52role.gz) starts with two summary lines followed
by three columns (accession\thash/sum\tSubsystem):
Mapped roles:9004(subsys.txt 2 subsystems2peg)
    Unmapped roles:521(subsys.txt)
    YP_921846\t1b41910965945b806d5defc49ad1a224\tCO dehydrogenases \
    maturation factor, CoxF family
    YP_001286326\t1e472ed51c0df8feb03ee296a0e55de4\tCO dehydrogenases \
    maturation factor, CoxF family
"""
accMap = {}
with open(mapFile) as f:
        # skip the two summary lines at the top of the file
        next(f)
        next(f)
for line in f:
# logger.debug(line)
(acc, code, subsys) = line.rstrip('\r\n').split('\t', 2)
# logger.debug("Mapped %s to %s (sum: %s)" % (acc,subsys,code))
accMap.setdefault(acc.strip(), []).append(subsys.strip())
return accMap
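# Illustrative usage sketch (hypothetical file path, not part of the original
# module): each accession maps to a list of subsystem strings.
#
#     acc_map = parseSeedMap("refseq2md52role")
#     acc_map.get("YP_921846")
#     # e.g. ["CO dehydrogenases maturation factor, CoxF family"]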
def _stripKeggKeyPrefix(key):
return key.split(":", 1)[1]
def parseLinkFile(mapFile, stripKeys=False, stripVals=True):
"""
Parse the gene_ko.list file from KEGG
hsa:10001 ko:K15128
hsa:10002 ko:K08546
hsa:10003 ko:K01301
with the possibility of duplicate records
"""
if mapFile is None:
return None
logger.info("parsing map file: %s" % (mapFile))
translation = {}
rows = 0
badRows = 0
lastKey = None
for line in open(mapFile):
cells = line.split('\t')
if len(cells) > 1:
rows += 1
key = cells[0].strip()
if stripKeys:
key = _stripKeggKeyPrefix(key)
value = cells[1].strip()
if stripVals:
# strip 'ko:' from start of each value
value = _stripKeggKeyPrefix(value)
if key == lastKey:
translation[key].append(value)
else:
translation[key] = [value, ]
else:
badRows += 1
if badRows > 0:
logger.warn("%d rows in map file had too few columns!" % (badRows))
logger.info(
"Read %d records from %d lines of %s" %
(len(translation), rows, mapFile))
return translation
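# Illustrative usage sketch based on the gene_ko.list format shown in the
# docstring above (not part of the original module):
#
#     gene_to_kos = parseLinkFile("gene_ko.list")
#     gene_to_kos["hsa:10001"]   # -> ["K15128"]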
def parseModuleMap(mapFile):
"""
Parse module file to dict
"""
return parseLinkFile(mapFile, stripKeys=True, stripVals=False)
def parseGeneKOMap(koFile):
"""
    scan the ko file and build a map from gene names to KOs
"""
koMap = {}
koCount = 0
inGenes = False
logger.info("Reading kos and gene names from %s" % koFile)
for line in open(koFile):
# find KO first
# looking for a KO line
match = kokoRE.match(line)
if match:
ko = match.group(1)
logger.debug("Start of %s" % (ko))
koCount += 1
if logger.getEffectiveLevel() >= logging.DEBUG:
if koCount % 1000 == 0:
logging.debug("Parsed %d KOs" % koCount)
continue
# look for information on this KO
if not inGenes:
# looking for a GENE line
match = genesRE.match(line)
if match:
inGenes = True
geneString = match.group(1)
logger.debug("found genes: %s" % (geneString))
_mapGenes(koMap, ko, geneString)
continue
        # every line is a gene line until further notice
elif not endSectionRE.match(line):
# not the end of gene section: reading more genes
geneString = line.strip()
logger.debug("found genes: %s" % (geneString))
_mapGenes(koMap, ko, geneString)
else:
# found all gene strings
logger.debug("End of genes")
inGenes = False
ko = None
logger.info("Mapped %d genes to %d kos" % (len(koMap), koCount))
return koMap
def _mapGenes(koMap, ko, geneString):
"""
    geneString looks like "ORG: geneid(genename) geneid(genename) geneid geneid"
while ids in KeggGenes look like: "org:geneid"
"""
org = orgRE.match(geneString).group(1).lower()
genes = geneListRE.findall(geneString)
for gene in genes:
kGene = "%s:%s" % (org, gene)
koMap.setdefault(kGene, []).append(ko)
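# Illustrative sketch of how _mapGenes fills the map (hypothetical values,
# not part of the original module):
#
#     ko_map = {}
#     _mapGenes(ko_map, "K00001", "HSA: 124(ADH1A) 125(ADH1B)")
#     # ko_map is now {"hsa:124": ["K00001"], "hsa:125": ["K00001"]}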
def parse_KEGG_file(k_file, kegg_level):
""" Checks filename and runs:
        parse_ko_file if the base filename is ko
parse_keg_file if file extension is .keg """
if os.path.basename(k_file) == 'ko':
return parse_ko_file(k_file, kegg_level)
elif len(k_file) > 4 and k_file[-4:] == ".keg":
return parse_keg_file(k_file, kegg_level)
else:
raise Exception("I don't know what to do with file: %s"
% (os.path.basename(k_file)))
def parse_ko_file(ko_file, level):
""" Parse KEGG metadata from the ko metadata file:
level: one of
* PATH, PATHWAY, or PATHWAYS
        * NAME, DEFINITION, DESCRIPTION, or FUNCTION
        * 1, 2 or 3 (for a level in the BRITE ko00001 hierarchy)
        * EG: ko00002:2 (for level 2 of BRITE:ko00002)
"""
# synonyms
if level in ['PATHWAYS', 'PATH', 'PATHS']:
level = 'PATHWAY'
if level in ['DESCRIPTION', 'FUNCTION']:
level = 'DEFINITION'
if level == 'EC':
level = 'ko01000:4'
if re.match(r'(ko\d\d\d\d\d:)?(\d+)', str(level)):
        # these are in the BRITE hierarchy
        # eg: ko00001:2 for level 2 of the main hierarchy
brite_hier, brite_level = \
re.match(r'(?:(ko\d\d\d\d\d):)?(\d+)', str(level)).groups()
brite_level = int(brite_level)
if brite_hier is None:
# if its just a number, assume k00001
brite_hier = 'ko00001'
logger.debug(f"Looking for level {brite_level} in {brite_hier}")
    results = {}
    with open(ko_file) as ko_handle:
# look for single line per entry
if level in ['NAME', 'DEFINITION']:
kw_expr = re.compile(r'^(ENTRY|{})(\s+)(\S.*)?'.format(level))
try:
for i, line in enumerate(ko_handle):
m = kw_expr.match(line)
if m:
keyword, spaces, value = m.groups()
if keyword == 'ENTRY':
ko = value.split()[0].strip()
elif keyword == level:
results[ko] = value.strip()
except Exception as exc:
print(f'Error on line {i}:\n{line}')
raise exc
# there can be multiple pathways after and including the PATHWAY line
elif level == 'PATHWAY':
kw_expr = re.compile(r'^(ENTRY|{})(\s+)(\S.*)?'.format(level))
def skip(line, indent, pathways):
return
def add_pathway(line, indent, pathways):
pathways.append(line[indent:-1])
pathways, indent = None, 0
for line in ko_handle:
m = kw_expr.match(line)
if m:
keyword, spaces, value = m.groups()
if keyword == 'ENTRY':
ko = value.split()[0].strip()
indent = 5 + len(spaces)
process_line = skip
continue
elif keyword == level:
process_line = add_pathway
pathways = results.setdefault(ko, [])
else:
process_line = skip
continue
process_line(line, indent, pathways)
else:
# BRITE
entry_rexp = re.compile(r'^ENTRY\s+(K\d+)')
brite_rexp = \
re.compile(r'^((?:BRITE)?\s+)(\S.+\S)\s*\[BR:(ko\d+)\]')
end_brite_rexp = re.compile(r'^\S')
level_rexp = re.compile(r'^(\s+)(\S.+)')
lines = iter(enumerate(ko_handle))
try:
# outer loop looping over Entries
while True:
# find next Entry line
for i, line in lines:
m = entry_rexp.match(line)
if m:
ko = m.group(1)
break
else:
# no more entries
break
# find start of BRITE
for i, line in lines:
m = brite_rexp.match(line)
if m:
spaces, name, hierarchy = m.groups()
if hierarchy == brite_hier:
brite_indent = len(spaces)
brite_levels = results.setdefault(ko, [])
break
# process BRITE lines
for i, line in lines:
if end_brite_rexp.match(line) or \
brite_rexp.match(line):
# start of next hierarchy or next keyword section
break
spaces, level_name = level_rexp.match(line).groups()
# level is number of spaces beyond original indent
if len(spaces) - brite_indent == brite_level:
brite_levels.append(level_name)
# end while outer loop
except StopIteration:
# I don't think we ever get here
pass
except Exception as exc:
print(f"error on line {i}:\n{line}")
print(f"found {len(results)} kos so far")
raise exc
return results
def parse_keg_file(keg_file, level):
""" Parse KEGG metadata from brite .keg files
level: one of
* PATH, PATHWAY, or PATHWAYS
* 1 - 6 or A - F
        * DEFINITION, DESCRIPTION, or FUNCTION
"""
# synonyms
if level in ['PATHWAYS', 'PATHWAY', 'PATHS']:
level = 'PATH'
if level in ['DESCRIPTION', 'FUNCTION']:
level = 'DEFINITION'
if str(level) in {'1', '2', '3', '4', '5', '6'}:
level = 'ABCDEF'[int(level) - 1]
ko_def_rexp = re.compile(r'^[B-F]\s+(K\d\d\d\d\d)\s+(\S.+\S)\s*$')
level_rexp = re.compile(r'^([A-F])\s*(\S.+)')
path_rexp = re.compile(r'\s*\[PATH:\s*ko\d+\s*\]')
html_rexp = re.compile(r'</?[a-z]+/?>')
results = {}
with open(keg_file) as keg_handle:
# two types of parsing
if level == 'DEFINITION':
# just looking for the line with the K# and description
# (ignore hierarchy)
for line in keg_handle:
m = ko_def_rexp.match(line)
if m:
ko, desc = m.groups()
results[ko] = desc
elif level in ['A', 'B', 'C', 'D', 'E', 'F', 'PATH']:
# looking for level, and all KOs after that
print(f"looking for {level}")
level_name = None
for line in keg_handle:
# check for ko first, because it also looks like a level
m = ko_def_rexp.match(line)
if m:
if level_name is not None:
# if we've seen a level we like
# save ko level
ko = m.group(1)
results.setdefault(ko, []).append(level_name)
continue
m = level_rexp.match(line)
if m:
letter, name = m.groups()
if letter == level:
# found a header at the target level, remember name
level_name = html_rexp.sub('', name)
elif (level == 'PATH' and path_rexp.search(name)):
# found a header at the target level, remember name
level_name = path_rexp.sub('', name)
elif letter < level:
# we've gone back up a level, don't label anything
level_name = None
continue
# remove duplicates and ensure order
results = {ko: sorted(set(paths))
for ko, paths in results.items()}
else:
if level == 'NAME':
raise Exception("I can't parse name from a .keg file. "
"Please use the file: ko/ko")
if level == 'EC':
raise Exception("For EC use the ko/ko file or the EC brite: "
"brite/ko/ko01000.keg and choose a level")
raise Exception(f"I don't know what level {level} is!")
return results
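# Illustrative usage sketch (hypothetical file path, not part of the original
# module): pull level-2 BRITE categories for each KO from a .keg hierarchy.
#
#     ko_to_level2 = parse_keg_file("ko00001.keg", 2)
#     ko_to_level2.get("K00399")   # e.g. ['Energy Metabolism', ...]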
def add_path_arguments(parser, defaults={}, choices={}, helps={}):
# get format and filter_top_pct arguments from blastm8
from edl.hits import HITID, ACCS, GIS, KEGG, HITDESC, PFAM
from edl.blastm8 import add_hit_table_arguments
add_hit_table_arguments(parser, defaults, flags=['format',
'filter_top_pct',
'sort'
])
# specific to pathway parsing:
pgroup = parser.add_argument_group(
"Pathway Arguments",
"These arguments control the mapping of hits to gene "
"function heirarchies like KEGG or SEED""")
pgroup.add_argument(
"-m",
"--mapFile",
dest="mapFile",
default=defaults.get(
"mapFile",
None),
metavar="MAPFILE",
help="Location of file containing table of with db hit name as "
"first column and geneIDs (Knumber) in second column.")
pgroup.add_argument(
"-M",
"--mapStyle",
default='auto',
choices=[
'auto',
'kegg',
'tab',
'seed'],
help="What type of mapping file are you using: simple tab "
"separated list of IDs and kos/subsystems/domains, the "
"genes_ko.list file from KEGG (which adds ko: to the K "
"numbers and can have multiple records for each gene id), "
"or the 3 column file from SEED. By default, this script "
"will inspect the file and guess, but you can force 'kegg', "
"'seed' or 'tab' with this argument.")
default = defaults.get('tab_map_delim', None)
pgroup.add_argument("--tab_map_delim",
default=default,
help=("Delimiter to parse multiple assignments in "
"map from ids to ko/path/fam. Only used for "
"tabular mapping tables. Defaults to {}"
.format(str(default))))
pgroup.add_argument(
"-p",
"--parseStyle",
default=defaults.get(
"parseStyle",
HITID),
choices=[
ACCS,
GIS,
KEGG,
HITID,
HITDESC,
PFAM],
help="What should be parsed from the hit table: accessions('accs'), "
"'gis', K numbers in description ('kegg'), the full hit "
"name('hitid'), or the full hit description('hitdesc'). "
"(defaults to '%s')" % (defaults.get("parseStyle",
HITID)))
pgroup.add_argument(
"-C",
"--countMethod",
dest="countMethod",
default=defaults.get(
"countMethod",
"first"),
choices=choices.get(
'countMethod',
('first',
'most',
'all',
'consensus')),
help=helps.get(
"countMethod",
"How to deal with counts from multiple hits. (first, most: "
"can return multiple hits, all: return every hit, consensus: "
"return None unless all the same). Do not use most or consensus "
"with more than one level at a time. Default is %s" %
(defaults.get(
"countMethod",
"first"))),
metavar="COUNTMETHOD")
if defaults.get("filter_for_path", False):
action = 'store_false'
default = True
        helpstr = 'Consider all hits. By default, only hits with path \
assignments are used.'
else:
action = 'store_true'
default = False
helpstr = 'Ignore hits with no entry in pathway map (-m). By default \
all hits are used and if the best hit(s) is(are) to sequences with no path, \
then the read will not be assigned to a path'
pgroup.add_argument(
"-r",
"--filter_for_path",
action=action,
dest="mappedHitsOnly",
default=default,
help=helpstr)
add_pathways_argument(pgroup, defaults)
parser.add_argument_group(pgroup)
def add_pathways_argument(parser, defaults={}):
parser.add_argument(
"-T",
"--heirarchyType",
default=defaults.get(
"heirarchyType",
'kegg'),
choices=[
'kegg',
'seed',
'cazy',
'cog',
'kegg_module'],
help="What kind of functional heirarchy to use. 'kegg', seed', "
"or 'cazy'. Defaults to '%s'" % (defaults.get("heirarchyType",
'kegg')))
parser.add_argument(
"-H",
"--heirarchyFile",
metavar="HEIRARCHY_FILE",
default=defaults.get(
'heirarchyFile',
None),
help="File containing pathway/subsystem/genefaimly heirarchy "
"(either ko or ko00001.keg for KEGG or susbsys.txt for SEED). "
"Defaults to %s" % (defaults.get('heirarchyFile', None)))
############
# Tests
############
def test():
import sys
if len(sys.argv) > 3:
loglevel = logging.DEBUG
else:
loglevel = logging.WARN
logging.basicConfig(stream=sys.stderr, level=loglevel)
logger.setLevel(logging.INFO)
kegg_nosetest(sys.argv[1], sys.argv[2])
def kegg_nosetest(ko_map, kegg_file):
global myAssertEq, myAssertIs
from edl.test import myAssertEq, myAssertIs
testReadKeggFile(kegg_file)
testParseGeneLink(ko_map)
# testParseGeneKOMap(ko_map)
# testReadKoFile(ko_map)
def testParseGeneLink(koFile):
gkmap = parseLinkFile(koFile)
myAssertEq(gkmap['ggo:101148121'], ['K16534'])
myAssertEq(gkmap['olu:OSTLU_15108'], ['K11126'])
myAssertEq(gkmap['ebt:EBL_c03070'], ['K02879'])
myAssertEq(gkmap['pec:W5S_4205'], ['K00363'])
myAssertEq(gkmap['buc:BU148'], ['K03101'])
myAssertEq(gkmap['smaf:D781_0330'], ['K06925'])
myAssertEq(gkmap['nkr:NKOR_05565'], ['K03524'])
def testReadKeggFile(keggFile):
kDmap = parse_keg_file(keggFile, 'DESCRIPTION')
myAssertEq(kDmap['K01623'],
'ALDO; fructose-bisphosphate aldolase, class I [EC:4.1.2.13]')
kPmap = parse_keg_file(keggFile, 'PATHWAY')
assert('K04519' in kPmap)
assert('K15634' in kPmap)
myAssertEq(kPmap['K03011'],
['00230 Purine metabolism',
'00240 Pyrimidine metabolism',
'03020 RNA polymerase',
"05016 Huntington's disease",
'05169 Epstein-Barr virus infection'])
k2map = parse_keg_file(keggFile, 2)
myAssertEq(k2map['K13810'][0].lower(), 'Carbohydrate Metabolism'.lower())
myAssertEq(k2map['K13810'][1].lower(), 'Overview'.lower())
myAssertEq(k2map['K00399'][0].lower(), 'Energy Metabolism'.lower())
myAssertEq(k2map['K00399'][1].lower(), 'Overview'.lower())
k3map = parse_keg_file(keggFile, 3)
myAssertEq(k3map['K13810'], [
'00010 Glycolysis / Gluconeogenesis [PATH:ko00010]',
'00030 Pentose phosphate pathway [PATH:ko00030]',
'00500 Starch and sucrose metabolism [PATH:ko00500]',
'00520 Amino sugar and nucleotide sugar metabolism [PATH:ko00520]',
'01230 Biosynthesis of amino acids [PATH:ko01230]',
])
myAssertEq(k3map['K00399'], [
'00680 Methane metabolism [PATH:ko00680]',
'01200 Carbon metabolism [PATH:ko01200]',
])
myAssertEq(k3map['K03404'], [
'00860 Porphyrin and chlorophyll metabolism [PATH:ko00860]',
])
k3mapQ = parse_keg_file(keggFile, '3')
for k in k3map.keys():
try:
myAssertEq(k3map[k], k3mapQ[k])
except AssertionError:
raise AssertionError(
"level 3 classes for %s do not match:\n%s\n%s" %
(k, k3map[k], k3mapQ[k]))
def testReadKoFile(koFile):
kPmap = parse_ko_file(koFile, 'PATHWAY')
assert('K00397' not in kPmap)
myAssertEq(kPmap['K00399'],
['ko00680 Methane metabolism',
'ko01200 Carbon metabolism'])
kEmap = parse_ko_file(koFile, 'EC')
myAssertEq(kEmap['K00397'], ['EC:1.8.99.-'])
myAssertEq(kEmap['K00399'], ['EC:2.8.4.1'])
if __name__ == '__main__':
test()
```
#### File: py-metagenomics/edl/records.py
```python
from edl.util import parse_list_to_set
def recordIterator(stream, separatorRE, idRE=None):
"""
Given:
        a file-like object (any iterator over strings)
1 or 2 regular expressions that define record boundaries
and identifiers
Return:
an iterator over records that returns a tuple of (id, [recordLines])
If only a separator given, it is assumed to match the record id
"""
recordId = None
recordLines = []
for line in stream:
m = separatorRE.search(line)
if m:
# is there a previous record?
if recordId is not None:
yield (recordId, recordLines)
recordId = None
recordLines = [line, ]
if idRE is None:
recordId = m.group(1)
continue
recordLines.append(line)
if idRE is not None:
m = idRE.search(line)
if m:
recordId = m.group(1)
if recordId is not None:
yield (recordId, recordLines)
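# Illustrative usage sketch (hypothetical file, not part of the original
# module): iterate FASTA records using a separator expression whose first
# group is the record id.
#
#     import re
#     fasta_sep = re.compile(r'^>(\S+)')
#     with open("reads.fasta") as handle:
#         for record_id, lines in recordIterator(handle, fasta_sep):
#             print(record_id, len(lines))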
def screenRecords(
stream,
separatorRE,
idRE=None,
keep=False,
screen_set=None,
screenFile=None):
"""
    uses recordIterator(stream, separatorRE, idRE) to parse input into records
uses screen_set (can be read from screenFile) to identify records
identified records are kept or skipped based on the value of keep
"""
if screen_set is None:
if screenFile is None:
raise Exception(
"Please supply a hash(Python map) or file of record keys")
else:
screen_set = parse_list_to_set(screenFile)
for (
recordId,
recordLines) in recordIterator(
stream,
separatorRE,
idRE=idRE):
screened = recordId in screen_set
if screened == keep:
for line in recordLines:
yield line
```
#### File: py-metagenomics/edl/test.py
```python
import logging
import sys
def myAssertEq(a, b):
myAssert((a == b), "%s is not equal to %s" % (str(a), str(b)))
def myAssertIs(a, b):
myAssert((a is b), "%s is not %s" % (str(a), str(b)))
def myAssert(test, msg):
if not test:
sys.stderr.write(msg + "\n")
raise AssertionError
```
#### File: jmeppley/py-metagenomics/screen_table.py
```python
import sys
import logging
from edl.util import *
def main():
import argparse
# set up CLI
usage = "usage: %prog -l LIST [OPTIONS] TABLE(S)"
description = __doc__
parser = argparse.ArgumentParser(description=description)
add_screen_arguments(parser, accs=True)
add_IO_arguments(parser)
parser.add_argument(
"-d",
"--delim",
dest="delim",
default="\t",
help="Input table delimiter (tab is default). If set to 'None', "
"split on any whitespace.",
metavar="DELIM")
parser.add_argument(
"-c",
"--col",
dest="col",
type=int,
default=0,
help="Column to screen (0 is default)",
metavar="INDEX")
add_universal_arguments(parser)
arguments = parser.parse_args()
setup_logging(arguments)
# allow funky characters in delim arguments
if arguments.delim == 'None':
arguments.delim = None
elif arguments.delim != '\t':
arguments.delim = bytes(
arguments.delim,
'utf-8').decode('unicode-escape')
logging.debug("Table delim: '%s'" % (arguments.delim))
if arguments.listDelim is not None:
arguments.listDelim = bytes(
arguments.listDelim,
'utf-8').decode('unicode-escape')
# get read list
logging.debug("List file: '%s'\nList delim: '%s'" %
(arguments.listFile, arguments.listDelim))
screen_set = get_screen_list(arguments, accs=arguments.accs)
logging.debug("Got list of %d reads" % (len(screen_set)))
if len(screen_set) > 0:
logging.debug("For example: %s" % (next(iter(screen_set))))
for (inhandle, outhandle) in inputIterator(arguments):
scanFileForReads(
screen_set,
inhandle,
arguments.keep,
outhandle,
arguments.delim,
arguments.col,
arguments.accs)
################
# Functions
################
def die(msg):
sys.stderr.write("%s\n" % msg)
sys.exit(1)
def scanFileForReads(reads, inhandle, keep, outhandle, delim, col, accs):
lineCount = 0
matchCount = 0
if keep:
logging.info("Keeping matched reads")
else:
logging.info("Discarding matched reads")
for line in inhandle:
lineCount += 1
read = line.rstrip('\r\n').split(delim)[col]
if accs:
read = parseAcc(read)
logging.debug("looking for %s in %s" % (read, line))
match = read in reads
if match == keep:
# write line if either
# - read matches list AND keep is True
# - read not in list AND keep is False
outhandle.write(line)
matchCount += 1
logging.info("Kept %d of %d lines" % (matchCount, lineCount))
if __name__ == "__main__":
main()
```
#### File: py-metagenomics/test/test_02_expressions.py
```python
from edl.expressions import *
def test_accession_re():
with open('test/data/sample.1.blastx.b50.m8') as F:
try:
for line in F:
acc = accessionRE.search(line).group(1)
except AttributeError:
# There should have been a match in every line of this file
assert False
test_data = {
'ref|YP_002498923.1|': 'YP_002498923',
'ref|YP_002498923.1': 'YP_002498923',
'gi|109900248|ref|YP_663503.1|': 'YP_663503',
'YP_663503.1': 'YP_663503',
}
for data, acc in test_data.items():
new_acc = accessionRE.search(data).group(1)
assert acc == new_acc
def test_fasta_re():
file_data = {
'test/data/test.gbk.faa': 3941,
'test/data/test.gbk.fna': 3941,
'test/data/createPrimerNextera.fasta': 2,
'test/data/createPrimerTruseq.fasta': 4,
'test/data/HOT_100_reads.8.fasta': 8,
'test/data/HOT_100_reads.fasta': 100,
}
count=0
for file_name, expected_count in file_data.items():
new_count = _count_re_hits(file_name, fastaRE)
assert new_count == expected_count
count+=1
assert count == len(file_data)
assert count == 6
def _count_re_hits(file_name, regex):
count=0
with open(file_name) as INF:
for line in INF:
if regex.search(line):
count+=1
return count
```
#### File: py-metagenomics/test/test_30_assembly.py
```python
from edl import assembly
import pandas
from math import floor
fasta_file = 'test/data/HOT_100_reads.fasta'
def test_get_stats_from_contigs():
stats = assembly.get_stats_from_contigs(fasta_file)
# its a data frame
assert isinstance(stats, pandas.core.frame.DataFrame)
# 100 x 2
assert stats.shape == (100,2)
# test a couple arbitrary values
assert stats.loc['000005_1741_3371','GC'] == 32.8125
assert stats.loc['000483_1123_3166','length'] == 269
def test_get_column_stats():
stats = assembly.get_stats_from_contigs(fasta_file)
data = stats['GC']
stats = assembly.get_column_stats(data)
assert floor(stats['mean'])==39
assert floor(stats['median'])==37
def test_contig_length_stats():
assert isinstance(assembly.calc_stats(fasta_file,return_type='report'),str)
data = assembly.calc_stats(fasta_file,return_type='data')
assert data['N75'] == 142
assert data['count'] == 100
assert data['mean'] == 157.97
```
#### File: jmeppley/py-metagenomics/translate_column.py
```python
import sys
import re
import os.path
import traceback
import logging
import argparse
from edl.util import *
def main():
# set up CLI
description = """
This script takes a tab delimited text table and creates a new column
from an existing one using an external translation table.
"""
parser = argparse.ArgumentParser(description=description)
add_IO_arguments(parser)
parser.add_argument(
"-f",
"--fillMissing",
dest="fill",
metavar="FILL",
help="Put FILL in column when value not in map. If not used, "
"entire line is skipped. If set to 'KEY', value in key "
"column is used."),
parser.add_argument("-m", "--mapFile", dest="mapFile",
metavar="MAPFILE", help="Location of mapping table.")
parser.add_argument(
"-c",
"--column",
dest="col",
type=int,
default=1,
help="Column number (first column is 1)",
metavar="COLUMN")
parser.add_argument(
"-C",
"--newColumn",
dest="newcol",
type=int,
default=None,
help="Column number to insert new column after. Default is the "
"after the source column. 0=>make it the first column. "
"-1=>make it the last column.",
metavar="COLUMN")
parser.add_argument(
"-D",
"--deleteColumn",
dest="delcols",
default=[],
action='append',
metavar='COLUMN',
help="Delete this column (starting at 1, after new column "
"inserted). May be used multiple times for multiple columns")
# log level and help
add_universal_arguments(parser)
arguments = parser.parse_args()
setup_logging(arguments)
logging.info("Value map from: " + arguments.mapFile)
logging.debug("Fill: '%s'" % (arguments.fill))
translation = parseMapFile(arguments.mapFile)
for (inhandle, outhandle) in inputIterator(arguments):
# setup some counters
ncols = 0
total_lines = 0
skipped_lines = 0
lines_kept = 0
first_invalid_line = 0
invalid_line = None
# loop over lines
for i, line in enumerate(inhandle):
total_lines += 1
line = line.rstrip('\r\n')
if not line or line.startswith('#'):
skipped_lines += 1
continue
try:
cells = line.split('\t')
if ncols == 0:
# count columns and check requested column number
ncols = len(cells)
if arguments.col > ncols:
sys.exit("first line has fewer columns (%d) "
"than requested column number(%d)!" %
(ncols, arguments.col))
# get value from column
value = cells[arguments.col - 1]
if value in translation:
newCol = translation[value]
else:
if arguments.fill is not None:
if arguments.fill == 'KEY':
newCol = value
else:
newCol = arguments.fill
else:
logging.debug(
"skipping value not in translation: %s" %
(value))
skipped_lines += 1
continue
# insert new value
if arguments.newcol is None:
cells.insert(arguments.col, newCol)
elif arguments.newcol < 0 or arguments.newcol >= ncols:
cells.append(newCol)
else:
cells.insert(arguments.newcol, newCol)
# perform any requested column deletions
for delcol in sorted(
[int(c) for c in arguments.delcols], reverse=True):
cells.pop(delcol - 1)
new_line = '\t'.join(cells)
# print >> outhandle, new_line
print(new_line, file=outhandle)
lines_kept += 1
except Exception:
logging.warn(
"Unexpected error (%s): %s" %
(sys.exc_info()[0], sys.exc_info()[1]))
logging.warn(traceback.format_tb(sys.exc_info()[2]))
logging.warn('Skipping %s' % (line))
skipped_lines += 1
if not invalid_line:
first_invalid_line = i + 1
invalid_line = line
# set insertion column for logging
if arguments.newcol is None:
inserted = arguments.col + 1
elif arguments.newcol < 0 or arguments.newcol >= ncols:
inserted = ncols
else:
inserted = arguments.newcol + 1
valid_lines = total_lines - skipped_lines
message = "Processed: %s\nCreated column %d with mapfile %s \
applied to column %d" % (inhandle,
inserted,
arguments.mapFile,
arguments.col)
if valid_lines > 0:
message += '\nkept %d of %d lines.' % (lines_kept, total_lines)
else:
message += '\nNo valid lines found'
if skipped_lines > 0:
message += '\nSkipped %d lines' % (skipped_lines)
logging.info(message)
if invalid_line:
logging.warn(
'Invalid lines found! EG line #%d: "%s"' %
(first_invalid_line, invalid_line))
if (__name__ == '__main__'):
main()
``` |
{
"source": "jmeppley/snakemake_magic",
"score": 3
} |
#### File: snakemake_magic/prototype/snakemake_magic.py
```python
from __future__ import print_function
import os
import re
import tempfile
import shlex
from IPython.core.magic import (Magics, magics_class, line_magic,
cell_magic)
from snakemake.workflow import Workflow
import snakemake.workflow
from snakemake.io import load_configfile, _load_configfile
from snakemake.parser import parse
from snakemake import get_argument_parser, parse_resources, logger
rule_rexp = re.compile(r'^\s*@workflow.rule\s*\(name=\s*\'([^\']+)\'')
def get_rule_names(snakefile_name):
""" Run snakemake.parser.parse and use regexp to get the rule names
This is incredibly inefficient, but a little more stable than trying to
parse rules myself.
"""
for line in parse(snakefile_name)[0].split("\n"):
m = rule_rexp.match(line)
if m:
yield m.group(1)
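# Illustrative usage sketch (hypothetical Snakefile path, not part of the
# original module):
#
#     for rule_name in get_rule_names("Snakefile"):
#         print(rule_name)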
# To register magic, class MUST call this class decorator at creation time
@magics_class
class SnakemakeMagic(Magics):
"""
Defines two cell and one line magic:
* %%config: load a block of yaml or json into the config object
* %%include: load a block of snakefile code into the workflow
* %smake: attempt to create a target file
"""
workflow = None
tempfiles = {"cells":[]}
updated_rules = []
# TODO: add magic to reset workflow with more options
# EG: cluster script, threads, ...
def get_workflow(self):
""" make sure there is a workflow object
TODO:
* allow multiple workflows?
            * what kind of options to allow?
* allow options every time or just first?
"""
if self.workflow is None:
# create a new workflow object with some basic defaults
# create a blank file just so snakemake has something to hang on to
# (this file cannot be read from on some Windows systems...)
self.tempfiles['root'] = tempfile.NamedTemporaryFile('w')
self.workflow = Workflow(snakefile=self.tempfiles['root'].name)
return self.workflow
@line_magic
def snakemake(self, line):
""" execute the workflow with the given arguments and targets
        This uses the snakemake command line argument parser for now.
"""
if self.workflow is None:
raise Exception("Workflow has no data!")
parser = get_argument_parser()
args = parser.parse_args(list(shlex.split(line)))
logger.debug(repr(args))
resources = parse_resources(args)
targets = args.target
dryrun = args.dryrun
printshellcmds = args.printshellcmds
printreason = args.reason
printrulegraph = args.rulegraph
printd3dag = args.d3dag
touch = args.touch
forceall = args.forceall
        forcerun = set((args.forcerun if args.forcerun is not None else [])
                       + self.updated_rules)
prioritytargets = args.prioritize
until = args.until
omit_from = args.omit_from
stats = args.stats
nocolor = args.nocolor
quiet = args.quiet
keepgoing = args.keep_going
standalone = True
ignore_ambiguity = args.allow_ambiguity
lock = not args.nolock
unlock = args.unlock
force_incomplete = args.rerun_incomplete
ignore_incomplete = args.ignore_incomplete
list_version_changes = args.list_version_changes
list_code_changes = args.list_code_changes
list_input_changes = args.list_input_changes
list_params_changes = args.list_params_changes
summary = args.summary
detailed_summary = args.detailed_summary
print_compilation = args.print_compilation
verbose = args.verbose
debug = args.debug
notemp = args.notemp
keep_remote_local = args.keep_remote
greediness = args.greediness
latency_wait = args.latency_wait
benchmark_repeats = args.benchmark_repeats
keep_target_files = args.keep_target_files
updated_files = list()
if greediness is None:
greediness = 0.5 if prioritytargets else 1.0
else:
if not (0 <= greediness <= 1.0):
logger.error("Error: greediness must be a float between 0 and 1.")
return False
# TODO: set target, check workflow, execute workflow
workflow = self.get_workflow()
# TODO: keep track of updated rules to set force run
# HACK: execute() is leaving directory locked, so I'm disabling locks
lock = False
workflow.check()
success = workflow.execute(
targets=targets,
dryrun=dryrun,
touch=touch,
forceall=forceall,
forcerun=forcerun,
until=until,
omit_from=omit_from,
quiet=quiet,
keepgoing=keepgoing,
printshellcmds=printshellcmds,
printreason=printreason,
printrulegraph=printrulegraph,
printd3dag=printd3dag,
ignore_ambiguity=ignore_ambiguity,
stats=stats,
force_incomplete=force_incomplete,
ignore_incomplete=ignore_incomplete,
list_version_changes=list_version_changes,
list_code_changes=list_code_changes,
list_input_changes=list_input_changes,
list_params_changes=list_params_changes,
summary=summary,
latency_wait=latency_wait,
benchmark_repeats=benchmark_repeats,
wait_for_files=None,
detailed_summary=detailed_summary,
nolock=not lock,
unlock=unlock,
notemp=notemp,
keep_remote_local=keep_remote_local,
keep_target_files=keep_target_files,
updated_files=updated_files,
resources=resources,
)
if success:
del self.updated_rules[:]
return success
@cell_magic
def sinclude(self, line, cell):
"include this cell in workflow"
workflow = self.get_workflow()
# snakemake does not support blocks of text, so we create a temp
# file.
cell_snakefile = tempfile.NamedTemporaryFile('w', delete=False)
self.tempfiles['cells'].append(cell_snakefile.name)
cell_snakefile.write(cell)
cell_snakefile.close()
        # the first rule included becomes the workflow's first (default) rule
overwrite_first_rule = len(workflow._rules) == 0
# HACK: remove rules to be replaced:
# remove conflicting rules
for rule_name in get_rule_names(cell_snakefile.name):
workflow._rules.pop(rule_name, None)
self.updated_rules.append(rule_name)
# include snippet
workflow.include(cell_snakefile.name,
overwrite_first_rule=overwrite_first_rule,
)
os.unlink(cell_snakefile.name)
return "Workflow now has {} rules".format(len(workflow._rules))
@cell_magic
def sconfig(self, line, cell):
" Load JSON or YAML into workflow's config object "
workflow = self.get_workflow()
# create a temp file, so we can use snakemake.load_configfile
# it wouldn't be hard to roll our own to avoid this...
cell_config_file = tempfile.NamedTemporaryFile('w', delete=False)
cell_config_file.write(cell)
cell_config_file.close()
snakemake.workflow.config.update(load_configfile(cell_config_file.name))
logger.debug(repr(snakemake.workflow.config))
os.unlink(cell_config_file.name)
@line_magic
def _workflow(self, line):
"backdoor to inspect workflow object"
# this wouldn't be in the final version
return self.get_workflow()
@line_magic
def _reset_workflow(self, line):
self.workflow = None
# In order to actually use these magics, you must register them with a
# running IPython. This code must be placed in a file that is loaded once
# IPython is up and running:
ip = get_ipython()
# You can register the class itself without instantiating it. IPython will
# call the default constructor on it.
ip.register_magics(SnakemakeMagic)
``` |
{
"source": "jmeppley/stagecache",
"score": 2
} |
#### File: jme/stagecache/main.py
```python
import re
import logging
from jme.stagecache.target import get_target
from jme.stagecache.types import asset_types
from jme.stagecache.cache import Cache
LOGGER = logging.getLogger(name='main')
def cache_target(target_url, cache=None, atype=None, time=None, **kwargs):
"""
if file not cached, copy to cache.
return cached location
"""
LOGGER.debug("Starting up: c=%s, a=%s, t=%s",
cache, atype, time)
# initialize the Cache (turn directory name into object)
cache = Cache(cache)
# initialize the Target
if atype is None:
atype = 'file'
asset_type = asset_types.get(atype, None)
if asset_type is None:
raise Exception("No asset type defined for '{}!'".format(atype))
target = get_target(target_url, asset_type, cache.config)
return cache.add_target(target, cache_time=time, **kwargs)
def query_cache(**kwargs):
""" return state of cache:
total space used
free space
"""
cache = kwargs.get('cache', None)
return Cache(cache).inspect_cache(**kwargs)
```
#### File: jme/stagecache/manage.py
```python
import os, re
from jme.stagecache.cache import Cache
def delete_enough_files(gigs_to_free, suffix=None, cache_root=None):
""" detlete enough file to free up the requested number of GB of space
optinally specify a suffix and only delete matching files """
cache = Cache(cache_root)
# assets sorted by requested lock date
all_assets = sorted(
cache.metadata.iter_cached_files(),
key=lambda a: a.get_last_lock_date())
needed_space = gigs_to_free * pow(1024,3)
cum_sum = 0
count = 0
for a in all_assets:
if suffix is None or a.cached_target.endswith(suffix):
cum_sum += a.get_cached_target_size()[0]
            count += 1
            cache.remove_cached_file(a)
if cum_sum > needed_space:
break
return count
def find_unlisted_assets(cache_root=None):
""" Look in the configured cache to make sure all the files on disk
are in the cache's asset_list
returns: list of unlisted assets"""
cache = Cache(cache_root)
assets_on_disk = find_assets_in_dir(cache.cache_root)
listed_assets = cache.inspect_cache()['files']
return [a for a in assets_on_disk if a not in listed_assets]
def find_assets_in_dir(root_dir):
for current_root, dir_list, file_list in os.walk(root_dir):
for d in dir_list:
if d.startswith('.stagecache.'):
if d == '.stagecache.global':
continue
if os.path.exists(os.path.join(current_root, d, 'cache_lock')):
yield os.path.join(current_root, d[12:])[len(root_dir):]
def guess_type(asset, root):
local_asset = root + asset
if os.path.exists(local_asset):
if os.path.isfile(local_asset):
return "file"
else:
if re.search('seqdbs', asset):
return 'taxdb'
else:
if os.path.basename(asset) == 'lastdb':
return 'lastdb'
return None
```
#### File: jme/stagecache/ssh.py
```python
import logging
import os
import paramiko
from contextlib import contextmanager
LOGGER = logging.getLogger(name='ssh')
KEY_TYPES = [paramiko.DSSKey, paramiko.ECDSAKey,
paramiko.Ed25519Key, paramiko.RSAKey]
def generate_ssh_keys():
"""
loop over agent keys and private key files in ~/.ssh
"""
# start with the agent keys
agent = paramiko.Agent()
agent_keys = agent.get_keys()
LOGGER.debug("Trying %d ssh-agent keys", len(agent_keys))
for agent_key in agent_keys:
LOGGER.debug("Trying ssh-agent key: %s", agent_key.get_name())
yield agent_key
    # next, loop over files and try to load them with no passphrase
ssh_dir = os.path.join(os.path.expanduser("~"), '.ssh')
for keyfile in os.listdir(ssh_dir):
# crude filter: starts with id_ does not end with .pub
if not keyfile.startswith("id_"):
continue
if keyfile.endswith(".pub"):
continue
keypath = os.path.join(ssh_dir, keyfile)
## TODO: check for first line with:
# ---BEGIN XXX PRIVATE KEY---
LOGGER.debug("Trying key file: %s", keyfile)
# figure out what type of key by brute force
for keygen in KEY_TYPES:
try:
LOGGER.debug("Trying: " + repr(keygen))
pk = keygen.from_private_key_file(keypath)
yield pk
except paramiko.SSHException as e:
# try the next combo
continue
@contextmanager
def passwordless_sftp(host, username):
""" attempt to connect to host as user
try all the keys in order returned by generate_ssh_keys
return sftp session object using the first key that works
"""
for ssh_key in generate_ssh_keys():
try:
transport = paramiko.Transport(host)
transport.connect(username=username, pkey=ssh_key)
except paramiko.SSHException as e:
# try another key
continue
else:
LOGGER.debug("Connected to {}!".format(host))
sftp = paramiko.SFTPClient.from_transport(transport)
yield sftp
sftp.close()
transport.close()
break
else:
# nothing worked
raise Exception("Could not connect! Rerun with -d to get more info")
```
#### File: jme/stagecache/text_metadata.py
```python
import logging
import os
import time
import stat
from contextlib import contextmanager
LOGGER = logging.getLogger(name='metadata')
def get_cached_target(cache_root, target_path):
return os.path.abspath(cache_root + target_path)
def makedirs(path, mode=509):  # 509 == 0o775 (rwxrwxr-x)
if not os.path.exists(path):
makedirs(os.path.dirname(path), mode)
try:
os.mkdir(path)
os.chmod(path, mode=mode)
except FileExistsError as f_e_e:
# competing threads?
pass
class Lockable():
def __init__(self, cache):
self.umask = cache.config['cache_umask']
self.umask_dir = self.umask + 0o111
@contextmanager
def lock(self, sleep_interval=3, force=False, dry_run=False):
"""
Aquire and relase lock as a context manager.
EG:
with target.lock():
...
see get_write_lock for arguments
"""
try:
self.get_write_lock(sleep_interval, force, dry_run)
yield None
LOGGER.debug('Done with lock...')
finally:
# only release lock if it was NOT a dry run
if not dry_run:
self.release_write_lock()
def get_write_lock(self, sleep_interval=3, force=False, dry_run=False):
""" mark file as in progress (wait for existing lock) """
LOGGER.debug('Creating lock...')
if os.path.exists(self.write_lock):
if force:
os.remove(self.write_lock)
if dry_run:
return
LOGGER.info('Waiting for lock...')
LOGGER.debug("force is "+ str(force))
while os.path.exists(self.write_lock):
time.sleep(sleep_interval)
if dry_run:
return
with open(self.write_lock, 'wt') as LOCK:
LOCK.write('locked')
os.chmod(self.write_lock, self.umask)
def release_write_lock(self):
""" remove in_progress mark """
LOGGER.debug('Releasing lock (%s)...', self.write_lock)
try:
os.remove(self.write_lock)
except:
pass
class TargetMetadata(Lockable):
def __init__(self, cache, target_path, atype):
super().__init__(cache)
self.cache_root = os.path.abspath(cache.cache_root)
self.target_path = target_path
self.atype = atype
self.cached_target = get_cached_target(self.cache_root,
self.target_path,
)
cache_dir, cache_name = os.path.split(self.cached_target)
self.md_dir = os.path.join(cache_dir, '.stagecache.' + cache_name)
if not os.path.exists(self.md_dir):
makedirs(self.md_dir, mode=self.umask_dir)
self.write_lock = os.path.join(self.md_dir, 'write_lock')
LOGGER.debug("""created TargetMetadata:
cache_root=%s
target_path=%s
cached_target=%s
cache_dir=%s
md_dir=%s
write_lock=%s""",
self.cache_root, self.target_path, self.cached_target,
cache_dir, self.md_dir, self.write_lock)
def get_md_value(self, md_type, delete=False):
""" returns mtime of md file and int value from file """
md_file = os.path.join(self.md_dir, md_type)
if not os.path.exists(md_file):
# file not in cache!
return (0, None)
mtime = os.path.getmtime(md_file)
with open(md_file, 'rt') as md_handle:
value = int(md_handle.readlines()[0].strip())
if delete:
os.remove(md_file)
return value, mtime
def set_md_value(self, md_type, value):
""" writes value to md file """
md_file = os.path.join(self.md_dir, md_type)
if os.path.exists(md_file):
self.catalog(md_type)
with open(md_file, 'wt') as SIZE:
SIZE.write(str(int(value)))
os.chmod(md_file, self.umask)
def catalog(self, md_type):
""" archives old md and returns value """
log_file = os.path.join(self.md_dir, 'log')
value, mtime = self.get_md_value(md_type, delete=True)
with open(log_file, 'at') as LOG:
LOG.write("\t".join((
md_type,
str(mtime),
time.ctime(mtime),
str(value),
)) + "\n")
os.chmod(log_file, self.umask)
return value
def get_cached_target_size(self):
""" returns size and date """
return self.get_md_value('size')
def set_cached_target_size(self, size):
""" writes size to file """
self.set_md_value('size', size)
def get_last_lock_date(self):
""" returns the most recent lock end date """
lock_date = self.get_md_value('cache_lock')[0]
return lock_date
def set_cache_lock_date(self, date):
""" writes new expiration date to file """
self.set_md_value('cache_lock', date)
def is_lock_valid(self):
""" checks if lock date has passed """
lock_date = self.get_last_lock_date()
return lock_date > time.time()
def remove_target(self):
""" archive metadata for this asset """
self.catalog('cache_lock')
return self.catalog('size')
class CacheMetadata(Lockable):
def __init__(self, cache):
super().__init__(cache)
self.cache = cache
self.md_dir = os.path.abspath(
os.path.join(self.cache.cache_root, '.stagecache.global')
)
self.write_lock = os.path.join(self.md_dir, "write_lock")
self.asset_list = os.path.join(self.md_dir, "asset_list")
if not os.path.exists(self.md_dir):
makedirs(self.md_dir, self.umask_dir)
LOGGER.debug("""created CacheMetadata:
cache_root=%s
md_dir=%s
write_lock=%s""",
self.cache.cache_root, self.md_dir, self.write_lock)
def iter_cached_files(self, locked=None):
""" return list of assets with sizes and lock dates """
LOGGER.debug("Checking asset list: %s", self.asset_list)
for target_path, atype in self.list_assets():
target_metadata = TargetMetadata(self.cache,
target_path,
atype)
if locked is None or target_metadata.is_lock_valid() == locked:
yield target_metadata
def list_assets(self):
""" return list of path, type tuples in cache """
LOGGER.debug("Fetching asset list: %s", self.asset_list)
if os.path.exists(self.asset_list):
asset_list = list()
with open(self.asset_list) as assets:
for asset_line in assets:
asset_line = asset_line.strip()
if len(asset_line) == 0:
continue
asset = tuple(a.strip() for a in asset_line.split('\t'))
if len(asset) != 2:
raise Exception("Asset tuple is NOT length 2!\n%r" % (asset,))
asset_list.append(asset)
LOGGER.debug("Found %d assets in %s",
len(asset_list),
self.asset_list,
)
return asset_list
else:
return []
def remove_cached_file(self, target_metadata):
""" remove record of cached file, return size """
count = 0
# read asset list
asset_list = self.list_assets()
# write new (edited) asset list
with open(self.asset_list, 'wt') as assets:
for target_path, atype in asset_list:
if target_path != target_metadata.target_path:
assets.write(target_path + "\t" + atype + "\n")
else:
count += 1
os.chmod(self.asset_list, self.umask)
if count == 0:
LOGGER.error("No match for " + target_metadata.target_path)
raise Exception("Error recording assets")
if count > 1:
LOGGER.warning("Found {} listings for {}".format(count,
target_metadata.target_path))
return target_metadata.remove_target()
def add_cached_file(self, target_metadata, target_size, lock_end_date):
""" add record of asset """
# add to global md
paths_in_cache = set(a[0] for a in self.list_assets())
if target_metadata.target_path not in paths_in_cache:
LOGGER.debug("%s not in %s, adding...",
target_metadata.target_path,
paths_in_cache)
# add to list if not there yet
with open(self.asset_list, 'at') as assets:
assets.write(target_metadata.target_path + "\t" \
+ target_metadata.atype + "\n")
os.chmod(self.asset_list, self.umask)
added_to_list = True
else:
LOGGER.debug("%s alread in asset list",
target_metadata.target_path)
added_to_list = False
# add file specific md
target_metadata.set_cached_target_size(target_size)
target_metadata.set_cache_lock_date(lock_end_date)
return added_to_list
```
#### File: jme/stagecache/util.py
```python
import logging
import getpass
import os
import re
from math import log
from collections import namedtuple
from datetime import datetime, timezone
power = pow
LOGGER = logging.getLogger(name='util')
LN_BASE = log(power(1024, 1/3))
def human_readable_bytes(byt):
""" fixed version of https://stackoverflow.com/a/17754143/663466
hybrid of https://stackoverflow.com/a/10171475/2595465
with https://stackoverflow.com/a/5414105/2595465 """
# return bytes if small
if byt <= 99:
return str(int(byt))
magnitude = int(log(abs(byt)) / LN_BASE)
if magnitude > 19:
float_fmt = '%i'
illion = 20 // 3
else:
mag3 = (magnitude+1) % 3
float_fmt = '%' + str(mag3) + "." + str(3-mag3) + 'f'
illion = (magnitude + 1) // 3
format_str = float_fmt + ['', 'K', 'M', 'G', 'T', 'P', 'E'][illion]
return (format_str % (byt * 1.0 / (1024 ** illion))).lstrip('0')
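# Illustrative examples (approximate formatting, not part of the original
# module):
#
#     human_readable_bytes(99)           # '99'
#     human_readable_bytes(1536)         # roughly '1.50K'
#     human_readable_bytes(3 * 1024**3)  # roughly '3.00G'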
def path_up_to_wildcard(full_path):
""" If a given path has a wildcard placeholder ( eg {sample} ),
return the last directory before that point """
path_fragment = full_path.split('{')[0]
if path_fragment == full_path:
return full_path
    if path_fragment.endswith(os.path.sep):
return path_fragment[:-1]
return os.path.dirname(path_fragment)
URL_REXP = re.compile(r'^([A-Za-z]+)://(?:([^/@]+)@)?([^/]*)(/.+)$')
Remote = namedtuple('Remote', ['protocol', 'user', 'host', 'path'])
def parse_url(url, config, use_local=False, has_wildcards=False):
""" check if the string is a url or simple path
return None if it's a path
return named tuple with (protocol, user, host, path) if its a URL
"""
    ## Check 1: is it a fully formed URL? EG:
# SFTP://server.com/path/to/file
# file:///local/path
# SCP://[email protected]/some/path
match = URL_REXP.search(url)
if match:
remote = Remote(*match.groups())
if remote.user is None:
user = user_from_config(config, remote.host)
remote = Remote(remote.protocol, user,
remote.host, remote.path)
        if remote.protocol.lower() == 'file':
if len(remote.host) > 0:
raise Exception("file URL should have no host name")
return None
return remote
# skip check 2 if file exists and we're OK using local files
if use_local:
if os.path.exists(path_up_to_wildcard(url) \
if has_wildcards else url):
return None
## check 2: user configured remote maps
for custom_patterns in config \
.get('remote', {}) \
.get('mappings', []):
try:
mnt_rexp = re.compile(custom_patterns['pattern'])
host_repl = custom_patterns['host_repl']
path_repl = custom_patterns['path_repl']
except KeyError:
LOGGER.error("custom patterns must contain: pattern, host_repl, "
"and path_repl")
raise
except:
LOGGER.error("re cannot compile custom pattern: " +
custom_patterns['pattern'])
raise
LOGGER.debug("Checking remote pattern: %r", custom_patterns['pattern'])
if not mnt_rexp.search(url):
# skip to next pattern if this doesn't match
continue
try:
source_path = mnt_rexp.sub(path_repl, url)
except:
LOGGER.error("re cannot understand replacement expression " +
path_repl)
raise
try:
host = mnt_rexp.sub(host_repl, url)
except:
LOGGER.error("re cannot understand replacement expression " +
host_repl)
raise
user = user_from_config(config, host)
LOGGER.debug("INFERRED URL SFTP://%s@%s%s", user, host, source_path)
return Remote('SFTP', user, host, source_path)
## 3: just a regular, local file
# we ge here if there was no match above
return None
def user_from_config(config, host):
""" get username from config for this host. Fall back to local username """
local_user = getpass.getuser()
LOGGER.debug("config['remote']: %r", config.get('remote',{}))
default_user = config.get('remote', {})\
.get('SFTP', {}) \
.get('default', {}) \
.get('username', local_user)
user = config.get('remote', {}).get('SFTP', {}).get(host, {}).get('username', default_user)
return user
def get_time_string(seconds):
""" return a formatted time string """
return datetime.fromtimestamp(seconds,
timezone.utc) \
.astimezone() \
.strftime("%Y-%m-%d %H:%M:%S (%Z)")
```
#### File: test/nose/test_util.py
```python
from jme.stagecache.util import path_up_to_wildcard, URL_REXP
def test_path_up_to_wildcard():
" should return last full dir name before first bracket "
assert path_up_to_wildcard('/mnt/server/volume/project/file.{suffix}') \
== '/mnt/server/volume/project'
assert path_up_to_wildcard('/path/to/data/{sample}/file.{type}.ext') \
== '/path/to/data'
assert path_up_to_wildcard('/simple/path/file.ext') \
== '/simple/path/file.ext'
def test_url_rexp_1():
m = \
URL_REXP.search('SFTP://[email protected]/remote/resource/lastdb')
assert m is not None
print(m.groups())
protocol, user, host, path = m.groups()
assert protocol == 'SFTP'
assert user == 'readonly'
assert host == 'test.hawaii.edu'
assert path.startswith('/')
def test_url_rexp_2():
m = \
URL_REXP.search('SFTP://test.berkeley.edu/remote/resource/lastdb')
assert m is not None
print(m.groups())
protocol, user, host, path = m.groups()
assert protocol == 'SFTP'
assert user is None
assert host == 'test.berkeley.edu'
assert path.startswith('/')
def test_url_rexp_3():
m = \
URL_REXP.search('file:///remote/resource/lastdb')
assert m is not None
print(m.groups())
protocol, user, host, path = m.groups()
assert protocol == 'file'
assert user is None
assert host == ''
assert path.startswith('/')
``` |
{
"source": "jmeppley/workflows",
"score": 3
} |
#### File: workflows/python/annotate.py
```python
import os
def get_db_types(config):
""" loop over DBs and:
* identify all gene family dbs
* identify all taxonomic dbs
"""
gene_family_dbs = []
taxdbs = []
for db in config["dbs"]:
db_type = config["dbs"][db].get("type", "gene")
if db_type[0:3] == "tax":
taxdbs.append(db)
elif db_type == "gene":
gene_family_dbs.append(db)
return (gene_family_dbs, taxdbs)
def get_last_alg(dbformat, extension):
"""
right now looks for last db type (lastp or lastn) and extension (faa or
not) and returns lastp, lastx, or lastn.
Support for other dbs can be added on request.
"""
if dbformat == "lastp":
if extension == "faa":
search_alg = "lastp"
else:
search_alg = "lastx"
elif dbformat == "lastn":
if extension == "faa":
raise Exception(
"I'm sorry, I don't know how to search for faa "
"sequences in a lastp database!"
)
else:
search_alg = "lastn"
elif dbformat == "bwadb":
search_alg = "bwa.sam"
elif dbformat in ["dmnd", "diamond"]:
if extension == "faa":
search_alg = "dmndp"
else:
search_alg = "dmndx"
else:
raise Exception(
("I'm sorry, but the database format '{}' is not yet " "supported").format(
dbformat
)
)
return search_alg
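# Illustrative examples (not part of the original module):
#
#     get_last_alg("lastp", "faa")    # -> 'lastp'  (protein query vs protein db)
#     get_last_alg("lastp", "fasta")  # -> 'lastx'  (nucleotide query vs protein db)
#     get_last_alg("lastn", "fasta")  # -> 'lastn'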
def get_db_dot_fmt_strings(db_list, config, query_extension="fasta"):
"""
    """
    Return a list of strings that are "{db}.{format}", where db is the name
    of the database and format is the extension generated by the search
    (eg lastx, or tbl). There is a special case for fragmented HMM dbs where
    we need to add ".dbatch" to the format.
    """
"""
strings = []
for d in db_list:
db_data = config["dbs"][d]
format = db_data.get("format", "tbl")
if format.startswith("last"):
format = get_last_alg(format, query_extension)
if "frags" in db_data and int(db_data["frags"]) > 1:
format = format + ".dbatch"
strings.append("{}.{}".format(d, format))
return strings
def get_hit_table_name_from_wildcards_db(wildcards, config):
"""
Return the hit table name based on the db name using the db config info
"""
db = wildcards.db
db_format = config["dbs"][db].get("format", "hmmer")
if db_format == "hmmer":
if "frags" in config["dbs"][db]:
template = "{name_root}.vs.{db}.tbl.dbatch"
else:
template = "{name_root}.vs.{db}.tbl"
elif db_format == "lastdb":
template = "{name_root}.vs.{db}.lastp"
else:
# Don't know what to do:
raise Exception("Unknown database format for {}: {}".format(db, db_format))
name_root = config["annotation_hit_table_map"].get(
wildcards.annotation_prefix, wildcards.annotation_prefix
)
return template.format(name_root=name_root, **wildcards)
def get_db_assignment_params(wildcards, config):
"""
return the params needed to turn hits from the given db (wildcards.db) into gene family assignments
using the assign_paths.py script
"""
assign_type = config["dbs"][wildcards.db].get("assign_type", "hitid").lower()
if assign_type == "kegg":
return "-p hitid -C first -M kegg -m %s.kos" % (
config["dbs"].get("KEGG", {"path": ""})["path"]
)
if assign_type == "pfam":
return "-p pfam -C all"
return "-C first -p hitid"
def get_path_file(config, db):
""" return the ko map for KEGG, NONE for antying else
if we add support for a new orthology, well need to add it here """
db_info = config["dbs"][db]
if db_info.get("assign_type", "hitid").lower() == "kegg":
return db_info["path"] + ".kos"
return []
def get_db_frag(config, db, N):
full_hmm = config["dbs"][db]["path"]
n_frags = config["dbs"][db]["frags"]
template = get_db_frag_template(full_hmm, n_frags)
return template.format(N=int(N))
def get_db_frag_template(full_hmm, n_frags):
n_frag_digits = 3 # unless we can get the hard coded 000 out of the rules
# n_frag_digits = len(str(n_frags))
hmm_dir = os.path.dirname(full_hmm)
frag_dir = os.path.join(hmm_dir, "frag_{}".format(n_frags))
hmm_base, hmm_ext = os.path.splitext(os.path.basename(full_hmm))
template = "{}{}{}.{{N:0{}d}}{}".format(
frag_dir, os.path.sep, hmm_base, n_frag_digits, hmm_ext
)
return template
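# Worked example (added): fragments live in a frag_<n> directory next to the HMM
# and the fragment number is zero-padded to three digits (the path is made up).
def _example_get_db_frag_template():
    template = get_db_frag_template("/dbs/pfam/Pfam-A.hmm", 4)
    print(template.format(N=2))  # e.g. /dbs/pfam/frag_4/Pfam-A.002.hmm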
def get_db_frags(full_hmm, n_frags):
template = get_db_frag_template(full_hmm, n_frags)
    return [template.format(N=N) for N in range(1, n_frags + 1)]
def get_tax_files(config, database):
db_path = config["dbs"][database]["path"]
db_dir = os.path.dirname(db_path)
return [db_path + ".tax", db_dir + "/nodes.dmp", db_dir + "/names.dmp"]
```
#### File: workflows/python/gene_catalog.py
```python
import re
import os
import json
import pandas
from Bio import SeqIO
from edl import taxon as edltaxon, util, hits as edlhits, blastm8, kegg
try:
from python.common import parse_stats
from snakemake import logger
except:
# if running as a script
from common import parse_stats
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger()
REFSEQ = "refseq"
GTDB = "GTDB"
KEGG = "kegg"
####
# RefSeq
#
# The following methods generate annotations using a RefSeq hit table
# Regular Expressions for parsing functional roles
desc_RE = re.compile(r"\s*\[[^\[]+$")
uninformative_RE = re.compile(
r"(:?hypothetical|predicted|conserved\sdomain)\s+protein",
)
extra_location_info_RE = re.compile(
r"^(?:contig)?\d+,\s*\S+_\S+,\s*(\S.+),\s*\d+\s*\-\s*\d+\s*$"
)
func_pref_RE = re.compile(r"(?:MULTISPECIES|PREDICTED):\s")
func_suff_RE = re.compile(r"\s\((?:plastid|plasmid|chloroplast|chromatophore)\)\s*$")
def get_function_trimmed(hit, desc_map, db_type):
"""
Given a RefSeq hit id return a function stripped of boilerplate
"""
if db_type == REFSEQ:
return func_pref_RE.sub(
"",
func_suff_RE.sub(
"",
                extra_location_info_RE.sub(
r"\1",
desc_RE.sub(
"",
desc_map[hit.hit])),
),
)
if db_type == GTDB:
return desc_map[hit.hit].split(None, 1)[-1]
return "NA"
# drop the accession version suffix
acc_suff_RE = re.compile(r"\.\d+$")
def translate_hit(self, hit):
hit.hit = acc_suff_RE.sub("", hit.hit)
return self.hit_translator.translateHit(hit)
# find the least common ancestor of multiple hits
def get_lca(hits, translate=lambda x: [x]):
orgs = []
for h in hits:
orgs.extend(translate(h))
if len(orgs) == 0:
return None
lca = orgs.pop()
while len(orgs) > 0:
lca = lca.getLCA(orgs.pop())
return lca
# Simplify the list of ranks
printed_ranks = [edltaxon.ranks[i]
for i in [3, 7, 11, 17, 21, 24]] + ["domain"]
major_ranks = [edltaxon.ranks[i] for i in [2, 3, 7, 11, 17, 21, 24, 27, 28]]
def get_major_rank(rank):
" return the highest major rank below or equal to the given rank "
rank_index = edltaxon.ranks.index(rank)
for mr in major_ranks:
if edltaxon.ranks.index(mr) >= rank_index:
return mr
def approximate_rank(taxon, use_major_ranks=True):
ret_rank = None
if taxon.rank != "no rank":
ret_rank = taxon.rank
else:
last_rank = edltaxon.ranks[0]
for rank in edltaxon.ranks:
if taxon.getAncestorClosestToRank(rank) != taxon:
ret_rank = last_rank
break
last_rank = rank
else:
return "Unknown"
if use_major_ranks:
return get_major_rank(ret_rank)
else:
        return ret_rank
class TaxDBGeneAnnotator:
def __init__(
self,
db_location,
db_type=REFSEQ,
taxid_delim=None,
bad_refs=set(),
genome_clades={},
**kwargs
):
self.rsdb = db_location
self.parse_db_metadata(taxid_delim, db_type)
self.set_m8_params(**kwargs)
self.set_bad_refs(bad_refs)
def set_m8_params(self, **kwargs):
kwargs.setdefault("format", blastm8.BLASTPLUS)
kwargs.setdefault("top_pct", 0.0)
self.m8_params = edlhits.FilterParams(**kwargs)
def set_bad_refs_from_file(self, bad_ref_file, **kwargs):
"""
Use util.parse_list_to_set() to generate list of bad_refs
to exclude during annotation
"""
self.set_bad_refs(util.parse_list_to_set(bad_ref_file, **kwargs))
def set_bad_refs(self, bad_refs):
self.bad_refs = bad_refs
try:
self.m8_params.bad_refs = bad_refs
except:
logger.warn("Can't set bad refs for param")
def set_genome_clades(self, genome_clades):
self.genome_clades = genome_clades
def set_genome_clades_from_file(self,
genome_clade_file,
column="Clade",
**kwargs):
kwargs.setdefault("index_col", 0)
genome_data = pandas.read_csv(genome_clade_file, **kwargs)
self.set_genome_clades(genome_data[column])
def parse_db_metadata(self, taxid_delim, db_type):
"""
Given a refseq database in my style, parse the associated map files
"""
# taxonomy
rsdb_dir = os.path.split(self.rsdb)[0]
self.taxonomy = edltaxon.readTaxonomy(rsdb_dir)
rsdb_taxid_map = self.rsdb + ".tax"
self.taxid_map = util.parseMapFile(
rsdb_taxid_map, valueDelim=taxid_delim, valueType=int
)
parse_style = edlhits.ACCS if db_type == REFSEQ else edlhits.HITIDS
self.hit_translator = edlhits.getHitTranslator(
hitStringMap=self.taxid_map,
parseStyle=parse_style,
taxonomy=self.taxonomy,
hitsAreObjects=True,
)
# descriptions
rsdb_desc_map = self.rsdb + ".ids"
self.desc_map = util.parseMapFile(rsdb_desc_map)
def annotate_genes_rs_prot(self,
hit_table,
annotation_table,
db_type=REFSEQ):
logger.info(
"Annotating "
+ db_type
+ " taxdb with "
+ hit_table
+ " and "
+ annotation_table
)
with open(annotation_table, "w") as tsv_out:
tsv_out.write(
"Gene\t"
+ "\t".join(printed_ranks)
+ "\tfunction\tmin pct ID\thit count"
+ "\ttop hit\ttop pct ID\ttop score\ttop desc\n"
)
for info in self.generate_gene_annotations_rs_prot(hit_table,
db_type):
(
gene,
lca_ranked,
function,
min_pctid,
hit_count,
top_hit,
top_pctid,
top_score,
top_desc,
) = info
tsv_out.write(
"%s\t%s\t%s\t%0.1f\t%d\t%s\t%0.1f\t%0.1f\t%s\n"
% (
gene,
"\t".join([lca_ranked.get(r, "")
for r in printed_ranks]),
function,
min_pctid,
hit_count,
top_hit,
top_pctid,
top_score,
top_desc,
)
)
def generate_gene_annotations_rs_prot(self, hit_table, db_type=REFSEQ):
logger.info("Annotating " + db_type + " taxdb with " + hit_table)
species_index = major_ranks.index("species")
genus_index = major_ranks.index("genus")
phylum_index = major_ranks.index("phylum")
total_genes = 0
total_hits = 0
with blastm8.InputFile(hit_table) as blast_m8:
for gene, hits in blastm8.filterM8Stream(
blast_m8, self.m8_params, return_lines=False
):
hits = list(hits)
total_hits += len(hits)
total_genes += 1
min_pctid = min([h.pctid for h in hits])
lca = get_lca(hits, self.hit_translator.translateHit)
lca_rank = approximate_rank(lca)
lca_ranked = {"domain":
lca.getAncestorClosestToRank("domain").name}
if lca_rank in major_ranks:
for r in range(phylum_index, species_index - 1, -1):
lca_ranked[major_ranks[r]] = \
lca.getAncestorClosestToRank(
major_ranks[r]
).name
if r <= major_ranks.index(lca_rank):
break
                # get a good functional annotation from the best hit(s)
# (that aren't hypothetical)
if db_type is not None:
fns_by_score = {}
for hit in hits:
f = get_function_trimmed(hit, self.desc_map, db_type)
fns_by_score.setdefault(hit.score, []).append(f)
for score in sorted(fns_by_score.keys(), reverse=True):
# only consider things with useful annotations
useful_functions = set()
for f in fns_by_score[score]:
if uninformative_RE.search(f) is None:
useful_functions.add(f)
if len(useful_functions) > 0:
functions = useful_functions
break
else:
functions = ["unknown"]
function = ";".join(functions)
else:
function = "NA"
top_hit = hits[0]
# description of top hit
top_desc = self.desc_map[top_hit.hit]
if db_type == GTDB:
# GTDB headers are too long, take sp name and func
top_desc = top_desc.split(";")[-1]
top_desc = re.sub(r"^s__", "", top_desc)
yield (
gene,
lca_ranked,
function,
min_pctid,
len(hits),
top_hit.hit,
top_hit.pctid,
top_hit.score,
top_desc,
)
logger.info("Parsed %d hits for %d genes" % (total_hits,
total_genes))
ko_hit_org_RE = re.compile(r"^([a-z]+):")
class KeggGeneAnnotator:
kegg20160201 = "/mnt/delong/seqdbs/KEGG/KeggGene.pep.20160201/lastdb"
def __init__(self, db_location=kegg20160201):
self.keggdb = db_location
self.parse_db_metadata()
self.m8_params = edlhits.FilterParams(
format=blastm8.BLASTPLUS, top_pct=5.0, sort="score"
)
def parse_db_metadata(self):
"""
Given a refseq database in my style, parse the associated map files
"""
# KO map
kegg_kos = "%s.kos" % (self.keggdb)
self.ko_map = kegg.parseLinkFile(kegg_kos)
# descriptions
kegg_ids = "%s.ids" % (self.keggdb)
self.desc_map = util.parseMapFile(kegg_ids)
def annotate_genes_kg(self, hit_table, annotation_table):
header = "Gene\tKO(s)"
annot_tuples = self.generate_gene_annotations_kg(hit_table)
write_tsv(annotation_table, annot_tuples, header=header)
def generate_gene_annotations_kg(self, hit_table):
with blastm8.InputFile(hit_table) as blast_m8:
for read, hits in blastm8.filterM8Stream(
blast_m8, self.m8_params, return_lines=False
):
hits = list(hits)
kos_so_far = []
org_scores = {}
for h in hits:
org = ko_hit_org_RE.search(h.hit).group(1)
score = h.score
if org_scores.get(org, -1) > score:
# only take top scoring hits from each organism
continue
else:
org_scores[org] = score
kos_so_far.extend(self.ko_map.get(h.hit, []))
# collect uniq hits, dropping "None" or None
uniq_kos = set([k for k in kos_so_far
if k not in [None, "None", ""]])
yield (read, ";".join(uniq_kos))
def write_tsv(out_file, data_tuples, sep="\t", header=None):
with open(out_file, "w") as out_handle:
if header is not None:
out_handle.write(header)
if not header.endswith("\n"):
out_handle.write("\n")
for data_tuple in data_tuples:
out_handle.write(sep.join(data_tuple) + "\n")
def parse_clusters(cluster_file):
"""
expects one line per cluster, tab separated:
cluster_1_rep member_1_1 member 1_2 ...
cluster_2_rep member_2_1 member_2_2 ...
"""
cluster_dict = {}
with open(cluster_file) as LINES:
for line in LINES:
genes = line.strip().split("\t")
rep = genes[0]
for gene in genes:
cluster_dict[gene] = rep
return cluster_dict
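# Minimal usage sketch (added): write a two-cluster table to a temp file and map
# each member gene back to its cluster representative.
def _example_parse_clusters():
    import tempfile
    with tempfile.NamedTemporaryFile("w", suffix=".tsv", delete=False) as tmp:
        tmp.write("geneA\tgeneB\tgeneC\n")
        tmp.write("geneD\tgeneE\n")
        path = tmp.name
    mapping = parse_clusters(path)
    assert mapping["geneC"] == "geneA" and mapping["geneE"] == "geneD"
    os.remove(path)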
def parse_bio_clusters(bio_json, out_tab):
""" convert bio cluster format from vsearch to our syle table """
with open(bio_json) as BIO:
data = json.load(BIO)
# build map from rep to other genes
clusters = {}
for row, column, value in data["data"]:
rep = data["rows"][row]["id"]
gene = data["columns"][column]["id"]
if rep == gene:
# make sure a cluster exists for this rep
clusters.setdefault(rep, [])
else:
# add gene to cluster for this rep
clusters.setdefault(rep, []).append(gene)
# write table
with open(out_tab, "wt") as TAB:
for rep, other_genes in clusters.items():
if len(other_genes) > 0:
TAB.write("{}\t{}\n".format(rep, "\t".join(other_genes)))
def parse_mmseq_clusters(mm_tab, out_tab):
""" reformat mmseqs cluster table to our style
input: every line maps rep to member
output: every non-single cluster listed starting w/rep
"""
with open(out_tab, "wt") as TAB:
with open(mm_tab) as MM:
prev_rep = None
gene_count = 0
cluster_count = 0
for line in MM:
rep, gene = line.strip().split("\t")
if rep != prev_rep:
if rep != gene:
raise Exception(
"expected first gene to be same "
" as rep. {} != {}".format(rep, gene)
)
gene_count = 1
prev_rep = rep
continue
gene_count += 1
if gene_count == 2:
# start writing, by ending previous line
if cluster_count != 0:
TAB.write("\n")
cluster_count += 1
# ... and writing the rep
TAB.write(rep)
# ... add this gene
TAB.write("\t" + gene)
# end final cluster
TAB.write("\n")
def parse_cdhit_clusters(clstr_file, cluster_file):
""" reformat cdhit's .clstr file into simple table """
gene_expr = re.compile(r"\s>(\S+)\.\.\.\s\s*(.+)\s*$")
with open(cluster_file, "wt") as TAB:
with open(clstr_file) as CLSTR:
cluster = []
cluster_rep = None
for line in CLSTR:
if line.startswith(">"):
if cluster_rep is not None:
TAB.write(
"{}\t{}\n".format(
cluster_rep, "\t".join(g for g in cluster)
)
)
cluster = []
cluster_rep = None
continue
try:
gene, alignment = gene_expr.search(line).groups()
except AttributeError:
print("can't parse: \n" + line)
raise
if alignment.strip() == "*":
cluster_rep = gene
else:
cluster.append(gene)
if cluster_rep is not None:
TAB.write("{}\t{}\n".format(cluster_rep,
"\t".join(g for g in cluster)))
def merge_cluster_coverages(cluster_file, coverage_tables):
"""
given a cluster file mapping cluster reps to members
and a map from assemblies to coverage tables of member genes
generate a table of cluster coverages by assembly
"""
cluster_map = parse_clusters(cluster_file)
cluster_coverages = None
for assembly, gene_coverage_table in coverage_tables.items():
gene_coverages = pandas.read_csv(gene_coverage_table, index_col=0)
gene_coverages.columns = [
assembly,
]
gene_coverages["Cluster"] = [
cluster_map.get(g, g) for g in gene_coverages.index
]
_cluster_coverages = gene_coverages.groupby("Cluster").agg(sum)
if cluster_coverages is None:
cluster_coverages = _cluster_coverages
else:
cluster_coverages = cluster_coverages.join(_cluster_coverages,
how="outer")
return cluster_coverages
# deprecated
def normalize_coverages(input, contig_col="Contig", cov_col="MeanCov"):
"""
Loop over read stats files and create a normalization factor for
each assembly (number of reads/10M)
input: the input object from snakemake with two member file lists:
input.read_stats: the cleaned read stats from all assemblies
input.contig_covs: the contig stats from all assemblies
Loop over coverage files,
group by assembly (some assemblies have 2),
normalize by adjusted number of reads,
yield (contig, coverage) tuples
"""
read_counts = {}
for stats_file in input.read_stats:
assembly = re.sub(r"/stats/.+$", "", stats_file)
reads = parse_stats(stats_file)["reads"] / 10000000
read_counts[assembly] = read_counts.get(assembly, 0) + reads
last_assembly = None
coverages = None
if len(input.contig_covs) == 0:
raise Exception("Missing contig coverage files!")
for cov_file in sorted(input.contig_covs):
assembly = os.path.dirname(cov_file)
assembly = re.sub(r"^renamed_dir", "", assembly)
if assembly != last_assembly:
if coverages is not None:
for item in coverages.items():
yield item
coverages = None
last_assembly = assembly
_coverages = (
pandas.read_csv(
cov_file,
sep="\t",
index_col=0,
header=0,
usecols=[contig_col, cov_col],
)[cov_col]
/ read_counts[assembly]
)
if coverages is None:
coverages = _coverages
else:
coverages = coverages + _coverages
for item in coverages.items():
yield item
def main():
"""
Run as a script:
gene_catalog.py {hit_table} {lastdb_path} {output_file}
"""
import argparse
description = "annotate genes from refseq or kegg hits"
parser = argparse.ArgumentParser(description=description)
parser.add_argument(
"hit_table",
metavar="HIT_TABLE",
help="The table of hits in blast format"
)
parser.add_argument(
"lastdb_path",
metavar="LASTDB_PATH",
help="The path given to lastal"
)
parser.add_argument(
"output_table",
metavar="OUT_TABLE",
help="The file to write the anntations to"
)
parser.add_argument(
"-t",
"--type",
default=None,
metavar="TYPE",
choices=[REFSEQ, KEGG, GTDB],
help="The type of database. Either 'refseq'," + "'GTDB', or 'kegg'",
)
arguments = parser.parse_args()
# try to guess type from DB name/path
if arguments.type is None:
if re.search(r"kegg", arguments.lastdb_path, flags=re.I):
arguments.type = KEGG
else:
arguments.type = REFSEQ
logger.info("Parsing db type: " + arguments.type)
# annotate!
if arguments.type == KEGG:
annotator = KeggGeneAnnotator(arguments.lastdb_path)
annotator.annotate_genes_kg(arguments.hit_table,
arguments.output_table)
else:
annotator = TaxDBGeneAnnotator(arguments.lastdb_path)
annotator.annotate_genes_rs_prot(
arguments.hit_table, arguments.output_table, arguments.type
)
def process_for_mcl(
input_file, fasta_file, output_file, format="last", pctid=0.95, minbit=0.5
):
""" generates a table of graph edges from an all v all """
params = blastm8.FilterParams(format=format, pctid=pctid)
inputm8 = blastm8.M8Stream(input_file)
# fake all self bits
self_bits = {r.id: 2 * len(r) for r in SeqIO.parse(fasta_file, "fasta")}
with open(output_file, "wt") as OUTPUT:
for seq, hits in blastm8.filterM8Stream(inputm8,
params,
returnLines=False):
for hit in hits:
if hit.hit == seq:
# we've faked the self bits for now
continue
process_hit(hit, OUTPUT, self_bits, minbit)
def process_hit(hit, output_handle, self_bits, minbit):
""" following mcl parsing used by Anvio, filter hits to feed to mcl """
bitratio = hit.score / min(self_bits[hit.hit], self_bits[hit.read])
if bitratio < minbit:
return
output_handle.write("{}\t{}\t{}\n".format(hit.hit, hit.read, hit.pctid))
def get_longest_seq(clusters, genes, format="fasta"):
"""
    Given a cluster file from mcl where each line is a cluster with
tab separated gene ids.
And given a fasta file of gene sequences.
return the ID of the longest gene in each cluster
"""
gene_lengths = {g.id: len(g) for g in SeqIO.parse(genes, format)}
with open(clusters) as CLUSTERS:
for line in CLUSTERS:
member_genes = line.rstrip().split("\t")
yield sorted(member_genes,
reverse=True,
key=lambda g: gene_lengths[g])[0]
if __name__ == "__main__":
main()
```
#### File: workflows/python/standards.py
```python
import numpy
import pandas
def calculate_factors(counts_table, spiked_amounts, table_format=None):
""" calculate a scaling factor from two input tables """
# load counts of recovered standards from this sample
pandas_args = {
"header": None,
"index_col": None,
"skiprows": 1,
"names": ["Ref", "Counts"],
}
if table_format == "bbduk":
pandas_args.update({"skiprows": 4, "names": ["Ref", "Counts", "Pct"]})
count_table = pandas.read_table(counts_table, **pandas_args)
count_table["Ref"] = [r.split()[0] for r in count_table["Ref"]]
count_table.set_index("Ref", inplace=True)
# load spiked in amounts
spike_table = pandas.read_table(
spiked_amounts, header=None, index_col=0, names=["Ref", "Spiked"]
)
# get data as lists in same order
standard_list = sorted(list(spike_table.index))
counts = [count_table.Counts.get(s, 0) for s in standard_list]
spikes = [spike_table.Spiked[s] for s in standard_list]
# calculate the scale factor and save
scale_factor = get_best_fit(counts, spikes, force_intercept=True)[0]
return scale_factor
def get_best_fit(xd, yd, force_intercept=False, force_slope=False):
"""Return coeefs for a line of best fit"""
# Calculate trendline
if force_intercept:
# intercept of 0
x = numpy.array(xd)[:, numpy.newaxis]
slope, _, _, _ = numpy.linalg.lstsq(x, yd)
coeffs = [slope[0], 0]
if force_slope:
# We shouldn't get here, but let's just return the fixed values
coeffs = (1, 0)
elif force_slope:
# slope of 1: intercept is average of difference
intercept = numpy.mean(yd - xd)
coeffs = [1, intercept]
else:
coeffs = numpy.polyfit(xd, yd, 1)
return coeffs
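# Worked example (added): with y = 2x, the forced-intercept fit recovers a slope
# of ~2 and an intercept of 0.
def _example_get_best_fit():
    slope, intercept = get_best_fit([1.0, 2.0, 3.0], [2.0, 4.0, 6.0], force_intercept=True)
    print(round(slope, 3), intercept)  # 2.0 0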
```
#### File: workflows/python/tmatic.py
```python
from Bio import Seq
primer_templates = {
"truseq": """>PrefixTruA/1
AATGATACGGCGACCACCGAGATCTACACTCTTTCCCTACACGACGCTCTTCCGATCT
>PrefixTruA/2
GATCGGAAGAGCACACGTCTGAACTCCAGTCAC{edocrab2}ATCTCGTATGCCGTCTTCTGCTTG
>PrefixTruB/1
AATGATACGGCGACCACCGAGATCTACACTCTTTCCCTACACGACGCTCTTCCGATCT
>PrefixTruB/2
GATCGGAAGAGCACACGTCTGAACTCCAGTCAC{edocrab2}CGATCTCGTATGCCGTCTTCTGCTTG""",
"scripseq": """>PrefixScript/1
AATGATACGGCGACCACCGAGATCTACACTCTTTCCCTACACGACGCTCTTCCGATCT
>PrefixScript/2
CAAGCAGAAGACGGCATACGAGAT{edocrab2}GTGACTGGAGTTCAGACGTGTGCTCTTCCGATCT""",
"nextera": """>Prefix25/1
AATGATACGGCGACCACCGAGATCTACAC{barcode1}TCGTCGGCAGCGTCAGATGTGTATAAGAGACAG
>Prefix25/2
CAAGCAGAAGACGGCATACGAGAT{edocrab2}GTCTCGTGGGCTCGGAGATGTGTATAAGAGACAG""",
}
def get_primer_template(chemistry):
return primer_templates[chemistry]
def process_barcode_string(barcode_string):
barcodes = barcode_string.split(".")
barcode_data = {}
for N in [1, 2]:
barcode = barcodes[N - 1] if len(barcodes) >= N else barcodes[0]
barcode_data["barcode{N}".format(N=N)] = barcode
barcode_data["edocrab{N}".format(N=N)] = Seq.reverse_complement(barcode)
return barcode_data
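# Illustration (added): a single barcode is reused for both reads and the
# "edocrab" entries hold its reverse complement for the primer templates above.
def _example_process_barcode_string():
    data = process_barcode_string("ACGTAC")
    assert data["barcode1"] == data["barcode2"] == "ACGTAC"
    assert data["edocrab1"] == data["edocrab2"] == "GTACGT"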
def get_chemistry_barcodes(sample, config):
"""
Attempt to work out cleaning params for given sample.
Try the following:
* look in config[sample_data][{sample}] for
* sample_sheet
* chemistry and barcodes
* use Ns for barcode and use config.get(chemistry, 'scripseq')
"""
sample_data = config["sample_data"][sample]
if "barcode" in sample_data and "chemistry" in sample_data:
return sample_data["chemistry"], [sample_data["barcode"],]
if "barcodes" in sample_data and "chemistry" in sample_data:
return sample_data["chemistry"], sample_data["barcodes"]
if "sample_sheet" in sample_data:
return parse_sample_sheet(sample, sample_data["sample_sheet"])
else:
if config.get("chemistry", "scripseq") == "nextera":
return config["chemistry"], ["NNNNNN", "NNNNNN"]
return config.get("chemistry", "scripseq"), ["NNNNNN",]
def parse_sample_sheet(sample, sample_sheet):
"""
return chemistry and barcode for this sample
"""
# TODO
raise NotImplementedError(
"You'll have to set the barcode and chemsitrya" "manually for now!"
)
``` |
{
"source": "jmerc77/remix-of-Russian-Doll-Maze-Puzzle-Box",
"score": 3
} |
#### File: jmerc77/remix-of-Russian-Doll-Maze-Puzzle-Box/main.py
```python
import os
import configparser
import platform
from shutil import copy, rmtree
import shlex
import random as rd
import time
import numpy as np
import math
import re
from PIL import Image
import subprocess as sp
skip = -1 # debug: skip all shells up to here (0 to n to enable)
halt = -1 # debug: terminate skipping this shell (0 to n to enable)
USE_SCAD_THREAD_TRAVERSAL = False
STL_DIR = "_files"#name gets tacked on later...
PREV_DIR = "maze_previews"
#tries to get the path to openscad
def openscad():
try:
if OPENSCAD_PATH:
return OPENSCAD_PATH
except NameError:
pass
if os.getenv("OPENSCAD_PATH"):
return os.getenv("OPENSCAD_PATH")
if platform.system() == "Darwin":
return "/Applications/OpenSCAD.app/Contents/MacOS/OpenSCAD"
if platform.system() == "Windows":
# Note: Windows allows forward slashes now
return "C:/Program Files/OpenSCAD/openscad.com"
# Default to linux-friendly CLI program name
return "openscad"
#prepares folders
def prepwd():
# Linux and other systems that use PATH variables don't need an absolute path configured.
# if os.path.exists(openscad_exe) == False:
# input("ERROR: openscad path not found.")
# exit()
if os.path.exists(STL_DIR):
rmtree(STL_DIR)
os.mkdir(STL_DIR) # Default perms: world-writable
if os.path.exists(PREV_DIR):
rmtree(PREV_DIR)
os.mkdir(PREV_DIR) # Default perms: world-writable
#checks threading availability
def has_scad_threading():
cmd = [openscad(), "--help"]
out = str(sp.check_output(cmd)).lower()
if "--parallelism" in out:
return True
return False
#checks version
def scad_version():
cmd = [openscad(), "--version"]
ver=sp.Popen(cmd,stdout=sp.PIPE).stdout.readline().decode("utf-8")
ver=ver.replace("\r","").replace("\n","").replace("-",".").replace("OpenSCAD version ","").split(".")
for v in range(len(ver)):
ver[v]=re.sub('[^0-9]','', ver[v])
return int(ver[0]) if ver else ()
#runs the scad
def execscad(threadid=0):
global ext
print("Executing OpenSCAD script...")
cmd = [openscad()]
if USE_SCAD_THREAD_TRAVERSAL:
cmd.append("--enable=thread-traversal")
cmd.extend(
[
"-o",
os.path.join(os.getcwd(), STL_DIR, name + "_" + str(shell + 1) + "." + ext),
os.path.join(os.getcwd(), "make_shells.scad"),
]
)
sp.run(cmd)
#updates the possible ways to cut the maze
def udnbers(n, vi, nc, mw, mh, stag):
#with every tile
for y in range(0, mh):
for x in range(0, mw):
#shift the vertical edge
x3 = int((x + stag[y]) % mw)
#next tile coords
x2 = [x - 1, x + 1, x, x]
y2 = [y, y, y - 1, y + 1]
#look arround
for i in range(0, 4):
#did we shift the edge?
if stag[y] % mw > 0:
#shift the next tile coords too
x2[i] = int((x2[i] + mw) % mw)
else:
#constrain to bounds otherwise
if x2[i] < 0:
x2[i] = 0
if x2[i] > mw - 1:
x2[i] = mw - 1
#is this cuttable and not out of bounds?
if (
not ((x3 == 0 and i == 0) or (x3 == mh - 1 and i == 1))
and y2[i] > -1
and y2[i] < mh
):
#mark cuttable if we have not been there
n[x, y, i] = vi[int(x2[i]), int(y2[i])] == 0
else:
#mark not cuttable
n[x, y, i] = 0
#update count of cuttable tiles
nc[x, y] = len(np.argwhere(n[x, y].astype("int")))
#makes a maze
def genmaze(mw, mh, stag):
#where we have cut a path already
visited = np.zeros(mw * mh)
#number of possible ways to cut
nbercount = np.zeros(mw * mh)
#possible ways to cut
nbers = np.ones(mw * mh * 4)
#walls of the maze tiles
# walls are: 0=L 1=R 2=U 3=D
walls = np.ones(mw * mh * 4,dtype="int")
#start here
r = rd.randint(0, mw*mh-1)
#mark start as visited
#number of places we have cut a path already
vcount = 1
visited[r] = 1
#to make things easier
visited = visited.reshape([mw, mh])
nbers = nbers.reshape([mw, mh, 4])
nbercount = nbercount.reshape([mw, mh])
walls = walls.reshape([mw, mh, 4])
#update the possible ways to cut
udnbers(nbers, visited, nbercount, mw, mh, stag)
#loop until maze is completed
while vcount < (mw * mh):
#all places we can continue cutting
v = np.transpose(np.nonzero(np.logical_and(visited == 1, nbercount > 0)))
# choose a tile to cut from
if len(v) < 2:
r=0
else:
r = rd.randint(0, len(v) - 1)
c = v[r]
#keep cutting until can't or min_branch is reached
for i in range(min_branch):
# choose wall to cut
r = rd.randint(0, nbercount[c[0], c[1]] - 1)
n = np.argwhere(nbers[c[0], c[1]])[r]
# cut the wall
walls[c[0], c[1], n] = 0
#temp for the tile we are cutting
c2 = c
#the other side of the wall
if n == 0:
n2 = 1
c2[0] = c[0] - 1
elif n == 1:
n2 = 0
c2[0] = c[0] + 1
elif n == 2:
n2 = 3
c2[1] = c[1] - 1
else:
n2 = 2
c2[1] = c[1] + 1
#wrap horizontally
c2[0] = int((c2[0] + mw) % mw)
#mark as visited
visited[c2[0], c2[1]] = 1
#cut the other side
walls[c2[0], c2[1], n2] = 0
#update the possible ways to cut again
udnbers(nbers, visited, nbercount, mw, mh, stag)
#update the number of places we have cut a path already
vcount = np.sum(visited,dtype="int")
#prepare cut again...
c=c2
#...if we can. otherwise break the for.
if nbercount[c[0],c[1]]==0:
break
return walls
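#illustrative sketch (added, not called by the script): build a small stand-alone
#maze. genmaze() reads the module-level min_branch, normally set by readOpt().
def _example_genmaze():
    global min_branch
    min_branch = 3
    walls = genmaze(6, 6, np.zeros(6))
    #walls has shape (6, 6, 4); entries are 0=L 1=R 2=U 3=D and 0 means "open"
    print(walls.shape)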
#makes and writes the preview image
def preview(maze):
#a new image
im = Image.new("L", [2 * mw + 1, 2 * mh + 1], 0)
#start and end
im.putpixel((1 + ex * 2, 0), 255)#end
im.putpixel((1 + st * 2, mh * 2), 255)#start
for y in range(0, mh):
for x in range(0, mw):
#tile pos
imx = 1 + x * 2
imy = 1 + y * 2
#wall pixel coords
imnx = [imx - 1, imx + 1, imx, imx]
imny = [imy, imy, imy - 1, imy + 1]
#center of tile
im.putpixel((imx, imy), 255)
#check walls
for idx in range(0, 4):
#no wall?
if maze[x, y, idx] == 0:
#cut a hole!
im.putpixel((imnx[idx], imny[idx]), 255)
#fill in answer key
ans=ans_solver(maze,st,ex)
for y in range(0, mh):
for x in range(0, mw):
imx = 1 + x * 2
imy = 1 + y * 2
if [x,y] in ans:
im.putpixel((imx, imy), 128)
else:
im.putpixel((imx, imy), 255)
#transition shell maze 2?
if tpp == 2:
#save as maze 2
im.save(os.path.join(os.getcwd(), PREV_DIR, str(shell + 1) + "a.png"))
else:
#save as maze 1
im.save(os.path.join(os.getcwd(), PREV_DIR, str(shell + 1) + ".png"))
#for ans key in previews
def ans_solver(maze,s,e):
ret=[[s,mh],[s,mh-1]]
direction=1#r,u,l,d
x=s
y=mh-1
direction2wall=[1,2,0,3]#r,u,l,d -> l,r,u,d
direction2xy=[[1,0],[0,-1],[-1,0],[0,1]]#r,u,l,d
while x!=e or y>0:
#walls at x,y in the maze
here=maze[x,y]
#print(x,y,direction,4-np.sum(here))
if here[direction2wall[(direction+3)%4]]==0:
direction=(direction+3)%4
#change direction until no wall in case of front wall
if here[direction2wall[direction]]==1:
while here[direction2wall[direction]]==1:
#change direction
#print(direction,here[direction2wall[direction]])
direction=(direction+1)%4
x=(x+direction2xy[direction][0]+mw)%mw
y=y+direction2xy[direction][1]
#are we backtracking?
if x==ret[-2][0] and y==ret[-2][1]:
ret=ret[0:-1]
#print("backtracking")
else:
ret.append([x,y])
#print(ret)
return ret[1:]
#finds the lengths from a start to all ends of a maze
def solver(maze,s):
#start here
branches=[[s,mh-1,0,0,4]]#x,y,length,downcnt,last
#return value
ret=[]
#loop until return value is full
while len(ret)<mw:
#temporarlily store new branches here
temp=[]
#loop through all current branches
for branch in branches:
x=branch[0]
y=branch[1]
length=branch[2]
#count for how many times we go down toward start
downcnt=branch[3]
#must not back track.
last=branch[4]
#walls at x,y in the maze
here=maze[x,y]
#how many openings at x,y in maze
opencnt=4-np.sum(here)
#is this a posible end?
if y==0:
#include this length in return value.
ret.append(length)
#can we move on?
if opencnt>0:
                #move on but do not backtrack.
if here[0]==0 and last!=0:
#left
temp.append([(x+mw-1)%mw,y,length+1,downcnt,1])
if here[1]==0 and last!=1:
#right
temp.append([(x+1)%mw,y,length+1,downcnt,0])
if here[2]==0 and last!=2:
#up
temp.append([x,y-1,length+1,downcnt,3])
if here[3]==0 and last!=3:
#down
temp.append([x,y+1,length+1,downcnt+1,2])
#copy the new branches over the old branches
branches=temp.copy()
return ret
#chooses a maze path (start and end) based on difficulty
def choose_path(maze):
global st
global ex
#get path lengths...
lengths=[]
for s in range(mw):
#find the lengths from this start to all ends
lengths.append(solver(maze,s))
#get sorted indexes
sortedlengthidxs=np.argsort(np.asarray(lengths).flatten())
#choose one
chosen=sortedlengthidxs[int(difficulty/101*len(sortedlengthidxs))]
#assign start and end
st=chosen//mw#start
ex=chosen%mw#end
#make and write preview image
preview(maze)
#makes parts
def gen():
global shell
global d2
global mh
global mw
global i
global tpp
global maze_data
#are we done yet?
if shell < shells:
#debug halt
if shell >= halt and halt > -1:
return True
#is the next shell the transition?
if shell + 1 < shells and shell + 1 == tp and tpp < 1:
tpp = -1
#part status
if tpp < 1:
print("part: " + str(shell + 1))
#wall thickness
wt = mwt
#are we not in transitioning stage 2?
if tpp < 1:
#is this the first?
if shell == 0:
#set the diameter
d = (mw * us * p) / np.pi / 2 - wt - marge * 2
print("diameter:",d)
else:
#are we transitioning?
if shell == tp:
#keep the diameter the same
d = d2
elif (shell+1 == shells and shells==2):
d = d2 + us * td / 2 + wt + marge * 2
else:
#set the diameter
d = d2 + us * td + wt + marge * 2
print("diameter:",d)
#is the maze on the outside?
if i == 0:
#set the maze width
mw = int(math.ceil((d + us) * np.pi / us / p))
else:
#set the maze width
mw = int(math.ceil((d + us) * np.pi / us / p ))
#fix for tpp=1 (maze outside to inside)
if (shell!=tp or i==1):
#increase maze height
mh += 1
#extra height for lid
if shell+1==shells:
mh += 1
else:
#set the diameter
d = d2 + us * td + wt + marge * 2
print("diameter:",d)
#set the maze width
mw = int(math.ceil((d + us) * np.pi / us / p))
#fix for tpp=1 (maze outside to inside)
if tpp==2:
#increase maze height
mh += 1
# shift
stag = np.zeros(mh)
#is it a random shift?
if stagmode in (1, 2):
#loop through y
for y in range(0, mh):
#are we at end or shift mode is random?
if y == 0 or stagmode == 1:
#random shift
stag[y] = rd.randint(0, mh - 1)
else:
#random shift offset
stag[y] = stag[y - 1] + rd.randint(0, mh - 1)
#is it a twist shift?
elif stagmode == 3:
#twist it!
stag = np.multiply(np.arange(0, mh), stagconst).astype("int")
#do we even have a maze with this part?
if ((i == 0 and shell < shells - 1) or (i == 1 and shell > 0)) and tpp != 1:
# maze
marr = genmaze(int(mw), int(mh), stag)
#get the path we want
choose_path(marr)
#convert to string
matrix = []
for y in range(0, mh):
row = []
for x in range(0, mw * p):
x2 = x % mw
r = marr[x2, y, 1] == 0
u = marr[x2, y, 3] == 0
if u and r:
row.append("3")
elif u:
row.append("2")
elif r:
row.append("1")
else:
row.append("0")
matrix.append(",".join(row))
s = "[["+"],[".join(matrix)+"]];"
else:
#empty maze
s="[];"
#write the maze
if tpp < 1:
maze_num = 1
maze_data="\n".join(["maze"+str(maze_num)+"="+s,
"h"+str(maze_num)+"="+str(mh)+";",
"w"+str(maze_num)+"="+str(mw*p)+";",
"st"+str(maze_num)+"="+str(st)+";",
"ex"+str(maze_num)+"="+str(ex)+";",
""])
else:
maze_num = 2
maze_data+="\n".join(["maze"+str(maze_num)+"="+s,
"h"+str(maze_num)+"="+str(mh)+";",
"w"+str(maze_num)+"="+str(mw*p)+";",
"st"+str(maze_num)+"="+str(st)+";",
"ex"+str(maze_num)+"="+str(ex)+";",
""])
#non lid
base = 1
lid = 0
#is it the lid?
if shell > shells - 2:
#lid
lid = 1
base = 0
#no more to go
#how many are left to go
mos = shells - shell - 2
with open("shell_data.scad", "w") as shell_data:
shell_data.write(opt+"\n".join(["p="+str(p)+";",
"tpp="+str(tpp)+";",
"is="+str(shell)+";",
"os="+str(mos)+";",
"lid="+str(lid)+";",
"base="+str(base)+";",
"iw="+str(wt)+";",
"id="+str(d)+";",
"s="+str(us)+";",
"td="+str(td)+";",
"i="+str(i)+";",
"bd="+str(d + wt * 2 + us * td * 2)+";",
"m="+str(marge)+";",
""])+maze_data
)
#save diameter of this one for later
if shell < shells - 2:
d2 = d
#was this the transition shell?
if shell > 0 and shell < shells-1 and shell == tp and tpp < 1:
#get ready for transition stage 2
if i == 0: # double nub transition
tpp = 1
i = 1
else: # double maze transition
tpp = 2
i = 0
else:
tpp = 0
#are we done with this shell?
if tpp < 1:
#make it!
#debug skip
if shell >= skip or skip < 0:
execscad()
#on to the next
shell = shell + 1
#not done making parts
return False
else:
#all done!
return True
#reads opt.ini
def readOpt():
global shells
global marge
global us
global mh
global mw
global mwt
global i
global p
global tp
global STL_DIR
global stagmode
global stagconst
global difficulty
global min_branch
global ext
global name
global opt
global td
config = configparser.ConfigParser()
config.read("opt.ini")
if "DEFAULT" not in config or "MAZE" not in config:
print("ERROR: No DEFAULT and/or MAZE section in opt.ini; Must have both.\n")
exit(1)
mazeconfig=config["MAZE"]
looksconfig=config["LOOKS"]
embossconfig=config["EMBOSS"]
config = config["DEFAULT"]
version = scad_version()
if config.getboolean("o3mf") and version>=2019:
ext="3mf"
else:
ext="stl"
p = abs(config.getint("nubs")-2) + 2
shells = config.getint("levels") + 1
marge = config.getfloat("tolerance")
i = int(config.getboolean("maze_inside"))
us = config.getfloat("spacing")
td = config.getfloat("td")
if td<1.0:
td=1.0
if td>2.0:
td=2.0
mh = config.getint("units_tall")
mw = config.getint("units_wide")
mwt = config.getfloat("wall_thickness")
name = config.get("name")
STL_DIR=name+STL_DIR
#maze options
#seeding...
seed=mazeconfig.get("seed").replace("\r","").replace("\n","")
if not seed.isnumeric() or "\\" in seed or "." in seed:
# Make sure we have a fresh random seed
rd.seed()
else:
#use seed from ini
rd.seed(int(seed))
difficulty=abs(mazeconfig.getfloat("diff",100.0))
if difficulty>100:
difficulty=100
min_branch=mazeconfig.getint("min_branch",5)
if min_branch<1:
min_branch=5
stagmode = mazeconfig.getint("shift",1)
stagconst = 0
if stagmode == 3:
stagconst = abs(mazeconfig.getint("twist",1))
#options
opt=""
if looksconfig.getboolean("oldnubs",True):
opt+="oldnubs=1;\n"
else:
opt+="oldnubs=0;\n"
bs=looksconfig.getint("bs",10)
if bs<3:
bs=3
opt+="bs="+str(bs)+";\n"
bversion=abs(looksconfig.getint("bversion",2))%3
opt+="bversion="+str(bversion)+";\n"
if looksconfig.getboolean("lefty",True):
opt+="lefty=1;\n"
else:
opt+="lefty=0;\n"
#emboss
if embossconfig.getboolean("ense",True):
opt+="ense=1;\n"
else:
opt+="ense=0;\n"
opt+='se="'+embossconfig.get("se").replace('"','')+'";\n'
be=embossconfig.get("be").replace('"','')
if embossconfig.getboolean("enbe",True):
opt+="enbe=1;\n"
shells=len(be)
if embossconfig.getboolean("emboss_inside_only",True):
shells+=2
if shells==0:
shells+=2
if shells==1:
shells+=1
else:
opt+="enbe=0;\n"
opt+='be="'+be+'";\n'
tp = config.getint("transition_shell")
if tp > shells-1 or tp < 1:
tp = -1
if __name__ == "__main__":
#read opt.ini
opt=""
readOpt()
try:
#prep folders
prepwd()
# get scad version:
version = scad_version()
if version < 2015:
print("ERROR: invalid scad version. must be at least 2015.xx.xx .\n")
exit(1)
#do we have threading?
if has_scad_threading():
USE_SCAD_THREAD_TRAVERSAL = (
input("multi-threading available. use it(y/n)?").lower() == "y"
)
except FileNotFoundError:
print("ERROR: Could not find OpenSCAD: " + openscad()+"\n")
exit(1)
#init vars
st=0
ex=0
d2 = 0
shell = 0
tpp = 0
# make parts:
while not gen():
continue
print("done!")
``` |
{
"source": "jmercat/fast-transformers",
"score": 3
} |
#### File: fast_transformers/attention/causal_linear_attention.py
```python
import torch
from torch.nn import Module
from fast_transformers.causal_product import causal_dot_product
def elu_feature_map(x):
return torch.nn.functional.elu(x) + 1
def causal_linear(Q, K, V):
Q = Q.permute(0,2,1,3).contiguous()
K = K.permute(0,2,1,3).contiguous()
V = V.permute(0,2,1,3).contiguous()
V_new = causal_dot_product(Q, K, V)
return V_new.permute(0,2,1,3).contiguous()
class CausalLinearAttention(Module):
"""Implement causally masked attention using dot product of feature maps in
O(N D^2) complexity.
See fast_transformers.attention.linear_attention.LinearAttention for the
general concept of replacing the softmax with feature maps. In addition to
that, we also make use of the fact that causal masking is a triangular mask
which allows us to apply the masking and still compute the attention in O(N
D^2) complexity.
Arguments
---------
feature_map: callable, a callable that applies the feature map to the
last dimension of a tensor (default: elu(x)+1)
eps: float, a small number to ensure the numerical stability of the
denominator (default: 1e-6)
"""
def __init__(self, feature_map=None, eps=1e-6):
super(CausalLinearAttention, self).__init__()
self.feature_map = feature_map or elu_feature_map
self.eps = eps
def forward(self, queries, keys, values, attn_mask, query_lengths,
key_lengths):
# Apply the feature map to the queries and keys
Q = self.feature_map(queries)
K = self.feature_map(keys)
# Apply the key padding mask and make sure the attn_mask is a
# lower triangular causal mask
if not attn_mask.lower_triangular:
raise RuntimeError(("CausalLinearAttention only supports full "
"lower triangular masks"))
K = K * key_lengths.float_matrix[:, :, None, None]
# TODO: Shall we divide the Q and K with a relatively large number to
# avoid numerical instabilities in computing the denominator?
# We used to divide each with the max norm of all q and k but
# that seems relatively costly for a simple normalization.
# Compute the normalizers
Z = 1/(torch.einsum("nlhi,nlhi->nlh", Q, K.cumsum(1)) + self.eps)
# Compute the unnormalized result
V = causal_linear(
Q,
K,
values
)
return V * Z[:, :, :, None]
```
#### File: fast_transformers/builders/transformer_encoder_builder.py
```python
from functools import partial
from torch.nn import LayerNorm
from .base import BaseTransformerBuilder
from .common_encoder_builder import CommonEncoderBuilder
from .attention_builder import AttentionBuilder
from ..attention import AttentionLayer, FullAttention, \
LinearAttention, CausalLinearAttention, \
ClusteredAttention, ImprovedClusteredAttention, \
ImprovedClusteredCausalAttention, \
ReformerAttention, ConditionalFullAttention, \
ExactTopKAttention
from ..transformers import TransformerEncoder, \
TransformerEncoderLayer
class TransformerEncoderBuilder(BaseTransformerBuilder, CommonEncoderBuilder,
AttentionBuilder):
"""TransformerEncoderBuilder builds transformer encoders (duh).
This means that the module returned is going to be an instance of
fast_transformer.transformers.TransformerEncoder.
"""
def __init__(self):
CommonEncoderBuilder.__init__(self)
AttentionBuilder.__init__(self)
def __repr__(self):
return (
"TransformerEncoderBuilder.from_kwargs(\n"
" n_layers={!r},\n"
" n_heads={!r},\n"
" feed_forward_dimensions={!r},\n"
" query_dimensions={!r},\n"
" value_dimensions={!r},\n"
" dropout={!r},\n"
" activation={!r},\n"
" final_normalization={!r},\n"
" attention_type={!r},\n"
" softmax_temp={!r},\n"
" linear_feature_map={!r},\n"
" attention_dropout={!r},\n"
" clusters={!r},\n"
" bits={!r},\n"
" hash_bias={!r},\n"
" iterations={!r},\n"
" topk={!r},\n"
" chunk_size={!r},\n"
" rounds={!r},\n"
" masked={!r},\n"
" conditional_attention={!r},\n"
" length_limit={!r}\n"
")"
).format(
self.n_layers,
self.n_heads,
self.feed_forward_dimensions,
self.query_dimensions,
self.value_dimensions,
self.dropout,
self.activation,
self.final_normalization,
self.attention_type,
self.softmax_temp,
self.linear_feature_map,
self.attention_dropout,
self.clusters,
self.bits,
self.hash_bias,
self.iterations,
self.topk,
self.chunk_size,
self.rounds,
self.masked,
self.conditional_attention,
self.length_limit
)
def _get_attention(self):
attentions = {
"full": partial(
FullAttention,
softmax_temp=self.softmax_temp,
dropout_rate=self.attention_dropout
),
"clustered": partial(
ClusteredAttention,
self.clusters,
self.iterations,
self.bits,
self.hash_bias,
self.softmax_temp,
self.attention_dropout
),
"improved-clustered": partial(
ImprovedClusteredAttention,
self.clusters,
self.iterations,
self.bits,
self.hash_bias,
self.topk,
self.softmax_temp,
self.attention_dropout
),
"improved-causal": partial(
ImprovedClusteredCausalAttention,
self.clusters,
self.iterations,
self.bits,
self.hash_bias,
self.topk,
self.softmax_temp,
self.attention_dropout
),
"reformer": partial(
ReformerAttention,
self.chunk_size,
self.bits,
self.rounds,
self.masked,
self.softmax_temp,
self.attention_dropout
),
"exact-topk": partial(
ExactTopKAttention,
self.topk,
self.softmax_temp,
self.attention_dropout
),
"linear": partial(LinearAttention, self.linear_feature_map),
"causal-linear": partial(
CausalLinearAttention,
self.linear_feature_map
)
}
attention = attentions[self.attention_type]()
if self.conditional_attention:
attention = ConditionalFullAttention(
attention,
self.length_limit,
self.softmax_temp,
self.attention_dropout
)
return attention
def get(self):
model_dimensions = self.value_dimensions*self.n_heads
return TransformerEncoder(
[
TransformerEncoderLayer(
AttentionLayer(
self._get_attention(),
model_dimensions,
self.n_heads,
d_keys=self.query_dimensions,
d_values=self.value_dimensions
),
model_dimensions,
self.n_heads,
self.feed_forward_dimensions,
self.dropout,
self.activation
)
for _ in range(self.n_layers)
],
(LayerNorm(model_dimensions) if self._final_norm else None)
)
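# Hedged usage sketch (added; not part of the library file). It assumes the
# from_kwargs() constructor inherited from BaseTransformerBuilder and the
# attribute names listed in __repr__ above.
def _example_build_encoder():
    builder = TransformerEncoderBuilder.from_kwargs(
        n_layers=2,
        n_heads=4,
        query_dimensions=32,
        value_dimensions=32,
        feed_forward_dimensions=256,
        attention_type="linear",
    )
    return builder.get()  # a fast_transformers TransformerEncoder instance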
```
#### File: recurrent/attention/full_attention.py
```python
from math import sqrt
import torch
from torch.nn import Dropout, Module
class RecurrentFullAttention(Module):
"""Implement the full softmax attention as a recurrent module.
Arguments
---------
softmax_temp: The temperature to use for the softmax attention.
(default: 1/sqrt(d_keys) where d_keys is computed at
runtime)
dropout_rate: The dropout rate to apply to the attention (default: 0.1)
"""
def __init__(self, softmax_temp=None, dropout_rate=0.1):
super(RecurrentFullAttention, self).__init__()
self.softmax_temp = softmax_temp
self.dropout = Dropout(dropout_rate)
def forward(self, query, key, value, memory=None):
# Extract some shapes and compute the temperature
N, H, E = query.shape
_, _, D = value.shape
softmax_temp = self.softmax_temp or 1./sqrt(E)
# Aggregate the list of keys and values
if memory is not None:
keys, values = memory
keys = torch.cat([keys, key[:, :, None]], dim=2)
values = torch.cat([values, value[:, :, None]], dim=2)
else:
keys = key[:, :, None]
values = value[:, :, None]
# Compute the unnormalized attention
QK = torch.einsum("nhe,nhse->nhs", query, keys)
# Compute the attention and the weighted average
A = self.dropout(torch.softmax(softmax_temp * QK, dim=-1))
V = torch.einsum("nhs,nhsd->nhd", A, values).contiguous()
# Make sure that what we return is contiguous
return V, [keys, values]
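# Minimal usage sketch (added for illustration): feed one timestep at a time and
# carry the memory, which caches all past keys and values.
def _example_recurrent_full_attention():
    attention = RecurrentFullAttention()
    N, H, E, D = 2, 4, 32, 32
    memory = None
    for _ in range(5):
        query = torch.randn(N, H, E)
        key = torch.randn(N, H, E)
        value = torch.randn(N, H, D)
        out, memory = attention(query, key, value, memory)
    print(out.shape)  # torch.Size([2, 4, 32])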
```
#### File: recurrent/attention/linear_attention.py
```python
import torch
from torch.nn import Module
def elu_feature_map(x):
return torch.nn.functional.elu(x) + 1
class RecurrentLinearAttention(Module):
"""Implement fast_transformers.attention.causal_linear_attention as a
fixed-dimensional state recurrent model.
See fast_transformers.attention.linear_attention and
fast_transformers.attention.causal_linear_attention for the general concept
of replacing the softmax with feature maps.
Arguments
---------
feature_map: callable, a callable that applies the feature map to the
last dimension of a tensor (default: elu(x)+1)
eps: float, a small number to ensure the numerical stability of the
denominator (default: 1e-6)
"""
def __init__(self, feature_map=None, eps=1e-6):
super(RecurrentLinearAttention, self).__init__()
self.feature_map = feature_map or elu_feature_map
self.eps = eps
def forward(self, query, key, value, memory=None):
# Apply the feature map to the query and key
Q = self.feature_map(query)
K = self.feature_map(key)
# Extract some shapes
N, H, D = Q.shape
_, _, M = value.shape
# Extract the memory or initialize it
if memory is None:
Si = query.new_zeros((N, H, D, M))
Zi = query.new_zeros((N, H, D))
else:
Si, Zi = memory
# Ensure the batch size did not change
if len(Si) != N:
raise ValueError("The batch size changed during iteration")
# Update the internal state
#
# NOTE: The if clause is added due to GitHub PR #10. Simply using lines
# 61, 62 does not perform the operation in place which means it is
# slower for inference.
if K.grad_fn is not None or value.grad_fn is not None:
Zi = Zi + K
Si = Si + torch.einsum("nhd,nhm->nhdm", K, value)
else:
Zi += K
Si += torch.einsum("nhd,nhm->nhdm", K, value)
# Compute the output
Z = 1. / (torch.einsum("nhd,nhd->nh", Q, Zi) + self.eps)
V = torch.einsum("nhd,nhdm,nh->nhm", Q, Si, Z)
return V, [Si, Zi]
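# Minimal usage sketch (added for illustration): unlike the full recurrent
# attention, the carried state (Si, Zi) has a fixed size independent of the
# sequence length, so memory use does not grow with the number of steps.
def _example_recurrent_linear_attention():
    attention = RecurrentLinearAttention()
    N, H, D, M = 2, 4, 32, 32
    state = None
    for _ in range(5):
        query = torch.randn(N, H, D)
        key = torch.randn(N, H, D)
        value = torch.randn(N, H, M)
        out, state = attention(query, key, value, state)
    print(out.shape)  # torch.Size([2, 4, 32])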
```
#### File: fast_transformers/recurrent/transformers.py
```python
import torch
from torch.nn import Dropout, LayerNorm, Linear, Module, ModuleList
import torch.nn.functional as F
class RecurrentTransformerEncoderLayer(Module):
"""Attention to the previous inputs and feed forward with skip connections.
This transformer encoder layer is the recurrent dual of
fast_transformers.transformers.TransformerEncoderLayer . The results should
be identical given the same inputs and a lower triangular mask.
Arguments
---------
attention: The attention implementation to use given as a nn.Module
d_model: The input feature dimensionality
n_heads: The number of heads for the multi head attention
d_ff: The dimensionality of the intermediate features after the
attention (default: d_model*4)
dropout: The dropout rate to apply to the intermediate features
(default: 0.1)
activation: {'relu', 'gelu'} Which activation to use for the feed
forward part of the layer (default: relu)
"""
def __init__(self, attention, d_model, n_heads, d_ff=None, dropout=0.1,
activation="relu"):
super(RecurrentTransformerEncoderLayer, self).__init__()
d_ff = d_ff or 4*d_model
self.attention = attention
self.linear1 = Linear(d_model, d_ff)
self.linear2 = Linear(d_ff, d_model)
self.norm1 = LayerNorm(d_model)
self.norm2 = LayerNorm(d_model)
self.dropout = Dropout(dropout)
self.activation = F.relu if activation == "relu" else F.gelu
def forward(self, x, memory=None):
"""Apply the transformer encoder to the input x using the provided
memory.
Arguments
---------
x: The input features of shape (N, E) where N is the batch size and
E is d_model passed in the constructor
memory: The memory can vary depending on the attention implementation
"""
# Run the self attention and add it to the input
x2, memory = self.attention(x, x, x, memory)
x = x + self.dropout(x2)
# Run the fully connected part of the layer
y = x = self.norm1(x)
y = self.dropout(self.activation(self.linear1(y)))
y = self.dropout(self.linear2(y))
return self.norm2(x+y), memory
class RecurrentTransformerEncoder(Module):
"""RecurrentTransformerEncoder is a sequence of
RecurrentTransformerEncoderLayer instances.
RecurrentTransformerEncoder keeps a separate memory per
RecurrentTransformerEncoderLayer.
Arguments
---------
layers: list, RecurrentTransformerEncoderLayer instances or instances
that implement the same interface
norm_layer: A normalization layer to be applied to the final output
(default: None which means no normalization)
"""
def __init__(self, layers, norm_layer=None):
super(RecurrentTransformerEncoder, self).__init__()
self.layers = ModuleList(layers)
self.norm = norm_layer
def forward(self, x, memory=None):
"""Apply all recurrent transformer layers to the input x using the
provided memory.
Arguments
---------
x: The input features of shape (N, E) where N is the batch size and
E is d_model passed in the constructor of each recurrent
transformer encoder layer
memory: A list of objects to be passed to each recurrent
transformer encoder layer
"""
# Initialize the memory to None if not given
if memory is None:
memory = [None]*len(self.layers)
# Apply all the transformers
for i, layer in enumerate(self.layers):
x, m = layer(x, memory[i])
memory[i] = m
# Apply the normalization if needed
if self.norm is not None:
x = self.norm(x)
return x, memory
```
#### File: fast-transformers/fast_transformers/transformers.py
```python
import torch
from torch.nn import Dropout, LayerNorm, Linear, Module, ModuleList
import torch.nn.functional as F
from .masking import FullMask, LengthMask
class TransformerEncoderLayer(Module):
"""Self attention and feed forward network with skip connections.
This transformer encoder layer implements the same encoder layer as
PyTorch but is a bit more open for extension by receiving the attention
implementation as a constructor argument.
Arguments
---------
attention: The attention implementation to use given as a nn.Module
d_model: The input feature dimensionality
n_heads: The number of heads for the multi head attention
d_ff: The dimensionality of the intermediate features after the
attention (default: d_model*4)
dropout: The dropout rate to apply to the intermediate features
(default: 0.1)
activation: {'relu', 'gelu'} Which activation to use for the feed
forward part of the layer (default: relu)
"""
def __init__(self, attention, d_model, n_heads, d_ff=None, dropout=0.1,
activation="relu"):
super(TransformerEncoderLayer, self).__init__()
d_ff = d_ff or 4*d_model
self.attention = attention
self.linear1 = Linear(d_model, d_ff)
self.linear2 = Linear(d_ff, d_model)
self.norm1 = LayerNorm(d_model)
self.norm2 = LayerNorm(d_model)
self.dropout = Dropout(dropout)
self.activation = F.relu if activation == "relu" else F.gelu
def forward(self, x, attn_mask=None, length_mask=None):
"""Apply the transformer encoder to the input x.
Arguments
---------
x: The input features of shape (N, L, E) where N is the batch size,
L is the sequence length (padded) and E is d_model passed in the
constructor.
attn_mask: An implementation of fast_transformers.masking.BaseMask
that encodes where each element of x can attend to.
length_mask: An implementation of
fast_transformers.masking.BaseMask that encodes how
many elements each sequence in the batch consists of.
"""
# Normalize the masks
N = x.shape[0]
L = x.shape[1]
attn_mask = attn_mask or FullMask(L, device=x.device)
length_mask = length_mask or \
LengthMask(x.new_full((N,), L, dtype=torch.int64))
# Run self attention and add it to the input
x = x + self.dropout(self.attention(
x, x, x,
attn_mask=attn_mask,
query_lengths=length_mask,
key_lengths=length_mask
))
# Run the fully connected part of the layer
y = x = self.norm1(x)
y = self.dropout(self.activation(self.linear1(y)))
y = self.dropout(self.linear2(y))
return self.norm2(x+y)
class TransformerEncoder(Module):
"""TransformerEncoder is few more than a sequence of transformer encoder
layers.
It contains an optional final normalization layer as well as the ability to
create the masks once and save some computation.
Arguments
---------
layers: list, TransformerEncoderLayer instances or instances that
implement the same interface.
norm_layer: A normalization layer to be applied to the final output
(default: None which means no normalization)
"""
def __init__(self, layers, norm_layer=None):
super(TransformerEncoder, self).__init__()
self.layers = ModuleList(layers)
self.norm = norm_layer
def forward(self, x, attn_mask=None, length_mask=None):
"""Apply all transformer encoder layers to the input x.
Arguments
---------
x: The input features of shape (N, L, E) where N is the batch size,
L is the sequence length (padded) and E is d_model passed in the
constructor of each transformer encoder layer.
attn_mask: An implementation of fast_transformers.masking.BaseMask
that encodes where each element of x can attend to.
length_mask: An implementation of
fast_transformers.masking.BaseMask that encodes how
many elements each sequence in the batch consists of.
"""
# Normalize the masks
N = x.shape[0]
L = x.shape[1]
attn_mask = attn_mask or FullMask(L, device=x.device)
length_mask = length_mask or \
LengthMask(x.new_full((N,), L, dtype=torch.int64))
# Apply all the transformers
for layer in self.layers:
x = layer(x, attn_mask=attn_mask, length_mask=length_mask)
# Apply the normalization if needed
if self.norm is not None:
x = self.norm(x)
return x
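# Hedged sketch (added): TransformerEncoderLayer only assumes its attention
# module maps (queries, keys, values, attn_mask, query_lengths, key_lengths) to
# an (N, L, E) tensor. _IdentityAttention is a made-up stand-in; real code would
# pass an AttentionLayer from fast_transformers.attention instead.
class _IdentityAttention(Module):
    def forward(self, queries, keys, values, attn_mask, query_lengths, key_lengths):
        return values
def _example_encoder():
    d_model, n_heads = 64, 4
    layers = [TransformerEncoderLayer(_IdentityAttention(), d_model, n_heads) for _ in range(2)]
    encoder = TransformerEncoder(layers, norm_layer=LayerNorm(d_model))
    x = torch.randn(8, 16, d_model)  # (N, L, E)
    print(encoder(x).shape)  # torch.Size([8, 16, 64])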
``` |
{
"source": "jmerck888/esp8266_deauther",
"score": 2
} |
#### File: utils/web_converter/webConverter.py
```python
import os
import gzip
import argparse
import binascii
from pathlib import Path, PurePath
try:
from css_html_js_minify.minify import process_single_html_file, process_single_js_file, process_single_css_file
except ModuleNotFoundError:
print("\n[!] Requirements are not satisfied. Please install the 'anglerfish' package by running 'sudo python3 -m pip install anglerfish'.\n")
exit()
parser = argparse.ArgumentParser(usage="webConverter.py --repopath [path-to-repo]")
parser.add_argument("--repopath", type=str,
help='Path to the repo, if not set make sure to run the script from [repo]/utils/web_converter_python/ directory')
print("\nwebConverter for the deauther2.0 by @xdavidhu\n")
args = parser.parse_args()
if args.repopath != None:
parent = args.repopath
print("[+] Using manual path '" + args.repopath + "'\n")
else:
p = Path.cwd()
parent = p.parent.parent
license_file_path = str(os.path.join(str(parent), "LICENSE"))
q = PurePath('esp8266_deauther')
arduino_file_path = str(os.path.join(str(parent / q), "webfiles.h"))
datadir = parent / q
q = PurePath('web_interface')
dir = parent / q
q = PurePath('data')
datadir = datadir / q
if not os.path.exists(str(datadir)):
os.mkdir(str(datadir))
q = PurePath('web')
compressed = datadir / q
if not os.path.exists(str(compressed)):
os.mkdir(str(compressed))
html_files = []
css_files = []
js_files = []
lang_files = []
progmem_definitions = ""
copy_files_function = ""
webserver_events = ""
load_lang = ""
filelist = Path(dir).glob('**/*')
for x in filelist:
if x.is_file():
if x.parts[-2] == "compressed" or x.parts[-3] == "compressed":
continue
if x.suffix == ".html":
html_files.append(x)
elif x.suffix == ".css":
css_files.append(x)
elif x.suffix == ".js":
js_files.append(x)
elif x.suffix == ".lang":
lang_files.append(x)
for file in html_files:
base_file = os.path.basename(str(file))
original_file = str(file)
new_file = str(os.path.join(str(compressed), str(base_file)))
print("[+] Minifying " + base_file + "...")
process_single_html_file(original_file, output_path=new_file)
print("[+] Compressing " + base_file + "...")
f_in = open(new_file, encoding='UTF-8')
content = f_in.read()
f_in.close()
os.remove(new_file)
with gzip.GzipFile(new_file + ".gz", mode='w') as fo:
fo.write(content.encode("UTF-8"))
f_in = open(new_file + ".gz", 'rb')
content = f_in.read()
f_in.close()
array_name = base_file.replace(".", "")
hex_formatted_content = ""
hex_content = binascii.hexlify(content)
hex_content = hex_content.decode("UTF-8")
hex_content = [hex_content[i:i+2] for i in range(0, len(hex_content), 2)]
for char in hex_content:
hex_formatted_content += "0x" + char + ", "
hex_formatted_content = hex_formatted_content[:-2]
progmem_definitions += "const char " + array_name + "[] PROGMEM = {" + hex_formatted_content + "};\n"
copy_files_function += ' if(!SPIFFS.exists(String(F("/web/' + base_file + '.gz"))) || force) progmemToSpiffs(' + array_name + ', sizeof(' + array_name + '), String(F("/web/' + base_file + '.gz")));\n'
webserver_events += 'server.on(String(F("/' + base_file + '")).c_str(), HTTP_GET, [](){\n sendProgmem(' + array_name + ', sizeof(' + array_name + '), W_HTML);\n});\n'
for file in css_files:
base_file = os.path.basename(str(file))
original_file = str(file)
new_file = str(os.path.join(str(compressed), str(base_file)))
print("[+] Minifying " + base_file + "...")
process_single_css_file(original_file, output_path=new_file)
print("[+] Compressing " + base_file + "...")
f_in = open(new_file, encoding='UTF-8')
content = f_in.read()
f_in.close()
os.remove(new_file)
with gzip.GzipFile(new_file + ".gz", mode='w') as fo:
fo.write(content.encode("UTF-8"))
f_in = open(new_file + ".gz", 'rb')
content = f_in.read()
f_in.close()
array_name = base_file.replace(".", "")
hex_formatted_content = ""
hex_content = binascii.hexlify(content)
hex_content = hex_content.decode("UTF-8")
hex_content = [hex_content[i:i+2] for i in range(0, len(hex_content), 2)]
for char in hex_content:
hex_formatted_content += "0x" + char + ", "
hex_formatted_content = hex_formatted_content[:-2]
progmem_definitions += "const char " + array_name + "[] PROGMEM = {" + hex_formatted_content + "};\n"
copy_files_function += ' if(!SPIFFS.exists(String(F("/web/' + base_file + '.gz"))) || force) progmemToSpiffs(' + array_name + ', sizeof(' + array_name + '), String(F("/web/' + base_file + '.gz")));\n'
webserver_events += 'server.on(String(F("/' + base_file + '")).c_str(), HTTP_GET, [](){\n sendProgmem(' + array_name + ', sizeof(' + array_name + '), W_CSS);\n});\n'
for file in js_files:
q = PurePath('js')
compressed_js = compressed / q
if not os.path.exists(str(compressed_js)):
os.mkdir(str(compressed_js))
base_file = os.path.basename(str(file))
original_file = str(file)
new_file = str(os.path.join(str(compressed_js), str(base_file)))
#print("[+] Minifying " + base_file + "...")
#process_single_js_file(original_file, output_path=new_file)
print("[+] Compressing " + base_file + "...")
f_in = open(original_file, encoding='UTF-8')
content = f_in.read()
f_in.close()
#os.remove(new_file)
with gzip.GzipFile(new_file + ".gz", mode='w') as fo:
fo.write(content.encode("UTF-8"))
f_in = open(new_file + ".gz", 'rb')
content = f_in.read()
f_in.close()
array_name = base_file.replace(".", "")
hex_formatted_content = ""
hex_content = binascii.hexlify(content)
hex_content = hex_content.decode("UTF-8")
hex_content = [hex_content[i:i+2] for i in range(0, len(hex_content), 2)]
for char in hex_content:
hex_formatted_content += "0x" + char + ", "
hex_formatted_content = hex_formatted_content[:-2]
progmem_definitions += "const char " + array_name + "[] PROGMEM = {" + hex_formatted_content + "};\n"
copy_files_function += ' if(!SPIFFS.exists(String(F("/web/js/' + base_file + '.gz"))) || force) progmemToSpiffs(' + array_name + ', sizeof(' + array_name + '), String(F("/web/js/' + base_file + '.gz")));\n'
webserver_events += 'server.on(String(F("/js/' + base_file + '")).c_str(), HTTP_GET, [](){\n sendProgmem(' + array_name + ', sizeof(' + array_name + '), W_JS);\n});\n'
for file in lang_files:
q = PurePath('lang')
compressed_lang = compressed / q
if not os.path.exists(str(compressed_lang)):
os.mkdir(str(compressed_lang))
base_file = os.path.basename(str(file))
original_file = str(file)
new_file = str(os.path.join(str(compressed_lang), str(base_file)))
print("[+] Compressing " + base_file + "...")
f_in = open(original_file, encoding='UTF-8')
content = f_in.read()
f_in.close()
with gzip.GzipFile(new_file + ".gz", mode='w') as fo:
fo.write(content.encode("UTF-8"))
f_in = open(new_file + ".gz", 'rb')
content = f_in.read()
f_in.close()
array_name = base_file.replace(".", "")
lang_name = base_file.replace(".lang", "")
hex_formatted_content = ""
hex_content = binascii.hexlify(content)
hex_content = hex_content.decode("UTF-8")
hex_content = [hex_content[i:i+2] for i in range(0, len(hex_content), 2)]
for char in hex_content:
hex_formatted_content += "0x" + char + ", "
hex_formatted_content = hex_formatted_content[:-2]
progmem_definitions += "const char " + array_name + "[] PROGMEM = {" + hex_formatted_content + "};\n"
copy_files_function += ' if(!SPIFFS.exists(String(F("/web/lang/' + base_file + '.gz"))) || force) progmemToSpiffs(' + array_name + ', sizeof(' + array_name + '), String(F("/web/lang/' + base_file + '.gz")));\n'
webserver_events += 'server.on(String(F("/lang/' + base_file + '")).c_str(), HTTP_GET, [](){\n sendProgmem(' + array_name + ', sizeof(' + array_name + '), W_JSON);\n});\n'
if(len(load_lang) > 0):
load_lang += ' else if(settings.getLang() == String(F("'+lang_name+'"))) sendProgmem(' + array_name + ', sizeof(' + array_name + '), W_JSON);\n'
else:
load_lang += ' if(settings.getLang() == String(F("'+lang_name+'"))) sendProgmem(' + array_name + ', sizeof(' + array_name + '), W_JSON);\n'
base_file = os.path.basename(license_file_path)
new_file = str(os.path.join(str(compressed), str("LICENSE")))
print("[+] Compressing " + base_file + "...")
f_in = open(license_file_path, encoding='UTF-8')
content = f_in.read()
f_in.close()
with gzip.GzipFile(new_file + ".gz", mode='w') as fo:
fo.write(content.encode("UTF-8"))
f_in = open(new_file + ".gz", 'rb')
content = f_in.read()
f_in.close()
array_name = base_file.replace(".", "")
hex_formatted_content = ""
hex_content = binascii.hexlify(content)
hex_content = hex_content.decode("UTF-8")
hex_content = [hex_content[i:i+2] for i in range(0, len(hex_content), 2)]
for char in hex_content:
hex_formatted_content += "0x" + char + ", "
hex_formatted_content = hex_formatted_content[:-2]
progmem_definitions += "const char " + array_name + "[] PROGMEM = {" + hex_formatted_content + "};\n"
copy_files_function += ' if(!SPIFFS.exists(String(F("/web/' + base_file + '.gz"))) || force) progmemToSpiffs(' + array_name + ', sizeof(' + array_name + '), String(F("/web/' + base_file + '.gz")));\n'
print("[+] Saving everything into webfiles.h...")
f = open(arduino_file_path, 'w')
f.write("#ifndef webfiles_h\n")
f.write("#define webfiles_h\n")
f.write("\n")
f.write("// comment that out if you want to save program memory and know how to upload the web files to the SPIFFS manually\n")
f.write("#define USE_PROGMEM_WEB_FILES \n")
f.write("\n")
f.write("#ifdef USE_PROGMEM_WEB_FILES\n")
f.write(progmem_definitions)
f.write("#endif\n")
f.write("\n")
f.write("void copyWebFiles(bool force){\n")
f.write("#ifdef USE_PROGMEM_WEB_FILES\n")
f.write("if(settings.getWebSettings().use_spiffs){\n")
f.write(copy_files_function)
f.write("}\n")
f.write("#endif\n")
f.write("}\n")
f.write("\n")
f.write("#endif")
f.close()
print("\n[+] Done, happy uploading :)")
print("Here are the updated functions for wifi.h, in case you added or removed files:")
print()
print('if(!settings.getWebSpiffs()){')
print(' server.on(String(SLASH).c_str(), HTTP_GET, [](){')
print(' sendProgmem(indexhtml, sizeof(indexhtml), W_HTML);')
print('});')
print(webserver_events)
print('}')
print("server.on(str(W_DEFAULT_LANG).c_str(), HTTP_GET, [](){")
print(" if(!settings.getWebSpiffs()){")
print(load_lang)
print(' else handleFileRead(String(F("/web/lang/"))+settings.getLang()+String(F(".lang")));')
print(' } else {')
print(' handleFileRead(String(F("/web/lang/"))+settings.getLang()+String(F(".lang")));')
print(' }')
print("});");
``` |
{
"source": "jmercouris/galapagos_volume",
"score": 3
} |
#### File: jmercouris/galapagos_volume/volume.py
```python
import os
import subprocess # Python execute sub process, in this case to send osascript commands
import urwid # Replacement for curses, used in the Terminal GUI
import argparse # Easily parse command line arguments
description_string = """Galapagos Volume is a wrapper utility for setting the volume in OSX.
It works by calling applescript commands in a subprocess to set the volume. There are a couple
of ways to use Galapagos Volume: the first is by just running the program without any parameters,
which will produce a GUI that allows you to increase or decrease the volume for input and output.
The second is with the flags -d -device, and -v -volume. If you specify a "-d output" or a "-d input"
the system will return the volume for that device. For example, "volume -d output" will return the output volume.
If you specify a device and a volume, the system will SET the volume for that device. Therefore a sample
command to set the output would be "volume -d output -v 85", this would set the output volume to 85."""
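# Example invocations, taken from the description above (assuming the script is
# invoked as "volume"):
#   volume                  -> opens the urwid GUI
#   volume -d output        -> prints the current output volume
#   volume -d output -v 85  -> sets the output volume to 85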
# Represents an OSX audio device, e.g. input, output
class AudioDevice:
"""
A class responsible for representing & manipulating OSX audio devices
"""
def __init__(self, name, set_volume_command, get_volume_command):
self.name = name
self.set_volume_command = set_volume_command
self.get_volume_command = get_volume_command
self.volume = self.get_volume()
def set_volume(self, volume):
# Constrain Volume to Valid Range
if (volume > 100):
volume = 100
if (volume < 0):
volume = 0
# Copy Command arguments list into local version for modification
local_command = self.set_volume_command[:]
local_command[2] = self.set_volume_command[2].format(volume)
process = subprocess.Popen(local_command, stdout=subprocess.PIPE)
out, err = process.communicate()
self.volume = volume
def get_volume(self):
process = subprocess.Popen(
self.get_volume_command, stdout=subprocess.PIPE)
out, err = process.communicate()
# Set Local volume to reflect system reported volume
self.volume = int(out)
return self.volume
# Represents the Data in the program
class VolumeModel:
"""
Populating the default OSX AudioDevices
"""
def __init__(self):
# List of audio devices
audio_devices = self.audio_devices = []
# AudioDevice Output
get_volume_command = ['osascript', '-e',
'output volume of (get volume settings)']
set_volume_command = ['osascript', '-e', 'set volume output volume {}']
device = AudioDevice("Output", set_volume_command, get_volume_command)
audio_devices.append(device)
# AudioDevice Input
get_volume_command = ['osascript', '-e',
'input volume of (get volume settings)']
set_volume_command = ['osascript', '-e', 'set volume input volume {}']
device = AudioDevice("Input", set_volume_command, get_volume_command)
audio_devices.append(device)
def get_audio_devices(self):
return self.audio_devices
# Class VolumeView, handles drawing and input
class VolumeView(urwid.WidgetWrap):
"""
A class responsible for providing the application's interface and
volume display.
"""
# Colors used for rendering
palette = [
('bg background', 'white', 'white'),
('bg 1', 'black', 'black', 'standout'),
('bg 1 smooth', 'dark blue', 'black'),
('bg 2', 'black', 'dark gray', 'standout'),
('bg 2 smooth', 'dark gray', 'black'),
('button normal', 'black', 'white', 'light gray'),
('button select', 'white', 'black'),
]
# Initialization
def __init__(self, controller):
self.controller = controller
self.audio_devices = self.controller.get_audio_devices()
urwid.WidgetWrap.__init__(self, self.main_window())
# Bar Graph Configuration
def bar_graph(self, smooth=False):
satt = None
if smooth:
satt = {(1, 0): 'bg 1 smooth', (2, 0): 'bg 2 smooth'}
w = urwid.BarGraph(['bg background', 'bg 1', 'bg 2'], satt=satt)
return w
def button(self, t, fn):
w = urwid.Button(t, fn)
w = urwid.AttrWrap(w, 'button normal', 'button select')
return w
# Exit Program
def exit_program(self, w):
raise urwid.ExitMainLoop()
# Change Volume
def delta_output_up(self, w):
device = self.audio_devices[0]
device.set_volume(device.get_volume() + 5)
self.update_graph()
def delta_output_down(self, w):
device = self.audio_devices[0]
device.set_volume(device.get_volume() - 5)
self.update_graph()
def delta_input_up(self, w):
device = self.audio_devices[1]
device.set_volume(device.get_volume() + 5)
self.update_graph()
def delta_input_down(self, w):
device = self.audio_devices[1]
device.set_volume(device.get_volume() - 5)
self.update_graph()
# Update Graph View
def update_graph(self, force_update=True):
l = []
for index, device in enumerate(self.audio_devices):
volume = device.volume
# toggle between two bar types
if index & 1:
l.append([0, volume])
else:
l.append([volume, 0])
self.graph.set_data(l, 100)
return True
# Controls on the right hand side
def graph_controls(self):
l = []
l.append(urwid.Text("Device Select", align="left"))
l.append(urwid.Divider())
l.append(self.button("{} +".format('Output'), self.delta_output_up))
l.append(self.button("{} -".format('Output'), self.delta_output_down))
l.append(urwid.Divider())
l.append(self.button("{} +".format('Input'), self.delta_input_up))
l.append(self.button("{} -".format('Input'), self.delta_input_down))
l.append(urwid.Divider())
l.append(self.button("Quit", self.exit_program))
w = urwid.ListBox(urwid.SimpleListWalker(l))
return w
# Configuration of the Main Window, combines controls and bar display
def main_window(self):
self.graph = self.bar_graph()
self.graph_wrap = urwid.WidgetWrap(self.graph)
vline = urwid.AttrWrap(urwid.SolidFill(u'\u2502'), 'line')
c = self.graph_controls()
w = urwid.Columns([('weight', 1, self.graph_wrap),
('fixed', 1, vline), c],
dividechars=1, focus_column=2)
w = urwid.Padding(w, ('fixed left', 1), ('fixed right', 1))
return w
# Class VolumeController, serves as a view Controller
class VolumeController:
"""
A class responsible for setting up the model and view and running
the application.
"""
def __init__(self):
self.model = VolumeModel()
self.view = VolumeView(self)
self.view.update_graph(True)
def get_audio_devices(self):
return self.model.get_audio_devices()
def main(self):
self.loop = urwid.MainLoop(self.view, self.view.palette)
self.loop.run()
# Initialize the VolumeController
def main():
# Command line Argument Parameters
parser = argparse.ArgumentParser(description=description_string)
parser.add_argument('-d', '--device', type=str,
help='Audio device to target: "input" or "output"', required=False)
parser.add_argument('-v', '--volume', type=int,
help='Volume Level', required=False)
args = parser.parse_args()
# User specified a device, but no volume setting, they want info
if args.device is not None and args.volume is None:
device = args.device
if "output" == device:
print(VolumeModel().get_audio_devices()[0].get_volume())
if "input" == device:
print(VolumeModel().get_audio_devices()[1].get_volume())
# Volume specified without a device specified, assume they want to change
# output
if args.volume is not None and args.device is None:
volume = args.volume
device = VolumeModel().get_audio_devices()[0]
device.set_volume(volume)
print(device.get_volume())
# User specified volume and device
if args.volume is not None and args.device is not None:
device = args.device
volume = args.volume
# Assign device to correct device object
if "output" == device:
device = VolumeModel().get_audio_devices()[0]
if "input" == device:
device = VolumeModel().get_audio_devices()[1]
device.set_volume(volume)
print(device.get_volume())
# Execute GUI if no command line arguments passed
if args.device is None and args.volume is None:
VolumeController().main()
# If Called from the command line
if __name__ == "__main__":
main()
##########################################################################
# Shell Commands to Change the Volume (This program executes/wraps these commands)
##########################################################################
# Src: https://coderwall.com/p/22p0ja/set-get-osx-volume-mute-from-the-command-line
# Get volume
# # Echos a number from 0 to 100
# osascript -e 'output volume of (get volume settings)'
# Set volume
# # Where 50 is a number from 0 to 100
# osascript -e 'set volume output volume 50'
# Get mute state
# # Echos a string of 'true' or 'false'
# osascript -e 'output muted of (get volume settings)'
# Set mute state
# # Where 'true' can be 'true' or 'false'
# osascript -e 'set volume output muted true'
``` |
{
"source": "jmercouris/IPSRC",
"score": 2
} |
#### File: jmercouris/IPSRC/platform_constants.py
```python
import platform_disk
import platform_git
import platform_mqtt
def get_broadcast_function(platform_string):
if(platform_string == "disk"):
return platform_disk.broadcast_data
if(platform_string == "git"):
return platform_git.broadcast_data
if(platform_string == "mqtt"):
return platform_mqtt.broadcast_data
def get_read_function(platform_string):
if(platform_string == "disk"):
return platform_disk.read_data
if(platform_string == "git"):
return platform_git.read_data
if(platform_string == "mqtt"):
return platform_mqtt.read_data
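# Usage sketch (the broadcast_data/read_data call signatures are defined in the
# platform_* modules, so the argument shown here is only an assumption):
#   broadcast = get_broadcast_function("mqtt")
#   broadcast(message)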
``` |
{
"source": "jmerizia/alignment-research-dataset",
"score": 3
} |
#### File: tools/ebooks/gdrive_ebooks.py
```python
import os, gdown, pypandoc, re
from .utils import slugify
class GDrive:
"""
Pull .epubs from a Google Drive and convert them to .txt
"""
def __init__(self , gdrive_adress):
self.name = 'gdrive-epubs'
self.gdrive_adress = gdrive_adress
self.local_path = 'data/ebooks/'
self.local_out = self.local_path + 'books_text/'
os.makedirs(self.local_path, exist_ok=True)
os.makedirs(self.local_out, exist_ok=True)
self.AIS_scrape_local = os.listdir(self.local_out)
self.weblink_pattern = r"(?i)\b((?:https?://|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|\(([^\s()<>]+|(\([^\s()<>]+\)))*\))+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:'\".,<>?«»“”‘’]))"
if os.path.exists(os.getcwd()+'/pandoc/pandoc'):
os.environ.setdefault('PYPANDOC_PANDOC', os.getcwd()+'/pandoc/pandoc')
def pull_from_gdrive(self):
gdown.download_folder(url=self.gdrive_adress, output=self.local_out, quiet=False)
self.AIS_scrape_local = os.listdir(self.local_out)
def convert_to_txt(self):
for fName in self.AIS_scrape_local:
newName = slugify(fName[:20])
if 'epub' in fName and not os.path.exists(self.local_out + newName):
os.rename( self.local_out + fName , self.local_out + 'tmp.epub')
# convert to plain text
output = pypandoc.convert_file(self.local_out + 'tmp.epub', 'plain',
outputfile=self.local_out + 'tmp.txt')
# remove linebreaks in middle of sentence
os.system("awk ' /^$/ { print; } /./ { printf(\"%s \", $0); } ' " + self.local_out + "tmp.txt > " + self.local_out + newName + '.txt')
os.system('rm ' + self.local_out + "tmp.txt")
os.system('rm ' + self.local_out + "tmp.epub")
self.AIS_scrape_local = os.listdir(self.local_out)
def clean_txt(self , min_length=10):
# remove short lines and replace links
for fName in self.AIS_scrape_local:
if 'txt' in fName:
os.rename(self.local_out + fName , self.local_out + 'tmp.txt')
with open(self.local_out + 'tmp.txt') as f, open(self.local_out + fName,'w') as f2:
for x in f:
stripped_x = re.sub(r'http\S+', 'ʬ', x)
if len(stripped_x) >= min_length:
f2.write(stripped_x)
os.system('rm ' + self.local_out + "tmp.txt")
def fetch(self):
self.pull_from_gdrive()
self.convert_to_txt()
self.clean_txt()
``` |
{
"source": "jmerizia/parallel-pytorch",
"score": 3
} |
#### File: parallel-pytorch/examples/train_minGPT.py
```python
import torch
import fire
import logging
from parallel_pytorch.data import aggregate_gradients, scatter_batch
from parallel_pytorch.models.minGPT import configure_optimizers, criterion, make_pipelined_GPT
from parallel_pytorch.topology import Topology
from parallel_pytorch.utils import set_seed, abort_on_exception
logger = logging.getLogger(__name__)
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.DEBUG,
)
@abort_on_exception
def main(
dp=1,
pp=1,
mp=1,
batch_size=8,
vocab_size=17,
block_size=4,
n_layer=2,
n_head=4,
n_embd=4,
embd_pdrop=0.1,
attn_pdrop=0.1,
resid_pdrop=0.1,
learning_rate=0.1,
weight_decay=0.1,
betas=(0.9, 0.95),
init_checkpoint_directory=None,
out_checkpoint_directory=None,
seed=42,
):
""" Train a simple minGPT model with 3D parallelism. """
# We first have to create a "topology" which is a slim class which holds information
# about the overall shape of the network.
topo = Topology(dp=dp, pp=pp, mp=mp)
# The total number of workers that participate is dp * pp * mp.
# Thus, there may be a few workers that do not participate.
if not topo.active:
logger.info("I, worker %d, am not active", topo.data_comm.Get_rank())
return
# We set the seed in torch/numpy/random to the current rank to ensure that weight initialization
# happens differently on all ranks.
set_seed(seed * topo.data_comm.Get_rank())
# Here, we load in our pipelined minGPT. The one caveat to be aware of is that
# this is not a torch.nn.Module, but rather a "Pipeline" class.
# It still has forward/backward functions, so we can use it *almost* normally.
pipeline = make_pipelined_GPT(
topo=topo,
block_size=block_size,
vocab_size=vocab_size,
n_embd=n_embd,
n_layer=n_layer,
n_head=n_head,
embd_pdrop=embd_pdrop,
attn_pdrop=attn_pdrop,
resid_pdrop=resid_pdrop,
)
if init_checkpoint_directory is not None:
# Due to the size of checkpoints of large models, dealing with one large file is unrealistic.
# Instead, we collapse along the model-parallel dimension, and then save one file
# per block of the neural network.
# That allows us to save checkpoints efficiently and agnostically to the parallelization topology.
pipeline.load_checkpoint(init_checkpoint_directory)
# Generate some fake data. Technically we only need it on the first and last stages of the pipeline,
# but text data isn't so expensive to load in on all ranks.
data = [
(
torch.randint(0, vocab_size, [batch_size, block_size], dtype=torch.long),
torch.randint(0, vocab_size, [batch_size, block_size], dtype=torch.long),
) for _ in range(10)
]
# This function also doesn't really change from the original implementation.
optimizer = configure_optimizers(
pipeline=pipeline,
learning_rate=learning_rate,
weight_decay=weight_decay,
betas=betas,
)
running_loss = 0
for it, (x, y) in enumerate(data):
# First we want to scatter the batch across all of the data parallel copies.
x, y = scatter_batch(topo=topo, inputs=x, labels=y)
optimizer.zero_grad()
# As usual, forward the model and compute loss.
# Under the hood, this is passing our input through the entire pipeline.
logits = pipeline(x)
loss = criterion(topo, logits, y)
# Now we do the backwards pass. As mentioned before, since the pipeline is not technically a module,
# when we do backward on the loss, that populates logits.grad normally,
# but it doesn't actually propagate the loss down the rest of the pipeline for us.
# This means we must call `backward()` manually.
loss.backward()
pipeline.backward(logits.grad)
# Now, for each of our parameters, PyTorch has populated a `.grad` member on each of our parameters.
# Since we are doing data parallelism, we must aggregate these gradients now.
aggregate_gradients(topo=topo, model=pipeline.stage)
# Conveniently, PyTorch accumulates parameter gradients across the successive backward() calls
# that the pipeline performs for its "micro batches".
# Thus, we can just step the optimizer as we normally do.
optimizer.step()
# This step deserves some explanation. Since we are pipelining the input, we can only use logits/loss
# if we're at the last stage of the pipeline.
# Additionally, since there might be multiple model-parallel processes, we must make sure
# to print on just the root one.
# Lastly, since there are multiple data parallel copies, we want to only print on the first one.
if topo.is_last_pipeline_stage() and topo.is_root_model_rank() and topo.get_data_parallel_idx() == 0:
running_loss += loss.item()
logger.info(f'batch {it} loss: {running_loss:.3f}')
running_loss = 0.0
if out_checkpoint_directory is not None:
# Lastly, we'll save a checkpoint of our pipeline. As mentioned before, checkpoints for pipelines
# are saved as multilple files, one per layer in the pipeline.
pipeline.save_checkpoint(out_checkpoint_directory)
if __name__ == '__main__':
fire.Fire(main)
```
#### File: parallel-pytorch/parallel_pytorch/topology.py
```python
from typing import Literal
from mpi4py import MPI
from parallel_pytorch.utils import compute_devices_per_node
class Topology(object):
"""
A tiny class that stores all the MPI communicators and rank relationships.
For example, consider if dp = 2, pp = 2, and mp = 4,
then these would be the ranks of the communicators:
world rank 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
data_comm rank 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
pipeline_comm rank 0 1 2 3 4 5 6 7 0 1 2 3 4 5 6 7
model_comm rank 0 1 2 3 0 1 2 3 0 1 2 3 0 1 2 3
per_stage_dp_comm rank 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1
GPUs ( cuda:0 ) ( cuda:1 ) ( cuda:2 ) ( cuda:3 )
"""
def __init__(
self,
*,
dp: int,
pp: int,
mp: int,
device: Literal['cpu', 'cuda'] = 'cpu',
):
assert dp > 0, \
f"dp must be greater than 0, but got {dp}"
assert pp > 0, \
f"pp must be greater than 0, but got {pp}"
assert mp > 0, \
f"mp must be greater than 0, but got {mp}"
self.dp = dp
self.pp = pp
self.mp = mp
self.device = device
if device == 'cuda':
assert self.mp == compute_devices_per_node(), \
"Topology.mp must be equal to the number of devices on each node, or else deadlocks can occur."
world = MPI.COMM_WORLD
size = self.dp * self.pp * self.mp
assert world.Get_size() >= size, \
"Topology.dp * Topology.pp * Topology.mp must be less than or equal to the number of processes."
self.active = world.Get_rank() < size
self.data_comm = world.Split(color=0 if self.active else 1, key=world.Get_rank())
data_rank = self.data_comm.Get_rank()
self.pipeline_comm = self.data_comm.Split(color=data_rank // (self.pp * self.mp), key=data_rank)
pipeline_rank = self.pipeline_comm.Get_rank()
self.model_comm = self.pipeline_comm.Split(color=pipeline_rank // self.mp, key=pipeline_rank)
self.per_stage_dp_comm = self.data_comm.Split(color=data_rank % (self.pp * self.mp), key=data_rank)
################
# MODEL #
################
def is_root_model_rank(self):
return self.model_comm.Get_rank() == 0
################
# DATA #
################
def get_num_data_parallel_copies(self):
return self.data_comm.Get_size() // self.pipeline_comm.Get_size()
def get_data_parallel_idx(self):
return self.per_stage_dp_comm.Get_rank()
################
# PIPELINE #
################
def get_pipeline_stage_idx(self):
return self.pipeline_comm.Get_rank() // self.mp
def get_num_pipeline_stages(self):
return self.pipeline_comm.Get_size() // self.model_comm.Get_size()
def get_pipeline_rank_of_next_stage(self):
assert self.get_pipeline_stage_idx() + 1 < self.get_num_pipeline_stages()
return self.pipeline_comm.Get_rank() + self.model_comm.Get_size()
def get_pipeline_rank_of_prev_stage(self):
assert 0 < self.get_pipeline_stage_idx()
return self.pipeline_comm.Get_rank() - self.model_comm.Get_size()
def get_pipeline_rank_of_last_stage(self):
return self.pipeline_comm.Get_size() - 1
def get_pipeline_rank_of_first_stage(self):
return 0
def is_first_pipeline_stage(self):
return self.get_pipeline_stage_idx() == 0
def is_last_pipeline_stage(self):
return self.get_pipeline_stage_idx() == self.get_num_pipeline_stages() - 1
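# Worked example matching the table in the class docstring: with dp=2, pp=2, mp=4,
# world rank 13 sits in data-parallel copy 1 (per_stage_dp_comm rank 1), belongs to
# pipeline stage 1 (pipeline_comm rank 5 // mp), and is model rank 1 within that stage.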
```
#### File: parallel-pytorch/tests/test_merge.py
```python
import torch
import numpy as np
from mpi4py import MPI
from parallel_pytorch.ops import tensor_merge
from parallel_pytorch.utils import abort_on_exception
@abort_on_exception
def test_1():
worker_shape = [2, 2]
world = MPI.COMM_WORLD
num_workers = np.array(worker_shape).prod()
comm = MPI.COMM_WORLD.Split(color=0 if world.Get_rank() < num_workers else 1, key=world.Get_rank())
if world.Get_rank() < num_workers:
if comm.Get_rank() == 0:
x = torch.tensor([[0, 1], [4, 5]])
elif comm.Get_rank() == 1:
x = torch.tensor([[2, 3], [6, 7]])
elif comm.Get_rank() == 2:
x = torch.tensor([[8, 9], [12, 13]])
elif comm.Get_rank() == 3:
x = torch.tensor([[10, 11], [14, 15]])
x = tensor_merge(x, comm=comm, worker_shape=worker_shape)
if comm.Get_rank() == 0:
e = torch.tensor([
[0, 1, 2, 3],
[4, 5, 6, 7],
[8, 9, 10, 11],
[12, 13, 14, 15],
])
assert torch.allclose(x, e), f'{x} != {e}'
@abort_on_exception
def test_2():
x_shape = [2, 2]
worker_shape = [1, 1]
world = MPI.COMM_WORLD
num_workers = np.array(worker_shape).prod()
comm = MPI.COMM_WORLD.Split(color=0 if world.Get_rank() < num_workers else 1, key=world.Get_rank())
if world.Get_rank() < num_workers:
volume = np.array(x_shape).prod()
x = torch.arange(volume).view(x_shape)
x = tensor_merge(x, comm=comm, worker_shape=worker_shape)
e = torch.tensor([[0, 1], [2, 3]])
assert torch.allclose(x, e), f'{x} != {e}'
def run_all():
test_1()
test_2()
if __name__ == '__main__':
run_all()
``` |
{
"source": "jmerizia/plainapi",
"score": 3
} |
#### File: plainapi/backend/generate.py
```python
import sqlite3
from typing import List, Tuple, Optional, Any
from parse_sql import parse_query, parse_schema, type_hint2schema_type
import os
import subprocess
import pprint
from dotenv import load_dotenv
import openai # type: ignore
from uuid import uuid4
import sqlite3
import random
from models import Endpoint
load_dotenv()
openai.api_key = os.getenv('OPENAI_API_KEY')
DB_NAME = 'generated/demo.sqlite3'
CACHE_DIR = 'gpt_cache'
def cached_gpt3(prompt: str, stop: str = '\n', use_cache: bool = True) -> str:
if not os.path.exists(CACHE_DIR):
os.mkdir(CACHE_DIR)
if use_cache:
# Check the cache
separator = '\n===========\n'
cache_files = [os.path.join(CACHE_DIR, p) for p in os.listdir(CACHE_DIR)]
for fn in cache_files:
with open(fn, 'r') as f:
query, result = f.read().split(separator)
if query == prompt:
return result
response = openai.Completion.create(
engine="davinci",
prompt=prompt,
max_tokens=64,
temperature=0,
stop=stop,
)
result = response['choices'][0]['text']
print('cache miss, using GPT3')
# Add to the cache
cache_file = os.path.join(CACHE_DIR, str(uuid4()))
with open(cache_file, 'w') as f:
f.write(prompt + separator + result)
return result
def get_db_schema_text(db_name: str) -> str:
"""
Get the schema of an SQL query.
"""
return str(subprocess.check_output(['sqlite3', db_name, '.schema']), 'utf-8')
def english2sql(english_query: str, schema_text: Optional[str] = None, use_cache: bool = True) -> str:
"""
Convert a query written in english to an SQL query.
"""
# generate an English sentence describing the schema
if schema_text:
tables = parse_schema(schema_text)
created_tables = [t for t in tables if 'sqlite_sequence' not in t['table_name']]
if len(created_tables) == 0:
raise ValueError('Error: there are no tables')
db_spec = 'The database contains '
for table_idx, table in enumerate(created_tables):
table_name = table['table_name']
db_spec += 'a "' + table_name + '" table with the fields '
for field_idx, field in enumerate(table['fields']):
name = field['name']
type = field['type']
if name == 'id':
continue
db_spec += name + ' (' + type_hint2schema_type(type) + ')';
if field_idx < len(table['fields'])-1:
db_spec += ', '
db_spec += '; '
# always add this example table for GPT3
db_spec += 'and an "apples" table with the fields name (VARCHAR), weight (INTEGER), is_green (BOOLEAN).'
prompt = (
f'Turn the following English sentences into valid SQLite statements. {db_spec}\n'
f'\n'
f'English: get all of the apples\n'
f'SQL: SELECT * FROM apples;\n'
f'\n'
f'English: get an apple\'s name by its id\n'
f'SQL: SELECT name FROM apples WHERE id = ?;\n'
f'\n'
f'English: create a new apple that is not green\n'
f'SQL: INSERT INTO apples (name, weight, is_green) VALUES (?, ?, false);\n'
f'\n'
f'English: {english_query.strip()}\n'
f'SQL:'
)
else:
prompt = (
f'Turn the following English sentences into valid SQLite statements.\n'
f'\n'
f'English: {english_query.strip()}\n'
f'SQL:'
)
response = cached_gpt3(prompt, use_cache=use_cache).strip()
return response
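# e.g. per the few-shot examples embedded in the prompt above, an input like
# "get all of the apples" is expected to come back as "SELECT * FROM apples;"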
def english2summary_name(english_query: str, use_cache: bool = True) -> str:
"""
Convert a query written in English to a short snake_case name
"""
prompt = '''Q: Show me all of the users
A: get all users
Q: Show me all the users ordered by age ascending
A: get all users ordered by age ascending
Q: Who is the oldest user?
A: get oldest user
Q: What are the names of the 10 youngest users?
A: get ten youngest users
Q: Who is the oldest person?
A: get oldest user
Q: get all the users that are located in the United Kingdom
A: get users from united kingdom
Q: get users of ages between 30 and 40?
A: get users ages between 30 and 40
Q: How many users are there?
A: get number users
Q: Where does user Betty live?
A: get betty location
Q: What is Jennifer's age?
A: get jennifer age
Q: the average age
A: get average user age
Q: the age of the oldest user
A: get age of oldest user
Q: the name of the oldest person
A: get name of oldest user
Q: the top 10 oldest users
A: get ten oldest users
Q: how many admin users are there?
A: get number admin users
Q: retrieve the email of the youngest admin
A: get email of youngest admin user
Q: get a user by their email
A: get user by email
Q: get all users' emails that live at a certain location
A: get users by location
Q: '''
prompt += english_query.strip()
prompt += '\nA:'
def valid_char(c: str) -> bool:
return c.isalnum() or c == ' ' or c == '_'
result = cached_gpt3(prompt, use_cache=use_cache)
result = ''.join(c for c in result if valid_char(c))
while len(result) > 0 and result[0].isnumeric():
result = result[1:]
if len(result) == 0:
result = 'untitled'
return '_'.join(result.strip().lower().split(' '))
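# e.g. following the prompt examples above, "get a user by their email" should map
# to "get user by email", which this function then returns as "get_user_by_email"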
def generate_endpoint(endpoint: Endpoint, schema_text: str, use_cache=True) -> str:
"""
Generate a FastAPI endpoint from an SQL query.
short_name: a name for the query in snake_case
sql_query: an SQL query
"""
sql_query = english2sql(endpoint.value, schema_text, use_cache=use_cache)
func_name = english2summary_name(endpoint.value)
inputs, outputs = parse_query(sql_query, schema_text)
template = '''
<<<RESPONSE_CLASS>>>
@app.<<<METHOD>>>("<<<URL>>>", response_model=<<<RESPONSE_MODEL>>>)
async def <<<FUNC_NAME>>>(<<<PARAMS>>>) -> <<<RESPONSE_MODEL>>>:
\'\'\'
<<<ENGLISH_QUERY>>>
\'\'\'
cur = con.cursor()
cur.execute('<<<SQL_QUERY>>>', <<<BINDINGS>>>)
res: List[Any] = []
output_names: List[str] = <<<OUTPUT_NAME_LIST>>>
for row in cur.fetchall():
row_dict = dict()
for k, v in zip(output_names, row):
row_dict[k] = v
res.append(row_dict)
con.commit()
<<<RETURN_STATEMENT>>>
'''
params = ', '.join(f'{c["name"]}: {c["type"]}' for c in inputs)
if len(inputs) > 0:
bindings = '(' + ', '.join(c["name"] for c in inputs) + ',)'
else:
bindings = ''
output_name_list = '[' + ', '.join([f'\'{c["name"]}\'' for c in outputs]) + ']'
if len(outputs) > 0:
response_model = f'List[OutputType_{func_name}]'
return_statement = 'return res'
response_class = \
f'class OutputType_{func_name}(BaseModel):\n' + \
' ' + '\n '.join(f'{c["name"]}: {c["type"]}' for c in outputs)
else:
response_model = 'None'
return_statement = 'return None'
response_class = ''
template = template.replace('<<<FUNC_NAME>>>', func_name)
template = template.replace('<<<URL>>>', endpoint.url)
template = template.replace('<<<METHOD>>>', endpoint.method.lower())
template = template.replace('<<<SQL_QUERY>>>', sql_query)
template = template.replace('<<<PARAMS>>>', params)
template = template.replace('<<<ENGLISH_QUERY>>>', endpoint.value)
template = template.replace('<<<BINDINGS>>>', bindings)
template = template.replace('<<<OUTPUT_NAME_LIST>>>', output_name_list)
template = template.replace('<<<RESPONSE_MODEL>>>', response_model)
template = template.replace('<<<RETURN_STATEMENT>>>', return_statement)
template = template.replace('<<<RESPONSE_CLASS>>>', response_class)
return template
def generate_app(title: str, endpoints: List[Endpoint], use_cache: bool = True):
code = '''
from typing import List, Union, Literal, Optional, Dict, Any
from fastapi import FastAPI, Path, HTTPException, Depends, status
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
import os
import sqlite3
app = FastAPI(
title="<<<TITLE>>>",
description="An API generated from English sentences",
version="0.1.0",
docs_url="/docs"
)
app.add_middleware(
CORSMiddleware,
allow_origins=['http://localhost:3000'],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
con = sqlite3.connect('<<<DB_NAME>>>')
'''
schema_text = get_db_schema_text(DB_NAME)
code = code.replace('<<<TITLE>>>', title)
code = code.replace('<<<DB_NAME>>>', DB_NAME)
for endpoint in endpoints:
endpoint_code = generate_endpoint(endpoint, schema_text)
code += endpoint_code
return code
def read_plain_txt(input_fn: str) -> Tuple[List[str], List[str]]:
"""
Parses the input_fn and returns a tuple of two lists of strings,
holding the migrations and queries respectively.
Formally,
(
[
<english str>,
...
],
[
<english str>,
...
]
)
"""
with open(input_fn, 'r') as f:
migrations = []
queries = []
mode = 'none'
for line in f:
stripped = line.strip()
if len(stripped) == 0:
continue
if stripped.lower() == '== migrations':
if mode != 'none':
raise ValueError(f'Invalid {input_fn}: The migrations section should appear first.')
mode = 'migrations'
elif stripped.lower() == '== queries':
if mode != 'migrations':
raise ValueError(f'Invalid {input_fn}: The queries section should appear after the migrations section.')
mode = 'queries'
elif stripped[0] == '#':
pass
else:
if mode == 'migrations':
migrations.append(stripped)
elif mode == 'queries':
queries.append(stripped)
else:
pass
return migrations, queries
def run_necessary_migrations(sql_migrations: List[str], english_migrations: List[str]):
"""
Given a list of all SQL migrations (can be any valid SQLite statements),
this function either determines that the given migrations are invalid
because old ones have been modified, or applies the new migrations.
Note: The given list should contain all migrations, not just new ones.
This function will check the English version, not the sql version.
"""
con = sqlite3.connect(DB_NAME)
cur = con.cursor()
cur.execute('''
SELECT name FROM sqlite_master WHERE type='table' AND name = '__plainapi_migrations';
''')
rows = cur.fetchall()
existing_migrations: List[Any] = []
if len(rows) == 0:
# create the table
cur.execute('''
CREATE TABLE __plainapi_migrations (
id INTEGER PRIMARY KEY AUTOINCREMENT,
sql_query VARCHAR(500) NOT NULL,
english_query VARCHAR(500) NOT NULL
);
''')
else:
cur.execute('''
SELECT sql_query, english_query FROM __plainapi_migrations ORDER BY id ASC;
''')
for sql_query, english_query in cur.fetchall():
existing_migrations.append({'sql': sql_query, 'english': english_query})
# ensure the existing migrations are correct
for a, b in zip(existing_migrations, english_migrations):
if a['english'] != b:
raise ValueError(f'Invalid previously applied migration (it has been changed):\n "{a["english"]}" -> "{b}"')
if len(sql_migrations) != len(english_migrations):
raise ValueError('Internal: The number of SQL migrations does not match the number of English migrations')
if len(existing_migrations) < len(sql_migrations):
print('Running migrations...')
for idx, (sql, english) in enumerate(zip(sql_migrations, english_migrations)):
if idx < len(existing_migrations):
pass
else:
print(f' ...{english}')
cur.execute(sql)
cur.execute('''
INSERT INTO __plainapi_migrations (sql_query, english_query) VALUES (?, ?);
''', (sql, english,))
print('All up to date.')
else:
print('No migrations to run.')
con.commit()
# def generate_app_old(input_fn: str = 'plain.txt', output_fn: str = 'api.py', use_cache: bool = True):
# """
# Given a file of English sentences, output a file containing a FastAPI web server.
# """
# english_migrations, english_queries = read_plain_txt(input_fn)
# print('Migrations:')
# for english_migration in english_migrations:
# print(f' -> {english_migration}')
# print('Queries:')
# for english_query in english_queries:
# print(f' -> {english_query}')
# sql_migrations = [english2sql(m, use_cache=use_cache) for m in english_migrations]
# run_necessary_migrations(sql_migrations, english_migrations)
# code = generate_app_from_english_queries('My API', english_queries)
# with open(output_fn, 'w') as f:
# f.write(code)
# command_to_run = 'uvicorn ' + output_fn.split('.')[0] + ':app'
# print(f'Successfully created API! Try running `{command_to_run}`')
# if __name__ == '__main__':
# with open('migrations.txt', 'r') as f:
# english_migrations = [m.strip() for m in f.read().split('\n\n')]
# sql_migrations = [english2sql(m) for m in english_migrations]
# run_necessary_migrations(sql_migrations, english_migrations)
```
#### File: backend/scripts/gen_queries.py
```python
import sys
import fire # type: ignore
from parse_sql import parse_query, parse_schema
from generate import get_db_schema_text
def generate_queries(queries_fn: str = './queries.sql', output_fn: str = './queries.py'):
schema_text = get_db_schema_text('./db/plainapi.sqlite3')
queries = []
with open(queries_fn, 'r') as f:
for line in f:
query = ''
for i in range(len(line)):
if i < len(line) - 1 and line[i:i+2] == '--':
break
query += line[i]
query = query.strip()
if len(query) > 0:
queries.append(query)
for query in queries:
if not query.startswith('DELETE') and not query.startswith('UPDATE'):
print(parse_query(query, schema_text=schema_text))
if __name__ == '__main__':
fire.Fire(generate_queries)
```
#### File: plainapi/old_stuff/parse.py
```python
from enum import Enum
def perror(msg):
print("Error:", msg)
quit()
def tokenize_sql(text):
l = 0
r = 0
special_tokens = [
'<>', '<=', '>=', '=', '<', '>', '!=',
'(', ')', ';', '+', '-', '*', '/', '\'',
'.',
]
while r < len(text):
# skip whitespace
while r < len(text) and text[r].isspace():
r += 1
# EOF
if r >= len(text):
pass
# word token
elif text[r].isalpha():
l = r
r += 1
while r < len(text) and text[r].isalnum():
r += 1
yield text[l:r]
# number token
elif text[r].isnumeric() and text[r] != '0':
l = r
r += 1
while r < len(text) and text[r].isnumeric():
r += 1
yield text[l:r]
# special token
elif any(text[r:].startswith(tok) for tok in special_tokens):
l = r
for tok in special_tokens:
if text[r:].startswith(tok):
r = l + len(tok)
yield text[l:r]
break
else:
perror("Invalid token at TODO")
class NodeKind(Enum):
Select = 'Select'
SelectExpression = 'SelectExpression'
Token = 'Token'
TableExpression = 'TableExpression'
Name = 'Name'
Expression = 'Expression'
AndCondition = 'AndCondition'
Condition = 'Condition'
Boolean = 'Boolean'
Int = 'Int'
Decimal = 'Decimal'
Number = 'Number'
Numeric = 'Numeric'
Value = 'Value'
Term = 'Term'
Factor = 'Factor'
Summand = 'Summand'
Operand = 'Operand'
Long = 'Long'
ColumnRef = 'ColumnRef'
Compare = 'Compare'
Null = 'Null'
String = 'String'
class Node:
def __init__(self, name, kind, children=None):
self.name = name
self.kind = kind
if children is None:
self.children = []
else:
self.children = children
def consume_token(tokens, idx):
if idx == len(tokens):
return None, idx
else:
return tokens[idx], idx + 1
def parse_quoted_name(tokens, idx):
print('TODO implement quoted name')
return None, idx
def parse_name(tokens, idx):
child, idx = parse_quoted_name(tokens, idx)
if child:
return child
else:
tok, idx = consume_token(tokens, idx)
if not tok:
perror("Expected token")
elif not (tok[0].isalpha() or tok[0] == '_'):
idx -= 1
return None, idx
else:
for c in tok[1:]:
if not (c.isalnum() or c == '_'):
idx -= 1
return None, idx
else:
pass
node = Node(name=tok, kind=NodeKind.Name)
return node, idx
def parse_null(tokens, idx):
tok, idx = consume_token(tokens, idx)
if not tok:
return None, idx
elif tok.upper() == 'NULL':
node = Node(name=tok, kind=NodeKind.Null)
return node, idx
else:
idx -= 1
return None, idx
def parse_boolean(tokens, idx):
tok, idx = consume_token(tokens, idx)
if not tok:
return None, idx
elif tok.upper() in ['TRUE', 'FALSE']:
node = Node(name=tok, kind=NodeKind.Boolean)
return node, idx
else:
idx -= 1
return None, idx
def parse_number(tokens, idx):
tok, idx = consume_token(tokens, idx)
if not tok:
return None, idx
elif tok.isnumeric():
node = Node(name=tok, kind=NodeKind.Number)
return node, idx
else:
idx -= 1
return None, idx
def parse_decimal(tokens, idx):
tok, idx = consume_token(tokens, idx)
node = Node(name='', kind=NodeKind.Decimal)
if tok == '-':
child = Node(name=tok, kind=NodeKind.Token)
node.children.append(child)
multiplier = -1
else:
idx -= 1
multiplier = 1
child, idx = parse_number(tokens, idx)
if child:
node.children.append(child)
tok, idx = consume_token(tokens, idx)
if not tok:
pass
elif tok == '.':
child = Node(name=tok, kind=NodeKind.Token)
node.children.append(child)
child, idx = parse_number(tokens, idx)
if child:
node.children.append(child)
else:
perror("Expected number following . in decimal")
else:
idx -= 1
return node, idx
else:
return None, idx
def parse_long(tokens, idx):
tok, idx = consume_token(tokens, idx)
node = Node(name='', kind=NodeKind.Long)
if tok == '-':
child = Node(name=tok, kind=NodeKind.Token)
node.children.append(child)
multiplier = -1
else:
idx -= 1
multiplier = 1
child, idx = parse_number(tokens, idx)
if child and -9223372036854775808 <= multiplier * int(child.name) <= 9223372036854775807:
node.children.append(child)
return node, idx
else:
return None, idx
def parse_int(tokens, idx):
tok, idx = consume_token(tokens, idx)
node = Node(name='', kind=NodeKind.Int)
if tok == '-':
child = Node(name=tok, kind=NodeKind.Token)
node.children.append(child)
multiplier = -1
else:
idx -= 1
multiplier = 1
child, idx = parse_number(tokens, idx)
if child and -2147483648 <= multiplier * int(child.name) <= 2147483647:
node.children.append(child)
return node, idx
else:
return None, idx
def parse_numeric(tokens, idx):
child, idx = parse_int(tokens, idx)
if child:
node = Node(name='', kind=NodeKind.Numeric)
node.children.append(child)
return node, idx
else:
child, idx = parse_long(tokens, idx)
if child:
node = Node(name='', kind=NodeKind.Numeric)
node.children.append(child)
return node, idx
else:
child, idx = parse_decimal(tokens, idx)
if child:
node = Node(name='', kind=NodeKind.Numeric)
node.children.append(child)
return node, idx
else:
return None, idx
def parse_string(tokens, idx):
tok, idx = consume_token(tokens, idx)
if tok[0] == '\'' and tok[-1] == '\'':
node = Node(name=tok, kind=NodeKind.String)
return node, idx
else:
idx -= 1
return None, idx
def parse_value(tokens, idx):
child, idx = parse_string(tokens, idx)
node = Node(name='', kind=NodeKind.Value)
if child:
node.children.append(child)
return node, idx
else:
child, idx = parse_numeric(tokens, idx)
if child:
node.children.append(child)
return node, idx
else:
child, idx = parse_boolean(tokens, idx)
if child:
node.children.append(child)
return node, idx
else:
child, idx = parse_null(tokens, idx)
if child:
node.children.append(child)
return node, idx
else:
return None, idx
return None, idx
def parse_column_ref(tokens, idx):
child, idx = parse_name(tokens, idx)
node = Node(name='', kind=NodeKind.ColumnRef)
if child:
# TODO
print('TODO: family name')
node.children.append(child)
return node, idx
else:
return None, idx
def parse_term(tokens, idx):
child, idx = parse_value(tokens, idx)
node = Node(name='', kind=NodeKind.Term)
if child:
node.children.append(child)
return node, idx
else:
print('TODO term: there are a lot of other cases that have not been implemented')
child, idx = parse_column_ref(tokens, idx)
if child:
node.children.append(child)
return node, idx
else:
print('TODO term: there are a lot of other cases that have not been implemented')
return None, idx
def parse_factor(tokens, idx):
child, idx = parse_term(tokens, idx)
if child:
node = Node(name='', kind=NodeKind.Factor)
node.children.append(child)
while True:
tok, idx = consume_token(tokens, idx)
if not tok:
return node, idx
elif tok in ['*', '/']:
child = Node(name=tok, kind=NodeKind.Token)
node.children.append(child)
child, idx = parse_term(tokens, idx)
if child:
node.children.append(child)
else:
perror('Expected factor after + or - token')
else:
idx -= 1
return node, idx
else:
return None, idx
def parse_summand(tokens, idx):
child, idx = parse_factor(tokens, idx)
if child:
node = Node(name='', kind=NodeKind.Summand)
node.children.append(child)
while True:
tok, idx = consume_token(tokens, idx)
if not tok:
return node, idx
elif tok in ['+', '-']:
child = Node(name=tok, kind=NodeKind.Token)
node.children.append(child)
child, idx = parse_factor(tokens, idx)
if child:
node.children.append(child)
else:
perror('Expected factor after + or - token')
else:
idx -= 1
return node, idx
else:
return None, idx
def parse_operand(tokens, idx):
child, idx = parse_summand(tokens, idx)
if child:
node = Node(name='', kind=NodeKind.Operand)
node.children.append(child)
while True:
tok, idx = consume_token(tokens, idx)
if not tok:
return node, idx
elif tok == '||':
child = Node(name=tok, kind=NodeKind.Token)
node.children.append(child)
child, idx = parse_summand(tokens, idx)
if child:
node.children.append(child)
else:
perror('Expected summand after ||')
else:
idx -= 1
return node, idx
else:
return None, idx
def parse_compare(tokens, idx):
compare_tokens = ['<>', '<=', '>=', '=', '<', '>', '!=']
tok, idx = consume_token(tokens, idx)
if tok in compare_tokens:
node = Node(name=tok, kind=NodeKind.Compare)
return node, idx
else:
idx -= 1
return None, idx
def parse_condition(tokens, idx):
child, idx = parse_operand(tokens, idx)
if child:
node = Node(name='', kind=NodeKind.Condition)
node.children.append(child)
child, idx = parse_compare(tokens, idx)
if child:
node.children.append(child)
child, idx = parse_operand(tokens, idx)
if child:
node.children.append(child)
return node, idx
else:
perror('Expected operand after compare')
else:
tok, idx = consume_token(tokens, idx)
if not tok:
print('TODO!') # TODO
elif tok.upper() == 'IN':
print('TODO!') # TODO
elif tok.upper() == 'LIKE':
print('TODO!') # TODO
elif tok.upper() == 'BETWEEN':
print('TODO!') # TODO
elif tok.upper() == 'IS':
print('TODO!') # TODO
elif tok.upper() == 'NOT':
print('TODO!') # TODO
else:
perror('Expected one of IN, LIKE, BETWEEN, IS, or NOT after operand')
return node, idx
else:
tok, idx = consume_token(tokens, idx)
if not tok:
return None, idx
elif tok.upper() == 'NOT':
child = Node(name='NOT', kind=NodeKind.Token)
node.children.append(child)
child, idx = parse_expression(tokens, idx)
if child:
node.children.append(child)
return node, idx
else:
perror("Expected expression after NOT")
elif tok == '(':
child, idx = parse_expression(tokens, idx)
if child:
node.children.append(child)
tok, idx = consume_token(tokens, idx)
if tok == ')':
child = Node(name=tok, kind=NodeKind.Token)
node.children.append(child)
return node, idx
else:
perror("Expected closing paren after expression")
else:
perror('Expected expression after \'(\'.')
else:
idx -= 1
return None, idx
def parse_and_condition(tokens, idx):
child, idx = parse_condition(tokens, idx)
if not child:
return None, idx
else:
node = Node(name='', kind=NodeKind.AndCondition)
node.children.append(child)
while True:
tok, idx = consume_token(tokens, idx)
if not tok:
return node, idx
elif tok.upper() == 'AND':
child, idx = parse_condition(tokens, idx)
if child:
node.children.append(child)
else:
perror("Expected condition")
else:
return node, idx
def parse_expression(tokens, idx):
child, idx = parse_and_condition(tokens, idx)
if child:
node = Node(name='', kind=NodeKind.Expression)
node.children.append(child)
while True:
tok, idx = consume_token(tokens, idx)
if not tok:
return node, idx
elif tok.upper() == 'OR':
child, idx = parse_and_condition(tokens, idx)
if child:
node.children.append(child)
else:
perror("Expected and_condition")
else:
idx -= 1
return node, idx
else:
return None, idx
def parse_table_expression(tokens, idx):
node = Node(name='', kind=NodeKind.TableExpression)
child, idx = parse_name(tokens, idx)
print('TODO: there are some more cases to check here')
# TODO
if child:
node.children.append(child)
return node, idx
else:
return None, idx
def parse_alias(tokens, idx):
# TODO: alias parsing was never implemented in this old parser; treat as no match
return None, idx
def parse_select_expression(tokens, idx):
tok, idx = consume_token(tokens, idx)
node = Node(name='', kind=NodeKind.SelectExpression)
if not tok:
perror("Expected token")
elif tok == '*':
child = Node(name='*', kind=NodeKind.Token)
node.children.append(child)
elif tok == '(':
perror('Column families are not yet supported')
else:
idx -= 1
child, idx = parse_term(tokens, idx)
if child:
tok, idx = consume_token(tokens, idx)
if not tok:
return node, idx
elif tok.upper() == 'AS':
child = Node(name=tok, kind=NodeKind.Token)
node.children.append(child)
else:
pass
else:
return None, idx
def parse_select(tokens, idx):
tok = tokens[idx]
idx += 1
if tok.upper() != 'SELECT':
idx -= 1
return None, idx
node = Node(name='SELECT', kind=NodeKind.Select)
print('TODO parse hint') # TODO
tok, idx = consume_token(tokens, idx)
if not tok:
perror("Expected token")
if tok.upper() == 'DISTINCT':
child = Node(name='DISTINCT', kind=NodeKind.Token)
node.children.append(child)
elif tok.upper() == 'ALL':
child = Node(name='ALL', kind=NodeKind.Token)
node.children.append(child)
else:
idx -= 1
tok, idx = consume_token(tokens, idx)
if not tok:
perror("Expected token")
child, idx = parse_select_expression(tokens, idx)
if child:
node.children.append(child)
else:
perror('Expected select expression')
# TODO: potential repeats
if tok.upper() == 'FROM':
child = Node(name='FROM', kind=NodeKind.Token)
node.children.append(child)
child, idx = parse_table_expression(tokens, idx)
if child:
node.children.append(child)
else:
perror("Expected table expression")
else:
perror("Expected FROM token")
# TODO: optional column def
tok, idx = consume_token(tokens, idx)
if not tok:
pass
else:
if tok.upper() == 'WHERE':
child = Node(name='token', kind=NodeKind.Token)
child, idx = parse_expression(tokens, idx)
if child:
node.children.append(child)
else:
perror("Expected expression following WHERE token")
else:
idx -= 1
return node, idx
def print_tree(node, depth=0):
if depth > 10:
return
print('| '*depth, end='')
print(f'"{node.name}", {node.kind}')
for child in node.children:
print_tree(child, depth+1)
```
#### File: plainapi/plainapi/generate_python.py
```python
from typing import TypedDict
import os
from dotenv import load_dotenv
import openai # type: ignore
from plainapi.parse_endpoint import Endpoint
from plainapi.parse_application import Application
from plainapi.parse_code import AssignmentStatement, CodeBlock, ExceptionStatement, IfStatement, OutputStatement, PythonStatement, SQLStatement
load_dotenv('./.env')
OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')
if OPENAI_API_KEY is None:
raise ValueError('Expected OPENAI_API_KEY environment variable to be set or in `.env` file.')
openai.api_key = OPENAI_API_KEY
def url2endpoint_function_name(url: str) -> str:
clean = ''.join(c if c.isalnum() else ' ' for c in url)
parts = [s.strip() for s in clean.split(' ') if s.strip() != '']
return 'endpoint_' + '_'.join(parts)
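# e.g. url2endpoint_function_name('/users/login') -> 'endpoint_users_login'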
def string2variable_name(s: str) -> str:
clean = ''.join(c if c.isalnum() else ' ' for c in s)
parts = [s.strip() for s in clean.split(' ') if s.strip() != '']
return '_'.join(parts)
def escape_quotes(s: str) -> str:
o = ''
for c in s:
if c == '\'':
o += '\\\''
elif c == '\"':
o += '\\\"'
else:
o += c
return o
def generate_output_statement(block: OutputStatement, indent=0):
tab = ' ' * indent
value = block['value']
return tab + 'return ' + value + '\n'
def generate_sql_statement(block: SQLStatement):
sql = block['sql']
return f'execute({sql})'
def generate_exception_statement(block: ExceptionStatement, indent=0):
tab = ' ' * indent
code = block['code'] or 400
message = block['message'] or 'An error has occurred.'
return f'{tab}raise HTTPException(status_code={code}, detail={message})\n'
def generate_python_statement(block: PythonStatement):
return block['code']
def generate_assigment_statement(block: AssignmentStatement, indent=0):
tab = ' ' * indent
variable_name = string2variable_name(block['name'])
value = block['value']
if value['type'] == 'python':
value_string = generate_python_statement(value)
else:
value_string = generate_sql_statement(value)
return f'{tab}{variable_name} = {value_string}\n'
def generate_if_statement(block: IfStatement, indent=0):
tab = ' ' * indent
condition = block['condition']['code']
comment = block['condition']['original']
text = tab + 'if ' + condition + ': # ' + comment.strip() + '\n'
text += generate_code_block(block['case_true'], indent=indent + 4)
if block['case_false'] is not None:
text += tab + 'else:\n'
text += generate_code_block(block['case_false'], indent=indent + 4)
return text
def generate_code_block(block: CodeBlock, indent=0) -> str:
tab = ' ' * indent
text = ''
for line in block:
if line['type'] == 'if':
text += generate_if_statement(line, indent)
elif line['type'] == 'function_call':
text += tab + 'function_call\n'
elif line['type'] == 'exception':
text += generate_exception_statement(line, indent)
elif line['type'] == 'assignment':
text += generate_assigment_statement(line, indent)
elif line['type'] == 'python':
text += tab + '<python>\n'
elif line['type'] == 'sql':
text += generate_sql_statement(line) + '\n'
elif line['type'] == 'output':
text += generate_output_statement(line, indent)
return text
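# Sketch of what these generators emit, using a hand-built dict that mirrors the
# keys accessed above (the real TypedDicts come from plainapi.parse_code):
#
#   generate_if_statement({
#       'type': 'if',
#       'condition': {'code': 'user is None', 'original': 'the user does not exist'},
#       'case_true': [{'type': 'exception', 'code': 404, 'message': '"User not found."'}],
#       'case_false': None,
#   })
#
# produces roughly:
#
#   if user is None: # the user does not exist
#       raise HTTPException(status_code=404, detail="User not found.")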
def generate_endpoint(endpoint: Endpoint, schema_text: str, use_cache=True) -> str:
"""
Generate a FastAPI endpoint from an SQL query.
short_name: a name for the query in snake_case
sql_query: an SQL query
"""
func_name = url2endpoint_function_name(endpoint['header']['url'])
url = endpoint['header']['url'].lower()
method = endpoint['header']['method'].lower()
params = ', '.join(i['name'] + ': ' + (i['type'] if i['name'] != 'current_user' else 'User = Depends(get_current_user)') for i in endpoint['requirements']['inputs'])
implementation = generate_code_block(endpoint['implementation'], indent=4)
code = \
f'''@app.{method}("{url}", response_model=None)
async def {func_name}({params}) -> None:
{implementation}'''
return code
def generate_app(application: Application, schema_text: str, db_name: str, host: str = 'localhost', port: int = 3000):
title = application['title']
code_for_endpoints = '\n\n'.join(generate_endpoint(endpoint, schema_text) for endpoint in application['endpoints'])
code = \
f'''from typing import List, Union, Literal, Optional, Dict, Any
from fastapi import FastAPI, Path, HTTPException, Depends, status
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
from fastapi.security import OAuth2PasswordBearer, OAuth2PasswordRequestForm
from jose import jwt, JWTError
import os
import sqlite3
ALGORITHM = "HS256"
SECRET_KEY = "TODO"
app = FastAPI(
title="{title}",
description="An API generated from English sentences",
version="0.1.0",
docs_url="/docs"
)
app.add_middleware(
CORSMiddleware,
allow_origins=['http://{host}:{port}'],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
oauth2_scheme = OAuth2PasswordBearer(tokenUrl="/users/login")
class User(BaseModel):
id: int
email: str
is_admin: bool
def sql(query: str, params: Any):
# TODO
return None
async def get_current_user(token: str = Depends(oauth2_scheme)) -> User:
credentials_exception = HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Could not validate credentials",
headers={{"WWW-Authenticate": "Bearer"}},
)
try:
payload = jwt.decode(token, SECRET_KEY, algorithms=[ALGORITHM])
except JWTError:
raise credentials_exception
email: Optional[str] = payload.get("sub")
if email is None:
raise credentials_exception
user = sql("select * from users where email = ?", [email])
if user is None:
raise credentials_exception
return user
con = sqlite3.connect('{db_name}')
{code_for_endpoints}'''
return code
```
#### File: plainapi/plainapi/plainapi.py
```python
from typing import Optional
import os
from dotenv import load_dotenv
import configparser
import argparse
from plainapi.utils import get_db_schema_text
from plainapi.generate_python import generate_app
from plainapi.parse_application import parse_application
def main():
parser = argparse.ArgumentParser(description='Generate web APIs with plain English.')
parser.add_argument('command', choices=['init', 'gen', 'start', 'restart'], help='Base command')
settings_filename = 'plain.ini'
args = parser.parse_args()
if args.command == 'gen':
if not os.path.exists(settings_filename):
raise ValueError(f'Could not find settings file: {settings_filename}')
config = configparser.ConfigParser()
config.read(settings_filename)
def read_setting(name: str) -> Optional[str]:
settings = config['default']
if name in settings:
return settings[name]
return None
endpoints_filename = read_setting('endpoints_filename') or 'endpoints.plain'
migrations_filename = read_setting('migrations_filename') or 'migrations.plain'
functions_filename = read_setting('functions_filename') or 'functions.plain'
target_filename = read_setting('target_filename') or 'app.py'
db_name = read_setting('db_name') or 'my-app.sqlite3'
host = read_setting('host') or 'localhost'
port = read_setting('port') or '3000'
port = int(port)
if not os.path.exists(endpoints_filename):
raise ValueError(f'Could not find endpoints file: {endpoints_filename}')
if not os.path.exists(migrations_filename):
raise ValueError(f'Could not find migrations file: {migrations_filename}')
if not os.path.exists(functions_filename):
raise ValueError(f'Could not find functions file: {functions_filename}')
with open(endpoints_filename, 'r') as f:
endpoints_code = f.read()
with open(migrations_filename, 'r') as f:
migrations_code = f.read()
with open(functions_filename, 'r') as f:
functions_code = f.read()
schema_text = get_db_schema_text(db_name)
application = parse_application(endpoints_code=endpoints_code,
functions_code=functions_code,
schema_text=schema_text)
code = generate_app(application=application,
schema_text=schema_text,
db_name=db_name,
host=host,
port=port)
with open(target_filename, 'w') as f:
f.write(code)
else:
raise ValueError(f'Command \'{args.command}\' is not implemented yet!')
if __name__ == '__main__':
main()
``` |
{
"source": "jmerizia/sqlgood",
"score": 3
} |
#### File: sqlgood/tests/test_postgresql.py
```python
import unittest
import psycopg2
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT
import os
from sql2py.generate import generate
dbname_entry = 'chad'
dbname_test = 'test'
user = 'test'
password = '<PASSWORD>'
def test_code_equal(expected: str, actual: str):
a = expected
b = actual
lines_a, lines_b = a.split('\n'), b.split('\n')
for line_idx, (line_a, line_b) in enumerate(zip(lines_a, lines_b)):
for col_idx, (c_a, c_b) in enumerate(zip(line_a, line_b)):
if c_a != c_b:
raise ValueError(
f'Code does not match (line {line_idx}, col {col_idx}):\n' +
f' {line_b}\n' +
f' ' + (f' ' * col_idx) + '^\n'
)
if len(line_a) > len(line_b):
l = len(line_b)
raise ValueError(
f'Unexpected end of line (line {line_idx}, col {l}):\n' +
f' {line_b}\n' +
f' ' + (f' ' * l) + '^\n'
)
if len(line_a) < len(line_b):
l = len(line_a)
raise ValueError(
f'Expected end of line (line {line_idx}, col {l}):\n' +
f' {line_b}\n' +
f' ' + (f' ' * l) + '^\n'
)
if len(lines_a) > len(lines_b):
raise ValueError(f'Unexpected end of file\n')
if len(lines_a) < len(lines_b):
raise ValueError(f'Expected end of file\n')
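# Minimal usage sketch (strings are made up, not from the real fixtures):
#
#   test_code_equal("def f():\n    pass", "def f():\n    pass")  # passes silently
#   test_code_equal("def f():", "def g():")                      # raises ValueError at line 0, col 4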
class TestPostgresql(unittest.TestCase):
def setUp(self):
# create test db
self.con = psycopg2.connect(f'dbname={dbname_entry} user={user} password={password}')
self.con.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
self.cur = self.con.cursor()
self.cur.execute('create database test;')
self.con.close()
# create tables in test db
self.con = psycopg2.connect(f'dbname={dbname_test} user={user} password={password}')
self.con.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
self.cur = self.con.cursor()
self.cur.execute('''
CREATE TABLE users (
id INTEGER PRIMARY KEY,
email TEXT,
nickname TEXT,
age INTEGER,
is_admin BOOLEAN
);
''')
def tearDown(self):
# close the test db
self.con.close()
# delete the text db from the entry db
self.con = psycopg2.connect(f'dbname={dbname_entry} user={user} password={password}')
self.con.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
self.cur = self.con.cursor()
self.cur.execute('drop database test;')
self.con.close()
def test_generate_code(self):
with open('tests/queries.sql', 'r') as f:
queries_sql = f.read()
with open('tests/queries.py', 'r') as f:
queries_py = f.read()
generated_py = generate(queries_sql, dbname=dbname_test, user=user, password=password)
test_code_equal(queries_py, generated_py)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jmerkin/rnaseq",
"score": 3
} |
#### File: rnaseq/scripts/exon_intron_ratio_processing.py
```python
import pandas as pd
import numpy as np
import sys
def single_gene(gene):
if gene.intron.all():
return None
gene = gene.groupby('intron').sum()
retval = pd.Series([gene.loc[False, 'TPM'], gene.loc[False, 'TPM'] / (1e-3 + gene.loc[True, 'TPM'])], index=['total', 'ratio'] )
return retval
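# Illustrative sketch of what single_gene computes for one gene (made-up numbers):
# with exonic TPM = 10.0 and intronic TPM = 1.0 the result is
#   total = 10.0 and ratio = 10.0 / (1e-3 + 1.0), roughly 9.99
# Genes whose rows are all intronic are dropped (None is returned).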
infile = sys.argv[1]
df = pd.read_csv(infile, sep='\t', header=0, index_col=0)
df['gene'] = df.index.str.split(':').str.get(0)
df['intron'] = df.index.str.endswith('intron')
results = df.groupby('gene').apply(single_gene)
results = results.sort_values('total')
ratios = np.log2(results.iloc[results.shape[0] - 1100:results.shape[0] - 100].ratio)
mean = 2 ** ratios.mean()
median = 2 ** ratios.median()
open(infile.replace('quant.sf', 'mean_intron_ratio'), 'w').write('%.2f' % mean)
open(infile.replace('quant.sf', 'median_intron_ratio'), 'w').write('%.2f' % median)
#import IPython ; IPython.embed()
``` |
{
"source": "jmerkow/segmentation_models.pytorch",
"score": 3
} |
#### File: segmentation_models/common/blocks.py
```python
import torch.nn as nn
class Conv2dReLU(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, padding=0,
stride=1, use_batchnorm=True,
activation='relu',
negative_slope=1e-2,
**batchnorm_params):
        if activation == 'relu':
            self._act_fn = lambda: nn.ReLU(inplace=True)
        elif activation == 'leaky':
            self._act_fn = lambda: nn.LeakyReLU(negative_slope=negative_slope, inplace=True)
        else:
            raise ValueError(f"Conv2dReLU: unsupported activation {activation!r}")
super().__init__()
layers = [
nn.Conv2d(in_channels, out_channels, kernel_size,
stride=stride, padding=padding, bias=not (use_batchnorm)),
self._act_fn(),
]
if use_batchnorm:
layers.insert(1, nn.BatchNorm2d(out_channels, **batchnorm_params))
self.block = nn.Sequential(*layers)
def forward(self, x):
return self.block(x)
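# Usage sketch (shapes are illustrative; requires `import torch` for the dummy input):
#
#   block = Conv2dReLU(3, 16, kernel_size=3, padding=1)
#   y = block(torch.randn(1, 3, 64, 64))   # -> torch.Size([1, 16, 64, 64])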
```
#### File: segmentation_models/decoders/__init__.py
```python
from .deeplab import DeepLabDecoder
from .fpn import FPNDecoder
from .linknet import LinknetDecoder
from .pspnet import PSPDecoder
from .unet import UnetDecoder
from .unetpp import UNetPPDecoder
def get_decoder_cls(decoder_name):
map = {
'FPN': FPNDecoder,
'UNET': UnetDecoder,
'LINK': LinknetDecoder,
'PSP': PSPDecoder,
'DEEPLAB': DeepLabDecoder,
'UNETPP': UNetPPDecoder,
}
return map[decoder_name.upper()]
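# Usage sketch: lookups are case-insensitive, e.g.
#   decoder_cls = get_decoder_cls('unet')      # -> UnetDecoder
#   decoder_cls = get_decoder_cls('DeepLab')   # -> DeepLabDecoder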
```
#### File: segmentation_models/encoders/efficientnet.py
```python
from efficientnet_pytorch.model import EfficientNet
from efficientnet_pytorch.utils import get_model_params, relu_fn, url_map
backbone_indices = {
'efficientnet-b0': [0, 2, 4, 10],
'efficientnet-b1': [1, 4, 7, 15],
'efficientnet-b2': [1, 4, 7, 15],
'efficientnet-b3': [1, 4, 7, 17],
'efficientnet-b4': [1, 5, 9, 21],
'efficientnet-b5': [2, 7, 12, 26],
'efficientnet-b6': [2, 8, 14, 30],
'efficientnet-b7': [3, 10, 17, 37]
}
class EfficientNetEncoder(EfficientNet):
def __init__(self, model_name, **override_params):
blocks_args, global_params = get_model_params(model_name, override_params)
self.backbone_indices = backbone_indices[model_name]
super().__init__(blocks_args, global_params)
del self._fc
def forward(self, inputs):
""" Returns output of the final convolution layer """
backbone_indices = getattr(self, 'backbone_indices', None)
if not backbone_indices:
raise ValueError('no backbone indices, something went wrong!')
# Stem
x = relu_fn(self._bn0(self._conv_stem(inputs)))
features = []
# Blocks
for idx, block in enumerate(self._blocks):
drop_connect_rate = self._global_params.drop_connect_rate
if drop_connect_rate:
drop_connect_rate *= float(idx) / len(self._blocks)
x = block(x, drop_connect_rate=drop_connect_rate)
if idx in backbone_indices:
features.insert(0, x)
# Head
x = relu_fn(self._bn1(self._conv_head(x)))
features.insert(0, x)
return features
def load_state_dict(self, state_dict, **kwargs):
state_dict.pop('_fc.bias')
state_dict.pop('_fc.weight')
super().load_state_dict(state_dict, **kwargs)
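# Usage sketch (assumes efficientnet_pytorch is installed and `import torch` is
# available; channel counts follow the `out_shapes` table below):
#
#   encoder = EfficientNetEncoder('efficientnet-b0')
#   feats = encoder(torch.randn(1, 3, 224, 224))
#   # feats is a list of 5 feature maps, deepest first (1280 channels for b0)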
pretrained_settings = {'efficientnet-b0': {'imagenet': {'url': url_map['efficientnet-b0'],
'input_space': 'RGB',
'input_size': [3, 244, 244],
'input_range': [0, 1],
'mean': [0.485, 0.456, 0.406],
'std': [0.229, 0.224, 0.225],
'num_classes': 1000,
'scale': 0.8975}},
'efficientnet-b1': {'imagenet': {'url': url_map['efficientnet-b1'],
'input_space': 'RGB',
'input_size': [3, 244, 244],
'input_range': [0, 1],
'mean': [0.485, 0.456, 0.406],
'std': [0.229, 0.224, 0.225],
'num_classes': 1000,
'scale': 0.8975}},
'efficientnet-b2': {'imagenet': {'url': url_map['efficientnet-b2'],
'input_space': 'RGB',
'input_size': [3, 244, 244],
'input_range': [0, 1],
'mean': [0.485, 0.456, 0.406],
'std': [0.229, 0.224, 0.225],
'num_classes': 1000,
'scale': 0.8975}},
'efficientnet-b3': {'imagenet': {'url': url_map['efficientnet-b3'],
'input_space': 'RGB',
'input_size': [3, 244, 244],
'input_range': [0, 1],
'mean': [0.485, 0.456, 0.406],
'std': [0.229, 0.224, 0.225],
'num_classes': 1000,
'scale': 0.8975}},
'efficientnet-b4': {'imagenet': {'url': url_map['efficientnet-b4'],
'input_space': 'RGB',
'input_size': [3, 244, 244],
'input_range': [0, 1],
'mean': [0.485, 0.456, 0.406],
'std': [0.229, 0.224, 0.225],
'num_classes': 1000,
'scale': 0.8975}},
'efficientnet-b5': {'imagenet': {'url': url_map['efficientnet-b5'],
'input_space': 'RGB',
'input_size': [3, 244, 244],
'input_range': [0, 1],
'mean': [0.485, 0.456, 0.406],
'std': [0.229, 0.224, 0.225],
'num_classes': 1000,
'scale': 0.8975}},
'efficientnet-b6': {'imagenet': {'url': url_map['efficientnet-b6'],
'input_space': 'RGB',
'input_size': [3, 244, 244],
'input_range': [0, 1],
'mean': [0.485, 0.456, 0.406],
'std': [0.229, 0.224, 0.225],
'num_classes': 1000,
'scale': 0.8975}},
'efficientnet-b7': {'imagenet': {'url': url_map['efficientnet-b7'],
'input_space': 'RGB',
'input_size': [3, 244, 244],
'input_range': [0, 1],
'mean': [0.485, 0.456, 0.406],
'std': [0.229, 0.224, 0.225],
'num_classes': 1000,
'scale': 0.8975}}}
efficientnet_encoders = {'efficientnet-b0': {'encoder': EfficientNetEncoder,
'out_shapes': [1280, 112, 40, 24, 16],
'pretrained_settings': pretrained_settings['efficientnet-b0'],
'params': {'model_name': 'efficientnet-b0'}},
'efficientnet-b1': {'encoder': EfficientNetEncoder,
'out_shapes': [1280, 112, 40, 24, 16],
'pretrained_settings': pretrained_settings['efficientnet-b1'],
'params': {'model_name': 'efficientnet-b1'}},
'efficientnet-b2': {'encoder': EfficientNetEncoder,
'out_shapes': [1408, 120, 48, 24, 16],
'pretrained_settings': pretrained_settings['efficientnet-b2'],
'params': {'model_name': 'efficientnet-b2'}},
'efficientnet-b3': {'encoder': EfficientNetEncoder,
'out_shapes': [1536, 136, 48, 32, 24],
'pretrained_settings': pretrained_settings['efficientnet-b3'],
'params': {'model_name': 'efficientnet-b3'}},
'efficientnet-b4': {'encoder': EfficientNetEncoder,
'out_shapes': [1792, 160, 56, 32, 24],
'pretrained_settings': pretrained_settings['efficientnet-b4'],
'params': {'model_name': 'efficientnet-b4'}},
'efficientnet-b5': {'encoder': EfficientNetEncoder,
'out_shapes': [2048, 176, 64, 40, 24],
'pretrained_settings': pretrained_settings['efficientnet-b5'],
'params': {'model_name': 'efficientnet-b5'}},
'efficientnet-b6': {'encoder': EfficientNetEncoder,
'out_shapes': [2304, 200, 72, 40, 32],
'pretrained_settings': pretrained_settings['efficientnet-b6'],
'params': {'model_name': 'efficientnet-b6'}},
'efficientnet-b7': {'encoder': EfficientNetEncoder,
'out_shapes': [2560, 224, 80, 48, 32],
'pretrained_settings': pretrained_settings['efficientnet-b7'],
'params': {'model_name': 'efficientnet-b7'}}}
``` |
{
"source": "jmerle/battlecode-2022",
"score": 2
} |
#### File: battlecode-2022/scripts/run.py
```python
import re
import signal
import subprocess
import sys
from datetime import datetime
from multiprocessing import Pool, Value
from pathlib import Path
def run_matches(bot1, bot2, maps, timestamp):
result = {
"bot1": bot1,
"bot2": bot2
}
winners_by_map = {}
current_map = None
args = [
str(Path(__file__).parent.parent / "gradlew"),
"run",
f"-PteamA={bot1}",
f"-PteamB={bot2}",
f"-Pmaps={','.join(maps)}",
f"-PreplayPath=replays/run-{timestamp}-%TEAM_A%-vs-%TEAM_B%.bc22"
]
proc = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
lines = []
while True:
line = proc.stdout.readline()
if not line:
break
line = line.decode("utf-8").rstrip()
lines.append(line)
map_match = re.search(r"[^ ]+ vs\. [^ ]+ on ([^ ]+)", line)
if map_match is not None:
current_map = map_match[1]
result_match = re.search(r"([^ ]+) \([AB]\) wins \(round (\d+)\)", line)
if result_match is not None:
global counter
with counter.get_lock():
counter.value += 1
current_match = counter.value
total_matches = len(maps) * 2
prefix = f"[{str(current_match).rjust(len(str(total_matches)))}/{total_matches}]"
winner_color = "red" if result_match[1] == bot1 else "blue"
print(f"{prefix} {result_match[1]} wins in {result_match[2]} rounds as {winner_color} on {current_map}")
winners_by_map[current_map] = result_match[1]
if proc.wait() != 0:
result["type"] = "error"
result["message"] = "\n".join(lines)
return result
result["type"] = "success"
result["winners"] = winners_by_map
return result
def main():
signal.signal(signal.SIGINT, lambda a, b: sys.exit(1))
if len(sys.argv) != 3:
print("Usage: python scripts/run.py <bot 1 name> <bot 2 name>")
sys.exit(1)
bot1 = sys.argv[1]
bot2 = sys.argv[2]
# Based on SERVER_MAPS in https://github.com/battlecode/battlecode22/blob/main/client/visualizer/src/constants.ts
maps = [
# Default
# "maptestsmall",
"eckleburg",
"intersection",
# Sprint 1
"colosseum",
"fortress",
"jellyfish",
"nottestsmall",
"progress",
"rivers",
"sandwich",
"squer",
"uncomfortable",
"underground",
"valley",
# Sprint 2
"chessboard",
"collaboration",
"dodgeball",
"equals",
"highway",
"nyancat",
"panda",
"pillars",
"snowflake",
"spine",
"stronghold",
"tower",
# International qualifier
"charge",
"definitely_not_league",
"fire",
"highway_redux",
"lotus",
"maze",
"olympics",
"one_river",
"planets",
"snowflake_redux",
"treasure",
"walls",
# US qualifier
"chalice",
"cobra",
"deer",
"desert",
"despair",
"flowers",
"island_hopping",
"octopus_game",
"rugged",
"snowman",
"tunnels",
"vault"
]
timestamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
global counter
counter = Value("i", 0)
print(f"Running {len(maps) * 2} matches")
with Pool(2) as pool:
results = pool.starmap(run_matches, [(bot1, bot2, maps, timestamp),
(bot2, bot1, maps, timestamp)])
if any(r["type"] == "error" for r in results):
for r in results:
if r["type"] == "error":
print(f"{r['bot1']} versus {r['bot2']} failed with the following error:")
print(r["message"])
sys.exit(1)
map_winners = {}
bot1_wins = 0
bot2_wins = 0
for r in results:
for map, winner in r["winners"].items():
if map in map_winners and map_winners[map] != winner:
map_winners[map] = "Tied"
else:
map_winners[map] = winner
if winner == bot1:
bot1_wins += 1
else:
bot2_wins += 1
tied_maps = [k for k, v in map_winners.items() if v == "Tied"]
bot1_superior_maps = [k for k, v in map_winners.items() if v == bot1]
bot2_superior_maps = [k for k, v in map_winners.items() if v == bot2]
if len(tied_maps) > 0:
print(f"Tied maps ({len(tied_maps)}):")
for map in tied_maps:
print(f"- {map}")
else:
print(f"There are no tied maps")
if len(bot1_superior_maps) > 0:
print(f"Maps {bot1} wins on as both red and blue ({len(bot1_superior_maps)}):")
for map in bot1_superior_maps:
print(f"- {map}")
else:
print(f"There are no maps {bot1} wins on as both red and blue")
if len(bot2_superior_maps) > 0:
print(f"Maps {bot2} wins on as both red and blue ({len(bot2_superior_maps)}):")
for map in bot2_superior_maps:
print(f"- {map}")
else:
print(f"There are no maps {bot2} wins on as both red and blue")
print(f"{bot1} wins: {bot1_wins} ({bot1_wins / (bot1_wins + bot2_wins) * 100:,.2f}% win rate)")
print(f"{bot2} wins: {bot2_wins} ({bot2_wins / (bot1_wins + bot2_wins) * 100:,.2f}% win rate)")
if __name__ == "__main__":
main()
``` |
{
"source": "jmerle/inkdrop-snippets",
"score": 3
} |
#### File: inkdrop-snippets/media/demo.py
```python
import time
import pyautogui
def wait(ms):
time.sleep(ms / 1000)
def click():
pyautogui.click()
def simulate_typing(text, wait_after=250):
pyautogui.write(text, interval=0.05)
wait(wait_after)
def shortcut(keys, wait_after=250):
pyautogui.press(keys)
wait(wait_after)
def enter(wait_after=250):
shortcut(['enter'], wait_after)
def tab(wait_after=250):
shortcut(['tab'], wait_after)
def new_section(wait_after=250):
enter(0)
enter(wait_after)
wait(5000)
click()
wait(3500)
click()
wait(500)
simulate_typing('# Snippets plugin for Inkdrop')
new_section()
simulate_typing('Usage:')
enter()
simulate_typing('1. Type trigger')
enter()
simulate_typing('Press Tab')
enter()
simulate_typing('Magic!')
new_section()
simulate_typing('Static snippet:')
enter()
simulate_typing('hello')
tab()
new_section()
simulate_typing('Dynamic snippet:')
enter()
simulate_typing('date')
tab()
new_section()
simulate_typing('Static snippet with placeholders:')
enter()
simulate_typing('name')
tab()
simulate_typing('Jane')
tab()
simulate_typing('Doe')
wait(2000)
click()
``` |
{
"source": "jmerle/inkdrop-table-editor",
"score": 3
} |
#### File: inkdrop-table-editor/media/autokey.py
```python
import time
def wait(ms): time.sleep(ms / 1000)
def wait_short(): wait(250)
def wait_long(): wait(500)
def simulate_typing(text):
for ch in text:
keyboard.send_keys(ch)
wait(50)
ctrl = '<ctrl>'
alt = '<alt>'
shift = '<shift>'
enter = '<enter>'
tab = '<tab>'
left = '<left>'
right = '<right>'
up = '<up>'
down = '<down>'
def send_shortcut(*keys):
wait_short()
keyboard.send_keys('+'.join(keys))
wait_short()
def escape(): send_shortcut(ctrl, enter)
def align_left(): send_shortcut(ctrl, alt, left)
def align_right(): send_shortcut(ctrl, alt, right)
def align_center(): send_shortcut(ctrl, alt, up)
def align_none(): send_shortcut(ctrl, alt, down)
def move_left(): send_shortcut(ctrl, left)
def move_right(): send_shortcut(ctrl, right)
def move_up(): send_shortcut(ctrl, up)
def move_down(): send_shortcut(ctrl, down)
def move_next_cell(): send_shortcut(tab)
def move_previous_cell(): send_shortcut(shift, tab)
def move_next_row(): send_shortcut(enter)
def move_row_up(): send_shortcut(ctrl, alt, shift, up)
def move_row_down(): send_shortcut(ctrl, alt, shift, down)
def move_column_left(): send_shortcut(ctrl, alt, shift, left)
def move_column_right(): send_shortcut(ctrl, alt, shift, right)
wait(2500)
mouse.click_relative_self(0, 0, 1)
wait(4000)
mouse.click_relative_self(0, 0, 1)
simulate_typing('| Command')
move_next_cell()
move_next_row()
simulate_typing('Move to next cell')
move_next_cell()
simulate_typing('Tab')
move_up()
simulate_typing('Default keybinding')
move_down()
move_next_row()
simulate_typing('Move to previous cell')
move_next_cell()
simulate_typing('Shift + Tab')
move_previous_cell()
move_next_cell()
move_next_row()
simulate_typing('Move to next row')
move_next_cell()
simulate_typing('Enter')
move_next_row()
simulate_typing('Move around')
move_next_cell()
simulate_typing('Cmd/Ctrl + Arrows')
move_up()
move_left()
move_down()
move_right()
move_next_row()
simulate_typing('Move rows around')
move_next_cell()
simulate_typing('Cmd/Ctrl + Alt + Shift + Up/Down')
move_row_up()
move_row_up()
move_row_down()
move_row_down()
move_next_row()
simulate_typing('Move columns around')
move_next_cell()
simulate_typing('Cmd/Ctrl + Alt + Shift + Left/Right')
move_column_left()
move_column_right()
move_next_row()
simulate_typing('Change alignment')
move_next_cell()
simulate_typing('Cmd/Ctrl + Alt + Arrows')
align_left()
align_center()
align_right()
align_none()
move_next_row()
simulate_typing('Exit the table')
move_next_cell()
simulate_typing('Cmd/Ctrl + Enter')
wait_long()
escape()
wait(1000)
mouse.click_relative_self(0, 0, 1)
``` |
{
"source": "jmerle/lean-python-generator",
"score": 2
} |
#### File: lean-python-generator/integration/pull_repos.py
```python
from utils import *
# Simple setup script that gets Lean and runtime repos into our workspace
# under `generated` directory
def main():
ensure_command_availability("git")
ensure_command_availability("dotnet")
ensure_command_availability("pyright")
project_root = Path(__file__).absolute().parent.parent
generated_dir = project_root / "generated"
lean_dir = generated_dir / "Lean"
runtime_dir = generated_dir / "runtime"
stubs_dir = generated_dir / "stubs"
generator_dir = project_root / "QuantConnectStubsGenerator"
generated_dir.mkdir(parents=True, exist_ok=True)
ensure_repository_up_to_date("QuantConnect/Lean", lean_dir)
ensure_repository_up_to_date("dotnet/runtime", runtime_dir)
if __name__ == "__main__":
main()
``` |
{
"source": "jmerone/svgpath2mpl",
"score": 3
} |
#### File: svgpath2mpl/tests/test_parser.py
```python
import matplotlib as mpl
import matplotlib.pyplot as plt
from svgpath2mpl import parse_path
d = "M300,200 h-150 a150,150 0 1,0 150,-150 z"
fill = "red"
stroke = "blue"
stroke_width = 5
def test_parse_path():
path = parse_path(d)
patch = mpl.patches.PathPatch(path, facecolor=fill, edgecolor=stroke, linewidth=stroke_width)
fig = plt.figure(figsize=(12, 5.25))
ax = fig.add_subplot(111)
ax.add_patch(patch)
ax.set_aspect(1)
ax.set_xlim([0, 1200])
ax.set_ylim([0, 400])
``` |
{
"source": "jmerrell93/pynet_work",
"score": 2
} |
#### File: jmerrell93/pynet_work/telnet_conn.py
```python
import telnetlib
import time
from snmp_helper import snmp_get_oid,snmp_extract
TELNET_TIMEOUT = 6
TELNET_PORT = 23
COMMUNITY = 'galileo'
SNMP_PORT = 161
username = 'pyclass'
password = '<PASSWORD>'
OID_NAME = dict()
OID_NAME['1.3.6.1.2.1.1.1.0'] = 'DEVICE DESCRIPTION'
OID_NAME['1.3.6.1.2.1.1.5.0'] = 'DEVICE NAME'
def connect_and_query(ip_address):
    remote_conn = telnetlib.Telnet(ip_address, TELNET_PORT, TELNET_TIMEOUT)
    remote_conn.write((username + '\n').encode('ascii'))
    time.sleep(1)
    remote_conn.write((password + '\n').encode('ascii'))
    remote_conn.write(('terminal length 0' + '\n').encode('ascii'))
    time.sleep(1)
    output = remote_conn.read_very_eager().decode('ascii', errors='ignore')
    print(output)
snmp_query('1.3.6.1.2.1.1.1.0', ip_address)
snmp_query('1.3.6.1.2.1.1.5.0', ip_address)
def snmp_query(OID, ip_address):
device = (ip_address, COMMUNITY, SNMP_PORT)
snmp_get = snmp_get_oid(device, OID)
output = snmp_extract(snmp_get)
print "\nFOR DEVICE %s" %ip_address
print OID_NAME[OID]
print output + '\n'
connect_and_query('172.16.58.3')
connect_and_query('192.168.127.12')
``` |
{
"source": "jmerten82/pymfree",
"score": 3
} |
#### File: pymfree/core/derivative.py
```python
import math
import numpy as np
import torch
from pymfree.core.function import DomainFunction
from pymfree.util.utils import check_pymfree_type
from pymfree.util.polynomial import count_dim_contrib
class LinearDerivative(object):
r""" A linear derivative operator in PyMfree.
Can have several additive components, stored separately. A functional
form of the derivative can be provided via a DomainFunction.
Parameters
----------
signature : str
The description of the operator via a signature. Must be a string. The
actual signature must be between two '$' characters. Each components
within the signature starts with a prefactor and then the derivative
components within round brackets. See example later on.
function : pymfree.core.function.DomainFunction, optional
If wanted, a functional form of a derivative can be provided, which is
        of course not a derivative operator itself. Defaults to None.
Raises
------
TypeError
If function is given and not a DomainFunction or None.
See also
--------
pymfree.core.derivative.derivative_parser
Reads derivative signatures.
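    Examples
    --------
    A minimal, illustrative signature (made up for demonstration): an operator
    with two additive second-order components can be written as
    >>> op = LinearDerivative("$1.0(0,0)+1.0(1,1)$")
    >>> len(op)
    2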
"""
def __init__(self, signature, function=None):
self.signature, comps = derivative_parser(signature)
if not isinstance(function, DomainFunction) and function is not None:
raise TypeError("LinearDerivative: Implementation of\
derivative functional form must be DomainFunction.")
self.F = function
self.components = [DerivativeComponent(comp) for comp in comps]
def __call__(self, x):
r""" Applies the eventually provided derivative to a coordinate.
This only makes sense if a DomainFunction was provided. Otherwise a
0 scalar is returned.
Parameters
----------
x : torch.Tensor
The coordinates the derivative should be applied to. Must be
pymfree coordinate.
Returns
-------
torch.Tensor
            A pymfree scalar with F(x), given that a DomainFunction F was
            provided at construction; calling without one is not supported.
Raises
------
TypeError
If x is not a pymfree coordinate.
"""
if not check_pymfree_type(x)['coordinate']:
raise TypeError("LinearDerivative: Input is not a coordinate.")
return self.F(x)
def __len__(self):
r""" Length operator.
Returns the number of derivative components.
Returns
-------
int
The length of the components vector, so the number of derivative
components.
"""
return len(self.components)
def __repr__(self):
r""" The class representation.
Prints the class string to stdout.
Returns
-------
stdout
Returns class string.
"""
return self.__str__()
def __str__(self):
r""" The string representation of the class.
Provides information on the total derivative signature and the
attached function form.
Returns
-------
str
The string representation of the class.
"""
one = "Signature\n"
one += "---------\n"
one += self.signature + "\n\n"
if self.F is not None:
one += "Function\n"
one += "--------"
one += str(self.F.F.__name__)
return str(one)
class DerivativeComponent(object):
r""" A representation of derivative components.
With component we mean a closed derivative operator of a certain order.
E.g. the 1/2*(d^2 / dx^2) in the 2D Laplacian.
Parameters
----------
signature : str
A valid derivative component signature. E.g. 0.5(0, 0)
Attributes
----------
factor : float
The factor in front of the derivative. E.g. a 1/2 in 2D Laplacian.
    component_vector : torch.Tensor
        A vector showing the derivative order in each relevant dimension.
        E.g. d^2/dxdz would be [1, 0, 1].
See also
--------
pymfree.core.derivative.derivative_component_parser
Reads the str signatures of derivative components.
"""
def __init__(self, signature):
self.signature, \
self.factor, \
self.component_vector = derivative_component_parser(signature)
self.component_vector = torch.tensor(self.component_vector)
self.component_vector = count_dim_contrib(
self.component_vector.unsqueeze(0), [0, 1, 2, 3, 4])
def __len__(self):
r""" The order of the derivative component.
"""
return len(self.component_vector)
@property
def order(self):
return np.sum(self.component_vector[:, 1])
@property
def max_dim(self):
return self.component_vector[-1, 0]
def dim_vector(self, dim):
dim_vector = torch.zeros(dim, dtype=torch.int64)
for line in self.component_vector:
            dim_vector[line[0]] = line[1]
return dim_vector
def max_factor(self):
current = self.factor
        for line in self.component_vector:
            current *= math.factorial(int(line[1]))
return current
def derivative_parser(input):
if not isinstance(input, str):
raise TypeError("derivative_parser: Signature must be a string.")
if input.count('$') < 2:
raise TypeError("derivative_parser: Input signature format invalid.")
# Removing all white spaces
input = input.replace(' ', '')
# Extracting functional part, according to $-convention
signature = input.split('$')[1]
if signature.count('(') == 0:
raise TypeError("derivative_parser: No derivative in signature.")
if not signature.count('(') == signature.count(')'):
raise TypeError(
"derivative_parser: Bracket count in signature invalid.")
# Extracting components, according to ()-convention
component_signature = []
component_signature.append(signature[0:signature.find(')')+1])
temp = signature[signature.find(')')+1:]
index = temp.find('(')
while index > 0:
component_signature.append(temp[0:temp.find(')')+1])
temp = temp[temp.find(')')+1:]
index = temp.find('(')
return signature, component_signature
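# Illustrative parse of a made-up signature:
#
#   derivative_parser("$1.0(0,0)+1.0(1,1)$")
#   # -> ("1.0(0,0)+1.0(1,1)", ["1.0(0,0)", "+1.0(1,1)"])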
def derivative_component_parser(input):
if not isinstance(input, str):
raise TypeError(
"derivative_component_parser: Signature must be a string.")
if input.count('(') != 1 and input.count(')') != 1:
raise TypeError(
"derivative_component_parser: Input signature contains no brackets.")
if input.count(',') == 0:
raise TypeError(
"derivative_component_parser: Input signature contains no colons.")
signature = input.replace(' ', '')
factor = float(signature[0:signature.find('(')])
temp = signature[signature.find('(')+1:signature.find(')')]
indices = temp.split(',')
# Little list comprehension the end and convert
indices = [int(element) for element in indices]
return signature, factor, indices
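# For example (hypothetical component signature):
#
#   derivative_component_parser("0.5(0,0)")
#   # -> ("0.5(0,0)", 0.5, [0, 0])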
```
#### File: pymfree/core/domain.py
```python
import torch
import faiss
import numpy as np
from pymfree.core.function import Norm
from pymfree.core.function import L1Norm
from pymfree.core.function import L2Norm
from pymfree.core.function import L2SquaredNorm
from pymfree.core.function import LInfNorm
from pymfree.core.function import DomainFunction
from pymfree.util.utils import check_pymfree_type
from pymfree.util.utils import scale_params
class Domain(object):
r""" A mesh-free domain representation.
Implements means to save support coordinates in n-dimensions, together with
a function residing on the domain. Establishes norms and nearest neighbours
searches on the domain, together with coordinate queries.
Parameters
----------
coordinates : list, numpy.ndarray or torch.Tensor
        The support coordinates of the domain. If provided as a list of floats,
        the spatial dimensionality of the domain must also be provided
        (see dim). If a torch tensor or numpy array is provided as PyMfree
        coordinates, the dimensionality is inferred directly;
        otherwise it must also be provided.
values : list, numpy.ndarray, torch.Tensor
or pymfree.core.function.DomainFunction
A function with values on the support points. If provided as numpy
array or torch tensor, the input must be scalar. Also, the length of
the function values must be equal or longer than the number of support
coordinates. Instead of an input vector with values, also a
DomainFunction can be provided, which is evaluated at the support
coordinates.
norm : pymfree.core.function.Norm, optional
The norm to calculate distances on the Domain. Default is L2Norm
k : int, optional
        The number of nearest neighbours that are searched within the support
        nodes for a given coordinate query.
dim : int, optional
If scalars are provided at construction for the support coordinates,
the dimension provided here is used to infer the number of spatial
components of the input. E.g. a scalar of 10 input numbers and dim set
to 3 would result in 3 3-d support coordinates. Defaults to 2.
device : torch.device, optional
The device the domain shall be created on. This means that all data
        vectors are stored on this device. Default is torch.device('cpu') and
it can be changed later via Domain.to(device)
Attributes
----------
All of the following live on a specifc device which can be changed via
Domain.to(device).
node_coordinates : torch.Tensor
        This is where the domain's support points are stored. The format of this
is PyMfree coordinate.
node_values : torch.Tensor
A PyMfree scalar that holds function values at the support coordinates.
query : torch.Tensor
A PyMfree coordinate which stores the query coordinates the user
requested via the call operator. This is None at construction and
created after the call operator.
neighbour_map : torch.Tensor
The index map relating to query search. E.g. if n coordinates are
queried and k is set to m, the dimension of this object would be
(n, 1, m). This is None at construction and created after
the call operator.
    neighbour_distances : torch.Tensor
The distances to the support nodes relating to query search.
E.g. if n coordinates are queried and k is set to m,
the dimension of this object would be (n, 1, m). This is None at
construction and created after the call operator.
k : torch.Tensor
        The number of nearest neighbours for query searches.
scale : dict
If rescale_coordinates is called this holds the transformation
parameters as a dict of torch tensors. Contains keys unit, shift and
scale.
Finally, this is the index which is used to calculate nearest neighbours
fast.
index : faiss.FlatIndex or None
In most cases this is a faiss flat index, depending which Norm
is used on the Domain. If a Norm is used that is unknown to faiss, the
index is None and an internal, slower routines is used to calculate
neighbour_map and distances.
Raises
------
TypeError
If coordinates are provided as numpy array or torch tensor but are not
scalar or coordinates.
TypeError
If coordinates are not provided as list, numpy array or torch tensor.
TypeError
If the values are provided as array but are either too short
(< coordinates) or not a scalar.
TypeError
If values are not a list, numpy array, torch tensor or DomainFunction.
TypeError
If norm is not a pymfree.core.function.Norm.
TypeError
If k is not an integer.
Notes
-----
Thinking about including faiss GPU index in the future. Currently this
    does not support all Norms.
See also
--------
pymfree.core.function.DomainFunction
One way of providing function values on the Domain.
pymfree.core.domain.Domain()
The call operator which initiates a coordinate query. Here the value
k becomes relevant.
pymfree.core.domain.Domain.to(device)
Changes the device where the domain data is stored.
pymfree.core.domain.Domain.rescale_coordinates()
Shifts and scales the domain coordinates into a fixed window.
pymfree.core.function.Norm.L2Norm
The default Norm for the Domain.
References
----------
[1] [Faiss](https://github.com/facebookresearch/faiss)
Facebook AI Research Similarity Search
Examples
--------
The following constructs a domain with one 4-d coordinate at (1.,1.,1.,1.)
and sets this coordinate to 42.
>>> from pymfree.core.domain import Domain
>>> my_domain = Domain([1., 1., 1., 1., 666.], [42., 66., 3.14], dim=4)
    And this constructs a 3-d domain with 1000 random coordinates, uses the
L1Norm on the mesh, but sets values at the support nodes to the L2-norm
from the origin.
>>> import numpy as np
>>> from pymfree.core.domain import Domain
>>> from pymfree.core.function import L1Norm
>>> from pymfree.core.function import DomainFunction
>>> from pymfree.util.functional import l2
>>> l2func = DomainFunction(l2)
>>> my_domain = Domain(np.random.rand(1000,3), l2func, L1Norm)
"""
def __init__(
self, coordinates, values,
norm=L2Norm, k=32, dim=2, device=torch.device('cpu')):
if isinstance(coordinates, list):
            self.node_coordinates = torch.tensor(coordinates, device=device)[
                :len(coordinates) - (len(coordinates) % dim)].reshape(-1, dim)
elif isinstance(coordinates, np.ndarray):
if len(coordinates.shape) == 1:
                self.node_coordinates = torch.tensor(
                    coordinates, device=device)[
                        :len(coordinates) - (len(coordinates) % dim)].reshape(-1, dim)
elif len(coordinates.shape) == 2:
self.node_coordinates = torch.tensor(
coordinates, device=device)
else:
raise TypeError(
"PyMfree Domain: Input array dimensions invalid")
elif isinstance(coordinates, torch.Tensor):
if len(coordinates.shape) == 1:
                self.node_coordinates = coordinates[
                    :len(coordinates) - (len(coordinates) % dim)].reshape(-1, dim)
elif len(coordinates.shape) == 2:
self.node_coordinates = coordinates
else:
raise TypeError(
"PyMfree Domain: Input array dimensions invalid")
            self.node_coordinates = self.node_coordinates.to(device)
else:
raise TypeError(
"PyMfree Domain: Input coordinates must \
be torch tensor, numpy array or list.")
if isinstance(values, DomainFunction):
# TO BE SEEN WHAT HAPPENS IF THIS IS CALLED ON CUDA DEVICE
self.node_values = values(self.node_coordinates)
elif isinstance(values, list):
if len(values) < len(self):
raise TypeError(
"PyMfree Domain: Node value list too short.")
else:
self.node_values = torch.tensor(
values[:len(self)], device=device)
elif isinstance(values, np.ndarray):
            if len(values) < len(self) or len(values.shape) != 1:
raise TypeError(
"PyMfree Domain: Node value array invalid.")
else:
self.node_values = torch.tensor(
values[:len(self)], device=device)
elif isinstance(values, torch.Tensor):
            if len(values) < len(self) or len(values.shape) != 1:
raise TypeError(
"PyMfree Domain: Node value array invalid.")
else:
self.node_values = values[:len(self)]
                self.node_values = self.node_values.to(device)
else:
raise TypeError(
"PyMfree Domain: Input values must come from DomainFunction \
a torch tensor, numpy array or list.")
if not isinstance(norm, Norm):
raise TypeError("PyMfree Domain: Need a proper Norm on Domain.")
self.norm = norm
if isinstance(self.norm, L1Norm):
self.index = faiss.IndexFlat(self.dim, faiss.METRIC_L1)
elif isinstance(self.norm, L2Norm):
self.index = faiss.IndexFlatL2(self.dim)
elif isinstance(self.norm, L2SquaredNorm):
self.index = faiss.IndexFlatL2(self.dim)
elif isinstance(self.norm, LInfNorm):
self.index = faiss.IndexFlat(self.dim, faiss.METRIC_Linf)
else:
self.index = None
if isinstance(k, int):
self.k = k
else:
raise TypeError("PyMfree Domain: k must be an integer.")
if self.index is not None:
self.index.add(
self.node_coordinates.to(torch.device('cpu')).numpy())
self.query = None
self.neighbour_map = None
self.neighbour_distances = None
self.__counter = 0
self.k = torch.tensor([k], device=device)
self.scale = {}
self.scale['shift'] = torch.tensor([0.], device=device)
self.scale['scale'] = torch.tensor([1.], device=device)
self.scale['unit'] = torch.tensor([1.], device=device)
self.scale['rescaled'] = False
def __len__(self):
r""" The length of the domain.
Just the number of support points.
Returns
-------
int
Calls len(self.node_coordinates)
"""
return len(self.node_coordinates)
def __iter__(self):
return self
def __next__(self):
r""" The iterator functionality.
Moves through the support coordinates and returns them while
incrementing an internal counter. Counter is reset as soon as the
last coordinate is reached.
Returns
-------
torch.Tensor, torch.Tensor
Single support coordinate with value while being iterated.
"""
if self.__counter > len(self)-1:
self.__counter = 0
raise StopIteration
else:
self.__counter += 1
return self.node_coordinates[
self.__counter-1], self.node_values[self.__counter-1]
def __str__(self):
r""" String representation of the class.
Shows information on the nodes and dimensons, as well as
        query information and eventual rescaling.
Returns
-------
str
Essential class information.
"""
        one = self.__class__.__name__
        two = str(len(self)) + " nodes; \t" + str(self.dim) + " spatial dimensions."
        three = str(self.index.__class__.__name__) + " performs fast nn search."
        if self.query_ready:
            four = str(len(self.query)) + " point query established."
        else:
            four = "No query in Domain."
        five = ("Coordinates info ---: "
                + "Unit: " + str(self.scale['unit'].item())
                + " Scale: " + str(self.scale['scale'].item())
                + " Shift: " + str(self.scale['shift'].item()))
        return one + "\n" + two + "\n" + three + "\n" + four + "\n" + five
def __repr__(self):
r""" Representation of the class.
Shows information on the nodes and dimensons, as well as
        query information and eventual rescaling.
Returns
-------
stdout
Essential class information.
"""
        return self.__str__()
def __call__(self, x):
r""" Call operator for the class, sets a coordinate query.
This is the main function for the Domain, which lets the user set
a coordinate query. If domain is rescaled via rescale_coordinates,
transformation is applied to query coordinates. x -> x*scale +shift.
Parameters
----------
x : list, numpy.ndarray or torch.Tensor
The coordinates to be queried. If given as a scalar, the spatial
dimensionality is set to the one of the Domain (see example).
Returns
-------
neighbour_distances, neighbour_map : torch.Tensor, torch.Tensor
The distances and indeces of the k nearest neighbours in the
support domain for the query. k is set at construction.
Raises
------
TypeError
If query is numpy array or torch tensor
but not a PyMfree coordinate.
TypeError
If query is not a numpy array, torch tensor or list.
See also
--------
pymfree.core.domain.Domain.rescale_coordinates()
Shifts and scales the domain coordinates into a fixed window.
Examples
--------
The following queries two points on a 3-d Domain:
>>> import numpy as np
>>> from pymfree.core.domain import Domain
>>> my_domain = Domain(np.random.rand(100,3), np.random.rand(100))
>>> my_domain([1., 2., 3., 4., 5., 6.])
and the output will be index map and distances of shape (2, 1, 32),
since the domain has k=32 by default. The 1 in the middle is PyMfree
convention since the output is not a coordinate.
"""
if isinstance(x, list):
            self.query = torch.tensor(x, device=self.device)[
                :len(x) - (len(x) % self.dim)].reshape(-1, self.dim)
elif isinstance(x, np.ndarray):
if len(x.shape) == 1:
                self.query = torch.tensor(
                    x, device=self.device)[
                        :len(x) - (len(x) % self.dim)].reshape(-1, self.dim)
elif len(x.shape) == 2:
self.query = torch.tensor(
x, device=self.device)
else:
raise TypeError(
"PyMfree Domain: Query input array dimensions invalid")
elif isinstance(x, torch.Tensor):
if len(x.shape) == 1:
                self.query = x[
                    :len(x) - (len(x) % self.dim)].reshape(-1, self.dim)
elif len(x.shape) == 2:
self.query = x
else:
raise TypeError(
"PyMfree Domain: Query array dimensions invalid")
self.query.to(self.device)
else:
raise TypeError(
"PyMfree Domain: Input query array must \
be torch tensor, numpy array or list.")
if self.scale['rescaled']:
self.query *= self.scale['scale']
self.query += self.scale['shift']
if self.index is not None:
self.neighbour_distances, self.neighbour_map = self.index.search(
                self.query.cpu().numpy(), int(self.k))
self.neighbour_distances = torch.tensor(
self.neighbour_distances, device=self.device)
self.neighbour_map = torch.tensor(
self.neighbour_map, device=self.device)
if isinstance(self.index, faiss.IndexFlatL2):
self.neighbour_distances = torch.sqrt(self.neighbour_distances)
else:
self.neighbour_distances, self.neighbour_map = any_norm_nn_search(
self.node_coordinates, self.query, self.norm, self.k)
        self.neighbour_distances = self.neighbour_distances.unsqueeze(1)
        self.neighbour_map = self.neighbour_map.unsqueeze(1)
return self.neighbour_distances, self.neighbour_map
def __getitem__(self, index):
r""" The square bracket opertator
Returns the support coordinate at a given index position.
Parameters
----------
index : int
The index position of the coordinate to be returned.
Returns
-------
torch.Tensor
The coorindate at the queried index.
Raises
------
IndexError
If index out of bounds.
"""
if not isinstance(index, int) or index > len(self)-1:
raise IndexError("PyMfree Domain: Index out of bounds.")
return self.node_coordinates[index]
def rescale_coordinates(self, min_in=-1., max_in=1.):
r""" Rescales support coordinates into given window.
        This routine loops through all spatial dimensions and finds the one
        with the largest difference between the largest and smallest
        coordinate component. It then rescales this distance into the interval
        max_in - min_in and shifts the minimum to min_in.
Parameters
----------
        min_in : float, optional
            The new minimum location for the longest spatial dimension.
            Defaults to -1.
        max_in : float, optional
            The new maximum location for the longest spatial dimension.
            Defaults to 1.
Returns
-------
        No direct output, but the Domain attribute self.scale is set accordingly.
Raises
------
TypeError
If min_in or max_in are not given as floats.
"""
if not isinstance(min_in, float) or not isinstance(max_in, float):
raise TypeError("PyMfree Domain: domain bounds must be float.")
        self.scale['scale'], self.scale['shift'] = scale_params(
            self.node_coordinates, min_in, max_in)
        self.scale['unit'] = torch.tensor(
            max_in - min_in, device=self.device)
        self.node_coordinates *= self.scale['scale']
        self.node_coordinates += self.scale['shift']
        self.scale['rescaled'] = True
def to(self, device=torch.device('cpu')):
r""" Transfers Domain to a given device.
All data attributes are moved to provided device.
Parameters
----------
device : torch.device, optional
The target torch device. Default is torch.device('cpu').
Raises
------
TypeError
If device is not a torch device.
"""
if not isinstance(device, torch.device):
raise TypeError("PyMfree Domain: Device must be torch device.")
self.node_coordinates = self.node_coordinates.to(device)
self.node_values = self.node_values.to(device)
if self.query is not None:
self.query = self.query.to(device)
if self.neighbour_map is not None:
self.neighbour_map = self.neighbour_map.to(device)
if self.neighbour_distances is not None:
self.neighbour_distances = self.neighbour_distances.to(device)
self.k = self.k.to(device)
self.scale['shift'] = self.scale['shift'].to(device)
self.scale['scale'] = self.scale['scale'].to(device)
self.scale['unit'] = self.scale['unit'].to(device)
@property
def shape(self):
r""" Returns base Domain properties.
The basic dimensionalities of the Domain.
Returns
-------
dict
Entries are 'nodes', number of support nodes. 'dim', the number
of spatial dimensions and 'query', the number of query nodes.
"""
if self.query_ready:
            helper = len(self.query)
else:
helper = 0
return {'nodes': len(self), 'dim': self.dim, 'query': helper}
@property
def dim(self):
r""" The spatial dimension of the domain.
Directly derived from the shape of the support coordinates.
Returns
-------
int
The spatial dimension of the support node coordinates.
"""
return self.node_coordinates.shape[1]
@property
def query_ready(self):
r""" Flag if query coordinates have been set.
Returns
-------
bool
True if query attribute exists, False otherwise.
"""
return isinstance(self.query, torch.Tensor)
@property
def device(self):
r""" The device the Domain resides on.
Most data arrays have to be assigned to a specific device and
this routines returns that device.
Returns
-------
torch.device
The Domain device, as inferred from the device of the support
coordinate vector.
"""
return self.node_coordinates.device
def any_norm_nn_search(support, query, norm, k=32):
r""" Nearest neighbour search for arbitrary norm.
Since not all norms are implemented in e.g. faiss, this offers a way
of brute-force calculating nearest neighbours with any norm. Please be
    advised that this can be orders of magnitude slower than a smart index
search, even if that is flat.
Parameters
----------
support : torch.Tensor
The support coordinates for the search. Must be PyMfree coordinate.
query : torch.Tensor
The query coordinates. Must be PyMfree coordinates
norm : pymfree.core.function.Norm
The PyMfree norm to be used. This is not recommended for standard
        norms such as L2, L1 or Linf, since those are implemented by default in
many faster routines such as faiss or sklearn.neighbors.KdTree.
k : int, optional
The number of nearest neighbours to be searched. Defaults to 32.
Returns
-------
distances, indices : torch.Tensor, torch.Tensor
        The distances and index positions of the k nearest neighbours for each
query point. Output shape is hence (len(query), k).
Raises
------
TypeError
If support or query are not PyMfree coordinates.
TypeError
If norm is not a PyMfree Norm.
TypeError
If k is not given as int.
References
----------
[1] [faiss](https://github.com/facebookresearch/faiss)
[2] [Scikit-learn KdTree](
https://scikit-learn.org/stable/modules/generated/\
sklearn.neighbors.KDTree.html#sklearn.neighbors.KDTree)
"""
if not check_pymfree_type(support)['coordinate']:
raise TypeError("PyMfree nn search: Support must be coordinates")
if not check_pymfree_type(query)['coordinate']:
raise TypeError("PyMfree nn search: Query must be coordinates")
if not isinstance(norm, Norm):
raise TypeError(
"PyMfree nn search: Norm must be PyMfree norm.")
if not isinstance(k, int):
raise TypeError("PyMfree nn search: k must be integer.")
n = len(query)
device = support.device
result = torch.zeros(n, k, dtype=torch.float32, device=device)
indices = torch.zeros(n, k, dtype=torch.int64, device=device)
for i, element in enumerate(query):
current = torch.sub(support, element)
current = norm.no_checks(current)
dists, numbers = torch.sort(current)
result[i, :] = dists[: k]
indices[i, :] = numbers[: k]
return result, indices
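# Usage sketch (random data, purely illustrative; assumes the norm classes can
# be instantiated without arguments):
#
#   support = torch.rand(1000, 3)
#   query = torch.rand(10, 3)
#   dists, idx = any_norm_nn_search(support, query, L2Norm(), k=8)
#   # dists.shape == idx.shape == (10, 8)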
```
#### File: pymfree/util/polynomial.py
```python
import torch
class PolynomialSupport(object):
r""" ...
...
"""
def __init__(self, dim, pdeg):
        if not isinstance(dim, int) or dim < 1:
            raise TypeError("PolynomialSupport: dim must be int > 0.")
        if not isinstance(pdeg, int) or pdeg < 0:
            raise TypeError("PolynomialSupport: pdeg must be int >= 0.")
        self.polynomials = get_polynomials(dim, pdeg)
def __len__(self):
return len(self.polynomials)
    @property
    def shape(self):
return {"dim": self.polynomials.shape[1],
"pdeg": torch.max(self.polynomials).item(),
"terms": len(self)}
def embed_AP(self, coords, AP):
# Careful, coords must have origin at query point
if not isinstance(coords, torch.Tensor):
raise TypeError(
"PolynomialSupport: Coordinate input must be torch tensor.")
if not isinstance(AP, torch.Tensor):
            raise TypeError(
                "PolynomialSupport: AP input must be a torch tensor.")
        if len(coords.shape) != 4 or len(AP.shape) != 4:
raise TypeError(
"PolynomialSupport: Need batches of PyMfree special objects.")
if coords.shape[3] != self.shape['dim']:
raise TypeError(
"PolynomialSupport: Spatial dimensions don't match.")
if AP.shape[2] < coords.shape[2] or AP.shape[2] < len(self):
raise TypeError("PolynomialSupport: Given AP matrices too small.")
AP[:, :, :coords.shape[2], len(self):] = poly_AP_contrib(
coords, self.polynomials)
return AP
def derivative_component(self, derivative):
# Keep in mind that this refers to shifted coordinates to query point.
pass
def get_polynomials(dim, pdeg):
    dims = torch.arange(0, dim, dtype=torch.int64)
    out = torch.zeros(1, dim, dtype=torch.int64)
for i in range(1, pdeg+1):
out = torch.cat(
(out, count_dim_contrib(
torch.combinations(dims, with_replacement=True, r=i), dims)))
return out
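# For example, in 2D with pdeg=1 the exponent table (one row per polynomial
# term, one column per dimension) would be, assuming the constant row is dim wide:
#   [[0, 0],   # constant term
#    [1, 0],   # x
#    [0, 1]]   # y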
def count_dim_contrib(terms, dims):
stack = []
for dim in dims:
stack.append((terms == dim).sum(axis=1))
return torch.stack(stack, 1)
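# For example, the exponent term d^2/(dx dz) in 3D, written as the dimension
# index pair (0, 2), is counted per dimension as [1, 0, 1]:
#
#   count_dim_contrib(torch.tensor([[0, 2]]), [0, 1, 2])
#   # -> tensor([[1, 0, 1]])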
def poly_AP_contrib(coords, polynomials):
out = []
for exps in polynomials:
out.append(torch.pow(coords, exps).prod(3, keepdim=True))
return torch.cat(out, 3)
``` |
{
"source": "jmertic/contrib_check",
"score": 3
} |
#### File: jmertic/contrib_check/tests.py
```python
import os
import git
import unittest
from unittest.mock import Mock
from contrib_check.org import Org
from contrib_check.repo import Repo
from contrib_check.commit import Commit
class TestCommit(unittest.TestCase):
@classmethod
def setUpClass(self):
self._mock_repo = Mock()
self._mock_commit_merge = Mock()
self._mock_commit_merge.parents = [1,2,3]
self._mock_commit = Mock()
self._mock_commit.parents = [1]
# test for not having a signoff
def testHasNoDCOSignOff(self):
commit = Commit(self._mock_commit,self._mock_repo)
commit.git_commit_object.message = "has no signoff"
self.assertFalse(commit.hasDCOSignOff(), "Commit message didn't have a signoff")
# test for having a signoff
def testHasDCOSignOff(self):
commit = Commit(self._mock_commit,self._mock_repo)
commit.git_commit_object.message = "has a signoff Signed-off-by: <NAME> <<EMAIL>>"
self.assertTrue(commit.hasDCOSignOff(), "Commit message had a signoff")
def testFoundPastDCOSignoff(self):
commit = Commit(self._mock_commit,self._mock_repo)
commit.git_commit_object.hexsha = '11ac960e1070eacc2fe92ac9a3d1753400e1fd4b'
commit.repo_object.past_signoffs = [
"I, personname hereby sign-off-by all of my past commits to this repo subject to the Developer Certificate of Origin (DCO), Version 1.1. In the past I have used emails: [<EMAIL>]\n\n11ac960e1070eacc2fe92ac9a3d1753400e1fd4b This is a commit".encode()
]
self.assertTrue(commit.hasDCOPastSignoff(), "Commit message had a past signoff")
def testFoundNoPastDCOSignoff(self):
commit = Commit(self._mock_commit,self._mock_repo)
commit.git_commit_object.hexsha = 'c1d322dfba0ed7a770d74074990ac51a9efedcd0'
commit.repo_object.past_signoffs = [
"I, personname hereby sign-off-by all of my past commits to this repo subject to the Developer Certificate of Origin (DCO), Version 1.1. In the past I have used emails: [<EMAIL>]\n\n11ac960e1070eacc2fe92ac9a3d1753400e1fd4b This is a commit".encode()
]
self.assertFalse(commit.hasDCOPastSignoff(), "Commit message had a past signoff")
def testDCOSignoffRequiredMergeCommit(self):
commit = Commit(self._mock_commit_merge,self._mock_repo)
self.assertFalse(commit.isDCOSignOffRequired(), "Merge commits don't require a DCO signoff")
def testDCOSignoffRequiredNormalCommit(self):
commit = Commit(self._mock_commit,self._mock_repo)
self.assertTrue(commit.isDCOSignOffRequired(), "All non-merge commits require a DCO signoff")
def testDCOSignoffCheckMergeCommit(self):
commit = Commit(self._mock_commit_merge,self._mock_repo)
commit.git_commit_object.message = "has no signoff"
self.assertTrue(commit.checkDCOSignoff(), "Commit message didn't have a signoff, but is a merge commit so that's ok")
def testDCOSignoffCheckNormalCommitNoSignoffPastSignoff(self):
commit = Commit(self._mock_commit_merge,self._mock_repo)
commit.git_commit_object.hexsha = '11ac960e1070eacc2fe92ac9a3d1753400e1fd4b'
commit.repo_object.past_signoffs = [
['dco-signoffs',"I, personname hereby sign-off-by all of my past commits to this repo subject to the Developer Certificate of Origin (DCO), Version 1.1. In the past I have used emails: [<EMAIL>]\n\n11ac960e1070eacc2fe92ac9a3d1753400e1fd4b This is a commit".encode() ]
]
commit.git_commit_object.message = "has no signoff"
self.assertTrue(commit.checkDCOSignoff(), "Commit message didn't have a signoff, but it has a past DCO signoff so that's ok")
class TestOrg(unittest.TestCase):
githubOrgRepos = [
type("gh_repo",(object,),{
"html_url": "https://github.com/testorg/repo1",
"name":"repo1",
"archived":False
}),
type("gh_repo",(object,),{
"html_url": "https://github.com/testorg/repo2",
"name":"repo2",
"archived":False
}),
type("gh_repo",(object,),{
"html_url": "https://github.com/testorg/repo3",
"name":"repo3",
"archived":True
}),
]
@classmethod
def tearDownClass(cls):
if os.path.exists("testorg-repo1.csv"):
os.remove("testorg-repo1.csv")
if os.path.exists("testorg-repo2.csv"):
os.remove("testorg-repo2.csv")
if os.path.exists("testorg-repo3.csv"):
os.remove("testorg-repo3.csv")
def testInitNoLoadRepos(self):
org = Org("testorg",load_repos=False)
self.assertEqual(org.repos,[])
def testOrgTypeSetGithubNoTokenDefined(self):
names_to_remove = {"GITHUB_TOKEN"}
modified_environ = {
k: v for k, v in os.environ.items() if k not in names_to_remove
}
with unittest.mock.patch.dict(os.environ, modified_environ, clear=True):
self.assertRaisesRegex(Exception,'Github token',Org,'foo')
@unittest.mock.patch.dict(os.environ,{'GITHUB_TOKEN':'<PASSWORD>'})
@unittest.mock.patch.object(git.Repo,'clone_from')
def testLoadOrgRepos(self,mock_method):
with unittest.mock.patch.object(Org,'_getGithubReposForOrg',return_value=self.githubOrgRepos) as mock:
org = Org("testorg")
self.assertEqual(org.repos[0].name,"repo1")
self.assertEqual(org.repos[1].name,"repo2")
self.assertEqual(len(org.repos),2)
@unittest.mock.patch.dict(os.environ,{'GITHUB_TOKEN':'<PASSWORD>'})
@unittest.mock.patch.object(git.Repo,'clone_from')
def testLoadOrgReposIgnoreRepo(self,mock_method):
with unittest.mock.patch.object(Org,'_getGithubReposForOrg',return_value=self.githubOrgRepos) as mock:
org = Org("testorg",ignore_repos=['repo1'])
self.assertEqual(org.repos[0].name,"repo2")
self.assertEqual(len(org.repos),1)
@unittest.mock.patch.dict(os.environ,{'GITHUB_TOKEN':'<PASSWORD>'})
@unittest.mock.patch.object(git.Repo,'clone_from')
def testLoadOrgReposOnlyRepo(self,mock_method):
with unittest.mock.patch.object(Org,'_getGithubReposForOrg',return_value=self.githubOrgRepos) as mock:
org = Org("testorg",only_repos=['repo1'])
self.assertEqual(org.repos[0].name,"repo1")
self.assertEqual(len(org.repos),1)
@unittest.mock.patch.dict(os.environ,{'GITHUB_TOKEN':'<PASSWORD>'})
@unittest.mock.patch.object(git.Repo,'clone_from')
def testLoadOrgReposIncludeArchives(self,mock_method):
with unittest.mock.patch.object(Org,'_getGithubReposForOrg',return_value=self.githubOrgRepos) as mock:
org = Org("testorg",skip_archived=False)
self.assertEqual(org.repos[0].name,"repo1")
self.assertEqual(org.repos[1].name,"repo2")
self.assertEqual(org.repos[2].name,"repo3")
self.assertEqual(len(org.repos),3)
class TestRepo(unittest.TestCase):
@unittest.mock.patch.object(git.Repo,'clone_from')
def testInitGithub(self,mock_method):
repo = Repo("https://github.com/foo/bar")
self.assertEqual(repo.name,"bar")
self.assertEqual(repo.html_url,"https://github.com/foo/bar")
self.assertEqual(repo.csv_filename,"foo-bar.csv")
self.assertTrue(os.path.isfile("foo-bar.csv"))
if os.path.isfile("foo-bar.csv"):
os.remove("foo-bar.csv")
@unittest.mock.patch.object(git.Repo,'clone_from')
def testInitLocal(self,mock_method):
repo = Repo(".")
self.assertEqual(repo.name,os.path.basename(os.path.realpath(".")))
self.assertEqual(repo.html_url,"")
self.assertEqual(repo.csv_filename,repo.name+".csv")
self.assertTrue(os.path.isfile(repo.name+".csv"))
if os.path.isfile(repo.name+".csv"):
os.remove(repo.name+".csv")
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jmertic/landscape-tools",
"score": 3
} |
#### File: landscape-tools/landscape_tools/crunchbasemembers.py
```python
import csv
import os.path
from landscape_tools.members import Members
from landscape_tools.member import Member
class CrunchbaseMembers(Members):
bulkdatafile = 'organizations.csv'
def __init__(self, bulkdatafile = None, loadData = False):
if bulkdatafile:
self.bulkdatafile = bulkdatafile
super().__init__(loadData)
def loadData(self):
if os.path.isfile(self.bulkdatafile):
print("--Loading Crunchbase bulk export data--")
with open(self.bulkdatafile, newline='') as csvfile:
memberreader = csv.reader(csvfile, delimiter=',', quotechar='"')
fields = next(memberreader)
for row in memberreader:
member = Member()
try:
member.membership = ''
except ValueError as e:
pass # avoids all the Exceptions for logo
try:
member.orgname = row[1]
except ValueError as e:
pass # avoids all the Exceptions for logo
try:
member.website = row[11]
except ValueError as e:
pass # avoids all the Exceptions for logo
try:
member.crunchbase = row[4]
except ValueError as e:
pass # avoids all the Exceptions for logo
self.members.append(member)
```
#### File: landscape-tools/landscape_tools/landscapemembers.py
```python
import ruamel.yaml
import requests
from landscape_tools.members import Members
from landscape_tools.member import Member
class LandscapeMembers(Members):
landscapeListYAML = 'https://raw.githubusercontent.com/cncf/landscapeapp/master/landscapes.yml'
landscapeSettingsYAML = 'https://raw.githubusercontent.com/{repo}/master/settings.yml'
landscapeLandscapeYAML = 'https://raw.githubusercontent.com/{repo}/master/landscape.yml'
landscapeLogo = 'https://raw.githubusercontent.com/{repo}/master/hosted_logos/{logo}'
skipLandscapes = ['openjsf']
def __init__(self, landscapeListYAML = None, loadData = True):
if landscapeListYAML:
self.landscapeListYAML = landscapeListYAML
super().__init__(loadData)
def loadData(self):
print("--Loading other landscape members data--")
response = requests.get(self.landscapeListYAML)
landscapeList = ruamel.yaml.YAML(typ='unsafe', pure=True).load(response.content)
for landscape in landscapeList['landscapes']:
if landscape['name'] in self.skipLandscapes:
continue
print("Loading "+landscape['name']+"...")
# first figure out where memberships live
response = requests.get(self.landscapeSettingsYAML.format(repo=landscape['repo']))
settingsYaml = ruamel.yaml.YAML(typ='unsafe', pure=True).load(response.content)
# skip landscape if not well formed
if 'global' not in settingsYaml or settingsYaml['global'] is None or 'membership' not in settingsYaml['global']:
continue
membershipKey = settingsYaml['global']['membership']
# then load in members only
response = requests.get(self.landscapeLandscapeYAML.format(repo=landscape['repo']))
landscapeYaml = ruamel.yaml.YAML(typ='unsafe', pure=True).load(response.content)
for category in landscapeYaml['landscape']:
if membershipKey in category['name']:
for subcategory in category['subcategories']:
for item in subcategory['items']:
if not item.get('crunchbase'):
item['crunchbase'] = ''
member = Member()
for key, value in item.items():
try:
if key != 'enduser':
setattr(member, key, value)
except ValueError as e:
pass
try:
member.membership = ''
except ValueError as e:
pass
try:
member.orgname = item['name']
except ValueError as e:
pass
try:
member.website = item['homepage_url']
except ValueError as e:
pass
try:
member.logo = self.normalizeLogo(item['logo'],landscape['repo'])
except ValueError as e:
pass
try:
member.crunchbase = item['crunchbase']
except ValueError as e:
pass
self.members.append(member)
def normalizeLogo(self, logo, landscapeRepo):
if logo is None or logo == '':
return ""
if 'https://' in logo or 'http://' in logo:
return logo
return self.landscapeLogo.format(repo=landscapeRepo,logo=logo)
```
#### File: landscape-tools/landscape_tools/landscapeoutput.py
```python
import csv
import re
import os
import unicodedata
from pathlib import Path
## third party modules
import ruamel.yaml
import requests
class LandscapeOutput:
landscapefile = 'landscape.yml'
landscape = None
landscapeMembers = []
missingcsvfile = 'missing.csv'
_missingcsvfilewriter = None
hostedLogosDir = 'hosted_logos'
landscapeMemberCategory = 'LF Member Company'
landscapeMemberClasses = [
{"name": "Platinum Membership", "category": "Platinum"},
{"name": "Gold Membership", "category": "Gold"},
{"name": "Silver Membership", "category": "Silver"},
{"name": "Silver Membership - MPSF", "category": "Silver"},
{"name": "Associate Membership", "category": "Associate"}
]
membersAdded = 0
membersUpdated = 0
membersErrors = 0
def __init__(self, loadLandscape = False):
if loadLandscape:
self.loadLandscape()
def newLandscape(self):
self.landscape = {
'landscape': [{
'category': None,
'name': self.landscapeMemberCategory,
'subcategories': []
}]
}
for landscapeMemberClass in self.landscapeMemberClasses:
memberClass = {
"subcategory": None,
"name": landscapeMemberClass['category'],
"items" : []
}
if memberClass not in self.landscapeMembers:
self.landscapeMembers.append(memberClass)
for x in self.landscape['landscape']:
if x['name'] == self.landscapeMemberCategory:
x['subcategories'] = self.landscapeMembers
def loadLandscape(self, reset=False):
with open(self.landscapefile, 'r', encoding="utf8", errors='ignore') as fileobject:
self.landscape = ruamel.yaml.YAML(typ='unsafe', pure=True).load(fileobject)
if not self.landscape or not self.landscape['landscape']:
self.newLandscape()
else:
if reset:
for landscapeMemberClass in self.landscapeMemberClasses:
memberClass = {
"subcategory": None,
"name": landscapeMemberClass['category'],
"items" : []
}
if memberClass not in self.landscapeMembers:
self.landscapeMembers.append(memberClass)
for x in self.landscape['landscape']:
if x['name'] == self.landscapeMemberCategory:
x['subcategories'] = self.landscapeMembers
else:
for x in self.landscape['landscape']:
if x['name'] == self.landscapeMemberCategory:
self.landscapeMembers = x['subcategories']
def writeMissing(self, name, logo, homepage_url, crunchbase):
if self._missingcsvfilewriter is None:
self._missingcsvfilewriter = csv.writer(open(self.missingcsvfile, mode='w'), delimiter=',', quotechar='"', quoting=csv.QUOTE_ALL)
self._missingcsvfilewriter.writerow(['name','logo','homepage_url','crunchbase'])
self.membersErrors = self.membersErrors + 1
self._missingcsvfilewriter.writerow([name, logo, homepage_url, crunchbase])
def hostLogo(self,logo,orgname):
if 'https://' not in logo and 'http://' not in logo:
return logo
print("...Hosting logo for "+orgname)
filename = str(orgname).strip().replace(' ', '_')
filename = filename.replace('.', '')
filename = filename.replace(',', '')
filename = re.sub(r'(?u)[^-\w.]', '', filename)
filename = filename.lower()
filenamepath = os.path.normpath(self.hostedLogosDir+"/"+unicodedata.normalize('NFKD',filename).encode('ascii', 'ignore').decode('ascii')+".svg")
r = requests.get(logo, allow_redirects=True)
with open(filenamepath, 'wb') as fp:
fp.write(r.content)
return unicodedata.normalize('NFKD',filename).encode('ascii', 'ignore').decode('ascii')+".svg"
def _removeNulls(self,yamlout):
        # strip the literal "null" that the YAML dump leaves after empty keys
        # (e.g. "- item: null" -> "- item:")
        return re.sub(r'(- \w+:) null', r'\1', yamlout)
def updateLandscape(self):
# now write it back
for x in self.landscape['landscape']:
if x['name'] == self.landscapeMemberCategory:
x['subcategories'] = self.landscapeMembers
landscapefileoutput = Path(self.landscapefile)
ryaml = ruamel.yaml.YAML()
ryaml.indent(mapping=2, sequence=4, offset=2)
ryaml.default_flow_style = False
ryaml.allow_unicode = True
ryaml.width = 160
ryaml.Dumper = ruamel.yaml.RoundTripDumper
ryaml.dump(self.landscape,landscapefileoutput, transform=self._removeNulls)
print("Successfully added "+str(self.membersAdded)+" members and skipped "+str(self.membersErrors)+" members")
``` |
{
"source": "jmertic/OpenColorIO",
"score": 2
} |
#### File: python/DocStrings/FileTransform.py
```python
class FileTransform:
"""
FileTransform
"""
def __init__(self):
pass
def getSrc(self):
pass
def setSrc(self, src):
pass
def getCCCId(self):
pass
def setCCCId(self, cccid):
pass
def getInterpolation(self):
pass
def setInterpolation(self, interp):
pass
def getNumFormats(self):
pass
def getFormatNameByIndex(self, index):
pass
def getFormatExtensionByIndex(self, index):
pass
```
#### File: python/DocStrings/ProcessorMetadata.py
```python
class ProcessorMetadata:
"""
ProcessorMetadata
This contains meta information about the process that generated
this processor. The results of these functions do not
impact the pixel processing.
"""
def __init__(self):
pass
def getFiles(self):
"""
getFiles()
Returns a list of file references used internally by this processor
:return: list of filenames
:rtype: list
"""
pass
def getLooks(self):
"""
getLooks()
Returns a list of looks used internally by this processor
:return: list of look names
:rtype: list
"""
pass
``` |
{
"source": "jmespath/jmespath-playground",
"score": 2
} |
#### File: jmespath/jmespath-playground/app.py
```python
import os
import boto3
from chalice import Chalice, BadRequestError
from chalicelib.storage import Config, S3Storage, MaxSizeError, CachingStorage
from chalicelib.storage import SemiDBMCache
from chalicelib.schema import SavedQuery
CACHE_DIR = '/tmp/appcache'
app = Chalice(app_name='jmespath-playground')
app.debug = True
app.context = {}
def before_request(app):
if 'storage' in app.context:
return
s3 = boto3.client('s3')
config = Config(
bucket=os.environ['APP_S3_BUCKET'],
prefix=os.environ.get('APP_S3_PREFIX', ''),
)
if not os.path.isdir(CACHE_DIR):
os.makedirs(CACHE_DIR)
cache = SemiDBMCache(CACHE_DIR)
storage = S3Storage(client=s3,
config=config)
app.context['storage'] = CachingStorage(storage, cache)
@app.route('/anon', methods=['POST'], cors=True)
def new_anonymous_query():
before_request(app)
try:
body = app.current_request.json_body
except ValueError as e:
raise BadRequestError("Invalid JSON: %s" % e)
_validate_body(body)
storage = app.context['storage']
try:
uuid = storage.put(body)
except MaxSizeError as e:
raise BadRequestError(str(e))
return {'uuid': uuid}
def _validate_body(body):
if body is None:
raise BadRequestError("Request body cannot be empty.")
data = SavedQuery().load(body)
if data.errors:
raise BadRequestError(data.errors)
@app.route('/anon/{uuid}', methods=['GET'], cors=True)
def get_anonymous_query(uuid):
before_request(app)
storage = app.context['storage']
result = storage.get(uuid)
return result
# This is just used as a sanity check to make sure
# we can hit our API. Could also be used for monitoring.
@app.route('/ping', methods=['GET'], cors=True)
def ping():
return {'ping': 11}
```
#### File: jmespath-playground/chalicelib/storage.py
```python
import os
import json
import logging
from uuid import uuid4
# We're using a fixed name here because chalice will
# configure the appropriate handlers for the logger that
# matches the app name.
LOG = logging.getLogger('jmespath-playground.storage')
MAX_BODY_SIZE = 1024 * 100
# Max disk space allowed for cache data.
MAX_DISK_USAGE = 500 * 1024 * 1024
class MaxSizeError(Exception):
pass
class Config:
def __init__(self, bucket, prefix='', max_body_size=MAX_BODY_SIZE):
self.bucket = bucket
self.prefix = prefix
self.max_body_size = max_body_size
class Storage:
def get(self, uuid):
raise NotImplementedError("get")
def put(self, data):
raise NotImplementedError("put")
class SemiDBMCache:
# This is a small wrapper around semidbm.
# It's needed for two reasons:
# 1. We store the parsed JSON values as cache data so we need to handle the
# JSON load/dump ourself.
#
# 2. We have a fixed amount of disk storage to work with. semidbm doesn't
# support any notion of max disk space usage so this class needs to manage
# that. The approach taken here is to simply turn off caching once
# the max disk space limit is reached. This isn't the greatest idea, but
# we're betting that it's unlikely we'll reach the max disk usage
# before the function is shut down. It's worth investigating a proper
# eviction strategy in the future.
def __init__(self, dbdir, check_frequency=20, max_filesize=MAX_DISK_USAGE):
import semidbm
self._db = semidbm.open(dbdir, 'c')
self._max_filesize = max_filesize
# How frequently we check the file size of the cache.
# If we check every 20 writes, then at worst case we overshoot
# the max size by MAX_BODY_SIZE * check_frequency, or
# about 20MB if we use the default values for everything.
self._check_frequency = check_frequency
self._counter = 0
# When we reach the max disk size, we disable
# writing data to the cache.
self._writes_enabled = True
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
def __getitem__(self, key):
d = self._db[key].decode('utf-8')
return json.loads(d)
def __contains__(self, key):
return key in self._db
def __setitem__(self, key, value):
if not self._writes_enabled:
return
v = json.dumps(value).encode('utf-8')
self._db[key] = v
self._counter += 1
if self._counter >= self._check_frequency:
self._check_max_size_reached()
self._counter = 0
def _check_max_size_reached(self):
# There's no public interface for getting the
# filename of the db so we have to use an internal
# attribute to access the filename.
filesize = os.path.getsize(self._db._data_filename)
LOG.debug('SemiDBMCache filesize: %s', filesize)
if filesize > self._max_filesize:
LOG.debug("SemiDBMCache filesize (%s) exceeded %s, "
"disabling writes to cache.", filesize,
self._max_filesize)
self._writes_enabled = False
class CachingStorage(Storage):
"""Wraps a storage object with a disk cache."""
def __init__(self, real_storage, cache):
self._real_storage = real_storage
self._cache = cache
def get(self, uuid):
cached = self._cache.get(uuid)
if cached is not None:
LOG.debug("cache hit for %s", uuid)
return cached
LOG.debug("cache miss for %s, retrieving from source.", uuid)
result = self._real_storage.get(uuid)
self._cache[uuid] = result
return result
def put(self, data):
uuid = self._real_storage.put(data)
self._cache[uuid] = data
return uuid
class S3Storage(Storage):
def __init__(self, client, config):
self._config = config
self._client = client
def get(self, uuid):
bucket = self._config.bucket
key = self._create_s3_key(uuid)
contents = self._client.get_object(
Bucket=bucket, Key=key)['Body'].read()
return json.loads(contents)
def put(self, data):
bucket = self._config.bucket
uuid = str(uuid4())
key = self._create_s3_key(uuid)
body = json.dumps(data, separators=(',', ':'))
if len(body) > self._config.max_body_size:
raise MaxSizeError("Request body is too large (%s), "
"must be less than %s bytes." % (
len(body), self._config.max_body_size))
self._client.put_object(Bucket=bucket, Key=key, Body=body)
return uuid
def _create_s3_key(self, uuid):
prefix = self._config.prefix
if not prefix:
return uuid
elif prefix.endswith('/'):
prefix = prefix[:-1]
return '%s/%s' % (prefix, uuid)
```
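A tiny sketch of the caching contract implemented above, using an in-memory stand-in instead of S3 and semidbm (the `InMemoryStorage` class is hypothetical and assumes `Storage` and `CachingStorage` from the file above are in scope): `put` writes through to the real storage and primes the cache, and `get` only falls back to the real storage on a cache miss.
```python
from uuid import uuid4


class InMemoryStorage(Storage):
    """Illustrative dict-backed stand-in for S3Storage."""

    def __init__(self):
        self._items = {}

    def get(self, uuid):
        return self._items[uuid]

    def put(self, data):
        uuid = str(uuid4())
        self._items[uuid] = data
        return uuid


backend = InMemoryStorage()
# A plain dict satisfies the cache interface used by CachingStorage (get/__setitem__).
cached = CachingStorage(backend, cache={})
uuid = cached.put({"query": "foo.bar"})
assert cached.get(uuid) == {"query": "foo.bar"}  # served from the cache
```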
#### File: jmespath/jmespath-playground/template-fixups.py
```python
import argparse
import json
class CFNTemplate:
def __init__(self, data):
self._data = data
def add_parameters(self, parameters):
self._data.setdefault('Parameters', {}).update(parameters)
def get_parameter_default(self, name):
return self._data.get('Parameters', {}).get(name, {}).get('Default')
def resources(self, resource_type=None):
for value in self._data['Resources'].values():
if resource_type is not None:
if value['Type'] != resource_type:
continue
yield value
def to_json(self):
return json.dumps(self._data, indent=2, separators=(',', ': ')) + '\n'
def fixup_template(fileobj):
template = CFNTemplate(json.load(fileobj))
extract_lambda_env_vars_to_template_params(template)
extract_bucket_reference_for_param_reference(template, 'AppS3Bucket')
return template
def extract_lambda_env_vars_to_template_params(template):
# This pulls out the lambda environment variable as
# cloudformation parameters. That way they can be
# overriden when deploying the stack. This will have
# no functional difference if you don't override these
# values.
extracted_template_params = {}
for resource in template.resources('AWS::Serverless::Function'):
env = resource['Properties'].get('Environment')
if env is None:
continue
env_vars = resource['Properties']['Environment']['Variables']
for key in env_vars:
# This isn't safe in the general case because we
# could have name collisions, but in our case
# we know we're using UPPER_SNAKE_CASE so
# we won't have collisions.
param_key = to_camel_case(key)
extracted_template_params[param_key] = {
'Default': env_vars[key],
'Type': 'String',
}
env_vars[key] = {'Ref': param_key}
template.add_parameters(extracted_template_params)
def to_camel_case(key):
return ''.join([k.capitalize() for k in key.split('_')])
def extract_bucket_reference_for_param_reference(template, param_name):
# This is a specific change for this app (vs. the pull up lambda
# env vars as template params). We want to replace the hard
# coded references to our S3 bucket in our IAM policy with
# the CFN param value that we've extracted.
param_value = template.get_parameter_default(param_name)
if param_value is None:
return
for resource in template.resources('AWS::Serverless::Function'):
policies = resource['Properties'].get('Policies')
if policies is None:
continue
for policy in policies:
for statement in policy['Statement']:
if param_value not in statement.get('Resource', ''):
continue
old_value = statement['Resource']
parts = list(old_value.partition(param_value))
parts[1] = {'Ref': param_name}
new_value = {'Fn::Join': ["", parts]}
statement['Resource'] = new_value
def main():
parser = argparse.ArgumentParser()
parser.add_argument('template', type=argparse.FileType('r'))
parser.add_argument('-i', '--inplace', action='store_true',
help=('Rewrite the template in place. If this '
'value is not provided, the new template is '
'written to stdout.'))
args = parser.parse_args()
new_template = fixup_template(args.template)
template_json = new_template.to_json()
if not args.inplace:
print(template_json)
else:
with open(args.template.name, 'w') as f:
f.write(template_json)
main()
```
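A quick sketch of what `fixup_template` does to a Lambda environment variable, using a minimal hypothetical SAM template (the resource name and bucket value are made up, and the functions from the file above are assumed to be in scope):
```python
import io
import json

# Hypothetical input: one serverless function with a single environment variable.
template_body = {
    "Resources": {
        "APIHandler": {
            "Type": "AWS::Serverless::Function",
            "Properties": {
                "Environment": {"Variables": {"APP_S3_BUCKET": "my-bucket"}}
            }
        }
    }
}

fixed = fixup_template(io.StringIO(json.dumps(template_body)))
print(fixed.to_json())
# APP_S3_BUCKET is camel-cased into a template parameter:
#   "Parameters": {"AppS3Bucket": {"Default": "my-bucket", "Type": "String"}}
# and the function's variable now reads {"Ref": "AppS3Bucket"}.
```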
#### File: tests/functional/test_storage.py
```python
from chalicelib.storage import SemiDBMCache
def test_can_cache_through_semidbm(tmpdir):
db = SemiDBMCache(str(tmpdir))
for i in range(20):
db[str(i)] = {'count': i}
for i in range(20):
assert db[str(i)] == {'count': i}
def test_check_frequency_noop_when_below_size_threshold(tmpdir):
db = SemiDBMCache(str(tmpdir), check_frequency=2)
for i in range(20):
db[str(i)] = {'count': i}
for i in range(20):
assert db[str(i)] == {'count': i}
def test_cache_noop_when_max_size_reached(tmpdir):
db = SemiDBMCache(str(tmpdir), check_frequency=1, max_filesize=100)
for i in range(20):
db[str(i)] = {'count': i}
assert db[b'1'] == {'count': 1}
# We've exhausted the max_filesize so any setitems will be noops.
db[b'100'] = {'count': 100}
assert b'100' not in db
``` |
{
"source": "jMetal/DasDennis",
"score": 4
} |
#### File: jMetal/DasDennis/das_dennis.py
```python
from typing import List
import numpy as np
class DasDennis:
"""Class implementing the Das-Dennis method to generate a set of uniformly-spaced weight vectors.
The method is described in: <NAME> and <NAME>. Normal-boundary intersection:
    a new method for generating the Pareto surface in nonlinear multicriteria optimization problems.
    SIAM J. on Optimization, 8(3):631–657, March 1998. DOI: http://dx.doi.org/10.1137/S1052623496307510.
Attributes
----------
number_of_partitions: int
number of divisions in each axis
dimension: int
dimension of the points (e.g., number of objectives)
Methods
-------
get_weight_vectors()
get_number_of_points()
"""
def __init__(self, number_of_partitions, dimension):
self.number_of_partitions = number_of_partitions
self.dimension = dimension
def __get_first_level(self, number_of_partitions: int) -> List:
return [_ for _ in np.linspace(0, 1, number_of_partitions + 1)]
def __get_generic_level(self, first_level, previous_level):
next_level = []
for ind0, i in enumerate(previous_level):
for ind1, j in enumerate(i[1]):
values = [first_level[_] for _ in range(len(first_level) - ind1 - ind0)]
next_level.append([i[0] + [i[1][ind1]], values])
return next_level
def __get_last_level(self, previous_level):
last_level = []
for ind0, i in enumerate(previous_level):
for ind1, j in enumerate(i[1]):
last_level.append([i[0] + [j, 1.0 - j - sum(i[0])]])
return last_level
def get_weight_vectors(self):
first_level = self.__get_first_level(self.number_of_partitions)
previous_level = [[[], first_level]]
for i in range(1, self.dimension - 1):
next_level = self.__get_generic_level(first_level, previous_level)
previous_level = next_level
last_level = self.__get_last_level(previous_level)
result = [last_level[i][0] for i in range(len(last_level))]
return result
def save_to_file(self, file_name, weight_vectors, separator=" "):
with open(file_name, 'w+') as output_file:
for vector in weight_vectors:
output_string = ""
for value in vector:
output_string += str(value) + separator
output_string = output_string[:-1]
output_string += "\n"
output_file.write(output_string)
def __factorial(self, n:int):
if (n == 0 or n == 1):
return 1
else:
return n * self.__factorial(n-1)
def __binomial_coefficient(self, n, k):
return self.__factorial(n) / (self.__factorial(k) * self.__factorial(n - k))
def get_number_of_points(self):
return int(self.__binomial_coefficient(self.number_of_partitions + self.dimension - 1, self.dimension - 1))
```
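A short usage sketch for the class above (not part of the original module; the expected values mirror the unit tests that follow): 5 partitions in 3 dimensions give C(5+3-1, 3-1) = 21 weight vectors on the unit simplex.
```python
from das_dennis import DasDennis

das_dennis = DasDennis(number_of_partitions=5, dimension=3)
assert das_dennis.get_number_of_points() == 21

weights = das_dennis.get_weight_vectors()
assert len(weights) == 21
assert weights[0] == [0.0, 0.0, 1.0]   # first corner of the simplex
assert weights[20] == [1.0, 0.0, 0.0]  # last corner

# Optionally write them to disk, one vector per line:
das_dennis.save_to_file("weights.csv", weights, separator=",")
```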
#### File: jMetal/DasDennis/test_das_dennis.py
```python
import unittest
from das_dennis import DasDennis
class DasDennisTestCases(unittest.TestCase):
def test_should_constructor_create_a_non_null_object(self):
das_dennis = DasDennis(4, 3)
self.assertIsNotNone(das_dennis)
def test_should_constructor_set_the_valid_parameter(self):
das_dennis = DasDennis(number_of_partitions=4, dimension=3)
self.assertEquals(4, das_dennis.number_of_partitions)
self.assertEquals(3, das_dennis.dimension)
# Case 1: number of partitions = 12, dimension = 3
def test_should_get_number_of_points_return_the_correct_value_case_1(self):
das_dennis = DasDennis(number_of_partitions=12, dimension=3)
self.assertEquals(91, das_dennis.get_number_of_points())
# Case 2: number of partitions = 5, dimension = 3
def test_should_get_number_of_points_return_the_correct_value_case_2(self):
das_dennis = DasDennis(number_of_partitions=5, dimension=3)
self.assertEquals(21, das_dennis.get_number_of_points())
# Case 1: number of partitions = 12, dimension = 3
def test_should_get_weight_vectors_work_properly_case_1(self):
""" h: number of partitions, m: dimension"""
das_dennis = DasDennis(number_of_partitions=12, dimension=3)
points = das_dennis.get_weight_vectors()
self.assertEquals(91, len(points))
self.assertEquals([0.0, 0.0, 1.0], points[0])
self.assertEquals([1.0, 0.0, 0.0], points[90])
self.assertEquals([0.75, 0.25, 0.0], points[84])
# Case 2: number of partitions = 5, dimension = 3
def test_should_get_weight_vectors_work_properly_case_2(self):
""" h: number of partitions, m: dimension"""
das_dennis = DasDennis(number_of_partitions=5, dimension=3)
points = das_dennis.get_weight_vectors()
self.assertEquals(21, len(points))
self.assertEquals([0.0, 0.0, 1.0], points[0])
self.assertEquals([0.8, 0.2, 0.0], points[19])
self.assertEquals([1.0, 0.0, 0.0], points[20])
``` |
{
"source": "jmetancelin/ipython-batch-scheduler-magic",
"score": 3
} |
#### File: ipython-batch-scheduler-magic/execute_batch_scheduler/execute_magic.py
```python
from __future__ import print_function
import sys
from IPython.core.magic import (Magics, magics_class, cell_magic)
from IPython.core import magic_arguments
from IPython.utils.process import arg_split
from IPython.lib.backgroundjobs import BackgroundJobManager
# Import all known backends
from .backends import (BasicMgr, SSHMgr, SlurmMgr)
from . import _DEFAULT_MGR
# The class MUST call this class decorator at creation time
@magics_class
class ExecuteMagics(Magics):
"""Magics for cell execution through workload manager
List of available workload managers:
* :class:`execute_batch_scheduler.backends.BasicMgr`
* :class:`execute_batch_scheduler.backends.SSHMgr`
* :class:`execute_batch_scheduler.backends.SlurmMgr`
"""
# Available workload managers
_wlmgr = {'': BasicMgr, 'ssh': SSHMgr, 'slurm': SlurmMgr}
@magic_arguments.magic_arguments()
@magic_arguments.argument(
'--wlm', type=str, default=_DEFAULT_MGR,
help="""Workload manager.""")
@magic_arguments.argument(
'--shell', type=str, default='/bin/bash',
help="""Shell to use.""")
@magic_arguments.argument(
'--amgr', type=str,
help="""The variable in which to store workload manager instance.
If the script is backgrounded, this will be used to get cell output/error
using `get_output()` method.""")
@magic_arguments.argument(
'--bg', action="store_true",
help="""Whether to run the script in the background.
If given, the only way to see the output of the command is
with `--amgr`.""")
@cell_magic
def execute(self, line, cell):
"""Execute given cell content through configured workload scheduler.
        Keeps the arguments ``--wlm``, ``--shell``, ``--bg`` and ``--amgr``;
        other arguments are passed to the workload manager backend.
        Extra command line arguments are taken from a variable that
        may be overridden in the IPython profile configuration file.
"""
from . import _DEFAULT_LINE_CMD_ARGS
args, cmd = self.execute.parser.parse_known_args(arg_split(line))
extra_cmd = []
if args.wlm == _DEFAULT_MGR:
if isinstance(_DEFAULT_LINE_CMD_ARGS, dict):
if args.wlm in _DEFAULT_LINE_CMD_ARGS.keys():
extra_cmd = arg_split(
_DEFAULT_LINE_CMD_ARGS[args.wlm])
else:
extra_cmd = arg_split(_DEFAULT_LINE_CMD_ARGS)
# Build workload manager instance
job_mgr = self._wlmgr[args.wlm](
extra_cmd + cmd, args.shell, userns=self.shell.user_ns)
# Submit the cell as job script
sub_out, sub_err = job_mgr.submit(cell)
sys.stdout.write(sub_out)
sys.stdout.flush()
sys.stderr.write(sub_err)
sys.stderr.flush()
if args.amgr:
self.shell.user_ns[args.amgr] = job_mgr
if args.bg:
self.job_manager = BackgroundJobManager()
if not args.amgr:
sys.stderr.write("""Warning: --bg argument specified without
--amgr (only way to get cell output)\n""")
sys.stderr.flush()
self.job_manager.new(job_mgr.wait_progress, kw=dict(silent=True))
return
else:
try:
job_mgr.wait_progress()
except KeyboardInterrupt:
job_mgr.interrupt()
# Get job output
job_out, job_err = job_mgr.get_output()
if job_out:
sys.stdout.write(job_out)
sys.stdout.flush()
if job_err:
sys.stderr.write(job_err)
sys.stderr.flush()
def load_ipython_extension(ipython):
"""Load extension.
Registers the ExecuteMagics
"""
# The `ipython` argument is the currently active `InteractiveShell`
# instance, which can be used in any way. This allows you to register
# new magics or aliases, for example.
ipython.register_magics(ExecuteMagics)
# def unload_ipython_extension(ipython):
# """Unload extension
#     Actually does nothing
# """
# # If you want your extension to be unloadable, put that logic here.
# pass
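# Example usage (an illustrative sketch, not part of the original module): once
# the extension is loaded, e.g. with `%load_ext execute_batch_scheduler.execute_magic`,
# a cell can be submitted through one of the registered backends:
#
#   %%execute --wlm slurm --bg --amgr job
#   echo "hello from the batch scheduler"
#
# With `--bg` the submission runs in the background, and `--amgr job` stores the
# workload manager instance in the user namespace so the cell output can be
# retrieved later with `job.get_output()` (see the `execute` docstring above).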
``` |
{
"source": "jmettes/PyRate",
"score": 2
} |
#### File: PyRate/tests/test_refpixel.py
```python
import os
import copy
import shutil
from subprocess import run, PIPE
from pathlib import Path
import pytest
import itertools
import numpy as np
from numpy import nan, mean, std, isnan
import pyrate.configuration
import pyrate.core.refpixel
from pyrate.core import config as cf
from pyrate.core.refpixel import ref_pixel, _step, RefPixelError, ref_pixel_calc_wrapper, \
convert_geographic_coordinate_to_pixel_value, convert_pixel_value_to_geographic_coordinate
from pyrate.core import shared, ifgconstants as ifc
from pyrate import correct, conv2tif, prepifg
from pyrate.configuration import Configuration
from tests.common import TEST_CONF_ROIPAC, TEST_CONF_GAMMA, SML_TEST_DEM_TIF
from tests.common import small_data_setup, MockIfg, copy_small_ifg_file_list, \
copy_and_setup_small_data, manipulate_test_conf, assert_two_dirs_equal, PYTHON3P6
# TODO: figure out how editing resource.setrlimit fixes the error
# to fix the "too many open files" error
# https://stackoverflow.com/questions/18280612/ioerror-errno-24-too-many-open-files
# default testing values
REFNX = 5
REFNY = 7
MIN_FRAC = 0.7
CHIPSIZE = 3
PARALLEL = False
class TestReferencePixelInputTests:
'''
Verifies error checking capabilities of the reference pixel function
'''
@classmethod
def setup_method(cls):
cls.ifgs = small_data_setup()
cls.params = cf.get_config_params(TEST_CONF_ROIPAC)
cls.params[cf.REFNX] = REFNX
cls.params[cf.REFNY] = REFNY
cls.params[cf.REF_CHIP_SIZE] = CHIPSIZE
cls.params[cf.REF_MIN_FRAC] = MIN_FRAC
cls.params[cf.PARALLEL] = PARALLEL
def test_missing_chipsize(self):
self.params[cf.REF_CHIP_SIZE] = None
with pytest.raises(cf.ConfigException):
ref_pixel(self.ifgs, self.params)
def test_chipsize_valid(self):
for illegal in [0, -1, -15, 1, 2, self.ifgs[0].ncols+1, 4, 6, 10, 20]:
self.params[cf.REF_CHIP_SIZE] = illegal
with pytest.raises(RefPixelError):
ref_pixel(self.ifgs, self.params)
def test_minimum_fraction_missing(self):
self.params[cf.REF_MIN_FRAC] = None
with pytest.raises(cf.ConfigException):
ref_pixel(self.ifgs, self.params)
def test_minimum_fraction_threshold(self):
for illegal in [-0.1, 1.1, 1.000001, -0.0000001]:
self.params[cf.REF_MIN_FRAC] = illegal
with pytest.raises(RefPixelError):
ref_pixel(self.ifgs, self.params)
def test_search_windows(self):
# 45 is max # cells a width 3 sliding window can iterate over
for illegal in [-5, -1, 0, 46, 50, 100]:
self.params[cf.REFNX] = illegal
with pytest.raises(RefPixelError):
ref_pixel(self.ifgs, self.params)
# 40 is max # cells a width 3 sliding window can iterate over
for illegal in [-5, -1, 0, 71, 85, 100]:
self.params[cf.REFNY] = illegal
with pytest.raises(RefPixelError):
ref_pixel(self.ifgs, self.params)
def test_missing_search_windows(self):
self.params[cf.REFNX] = None
with pytest.raises(cf.ConfigException):
ref_pixel(self.ifgs, self.params)
self.params[cf.REFNX] = REFNX
self.params[cf.REFNY] = None
with pytest.raises(cf.ConfigException):
ref_pixel(self.ifgs, self.params)
class TestReferencePixelTests:
"""
Tests reference pixel search
"""
@classmethod
def setup_method(cls):
cls.params = cf.get_config_params(TEST_CONF_ROIPAC)
cls.params[cf.OUT_DIR], cls.ifgs = copy_and_setup_small_data()
cls.params[cf.REFNX] = REFNX
cls.params[cf.REFNY] = REFNY
cls.params[cf.REF_CHIP_SIZE] = CHIPSIZE
cls.params[cf.REF_MIN_FRAC] = MIN_FRAC
cls.params[cf.PARALLEL] = PARALLEL
def test_all_below_threshold_exception(self):
# test failure when no valid stacks in dataset
# rig mock data to be below threshold
mock_ifgs = [MockIfg(i, 6, 7) for i in self.ifgs]
for m in mock_ifgs:
m.phase_data[:1] = nan
m.phase_data[1:5] = 0.1
m.phase_data[5:] = nan
self.params[cf.REFNX] = 2
self.params[cf.REFNY] = 2
self.params[cf.REF_CHIP_SIZE] = CHIPSIZE
self.params[cf.REF_MIN_FRAC] = MIN_FRAC
self.params[cf.PARALLEL] = PARALLEL
with pytest.raises(ValueError):
ref_pixel(mock_ifgs, self.params)
def test_refnxy_step_1(self):
# test step of 1 for refnx|y gets the reference pixel for axis centre
mock_ifgs = [MockIfg(i, 47, 72) for i in self.ifgs]
for m in mock_ifgs:
m.phase_data[:1] = 0.2
m.phase_data[1:5] = 0.1
m.phase_data[5:] = 0.3
exp_refpx = (1, 1)
self.params[cf.REFNX] = 1
self.params[cf.REFNY] = 1
self.params[cf.REF_CHIP_SIZE] = CHIPSIZE
self.params[cf.REF_MIN_FRAC] = MIN_FRAC
self.params[cf.PARALLEL] = PARALLEL
res = ref_pixel(mock_ifgs, self.params)
assert exp_refpx == res
def test_large_window(self):
# 5x5 view over a 5x5 ifg with 1 window/ref pix search
chps = 5
mockifgs = [MockIfg(i, chps, chps) for i in self.ifgs]
self.params[cf.REFNX] = 1
self.params[cf.REFNY] = 1
self.params[cf.REF_CHIP_SIZE] = chps
self.params[cf.REF_MIN_FRAC] = MIN_FRAC
self.params[cf.PARALLEL] = PARALLEL
res = ref_pixel(mockifgs, self.params)
assert (2, 2) == res
def test_step(self):
# test different search windows to verify x/y step calculation
# convenience testing function
def assert_equal(actual, expected):
for a, e in zip(actual, expected):
assert a == e
# start with simple corner only test
width = 47
radius = 2
refnx = 2
exp = [2, 25, 44]
act = _step(width, refnx, radius)
assert_equal(act, exp)
# test with 3 windows
refnx = 3
exp = [2, 17, 32]
act = _step(width, refnx, radius)
assert_equal(act, exp)
# test 4 search windows
refnx = 4
exp = [2, 13, 24, 35]
act = _step(width, refnx, radius)
assert_equal(act, exp)
def test_ref_pixel(self):
exp_refpx = (2, 25)
self.params[cf.REFNX] = 2
self.params[cf.REFNY] = 2
self.params[cf.REF_CHIP_SIZE] = 5
self.params[cf.REF_MIN_FRAC] = MIN_FRAC
self.params[cf.PARALLEL] = PARALLEL
res = ref_pixel(self.ifgs, self.params)
assert res == exp_refpx
        # Invalidate first data stack, get new refpix coords & retest
for i in self.ifgs:
i.phase_data[:30, :50] = nan
exp_refpx = (38, 2)
res = ref_pixel(self.ifgs, self.params)
assert res == exp_refpx
def _expected_ref_pixel(ifgs, cs):
"""Helper function for finding reference pixel when refnx/y=2"""
# calculate expected data
data = [i.phase_data for i in ifgs] # len 17 list of arrays
ul = [i[:cs, :cs] for i in data] # upper left corner stack
ur = [i[:cs, -cs:] for i in data]
ll = [i[-cs:, :cs] for i in data]
lr = [i[-cs:, -cs:] for i in data]
ulm = mean([std(i[~isnan(i)]) for i in ul]) # mean std of all the layers
urm = mean([std(i[~isnan(i)]) for i in ur])
llm = mean([std(i[~isnan(i)]) for i in ll])
lrm = mean([std(i[~isnan(i)]) for i in lr])
    assert not isnan([ulm, urm, llm, lrm]).any()
# coords of the smallest mean is the result
mn = [ulm, urm, llm, lrm]
class TestLegacyEqualityTest:
@classmethod
def setup_method(cls):
cls.params = cf.get_config_params(TEST_CONF_ROIPAC)
cls.params[cf.PARALLEL] = 0
cls.params[cf.OUT_DIR], cls.ifg_paths = copy_small_ifg_file_list()
conf_file = Path(cls.params[cf.OUT_DIR], 'conf_file.conf')
pyrate.configuration.write_config_file(params=cls.params, output_conf_file=conf_file)
cls.params = Configuration(conf_file).__dict__
cls.params_alt_ref_frac = copy.copy(cls.params)
cls.params_alt_ref_frac[cf.REF_MIN_FRAC] = 0.5
cls.params_all_2s = copy.copy(cls.params)
cls.params_all_2s[cf.REFNX] = 2
cls.params_all_2s[cf.REFNY] = 2
cls.params_chipsize_15 = copy.copy(cls.params_all_2s)
cls.params_chipsize_15[cf.REF_CHIP_SIZE] = 15
cls.params_all_1s = copy.copy(cls.params)
cls.params_all_1s[cf.REFNX] = 1
cls.params_all_1s[cf.REFNY] = 1
cls.params_all_1s[cf.REF_MIN_FRAC] = 0.7
for p, q in zip(cls.params[cf.INTERFEROGRAM_FILES], cls.ifg_paths): # hack
p.sampled_path = q
p.tmp_sampled_path = q
@classmethod
def teardown_method(cls):
shutil.rmtree(cls.params[cf.OUT_DIR])
def test_small_test_data_ref_pixel_lat_lon_provided(self):
self.params[cf.REFX], self.params[cf.REFY] = 150.941666654, -34.218333314
refx, refy = pyrate.core.refpixel.ref_pixel_calc_wrapper(self.params)
assert refx == 38
assert refy == 58
assert 0.8 == pytest.approx(self.params[cf.REF_MIN_FRAC])
def test_small_test_data_ref_pixel(self):
refx, refy = pyrate.core.refpixel.ref_pixel_calc_wrapper(self.params)
assert refx == 38
assert refy == 58
assert 0.8 == pytest.approx(self.params[cf.REF_MIN_FRAC])
def test_small_test_data_ref_chipsize_15(self):
refx, refy = pyrate.core.refpixel.ref_pixel_calc_wrapper(self.params_chipsize_15)
assert refx == 7
assert refy == 7
assert 0.5 == pytest.approx(self.params_alt_ref_frac[cf.REF_MIN_FRAC])
def test_metadata(self):
refx, refy = pyrate.core.refpixel.ref_pixel_calc_wrapper(self.params_chipsize_15)
for i in self.ifg_paths:
ifg = shared.Ifg(i)
ifg.open(readonly=True)
md = ifg.meta_data
for k, v in zip([ifc.PYRATE_REFPIX_X, ifc.PYRATE_REFPIX_Y, ifc.PYRATE_REFPIX_LAT,
ifc.PYRATE_REFPIX_LON, ifc.PYRATE_MEAN_REF_AREA, ifc.PYRATE_STDDEV_REF_AREA],
[str(refx), str(refy), 0, 0, 0, 0]):
assert k in md # metadata present
# assert values
ifg.close()
def test_small_test_data_ref_all_1(self):
refx, refy = pyrate.core.refpixel.ref_pixel_calc_wrapper(self.params_all_1s)
assert 0.7 == pytest.approx(self.params_all_1s[cf.REF_MIN_FRAC])
assert 1 == self.params_all_1s[cf.REFNX]
assert 1 == self.params_all_1s[cf.REFNY]
assert refx == 2
assert refy == 2
class TestLegacyEqualityTestMultiprocessParallel:
@classmethod
def setup_method(cls):
cls.params = cf.get_config_params(TEST_CONF_ROIPAC)
cls.params[cf.PARALLEL] = 1
cls.params[cf.OUT_DIR], cls.ifg_paths = copy_small_ifg_file_list()
conf_file = Path(cls.params[cf.OUT_DIR], 'conf_file.conf')
pyrate.configuration.write_config_file(params=cls.params, output_conf_file=conf_file)
cls.params = Configuration(conf_file).__dict__
cls.params_alt_ref_frac = copy.copy(cls.params)
cls.params_alt_ref_frac[cf.REF_MIN_FRAC] = 0.5
cls.params_all_2s = copy.copy(cls.params)
cls.params_all_2s[cf.REFNX] = 2
cls.params_all_2s[cf.REFNY] = 2
cls.params_chipsize_15 = copy.copy(cls.params_all_2s)
cls.params_chipsize_15[cf.REF_CHIP_SIZE] = 15
cls.params_all_1s = copy.copy(cls.params)
cls.params_all_1s[cf.REFNX] = 1
cls.params_all_1s[cf.REFNY] = 1
cls.params_all_1s[cf.REF_MIN_FRAC] = 0.7
for p, q in zip(cls.params[cf.INTERFEROGRAM_FILES], cls.ifg_paths): # hack
p.sampled_path = q
p.tmp_sampled_path = q
@classmethod
def teardown_method(cls):
shutil.rmtree(cls.params[cf.OUT_DIR])
def test_small_test_data_ref_pixel(self):
refx, refy = pyrate.core.refpixel.ref_pixel_calc_wrapper(self.params)
assert refx == 38
assert refy == 58
assert 0.8 == pytest.approx(self.params[cf.REF_MIN_FRAC])
def test_more_small_test_data_ref_pixel(self):
refx, refy = pyrate.core.refpixel.ref_pixel_calc_wrapper(self.params_alt_ref_frac)
assert refx == 38
assert refy == 58
assert 0.5 == pytest.approx(self.params_alt_ref_frac[cf.REF_MIN_FRAC])
def test_small_test_data_ref_pixel_all_2(self):
refx, refy = pyrate.core.refpixel.ref_pixel_calc_wrapper(self.params_all_2s)
assert refx == 25
assert refy == 2
assert 0.5 == pytest.approx(self.params_alt_ref_frac[cf.REF_MIN_FRAC])
def test_small_test_data_ref_chipsize_15(self):
refx, refy = pyrate.core.refpixel.ref_pixel_calc_wrapper(self.params_chipsize_15)
assert refx == 7
assert refy == 7
assert 0.5 == pytest.approx(self.params_alt_ref_frac[cf.REF_MIN_FRAC])
def test_small_test_data_ref_all_1(self):
refx, refy = pyrate.core.refpixel.ref_pixel_calc_wrapper(self.params_all_1s)
assert 0.7 == pytest.approx(self.params_all_1s[cf.REF_MIN_FRAC])
assert 1 == self.params_all_1s[cf.REFNX]
assert 1 == self.params_all_1s[cf.REFNY]
assert refx == 2
assert refy == 2
@pytest.mark.slow
def test_error_msg_refpixel_out_of_bounds(tempdir, gamma_conf):
"check correct latitude/longitude refpixel error is raised when specified refpixel is out of bounds"
for x, (refx, refy) in zip(['longitude', 'latitude', 'longitude and latitude'],
[(150., -34.218333314), (150.941666654, -34.), (150, -34)]):
_, err = _get_mlooked_files(gamma_conf, Path(tempdir()), refx=refx, refy=refy)
msg = "Supplied {} value is outside the bounds of the interferogram data"
assert msg.format(x) in err
@pytest.mark.slow
def test_gamma_ref_pixel_search_vs_lat_lon(tempdir, gamma_conf):
params_1, _ = _get_mlooked_files(gamma_conf, Path(tempdir()), refx=-1, refy=-1)
params_2, _ = _get_mlooked_files(gamma_conf, Path(tempdir()), refx=150.941666654, refy=-34.218333314)
assert_two_dirs_equal(params_1[cf.OUT_DIR], params_2[cf.OUT_DIR], ["*_ifg.tif", '*_coh.tif', 'dem.tif'], 35)
def _get_mlooked_files(gamma_conf, tdir, refx, refy):
params = manipulate_test_conf(gamma_conf, tdir)
params[cf.REFX] = refx
params[cf.REFY] = refy
output_conf_file = 'config.conf'
output_conf = tdir.joinpath(output_conf_file)
pyrate.configuration.write_config_file(params=params, output_conf_file=output_conf)
params = Configuration(output_conf).__dict__
conv2tif.main(params)
params = Configuration(output_conf).__dict__
prepifg.main(params)
err = run(f"pyrate correct -f {output_conf}", shell=True, universal_newlines=True, stderr=PIPE).stderr
return params, err
class TestRefPixelReuseLoadsSameFileAndPixels:
@classmethod
def setup_method(cls):
cls.conf = TEST_CONF_GAMMA
params = Configuration(cls.conf).__dict__
conv2tif.main(params)
params = Configuration(cls.conf).__dict__
prepifg.main(params)
params = Configuration(cls.conf).__dict__
correct._copy_mlooked(params)
cls.params = params
@classmethod
def teardown_method(cls):
shutil.rmtree(cls.params[cf.OUT_DIR])
@pytest.mark.slow()
def test_ref_pixel_multiple_runs_reuse_from_disc(self, ref_pixel):
params = self.params
params[cf.REFX], params[cf.REFY] = ref_pixel
params[cf.REF_PIXEL_FILE] = Configuration.ref_pixel_path(params)
ref_pixel_calc_wrapper(params)
ref_pixel_file = self.params[cf.REF_PIXEL_FILE]
time_written = os.stat(ref_pixel_file).st_mtime
assert self.params[cf.REFX_FOUND] == 38
assert self.params[cf.REFY_FOUND] == 58
# run again
ref_pixel_calc_wrapper(self.params)
ref_pixel_file = self.params[cf.REF_PIXEL_FILE]
time_written_1 = os.stat(ref_pixel_file).st_mtime
assert self.params[cf.REFX_FOUND] == 38
assert self.params[cf.REFY_FOUND] == 58
# run a third time
ref_pixel_calc_wrapper(self.params)
ref_pixel_file = self.params[cf.REF_PIXEL_FILE]
time_written_2 = os.stat(ref_pixel_file).st_mtime
assert time_written == time_written_2 == time_written_1
assert self.params[cf.REFX], self.params[cf.REFY] == ref_pixel
assert self.params[cf.REFX_FOUND] == 38
assert self.params[cf.REFY_FOUND] == 58
@pytest.fixture(scope='module')
def x_y_pixel():
dem = shared.DEM(SML_TEST_DEM_TIF)
dem.open()
Y = dem.nrows
X = dem.ncols
x = np.random.choice(range(X), 5)
y = np.random.choice(range(Y), 5)
return itertools.product(x, y) # returns a matrix of 5x5 random x, y pairs
def test_convert_pixel_value_to_geographic_coordinate(x_y_pixel):
transform = dem_transform()
for x, y in x_y_pixel:
lon, lat = convert_pixel_value_to_geographic_coordinate(x, y, transform)
out = run(f"gdallocationinfo -geoloc {SML_TEST_DEM_TIF} {lon} {lat}", shell=True, universal_newlines=True,
stdout=PIPE).stdout
xs = (x, x+1, x-1)
ys = (y, y+1, y-1)
assert any(f"({xx}P,{yy}L)" in out for xx, yy in itertools.product(xs, ys))
def dem_transform():
dem = shared.DEM(SML_TEST_DEM_TIF)
dem.open()
transform = dem.dataset.GetGeoTransform()
return transform
@pytest.mark.skipif(PYTHON3P6, reason='Skipped in python3p6')
def test_convert_geographic_coordinate_to_pixel_value(x_y_pixel):
transform = dem_transform()
for x, y in x_y_pixel:
lon, lat = convert_pixel_value_to_geographic_coordinate(x, y, transform)
xp, yp = convert_geographic_coordinate_to_pixel_value(lon, lat, transform)
assert (xp == x) & (yp == y)
``` |
{
"source": "jmetteUni/CoTeDe-modified",
"score": 3
} |
#### File: cotede/fuzzy/fuzzy_core.py
```python
import numpy as np
from numpy import ma
from .membership_functions import smf, zmf, trapmf, trimf
from .defuzz import defuzz
def fuzzyfy(data, features, output, require="all"):
"""
Notes
-----
    In the future, generalize this once the membership combining rules are defined
    in the cfg, so one can decide between mean or max.
"""
features_list = list(features.keys())
N = max([len(data[f]) for f in features_list])
# The fuzzy set are usually: low, medium, high
# The membership of each fuzzy set are each feature scaled.
membership = {f: {} for f in output.keys()}
mfuncs = {"smf": smf, "trimf": trimf, "trapmf": trapmf, "zmf": zmf}
for t in features_list:
for m in membership:
assert m in features[t], "Missing %s in %s" % (m, features[t])
f = mfuncs[features[t][m]["type"]]
membership[m][t] = f(np.asanyarray(data[t]), features[t][m]["params"])
# Rule Set
rules = {}
# Low: u_low = mean(S_l(spike), S_l(clim)...)
# u_low = np.mean([weights['spike']['low'],
# weights['woa_relbias']['low']], axis=0)
# Medium: u_medium = mean(S_l(spike), S_l(clim)...)
# u_medium = np.mean([weights['spike']['medium'],
# weights['woa_relbias']['medium']], axis=0)
for m in [m for m in membership if m != "high"]:
tmp = np.vstack([membership[m][f] for f in membership[m]])
if require == "any":
rules[m] = np.nanmean(tmp, axis=0)
else:
rules[m] = np.mean(tmp, axis=0)
# High: u_high = max(S_l(spike), S_l(clim)...)
# u_high = np.max([weights['spike']['high'],
# weights['woa_relbias']['high']], axis=0)
tmp = np.vstack([membership["high"][f] for f in membership["high"]])
if require == "any":
rules["high"] = np.nanmax(tmp, axis=0)
else:
rules["high"] = np.max(tmp, axis=0)
return rules
def fuzzy_uncertainty(data, features, output, require="all"):
"""Estimate the Fuzzy uncertainty of the given data
Parameters
----------
data :
features :
output :
require : all or any, optional
Require all or any of the features to estimate the uncertainty
"""
# It's not clear at Morello 2014 what is the operator K()
# Q is the uncertainty, hence Q_low is the low uncertainty
    # Seems like K() is just a linear factor, which would give the level of
    # uncertainty, like 0.1 for low, 0.5 for medium and 0.9 for high, defining
    # weights for each level?! I'm not sure. But the result would be a
    # composite curve, so when the Qs are joined it would give a curve with
    # the possible values on Q (maybe multiple dimensions) and the y would be
    # the composite result [0, 1].
# Q_low = 0.1 * u_low # K_low(u_low)
# Q_medium = 0.5 * u_medium # K_medium(u_medium)
# Q_high = 0.9 *u_high # K_high(u_high)
# Bisector
# They refer to Q_i x_l, which I understand as the uncertainty for each value for each level
    # It looks like the uncertainties of all tests of the three levels are
    # grouped and ordered, and the bisector would be the value that defines
    # half of the area.
    # Is x the observed value or a set of hypothetical values?
# CQ = bisector(Qs, ...
rules = fuzzyfy(data=data, features=features, output=output, require=require)
N_out = 100
output_range = np.linspace(0, 1, N_out)
Q = {}
mfuncs = {"smf": smf, "trimf": trimf, "trapmf": trapmf, "zmf": zmf}
for m in output:
f = mfuncs[output[m]["type"]]
Q[m] = f(output_range, output[m]["params"])
idx = np.isfinite([rules[r] for r in rules])
# This would be the regular fuzzy approach.
uncertainty = np.nan * np.ones(np.shape(idx)[1:])
valid = np.nonzero(idx.all(axis=0))[0]
for i in valid:
aggregated = np.zeros(N_out)
for m in rules:
aggregated = np.fmax(aggregated, np.fmin(rules[m][i], Q[m]))
if aggregated.sum() > 0:
uncertainty[i] = defuzz(output_range, aggregated, "bisector")
return uncertainty
```
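A minimal, self-contained sketch of the configuration shape that `fuzzy_uncertainty` expects, inferred from the code above (the feature names and membership parameters are illustrative, not CoTeDe's defaults): every output class ("low"/"medium"/"high") needs a membership definition for each feature, and `output` defines the fuzzy sets that are defuzzified into one uncertainty value per measurement.
```python
import numpy as np

from cotede.fuzzy.fuzzy_core import fuzzy_uncertainty

# Illustrative feature values for three measurements.
data = {
    "spike": np.array([0.01, 0.5, 4.0]),
    "woa_relbias": np.array([0.5, 2.0, 9.0]),
}

# Each feature maps every output class to a membership function and its params.
features = {
    "spike": {
        "low": {"type": "zmf", "params": [0.07, 0.2]},
        "medium": {"type": "trapmf", "params": [0.07, 0.2, 2, 6]},
        "high": {"type": "smf", "params": [2, 6]},
    },
    "woa_relbias": {
        "low": {"type": "zmf", "params": [3, 4]},
        "medium": {"type": "trapmf", "params": [3, 4, 5, 6]},
        "high": {"type": "smf", "params": [5, 6]},
    },
}

# Output fuzzy sets on the [0, 1] uncertainty axis.
output = {
    "low": {"type": "trimf", "params": [0.0, 0.225, 0.45]},
    "medium": {"type": "trimf", "params": [0.275, 0.5, 0.725]},
    "high": {"type": "smf", "params": [0.55, 0.775]},
}

print(fuzzy_uncertainty(data, features, output))  # one value in [0, 1] per measurement
```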
#### File: cotede/qctests/constant_cluster_size.py
```python
import numpy as np
from numpy import ma
from .qctests import QCCheckVar
def constant_cluster_size(x, tol=0):
"""Estimate the cluster size with (nearly) constant value
Returns how many consecutive neighbor values are within a given
tolerance range. Note that invalid values, like NaN, are ignored.
"""
assert np.ndim(x) == 1, 'Not ready for more than 1 dimension'
    # Adding a tolerance to handle rounding errors due to different numeric types.
tol = tol + 1e-5 * tol
ivalid = np.nonzero(~ma.getmaskarray(ma.fix_invalid(x)))[0]
dx = np.diff(np.atleast_1d(x)[ivalid])
cluster_size = np.zeros(np.shape(x), dtype='i')
for i, iv in enumerate(ivalid):
idx = np.absolute(dx[i:].cumsum()) > tol
if True in idx:
cluster_size[iv] += np.nonzero(idx)[0].min()
else:
cluster_size[iv] += idx.size
idx = np.absolute(dx[0:i][::-1].cumsum()) > tol
if True in idx:
cluster_size[iv] += np.nonzero(idx)[0].min()
else:
cluster_size[iv] += idx.size
return cluster_size
class ConstantClusterSize(QCCheckVar):
"""
    Need to implement a check on time: TSG specifies a constant value over 6 hrs.
"""
def set_features(self):
cluster_size = constant_cluster_size(self.data[self.varname])
N = ma.compressed(self.data[self.varname]).size
cluster_fraction = cluster_size / N
self.features = {'constant_cluster_size': cluster_size,
'constant_cluster_fraction': cluster_fraction,
}
def test(self):
self.flags = {}
threshold = self.cfg['threshold']
# assert (np.size(threshold) == 1) \
# and (threshold is not None) \
# and (np.isfinite(threshold))
if isinstance(threshold, str) and (threshold[-1] == '%'):
threshold = float(threshold[:-1]) * 1e-2
feature_name = 'constant_cluster_fraction'
else:
feature_name = 'constant_cluster_size'
flag = np.zeros(self.data[self.varname].shape, dtype='i1')
feature = self.features[feature_name]
flag[np.nonzero(feature > threshold)] = self.flag_bad
flag[np.nonzero(feature <= threshold)] = self.flag_good
flag[ma.getmaskarray(self.data[self.varname])] = 9
self.flags[feature_name] = flag
```
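A small hand-worked illustration of `constant_cluster_size` (the expected output below was computed by stepping through the function above): each value is scored with the number of valid neighbours, on both sides combined, that stay within the tolerance, so a run of three identical values scores 2, a run of two scores 1, and an isolated value scores 0.
```python
import numpy as np

from cotede.qctests.constant_cluster_size import constant_cluster_size

x = np.array([1.0, 1.0, 1.0, 2.0, 2.0, 3.0])
print(constant_cluster_size(x))  # expected: [2 2 2 1 1 0]
```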
#### File: cotede/qctests/gradient.py
```python
import logging
import numpy as np
from numpy import ma
from cotede.qctests import QCCheckVar
try:
import pandas as pd
PANDAS_AVAILABLE = True
except ImportError:
PANDAS_AVAILABLE = False
module_logger = logging.getLogger(__name__)
def gradient(x):
return curvature(x)
def _curvature_pandas(x):
"""Equivalent to curvature() but using pandas
It looks like the numpy implementation is faster even for larger datasets,
so the default is with numpy.
Note
----
- In the future this will be useful to handle specific window widths.
"""
if isinstance(x, ma.MaskedArray):
x[x.mask] = np.nan
x = x.data
if not PANDAS_AVAILABLE:
return curvature(x)
if hasattr(x, "to_series"):
x = x.to_series()
elif not isinstance(x, pd.Series):
x = pd.Series(x)
y = np.nan * x
y = x - (x.shift(1) + x.shift(-1)) / 2.0
return np.array(y)
def curvature(x):
"""Curvature of a timeseries
This test is commonly known as gradient for historical reasons, but that
is a bad name choice since it is not the actual gradient, like:
d/dx + d/dy + d/dz,
    but, as defined by GTSPP, EuroGOOS and others, it is actually the
    curvature of the timeseries.
Note
----
    - Pandas.Series operates with indexes, so it should be handled differently. In
    that case, use _curvature_pandas.
"""
if isinstance(x, ma.MaskedArray):
x[x.mask] = np.nan
x = x.data
if PANDAS_AVAILABLE and isinstance(x, pd.Series):
return _curvature_pandas(x)
x = np.atleast_1d(x)
y = np.nan * x
y[1:-1] = x[1:-1] - (x[:-2] + x[2:]) / 2.0
return y
class Gradient(QCCheckVar):
def set_features(self):
self.features = {"gradient": curvature(self.data[self.varname])}
def test(self):
self.flags = {}
try:
threshold = self.cfg["threshold"]
except KeyError:
module_logger.debug(
"Deprecated cfg format. It should contain a threshold item."
)
threshold = self.cfg
assert (
(np.size(threshold) == 1)
and (threshold is not None)
and (np.isfinite(threshold))
)
flag = np.zeros(np.shape(self.data[self.varname]), dtype="i1")
feature = np.absolute(self.features["gradient"])
flag[feature > threshold] = self.flag_bad
flag[feature <= threshold] = self.flag_good
x = np.atleast_1d(self.data[self.varname])
flag[ma.getmaskarray(x) | ~np.isfinite(x)] = 9
self.flags["gradient"] = flag
```
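A quick numeric check of `curvature` as defined above: each interior point gets x[i] - (x[i-1] + x[i+1]) / 2 and both endpoints are NaN.
```python
import numpy as np

from cotede.qctests.gradient import curvature

x = np.array([1.0, 2.0, 4.0, 8.0])
print(curvature(x))  # nan, -0.5, -1.0, nan
```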
#### File: cotede/qctests/profile_envelop.py
```python
import logging
import numpy as np
from numpy import ma
from .qctests import QCCheckVar
module_logger = logging.getLogger(__name__)
class ProfileEnvelop(QCCheckVar):
def test(self):
self.flags = {}
x = self.data[self.varname]
if isinstance(x, ma.MaskedArray):
x[x.mask] = np.nan
x = x.data
x = np.atleast_1d(x)
z = self.data["PRES"]
if isinstance(z, ma.MaskedArray):
if z.mask.any():
mask = np.ones_like(z, dtype="float32")
mask[z.mask] = np.nan
z = z * mask
z = z.data
z = np.atleast_1d(z)
assert np.shape(z) == np.shape(x)
assert "layers" in self.cfg, "Profile envelop cfg requires layers"
flag = np.zeros(np.shape(x), dtype="i1")
for layer in self.cfg["layers"]:
ind = np.nonzero(eval("(z %s) & (z %s)" % (layer[0], layer[1])))[0]
f = eval("(x[ind] > %s) & (x[ind] < %s)" % (layer[2], layer[3]))
flag[ind[f == True]] = self.flag_good
flag[ind[f == False]] = self.flag_bad
flag[ma.getmaskarray(x) | ~np.isfinite(x)] = 9
self.flags["profile_envelop"] = flag
```
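For reference, a sketch of the `layers` configuration that `ProfileEnvelop` expects, inferred from the `eval()` calls above (the numeric limits are made up for illustration): each layer is `[pressure condition 1, pressure condition 2, lower limit, upper limit]`, so the first entry below flags values outside (-2, 30) for pressures in (0, 25].
```python
# Hypothetical configuration; the thresholds are illustrative only.
cfg = {
    "layers": [
        ["> 0", "<= 25", -2, 30],
        ["> 25", "<= 50", -2, 28],
    ]
}
```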
#### File: tests/qctests/test_qc_deepest_pressure.py
```python
import numpy as np
from numpy import ma
from cotede.qctests import DeepestPressure
from ..data import DummyData
def test_default():
profile = DummyData()
cfg = {'threshold': 1000}
y = DeepestPressure(profile, 'TEMP', cfg)
y.test()
assert 'deepest_pressure' in y.flags
assert profile['TEMP'].shape == y.flags['deepest_pressure'].shape
idx = ma.getmaskarray(profile['TEMP'])
assert idx.any(), "Redefine DummyData to have at least one masked value"
assert np.all(y.flags['deepest_pressure'][idx] == 9)
x = profile['PRES'][y.flags['deepest_pressure'] == 1]
idx = (x <= 1.1 * cfg['threshold'])
assert idx.any(), "Redefine cfg to have at least one valid value"
assert idx.all()
x = profile['PRES'][y.flags['deepest_pressure'] == 3]
idx = (x > 1.1 * cfg['threshold'])
assert idx.any(), "Redefine cfg to have at least one non-valid value"
assert idx.all()
```
#### File: tests/qctests/test_qc_spike_depthconditional.py
```python
import numpy as np
from cotede.qctests import SpikeDepthConditional
from ..data import DummyData
from .compare import compare_input_types
def test():
profile = DummyData()
cfg = {
"pressure_threshold": 400,
"shallow_max": 6,
"deep_max": 0.05,
"flag_good": 1,
"flag_bad": 4,
}
y = SpikeDepthConditional(profile, "TEMP", cfg)
assert isinstance(y.features, dict)
assert "spike" in y.features
assert np.allclose(
y.flags["spike_depthconditional"],
np.array([0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 4, 4, 1, 0, 9], dtype="i1"),
)
def test_input_types():
cfg = {
"pressure_threshold": 400,
"shallow_max": 9,
"deep_max": 2.5,
}
compare_input_types(SpikeDepthConditional, cfg)
```
#### File: tests/qctests/test_qc_stuck_value.py
```python
import numpy as np
from numpy import ma
from cotede.qctests import StuckValue
from ..data import DummyData
def test_default():
profile = DummyData()
cfg = {'flag_good': 1, 'flag_bad': 4}
y = StuckValue(profile, 'TEMP', cfg)
assert type(y.features) is dict
idx = ma.getmaskarray(profile['TEMP'])
assert (y.flags['stuck_value'][idx] == 9).all()
assert (y.flags['stuck_value'][~idx] == 1).all()
def test_constant():
profile = DummyData()
profile['TEMP'][:] = profile['TEMP'] * 0 + 3.14
cfg = {'flag_good': 1, 'flag_bad': 4}
y = StuckValue(profile, 'TEMP', cfg)
assert type(y.features) is dict
idx = ma.getmaskarray(profile['TEMP'])
assert (y.flags['stuck_value'][idx] == 9).all()
assert (y.flags['stuck_value'][~idx] == 4).all()
def test_neglible_difference():
profile = DummyData()
profile['TEMP'][:] = profile['TEMP'] * 0 + 3.14
profile['TEMP'][:] += np.random.randn(profile['TEMP'].size) * 1e-10
cfg = {'flag_good': 1, 'flag_bad': 4}
y = StuckValue(profile, 'TEMP', cfg)
assert type(y.features) is dict
idx = ma.getmaskarray(profile['TEMP'])
assert (y.flags['stuck_value'][idx] == 9).all()
assert (y.flags['stuck_value'][~idx] == 4).all()
```
#### File: tests/qctests/test_qc_valid_geolocation.py
```python
import numpy as np
from numpy import ma
from cotede.qctests.valid_geolocation import valid_geolocation, ValidGeolocation
from ..data import DummyData
from tests.qctests.compare import compare_feature_input_types, compare_input_types
try:
import pandas as pd
PANDAS_AVAILABLE = True
except ImportError:
PANDAS_AVAILABLE = False
try:
import xarray as xr
XARRAY_AVAILABLE = True
except ImportError:
XARRAY_AVAILABLE = False
"""
Tests to implement:
- missing lat, missing lon, missing both
- single point
- list
- array
- masked array
- single point with NaN (lat, lon, lat & lon)
- list with some NaN
- lat lon coincident
- lat lon non-coincident
- masked array with some masked values
- Greenwich
Tests on class(QCCheck)
- lat lon in attrs
- lat lon in data
- invalid
- lat
- lon
- lat & lon
"""
def test_valid_single_coordinate():
coords = [[10, -30], [10, 330]]
for lat, lon in coords:
assert valid_geolocation(lat, lon) == True
def test_invalid_single_coordinate():
coords = [[99, 0], [-91.1, 0], [99, 361], [0, -181], [0, 361]]
for lat, lon in coords:
assert valid_geolocation(lat, lon) == False
def test_nan_single_coordinate():
coords = [[np.nan, -30], [10, np.nan]]
for lat, lon in coords:
assert valid_geolocation(lat, lon) == False
def test_valid_coordinate_list():
lat = [10, 10, -15]
lon = [-30, 330, 30]
assert np.all(valid_geolocation(lat, lon) == True)
def test_nan_coordinate_list():
lat = [np.nan, 10, np.nan]
lon = [-30, np.nan, 30]
assert np.all(valid_geolocation(lat, lon) == False)
def test_feature_input_types():
lat = np.array([10, 10, -15, np.nan, 0, np.nan])
lon = np.array([-30, 330, 30, 0, np.nan, np.nan])
compare_feature_input_types(valid_geolocation, lat, lon)
def test_standard_dataset():
profile = DummyData()
flags = {"valid_geolocation": [1]}
y = ValidGeolocation(profile)
for f in flags:
assert np.allclose(y.flags[f], flags[f], equal_nan=True)
def test_standard_dataset_alongtrack():
profile = DummyData()
profile.data["lat"] = profile["TEMP"] * 0
profile.data["lon"] = profile["TEMP"] * 0
flags = {"valid_geolocation": [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3]}
y = ValidGeolocation(profile)
for f in flags:
assert np.allclose(y.flags[f], flags[f], equal_nan=True)
# def test_input_types():
# compare_input_types(ValidGeolocation)
def test_compare_tuple():
"""Validate the results using a tuple
"""
profile = DummyData()
profile.data["lat"] = profile["TEMP"] * 0
profile.data["lon"] = profile["TEMP"] * 0
tp = {}
for v in profile.keys():
if isinstance(profile[v], ma.MaskedArray) and profile[v].mask.any():
profile[v][profile[v].mask] = np.nan
profile.data[v] = profile[v].data
tp[v] = tuple(profile.data[v])
y = ValidGeolocation(profile)
y2 = ValidGeolocation(tp)
assert isinstance(y2["lat"], tuple), "It didn't preserve the tuple type"
for f in y.features:
assert np.allclose(y.features[f], y2.features[f], equal_nan=True)
assert y.features[f].dtype == y2.features[f].dtype
for f in y.flags:
assert type(y.flags[f]) == type(y2.flags[f])
assert y.flags[f].dtype == y2.flags[f].dtype
assert np.allclose(y.flags[f], y2.flags[f], equal_nan=True)
def test_compare_pandas():
"""Validate the results using pandas.DataFrame
"""
if not PANDAS_AVAILABLE:
return
profile = DummyData()
profile.data["lat"] = profile["TEMP"] * 0
profile.data["lon"] = profile["TEMP"] * 0
df = pd.DataFrame(profile.data)
y = ValidGeolocation(profile)
y2 = ValidGeolocation(df)
for f in y.features:
assert np.allclose(y.features[f], y2.features[f], equal_nan=True)
for f in y.flags:
assert type(y.flags[f]) == type(y2.flags[f])
assert y.flags[f].dtype == y2.flags[f].dtype
assert np.allclose(y.flags[f], y2.flags[f], equal_nan=True)
def test_compare_xarray():
"""Validate the results using pandas.DataFrame
"""
if not XARRAY_AVAILABLE:
return
profile = DummyData()
profile.data["lat"] = profile["TEMP"] * 0
profile.data["lon"] = profile["TEMP"] * 0
ds = pd.DataFrame(profile.data).to_xarray()
y = ValidGeolocation(profile)
y2 = ValidGeolocation(ds)
for f in y.features:
assert np.allclose(y.features[f], y2.features[f], equal_nan=True)
for f in y.flags:
assert type(y.flags[f]) == type(y2.flags[f])
assert y.flags[f].dtype == y2.flags[f].dtype
assert np.allclose(y.flags[f], y2.flags[f], equal_nan=True)
```
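The expected results above pin down the accepted ranges: latitude in [-90, 90], longitude in [-180, 360] (so both -30 and 330 pass, while 361 and -181 fail), and any NaN is invalid. A minimal element-wise sketch consistent with those cases, assuming exactly these bounds rather than quoting the library's own code:
```python
import numpy as np

def valid_geolocation_sketch(lat, lon):
    lat = np.asarray(lat, dtype=float)
    lon = np.asarray(lon, dtype=float)
    # NaN fails every comparison, so missing coordinates come out invalid.
    return (lat >= -90) & (lat <= 90) & (lon >= -180) & (lon <= 360)

assert valid_geolocation_sketch(10, 330)
assert not valid_geolocation_sketch(0, -181)
```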
#### File: CoTeDe-modified/tests/test_datasets.py
```python
from cotede import ProfileQC
import cotede.datasets
def test_load_ctd():
"""Load CTD sample dataset
"""
ctd_dataset = cotede.datasets.load_ctd()
ctd_dataset.keys()
varnames = [
"timeS",
"PRES",
"TEMP",
"TEMP2",
"CNDC",
"CNDC2",
"potemperature",
"potemperature2",
"PSAL",
"PSAL2",
"flag",
]
for v in varnames:
assert v in ctd_dataset.keys()
assert len(ctd_dataset[v]) == 1014
def test_load_water_level():
"""Load water level sample dataset
"""
water_level_dataset = cotede.datasets.load_water_level()
water_level_dataset.keys()
varnames = ["epoch", "water_level", "flagged", "time"]
for v in varnames:
assert v in water_level_dataset.keys()
assert len(water_level_dataset[v]) == 21900
def test_qc_sample_ctd():
"""Guarantee that ProfileQC can run with a sample dataset
Note
----
- Assume that global_range will be always a default test.
"""
data = cotede.datasets.load_ctd()
pqc = ProfileQC(data)
# Silly test. The most important is if the previous line didn't crash.
assert len(pqc.flags["TEMP"]) > 0
assert len(pqc.flags["TEMP"]["global_range"]) > 0
```
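The bundled datasets double as a quick way to try the package. The snippet below is assembled from the calls the tests above already make; it loads the CTD sample and inspects which checks produced flags for TEMP:
```python
import cotede.datasets
from cotede import ProfileQC

data = cotede.datasets.load_ctd()
pqc = ProfileQC(data)
# Flags are grouped per variable and per test; 'global_range' is expected
# to be among them, as asserted in test_qc_sample_ctd above.
print(sorted(pqc.flags["TEMP"].keys()))
```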
#### File: CoTeDe-modified/tests/test_load_cfg.py
```python
import os.path
import pkg_resources
from cotede.utils import load_cfg, cotederc
from cotede.utils.config import convert_021_to_022
CFG = [f[:-5] for f in pkg_resources.resource_listdir('cotede', 'qc_cfg')
if f[-5:] == '.json']
def test_no_local_duplicate_cfg():
""" Avoid local cfg of default procedures
Guarantee that there is no local copy of a default cfg json file,
otherwise the default cfg could be breaking, and the tests safely
escaping into a local, non-distributed, cfg.
"""
for cfg in CFG:
local_cfg = os.path.join(cotederc(), "cfg", "%s.json" % cfg)
assert not os.path.exists(local_cfg), \
"Redundant local cfg file: %s" % cfg
def test_inout():
""" load_cfg shouldn't modify input variable cfg
"""
cfg = 'cotede'
out = load_cfg(cfg)
assert out != cfg
def test_dict():
"""Test a user dict config
It is possible to define a full config instead of choosing one of the
builtins. This is done by giving a dictionary with the correct
structure.
"""
cfg = {'common': {'valid_datetime': None}}
cfg_out = load_cfg(cfg)
assert 'common' in cfg_out, "Missing 'common' in load_cfg output"
assert cfg_out['common'] == cfg['common']
def test_default():
cfg_out = load_cfg()
assert isinstance(cfg_out, dict)
assert len(cfg_out) > 0
def test_factory_cfgs():
"""Load all available configs, one at a time
CoTeDe comes with builtin config. This test checks if can
load all those available configs.
"""
for cfg in CFG:
print("Loading %s" % cfg)
try:
cfg_out = load_cfg(cfg)
except:
assert False, "Couldn't load: %s" % cfg
assert isinstance(cfg_out, dict)
assert len(cfg_out) > 0
# Missing a test to load cfg at ~/.cotede
def test_dict_input():
"""Test a dictionary input, i.e. manually defined config
The output configuration can't miss anything given in the input but
can have extra content.
"""
# Scalar argument
cfg = {'temperature': {'spike': 1234}}
cfg_out = load_cfg(cfg)
assert cfg_out['variables']['temperature']['spike']['threshold'] == 1234
# Dictionary argument
cfg = {'temperature': {'global_range': {'minvalue': 0, 'maxvalue': 60}}}
cfg_out = load_cfg(cfg)
assert 'global_range' in cfg_out['variables']['temperature']
tmp = cfg_out['variables']['temperature']['global_range']
for v in cfg['temperature']['global_range']:
assert v in tmp
assert cfg['temperature']['global_range'][v] == tmp[v]
def test_inheritance():
"""Test inheritance
"""
cfg = load_cfg('cotede')
cfg2 = load_cfg({'inherit': 'cotede'})
for c in cfg:
        assert c in cfg2
assert cfg[c] == cfg2[c]
def test_inheritance_priority():
"""Test priority when inheriting
When inheritance is a list, the first item has priority over
the last one.
"""
def walk_and_check(cfg, cfg2):
for c in cfg:
assert c in cfg2, "Missing %s in inherited cfg2" % c
if not isinstance(cfg[c], dict):
assert cfg[c] == cfg2[c], \
"Missing %s in cfg2" % c
else:
walk_and_check(cfg[c], cfg2[c])
cfg = load_cfg('cotede')
# If is a list, the last is the lowest priority
cfg2 = load_cfg({'inherit': ['cotede', 'gtspp']})
walk_and_check(cfg, cfg2)
try:
cfg2 = load_cfg({'inherit': ['gtspp', 'cotede']})
walk_and_check(cfg, cfg2)
failed = False
except:
failed = True
assert failed, "It should fail in inverse priority"
def test_convert_021_to_022():
cfg = {
"revision": "0.21",
"variables": {
"myvar": {
"fuzzylogic": {
"output": {
"low": [0.0, 0.225, 0.45],
"medium": [0.075, 0.4, 0.725],
"high": [0.55, 0.775]
},
"features": {
"spike": {
"weight": 1,
"low": [0.07, 0.2],
"medium": [0.07, 0.2, 2, 6],
"high": [2, 6]
}
},
"uncertainty": [0.29, 0.34, 0.72]
} } }}
cfg = convert_021_to_022(cfg)
output = cfg["variables"]["myvar"]["fuzzylogic"]["output"]
for o in output:
assert "type" in output[o]
assert "params" in output[o]
features = cfg["variables"]["myvar"]["fuzzylogic"]["features"]
for f in features:
for m in ("low", "medium", "high"):
assert "type" in features[f][m]
assert "params" in features[f][m]
```
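Taken together, the tests above document the shape of a user configuration: an optional 'inherit' entry pointing at built-in setups, per-variable sections, and a scalar shorthand that load_cfg expands into {'threshold': value}. A hedged example combining those pieces (each piece is exercised by a test above, the combination and the numeric values are my own placeholders):
```python
from cotede.utils import load_cfg

my_cfg = {
    "inherit": "cotede",              # start from a built-in setup
    "temperature": {
        "spike": 2.0,                 # scalar shorthand -> {'threshold': 2.0}
        "global_range": {"minvalue": -2.5, "maxvalue": 40},
    },
}
cfg = load_cfg(my_cfg)
print(cfg["variables"]["temperature"]["spike"])
```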
#### File: CoTeDe-modified/tests/test_special_cases.py
```python
from datetime import datetime, date
from numpy import ma
import pytest
from cotede.qc import ProfileQC
from .data import DummyData
def test_single_measurement():
"""Evaluate a profile with a single measurement
WOD has some profiles with a single measurement. Something certainly went
wrong on those profiles, despite that, CoTeDe should be able to do the
best assessement possible. Some tests can't be applied, like spike which
requires neighbor measurements, but those should return flag 0.
"""
profile = DummyData()
profile.attrs = {
"id": 609483,
"LATITUDE": 6.977,
"LONGITUDE": 79.873,
"datetime": datetime(2009, 8, 14, 1, 18, 36),
"date": date(2009, 8, 14),
"schema": "pfl",
}
profile.data = {
"id": ma.masked_array(data=[51190527], mask=[False], dtype="i"),
"PRES": ma.masked_array(data=[1.0], mask=[False], dtype="f"),
"TEMP": ma.masked_array(data=[25.81], mask=[False], dtype="f"),
"PSAL": ma.masked_array(data=[0.01], mask=[False], dtype="f"),
}
ProfileQC(profile, saveauxiliary=False)
ProfileQC(profile, saveauxiliary=True)
@pytest.mark.skip(reason="Requires new generic Anomaly Detection procedure")
def test_single_negative_depth():
"""Evaluate a profile with a single measurement
WOD has some profiles with a single measurement. Something certainly went
wrong on those profiles, despite that, CoTeDe should be able to do the
best assessement possible. Some tests can't be applied, like spike which
requires neighbor measurements, but those should return flag 0.
"""
profile = DummyData()
profile.attrs = {
"id": 609483,
"LATITUDE": 6.977,
"LONGITUDE": 79.873,
"datetime": datetime(2009, 8, 14, 1, 18, 36),
"date": date(2009, 8, 14),
"schema": "pfl",
}
profile.data = {
"id": ma.masked_array(data=[51190527], mask=[False], dtype="i"),
"PRES": ma.masked_array(data=[-1.0], mask=[False], dtype="f"),
"TEMP": ma.masked_array(data=[25.81], mask=[False], dtype="f"),
"PSAL": ma.masked_array(data=[0.01], mask=[False], dtype="f"),
}
ProfileQC(profile, saveauxiliary=False)
ProfileQC(profile, saveauxiliary=True)
``` |
{
"source": "jmettraux/dotvim",
"score": 2
} |
#### File: dotvim/scripts/tree.py
```python
import os, re, sys, string, subprocess
# determine root
root = sys.argv[1]
if os.path.isfile(root): root = os.path.dirname(root)
if root[-1] != '/': root = root + '/'
# determine git root
gitroot = False
try:
DEV_NULL = open(os.devnull, 'w')
gitroot = subprocess\
.check_output([ 'git', 'rev-parse', '--show-toplevel' ], stderr=DEV_NULL)\
.decode()\
.strip()
except:
  pass # not a git repo
# gather git stats
git = {}
if gitroot:
cmd = 'git diff --numstat'
for line in subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout:
line = line.decode().strip()
#print([ 'gdns', line ])
ss = line.split()
#print([ gitroot, ss[2] ])
ap = os.path.abspath(os.path.join(gitroot, ss[2]))
git[ap] = { 'p': ss[2], 'a': ss[0], 'd': ss[1] }
cmd = 'git status -s'
for line in subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout:
line = line.decode().strip()
#print([ 'gss', line ])
ss = line.split()
if len(ss) > 2 and ss[2] == '->':
ap = os.path.abspath(ss[3])
g = git.get(ap, { 'p': ss[3] })
git[ap] = g
g['s'] = ss[0]
else:
ap = os.path.abspath(ss[1])
g = git.get(ap, { 'p': ss[1] })
git[ap] = g
g['s'] = ss[0]
#print(git)
#print(git.keys())
#print git['/home/jmettraux/w/sg/ispec/spec/common_spec.rb']
cf = open(os.path.join(os.path.dirname(__file__), 'countable.txt'), 'r')
exts = cf.read().split()
cf.close()
wcl = {}
cmd = (
'find ' + root + ' ' +
' -o '.join(
map(
lambda x: '-name "*.' + x + '" ',
exts)) +
' | xargs wc -l 2>/dev/null')
for line in subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout:
ss = line.decode().strip().split()
if len(ss) < 2: continue
if ss[1] == 'total': continue
if re.match(r'\/\.git\/', ss[1]): continue
wcl[os.path.abspath(ss[1])] = ss[0]
# do the tree
fs = []
def to_kmgt(s): # https://stackoverflow.com/questions/12523586
step_unit = 1024.0
for x in [ '', 'K', 'M', 'G', 'T' ]:
#if s < step_unit: return "%3.1f%s" % (s, x)
if s < step_unit: return "%i%s" % (s, x)
s /= step_unit
def compute_size(path):
m = re.match(r'^([^*]+)', path)
if m == None: return '-1'
pa = m.group(1)
if os.path.exists(pa) == False: return '-1'
return to_kmgt(os.path.getsize(pa))
def compute_path():
i = fs[-1]['i'] + 1
d = []
for f in fs[::-1]:
if f['i'] < i:
d.insert(0, f['n'])
i = f['i']
return os.path.join(*d)
cmd = 'tree -F ' + root
for line in subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout:
line = line.decode().rstrip()
d = { 'l': line, 'i': -1 }
fs.append(d);
m = re.match(r'^([-|` ]+ )?(.*)$', line)
if m == None: continue
d['i'] = len(m.group(1) or '')
d['n'] = m.group(2)
d['p'] = compute_path()
d['s'] = compute_size(d['p'])
d['d'] = os.path.isdir(d['p'])
d['L'] = wcl.get(os.path.abspath(d['p']))
def to_s(l):
return ' '.join(filter(None, l))
for f in fs:
if f['i'] < 0 or f['s'] == '-1':
print(f['l'])
else:
ap = os.path.abspath(f['p'])
g = git.get(ap)
ls = f['L'] + 'L' if f['L'] else None
if g:
un = g.get('s')
ad = '+' + g.get('a', '0') + '-' + g.get('d', '0')
if un == '??':
ad = 'untracked'
elif un and un[0:1] == 'A':
if ad == '+---':
ad = 'new'
else:
ad = ad + ' new'
print(to_s([ f['l'], f['s'], ls, ad ]))
else:
print(to_s([ f['l'], f['s'], ls ]))
``` |
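tree.py is meant to be shelled out to from Vim: it takes a file or directory as its single argument, needs the external `tree`, `wc` and (optionally) `git` commands plus the countable.txt list of extensions next to it, and decorates each `tree -F` line with the file size, a line count for countable files, and git diff/status information. A hypothetical standalone invocation:
```python
import subprocess, sys

# Print the annotated tree for the current directory; each line ends with
# roughly "<size> <lines>L <+added-deleted | untracked | new>" when available.
subprocess.run([sys.executable, "scripts/tree.py", "."], check=False)
```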
{
"source": "jmetzen/bayesian_optimization",
"score": 3
} |
#### File: bayesian_optimization/bayesian_optimization/bayesian_optimization.py
```python
from itertools import cycle
import numpy as np
from sklearn.utils import check_random_state
from .utils.optimization import global_optimization
class BayesianOptimizer(object):
"""Bayesian optimization for global black-box optimization
Bayesian optimization models the landscape of the function to be optimized
internally by a surrogate model (typically a Gaussian process) and
evaluates always those parameters which are considered as global optimum
of an acquisition function defined over this surrogate model. Different
acquisition functions and optimizers can be used internally.
Bayesian optimization aims at reducing the number of evaluations of the
actual function, which is assumed to be costly. To achieve this, a large
computational budget is allocated at modelling the true function and finding
potentially optimal positions based on this model.
.. seealso:: Brochu, Cora, de Freitas
"A tutorial on Bayesian optimization of expensive cost
functions, with application to active user modelling and
hierarchical reinforcement learning"
Parameters
----------
model : surrogate model object
The surrogate model which is used to model the objective function. It
needs to provide a methods fit(X, y) for training the model and
predictive_distribution(X) for determining the predictive distribution
(mean, std-dev) at query point X.
acquisition_function : acquisition function object
When called, this function returns the acquisitability of a query point
i.e., how favourable it is to perform an evaluation at the query point.
For this, internally the trade-off between exploration and exploitation
is handled.
optimizer: string, default: "direct"
The optimizer used to identify the maximum of the acquisition function.
The optimizer is specified by a string which may be any of "direct",
"direct+lbfgs", "random", "random+lbfgs", "cmaes", or "cmaes+lbfgs".
maxf: int, default: 1000
The maximum number of evaluations of the acquisition function by the
optimizer.
initial_random_samples: int, default: 5
        The number of initial samples for which random query points are selected
without using the acquisition function. Setting this to values larger
than 0 might be required if the surrogate model needs to be trained
on datapoints before evaluating it.
random_state : RandomState or int (default: None)
Seed for the random number generator.
"""
def __init__(self, model, acquisition_function, optimizer="direct",
maxf=1000, initial_random_samples=5, random_state=0,
*args, **kwargs):
self.model = model
self.acquisition_function = acquisition_function
self.optimizer = optimizer
self.maxf = maxf
self.initial_random_samples = initial_random_samples
self.rng = check_random_state(random_state)
self.X_ = []
self.y_ = []
def select_query_point(self, boundaries,
incumbent_fct=lambda y: np.max(y)):
""" Select the next query point in boundaries based on acq. function.
Parameters
----------
boundaries : ndarray-like, shape: [n_dims, 2]
Box constraint on allowed query points. First axis corresponds
to dimensions of the search space and second axis to minimum and
maximum allowed value in the respective dimensions.
incumbent_fct: function, default: returns maximum observed value
A function which is used to determine the incumbent for the
acquisition function. Defaults to the maximum observed value.
"""
boundaries = np.asarray(boundaries)
if len(self.X_) < self.initial_random_samples:
X_query = self.rng.uniform(size=boundaries.shape[0]) \
* (boundaries[:, 1] - boundaries[:, 0]) + boundaries[:, 0]
else:
self.acquisition_function.set_boundaries(boundaries)
def objective_function(x):
# Check boundaries
if not np.all(np.logical_and(x >= boundaries[:, 0],
x <= boundaries[:, 1])):
return -np.inf
incumbent = incumbent_fct(self.y_)
return self.acquisition_function(x, incumbent=incumbent)
X_query = global_optimization(
objective_function, boundaries=boundaries,
optimizer=self.optimizer, maxf=self.maxf, random=self.rng)
# Clip to hard boundaries
return np.clip(X_query, boundaries[:, 0], boundaries[:, 1])
def update(self, X, y):
""" Update internal model for observed (X, y) from true function. """
self.X_.append(X)
self.y_.append(y)
self.model.fit(self.X_, self.y_)
def best_params(self):
""" Returns the best parameters found so far."""
return self.X_[np.argmax(self.y_)]
def best_value(self):
""" Returns the optimal value found so far."""
return np.max(self.y_)
class REMBOOptimizer(BayesianOptimizer):
""" Random EMbedding Bayesian Optimization (REMBO).
This extension of Bayesian Optimizer (BO) is better suited for
high-dimensional problems with a low effective dimensionality than BO.
This is achieved by restricting the optimization to a low-dimensional
linear manifold embedded in the higher dimensional space. Theoretical
results suggest that even if the manifold is chosen randomly, the
optimum on this manifold equals the global optimum if the function is
indeed of the same intrinsic dimensionality as the manifold.
.. seealso:: Wang, Zoghi, Hutter, Matheson, <NAME>
"Bayesian Optimization in High Dimensions via Random
Embeddings", International Joint Conferences on Artificial
Intelligence (IJCAI), 2013
Parameters
----------
n_dims : int
The dimensionality of the actual search space
n_embedding_dims : int, default: 2
The dimensionality of the randomly chosen linear manifold on which the
optimization is performed
data_space: array-like, shape=[n_dims, 2], default: None
The boundaries of the data-space. This is used for scaling the mapping
from embedded space to data space, which is useful if dimensions of the
data space have different ranges or are not centred around 0.
n_keep_dims : int, default: 0
The number of dimensions which are not embedded in the manifold but are
kept 1-to-1 in the representation. This can be useful if some
dimensions are known to be relevant. Note that it is expected that
those dimensions come first in the data representation, i.e., the first
n_keep_dims dimensions are maintained.
Further parameters are the same as in BayesianOptimizer
"""
def __init__(self, n_dims, n_embedding_dims=2, data_space=None,
n_keep_dims=0, *args, **kwargs):
super(REMBOOptimizer, self).__init__(*args, **kwargs)
self.n_dims = n_dims
self.n_embedding_dims = n_embedding_dims
self.data_space = data_space
self.n_keep_dims = n_keep_dims
if self.data_space is not None:
self.data_space = np.asarray(self.data_space)
if self.data_space.shape[0] != self.n_dims - n_keep_dims:
raise Exception("Data space must be specified for all input "
"dimensions which are not kept.")
# Determine random embedding matrix
self.A = self.rng.normal(size=(self.n_dims - self.n_keep_dims,
self.n_embedding_dims))
#self.A /= np.linalg.norm(self.A, axis=1)[:, np.newaxis] # XXX
self.X_embedded_ = []
self.boundaries_cache = {}
def select_query_point(self, boundaries,
incumbent_fct=lambda y: np.max(y)):
""" Select the next query point in boundaries based on acq. function.
Parameters
----------
boundaries : ndarray-like, shape: [n_dims, 2]
Box constraint on allowed query points. First axis corresponds
to dimensions of the search space and second axis to minimum and
maximum allowed value in the respective dimensions.
incumbent_fct: function, default: returns maximum observed value
A function which is used to determine the incumbent for the
acquisition function. Defaults to the maximum observed value.
"""
boundaries = np.asarray(boundaries)
if not boundaries.shape[0] == self.n_dims:
raise Exception("Dimensionality of boundaries should be %d"
% self.n_dims)
# Compute boundaries on embedded space
boundaries_embedded = self._compute_boundaries_embedding(boundaries)
if len(self.X_) < self.initial_random_samples:
# Select query point randomly
X_query_embedded = \
self.rng.uniform(size=boundaries_embedded.shape[0]) \
* (boundaries_embedded[:, 1] - boundaries_embedded[:, 0]) \
+ boundaries_embedded[:, 0]
else:
# Select query point by finding optimum of acquisition function
# within boundaries
def objective_function(x):
# Check boundaries
if not np.all(np.logical_and(x >= boundaries_embedded[:, 0],
x <= boundaries_embedded[:, 1])):
return -np.inf
incumbent = incumbent_fct(self.y_)
return self.acquisition_function(x, incumbent=incumbent)
X_query_embedded = global_optimization(
objective_function, boundaries=boundaries_embedded,
optimizer=self.optimizer, maxf=self.maxf, random=self.rng)
self.X_embedded_.append(X_query_embedded)
# Map to higher dimensional space and clip to hard boundaries
X_query = np.clip(self._map_to_dataspace(X_query_embedded),
boundaries[:, 0], boundaries[:, 1])
return X_query
def update(self, X, y):
""" Update internal model for observed (X, y) from true function. """
self.X_.append(X)
self.y_.append(y)
self.model.fit(self.X_embedded_, self.y_)
def _map_to_dataspace(self, X_embedded):
""" Map data from manifold to original data space. """
X_query_kd = self.A.dot(X_embedded[self.n_keep_dims:])
if self.data_space is not None:
X_query_kd = (X_query_kd + 1) / 2 \
* (self.data_space[:, 1] - self.data_space[:, 0]) \
+ self.data_space[:, 0]
X_query = np.hstack((X_embedded[:self.n_keep_dims], X_query_kd))
return X_query
def _compute_boundaries_embedding(self, boundaries):
""" Approximate box constraint boundaries on low-dimensional manifold"""
# Check if boundaries have been determined before
boundaries_hash = hash(boundaries[self.n_keep_dims:].tostring())
if boundaries_hash in self.boundaries_cache:
boundaries_embedded = \
np.array(self.boundaries_cache[boundaries_hash])
boundaries_embedded[:self.n_keep_dims] = \
boundaries[:self.n_keep_dims] # Overwrite keep-dim's boundaries
return boundaries_embedded
# Determine boundaries on embedded space
boundaries_embedded = \
np.empty((self.n_keep_dims + self.n_embedding_dims, 2))
boundaries_embedded[:self.n_keep_dims] = boundaries[:self.n_keep_dims]
for dim in range(self.n_keep_dims,
self.n_keep_dims + self.n_embedding_dims):
x_embedded = np.zeros(self.n_keep_dims + self.n_embedding_dims)
while True:
x = self._map_to_dataspace(x_embedded)
if np.sum(np.logical_or(
x[self.n_keep_dims:] < boundaries[self.n_keep_dims:, 0],
x[self.n_keep_dims:] > boundaries[self.n_keep_dims:, 1])) \
> (self.n_dims - self.n_keep_dims) / 2:
break
x_embedded[dim] -= 0.01
boundaries_embedded[dim, 0] = x_embedded[dim]
x_embedded = np.zeros(self.n_keep_dims + self.n_embedding_dims)
while True:
x = self._map_to_dataspace(x_embedded)
if np.sum(np.logical_or(
x[self.n_keep_dims:] < boundaries[self.n_keep_dims:, 0],
x[self.n_keep_dims:] > boundaries[self.n_keep_dims:, 1])) \
> (self.n_dims - self.n_keep_dims) / 2:
break
x_embedded[dim] += 0.01
boundaries_embedded[dim, 1] = x_embedded[dim]
self.boundaries_cache[boundaries_hash] = boundaries_embedded
return boundaries_embedded
class InterleavedREMBOOptimizer(BayesianOptimizer):
""" Interleaved Random EMbedding Bayesian Optimization (REMBO).
In this extension of REMBO, several different random embeddings are chosen
and the optimization is performed on all embeddings interleaved (in a
round-robin fashion). This way, the specific choice of one random embedding
    becomes less relevant. On the other hand, fewer evaluations on each
particular embedding can be performed.
.. seealso:: Wang, Zoghi, Hutter, Matheson, <NAME>
"Bayesian Optimization in High Dimensions via Random
Embeddings", International Joint Conferences on Artificial
Intelligence (IJCAI), 2013
Parameters
----------
interleaved_runs : int
The number of interleaved runs (each on a different random embedding).
This parameter is denoted as k by Wang et al.
Further parameters are the same as in REMBOOptimizer
"""
def __init__(self, interleaved_runs=2, *args, **kwargs):
random_state = kwargs.pop("random_state", 0)
self.rembos = [REMBOOptimizer(random_state=random_state + 100 + run,
*args, **kwargs)
for run in range(interleaved_runs)]
self.rembos = cycle(self.rembos)
        self.current_rembo = next(self.rembos)
self.X_ = []
self.y_ = []
def select_query_point(self, boundaries,
incumbent_fct=lambda y: np.max(y)):
""" Select the next query point in boundaries based on acq. function.
Parameters
----------
boundaries : ndarray-like, shape: [n_dims, 2]
Box constraint on allowed query points. First axis corresponds
to dimensions of the search space and second axis to minimum and
maximum allowed value in the respective dimensions.
incumbent_fct: function, default: returns maximum observed value
A function which is used to determine the incumbent for the
acquisition function. Defaults to the maximum observed value.
"""
return self.current_rembo.select_query_point(boundaries, incumbent_fct)
def update(self, X, y):
""" Update internal REMBO responsible for observed (X, y). """
self.X_.append(X)
self.y_.append(y)
self.current_rembo.update(X, y)
        self.current_rembo = next(self.rembos)
``` |
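A typical optimization loop against this interface alternates select_query_point, an expensive objective evaluation, and update. The sketch below shows that loop; the surrogate model and acquisition function are placeholders for concrete implementations the package ships elsewhere (their names are not visible in this file), and the import path is assumed.
```python
import numpy as np
from bayesian_optimization import BayesianOptimizer   # import path assumed

def run_bo(model, acquisition_function, objective, boundaries, n_iter=25):
    opt = BayesianOptimizer(model=model,
                            acquisition_function=acquisition_function,
                            optimizer="random+lbfgs", maxf=500,
                            initial_random_samples=5, random_state=0)
    for _ in range(n_iter):
        x = opt.select_query_point(boundaries)   # argmax of the acquisition
        y = objective(x)                         # costly black-box evaluation
        opt.update(x, y)                         # refit surrogate on all data
    return opt.best_params(), opt.best_value()

# e.g. maximize -||x||^2 on the box [-5, 5]^2:
# run_bo(model, acquisition_function, lambda x: -np.sum(x ** 2),
#        boundaries=np.array([[-5.0, 5.0], [-5.0, 5.0]]))
```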
{
"source": "jmetzen/skgp",
"score": 2
} |
#### File: jmetzen/skgp/setup.py
```python
import os
def configuration(parent_package='', top_path=None):
if os.path.exists('MANIFEST'):
os.remove('MANIFEST')
from numpy.distutils.misc_util import Configuration
config = Configuration(None, parent_package, top_path)
# Avoid non-useful msg:
# "Ignoring attempt to set 'name' (from ... "
config.set_options(ignore_setup_xxx_py=True,
assume_default_configuration=True,
delegate_options_to_subpackages=True,
quiet=True)
config.add_subpackage('skgp')
return config
def setup_package():
metadata = dict(
name="skgp",
author="<NAME>",
author_email="<EMAIL>",
description="Extended Gaussian Process functionality for sklearn",
long_description=open("README.rst").read(),
license="new BSD",
url="https://github.com/jmetzen/skgp",
version="0.0",
classifiers=[
"Intended Audience :: Science/Research",
"Intended Audience :: Developers",
"Programming Language :: Python",
"Operating System :: Microsoft :: Windows",
"Operating System :: POSIX",
"Operating System :: Unix",
"Operating System :: MacOS",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
],
requires=["numpy", "scipy", "sklearn", "emcee"])
metadata['configuration'] = configuration
from numpy.distutils.core import setup
setup(**metadata)
if __name__ == "__main__":
setup_package()
```
#### File: skgp/correlation_models/non_stationary.py
```python
import numpy as np
from scipy.special import gamma, kv
from scipy.stats import expon, norm
from sklearn.cluster import KMeans
from .stationary import l1_cross_differences
MACHINE_EPSILON = np.finfo(np.double).eps
class LocalLengthScalesCorrelation(object):
""" Non-stationary correlation model based on local smoothness estimates.
This non-stationary correlation model learns internally point estimates of
local smoothness using a second-level Gaussian Process. For this, it
selects a subset of the training data and learns length-scales at this
specific points. These length scales are generalized using the second-level
Gaussian Process. Furthermore, global (isotropic or anisotropic) length
scales are learned for both the top-level GP and the length-scale GP.
The correlation model is based on the family of (stationary) Matern
kernels. The parameter nu of the Matern kernels (governing the smoothness
of the GP prior) can either be set or learned jointly with the remaining
parameters.
Parameters
----------
isotropic : bool, default=True
Whether the global length-scales of the top-level GP are isotropic or
anisotropic
nu: float, default=1.5
The parameter nu of the Matern kernels (governing the smoothness
of the GP prior). If None, nu is learned along with the other
hyperparameters.
l_isotropic : bool, default=True
Whether the global length-scales of the length-scale GP are isotropic
or anisotropic
l_samples: int, default=10
How many datapoints from the training data are selected as support
points for learning the length-scale GP
prior_b: float, default=inf
The variance of the log-normal prior distribution on the length scales.
If set to infinity, the distribution is assumed to be uniform.
.. seealso::
"Nonstationary Gaussian Process Regression using Point Estimates of Local
Smoothness", <NAME>, <NAME>, and <NAME>,
ECML 2008
"""
def __init__(self, isotropic=True, nu=1.5, l_isotropic=True, l_samples=10,
prior_b=np.inf, X_=None):
self.isotropic = isotropic
self.nu = nu
self.l_isotropic = l_isotropic
self.l_samples = l_samples
self.prior_b = prior_b
self.X_ = X_
if self.X_ is not None:
assert self.X_.shape[0] == self.l_samples
def fit(self, X, nugget=10. * MACHINE_EPSILON):
""" Fits the correlation model for training data X
Parameters
----------
X : array_like, shape=(n_samples, n_features)
An array of training datapoints at which observations were made,
i.e., where the outputs y are known
nugget : double or ndarray, optional
The Gaussian Process nugget parameter
The nugget is added to the diagonal of the assumed training
covariance; in this way it acts as a Tikhonov regularization in
the problem. In the special case of the squared exponential
correlation function, the nugget mathematically represents the
variance of the input values. Default assumes a nugget close to
machine precision for the sake of robustness
(nugget = 10. * MACHINE_EPSILON).
"""
self.X = X
self.nugget = nugget
self.n_samples = X.shape[0]
self.n_dims = X.shape[1]
# Determine how many entries in theta belong to the different
# categories (used later for parsing theta)
self.theta_gp_size = 1 if self.isotropic else self.n_dims
self.theta_l_size = 1 if self.l_isotropic else self.n_dims
self.nu_size = 1 if not self.nu else 0
self.theta_size = self.theta_gp_size + self.theta_l_size \
+ self.l_samples + self.nu_size
# Calculate array with shape (n_eval, n_features) giving the
# componentwise distances between locations x and x' at which the
# correlation model should be evaluated.
self.D, self.ij = l1_cross_differences(self.X)
if self.X_ is None:
# Select subset of X for which length scales are optimized.
            # Generalization of length scales to other datapoints is achieved
# by means of a separate Gaussian Process (gp_l)
if self.X.shape[0] >= self.l_samples:
kmeans = KMeans(n_clusters=self.l_samples)
self.X_ = kmeans.fit(self.X).cluster_centers_
else: # Fallback to select centers using sampling with replacement
self.X_ = self.X[np.random.choice(np.arange(self.X.shape[0]),
self.l_samples)]
return self
def __call__(self, theta, X=None):
""" Compute correlation for given correlation parameter(s) theta.
Parameters
----------
theta : array_like
An array giving the autocorrelation parameter(s).
X : array_like, shape(n_eval, n_features)
An array containing the n_eval query points whose correlation with
the training datapoints shall be computed. If None, autocorrelation
of the training datapoints is computed instead.
Returns
-------
r : array_like, shape=(n_eval, n_samples) if X != None
(n_samples, n_samples) if X == None
An array containing the values of the correlation model.
"""
# Parse theta into its components
theta_gp, theta_l, length_scales, nu = self._parse_theta(theta)
# Train length-scale Gaussian Process
from skgp.estimators import GaussianProcess
self.gp_l = \
GaussianProcess(corr="matern_1.5",
theta0=theta_l).fit(self.X_,
np.log10(length_scales))
l_train = 10**self.gp_l.predict(self.X)
# Prepare distances and length scale information for any pair of
# datapoints, whose correlation shall be computed
if X is not None:
# Get pairwise componentwise L1-differences to the input training
# set
d = X[:, np.newaxis, :] - self.X[np.newaxis, :, :]
d = d.reshape((-1, X.shape[1]))
# Predict length scales for query datapoints
l_query = 10**self.gp_l.predict(X)
l = np.transpose([np.tile(l_train, len(l_query)),
np.repeat(l_query, len(l_train))])
else:
# No external datapoints given; auto-correlation of training set
# is used instead
d = self.D
l = l_train[self.ij]
# Compute general Matern kernel
if d.ndim > 1 and theta_gp.size == d.ndim:
activation = np.sum(theta_gp.reshape(1, d.ndim) * d ** 2, axis=1)
else:
activation = theta_gp[0] * np.sum(d ** 2, axis=1)
tmp = 0.5*(l**2).sum(1)
tmp2 = np.maximum(2*np.sqrt(nu * activation / tmp), 1e-5)
r = np.sqrt(l[:, 0]) * np.sqrt(l[:, 1]) / (gamma(nu) * 2**(nu - 1))
r /= np.sqrt(tmp)
r *= tmp2**nu * kv(nu, tmp2)
# Convert correlations to 2d matrix
if X is not None:
return r.reshape(-1, self.n_samples)
else: # exploit symmetry of auto-correlation
R = np.eye(self.n_samples) * (1. + self.nugget)
R[self.ij[:, 0], self.ij[:, 1]] = r
R[self.ij[:, 1], self.ij[:, 0]] = r
return R
def log_prior(self, theta):
""" Returns the (log) prior probability of parameters theta.
The prior is assumed to be uniform over the parameter space except for
the length-scales dimensions. These are assumed to be log-normal
distributed with mean 0 and variance self.prior_b. If
self.prior_b is np.inf, the log length-scales are assumed to be
uniformly distributed as well.
NOTE: The returned quantity is an improper prior as its integral over
the parameter space is not equal to 1.
Parameters
----------
theta : array_like
An array giving the autocorrelation parameter(s).
Returns
-------
log_p : float
The (log) prior probability of parameters theta. An improper
probability.
"""
if self.prior_b == np.inf:
return 0.0
_, _, length_scales, _ = self._parse_theta(theta)
squared_dist = (np.log10(length_scales)**2).sum()
return -squared_dist / self.prior_b
def _parse_theta(self, theta):
""" Parse parameter vector theta into its components.
Parameters
----------
theta : array_like
An array containing all hyperparameters.
Returns
-------
theta_gp : array_like
An array containing the hyperparameters of the main GP.
theta_l : array_like
An array containing the hyperparameters of the length-scale GP.
length_scales : array_like
An array containing the length-scales for the length-scale GP.
nu : float
The parameter nu controlling the smoothness of the Matern kernel.
"""
theta = np.asarray(theta, dtype=np.float)
assert (theta.size == self.theta_size), \
"theta does not have the expected size (expected: %d, " \
"actual size %d). Expected: %d entries for main GP, " \
"%d entries for length-scale GP, %d entries containing the "\
"length scales, and %d entries for nu." \
% (self.theta_size, theta.size, self.theta_gp_size,
self.theta_l_size, self.l_samples, self.nu_size)
# Split theta in its components
theta_gp = theta[:self.theta_gp_size]
theta_l = \
theta[self.theta_gp_size:][:self.theta_l_size]
length_scales = \
theta[self.theta_gp_size+self.theta_l_size:][:self.l_samples]
nu = self.nu if self.nu else theta[-1]
return theta_gp, theta_l, length_scales, nu
@classmethod
def create(cls, dims, isotropic=True, theta0=1e-1,
thetaL=None, thetaU=None,
l_isotropic=True, theta_l_0=1e-1,
theta_l_L=None, theta_l_U=None,
l_samples=20, l_0=1.0, l_L=None, l_U=None,
nu_0=1.5, nu_L=None, nu_U=None, prior_b=np.inf,
*args, **kwargs):
""" Factory method for creating non-stationary correlation models.
        ..note:: In addition to returning an instance of
NonStationaryCorrelation, the specification of the search
space for the hyperparameters theta of the Gaussian process
is returned. This includes the start point of the search
(theta0) as well as the lower and upper boundaries thetaL and
thetaU for the values of theta.
"""
theta0 = [theta0] * (1 if isotropic else dims)
thetaL = [thetaL] * (1 if isotropic else dims)
thetaU = [thetaU] * (1 if isotropic else dims)
theta0 += [theta_l_0] * (1 if l_isotropic else dims)
thetaL += [theta_l_L] * (1 if l_isotropic else dims)
thetaU += [theta_l_U] * (1 if l_isotropic else dims)
theta0 += [l_0] * l_samples
thetaL += [l_L] * l_samples
thetaU += [l_U] * l_samples
if nu_L is not None:
theta0 += [nu_0]
thetaL += [nu_L]
thetaU += [nu_U]
corr = cls(isotropic=isotropic, nu=None if nu_L else nu_0,
l_isotropic=l_isotropic, l_samples=l_samples,
prior_b=prior_b)
return corr, theta0, thetaL, thetaU
class ManifoldCorrelation(object):
""" Non-stationary correlation model based on manifold learning.
This non-stationary correlation model consists internally of two parts:
a mapping from the actual data space onto a manifold and a stationary
correlation model on this manifold. The mapping is realized by a neural
network whose architecture can be specified externally. The parameters of
this network are learned along with the length scales of the Gaussian
process, typically such that the marginal likelihood or the posterior
probability of the GP are maximized. Any common stationary correlation
model can then be used on top of this manifold.
Parameters
----------
base_corr: string or instance of StationaryCorrelation, optional
The top-level, stationary autocorrelation function returning
the autocorrelation between two points M(x) and M(x') on the manifold.
Built-in correlation models are::
'absolute_exponential', 'squared_exponential',
'generalized_exponential', 'cubic', 'linear'
architecture: sequence of tuples
Defines the structure of the internal neural network architecture
mapping the data from the original data space onto a manifold. Note
that different data dimensions can be processed by different networks
and that the networks can have different number of layers. For
instance, the architecture ((1, 2),(2, 4, 5)) would map a 3-dimensional
input space onto a 7-dimensional manifold. For this, the first input
dimension would be processed by the network (1, 2) with 1 inputs,
2 outputs, and no hidden layer yielding the first two manifold
dimensions. The other two input dimensions would be processed by a
network (2, 4, 5) with 2 inputs, 4 hidden units, and 5 outputs
yielding the remaining five manifold dimensions.
isotropic : bool, default=True
Whether the global length-scales of the GP are isotropic or anisotropic
prior_nn_scale: float, default=inf
The standard deviation of the Gaussian prior distribution on the
network parameters. If set to infinity, the distribution is assumed to
be uniform.
prior_gp_scale: float, default=inf
The scale parameter of the exponential prior distribution on the
length-scales. If set to infinity, the distribution is assumed to be
uniform.
transfer_fct: str, default="tanh"
The transfer function used in the hidden and output units. Supported
are "tanh" and the rectified linear unit ("relu"). Defaults is "tanh"
.. seealso::
"Manifold Gaussian Process for Regression",
<NAME>, <NAME>, <NAME>, <NAME>,
http://arxiv.org/abs/1402.5876
"""
def __init__(self, base_corr, architecture, theta_nn_size,
isotropic=True, prior_nn_scale=np.inf, prior_gp_scale=np.inf,
transfer_fct="tanh"):
self.architecture = architecture
self.n_inputs = sum([subnet[0] for subnet in architecture])
self.n_outputs = sum([subnet[-1] for subnet in architecture])
self.theta_nn_size = theta_nn_size
self.isotropic = isotropic
self.prior_nn_scale = prior_nn_scale
self.prior_gp_scale = prior_gp_scale
self.transfer_fct = transfer_fct
self.theta_gp_size = 1 if self.isotropic else self.n_outputs
self.theta_size = self.theta_gp_size + self.theta_nn_size
self.base_corr = base_corr
if not callable(self.base_corr):
from skgp.correlation_models import CORRELATION_TYPES
if self.base_corr in CORRELATION_TYPES:
self.base_corr = CORRELATION_TYPES[self.base_corr]()
else:
raise ValueError("base_corr should be one of %s or callable, "
"%s was given."
                                 % (CORRELATION_TYPES.keys(),
self.base_corr))
def fit(self, X, nugget=10. * MACHINE_EPSILON):
""" Fits the correlation model for training data X
Parameters
----------
X : array_like, shape=(n_samples, n_features)
An array of training datapoints at which observations were made,
i.e., where the outputs y are known
nugget : double or ndarray, optional
The Gaussian Process nugget parameter
The nugget is added to the diagonal of the assumed training
covariance; in this way it acts as a Tikhonov regularization in
the problem. In the special case of the squared exponential
correlation function, the nugget mathematically represents the
variance of the input values. Default assumes a nugget close to
machine precision for the sake of robustness
(nugget = 10. * MACHINE_EPSILON).
"""
assert X.shape[1] == self.n_inputs
self.X = X
self.nugget = nugget
self.n_samples = X.shape[0]
return self
def __call__(self, theta, X=None):
""" Compute correlation for given correlation parameter(s) theta.
Parameters
----------
theta : array_like
An array giving the autocorrelation parameter(s).
X : array_like, shape(n_eval, n_features)
An array containing the n_eval query points whose correlation with
the training datapoints shall be computed. If None, autocorrelation
of the training datapoints is computed instead.
Returns
-------
r : array_like, shape=(n_eval, n_samples) if X != None
(n_samples, n_samples) if X == None
An array containing the values of the correlation model.
"""
# Parse theta into its components
theta_gp, theta_nn = self._parse_theta(theta)
# Map training data onto manifold
if np.any(theta_nn == 0):
theta_nn[np.where(theta_nn == 0)] \
+= np.random.random((theta_nn == 0).sum()) * 2e-5 - 1e-5
X_train_nn = self._project_manifold(self.X, theta_nn)
self.base_corr.fit(X_train_nn, nugget=self.nugget)
if X is not None:
X_test_nn = self._project_manifold(X, theta_nn)
return self.base_corr(theta_gp, X_test_nn)
else:
return self.base_corr(theta_gp)
def _project_manifold(self, X, theta_nn):
        # Lazily fetch transfer function (to keep object picklable)
if self.transfer_fct == "tanh":
transfer_fct = np.tanh
elif self.transfer_fct == "sin":
transfer_fct = np.sin
elif self.transfer_fct == "relu":
transfer_fct = lambda x: np.maximum(0, x)
elif hasattr(self.transfer_fct, "__call__"):
transfer_fct = self.transfer_fct
y = []
for subnet in self.architecture:
y.append(X[:, :subnet[0]])
for layer in range(len(subnet) - 1):
W = theta_nn[:subnet[layer]*subnet[layer+1]]
W = W.reshape((subnet[layer], subnet[layer+1]))
b = theta_nn[subnet[layer]*subnet[layer+1]:
(subnet[layer]+1)*subnet[layer+1]]
a = y[-1].dot(W) + b
y[-1] = transfer_fct(a)
# chop off weights of this layer
theta_nn = theta_nn[(subnet[layer]+1)*subnet[layer+1]:]
X = X[:, subnet[0]:] # chop off used input dimensions
return np.hstack(y)
def log_prior(self, theta):
""" Returns the (log) prior probability of parameters theta.
TODO
NOTE: The returned quantity is an improper prior as its integral over
the parameter space is not equal to 1.
Parameters
----------
theta : array_like
An array giving the autocorrelation parameter(s).
Returns
-------
log_p : float
The (log) prior probability of parameters theta. An improper
probability.
"""
theta_gp, theta_nn = self._parse_theta(theta)
if self.prior_nn_scale == np.inf:
prior_nn = 0.0
else:
prior_nn = norm.logpdf(theta_nn, scale=self.prior_nn_scale).sum()
if self.prior_gp_scale == np.inf:
prior_gp = 0.0
else:
prior_gp = expon.logpdf(theta_gp, scale=self.prior_gp_scale).sum()
return prior_nn + prior_gp
def _parse_theta(self, theta):
""" Parse parameter vector theta into its components.
Parameters
----------
theta : array_like
An array containing all hyperparameters.
Returns
-------
theta_gp : array_like
An array containing the hyperparameters of the main GP.
theta_nn : array_like
An array containing the hyperparameters of the manifold model.
"""
theta = np.asarray(theta, dtype=np.float)
assert (theta.size == self.theta_size), \
"theta does not have the expected size (expected: %d, " \
"actual size %d). Expected: %d entries for main GP and " \
"%d entries for length-scale GP." \
% (self.theta_size, theta.size, self.theta_gp_size,
self.theta_nn_size)
# Split theta in its components
theta_gp = theta[:self.theta_gp_size]
theta_nn = theta[self.theta_gp_size:]
return theta_gp, np.log10(theta_nn)
@classmethod
def create(cls, base_corr, architecture, isotropic=True,
theta0=1e-1, thetaL=None, thetaU=None,
max_nn_weight=5, prior_nn_scale=np.inf, prior_gp_scale=np.inf,
transfer_fct="tanh", *args, **kwargs):
""" Factory method for creating manifold correlation models.
..note:: In addition to returning an instance of
ManifoldCorrelation, the specification of the search
space for the hyperparameters theta of the Gaussian process
is returned. This includes the start point of the search
(theta0) as well as the lower and upper boundaries thetaL and
thetaU for the values of theta.
"""
assert "prior_b" not in kwargs
n_outputs, theta_nn_size = cls.determine_network_layout(architecture)
theta0 = [theta0] * (1 if isotropic else n_outputs)
thetaL = [thetaL] * (1 if isotropic else n_outputs)
thetaU = [thetaU] * (1 if isotropic else n_outputs)
theta0 += \
list(10**np.random.uniform(-max_nn_weight, max_nn_weight,
theta_nn_size))
thetaL += [10**-max_nn_weight] * theta_nn_size
thetaU += [10**max_nn_weight] * theta_nn_size
corr = cls(base_corr, architecture, theta_nn_size=theta_nn_size,
isotropic=isotropic, prior_nn_scale=prior_nn_scale,
prior_gp_scale=prior_gp_scale, transfer_fct=transfer_fct)
return corr, theta0, thetaL, thetaU
@staticmethod
def determine_network_layout(architecture):
""" Determine number of outputs and params of given architecture."""
n_outputs = 0
n_params = 0
for subnet in architecture:
for layer in range(len(subnet) - 1):
n_params += (subnet[layer] + 1) * subnet[layer+1]
n_outputs += subnet[-1]
return n_outputs, n_params
```
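The create() factories return both the correlation object and the matching search space (theta0, thetaL, thetaU) for the GP hyperparameters, mirroring how this module itself instantiates skgp's GaussianProcess. A sketch of wiring the two together; the estimator's keyword names are assumed from that internal usage and the sklearn-style API, so treat this as illustrative rather than definitive:
```python
import numpy as np
from skgp.estimators import GaussianProcess
from skgp.correlation_models.non_stationary import LocalLengthScalesCorrelation

rng = np.random.RandomState(0)
X = rng.uniform(-1, 1, (50, 2))
y = np.sin(3 * X[:, 0]) * np.exp(-np.abs(X[:, 1]))

corr, theta0, thetaL, thetaU = LocalLengthScalesCorrelation.create(
    dims=2, theta0=1e-1, thetaL=1e-3, thetaU=1e1,
    theta_l_0=1e-1, theta_l_L=1e-3, theta_l_U=1e1,
    l_samples=10, l_0=1.0, l_L=0.1, l_U=10.0)

gp = GaussianProcess(corr=corr, theta0=theta0, thetaL=thetaL, thetaU=thetaU)
gp.fit(X, y)
```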
#### File: skgp/correlation_models/stationary.py
```python
from abc import ABCMeta, abstractmethod
import numpy as np
from sklearn.utils import check_array
from sklearn.externals.six import with_metaclass
MACHINE_EPSILON = np.finfo(np.double).eps
def l1_cross_differences(X):
"""
Computes the nonzero componentwise differences between the vectors
in X.
Parameters
----------
X: array_like
An array with shape (n_samples, n_features)
Returns
-------
D: array with shape (n_samples * (n_samples - 1) / 2, n_features)
The array of componentwise differences.
ij: arrays with shape (n_samples * (n_samples - 1) / 2, 2)
The indices i and j of the vectors in X associated to the cross-
distances in D: D[k] = np.abs(X[ij[k, 0]] - Y[ij[k, 1]]).
"""
X = check_array(X)
n_samples, n_features = X.shape
n_nonzero_cross_diff = n_samples * (n_samples - 1) // 2
ij = np.zeros((n_nonzero_cross_diff, 2), dtype=np.int)
D = np.zeros((n_nonzero_cross_diff, n_features))
ll_1 = 0
for k in range(n_samples - 1):
ll_0 = ll_1
ll_1 = ll_0 + n_samples - k - 1
ij[ll_0:ll_1, 0] = k
ij[ll_0:ll_1, 1] = np.arange(k + 1, n_samples)
D[ll_0:ll_1] = X[k] - X[(k + 1):n_samples]
return D, ij.astype(np.int)
class StationaryCorrelation(with_metaclass(ABCMeta, object)):
""" Base-class for stationary correlation models for Gaussian Processes.
    Stationary correlation models depend only on the relative distance
and not on the absolute positions of the respective datapoints. We can thus
work internally solely on these distances.
"""
def __init__(self):
pass
def fit(self, X, nugget=10. * MACHINE_EPSILON):
""" Fits the correlation model for training data X
Parameters
----------
X : array_like, shape=(n_samples, n_features)
An array of training datapoints at which observations were made,
i.e., where the outputs y are known
nugget : double or ndarray, optional
The Gaussian Process nugget parameter
The nugget is added to the diagonal of the assumed training
covariance; in this way it acts as a Tikhonov regularization in
the problem. In the special case of the squared exponential
correlation function, the nugget mathematically represents the
variance of the input values. Default assumes a nugget close to
machine precision for the sake of robustness
(nugget = 10. * MACHINE_EPSILON).
"""
self.X = X
self.nugget = nugget
self.n_samples = X.shape[0]
# Calculate array with shape (n_eval, n_features) giving the
# componentwise distances between locations x and x' at which the
# correlation model should be evaluated.
self.D, self.ij = l1_cross_differences(self.X)
if (np.min(np.sum(self.D, axis=1)) == 0.
and not isinstance(self, PureNugget)):
raise Exception("Multiple input features cannot have the same"
" value.")
def __call__(self, theta, X=None):
""" Compute correlation for given correlation parameter(s) theta.
Parameters
----------
theta : array_like
            An array giving the autocorrelation parameter(s).
Dimensionality depends on the specific correlation model; often
shape (1,) corresponds to an isotropic correlation model and shape
            (n_features,) to an anisotropic one.
X : array_like, shape(n_eval, n_features)
An array containing the n_eval query points whose correlation with
the training datapoints shall be computed. If None, autocorrelation
of the training datapoints is computed instead.
Returns
-------
r : array_like, shape=(n_eval, n_samples) if X != None
(n_samples, n_samples) if X == None
An array containing the values of the correlation model.
"""
theta = np.asarray(theta, dtype=np.float)
if X is not None:
# Get pairwise componentwise L1-differences to the input training
# set
d = X[:, np.newaxis, :] - self.X[np.newaxis, :, :]
d = d.reshape((-1, X.shape[1]))
else:
# No external datapoints given; auto-correlation of training set
# is used instead
d = self.D
if d.ndim > 1:
n_features = d.shape[1]
else:
n_features = 1
# Compute the correlation for the respective correlation model (handled
# by subclass)
r = self._compute_corr(theta, d, n_features)
if X is not None:
# Convert to 2d matrix
return r.reshape(-1, self.n_samples)
else:
# Auto-correlation computed only for upper triangular part of
# matrix. Fill diagonal with 1+nugget and the lower triangular
# by exploiting symmetry of matrix
R = np.eye(self.n_samples) * (1. + self.nugget)
R[self.ij[:, 0], self.ij[:, 1]] = r
R[self.ij[:, 1], self.ij[:, 0]] = r
return R
def log_prior(self, theta):
""" Returns the (log) prior probability of parameters theta.
The prior is assumed to be uniform over the parameter space.
NOTE: The returned quantity is an improper prior as its integral over
the parameter space is not equal to 1.
Parameters
----------
theta : array_like, shape=(1,) or (n_features,)
An array with shape 1 (isotropic) or n_features (anisotropic)
giving the autocorrelation parameter(s).
Returns
-------
log_p : float
The (log) prior probability of parameters theta. An improper
probability.
"""
return 0
@abstractmethod
def _compute_corr(self, theta, d, n_features):
""" Correlation for given pairwise, component-wise L1-differences.
Parameters
----------
theta : array_like, shape=(1,) or (n_features,)
An array with shape 1 (isotropic) or n_features (anisotropic)
giving the autocorrelation parameter(s).
d : array_like, shape=(n_eval, n_features)
An array with the pairwise, component-wise L1-differences of x
and x' at which the correlation model should be evaluated.
Returns
-------
r : array_like, shape=(n_eval, )
An array containing the values of the autocorrelation model.
"""
class AbsoluteExponential(StationaryCorrelation):
""" Absolute exponential autocorrelation model.
Absolute exponential autocorrelation model (Ornstein-Uhlenbeck stochastic
process)::
n
theta, d --> r(theta, d) = exp( sum - theta_i * d_i )
i = 1
"""
def _compute_corr(self, theta, d, n_features):
""" Correlation for given pairwise, component-wise L1-differences.
Parameters
----------
theta : array_like, shape=(1,) or (n_features,)
An array with shape 1 (isotropic) or n_features (anisotropic)
giving the autocorrelation parameter(s).
d : array_like, shape=(n_eval, n_features)
An array with the pairwise, component-wise L1-differences of x
and x' at which the correlation model should be evaluated.
Returns
-------
r : array_like, shape=(n_eval, )
An array containing the values of the autocorrelation model.
"""
d = np.asarray(d, dtype=np.float)
d = np.abs(d)
if theta.size == 1:
return np.exp(- theta[0] * np.sum(d, axis=1))
elif theta.size != n_features:
raise ValueError("Length of theta must be 1 or %s" % n_features)
else:
return np.exp(- np.sum(theta.reshape(1, n_features) * d, axis=1))
class SquaredExponential(StationaryCorrelation):
""" Squared exponential correlation model.
Squared exponential correlation model (Radial Basis Function).
(Infinitely differentiable stochastic process, very smooth)::
n
theta, d --> r(theta, d) = exp( sum - theta_i * (d_i)^2 )
i = 1
"""
def _compute_corr(self, theta, d, n_features):
""" Correlation for given pairwise, component-wise L1-differences.
Parameters
----------
theta : array_like, shape=(1,) [isotropic]
(n_features,) [anisotropic] or
(k*n_features,) [factor analysis distance]
An array encoding the autocorrelation parameter(s).
d : array_like, shape=(n_eval, n_features)
An array with the pairwise, component-wise L1-differences of x
and x' at which the correlation model should be evaluated.
Returns
-------
r : array_like, shape=(n_eval, )
An array containing the values of the autocorrelation model.
"""
d = np.asarray(d, dtype=np.float)
return np.exp(-self._quadratic_activation(theta, d, n_features))
def _quadratic_activation(self, theta, d, n_features):
""" Utility function for computing quadratic activation.
Computes the activation activ=d.T * M * d where M is a covariance
matrix of size n*n. The hyperparameters theta specify
* an isotropic covariance matrix, i.e., M = theta * I with I being the
identity, if theta has shape 1
* an automatic relevance determination model if theta has shape n,
in which the characteristic length scales of each dimension are
learned separately: M = diag(theta)
* a factor analysis distance model if theta has shape k*n for k> 1,
in which a low-rank approximation of the full matrix M is learned.
This low-rank approximation approximates the covariance matrix as
low-rank matrix plus a diagonal matrix:
M = Lambda * Lambda.T + diag(l),
where Lambda is a n*(k-1) matrix and l specifies the diagonal
matrix.
Parameters
----------
theta : array_like, shape=(1,) [isotropic]
(n_features,) [anisotropic] or
(k*n_features,) [factor analysis distance]
An array encoding the autocorrelation parameter(s). In the
case of the factor analysis distance, M is approximated by
M = Lambda * Lambda.T + diag(l), where l is encoded in the last n
entries of theta and Lambda is encoded row-wise in the first
entries of theta. Note that Lambda may contain negative entries
while theta is strictly positive; because of this, the entries of
Lambda are set to the logarithm with basis 10 of the corresponding
entries in theta.
array_like, shape=(n_eval, n_features)
An array giving the componentwise differences of x and x' at
which the quadratic activation should be evaluated.
Returns
-------
a : array_like, shape=(n_eval, )
An array with the activation values for the respective
componentwise differences d.
"""
if theta.size == 1: # case where M is isotropic: M = diag(theta[0])
return theta[0] * np.sum(d ** 2, axis=1)
elif theta.size == n_features: # anisotropic but diagonal case (ARD)
return np.sum(theta.reshape(1, n_features) * d ** 2, axis=1)
elif theta.size % n_features == 0:
# Factor analysis case: M = lambda*lambda.T + diag(l)
theta = theta.reshape((1, theta.size))
M = np.diag(theta[0, :n_features]) # the diagonal matrix part l
# The low-rank matrix contribution which allows accounting for
# correlations in the feature dimensions
# NOTE: these components of theta are passed through a log-function
# to allow negative values in Lambda
Lambda = np.log10(theta[0, n_features:].reshape((n_features, -1)))
M += Lambda.dot(Lambda.T)
return np.sum(d.dot(M) * d, -1)
else:
raise ValueError("Length of theta must be 1 or a multiple of %s."
% n_features)
class Matern_1_5(SquaredExponential):
""" Matern correlation model for nu=1.5.
Sample paths are once differentiable. Given by::
r(theta, dx) = (1 + np.sqrt(3*activ))*exp(-np.sqrt(3*activ))
where activ=dx.T * M * dx and M is a covariance matrix of size n*n.
See Rasmussen and Williams 2006, pp84 for details regarding the different
variants of the Matern kernel.
"""
def _compute_corr(self, theta, d, n_features):
""" Correlation for given pairwise, component-wise L1-differences.
Parameters
----------
theta : array_like, shape=(1,) [isotropic]
(n_features,) [anisotropic] or
(k*n_features,) [factor analysis distance]
An array encoding the autocorrelation parameter(s).
d : array_like, shape=(n_eval, n_features)
An array with the pairwise, component-wise L1-differences of x
and x' at which the correlation model should be evaluated.
Returns
-------
r : array_like, shape=(n_eval, )
An array containing the values of the autocorrelation model.
"""
        d = np.asarray(d, dtype=float)
activ = self._quadratic_activation(theta, d, n_features)
tmp = np.sqrt(3 * activ) # temporary variable for preventing
# recomputation
return (1 + tmp) * np.exp(-tmp)
class Matern_2_5(SquaredExponential):
""" Matern correlation model for nu=2.5.
Sample paths are twice differentiable. Given by::
r(theta, dx) = (1 + np.sqrt(5*activ) + 5/3*activ)*exp(-np.sqrt(5*activ))
where activ=dx.T * M * dx and M is a covariance matrix of size n*n.
    See Rasmussen and Williams (2006), p. 84, for details regarding the different
variants of the Matern kernel.
"""
def _compute_corr(self, theta, d, n_features):
""" Correlation for given pairwise, component-wise L1-differences.
Parameters
----------
theta : array_like, shape=(1,) [isotropic]
(n_features,) [anisotropic] or
(k*n_features,) [factor analysis distance]
An array encoding the autocorrelation parameter(s).
d : array_like, shape=(n_eval, n_features)
An array with the pairwise, component-wise L1-differences of x
and x' at which the correlation model should be evaluated.
Returns
-------
r : array_like, shape=(n_eval, )
An array containing the values of the autocorrelation model.
"""
        d = np.asarray(d, dtype=float)
activ = self._quadratic_activation(theta, d, n_features)
tmp = np.sqrt(5 * activ) # temporary variable for preventing
# recomputation
return (1 + tmp + 5.0 / 3.0 * activ) * np.exp(-tmp)
class GeneralizedExponential(StationaryCorrelation):
""" Generalized exponential correlation model.
Generalized exponential correlation model.
(Useful when one does not know the smoothness of the function to be
predicted.)::
n
theta, d --> r(theta, d) = exp( sum - theta_i * |d_i|^p )
i = 1
"""
def _compute_corr(self, theta, d, n_features):
""" Correlation for given pairwise, component-wise L1-differences.
Parameters
----------
theta : array_like, shape=(1+1,) or (n_features+1,)
An array with shape 1+1 (isotropic) or n_features+1 (anisotropic)
giving the autocorrelation parameter(s) (theta, p).
d : array_like, shape=(n_eval, n_features)
An array with the pairwise, component-wise L1-differences of x
and x' at which the correlation model should be evaluated.
Returns
-------
r : array_like, shape=(n_eval, )
An array containing the values of the autocorrelation model.
"""
        d = np.asarray(d, dtype=float)
lth = theta.size
        if n_features > 1 and lth == 2:
            # expand the isotropic (theta, p) pair and keep theta
            # two-dimensional so the slicing below works
            theta = np.hstack([np.repeat(theta[0], n_features),
                               theta[1]]).reshape(1, n_features + 1)
elif lth != n_features + 1:
raise Exception("Length of theta must be 2 or %s"
% (n_features + 1))
else:
theta = theta.reshape(1, lth)
td = theta[:, 0:-1].reshape(1, n_features) \
* np.abs(d) ** theta[:, -1]
return np.exp(- np.sum(td, 1))
class PureNugget(StationaryCorrelation):
""" Spatial independence correlation model (pure nugget).
Useful when one wants to solve an ordinary least squares problem!::
n
theta, d --> r(theta, dx) = 1 if sum |d_i| == 0
i = 1
0 otherwise
"""
def _compute_corr(self, theta, d, n_features):
""" Correlation for given pairwise, component-wise L1-differences.
Parameters
----------
theta : array_like
None.
d : array_like, shape=(n_eval, n_features)
An array with the pairwise, component-wise L1-differences of x
and x' at which the correlation model should be evaluated.
Returns
-------
r : array_like
An array with shape (n_eval, ) with the values of the
autocorrelation model.
"""
        d = np.asarray(d, dtype=float)
n_eval = d.shape[0]
r = np.zeros(n_eval)
r[np.all(d == 0., axis=1)] = 1.
return r
class Cubic(StationaryCorrelation):
""" Cubic correlation model.
Cubic correlation model::
theta, d --> r(theta, d) =
n
prod max(0, 1 - 3(theta_j*d_ij)^2 + 2(theta_j*d_ij)^3) , i = 1,...,m
j = 1
"""
def _compute_corr(self, theta, d, n_features):
""" Correlation for given pairwise, component-wise L1-differences.
Parameters
----------
theta : array_like, shape=(1,) or (n_features,)
An array with shape 1 (isotropic) or n_features (anisotropic)
giving the autocorrelation parameter(s).
d : array_like, shape=(n_eval, n_features)
An array with the pairwise, component-wise L1-differences of x
and x' at which the correlation model should be evaluated.
Returns
-------
r : array_like, shape=(n_eval, )
An array containing the values of the autocorrelation model.
"""
        d = np.asarray(d, dtype=float)
lth = theta.size
if lth == 1:
td = np.abs(d) * theta
elif lth != n_features:
raise Exception("Length of theta must be 1 or " + str(n_features))
else:
td = np.abs(d) * theta.reshape(1, n_features)
td[td > 1.] = 1.
ss = 1. - td ** 2. * (3. - 2. * td)
return np.prod(ss, 1)
class Linear(StationaryCorrelation):
""" Linear correlation model.
Linear correlation model::
theta, d --> r(theta, d) =
n
prod max(0, 1 - theta_j*d_ij) , i = 1,...,m
j = 1
"""
def _compute_corr(self, theta, d, n_features):
""" Correlation for given pairwise, component-wise L1-differences.
Parameters
----------
theta : array_like, shape=(1,) or (n_features,)
An array with shape 1 (isotropic) or n_features (anisotropic)
giving the autocorrelation parameter(s).
d : array_like, shape=(n_eval, n_features)
An array with the pairwise, component-wise L1-differences of x
and x' at which the correlation model should be evaluated.
Returns
-------
r : array_like, shape=(n_eval, )
An array containing the values of the autocorrelation model.
"""
        d = np.asarray(d, dtype=float)
lth = theta.size
if lth == 1:
td = np.abs(d) * theta
elif lth != n_features:
raise Exception("Length of theta must be 1 or %s" % n_features)
else:
td = np.abs(d) * theta.reshape(1, n_features)
td[td > 1.] = 1.
ss = 1. - td
return np.prod(ss, 1)
```
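The factor-analysis branch of `_quadratic_activation` is the least obvious one, so here is a minimal standalone NumPy sketch of the same computation. The toy shapes and numbers are made up for illustration; only the formula M = Lambda * Lambda.T + diag(l) (with the log10 encoding of Lambda) is taken from the docstring above.

```python
import numpy as np

# Toy setup: n_features = 2 and k = 2, i.e. theta has k*n = 4 entries.
n_features = 2
theta = np.array([0.5, 2.0, 10.0, 100.0])  # first n entries -> diag(l), rest -> Lambda (base-10 log)

l_diag = np.diag(theta[:n_features])                            # diag(l)
Lambda = np.log10(theta[n_features:]).reshape(n_features, -1)   # log10 allows "negative" entries
M = l_diag + Lambda.dot(Lambda.T)                               # M = Lambda * Lambda.T + diag(l)

d = np.array([[1.0, -1.0],
              [0.5, 0.25]])                                     # two pairwise difference vectors
activ = np.sum(d.dot(M) * d, axis=-1)                           # d_i.T * M * d_i for every row i
print(activ)                                                    # -> [3.5, 1.25]
```

The last line is the same vectorized trick used in the class above: `np.sum(d.dot(M) * d, -1)` gives one quadratic form per row of `d` without an explicit loop.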
#### File: skgp/estimators/bayesian_gp.py
```python
import numpy as np
from sklearn.base import BaseEstimator, RegressorMixin, clone
class BayesianGaussianProcess(BaseEstimator, RegressorMixin):
"""The Bayesian Gaussian Process model class.
    This class wraps a Gaussian Process object and allows sampling the
    hyperparameters from the posterior rather than selecting them via
    maximum likelihood. It thus accounts better for the uncertainty in
    hyperparameter selection (when computing the predictive variance), but at
    the cost of considerably increased computation time - both during fitting,
    which involves running MCMC with the emcee package, and during prediction,
    which requires evaluating the base GP for every hyperparameter sample
    of the posterior.
Parameters
----------
base_gp : instance of Gaussian Process
        The basic Gaussian process object, which allows training and executing
        a GP for fixed hyperparameters that can be selected using maximum
        likelihood.
n_posterior_samples: int
The number of samples taken from the posterior distribution. The more
samples, the more accurate the posterior distribution is captured;
however, the computational cost during prediction also increases
linearly
n_burnin: int
The number of burn-in steps during MCMC sampling
n_sampling_steps: int
The number of actual sampling steps during MCMC sampling
"""
def __init__(self, base_gp, n_posterior_samples=25,
n_burnin=500, n_sampling_steps=250, n_jobs=1):
self.base_gp = base_gp
self.n_posterior_samples = n_posterior_samples
self.n_burnin = n_burnin
self.n_sampling_steps = n_sampling_steps
self.n_jobs = n_jobs
self.gps = []
def fit(self, X, y):
"""
The Bayesian Gaussian Process model fitting method.
Parameters
----------
X : double array_like
An array with shape (n_samples, n_features) with the input at which
observations were made.
y : double array_like
An array with shape (n_samples, ) or shape (n_samples, n_targets)
with the observations of the output to be predicted.
Returns
-------
gp : self
A fitted Bayesian Gaussian Process model object awaiting data to
perform predictions.
"""
try:
import emcee
except ImportError:
raise Exception("BayesianGaussianProcess requires the emcee "
"package")
# Initialize with ML estimate
self.base_gp.fit(X, y)
# Initialize the MCMC sampling
n_dim = len(self.base_gp.theta_)
self.sampler = emcee.EnsembleSampler(nwalkers=2*n_dim, dim=n_dim,
lnpostfn=self.lnpostfn,
threads=self.n_jobs)
# Start ensemble walkers from perturbed ML estimate
p0 = [self.base_gp.theta_ * (1 + np.random.randn(n_dim) * 2e-3 - 1e-3)
for i in range(2*n_dim)]
# Run burn-in
p0, _, _ = self.sampler.run_mcmc(p0, self.n_burnin)
# Running production chain
self.sampler.run_mcmc(p0, self.n_sampling_steps)
# Select posterior samples and create clone of GP for these
self.theta_ = []
for i in range(self.n_posterior_samples):
# Select posterior samples
w = np.random.randint(self.sampler.chain.shape[0])
n = np.random.randint(self.n_burnin, self.sampler.chain.shape[1])
self.theta_.append(self.sampler.chain[w, n])
# Create clone of GP with these hyperparameter sample
gp = clone(self.base_gp)
gp.theta0 = self.theta_[-1]
gp.thetaL = None
gp.thetaU = None
gp.fit(X, y)
self.gps.append(gp)
self.theta_ = np.array(self.theta_)
# Make some of the base_gp's attributes accessible
self.X = self.base_gp.X
self.y = self.base_gp.y
self.corr = self.base_gp.corr
return self
def predict(self, X, eval_MSE=False):
""" Evaluates the Bayesian Gaussian Process model at X.
Parameters
----------
X : array_like
An array with shape (n_eval, n_features) giving the point(s) at
which the prediction(s) should be made.
eval_MSE : boolean, optional
A boolean specifying whether the Mean Squared Error should be
evaluated or not.
Default assumes evalMSE = False and evaluates only the BLUP (mean
prediction).
Returns
-------
y : array_like, shape (n_samples, ) or (n_samples, n_targets)
An array with shape (n_eval, ) if the Gaussian Process was trained
on an array of shape (n_samples, ) or an array with shape
(n_eval, n_targets) if the Gaussian Process was trained on an array
of shape (n_samples, n_targets) with the Best Linear Unbiased
Prediction at x.
MSE : array_like, optional (if eval_MSE == True)
An array with shape (n_eval, ) or (n_eval, n_targets) as with y,
with the Mean Squared Error at x.
"""
X = np.atleast_2d(X)
if self.y.shape[1] > 1:
y_pred_means = \
np.empty((self.n_posterior_samples, X.shape[0],
self.y.shape[1]))
else:
y_pred_means = \
np.empty((self.n_posterior_samples, X.shape[0]))
if eval_MSE:
if self.y.shape[1] > 1:
y_pred_variances = \
np.empty((self.n_posterior_samples, X.shape[0],
self.y.shape[1]))
else:
y_pred_variances = \
np.empty((self.n_posterior_samples, X.shape[0]))
for i in range(self.n_posterior_samples):
if eval_MSE:
y_pred_means[i], y_pred_variances[i] = \
self.gps[i].predict(X, eval_MSE=True)
else:
y_pred_means[i] = self.gps[i].predict(X, eval_MSE=False)
first_moments = y_pred_means.mean(0)
if eval_MSE:
            # Moment matching of the mixture (law of total variance):
            # E[mean^2] + E[Var] over the posterior samples is the mixture's
            # second moment; the mixture mean is subtracted below.
            second_moments = (y_pred_means ** 2 + y_pred_variances).mean(0)
return first_moments, second_moments - first_moments**2
else:
return first_moments
def lnpostfn(self, theta):
""" Returns the log posterior probability of parameter theta. """
theta = np.asarray(theta)
if np.any((self.base_gp.thetaL * (1 - 1e-5) > theta)
+ (theta > self.base_gp.thetaU * (1 + 1e-5))):
return -np.inf
log_prior = self.base_gp.corr.log_prior(theta)
log_likelihood = self.base_gp.reduced_likelihood_function(theta)[0]
return log_prior + log_likelihood
def __getstate__(self):
""" Return a pickable state for this object """
odict = self.__dict__.copy() # copy the dict since we change it
odict.pop("sampler", None)
return odict
```
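The `predict` method above combines the per-sample GP predictions by matching the first two moments of the resulting mixture (law of total variance). A minimal NumPy sketch of that combination step, with made-up numbers standing in for real GP outputs:

```python
import numpy as np

# Pretend these came from 3 posterior hyperparameter samples for 2 query points.
y_pred_means = np.array([[1.0, 2.0],
                         [1.2, 1.8],
                         [0.8, 2.2]])
y_pred_variances = np.array([[0.10, 0.20],
                             [0.15, 0.25],
                             [0.05, 0.15]])

mixture_mean = y_pred_means.mean(axis=0)
# E[Var] + Var[E]: average the per-sample variances and add the spread of the means.
mixture_var = y_pred_variances.mean(axis=0) + y_pred_means.var(axis=0)
print(mixture_mean, mixture_var)
```

This mirrors the moment matching in `predict`: the mixture variance is larger than the average per-sample variance whenever the posterior samples disagree about the mean.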
#### File: skgp/skgp/setup.py
```python
import os
from os.path import join
import warnings
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
import numpy
config = Configuration('skgp', parent_package, top_path)
config.add_subpackage('correlation_models')
config.add_subpackage('estimators')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
``` |
{
"source": "jmetz/momanalysis",
"score": 3
} |
#### File: momanalysis/mmhelper/plot.py
```python
import matplotlib.pyplot as plt
from matplotlib import animation
def view_stack(data):
"""
Wrapper around matplotlib to create a simple data viewer
"""
fig = plt.figure()
axis = fig.add_subplot(111)
img = axis.imshow(data[0], cmap='gray')
img.TMAX = len(data)
img.tnow = 0
img.pause = False
title = plt.title("Frame %d" % img.tnow)
def prevframe():
img.tnow = (img.tnow - 1) % img.TMAX
img.set_data(data[img.tnow])
title.set_text("Frame %d" % img.tnow)
fig.canvas.draw()
def nextframe(stuff=None):
if img.pause and (stuff is not None):
return
img.tnow = (img.tnow + 1) % img.TMAX
img.set_data(data[img.tnow])
title.set_text("Frame %d" % img.tnow)
fig.canvas.draw()
def press(event):
if event.key == "left":
prevframe()
elif event.key == "right":
nextframe()
elif event.key == " ":
img.pause ^= True
else:
print("Unbound key pressed:", event.key)
fig.canvas.mpl_connect('key_press_event', press)
    # Keep a reference to the animation so it is not garbage-collected
    # before plt.show() runs.
    anim = animation.FuncAnimation(
        fig, nextframe, blit=False, interval=10, repeat=True)
plt.show()
```
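A minimal usage sketch for the viewer above, assuming the `mmhelper` package is importable; the random stack is just placeholder data. The left/right arrow keys step through frames and the space bar toggles the pause flag used by the animation callback.

```python
import numpy as np

from mmhelper.plot import view_stack

# 20 frames of 64x64 noise stand in for a real image stack.
stack = np.random.rand(20, 64, 64)
view_stack(stack)
```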
#### File: mmhelper/tests/test_measurements.py
```python
import unittest
from unittest import mock
import mmhelper.measurements as mmeas
from mmhelper.measurements_class import BacteriaData, IndividualBacteria
import numpy as np
from skimage.measure import regionprops
class TestFindFilename(unittest.TestCase):
"""
Class for testing the function for finding the filename
"""
def setUp(self):
self.inputname = 'testinputfilename.test'
self.output = 'testinputfilename'
@mock.patch('os.makedirs')
def test_input_filename(self, mock_osmakedirs):
"""
        Tests that the input filename is found properly
"""
self.assertEqual(
self.output, mmeas.find_input_filename(self.inputname)[1])
# Check that makedirs was called twice
self.assertEqual(mock_osmakedirs.call_count, 2)
class TestCounts(unittest.TestCase):
"""
Class for testing counting the number of bacteria in wells
"""
def setUp(self):
self.wells = np.array([
[0, 0, 0, 0, 0],
[0, 1, 0, 2, 0],
[0, 1, 0, 2, 0],
[0, 1, 0, 2, 0],
])
self.bacteria = np.array([
[0, 0, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 0, 0, 3, 0],
[0, 2, 0, 0, 0],
])
self.counts = [2, 1]
# Using just individual labels
self.bacteria_labels = {
1: np.array([[1, 0, 2, 2, 2, 0, 3], ]),
2: np.array([[0, 0, 0], ]),
3: np.array([[1, ], ])
}
self.counts2 = [3, 0, 1]
def test_counts_just_labels(self):
"""
Tests function count_bacteria_in_wells
"""
self.assertEqual(
self.counts2, mmeas.count_bacteria_in_wells(self.bacteria_labels))
class TestBacteriaMeasurements(unittest.TestCase):
"""
Class for testing bacteria measurements
"""
def setUp(self):
self.bacteria_labels = np.array([
[0, 1, 1, 0, 0, 0],
[1, 1, 1, 1, 0, 0],
[1, 1, 1, 1, 0, 0],
[1, 1, 1, 1, 0, 0],
[1, 1, 1, 1, 0, 0],
[1, 1, 1, 1, 0, 0],
[0, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 2, 2, 0, 0, 0],
[2, 2, 2, 2, 0, 0],
[2, 2, 2, 2, 0, 0],
[2, 2, 2, 2, 0, 0],
[0, 2, 2, 0, 0, 0],
])
self.bacteria_fluo = [np.array([
[0, 180, 180, 0, 0, 0],
[180, 180, 180, 180, 0, 0],
[180, 180, 180, 180, 0, 0],
[180, 180, 180, 180, 0, 0],
[180, 180, 180, 180, 0, 0],
[180, 180, 180, 180, 0, 0],
[0, 180, 180, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 200, 200, 0, 0, 0],
[200, 200, 200, 200, 0, 0],
[200, 200, 200, 200, 0, 0],
[200, 200, 200, 200, 0, 0],
[0, 200, 200, 0, 0, 0]
])]
self.well_label = 42
self.measurements = BacteriaData()
self.measurements.bacteria[1] = IndividualBacteria(1)
self.measurements.bacteria[1].bacteria_label = '2'
self.measurements.bacteria[1].bf_measurements = {
"Well label": [self.well_label], "Area": [24],
"Width": [4], "Length": [7]}
self.measurements.bacteria[1].raw_fluorescence = {(0, 0): 180}
self.measurements.bacteria[1].actual_fluorescence = {(0, 0): 170}
self.measurements.bacteria[1].integrated_fluorescence = {(0, 0): 4080}
self.measurements.bacteria[1].output_line = []
self.measurements.bacteria[1].timepoints = [1]
self.measurements.bacteria[1].num_fluo = 0
self.measurements.bacteria[1].headings_line = [
'well label',
'lineage',
'area',
'length',
'width',
'raw_fluorescence',
'fluorescence',
'integrated_fluorescence']
self.measurements.bacteria[1].measurements_output = [
self.well_label, '1', 24, 7.118052168020874,
4.163331998932266, 180.0, 170.0, 4080.0]
self.measurements.bacteria[2] = IndividualBacteria(2)
self.measurements.bacteria[2].bacteria_label = '2'
self.measurements.bacteria[2].bf_measurements = {
"Well label": [self.well_label],
"Area": [16], "Width": [4], "Length": [5]}
self.measurements.bacteria[2].raw_fluorescence = {(0, 0): 200}
self.measurements.bacteria[2].actual_fluorescence = {(0, 0): 190}
self.measurements.bacteria[2].integrated_fluorescence = {(0, 0): 3040}
self.measurements.bacteria[2].output_line = []
self.measurements.bacteria[2].timepoints = [1]
self.measurements.bacteria[2].num_fluo = 0
self.measurements.bacteria[2].headings_line = [
'well label',
'lineage',
'area',
'length',
'width',
'raw_fluorescence',
'fluorescence',
'integrated_fluorescence']
self.measurements.bacteria[2].measurements_output = [
self.well_label, '2', 16, 4.898979485566356, 4.0,
200.0, 190.0, 3040.0]
self.lbl_dict = {1: '1', 2: '2'}
self.timepoint = 0
self.fluo_values = [(10, 1), ]
def test_bf_measurements(self):
"""
        Tests the measuring of bacteria brightfield measurements
"""
measurements = BacteriaData()
for region in regionprops(self.bacteria_labels):
measurements.add_bac_data(
region.label, self.lbl_dict, region, self.timepoint,
well_label=self.well_label)
for bac_num, actual_data in measurements.bacteria.items():
manual_data = self.measurements.bacteria[bac_num]
for meas, actual_val in actual_data.bf_measurements.items():
manual_val = manual_data.bf_measurements[meas]
self.assertTrue((manual_val[0] - 0.5) <=
actual_val[0] <= (manual_val[0] + 0.5))
def test_fluo_measurements(self):
"""
Tests the measuring of bacteria fluorescence measurements
"""
measurements = BacteriaData()
for region in regionprops(self.bacteria_labels):
measurements.add_bac_data(
region.label, self.lbl_dict, region, self.timepoint)
measurements.measure_fluo(
region, self.bacteria_fluo, self.fluo_values, self.timepoint)
for bac_num, actual_data in measurements.bacteria.items():
manual_data = self.measurements.bacteria[bac_num]
self.assertTrue((manual_data.raw_fluorescence) ==
(actual_data.raw_fluorescence))
self.assertTrue((manual_data.actual_fluorescence)
== (actual_data.actual_fluorescence))
self.assertTrue((manual_data.integrated_fluorescence)
== (actual_data.integrated_fluorescence))
def test_measurements_output(self):
"""
Tests the measurements can be compiled properly for output
"""
measurements = BacteriaData()
for region in regionprops(self.bacteria_labels):
measurements.add_bac_data(
region.label, self.lbl_dict, region, self.timepoint,
well_label=self.well_label)
measurements.measure_fluo(
region, self.bacteria_fluo, self.fluo_values, self.timepoint)
measurements.compile_results(max_tpoint=1)
for bac_num, actual_data in measurements.bacteria.items():
manual_data = self.measurements.bacteria[bac_num]
# Couldn't use assertListEqual here
# because of floating-point inequality
self.assertEqual(
len(manual_data.measurements_output),
len(actual_data.measurements_output))
for item1, item2 in zip(
manual_data.measurements_output,
actual_data.measurements_output):
self.assertAlmostEqual(item1, item2)
self.assertListEqual(
manual_data.headings_line,
actual_data.headings_line)
class TestFluoMeasurements(unittest.TestCase):
"""
Class for testing measuring fluorescence
"""
def setUp(self):
self.bacteria_labels = np.array([
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3],
[1, 1, 1, 0, 0, 0, 0, 0, 0, 3, 3, 3],
[1, 1, 1, 0, 0, 0, 0, 0, 0, 3, 3, 3],
[1, 1, 1, 0, 0, 0, 0, 0, 0, 3, 3, 3],
[1, 1, 1, 0, 0, 0, 0, 0, 0, 3, 3, 3],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3],
[0, 0, 0, 2, 2, 2, 0, 0, 0, 3, 3, 3],
[0, 0, 0, 2, 2, 2, 0, 0, 0, 3, 3, 3],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
self.fluo_image = np.array(
[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 3000, 3000, 3000],
[2000, 2000, 2000, 0, 0, 0, 0, 0, 0, 3000, 3000, 3000],
[2100, 2100, 2100, 0, 0, 0, 0, 0, 0, 3800, 3800, 3800],
[2100, 2100, 2100, 0, 0, 0, 0, 0, 0, 3000, 3000, 3000],
[2200, 2200, 2200, 0, 0, 0, 0, 0, 0, 3000, 3000, 3000],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 3000, 3000, 3000],
[0, 0, 0, 3200, 3200, 3200, 0, 0, 0, 3000, 3000, 3000],
[0, 0, 0, 3300, 3300, 3300, 0, 0, 0, 3000, 3000, 3000],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
self.bkground = 100
self.bkg_sem = 1
self.fluo_results = [2100, 2000, 24000]
self.fluo_results2 = [3250, 3150, 18900]
self.fluo_results3 = [3100, 3000, 72000]
self.fluo_measurements = [self.fluo_results,
self.fluo_results2, self.fluo_results3]
def test_fluorescentmeasurements(self):
"""
Tests the function fluorescence_measurements
"""
fluo_output = []
for region in regionprops(self.bacteria_labels):
fluo, background_fluo, integrated_fluorescence = \
mmeas.fluorescence_measurements(
region, self.fluo_image, (self.bkground, self.bkg_sem))
fluo_output.append(
[fluo, background_fluo, integrated_fluorescence])
self.assertEqual(self.fluo_measurements, fluo_output)
class TestFluorescenceBackground(unittest.TestCase):
"""
Class for testing the measurements of fluorescence background
"""
def setUp(self):
self.wells = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0, 0, 3, 3, 3],
[1, 1, 1, 0, 0, 2, 0, 0, 0, 3, 3, 3],
[1, 1, 1, 0, 2, 2, 2, 0, 0, 3, 3, 3],
[1, 1, 1, 0, 2, 2, 2, 0, 0, 3, 3, 3],
[1, 1, 1, 0, 2, 2, 2, 0, 0, 3, 3, 3]])
self.bact = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 3, 0, 0, 0, 0, 4, 4],
[0, 0, 0, 0, 3, 3, 3, 0, 0, 0, 4, 4],
[0, 2, 0, 0, 0, 3, 0, 0, 0, 0, 4, 4],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 4]])
self.fluo_image = np.array(
[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[100, 100, 100, 0, 0, 0, 0, 0, 0, 300, 300, 300],
[100, 100, 100, 0, 0, 200, 0, 0, 0, 300, 300, 300],
[100, 100, 100, 0, 200, 200, 200, 0, 0, 300, 300, 300],
[100, 100, 100, 0, 200, 200, 200, 0, 0, 300, 300, 300],
[100, 100, 100, 0, 200, 200, 200, 0, 0, 300, 300, 300]])
# Background sum should be 13*100 + 5*200 + 7*300 = 4400
# Mean = 4400 / 25 = 176
self.background = 176
def test_background_fluorescence(self):
"""
Tests the function for determining the fluorescence_background
"""
bkground = mmeas.fluorescence_background(
self.wells,
self.bact,
self.fluo_image,
)[0]
self.assertEqual(bkground, self.background)
if __name__ == '__main__':
unittest.main()
```
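The expected background of 176 in `TestFluorescenceBackground` comes from averaging the fluorescence image over well pixels that are not covered by bacteria (13*100 + 5*200 + 7*300 = 4400 over 25 pixels). Below is a standalone sketch of that masking arithmetic on a smaller toy example; it only illustrates the computation the test expects and is not the actual `mmeas.fluorescence_background` implementation.

```python
import numpy as np

wells = np.array([[1, 1, 0, 2, 2],
                  [1, 1, 0, 2, 2]])
bacteria = np.array([[0, 3, 0, 0, 0],
                     [0, 0, 0, 4, 0]])
fluo = np.array([[100, 100, 0, 300, 300],
                 [100, 100, 0, 300, 300]])

# Background pixels: inside a well but not covered by a bacterium.
background_mask = (wells > 0) & (bacteria == 0)
background_mean = fluo[background_mask].mean()
print(background_mean)  # (3*100 + 3*300) / 6 = 200.0
```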
#### File: mmhelper/tests/test_plot.py
```python
import unittest
from mmhelper import plot
import numpy as np
class TestViewStack(unittest.TestCase):
"""
Class for testing view_stack
"""
def setUp(self):
self.t0_ = 100
self.w0_ = 100
self.h0_ = 100
self.input_none = None
self.input_empty = np.array(None)
self.input_zeros = np.zeros((self.t0_, self.w0_, self.h0_))
plot.plt.switch_backend("agg")
def tearDown(self):
# Close any opened figures (that might have remained from a failed
# call)
plot.plt.close("all")
def test_none_input(self):
"""
Tests response to none input
"""
self.assertRaises(TypeError, plot.view_stack, self.input_none)
def test_empty_input(self):
"""
Tests response to empty input
"""
self.assertRaises(IndexError, plot.view_stack, self.input_empty)
def test_zeros_input(self):
"""
Tests response to input of zeros
"""
plot.view_stack(self.input_zeros)
figs = plot.plt.get_fignums()
self.assertEqual(len(figs), 1)
fig = plot.plt.gcf()
# Make sure the current figure has 1 axes
axes = fig.get_axes()
self.assertEqual(len(axes), 1)
# Make sure the current figure's axes has 1 image
ax0 = axes[0]
ims = ax0.get_images()
self.assertEqual(len(ims), 1)
``` |
{
"source": "jmetzz/coffee-chatbot",
"score": 2
} |
#### File: jmetzz/coffee-chatbot/build.py
```python
from pybuilder.core import use_plugin, init
use_plugin("python.core")
use_plugin("python.unittest")
use_plugin("python.install_dependencies")
use_plugin("python.flake8")
use_plugin("python.coverage")
name = "ActionServerPybuilder"
default_task = ['install_dependencies', 'analyze', 'publish']
@init
def set_properties(project):
project.build_depends_on('tblib')
project.build_depends_on('mockito')
project.build_depends_on('parameterized')
project.build_depends_on('responses')
@init
def initialize_flake8_plugin(project):
project.build_depends_on("flake8")
project.set_property('unittest_module_glob', 'test_*')
project.set_property("flake8_verbose_output", True)
project.set_property("flake8_break_build", True)
project.set_property("flake8_max_line_length", 120)
project.set_property("flake8_exclude_patterns", None)
project.set_property("flake8_include_test_sources", False)
project.set_property("flake8_include_scripts", False)
@init
def initialize_coverage_plugin(project):
project.set_property('coverage_break_build', False)
project.set_property('coverage_threshold_warn', 80)
# for now, code coverage does not break the build
    # since this is Python, a scripted language, you should aim for 100% coverage!
project.set_property("coverage_exceptions", ['endpoint'])
``` |
{
"source": "jmetzz/ml-fundamentals",
"score": 4
} |
#### File: src/clustering/kmeans.py
```python
import pprint as pp
import random
from utils import data_helper
from common import distance
from clustering.clustering import BaseClustering
"""K-means is often referred to as Lloyd’s algorithm. In basic terms, the algorithm has three steps. The first step chooses the initial centroids, with the most basic method being to choose k samples from the dataset X. After initialization, K-means consists of looping between the two other steps. The first step assigns each sample to its nearest centroid. The second step creates new centroids by taking the mean value of all of the samples assigned to each previous centroid. The difference between the old and the new centroids are computed and the algorithm repeats these last two steps until this value is less than a threshold. In other words, it repeats until the centroids do not move significantly.
Given enough time, K-means will always converge, however this may be to a local minimum. This is highly dependent on the initialization of the centroids. As a result, the computation is often done several times, with different initializations of the centroids.
Given the inputs x1,x2,x3,…,xn and value of K
Step 1 - Pick K random points as cluster centers called centroids.
Step 2 - Assign each xi_i to nearest cluster by calculating its distance to each centroid.
Step 3 - Find new cluster center by taking the average of the assigned points.
Step 4 - Repeat Step 2 and 3 until none of the cluster assignments change.
"""
class KMeans(BaseClustering):
def __init__(self, data, function_dist=distance.euclidean):
super().__init__(data)
self.function_dist = function_dist
def build(self, k=3):
        clusters = [0] * self.size  # one cluster assignment per instance
prev_centroids = [[-1] * self.num_features] * k
new_centroids = self._initial_centroids(k, self.data)
while self._centroids_changed(prev_centroids, new_centroids):
clusters = self._find_clusters(self.data, new_centroids)
prev_centroids = new_centroids
new_centroids = BaseClustering.calculate_centroids(self.data, k, clusters)
self._update_model(new_centroids, clusters)
def _find_clusters(self, data, centroids, function_dist=distance.euclidean):
"""Distribute the instances in the clusters represented by the centroids
:param data: the dataset of instances
:param centroids: the centroid vectors with the same structure as the instances in the dataset
:param function_dist: the function used to calculate the distance between two instances
:return: a list representing the cluster assignment for each instance in the dataset
"""
clusters = [0] * self.size
for idx in range(self.size):
distances = self._dist(data[idx], centroids)
cluster = distances.index(min(distances))
clusters[idx] = cluster
return clusters
def _dist(self, instance, centroids):
ncol = len(instance)
return [self.function_dist(instance, c, ncol) for c in centroids]
@staticmethod
def _centroids_changed(prev_centroids, new_centroids):
for i in range(len(prev_centroids)):
for z1, z2 in zip(prev_centroids[i], new_centroids[i]):
if z1 != z2:
return True
return False
@staticmethod
def _initial_centroids(k, data):
indexes = set(random.sample(range(0, len(data)), k))
return [data[i] for i in indexes]
    def _update_model(self, centroids, clusters):
        self.centroids = centroids
        self.clusters = {c: [] for c in set(clusters)}
for e in range(len(self.data)):
self.clusters.get(clusters[e]).append(e)
# def dist(instance, centroids, function_dist=distance.euclidean):
# ncol = len(instance)
# return [function_dist(instance, c, ncol) for c in centroids]
#
# def find_clusters(data, centroids, function_dist=distance.euclidean):
# """Distribute the instances in the clusters represented by the centroids
#
# :param data: the dataset of instances
# :param centroids: the centroid vectors with the same structure as the instances in the dataset
# :param function_dist: the function used to calculate the distance between two instances
# :return: a list representing the cluster assignment for each instance in the dataset
# """
# clusters = [0] * len(data)
# for idx in range(len(data)):
# distances = dist(data[idx], centroids)
# cluster = distances.index(min(distances))
# clusters[idx] = cluster
# return clusters
#
# def mean(points):
# # TODO - Improvements on readability and performance:
# # by using vectorization and matrix multiplication formula, we have:
# # [\mathbf{1}^T\mathbf{M}]_j= \sum_i \mathbf{1}_i \mathbf{M}_{i,j} =\sum_i \mathbf{M}_{i,j}.
# # Hence, the column-wise sum of \mathbf{M} is \mathbf{1}^T\mathbf{M}.
# ncols = len(points[0])
# m = [0] * ncols
# for col in range(ncols):
# for p in points:
# m[col] += p[col]
# m[col] = m[col] / len(points)
#
# return m
#
#
# def calculate_centroids(data, k, clusters):
# centroids = [0.0] * k
# for c in range(k):
# points = [data[j] for j in range(len(data)) if clusters[j] == c]
# centroids[c] = mean(points)
# return centroids
#
#
# def centroids_changed(prev_centroids, new_centroids):
# for i in range(len(prev_centroids)):
# for z1, z2 in zip(prev_centroids[i], new_centroids[i]):
# if z1 != z2:
# return True
# return False
def report(data, new_centroids, clusters):
print("\nCentroids:")
print(new_centroids)
print("clusters: [Clusters | instance]")
map = {c: [] for c in set(clusters)}
for e in range(len(data)):
map.get(clusters[e]).append(data[e])
pp.pprint(map)
def main():
    # Exercise the KMeans class on the toy dataset (the module-level helpers
    # above are kept only as commented-out reference).
    data = data_helper.toy_unlabeled_dataset
    model = KMeans(data)
    model.build(k=3)
    print("\nCentroids:")
    print(model.centroids)
    print("clusters: [cluster | instance indexes]")
    pp.pprint(model.clusters)
if __name__ == '__main__':
main()
```
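The TODO in the commented-out `mean` helper hints at a vectorized alternative: the column-wise mean of the member points is exactly what NumPy computes with `mean(axis=0)`. A minimal sketch of the centroid-update step under that assumption (toy arrays, not the `data_helper` dataset):

```python
import numpy as np

data = np.array([[1.0, 2.0],
                 [1.5, 1.8],
                 [8.0, 8.0],
                 [9.0, 11.0]])
clusters = np.array([0, 0, 1, 1])   # cluster assignment per instance

k = 2
# For each cluster, average the rows assigned to it (column-wise mean).
centroids = np.array([data[clusters == c].mean(axis=0) for c in range(k)])
print(centroids)   # [[1.25, 1.9], [8.5, 9.5]]
```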
#### File: src/common/entropy.py
```python
import fileinput
import string
from math import log
def _identity(value): return value
def range_bytes(): return range(256)
def range_binary(): return range(2)
def range_printable(): return (ord(c) for c in string.printable)
def H(data, iterator=range_bytes, convert=_identity, base=2):
if not data:
return 0
entropy = 0
for x in iterator():
p_x = float(data.count(convert(x))) / len(data)
if p_x > 0:
# entropy += - p_x * log(p_x, _resolve_base(data))
entropy += - p_x * log(p_x, base)
return entropy
def shannon_entropy(data):
return H(data, range_binary)
def _resolve_base(data):
base = len(set(data))
if base < 2:
base = 2
return base
def column(matrix, i):
return [row[i] for row in matrix]
def main():
for row in fileinput.input():
        string_input = row.rstrip('\n')
        print("%s: %f" % (string_input, H(string_input, range_printable, chr)))
# Tests:
    for text in ['gargleblaster', 'tripleee', 'magnus', 'lkjasdlk',
                 'aaaaaaaa', 'sadfasdfasdf', '7&wS/p(', 'aabb']:
        print("%s: %f" % (text, H(text, range_printable, chr)))
# str_data = ''.join(str(e) for e in column(dh.toy_labeled_dataset, 2))
# print("%s: %f" %("dataset", H(column(dh.toy_labeled_dataset, 2), range_binary)))
# dataset1 = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
# dataset2 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
# print("%s: %f" % ("dataset 1", H(dataset1, range_binary)))
# print("%s: %f" % ("dataset 2", H(dataset2, range_binary)))
# print("%s: %f" % ("dataset 1", shannon_entropy(dataset1)))
# print("%s: %f" % ("dataset 2", shannon_entropy(dataset2)))
``` |
{
"source": "jmetzz/ml-laboratory",
"score": 3
} |
#### File: notebooks/numpy/performance_test.py
```python
import numpy as np
# Create an array with 10^7 elements
arr = np.arange(1e7)
# Converting ndarray to list
larr = arr.tolist()
def list_times(alist, scalar):
return [val * scalar for val in alist]
# Using IPython's %timeit magic command:
# %timeit arr * 1.1
# %timeit list_times(larr, 1.1)
```
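The commented `%timeit` lines above only work inside IPython. Below is a minimal sketch of the same comparison using the standard-library `timeit` module; the smaller array size and the repetition count are arbitrary choices to keep the runtime modest.

```python
import timeit

import numpy as np

arr = np.arange(1e6)
larr = arr.tolist()

# Time the vectorized NumPy multiplication against a plain list comprehension.
numpy_time = timeit.timeit(lambda: arr * 1.1, number=100)
list_time = timeit.timeit(lambda: [val * 1.1 for val in larr], number=100)
print(f"numpy: {numpy_time:.3f}s  list: {list_time:.3f}s")
```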
#### File: notebooks/stocks-recommendation/data_generator.py
```python
import csv
import numpy as np
import pandas as pd
NUM_OF_PEOPLE = 100
NUMBER_OF_STOCKS = 3
np.random.seed(444)
# age, gender, number of children
user_Df = pd.DataFrame(index=range(0, NUM_OF_PEOPLE))
number_of_children = np.random.normal(1, 1, 2 * NUM_OF_PEOPLE)
realistic_n_of_children = [round(child) for child in number_of_children if (child > -0.5)]
random_double_ages = np.random.normal(30, 10, 2 * NUM_OF_PEOPLE)
limited_int_ages = [round(age) for age in random_double_ages if (age > 18) and (age < 65)]
gender = ["M", "F"]
user_Df["gender"] = [gender[np.random.randint(0, 2)] for _ in range(len(user_Df.index))]
user_Df["age"] = [limited_int_ages[i] for i in range(len(user_Df.index))]
user_Df["numberOfChildren"] = [realistic_n_of_children[i] for i in range(len(user_Df.index))]
"""
Open the stocks CSV file and assign the labels and stocks lists
"""
# get CSV into useful format
with open("stocks.csv") as csvfile:
reader = csv.reader(csvfile, delimiter=";")
stocks = []
for index, row in enumerate(reader):
if index == 0:
labels = row[2:-3]
else:
specific_trends = []
for tr in range(2, len(row) - 3):
if row[tr] == "yes":
specific_trends.append(labels[tr - 2])
stocks.append({"name": row[0], "tag": row[1], "trends": specific_trends})
trends = []
for label in labels:
stocksArr = []
for stock in stocks:
if label in stock["trends"]:
stocksArr.append(stock["tag"])
trends.append({"name": label, "stocks": stocksArr})
# Assign top 3 of liked and disliked trends
def find_trends(trends, number_of_stocks, young_person):
liked_trends = []
disliked_trends = []
trends_copy = trends.copy()
for _ in range(0, min(number_of_stocks, len(trends_copy))):
liked_trends.append(trends_copy.pop(np.random.randint(0, len(trends_copy)))["name"])
for _ in range(0, min(number_of_stocks, len(trends_copy))):
disliked_trends.append(trends_copy.pop(np.random.randint(0, len(trends_copy)))["name"])
trend_arr = []
for trend in trends:
if trend["name"] in liked_trends:
trend_arr.append(1)
elif trend["name"] in disliked_trends:
trend_arr.append(-1)
else:
trend_arr.append(0)
# totally legit thing to hard-code here
if young_person and (np.random.randint(0, 10) > 3):
trend_arr[0] = 1
trend_arr[3] = 1
return trend_arr
users = []
for i in range(0, len(user_Df.index)):
trends_arr = []
number_of_stocks = NUMBER_OF_STOCKS
is_young_user = user_Df["age"][i] < 30
users.append(find_trends(trends, number_of_stocks, is_young_user))
users_transposed = []
for index in range(0, len(labels)):
new_line = []
for userId in range(0, len(users)):
new_line.append(users[userId][index])
users_transposed.append(new_line)
trend_columns = {}
for index in range(0, len(labels)):
trend_columns[labels[index]] = users_transposed[index]
trend_Df = pd.DataFrame(data=trend_columns)
complete_Df = user_Df.merge(trend_Df, left_index=True, right_index=True)
# print(completeDf)
# write to CSV
complete_Df.to_csv("./output.csv", sep=",", index=False)
"""
Assigning stocks to users based on preferences
"""
tags = [stock["tag"] for stock in stocks]
portfolio = pd.DataFrame(index=range(0, NUM_OF_PEOPLE))
for stock in stocks:
portfolio[stock["tag"]] = [0 for _ in range(0, len(portfolio.index))]
def make_stock_list(user_id):
rest = 100
amount_of_stocks = np.random.randint(1, 10)
list_of_percentages = []
while rest > 0 and len(list_of_percentages) < amount_of_stocks:
portfolio_percentage = np.random.randint(1, 1 + rest)
rest = rest - portfolio_percentage
minimum = min(portfolio_percentage, rest)
if minimum == 0:
minimum = 5
list_of_percentages.append(minimum)
if sum(list_of_percentages) < 100:
list_of_percentages.append(100 - sum(list_of_percentages))
sorted_percentages = sorted(list_of_percentages, reverse=True)
# traverse labels
liked = []
disliked = []
neutral = []
for label in labels:
if complete_Df[label][user_id] == 1:
liked.append(label)
elif complete_Df[label][user_id] == -1:
disliked.append(label)
else:
neutral.append(label)
trends_bought = []
for i in range(0, min(len(liked), round(len(sorted_percentages) / 2))):
trends_bought.append(liked[np.random.randint(0, len(liked))])
difference = len(sorted_percentages) - len(trends_bought)
for i in range(0, difference):
trends_bought.append(neutral[np.random.randint(0, len(neutral))])
stocks_bought = []
for i in range(0, len(trends_bought)):
for trend in trends:
if trend["name"] == trends_bought[i]:
matching_trend = trend
break
stocks_bought.append(matching_trend["stocks"][np.random.randint(0, len(matching_trend["stocks"]))])
stocks_bought_sorted = [0] * len(stocks)
for idx in range(0, len(stocks_bought)):
stocks_bought_sorted[tags.index(stocks_bought[idx])] = 1
return stocks_bought_sorted
stocks_Df = pd.DataFrame(columns=tags, index=range(0, NUM_OF_PEOPLE))
for index in range(0, NUM_OF_PEOPLE):
stocks_Df.loc[index] = make_stock_list(index)
stocks_Df.to_csv("./portfolios.csv", sep=",", index=False)
```
#### File: src/common/preprocessing.py
```python
import numpy as np
class Normalizer:
"""
Divide each feature by its 2-norm
So that the transformed feature has norm 1.
"""
def __init__(self):
self._norms = None
def fit(self, features: np.ndarray):
self._norms = np.linalg.norm(features, axis=0)
return self
def transform(self, features: np.ndarray) -> np.ndarray:
if self._norms is None:
raise ValueError("Model not initialized. Call fit before.")
return features / self._norms
```
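A short usage sketch for `Normalizer` above: after `fit`/`transform`, every column of the transformed matrix has unit 2-norm. The import path is assumed from the `src/common` layout shown in the file header.

```python
import numpy as np

from common.preprocessing import Normalizer  # import path assumed from this repo's src/ layout

features = np.array([[3.0, 0.0],
                     [4.0, 2.0]])
# fit returns self, so the calls can be chained.
normalized = Normalizer().fit(features).transform(features)
print(np.linalg.norm(normalized, axis=0))  # [1. 1.]
```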
#### File: src/ensemble/adaboost.py
```python
import numpy as np
from sklearn.datasets import make_moons
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from ensemble.ensemble_utils import Sampling
from utils import datasets
class AdaBoostM1Classifier:
"""The original AdaBoost algorithm
    This algorithm was proposed by Freund and Schapire (1996). There are many
    variations of it. However, this implementation refers to the original
    AdaBoost.M1 algorithm,
which targets binary classification problems, where the class labels are either +1 or -1.
Moreover, it uses decision trees as the weak base-learner algorithm.
By default, decision trees with only one decision node, also known
as Decision Stump (see https://en.wikipedia.org/wiki/Decision_stump).
"""
def __init__(self, rounds, criterion="gini", max_depth=1, seed=31):
self.model = dict()
self.model["rounds"] = rounds
self.classifiers = [
DecisionTreeClassifier(criterion=criterion, max_depth=max_depth, random_state=seed) for _ in range(rounds)
]
def fit(self, instances, true_labels):
"""Build a binary classification ensemble model based on AdaBoost.M1 algorithm"""
label_values = np.unique(true_labels)
if label_values.size != 2:
raise ValueError(f"Given a non-binary dataset with {label_values.size} labels")
self.model["label_map"] = {label_values[0]: -1, label_values[1]: 1}
self.model["label_map_inverse"] = {-1: label_values[0], 1: label_values[1]}
transformed_labels = [self.model["label_map"][k] for k in true_labels]
self._initialize_weights(len(instances))
# alphas_r represents the importance of the
# classifier built at round r
self.model["alphas"] = np.zeros((self.model["rounds"], 1))
self.model["errors"] = np.zeros((self.model["rounds"], 1))
for step in range(self.model["rounds"]):
while True:
features, labels = Sampling.subsample(instances, transformed_labels, self.model["weights"])
self.classifiers[step].fit(features, labels)
predictions = self.classifiers[step].predict(instances)
# store the current prediction in a cache to be used for weights update
self.model["prediction_cache"] = list(zip(transformed_labels, predictions))
error = self.calculate_error(self.model["prediction_cache"], self.model["weights"])
# remember: it is binary classification, the weak classifier should be
# better than random.
if error > 0.5:
self._reset_weights()
# ignore the current classifier and try build another one
# using a new sample data
else:
self.model["errors"][step] = error
self.model["alphas"][step] = alpha = self._calculate_alpha(error)
self.model["weights"] = self._update_weights(
alpha, self.model["prediction_cache"], self.model["weights"]
)
break
def predict(self, instances):
predictions = np.array([m.predict(instances) for m in self.classifiers])
predictions = np.sign(np.sum(predictions * self.model["alphas"], axis=0))
return [self.model["label_map_inverse"][y_hat] for y_hat in predictions]
def _initialize_weights(self, size):
self.model["weights"] = np.full((size, 1), 1 / size)
def _reset_weights(self):
self._initialize_weights(len(self.model["weights"]))
@staticmethod
def _update_weights(alpha, prediction_pairs, weights):
signed_alphas = alpha * np.array(
[1 if int(y_hat != y) else -1 for _, (y, y_hat) in enumerate(prediction_pairs)]
).reshape((-1, 1))
new_weights = weights * np.exp(signed_alphas)
normalization_factor = np.sum(new_weights)
return new_weights / normalization_factor
@staticmethod
def calculate_error(prediction_pairs, weights):
        # Weighted misclassification rate: the weights are kept normalized
        # (they sum to one), so the weighted sum is already the error rate.
        error = sum(weights[i] * int(y != y_hat)
                    for i, (y, y_hat) in enumerate(prediction_pairs))
        return float(error)
@staticmethod
def _calculate_alpha(error):
return 1 / 2 * np.log((1 - error) / error)
if __name__ == "__main__":
# Debug Test with toy 1-dimension dataset
train_features = datasets.TOY_1D[:, :-1]
train_labels = datasets.TOY_1D[:, -1]
boosting = AdaBoostM1Classifier(3)
boosting.fit(train_features, train_labels)
prediction = boosting.predict(train_features)
    acc = sum(int(pred == true) for (pred, true) in zip(prediction, train_labels)) / len(train_labels)
print(f"Acc = {acc:.2f}")
moon_features, moon_labels = make_moons(n_samples=160, noise=0.3, random_state=101)
train_features, test_features, train_labels, test_labels = train_test_split(
moon_features, moon_labels, test_size=0.33, random_state=101
)
boosting = AdaBoostM1Classifier(5)
boosting.fit(train_features, train_labels)
prediction = boosting.predict(test_features)
print(prediction)
    acc = sum(int(pred == true) for (pred, true) in zip(prediction, test_labels)) / len(test_labels)
print(f"Acc = {acc:.2f}")
```
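To make the `_calculate_alpha` and `_update_weights` steps above concrete, here is a hand-worked round with made-up numbers: a weighted error of 0.25 gives alpha = 0.5 * ln(0.75 / 0.25) ≈ 0.549; misclassified instances are scaled by e^alpha, correctly classified ones by e^(-alpha), and the weights are renormalized, so the single misclassified instance ends up carrying half of the total weight.

```python
import numpy as np

weights = np.full((4, 1), 0.25)                   # uniform start, as in _initialize_weights
misclassified = np.array([[1], [0], [0], [0]])    # only the first instance is wrong

error = float(np.sum(weights * misclassified))    # 0.25
alpha = 0.5 * np.log((1 - error) / error)         # ~0.549

# Same sign convention as _update_weights: +alpha for mistakes, -alpha otherwise.
new_weights = weights * np.exp(alpha * np.where(misclassified == 1, 1, -1))
new_weights /= new_weights.sum()                  # renormalize
print(alpha, new_weights.ravel())                 # the wrong instance now carries weight 0.5
```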
#### File: src/ensemble/bagging.py
```python
import numpy as np
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from ensemble.ensemble_utils import Sampling
class BaggingClassifier:
def __init__(self, size):
self.size = size
self.models = [DecisionTreeClassifier() for _ in range(size)]
def fit(self, instances, true_labels):
for idx in range(self.size):
features, labels = Sampling.subsample(instances, true_labels, 1.0)
self.models[idx].fit(features, labels)
def predict(self, instances):
ensemble_predictions = np.array([m.predict(instances) for m in self.models])
num_instances = len(instances)
predictions = np.zeros((num_instances,), dtype=int)
for idx in range(num_instances):
labels, votes = np.unique(ensemble_predictions[:, idx], return_counts=True)
predictions[idx] = labels[np.argmax(votes)]
return predictions
if __name__ == "__main__":
full_features_set, full_labels_set = load_iris(return_X_y=True)
x_train, x_test, y_train, y_test = train_test_split(
full_features_set, full_labels_set, test_size=0.33, random_state=42
)
bagging = BaggingClassifier(5)
bagging.fit(x_train, y_train)
prediction = bagging.predict(x_test)
    acc = sum(int(pred == true) for (pred, true) in zip(prediction, y_test)) / len(y_test)
print(f"Acc = {acc:.2f}")
```
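`Sampling.subsample` is imported from `ensemble_utils` and is not shown in this file. The core idea it serves in bagging is bootstrap sampling, i.e. drawing instances with replacement; the sketch below is an assumption about what such a helper could look like, not the actual `ensemble_utils` implementation.

```python
import numpy as np

def bootstrap_sample(features, labels, ratio=1.0, rng=None):
    """Draw round(ratio * n) instances with replacement (hypothetical helper)."""
    rng = rng or np.random.default_rng()
    n_samples = round(len(features) * ratio)
    indices = rng.integers(0, len(features), size=n_samples)
    # features is assumed to be a numpy array so fancy indexing works.
    return features[indices], np.asarray(labels)[indices]

# usage sketch:
# x_boot, y_boot = bootstrap_sample(x_train, y_train, ratio=1.0)
```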
#### File: neural_networks/base/networks.py
```python
import json
import random
from pathlib import Path
from typing import Any, Iterator, Sequence
import numpy as np
from neural_networks.base.activations import relu, sigmoid
from neural_networks.base.costs import CrossEntropyCost, QuadraticCost
from neural_networks.base.initializers import random_gaussian, sqrt_connections_ratio
from utils import data_helper
class NetworkBase:
def __init__(self, sizes: list[int]):
"""
The biases and weights in the Network object are all
initialized randomly, using the Numpy np.random.randn function
to generate Gaussian distributions with mean 0 and standard deviation 1.
Assumes that the first layer of neurons is an input layer.
:param sizes: the number of neurons in the respective layers.
Example:
>>> net = SimpleNetwork([2, 3, 1])
"""
self.num_layers = len(sizes)
self.layer_sizes = sizes
self.weights, self.biases = [], []
    def update_batch(self, *args, **kwargs):
raise NotImplementedError("This method must be implemented in a specialization class")
def cost_name(self):
raise NotImplementedError("This method must be implemented in a specialization class")
def load(self, filename):
raise NotImplementedError("This method must be implemented in a specialization class")
def back_propagate(self, x: np.ndarray, y: float) -> tuple[list[np.ndarray], list[np.ndarray]]:
"""Pass x through the network and back to calculate the gradient.
:param x: the test example to be classified
:param y: the true label (as an index of the neuron in the output layer
:return: the gradient for the cost function
as a tuple (g_biases, g_weights), where the elements
of the tuple are layer-by-layer lists of numpy arrays.
"""
biases_by_layers = [np.zeros(b.shape) for b in self.biases]
weights_by_layers = [np.zeros(w.shape) for w in self.weights]
# 1- feedforward
# the input, x, is the activation of the first layer
activation = x
activations = [x] # list to store all the activations, layer by layer
z_vectors_by_layer = [] # list to store all the z vectors, layer by layer
for b, w in zip(self.biases, self.weights):
z = np.dot(w, activation) + b
z_vectors_by_layer.append(z)
activation = sigmoid(z)
activations.append(activation)
# 2- backward pass
delta = self.calculate_delta(activations, z_vectors_by_layer, y)
biases_by_layers[-1] = delta
weights_by_layers[-1] = np.dot(delta, activations[-2].transpose())
        # Since Python allows negative indexing, we use it to
        # iterate backwards over the network layers.
# Note that layer = 1 means the last layer of neurons,
# layer = 2 is the second-last layer, and so on.
for layer in range(2, self.num_layers):
z = z_vectors_by_layer[-layer]
sp = self.sigmoid_derivative(z)
delta = np.dot(self.weights[-layer + 1].transpose(), delta) * sp
biases_by_layers[-layer] = delta
weights_by_layers[-layer] = np.dot(delta, activations[-layer - 1].transpose())
return biases_by_layers, weights_by_layers
def feed_forward(self, input: np.ndarray) -> np.ndarray:
"""Pass the input through the network and return it's output.
It is assumed that the input a is an (n, 1) Numpy ndarray,
not a (n,) vector
"""
for b, w in zip(self.biases, self.weights):
input = sigmoid(np.dot(w, input) + b)
return input
def calculate_delta(self, activations, z_vectors_by_layer, y):
return QuadraticCost.derivative(activations[-1], y) * self.sigmoid_derivative(z_vectors_by_layer[-1])
@staticmethod
def sigmoid_derivative(z):
"""Derivative of the sigmoid function.
This function computes the gradient (also called the slope) of
        the sigmoid function with respect to its input z.
        It is defined as:
            \sigma'(z) = \sigma(z) (1 - \sigma(z))
:param z: a scalar or numpy array
:return the gradient value
"""
sigmoid_value = sigmoid(z)
return sigmoid_value * (1 - sigmoid_value)
def export(self, filename):
"""Save the neural network to the file ``filename``."""
data = {
"sizes": self.layer_sizes,
"weights": [w.tolist() for w in self.weights],
"biases": [b.tolist() for b in self.biases],
"cost": str(self.cost_name()),
}
with Path(filename).open(mode="w") as file_handler:
json.dump(data, file_handler)
class SimpleNetwork(NetworkBase):
def __init__(self, sizes: list[int]):
super().__init__(sizes)
self.weights, self.biases = random_gaussian(sizes)
def cost_name(self):
return "Quadratic cost"
def load(self, filename):
raise NotImplementedError(f"TODO: implement load method in {self.__name__}")
def sdg(
self,
training_data: Iterator[tuple[np.ndarray, np.ndarray]],
epochs: int,
eta: float = 0.01,
batch_size: int = 100,
test_data: Iterator[tuple[np.ndarray, Any]] = None,
debug=False,
) -> None:
"""Train the neural network using mini-batch stochastic gradient descent.
:param training_data: list of tuples (X, y)
:param epochs: number of epochs to run
:param eta: learning rate
:param batch_size: the size of the batch to use per iteration
:param test_data: is provided then the network will be evaluated against
the test data after each epoch, and partial progress printed out.
:param debug: prints extra information
:return:
        Estimates the gradient ∇C that minimizes the cost function
        by computing ∇C_x for a small sample
of randomly chosen training inputs. By averaging over this
small sample it turns out that we can quickly get a
good estimate of the true gradient ∇C.
The update rule is:
            w_k \rightarrow w_k' = w_k - \frac{\eta}{m} \sum_j \frac{\partial C_{X_j}}{\partial w_k}
            b_l \rightarrow b_l' = b_l - \frac{\eta}{m} \sum_j \frac{\partial C_{X_j}}{\partial b_l}
        where the sums are over all the training examples X_j in
the current mini-batch.
"""
train_data = list(training_data)
n = len(train_data)
if test_data:
test_data = list(test_data)
n_test = len(test_data)
if debug:
self._print_configuration(epochs, batch_size, eta, n, n_test)
for j in range(epochs):
random.shuffle(train_data)
batches = [train_data[k : k + batch_size] for k in range(0, n, batch_size)]
for batch in batches:
self.update_batch(batch, eta)
if test_data:
evaluation = self.evaluate(test_data)
print(f"Epoch {j}: {evaluation} / {n_test}")
else:
print(f"Epoch {j} complete")
def update_batch(self, mini_batch: list[tuple[np.ndarray, np.ndarray]], eta: float) -> None:
"""Updates the network weights and biases according to
a single iteration of gradient descent, using just the
training data in mini_batch and back-propagation.
:param mini_batch: the batch of instances to process
:param eta: the learning rate
"""
b_hat = [np.zeros(b.shape) for b in self.biases]
w_hat = [np.zeros(w.shape) for w in self.weights]
for x, y in mini_batch:
delta_b_hat, delta_w_hat = self.back_propagate(x, y)
b_hat = [nb + dnb for nb, dnb in zip(b_hat, delta_b_hat)]
w_hat = [nw + dnw for nw, dnw in zip(w_hat, delta_w_hat)]
self.weights = [w - (eta / len(mini_batch)) * nw for w, nw in zip(self.weights, w_hat)]
self.biases = [b - (eta / len(mini_batch)) * nb for b, nb in zip(self.biases, b_hat)]
@staticmethod
def _print_configuration(epochs, batch_size, eta, n, n_test=None):
print(f"epochs: {epochs}")
print(f"batch_size: {batch_size}")
print(f"eta: {eta}")
print(f"train set size: {n}")
if n_test:
print(f"test set size: {n_test}")
def evaluate(self, data: Sequence) -> int:
"""Evaluate the network's prediction on the given test set
:param data: the list of instances the evaluate the model
:return: the number of test inputs correctly classified
"""
results = [(np.argmax(self.feed_forward(x)), y) for (x, y) in data]
return sum(int(x == y) for (x, y) in results)
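# A minimal usage sketch for SimpleNetwork (the loader names and the
# MNIST-style 784-dimensional inputs below are assumptions, not part of
# this module):
#
#     net = SimpleNetwork([784, 30, 10])
#     net.sdg(training_data, epochs=30, eta=3.0, batch_size=10, test_data=test_data)
#
# where training_data yields (x, y) tuples with x of shape (784, 1) and y a
# one-hot (10, 1) vector, and test_data yields (x, label_index) pairs.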
class ImprovedNetwork(NetworkBase):
def __init__(
self,
layer_sizes,
cost_function=CrossEntropyCost,
weight_initializer=sqrt_connections_ratio,
):
super().__init__(layer_sizes)
self.num_classes = layer_sizes[-1]
self.weights, self.biases = weight_initializer(layer_sizes)
self.cost_function = cost_function
def cost_name(self):
return self.cost_function.__name__
def load(self, filename):
raise NotImplementedError(f"TODO: implement load method in {self.__name__}")
def sdg(
self,
training_data,
epochs,
batch_size,
eta,
lmbda=0.0,
validation_data=None,
monitor=None,
debug=False,
):
"""Train the neural network using mini-batch stochastic gradient
descent. The ``training_data`` is a list of tuples ``(x, y)``
representing the training inputs and the desired outputs. The
other non-optional parameters are self-explanatory, as is the
regularization parameter ``lmbda``. The method also accepts
``evaluation_data``, usually either the validation or test
data. We can monitor the cost and accuracy on either the
evaluation data or the training data, by setting the
appropriate flags. The method returns a tuple containing four
lists: the (per-epoch) costs on the evaluation data, the
accuracies on the evaluation data, the costs on the training
data, and the accuracies on the training data. All values are
evaluated at the end of each training epoch. So, for example,
if we train for 30 epochs, then the first element of the tuple
will be a 30-element list containing the cost on the
evaluation data at the end of each epoch. Note that the lists
are empty if the corresponding flag is not set.
"""
train_data = list(training_data)
n = len(train_data)
validation_costs, validation_acc = [], []
train_costs, train_acc = [], []
if validation_data:
validation_data = list(validation_data)
# TODO: implement early stopping
for epoch in range(epochs):
if debug:
print(f"\n****\nStarting Epoch: {epoch}")
random.shuffle(train_data)
batches = [train_data[k : k + batch_size] for k in range(0, n, batch_size)]
if debug:
batch_num = 0
qtt_batches = len(batches)
for batch in batches:
if debug:
batch_num += 1
print(f"\tProcessing batch {batch_num} of {qtt_batches}")
self.update_batch(batch, eta, lmbda, n)
if monitor and validation_data is not None:
# monitoring the validation accuracy more often,
# per mini_batch maybe, or x * mini_batches
tr_acc, tr_cost, valid_acc, valid_cost = self._monitor(monitor, lmbda, train_data, validation_data)
train_costs.append(tr_cost)
train_acc.append(tr_acc)
validation_costs.append(valid_cost)
validation_acc.append(valid_acc)
ImprovedNetwork.print_results(epoch, valid_cost, valid_acc, tr_cost, tr_acc)
return validation_costs, validation_acc, train_costs, train_acc
def update_batch(
self,
mini_batch: list[tuple[np.ndarray, np.ndarray]],
eta: float,
lmbda: float,
n: int,
) -> None:
"""Updates the network weights and biases according to
a single iteration of gradient descent, using just the
training data in mini_batch and back-propagation.
:param mini_batch: a list of tuples, where each tuple is an instance (x, y) to process
:param eta: the learning rate
:param lmbda: the regularization parameter
:param n: the total size of the training data set
"""
b_hat = [np.zeros(b.shape) for b in self.biases]
w_hat = [np.zeros(w.shape) for w in self.weights]
for x, y in mini_batch:
delta_b_hat, delta_w_hat = self.back_propagate(x, y)
b_hat = [nb + dnb for nb, dnb in zip(b_hat, delta_b_hat)]
w_hat = [nw + dnw for nw, dnw in zip(w_hat, delta_w_hat)]
self.weights = [
(1 - eta * (lmbda / n)) * w - (eta / len(mini_batch)) * nw for w, nw in zip(self.weights, w_hat)
]
self.biases = [b - (eta / len(mini_batch)) * nb for b, nb in zip(self.biases, b_hat)]
def _monitor(self, monitor, lmbda, train_data, validation_data):
train_acc = train_cost = valid_acc = valid_cost = None
if "train_acc" in monitor or "train_cost" in monitor:
measures = []
if "train_acc" in monitor:
measures.append("acc")
if "train_cost" in monitor:
measures.append("cost")
transformed_data = [(x, np.argmax(y)) for x, y in train_data]
train_acc, train_cost = self.evaluate(transformed_data, measures, lmbda)
if "valid_acc" in monitor or "valid_cost" in monitor:
measures = []
if "valid_acc" in monitor:
measures.append("acc")
if "valid_cost" in monitor:
measures.append("cost")
valid_acc, valid_cost = self.evaluate(validation_data, measures, lmbda)
return train_acc, train_cost, valid_acc, valid_cost
def evaluate(self, test_data, measures: list = None, lmbda=5.0):
measures = measures or ["acc"]
data = list(test_data)
acc = cost = None
prediction = [self.feed_forward(x) for x, y in data]
if "acc" in measures:
true_labels = [y for _, y in data]
acc = self._accuracy(zip(prediction, true_labels))
if "cost" in measures:
true_labels_as_vector = [data_helper.as_vector(y, self.num_classes) for _, y in data]
cost = self._total_cost(list(zip(prediction, true_labels_as_vector)), lmbda)
return acc, cost
def _total_cost(self, data, lmbda):
"""Return the total cost for the data set ``data``. The flag
``convert`` should be set to False if the data set is the
training data (the usual case), and to True if the data set is
the validation or test data. See comments on the similar (but
reversed) convention for the ``accuracy`` method, above.
"""
cost = 0.0
for activations, true_label in data:
cost += self.cost_function.evaluate(activations, true_label) / len(data)
cost += 0.5 * (lmbda / len(data)) * sum(np.linalg.norm(w) ** 2 for w in self.weights)
return cost
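    # Illustrative recap (not part of the original source) of the quantity
    # computed by _total_cost above:
    #   C = (1/n) * sum_i C(a_i, y_i) + (lmbda / (2 * n)) * sum_w ||w||^2
    # i.e. the mean per-example cost plus an L2 weight-decay penalty.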
@staticmethod
def _accuracy(data) -> float:
results = [(np.argmax(activations), true_label) for (activations, true_label) in data]
return sum(int(prediction == true_label) for (prediction, true_label) in results) / len(results)
@staticmethod
def print_results(epoch, valid_cost, valid_acc, training_cost, training_acc):
print(f"Epoch {epoch}:")
        if valid_acc is not None:
            print(f"\t valid_acc: {valid_acc:.5f}")
        if valid_cost is not None:
            print(f"\t valid_cost: {valid_cost:.5f}")
        if training_acc is not None:
            print(f"\t training_acc: {training_acc:.5f}")
        if training_cost is not None:
            print(f"\t training_cost: {training_cost:.5f}")
print("-------")
class DidacticCourseraNetwork:
def forward(self, features, parameters):
"""Implement forward propagation for the [LINEAR->RELU]*(L-1)->LINEAR->SIGMOID computation
Arguments:
X -- data, numpy array of shape (input size, number of examples)
parameters -- output of initialize_parameters_deep()
Returns:
AL -- last post-activation value
caches -- list of caches containing:
every cache of linear_activation_forward() (there are L-1 of them, indexed from 0 to L-1)
"""
caches = []
activations = features
num_layers = len(parameters) // 2 # number of layers in the neural network
# Implement [LINEAR -> RELU]*(L-1). Add "cache" to the "caches" list.
for layer in range(1, num_layers):
previous_activations = activations
activations, cache = self.linear_activation_forward(
previous_activations, parameters[f"W{layer}"], parameters[f"b{layer}"], activation_func=relu
)
caches.append(cache)
# Implement LINEAR -> SIGMOID. Add "cache" to the "caches" list.
linear_activation_values, cache = self.linear_activation_forward(
activations, parameters[f"W{num_layers}"], parameters[f"b{num_layers}"], activation_func=sigmoid
)
caches.append(cache)
if linear_activation_values.shape != (1, features.shape[1]):
raise Exception("Shape mismatch when processing forward step.")
return linear_activation_values, caches
def linear_activation_forward(self, previous_activation_values, weights, biases, activation_func=relu):
"""
Implement the forward propagation for the LINEAR->ACTIVATION layer
Args:
previous_activation_values: activations from previous layer (or input data)
(size of previous layer, number of examples)
weights: a numpy array of shape (size of current layer, size of previous layer)
biases: numpy array of shape (size of the current layer, 1)
activation_func -- the activation to be used in this layer
Returns:
activation_values: the output of the activation function, also called the post-activation value
cache: a python tuple containing "linear_cache" and "activation_cache";
stored for computing the backward pass efficiently
"""
# Inputs: "A_prev, W, b". Outputs: "A, activation_cache".
pre_activations, linear_cache = self.linear_forward(previous_activation_values, weights, biases)
activation_values, activation_cache = activation_func(pre_activations)
if activation_values.shape != (weights.shape[0], previous_activation_values.shape[1]):
raise Exception("Shape mismatch when processing linear activation forward function.")
cache = (linear_cache, activation_cache)
return activation_values, cache
def linear_forward(self, activations, weight_matrix, bias_vector):
"""
Implement the linear part of a layer's forward propagation.
Args:
activations: activations from previous layer (or input data): (size of previous layer, number of examples)
weight_matrix: numpy array of shape (size of current layer, size of previous layer)
bias_vector: numpy array of shape (size of the current layer, 1)
Returns:
tuple with the pre-activation parameter values (Z) and cache,
a python tuple containing "activations", "weights" and "biases";
stored for computing the backward pass efficiently
"""
pre_activations = np.dot(weight_matrix, activations) + bias_vector
if pre_activations.shape != (weight_matrix.shape[0], activations.shape[1]):
raise Exception("Shape mismatch when processing linear forward function.")
return pre_activations, (activations, weight_matrix, bias_vector)
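# --- Illustrative, self-contained sketch (not part of the original source) ---
# It repeats the [LINEAR->RELU]*(L-1)->LINEAR->SIGMOID forward pass with plain
# numpy for a tiny, randomly initialized network, mainly to show the expected
# shapes. The layer sizes, example count and seed below are arbitrary assumptions.
if __name__ == "__main__":
    _rng = np.random.default_rng(0)
    _sizes = [4, 3, 2, 1]  # input dim, two hidden layers, one output unit
    _m = 5  # number of examples
    _X = _rng.standard_normal((_sizes[0], _m))
    _params = {}
    for _l in range(1, len(_sizes)):
        _params[f"W{_l}"] = _rng.standard_normal((_sizes[_l], _sizes[_l - 1])) * 0.01
        _params[f"b{_l}"] = np.zeros((_sizes[_l], 1))
    _A = _X
    for _l in range(1, len(_sizes) - 1):  # hidden layers: LINEAR -> RELU
        _A = np.maximum(0, _params[f"W{_l}"] @ _A + _params[f"b{_l}"])
    _ZL = _params[f"W{len(_sizes) - 1}"] @ _A + _params[f"b{len(_sizes) - 1}"]
    _AL = 1.0 / (1.0 + np.exp(-_ZL))  # output layer: LINEAR -> SIGMOID
    assert _AL.shape == (1, _m)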
```
#### File: examples/logistic_regression/basic_algorithm.py
```python
import numpy as np
def sigmoid(z):
return 1.0 / (1.0 + np.exp(-z))
class IterativeBinaryLogisticRegression:
def __init__(self, num_features):
self.num_features = num_features
self.weights = np.zeros((num_features, 1))
self.bias = 0
def run(self, train_data, eta):
train_data = list(train_data)
n = len(train_data)
b_slope = 0
weight_slopes = np.zeros((self.num_features, 1))
cost = 0
# TODO: implement 'm' iterations
# Calculate the Logistic Regression Derivatives
# 1-step of Gradient Descent
for x, y in train_data:
# TODO: implement the dot operation with for loops
            z = np.dot(self.weights.T, x) + self.bias
a = sigmoid(z)
cost += -(y * np.log(a) + (1 - y) * np.log(1 - a))
z_slope = a - y
b_slope += z_slope
for i in range(self.num_features):
weight_slopes[i] += x[i] * z_slope
cost /= n
b_slope /= n
weight_slopes = [w / n for w in weight_slopes]
# Update the weights and bias
for i in range(self.num_features):
self.weights[i] -= eta * weight_slopes[i]
self.bias -= eta * b_slope
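        # Illustrative recap (not part of the original source) of the per-example
        # derivatives accumulated in the loop above:
        #   z = w^T x + b,  a = sigmoid(z)
        #   dz = a - y,  dw_i += x_i * dz,  db += dz
        # The accumulated slopes are then averaged over the n training examples
        # before the single gradient-descent update.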
class VectorizedBinaryLogisticRegression:
def __init__(self, num_features):
self.num_features = num_features
self.weights = np.zeros((num_features, 1))
self.bias = 0
def run(self, train_data, eta):
train_data = list(train_data)
n = len(train_data)
b_slope = 0
weight_slopes = np.zeros((self.num_features, 1))
cost = 0
# TODO: implement 'm' iterations
# Calculate the Logistic Regression Derivatives
# 1-step of Gradient Descent
# TODO: vectorize the loop
for x, y in train_data:
            z = np.dot(self.weights.T, x) + self.bias
a = sigmoid(z)
cost += -(y * np.log(a) + (1 - y) * np.log(1 - a))
z_slope = a - y
b_slope += z_slope
weight_slopes += x * z_slope
cost /= n
b_slope /= n
weight_slopes /= n
# Update the weights and bias
# TODO: vectorize
for i in range(self.num_features):
self.weights[i] -= eta * weight_slopes[i]
self.bias -= eta * b_slope
@staticmethod
def sigmoid(z):
return 1.0 / (1.0 + np.exp(-z))
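# --- Illustrative fully vectorized step (not part of the original source) ---
# This is the target of the "TODO: vectorize the loop" above: one gradient-descent
# step over all m examples at once. Shapes are assumptions for this sketch:
# X is (num_features, m), Y is (1, m), w is (num_features, 1), b is a scalar.
def vectorized_step(X, Y, w, b, eta):
    m = X.shape[1]
    Z = w.T @ X + b  # (1, m)
    A = sigmoid(Z)  # (1, m)
    cost = -np.mean(Y * np.log(A) + (1 - Y) * np.log(1 - A))
    dZ = A - Y  # (1, m)
    dw = X @ dZ.T / m  # (num_features, 1)
    db = float(np.sum(dZ)) / m
    return w - eta * dw, b - eta * db, cost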
```
#### File: neural_networks/examples/next_user_action_prediction.py
```python
import numpy as np
import pandas as pd
from sklearn import preprocessing
from utils.model_helper import load_pickle_model
def prepare_test_data(df, num_features, class_feature):
y = df[class_feature].to_numpy()
min_max_scaler = preprocessing.MinMaxScaler()
X = min_max_scaler.fit_transform(df.drop([class_feature], axis=1))
inputs = [np.reshape(x, (num_features, 1)) for x in X]
labels = [label - 1 for label in y]
return zip(inputs, labels)
if __name__ == "__main__":
path_to_data = "../data/raw/user_actions"
df_test = pd.read_csv(f"{path_to_data}/test_processed_2.csv", sep="\t")
df_test = df_test.drop(["user_id", "timestamp"], axis=1)
test_set = prepare_test_data(df_test, 14, "action")
net = load_pickle_model("../models/model.pickle")
acc, cost = net.evaluate(test_set, ["acc", "cost"])
print("Test performance:")
print(f"\tAcc: {acc}")
print(f"\tCost: {cost}")
```
#### File: regression/least_squares/multiple_linear_regression.py
```python
from math import sqrt
import numpy as np
from numpy import dot, power
from numpy.linalg import inv
from regression.base import predict_output, random_init
def multiple_linear_regression_closed_form(observations, y_values):
"""
This operation is very expensive. Calculating the inverse is N^3.
There are better solutions for this problem.
Notes:
- Num of observations must be greater than the number of features
- linearly independent
- w and y are vectors
RSS(w) = -2H^T (y - Hw)
Solve for w:
RSS(w_hat) = 0
-2H^T (y - H * w_hat) = 0
-2H^T * y + 2H^T * H * w_hat = 0
H^T * H * w_hat = H^T * y
inv(H^T * H) * H^T * H * w_hat = H^T * y * inv(H^T * H)
# inv(H^T * H) * H^T * H --> I
# thus:
I * w_hat = H^T * y * inv(H^T * H)
w_hat = inv(H^T * H) * H^T * y
"""
return dot(inv(observations.T @ observations), observations.T.dot(y_values))
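# Illustrative alternative (not in the original source): solving the normal
# equations with np.linalg.solve avoids forming the explicit inverse and is
# generally cheaper and more numerically stable than inv(H^T H) @ H^T y.
def multiple_linear_regression_normal_equations(observations, y_values):
    return np.linalg.solve(observations.T @ observations, observations.T.dot(y_values))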
def gradient_descent_loop_form(
feature_matrix,
y_values,
step_size: float = 1e-2,
tolerance: float = 1e-3,
max_iter: int = 1e2,
initializer=random_init,
):
weights = initializer(feature_matrix.shape[1])
converged = False
iteration = 0
while not converged and iteration < max_iter:
predictions = predict_output(feature_matrix, weights)
residual_errors = predictions - y_values
gradient_sum_squares = 0
for i in range(len(weights)): # loop over each weight
# Recall that feature_matrix[:, i] is the feature column associated with weights[i]
# compute the derivative for weight[i]:
derivative = feature_derivative(residual_errors, feature_matrix[:, i])
# add the squared value of the derivative to the gradient sum of squares (for assessing convergence)
gradient_sum_squares += derivative ** 2
weights[i] -= step_size * derivative
# compute the square-root of the gradient sum of squares to get the gradient magnitude:
gradient_magnitude = sqrt(gradient_sum_squares)
converged = gradient_magnitude < tolerance
iteration += 1
return weights
def feature_derivative(errors, feature):
    return 2 * np.dot(errors, feature)
def gradient_descent_vectorized_form(
feature_matrix,
y_values,
step_size: float = 1e-2,
tolerance: float = 1e-3,
max_iter: int = 1e2,
initializer=random_init,
):
weights = initializer(feature_matrix.shape[1])
converged = False
iteration = 0
while not converged and iteration < max_iter:
residual_errors = y_values - feature_matrix.dot(weights.T)
partials = -2 * feature_matrix.T.dot(residual_errors)
        weights -= step_size * partials
gradient_magnitude = np.sqrt(power(partials, 2).sum())
converged = gradient_magnitude < tolerance
iteration += 1
return weights
if __name__ == "__main__":
# first feature is the constant 1 (intercept)
feature_matrix = np.array(
[
[1, 10, 3],
[1, 2.5, 7],
[1, 15, 8],
[1, 6, 2],
[1, 67, 37],
]
)
y_values = np.array([-1, 0.2, 0.9, 3, 0.6])
step_size = 7e-12
tolerance = 2.5e7
closed_w = multiple_linear_regression_closed_form(feature_matrix, y_values)
loop_w = gradient_descent_loop_form(feature_matrix, y_values, step_size, tolerance)
vectorized_w = gradient_descent_vectorized_form(feature_matrix, y_values, step_size, tolerance)
print(closed_w)
print(loop_w)
print(vectorized_w)
``` |
{
"source": "jmetzz/speech-to-text",
"score": 3
} |
#### File: main/python/_2_convert_mic.py
```python
import argparse
import logging
from core.converter import MicCaptureToText
def create_argument_parser():
    parser = argparse.ArgumentParser(description='Capture audio from the microphone and convert it to text')
parser.add_argument('-l', '--lang',
help="The language of the audio",
default='en-US',
type=str)
parser.add_argument('-c', '--converter',
help="The language of the audio",
choices=['google', 'sphinx'],
type=str)
parser.add_argument('-v', '--verbose',
help="Be verbose. Sets logging level to INFO",
action="store_const",
dest="loglevel",
const=logging.INFO,
default=logging.INFO)
parser.add_argument('-vv', '--debug',
help="Print lots of debugging statements. "
"Sets logging level to DEBUG",
action="store_const",
dest="loglevel",
const=logging.DEBUG)
parser.add_argument('-q', '--quiet',
help="Be quiet! Sets logging level to WARNING",
action="store_const",
dest="loglevel",
const=logging.WARNING)
return parser
if __name__ == '__main__':
cmdline_args = create_argument_parser().parse_args()
logging.basicConfig(level=cmdline_args.loglevel)
converter = MicCaptureToText(recognizer=cmdline_args.converter,
language=cmdline_args.lang)
transcription = {
"success": None,
"error": None,
"stack trace": None,
"text": ""
}
print("Using input from mic:")
while transcription['text'] != "stop":
print("Say something to capture. When you are done just say 'stop'")
transcription = converter.convert()
if transcription['success']:
print(f"Recognizer thinks you said: '{transcription['text']}'")
else:
print(f"Error: '{transcription['error']}'")
print("No more audio to transcribe")
```
#### File: python/core/configuration.py
```python
import yaml
import logging
from collections import namedtuple
class Config:
logger = logging.getLogger(__name__)
@staticmethod
def load_from_file(filename):
try:
with open(filename, 'r') as stream:
try:
                    config = yaml.safe_load(stream)
return Config._convert(config)
except yaml.YAMLError as exc:
                    Config.logger.fatal('config: Cannot load config: %s', filename, exc_info=exc)
except Exception as e:
Config.logger.error('config: File not found: {}'.format(filename))
raise e
@staticmethod
def load_from_db(setting_name):
raise NotImplementedError
@staticmethod
def from_dict(dictionary):
if not isinstance(dictionary, dict):
raise ValueError('Given argument is not a dictionary')
return Config._convert(dictionary)
@staticmethod
def _convert(dictionary):
"""Converts a nested dictionary into an accessible object,
allowing us to access nested property as simple as object.param.nested
:obj: dictionary
:returns: NestedObject
"""
if isinstance(dictionary, dict):
for key, value in dictionary.items():
dictionary[key] = Config._convert(value)
return namedtuple('VO', dictionary.keys())(**dictionary)
elif isinstance(dictionary, list):
return [Config._convert(item) for item in dictionary]
else:
return dictionary
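# Illustrative usage (not part of the original source): nested dictionaries become
# attribute-accessible named tuples, e.g.
#   cfg = Config.from_dict({"audio": {"language": "en-US", "rate": 16000}})
#   cfg.audio.language   # -> "en-US"
#   cfg.audio.rate       # -> 16000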
``` |
{
"source": "jmewes/epages-beyond-payment-app",
"score": 3
} |
#### File: jmewes/epages-beyond-payment-app/shops.py
```python
import psycopg2
import requests
class Shop(object):
def __init__(self, id, hostname):
self.id = id
self.hostname = hostname
class Shops(object):
def __init__(self):
pass
def get_shop(self, id):
return None
class PostgresShops(Shops):
def __init__(self, database_url):
self.database_url = database_url
def create_schema(self):
with psycopg2.connect(self.database_url) as conn:
with conn.cursor() as curs:
curs.execute("""CREATE TABLE IF NOT EXISTS SHOPS (
ID varchar(255) UNIQUE NOT NULL,
HOSTNAME varchar(255) NOT NULL
)""")
def create_or_update_shop(self, shop):
print("Create/update shop %s (%s)" % (shop.hostname, shop.id))
sql = ''
if self.get_shop(shop.id):
sql = "UPDATE SHOPS SET HOSTNAME = %s WHERE ID=%s"
else:
sql = "INSERT INTO SHOPS (HOSTNAME, ID) VALUES(%s, %s)"
with psycopg2.connect(self.database_url) as conn:
with conn.cursor() as curs:
curs.execute(sql, (shop.hostname, shop.id))
def get_shop(self, id):
with psycopg2.connect(self.database_url) as conn:
with conn.cursor() as curs:
curs.execute("SELECT * FROM SHOPS WHERE ID=%s", (id,))
entry = curs.fetchone()
if entry:
return Shop(id=entry[0],
hostname=entry[1])
return None
def get_shop_id(installation):
return \
requests.get('%s/shop-id' % installation.api_url, \
headers={
"Accept": "application/hal+json",
"Authorization": "Bearer %s" % installation.access_token
}).json() \
.get('shopId', None)
```
#### File: epages-beyond-payment-app/test/test_signers.py
```python
from signers import sign
def test_sign():
signature = sign('Hello, unaltered message!', 'ha2e25nfmvo1dgeqsncd3nqsoj')
assert signature == 'WJZWs4v/Vgru4X6hdKhI71TmhUM='
``` |
{
"source": "jmew/Felix.AI",
"score": 3
} |
#### File: jmew/Felix.AI/api.py
```python
from flask import Flask, request, jsonify
from flask_restful import Api
from chatbot import chatbot_api
app = Flask(__name__)
api = Api(app)
felix = None
@app.route('/', methods=['POST'])  # accept POST requests with a JSON body
def form_example():
    if request.method == 'POST':  # handle the JSON payload of the POST request
        req_data = request.get_json()
        sentence = req_data['input']
        output = felix.predict(sentence)
return jsonify(
reply=output,
)
if __name__ == '__main__':
felix = chatbot_api.Chatbot()
felix.run()
app.run(port='5002')
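# Illustrative client call (not part of the original source), e.g. with the
# `requests` library:
#   requests.post("http://localhost:5002/", json={"input": "hello"}).json()
# which returns a JSON body of the form {"reply": "<chatbot output>"}.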
```
#### File: Felix.AI/scripts/data_filterer.py
```python
import sys
import re
import os
import datetime
import argparse
import collections
class DataFilterer:
    round = re.compile(r'\(.+?\)')
    html = re.compile(r'\<.+?\>')
    curly = re.compile(r'\{.+?\}')
    square = re.compile(r'\[.+?\]')
    speaker = re.compile(r'^(.+?:|.+?>>)')
availableDatatypes = collections.OrderedDict([ # OrderedDict because the first element is the default choice
('subs', "filterSubs"),
('transcript', "filterTranscript"),
])
@staticmethod
def dataTypeChoices():
return list(DataFilterer.availableDatatypes.keys())
@staticmethod
def getFileContent(filename):
lines = None
with open(filename, 'r') as f:
lines = f.readlines()
return lines
def __init__(self, args):
self.dataType = args.dataType
self.keepPunc = args.keepPunctuations
self.filterMethod = DataFilterer.availableDatatypes[self.dataType]
def _cleanLine(self, s):
s = DataFilterer.round.sub('', s)
s = DataFilterer.curly.sub('', s)
s = DataFilterer.square.sub('', s)
s = DataFilterer.html.sub('', s)
s = s.strip()
s = s.replace('|', ' ')
s = s.replace('. . .', '.')
s = s.replace('\n', '')
s = s.replace('\r', '')
if self.keepPunc:
s = re.sub(r'[^a-zA-Z,.!? \'-]', u'', s)
else:
s = re.sub(r'[^a-zA-Z \'-]', u'', s)
s = re.sub(r'- ', u'', s)
s = re.sub(r' -', u'', s)
if len(s) > 0 and (s[0] == '-' or s[0] == '.'):
s = s[1:]
return s.strip()
def filterSubs(self, lines):
filtered = []
inlineTiming = None
'''
Two ways that subtitles are formatted:
1. inlineTiming
{330}{363} some sentence
2. not inlineTiming
1
00:00:02,292 --> 00:00:03,645 X1:197 X2:518 Y1:484 Y2:524
some sentence
'''
if lines[0][0] == '{':
inlineTiming = True
else:
inlineTiming = False
dialogue = [] # temporary array for a sentence that is written over multiple lines
for l in lines:
if l.strip().isdigit():
continue
if not inlineTiming and ("-->" in l or len(re.findall(r"(?:(?:([01]?\d|2[0-3]):)?([0-5]?\d):)?([0-5]?\d)", l)) > 0): # garbage lines
continue
if not inlineTiming and l.isspace(): # form one sentence using multiple lines of sentences
full_s = ""
for s in dialogue:
# Merge fragmented sub sentences into one sentence
s = self._cleanLine(s)
full_s = full_s + " " + s
full_s = full_s.strip()
if len(full_s) > 0:
filtered.append(full_s)
dialogue = []
else: # A line of sentence
l = l.strip()
if inlineTiming or (not inlineTiming and len(l) > 0 and l[0] == '-'):
# If begins with -, separate person speaking
l = self._cleanLine(l)
if len(l) > 0:
filtered.append(l)
elif not inlineTiming:
# It could be one sentence written over multiple lines
dialogue.append(l)
for s in dialogue:
s = self._cleanLine(s)
if len(s) > 0:
filtered.append(s)
return filtered
def filterTranscript(self, lines):
filtered = []
for l in lines:
if l.strip().isdigit(): # if just consists of digits, skip
continue
l = l.strip()
l = DataFilterer.speaker.sub('', l)
l = self._cleanLine(l)
if len(l) > 0:
filtered.append(l)
return filtered
# Returns a list of filtered lines from the specified subtitle
def filterData(self, filename):
print("Filtering ", filename, "\r\n")
filtered = []
filter = None
try:
filter = getattr(self, self.filterMethod)
except AttributeError:
            raise NotImplementedError("Class `{}` does not implement `{}`".format(self.__class__.__name__, self.filterMethod))
lines = DataFilterer.getFileContent(filename)
filtered = filter(lines)
return filtered
def main(argv=None):
    if argv is None or len(argv) < 2:
return
parser = argparse.ArgumentParser(description='Filter txt data.')
parser.add_argument('--dataType', choices=DataFilterer.dataTypeChoices(), default=DataFilterer.dataTypeChoices()[0], help='type of txt files.')
parser.add_argument('--dirName', type=str, default='', help='use this for directory name that contains the text files')
parser.add_argument('mergeFiles', metavar='mf', type=str, nargs='*', help='Files to be added')
parser.add_argument('--keepPunctuations', action='store_true', help='use this if you want train using a different dataset type')
parser.add_argument('--mergeMode', action='store_true', help='use this if you want merge text files instead of filtering them')
parser.add_argument('--outputName', type=str, default=None, nargs='?', help='use this if you want to specify output file name')
# Parse arguments
args = parser.parse_args(argv[1:])
print(args)
training_data = []
new_filename = ""
dataFilterer = DataFilterer(args)
# Merge mode
if args.mergeMode:
for file in args.mergeFiles:
new_filename += file.split(".")[0] + "_"
training_data.extend(DataFilterer.getFileContent(file))
new_filename = new_filename[:-1] + ".txt"
# Filter mode
else:
# Get names of all text files to filter
filenames = []
for file in os.listdir(args.dirName):
if file.endswith(".txt"):
f = os.path.join(args.dirName, file)
filenames.append(f)
# Filter each file
for filename in filenames:
training_data.extend(dataFilterer.filterData(filename))
training_data.append("===")
new_filename = args.dirName.replace('/', '') + ".txt"
if args.outputName is not None:
new_filename = args.outputName
with open(new_filename, 'w+') as f:
for l in training_data:
f.write("%s\n" % l)
if __name__== "__main__":
main(sys.argv)
``` |
{
"source": "JMey94/cigar-id",
"score": 3
} |
#### File: JMey94/cigar-id/plant_input.py
```python
import os
from PIL import Image
import numpy as np
import glob
def input_data(path):
newPath = os.path.join(path,'*.jpg')
filePaths = glob.glob(newPath)
numImgs = len(filePaths)
# get numPixels from one image, all images must be square and same dimension
numPixels = np.asarray(Image.open(filePaths[0])).shape[0]
labels = np.empty(shape = (numImgs,2))
features = np.empty(shape = (numImgs,numPixels**2))
for i,imagePath in enumerate(filePaths):
if 'pinoak' in imagePath:
labelVec = [1,0]
elif 'sugarmaple' in imagePath:
labelVec = [0,1]
# if we load one image at a time, this is much faster than load_images()
img = np.asarray(Image.open(imagePath)).ravel()
features[i,:] = img
labels[i,:] = labelVec
return features,labels
``` |
{
"source": "jmeydam/alternative-destinations",
"score": 3
} |
#### File: app/api/alternative_destinations.py
```python
from datetime import datetime
from sqlalchemy import desc
from flask import request, make_response
from flask_cors import cross_origin
from . import api
from .authentication import auth
from app.exceptions import ValidationError
from app.models import Airport, Destination, Weather
MONTH_NAMES = {
1: 'January',
2: 'February',
3: 'March',
4: 'April',
5: 'May',
6: 'June',
7: 'July',
8: 'August',
9: 'September',
10: 'October',
11: 'November',
12: 'December'}
def default_destinations(iata_code_original_destination):
"""Get three default destinations different from original destination of query."""
# Paris, London, Rome, New York
defaults = ['CDG', 'LHR', 'FCO', 'JFK']
if iata_code_original_destination in defaults:
defaults.remove(iata_code_original_destination)
else:
defaults.remove('JFK')
return defaults
@api.route('/search')
@auth.login_required
@cross_origin()
def get_alternative_destinations():
"""Validates query parameters and assembles output."""
# print(request.args)
# ImmutableMultiDict([('iata_code', 'LHR'),
# ('date', '2019-01-15'),
# ('min_temperature_celsius', '5'),
# ('max_temperature_celsius', '20'),
# ('max_precipitation_mm', '0')]) # max *average* daily rainfall
try:
iata_code = request.args.get('iata_code')
assert iata_code is not None
assert len(iata_code) == 3
assert iata_code == iata_code.upper()
date = datetime.strptime(request.args.get('date'), '%Y-%m-%d')
min_temperature_celsius = float(request.args.get('min_temperature_celsius'))
assert min_temperature_celsius >= -50
assert min_temperature_celsius <= 50
max_temperature_celsius = float(request.args.get('max_temperature_celsius'))
assert max_temperature_celsius >= -50
assert max_temperature_celsius <= 50
assert min_temperature_celsius <= max_temperature_celsius
max_precipitation_mm = float(request.args.get('max_precipitation_mm'))
assert max_precipitation_mm >= 0
# avg. daily rainfall normally between 1 and 4
assert max_precipitation_mm <= 10
except Exception as e:
#print(e)
raise ValidationError('Invalid input')
defaults = default_destinations(iata_code)
dests = []
dests.append(Destination.query.filter_by(iata_code=defaults[0]).first())
dests.append(Destination.query.filter_by(iata_code=defaults[1]).first())
dests.append(Destination.query.filter_by(iata_code=defaults[2]).first())
# query in "strict mode"
query_weather_strict = Weather.query.filter(
Weather.month == MONTH_NAMES[date.month],
Weather.iata_code != iata_code,
Weather.min_temperature_celsius >= min_temperature_celsius,
Weather.max_temperature_celsius <= max_temperature_celsius,
Weather.daily_precipitation_mm <= max_precipitation_mm).order_by(
desc(Weather.min_temperature_celsius))
# print(query_weather_strict)
result_strict = query_weather_strict.all()
# print('Strict search found ' + str(len(result_strict)) + ' destinations.')
# print(result_strict)
# query in "fuzzy mode" with tolerances
query_weather_fuzzy = Weather.query.filter(
Weather.month == MONTH_NAMES[date.month],
Weather.iata_code != iata_code,
Weather.min_temperature_celsius >= min_temperature_celsius - 5,
Weather.max_temperature_celsius <= max_temperature_celsius + 5,
Weather.daily_precipitation_mm <= max_precipitation_mm + 3).order_by(
desc(Weather.min_temperature_celsius))
result_fuzzy = query_weather_fuzzy.all()
# print('Fuzzy search found ' + str(len(result_fuzzy)) + ' destinations.')
# print(result_fuzzy)
# print(type(result_fuzzy))
# <class 'list'>
# append unordered result of fuzzy search to ordered result of strict search
# only append elements that are not part of the strict search result
result = result_strict + list(set(result_fuzzy) - set(result_strict))
# print('Found ' + str(len(result)) + ' destinations. (Results strict search first.)')
# print(result)
# overwrite default destinations with search result
for index, weather in enumerate(result):
if index > 2:
break
dests[index] = Destination.query.filter_by(iata_code=weather.iata_code).first()
line_1 = '{"alternative_destinations":\n'
line_2 = ' [\n'
line_3 = ' {"iata_code": "%s", "city": "%s"},\n' % (dests[0].iata_code, dests[0].city)
line_4 = ' {"iata_code": "%s", "city": "%s"},\n' % (dests[1].iata_code, dests[1].city)
line_5 = ' {"iata_code": "%s", "city": "%s"}\n' % (dests[2].iata_code, dests[2].city)
line_6 = ' ]\n'
line_7 = '}\n'
response_string = line_1 + line_2 + line_3 + line_4 + line_5 + line_6 + line_7
response = make_response(response_string)
response.mimetype = 'application/json'
return response
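# Illustrative alternative (not in the original source): the same payload could be
# built with flask.jsonify (not imported above), avoiding manual string assembly:
#   return jsonify(alternative_destinations=[
#       {"iata_code": d.iata_code, "city": d.city} for d in dests])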
```
#### File: app/main/views.py
```python
from flask import render_template, session, redirect, url_for
from ..models import Destination
from . import main
from .forms import ParameterForm
@main.route('/', methods=['GET', 'POST'])
def index():
form = ParameterForm()
if form.validate_on_submit():
#print(dir(form))
#{'iata_code': 'TLV', 'date': datetime.date(2019, 1, 19), 'min_temperature_celsius': 5, 'max_temperature_celsius': 20, 'max_precipitation_mm': 0, 'submit': True, 'csrf_token': '<KEY>'}
#print(form.data)
#print(form.iata_code)
#print(form.date.data)
#print(form.min_temperature_celsius.data)
#print(form.max_temperature_celsius.data)
#print(form.max_precipitation_mm.data)
#test_url = '../api/v1/search?iata_code=LHR&date=2019-01-15&min_temperature_celsius=5&max_temperature_celsius=20&max_precipitation_mm=0'
redirect_url = '../api/v1/search' +\
'?iata_code=' + form.iata_code.data +\
'&date=' + form.date.data.strftime('%Y-%m-%d') +\
'&min_temperature_celsius=' + str(form.min_temperature_celsius.data) +\
'&max_temperature_celsius=' + str(form.max_temperature_celsius.data) +\
'&max_precipitation_mm=' + str(form.max_precipitation_mm.data)
#http://127.0.0.1:5000/api/v1/search?iata_code=TLV&date=2019-01-19&min_temperature_celsius=5&max_temperature_celsius=20&max_precipitation_mm=0
return redirect(redirect_url)
return render_template('index.html', form=form)
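# Illustrative alternative (not in the original source): the query string could be
# assembled with urllib.parse.urlencode instead of manual concatenation:
#   from urllib.parse import urlencode
#   redirect_url = '../api/v1/search?' + urlencode({
#       'iata_code': form.iata_code.data,
#       'date': form.date.data.strftime('%Y-%m-%d'),
#       'min_temperature_celsius': form.min_temperature_celsius.data,
#       'max_temperature_celsius': form.max_temperature_celsius.data,
#       'max_precipitation_mm': form.max_precipitation_mm.data})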
``` |
{
"source": "Jmeyer1292/block_diagram_z3",
"score": 3
} |
#### File: fbdplc/apps/parse_s7db.py
```python
import argparse
import fbdplc.s7db
import pprint
import fbdplc.apps.loggers as loggers
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('paths', nargs='*')
parser.add_argument(
'--parser', choices=['guess', 'db', 'udt'], default='guess')
loggers.add_log_arguments(parser)
return parser.parse_args()
if __name__ == '__main__':
args = parse_args()
loggers.configure_logger(args)
ACTIONS = {
'db': fbdplc.s7db.parse_db_file,
'udt': fbdplc.s7db.parse_udt_file,
}
for path in args.paths:
print(f'Attempting to load s7db file "{path}"')
action = args.parser
if action == 'guess':
if path.endswith('db'):
action = 'db'
elif path.endswith('udt'):
action = 'udt'
else:
raise RuntimeError(
f'Unable to guess type of file w/ path {path}')
parser = ACTIONS[action]
result = parser(path)
pprint.pprint(result)
```
#### File: block_diagram_z3/fbdplc/modeling.py
```python
from fbdplc.functions import Block, Program, Scope
from fbdplc.parts import MovePart, PartModel, PartPort, PortDirection
from fbdplc.wires import IdentConnection, NamedConnection, Wire, WireConnection
from fbdplc.graph import MemoryProxy, ScopeContext, merge_nets
from fbdplc.access import LiteralConstantAccess, SymbolAccess, SymbolConstantAccess
from typing import List
import logging
import z3
from z3.z3types import Z3Exception
logger = logging.getLogger(__name__)
class ProgramModel:
def __init__(self, ctx=None):
self.assertions = []
self.ctx = ctx if ctx else z3.Context()
# We need some kind of static call graph for users to write assertions against
# or we need to accumulate annotations from the code itself.
self.root: Scope = None
self.global_mem = MemoryProxy('', self.ctx)
class MemoryAccessProxy:
def __init__(self, name, scope):
self.name, self.scope = name, scope
def __str__(self):
return f'MemoryAccessProxy({self.name} from {self.scope})'
def hunt_for_type(uid, code: ScopeContext, scope: Scope):
result = None
for wire_id, wire in code.wires.items():
wire: Wire = wire
# Test to see if wire endpoint 'a' is connecting to the part whose
# type we wish to infer.
a_matches = type(
wire.a) == NamedConnection and wire.a.target_uid == uid
b_matches = type(
wire.b) == NamedConnection and wire.b.target_uid == uid
if a_matches:
if type(wire.b) == IdentConnection:
access = code.accesses[wire.b.target_uid]
logger.debug(f'\tOther end is a {access}')
if isinstance(access, SymbolAccess) and access.scope == 'LocalVariable':
sort = scope.mem.sort(access.symbol)
logger.debug(f'\tSort determined to be {sort}')
return sort
elif b_matches:
if type(wire.a) == IdentConnection:
access = code.accesses[wire.a.target_uid]
logger.debug(f'\tOther end is a {access}')
if isinstance(access, SymbolAccess) and access.scope == 'LocalVariable':
sort = scope.mem.sort(access.symbol)
logger.debug(f'\tSort determined to be {sort}')
return sort
return result
def _model_block(program: Program, program_model: ProgramModel, block: Block, call_stack: List):
ns = call_stack[-1].ns
logger.info(f'Considering block {block.name} w/ call_stack {call_stack}')
logger.info(f'This block has the following scope/namespace: {ns}')
# A block consists of an ordered sequence of networks.
# Each network could potentially call into other blocks, in which case the translator
# recursively descends into them and generates a model.
#
# Each entity within a scope has a 'uid' associated with it. This uid is unique only to
# the scope it is contained within.
code = merge_nets(block.networks)
logger.debug('--ACCESSES--')
# NOTE(Jmeyer): No action should be necessary on these actions at this point in the analysis.
# The variables they reference should have already been created.
for uid, access in code.accesses.items():
pass
# Build a dictionary of instantiated parts
callables = {}
logger.debug('--PARTS--')
for uid, part_template in code.parts.items():
# TODO(Jmeyer): I need a more general way to signal that type inference is required. Right
# now I've got one part that needs it, so we just check for that.
if isinstance(part_template, MovePart):
logger.debug(
f'A part requiring type inference was detected: {part_template}')
part_template.port_type = hunt_for_type(uid, code, call_stack[-1])
assert part_template.port_type is not None, f'Type inference failed for {part_template}'
model: PartModel = part_template.instantiate(ns, program_model.ctx)
callables[uid] = model
program_model.assertions.extend(model.assertions)
# We can generate the logic for all of the primitives first
# We can generate function call instances now too.
logger.debug('--CALLS--')
for uid, call in code.calls.items():
if call.static_memory_access:
            logger.info(
                f'Considering call {call} w/ statics = "{call.static_memory_access}"')
next_block = program.blocks[call.target]
# The interface to a function call in THIS scope.
# Acts as a sort of user defined "part" that can be connected to via ports like any of
# the primitives.
model = call.instantiate(
f'{ns}:({uid})', program_model.ctx, next_block)
callables[uid] = model
# The act of calling a function creates a new scope in which the block variables
# associated with that scope are available in the eval of the block and are also
# connected to the input/output variables.
new_scope = Scope(ns, uid, program_model.ctx,
next_block, call_stack[-1])
new_scope.global_mem = call_stack[-1].global_mem
if call.static_memory_access:
new_scope.static_access_info = call.static_memory_access
_model_block(program, program_model, next_block,
call_stack + [new_scope])
link_assertions = new_scope.link_call(model)
program_model.assertions.extend(link_assertions)
# Then wire it all up.
# The wires define the program execution order, so translation to something akin to SSA
# should be a matter of following this though.
logger.debug('--WIRES--')
for uid, wire in code.wires.items():
a_is_access = type(wire.a) == IdentConnection
b_is_access = type(wire.b) == IdentConnection
assert(not(a_is_access and b_is_access))
# The general idea is that every endpoint is resolved to a model variable
# When we're connecting ports, all we have to do is assert equality: The signal on the wire is the same at each end
# When we connect to symbolic memory, however, things get more complicated:
# 1. A symbol may be read from or written to multiple times.
# 2. This program translates the input into a new program where each write creates a new variable
def _resolve3(conn):
assert(isinstance(conn, WireConnection))
if isinstance(conn, IdentConnection):
access = code.accesses[conn.target_uid]
# 3 types of memory access right now:
if isinstance(access, SymbolConstantAccess):
if access.scope == 'LocalConstant':
return MemoryAccessProxy(access.symbol, call_stack[-1])
else:
raise NotImplementedError(
f'Unhandled scope {access.scope} in Symbolic Constant Access')
elif isinstance(access, LiteralConstantAccess):
return access.value
elif isinstance(access, SymbolAccess):
if access.scope == 'LocalVariable':
local: Scope = call_stack[-1]
return MemoryAccessProxy(access.symbol, local)
elif access.scope == 'GlobalVariable':
return MemoryAccessProxy(access.symbol, program_model.global_mem)
else:
raise RuntimeError(
f'Unhandled access scope: {access.scope}')
else:
raise RuntimeError(
f'Unhandled access type: {type(access)}')
else:
assert isinstance(conn, NamedConnection)
part_iface: PartModel = callables[conn.target_uid]
port = part_iface.ports[conn.target_port]
return port
a = _resolve3(wire.a)
b = _resolve3(wire.b)
write_to_a = a_is_access and b.direction == PortDirection.OUT
write_to_b = b_is_access and a.direction == PortDirection.OUT
has_write_to_mem = write_to_a or write_to_b
def get_var(resolvable):
if isinstance(resolvable, MemoryAccessProxy):
scope = resolvable.scope
return scope.read(resolvable.name)
elif isinstance(resolvable, PartPort):
return resolvable.external_var()
else:
return resolvable
def get_writeable(resolvable):
if isinstance(resolvable, MemoryAccessProxy):
scope: Scope = resolvable.scope
return scope.write(resolvable.name)
else:
return get_var(resolvable)
if not has_write_to_mem:
a_var = get_var(a)
b_var = get_var(b)
try:
program_model.assertions.append(a_var == b_var)
except Z3Exception:
logger.error(
f'An exception occurred while assigning wires {a_var} of {type(a_var)} and {b_var} of {type(b_var)}')
raise
else:
the_access = None
the_port = None
the_part = None
if a_is_access:
the_access = a
the_port = b
the_part: PartModel = callables[wire.b.target_uid]
the_port_name = wire.b.target_port
else:
the_access = b
the_port = a
the_part: PartModel = callables[wire.a.target_uid]
the_port_name = wire.a.target_port
_prev = get_var(the_access)
_next = get_writeable(the_access)
other = get_var(the_port)
program_model.assertions.append(_next == other)
# NOTE(Jmeyer): This is an important step and an "oddity" in the code base: Functions
# in this codebase must be pure, so pass-by-reference is translated into an output var
# of the normal name and a *special* input parameter that takes the old value.
old = f'_old_{the_port_name}'
if old in the_part.ports:
old_port = the_part.evar(old)
a = _prev == old_port
program_model.assertions.append(a)
else:
logger.debug(
f'Mem write does not have associated old part {old}, part: {the_part}')
logger.info(f'Done w/ Block: {ns}')
def program_model(program: Program, context=None, global_memory=None):
# Need to load the "main" entry point and start symbolically translating the program.
main: Block = program.blocks[program.entry]
program_model = ProgramModel(ctx=context)
if global_memory:
program_model.global_mem = global_memory
call_stack = [Scope('', '', program_model.ctx, main)]
program_model.root = call_stack[0]
if main.block_type == Block.BLOCK_TYPE_FB:
if program.entry_point_db:
access = SymbolAccess('GlobalVariable', program.entry_point_db)
else:
# This clause supports building models without having to specify the (optional) static
# memory locations. Most users should call this function via the project module which
# will set all of this up in advance.
for name, sort in main.variables.statics:
program_model.global_mem.create('__main.' + name, sort)
access = SymbolAccess('GlobalVariable', '__main')
call_stack[-1].static_access_info = access
call_stack[-1].global_mem = program_model.global_mem
_model_block(program, program_model, main, call_stack)
return program_model
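# Illustrative end-to-end flow (not part of the original source). In this project
# models are normally built through fbdplc.project.build_program_model, but the
# core idea is roughly:
#   model = program_model(program)       # program: a parsed fbdplc Program
#   solver = z3.Solver(ctx=model.ctx)
#   solver.add(*model.assertions)
#   solver.check()                       # plus user assertions about ports/memory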
```
#### File: block_diagram_z3/fbdplc/sorts.py
```python
import typing
import z3
import logging
logger = logging.getLogger(__name__)
class Integer:
@staticmethod
def make(name: str, ctx: z3.Context):
return z3.BitVec(name, 16, ctx=ctx)
class ShortInteger:
@staticmethod
def make(name: str, ctx: z3.Context):
return z3.BitVec(name, 8, ctx=ctx)
class Boolean:
@staticmethod
def make(name: str, ctx: z3.Context):
return z3.Bool(name, ctx=ctx)
class Time:
@staticmethod
def make(name: str, ctx: z3.Context):
return z3.Int(name, ctx=ctx)
class AnyType:
pass
# Primitives
SORT_MAP = {
'Bool': Boolean,
'Int': Integer,
'Word': Integer, # TODO(Jmeyer)
'Time': Time # ???
}
class UDTSchema:
def __init__(self, name: str):
self.name = name
self.fields = {}
def __str__(self):
return f'UDTSchema({self.name})'
def make(self, name: str, ctx: z3.Context):
instance = UDTInstance(self)
for key, value in self.fields.items():
n = name + '.' + key
instance.fields[key] = value.make(n, ctx=ctx)
return instance
def iterfields(self, prefix=''):
for n, v in self.fields.items():
if isinstance(v, UDTSchema):
for _x in v.iterfields(n + '.'):
yield _x
else:
yield (prefix + n, v)
class UDTInstance:
def __init__(self, schema: UDTSchema):
self.schema = schema
self.fields = {}
def __str__(self):
return f'UDTInstance({self.schema.name})'
def __eq__(self, other):
if self.schema.name != other.schema.name:
raise RuntimeError(
f'Can not equate UDTs of different schema {self.schema} vs {other.schema}')
exprs = []
for name, value in self.fields.items():
other_value = other.fields[name]
exprs.append(value == other_value)
return z3.And(exprs)
def field(self, name: str):
return self.fields[name]
g_udt_archive = {}
def in_archive(sort):
if not isinstance(sort, UDTSchema):
return False
else:
return sort.name in g_udt_archive
def make_schema(name, parsed_schema):
if name in g_udt_archive:
# TODO(Jmeyer): Cross check that the types are, in fact, the same
logger.debug(f'Schema {name} is already in archive')
return g_udt_archive[name]
schema = UDTSchema(name)
for key, value in parsed_schema.items():
schema.fields[key] = value
register_udt(name, schema)
return schema
def register_udt(name, schema):
g_udt_archive[name] = schema
SORT_LIKE = typing.Union[UDTSchema, Integer, Boolean, Time]
def is_primitive(sort: SORT_LIKE):
return sort in SORT_MAP.values()
def children(sort: SORT_LIKE):
if isinstance(sort, UDTSchema):
return [(x, sort.fields[x]) for x in sort.fields]
else:
return []
def get_sort_factory(name):
if name in SORT_MAP:
return SORT_MAP[name]
if name in g_udt_archive:
return g_udt_archive[name]
raise RuntimeError(f'Sort "{name}" not known')
def clear_archive():
'''
    Remove all registered types from the singleton archive. This is an interim
    mechanism that is expected to be refactored once real-world feedback comes in.
'''
g_udt_archive.clear()
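# Illustrative usage (not part of the original source):
#   point_schema = make_schema('Point', {'x': Integer, 'y': Integer})
#   p = point_schema.make('p0', z3.Context())
#   p.field('x')   # -> a 16-bit z3 BitVec named 'p0.x'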
```
#### File: block_diagram_z3/fbdplc/wires.py
```python
class WireConnection:
pass
class NamedConnection(WireConnection):
def __init__(self, target_uid: int, target_port: str):
self.target_uid = target_uid
self.target_port = target_port
def __str__(self):
return f'NamedConnection(id={self.target_uid}, port={self.target_port})'
class IdentConnection(WireConnection):
def __init__(self, target_uid: int):
self.target_uid = target_uid
def __str__(self):
return f'IdentConnection(id={self.target_uid})'
class Wire:
'''
Wires in TIA's S7 XML format can have more than two terminals, but we always decompose them
into a series of two terminal blocks.
'''
def __init__(self, a: WireConnection, b: WireConnection):
self.a = a
self.b = b
```
#### File: tests/integration/test_statics_project.py
```python
from fbdplc.sorts import clear_archive
import z3
from fbdplc.modeling import ProgramModel
from fbdplc.analysis import exec_and_compare
import fbdplc.project
import glob
def unit_test(program_model: ProgramModel):
inputs = {}
outputs = {}
# Test Case #1: invoking MyCounter_DB directly!
# Invoke MyCounter_DB0 twice with 1 and 2 respectively
# Invoke MyCounter_DB1 once with 3
inputs['MyCounter_DB0.counter'] = 0
inputs['MyCounter_DB1.counter'] = 0
outputs['MyCounter_DB0.counter'] = 3
outputs['MyCounter_DB1.counter'] = 3
# Test Case #2: BlockIncrementer_DB invoked twice in a row
# Each invocation: .a += 1 + 2, .b += 1 + 3
inputs['BlockIncrementer_DB.a.counter'] = 0
inputs['BlockIncrementer_DB.b.counter'] = 0
outputs['BlockIncrementer_DB.a.counter'] = 6
outputs['BlockIncrementer_DB.b.counter'] = 8
exec_and_compare(program_model, inputs, outputs)
def main():
project = fbdplc.project.ProjectContext()
# The source files:
project.udt_srcs = glob.glob('testdata/statics_project/PLC_1/**/*.udt')
project.db_srcs = glob.glob(
'testdata/statics_project/PLC_1/Program blocks/*.db')
project.tag_srcs = glob.glob(
'testdata/statics_project/PLC_1/PLC tags/*.xml')
project.fb_srcs = glob.glob(
'testdata/statics_project/PLC_1/Program blocks/*.xml')
print(project.udt_srcs)
print(project.db_srcs)
print(project.tag_srcs)
print(project.fb_srcs)
# Execution options:
project.entry_point = 'Main_Safety_RTG1'
model = fbdplc.project.build_program_model(project)
unit_test(model)
def test():
clear_archive()
main()
```
#### File: block_diagram_z3/tests/test_s7db.py
```python
import fbdplc.s7db
import pprint
def test_example0():
parsed = fbdplc.s7db.parse_db_file('testdata/db/example0.db')
assert 'name' in parsed
assert 'symbols' in parsed
assert parsed['name'] == '"DebugData"'
symbols = parsed['symbols']
assert len(symbols) == 3
for s in symbols:
assert 'name' in symbols[s]
assert 'type' in symbols[s]
assert 'source' in parsed['initializers']
assert parsed['initializers']['source'] == '1'
def test_udt():
parsed = fbdplc.s7db.parse_udt_file(
'testdata/udt_project/PLC_1/PLC data types/Box.udt')
assert 'name' in parsed
assert 'symbols' in parsed
assert parsed['name'] == '"Box"'
symbols = parsed['symbols']
assert len(symbols) == 2
assert 'min' in symbols
assert 'max' in symbols
def test_udt_array_with_defaults():
'''
Parses a data structure with an array with declared defaults like so:
v : Array[0..15] of Bool := [false, true];
'''
parsed = fbdplc.s7db.parse_udt_file('testdata/udt/ArrayWithDefault.udt')
def test_udt_polygon():
'''
A composite of an integer and an array of UDT
'''
parsed = fbdplc.s7db.parse_udt_file('testdata/udt/Polygon.udt')
def test_udt_points():
'''
Two simple data structures of template<typename T> { x: T, y: T} where T in {Real, Int}
'''
parsed_f = fbdplc.s7db.parse_udt_file('testdata/udt/Pointf.udt')
parsed_i = fbdplc.s7db.parse_udt_file('testdata/udt/Pointi.udt')
def test_udt_nested_anon():
'''
A data structure with two nested anonymous data structures
'''
parsed = fbdplc.s7db.parse_udt_file('testdata/udt/AnonStruct.udt')
def test_db_anon():
parsed = fbdplc.s7db.parse_db_file('testdata/db/AnonTestDB.db')
def test_db_polygon():
'''
The structure of a DB can be adhoc but it can also be derived from a user defined type.
This example contains a DB mapped to the "Polygon" type (defined in "testdata/udt/Polygon.udt")
and also has a couple of special initializers. TODO(Jmeyer): Support for the init statements.
'''
parsed = fbdplc.s7db.parse_db_file('testdata/db/PolygonDB.db')
```
#### File: block_diagram_z3/tests/test_s7xml.py
```python
import fbdplc.s7xml as s7mxl
from lxml import etree
def _simple_or(networks):
assert(len(networks) == 1)
def test_simple_or_file():
'''
fdb that computes "a_or_b" := Or(ToSafety.a, ToSafety.b)
stored in testdata/blocks/simple_or.xml
cut from a larger programs
'''
networks = s7mxl.parse_from_file('testdata/blocks/simple_or.xml')
_simple_or(networks)
def test_simple_or_string():
text = ''
with open('testdata/blocks/simple_or.xml', 'r') as fh:
text = fh.read()
networks = s7mxl.parse_from_string(text)
_simple_or(networks)
UDT_PARSE_TEST_DATA = '''
<Member Name="status_a" Datatype=""FooBar"" Accessibility="Public">
<Sections>
<Section Name="None">
<Member Name="a" Datatype="Bool"/>
<Member Name="b" Datatype="Bool"/>
<Member Name="c" Datatype="Bool"/>
<Member Name="d" Datatype="Bool"/>
</Section>
</Sections>
</Member>
'''
def test_udt_parse():
tree = etree.fromstring(UDT_PARSE_TEST_DATA)
udt = s7mxl.parse_udt(tree)
assert udt.name == '"FooBar"'
assert len(udt.fields) == 4
def test_time_parse():
assert s7mxl.parse_time_string('T#2S') == 2000
assert s7mxl.parse_time_string('T#2000MS') == 2000
def test_tags0():
tags = s7mxl.parse_tags_from_file('testdata/tags/tags0.xml')
print(tags)
``` |
{
"source": "jmeyers314/astrophotoreduce",
"score": 2
} |
#### File: jmeyers314/astrophotoreduce/biweight.py
```python
import numpy as np
def median_absolute_deviation(x, M=None) :
if M is None:
M = np.median(x)
return np.median(abs(x - M))
def _biweight_location_work(x, M, MAD, c) :
u = (x-M) / (c*MAD)
w = abs(u) < 1.0
if w.sum() == 0.0:
return M
term = (1.0 - u[w]**2)**2
num = ((x[w]-M)*term).sum()
den = term.sum()
CBI = M + num/den
return CBI
def biweight_location(x, c=6.0, NaN=None, niter=4) :
if NaN:
x = x[np.isfinite(x)]
if len(x) == 0:
return np.NaN
CBI = np.median(x)
for i in xrange(niter):
M = CBI
MAD = median_absolute_deviation(x, M)
CBI = _biweight_location_work(x, M, MAD, c)
return CBI
def _biweight_scale_work(x, M, MAD, c) :
u = (x-M) / (c*MAD)
w = abs(u) < 1.0
if w.sum() == 0:
return np.NaN
term = u[w]**2
num = (len(x) * ((x[w]-M)**2 * (1-term)**4).sum())**0.5
den = abs(((1.0-term)*(1.0-5*term)).sum())
return num/den
def biweight_scale(x, zero=None, c=9.0, NaN=None, niter=4) :
if NaN:
x = x[np.isfinite(x)]
if zero is None:
M = biweight_location(x)
else:
M = zero
MAD = median_absolute_deviation(x, M)
SBI = MAD/0.6745
for i in xrange(niter):
MAD = SBI*0.6745
SBI = _biweight_scale_work(x, M, MAD, c)
return SBI
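# Illustrative comparison (not part of the original source) showing the robustness
# of the biweight location against a gross outlier:
#   x = np.array([1.0, 1.2, 0.9, 1.1, 50.0])
#   np.mean(x)             # ~10.8, dragged upward by the outlier
#   biweight_location(x)   # ~1.0, close to the bulk of the data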
```
#### File: jmeyers314/astrophotoreduce/cfa.py
```python
import numpy as np
def make_cfa(img):
cfa = np.zeros_like(img[:,:,0])
y, x = np.mgrid[0:cfa.shape[0], 0:cfa.shape[1]]
Rloc = (np.mod(x, 2) == 0) & (np.mod(y, 2) == 0)
Gloc = np.mod(x+y, 2) == 1
Bloc = (np.mod(x, 2) == 1) & (np.mod(y, 2) == 1)
cfa[Rloc] = img[Rloc,0]
cfa[Gloc] = img[Gloc,1]
cfa[Bloc] = img[Bloc,2]
return cfa
```
#### File: jmeyers314/astrophotoreduce/dark.py
```python
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
import numpy as np
import canont3i
import biweight
def find_badpixels(fns) :
bad_pixels = []
outliers = {}
for fn in fns :
print "Working on image: ", fn
cfa = canont3i.read_CR2_as_CFA(fn)
print "Image shape: ", cfa.shape
med = np.median(cfa)
print "Median: ", med
s = biweight.median_absolute_deviation(cfa, med)/0.6745
print "MAD: ", s*0.6745
print "StDev: ", s
image_outliers = np.argwhere(cfa > (med + 8*s))
image_outliers = np.concatenate([image_outliers, np.argwhere(cfa < (med - 8*s))])
print "Number of 8-sigma outliers:", image_outliers.shape[0]
print
print
for outlier in image_outliers:
key = tuple(outlier)
if key in outliers.keys():
outliers[key].append(fn)
else:
outliers[key] = [fn]
print "Bad pixels"
print "----------"
for pixel in outliers.keys():
if len(outliers[pixel]) > len(fns)**0.5:
print pixel, len(outliers[pixel])
bad_pixels.append(pixel)
return bad_pixels
def make_dark(fns, bad_pixels):
cfa0 = canont3i.read_CR2_as_CFA(fns[0])
height, width = cfa0.shape
stack = np.empty((height, width, len(fns)), dtype=cfa0.dtype)
for i, fn in enumerate(fns) :
print 'i: ', i
cfa = canont3i.read_CR2_as_CFA(fn)
stack[:,:,i] = cfa
print 'medianing'
med = np.median(stack)
print 'layer medianing'
dark = np.median(stack, axis=2)
for bp in bad_pixels:
dark[bp] = med
return dark
```
#### File: jmeyers314/astrophotoreduce/hqli.py
```python
import numpy as np
import scipy.signal as signal
# Four kernels to be convolved with CFA array
def _G_at_BR(cfa):
kernel = [[ 0, 0, -1.0, 0, 0],
[ 0, 0, 2, 0, 0],
[-1.0, 2, 4, 2, -1.0],
[ 0, 0, 2, 0, 0],
[ 0, 0, -1.0, 0, 0]]
return signal.convolve(cfa, kernel, mode='same')/8.
# red value at green pixel in red row and blue col or...
# blue value at green pixel in blue row and red col.
def _RB_at_G_in_RBrow_BRcol(cfa):
kernel = [[ 0, 0, 0.5, 0, 0],
[ 0, -1, 0, -1, 0],
[-1, 4, 5, 4, -1],
[ 0, -1, 0, -1, 0],
[ 0, 0, 0.5, 0, 0]]
return signal.convolve(cfa, kernel, mode='same')/8.
# red value at green pixel in blue row and red col or...
# blue value at green pixel in red row and blue col.
def _RB_at_G_in_BRrow_RBcol(cfa):
kernel = [[ 0, 0, -1, 0, 0],
[ 0, -1, 4, -1, 0],
[0.5, 0, 5, 0, 0.5],
[ 0, -1, 4, -1, 0],
[ 0, 0, -1, 0, 0]]
return signal.convolve(cfa, kernel, mode='same')/8.
# red value at blue pixel or...
# blue value at red pixel.
def _RB_at_BR(cfa):
kernel = [[ 0, 0, -1.5, 0, 0],
[ 0, 2, 0, 2, 0],
[-1.5, 0, 6, 0, -1.5],
[ 0, 2, 0, 2, 0],
[ 0, 0, -1.5, 0, 0]]
return signal.convolve(cfa, kernel, mode='same')/8.
def hqli(cfa):
# initialize output arrays
R = np.zeros_like(cfa, dtype=np.float64)
G = np.zeros_like(cfa, dtype=np.float64)
B = np.zeros_like(cfa, dtype=np.float64)
# coordinate index arrays
y, x = np.mgrid[0:cfa.shape[0], 0:cfa.shape[1]]
# create groups of indices based on Bayer pattern
Rloc = (np.mod(x, 2) == 0) & (np.mod(y, 2) == 0)
Gloc = np.mod(x+y, 2) == 1
Bloc = (np.mod(x, 2) == 1) & (np.mod(y, 2) == 1)
G_in_Brow_Rcol = (np.mod(x, 2) == 0) & (np.mod(y, 2) == 1)
G_in_Rrow_Bcol = (np.mod(x, 2) == 1) & (np.mod(y, 2) == 0)
# copy data that doesn't need interpolation
R[Rloc] = cfa[Rloc]
G[Gloc] = cfa[Gloc]
B[Bloc] = cfa[Bloc]
# fill in the green data at the blue/red locations
tmp = _G_at_BR(cfa)
G[Rloc] = tmp[Rloc]
G[Bloc] = tmp[Bloc]
# fill in the blue/red data
tmp = _RB_at_G_in_BRrow_RBcol(cfa)
B[G_in_Rrow_Bcol] = tmp[G_in_Rrow_Bcol]
R[G_in_Brow_Rcol] = tmp[G_in_Brow_Rcol]
tmp = _RB_at_G_in_RBrow_BRcol(cfa)
B[G_in_Brow_Rcol] = tmp[G_in_Brow_Rcol]
R[G_in_Rrow_Bcol] = tmp[G_in_Rrow_Bcol]
tmp = _RB_at_BR(cfa)
B[Rloc] = tmp[Rloc]
R[Bloc] = tmp[Bloc]
R[R<0] = 0
G[G<0] = 0
B[B<0] = 0
if cfa.dtype == np.uint8:
R[R>255] = 255
G[G>255] = 255
B[B>255] = 255
else:
R[R>65535] = 65535
G[G>65535] = 65535
B[B>65535] = 65535
return np.array(np.dstack([R, G, B]), dtype=cfa.dtype)
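# Illustrative usage (not part of the original source): demosaicing a synthetic
# 8-bit Bayer mosaic laid out in the RGGB pattern assumed by the index masks above:
#   cfa = np.random.randint(0, 256, (64, 64), dtype=np.uint8)
#   rgb = hqli(cfa)   # -> (64, 64, 3) uint8 RGB image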
```
#### File: jmeyers314/astrophotoreduce/pgm.py
```python
import re
import subprocess
import numpy as np
import matplotlib.image as mpimg
# Stole this function from
# http://stackoverflow.com/questions/7368739/numpy-and-16-bit-pgm/7369986#7369986
def read_pgm(filename, byteorder='>'):
"""Return image data from a raw PGM file as numpy array.
Format specification: http://netpbm.sourceforge.net/doc/pgm.html
"""
with open(filename, 'rb') as f:
buffer_ = f.read()
try:
header, width, height, maxval = re.search(
b"(^P5\s(?:\s*#.*[\r\n])*"
b"(\d+)\s(?:\s*#.*[\r\n])*"
b"(\d+)\s(?:\s*#.*[\r\n])*"
b"(\d+)\s(?:\s*#.*[\r\n]\s)*)", buffer_).groups()
except AttributeError:
raise ValueError("Not a raw PGM file: '%s'" % filename)
return np.frombuffer(buffer_,
dtype='u1' if int(maxval) < 256 else byteorder+'u2',
count=int(width)*int(height),
offset=len(header)
).reshape((int(height), int(width)))
def write_pgm(image, filename):
""" Write grayscale image in PGM format to file.
"""
height, width = image.shape
maxval = image.max()
with open(filename, 'wb') as f:
f.write('P5 {} {} {}\n'.format(width, height, maxval))
# not sure if next line works universally, but seems to work on my mac
image.tofile(f)
def CR2_to_pgm(filename):
""" Use dcraw command line tool to convert Canon RAW format file (.CR2 extension) to PGM file.
"""
cmd = 'dcraw -D -4 -j -t 0 '+filename
subprocess.call(cmd, shell=True)
def jpg_to_pgm(filename, Rfile, Gfile, Bfile):
""" Use matplotlib to convert color jpg file into three PGM files, one for each color channel.
"""
img = mpimg.imread(filename)
write_pgm(img[:,:,0], Rfile)
write_pgm(img[:,:,1], Gfile)
write_pgm(img[:,:,2], Bfile)
``` |
{
"source": "jmeyers314/batoid",
"score": 3
} |
#### File: batoid/batoid/parse.py
```python
import batoid
import numpy as np
def parse_obscuration(config):
typ = config.pop('type')
if typ in [
'ObscCircle', 'ObscAnnulus', 'ObscRay', 'ObscRectangle', 'ObscPolygon'
]:
evalstr = "batoid.{}(**config)".format(typ)
return eval(evalstr)
elif typ == 'ObscNegation':
original = parse_obscuration(config['original'])
return batoid.ObscNegation(original)
elif typ in ['ObscUnion', 'ObscIntersection']:
items = [parse_obscuration(c) for c in config['items']] # noqa
evalstr = "batoid.{}(items)".format(typ)
return eval(evalstr)
elif typ.startswith('Clear'): # triggers negation
# put type back into config, but with Clear->Obsc
config['type'] = typ.replace("Clear", "Obsc")
return batoid.ObscNegation(parse_obscuration(config))
else:
raise ValueError(f"Unknown obscuration type {typ}")
def parse_surface(config):
typ = config.pop('type')
evalstr = "batoid.{}(**config)".format(typ)
return eval(evalstr)
def parse_coordSys(config, coordSys=batoid.CoordSys()):
"""
@param config configuration dictionary
@param coordSys sys to which transformations in config are added
"""
shift = [0.0, 0.0, 0.0]
if any(x in config for x in ['x', 'y', 'z']):
if 'shift' in config:
raise ValueError("Cannot specify both shift and x/y/z")
x = config.pop('x', 0.0)
y = config.pop('y', 0.0)
z = config.pop('z', 0.0)
shift = [x, y, z]
elif 'shift' in config:
shift = config.pop('shift')
if shift != [0.0, 0.0, 0.0]:
coordSys = coordSys.shiftLocal(shift)
# At most one (nonzero) rotation can be included and is applied after the shift.
rotXYZ = np.array([config.pop('rot' + axis, 0.0) for axis in 'XYZ'])
axes = np.where(rotXYZ != 0)[0]
if len(axes) > 1:
raise ValueError('Cannot specify rotation about more than one axis.')
elif len(axes) == 1:
axis, angle = axes[0], rotXYZ[axes[0]]
rotator = (batoid.RotX, batoid.RotY, batoid.RotZ)[axis](angle)
coordSys = coordSys.rotateLocal(rotator)
return coordSys
def parse_optic(config,
coordSys=batoid.CoordSys(),
inMedium=batoid.ConstMedium(1.0),
outMedium=None):
"""
@param config configuration dictionary
@param coordSys sys to which transformations in config are added
@param inMedium default in Medium, often set by optic parent
@param outMedium default out Medium, often set by optic parent
"""
if 'obscuration' in config:
obscuration = parse_obscuration(config.pop('obscuration'))
else:
obscuration = None
name = config.pop('name', "")
if 'coordSys' in config:
coordSys = parse_coordSys(config.pop('coordSys'), coordSys)
inMedium = parse_medium(config.pop('inMedium', inMedium))
outMedium = parse_medium(config.pop('outMedium', outMedium))
if outMedium is None:
outMedium = inMedium
typ = config.pop('type')
if typ == 'Mirror':
surface = parse_surface(config.pop('surface'))
return batoid.optic.Mirror(
surface, name=name,
coordSys=coordSys, obscuration=obscuration,
inMedium=inMedium, outMedium=outMedium)
elif typ == 'RefractiveInterface':
surface = parse_surface(config.pop('surface'))
return batoid.optic.RefractiveInterface(
surface, name=name,
coordSys=coordSys, obscuration=obscuration,
inMedium=inMedium, outMedium=outMedium)
elif typ == 'Baffle':
surface = parse_surface(config.pop('surface'))
return batoid.optic.Baffle(
surface, name=name,
coordSys=coordSys, obscuration=obscuration,
inMedium=inMedium, outMedium=outMedium)
elif typ == 'Detector':
surface = parse_surface(config.pop('surface'))
return batoid.optic.Detector(
surface, name=name,
coordSys=coordSys, obscuration=obscuration,
inMedium=inMedium, outMedium=outMedium)
elif typ == 'Lens':
medium = parse_medium(config.pop('medium'))
itemsConfig = config.pop('items')
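        # A Lens is assumed to consist of exactly two refractive interfaces:
        # items[0] is the entrance surface (inMedium -> medium) and items[1] is
        # the exit surface (medium -> outMedium).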
items = [
parse_optic(
itemsConfig[0],
coordSys=coordSys,
inMedium=inMedium,
outMedium=medium
),
parse_optic(
itemsConfig[1],
coordSys=coordSys,
inMedium=medium,
outMedium=outMedium
)
]
return batoid.optic.Lens(
items, name=name, coordSys=coordSys,
inMedium=inMedium, outMedium=outMedium)
elif typ == 'CompoundOptic':
itemsConfig = config.pop('items')
items = [
parse_optic(
iC,
coordSys=coordSys,
inMedium=inMedium,
outMedium=outMedium
)
for iC in itemsConfig
]
# Look for a few more possible attributes
kwargs = {}
for k in ['backDist', 'sphereRadius', 'pupilSize', 'pupilObscuration']:
if k in config:
kwargs[k] = config[k]
if 'stopSurface' in config:
kwargs['stopSurface'] = parse_optic(config['stopSurface'])
return batoid.optic.CompoundOptic(
items, inMedium=inMedium, outMedium=outMedium,
name=name, coordSys=coordSys, **kwargs)
elif typ == 'Interface':
surface = parse_surface(config.pop('surface'))
return batoid.optic.Interface(
surface, name=name,
coordSys=coordSys
)
else:
raise ValueError("Unknown optic type")
def parse_medium(config):
from numbers import Real
if config is None:
return None
if isinstance(config, batoid.Medium):
return config
if isinstance(config, Real):
return batoid.ConstMedium(config)
# This dict may be referenced again in an ancestor config, so copy it
# before parsing
config = dict(**config)
typ = config.pop('type')
# TableMedium, Sellmeier, ConstMedium, SumitaMedium, Air end up here...
evalstr = "batoid.{}(**config)".format(typ)
return eval(evalstr)
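# Illustrative sketch: parse_medium accepts an existing Medium, a bare number
# (treated as a constant refractive index), or a config dict, e.g.
#     parse_medium(1.5)                               # -> batoid.ConstMedium(1.5)
#     parse_medium({'type': 'ConstMedium', 'n': 1.5})  # keyword name 'n' assumed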
def parse_table(config):
return batoid.Table(config['args'], config['vals'], config['interp'])
```
#### File: batoid/tests/test_OPDScreen.py
```python
import batoid
import numpy as np
from test_helpers import timer, do_pickle, init_gpu, rays_allclose
@timer
def test_prescreen():
"""Add an OPDScreen in front of LSST entrance pupil. The OPD that comes out
should be _negative_ the added phase delay by convention.
"""
lsst = batoid.Optic.fromYaml("LSST_r.yaml")
wavelength = 620e-9
z_ref = batoid.analysis.zernikeGQ(
lsst, 0, 0, wavelength, rings=10, reference='chief', jmax=37
)
rng = np.random.default_rng(577)
for i in range(4, 38):
amplitude = rng.uniform(-1, 1)
zern = batoid.Zernike(
np.array([0]*i+[amplitude])*wavelength,
R_outer=4.18
)
tel = batoid.CompoundOptic(
(
batoid.optic.OPDScreen(
batoid.Plane(),
zern,
name='PS',
obscuration=batoid.ObscNegation(batoid.ObscCircle(5.0)),
coordSys=lsst.stopSurface.coordSys
),
*lsst.items
),
name='PS0',
backDist=lsst.backDist,
pupilSize=lsst.pupilSize,
inMedium=lsst.inMedium,
stopSurface=lsst.stopSurface,
sphereRadius=lsst.sphereRadius,
pupilObscuration=lsst.pupilObscuration
)
do_pickle(tel)
z_test = batoid.analysis.zernikeGQ(
tel, 0, 0, wavelength, rings=10, reference='chief', jmax=37
)
z_expect = np.zeros_like(z_test)
z_expect[i] = -amplitude # Longer OPL => negative OPD
np.testing.assert_allclose(
(z_test-z_ref)[4:], z_expect[4:],
rtol=0, atol=5e-4
)
@timer
def test_zeroscreen():
"""Add a zero phase OPDScreen in front of LSST entrance pupil. Should have
_no_ effect.
"""
lsst = batoid.Optic.fromYaml("LSST_r.yaml")
screens = [
batoid.optic.OPDScreen(
batoid.Plane(),
batoid.Plane(),
name='PS',
coordSys=lsst.stopSurface.coordSys
),
batoid.optic.OPDScreen(
batoid.Paraboloid(100.0),
batoid.Plane(),
name='PS',
coordSys=lsst.stopSurface.coordSys
),
batoid.optic.OPDScreen(
batoid.Quadric(11.0, -0.5),
batoid.Plane(),
name='PS',
coordSys=lsst.stopSurface.coordSys
),
batoid.optic.OPDScreen(
batoid.Zernike([0, 0, 0, 0, 300e-9, 0, 0, 400e-9, -600e-9]),
batoid.Zernike([0]*22),
name='PS',
coordSys=lsst.stopSurface.coordSys
)
]
for screen in screens:
tel = batoid.CompoundOptic(
(screen, *lsst.items),
name='PS0',
backDist=lsst.backDist,
pupilSize=lsst.pupilSize,
inMedium=lsst.inMedium,
stopSurface=lsst.stopSurface,
sphereRadius=lsst.sphereRadius,
pupilObscuration=lsst.pupilObscuration
)
do_pickle(tel)
rng = np.random.default_rng(57)
thx = np.deg2rad(rng.uniform(-1, 1))
thy = np.deg2rad(rng.uniform(-1, 1))
rays = batoid.RayVector.asPolar(
optic=tel, wavelength=620e-9,
theta_x=thx, theta_y=thy,
nrad=2, naz=6
)
tf1 = tel.traceFull(rays)
tf2 = lsst.traceFull(rays)
np.testing.assert_allclose(
tf1['PS']['in'].v,
tf1['PS']['out'].v,
rtol=0, atol=1e-14
)
for key in tf2:
rays_allclose(
tf1[key]['out'],
tf2[key]['out'],
atol=1e-13
)
if __name__ == '__main__':
init_gpu()
test_prescreen()
test_zeroscreen()
``` |
{
"source": "jmeyers314/DP_SNe",
"score": 3
} |
#### File: DP_SNe/dpmm/density.py
```python
import numpy as np
from scipy.special import gamma
def multivariate_t_density(nu, mu, Sig, x):
"""Return multivariate t distribution: t_nu(x | mu, Sig), in d-dimensions."""
detSig = np.linalg.det(Sig)
invSig = np.linalg.inv(Sig)
d = len(mu)
coef = gamma(nu/2.0+d/2.0) * detSig**(-0.5)
coef /= gamma(nu/2.0) * nu**(d/2.0)*np.pi**(d/2.0)
if x.ndim == 1:
einsum = np.dot(x-mu, np.dot(invSig, x-mu))
else:
einsum = np.einsum("...i,ij,...j", x-mu, invSig, x-mu) # (x-mu).T * invSig * (x-mu)
return coef * (1.0 + einsum/nu)**(-(nu+d)/2.0)
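# Non-standardized Student's t density with location mu, scale sigsqr, and nu
# degrees of freedom.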
def t_density(nu, mu, sigsqr, x):
c = gamma((nu+1.)/2.)/gamma(nu/2.)/np.sqrt(nu*np.pi*sigsqr)
return c*(1.0+1./nu*((x-mu)**2/sigsqr))**(-(1.+nu)/2.0)
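# Scaled inverse chi-squared density with nu degrees of freedom and scale sigsqr.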
def scaled_IX_density(nu, sigsqr, x):
return (1.0/gamma(nu/2.0) *
(nu*sigsqr/2.0)**(nu/2.0) *
x**(-nu/2.0-1.0) *
np.exp(-nu*sigsqr/(2.0*x)))
def normal_density(mu, var, x):
return np.exp(-0.5*(x-mu)**2/var)/np.sqrt(2*np.pi*var)
```
#### File: DP_SNe/tests/test_plot_samples.py
```python
import numpy as np
import matplotlib.pyplot as plt
import dpmm
from test_utils import timer
from dpmm.utils import plot_ellipse, random_wish, random_invwish
from unittest import skip
@skip
@timer
def test_GaussianMeanKnownVariance():
mu_0 = 1.1
sigsqr_0 = 0.42
sigsqr = 0.21
model = dpmm.GaussianMeanKnownVariance(mu_0, sigsqr_0, sigsqr)
samples = model.sample(size=1000)
f = plt.figure(figsize=(5, 3))
ax = f.add_subplot(111)
ax.hist(samples, bins=30, normed=True, alpha=0.5, color='k')
xlim = np.percentile(samples, [1.0, 99.0])
ax.set_xlim(xlim)
x = np.linspace(xlim[0], xlim[1], 100)
y = model(x)
ax.plot(x, y, c='k', lw=3)
ax.set_xlabel("$\mu$")
ax.set_ylabel("Pr($\mu$)")
f.tight_layout()
ax.set_title("GaussianMeanKnownVariance")
f.savefig("plots/GaussianMeanKnownVariance_samples.png")
@skip
@timer
def test_InvGamma():
alpha = 1.4
beta = 1.3
mu = 1.2
model = dpmm.InvGamma(alpha, beta, mu)
samples = model.sample(size=1000)
xlim = np.percentile(samples, [0.0, 95.0])
f = plt.figure(figsize=(5, 3))
ax = f.add_subplot(111)
ax.hist(samples, bins=30, range=xlim, normed=True, alpha=0.5, color='k')
ax.set_xlim(xlim)
x = np.linspace(xlim[0], xlim[1], 100)
y = model(x)
ax.plot(x, y, c='k', lw=3)
ax.set_xlabel("$\sigma^2$")
ax.set_ylabel("Pr($\sigma^2$)")
f.tight_layout()
ax.set_title("InvGamma")
f.savefig("plots/InvGamma_samples.png")
@skip
@timer
def test_NormInvChi2():
mu_0 = 1.5
kappa_0 = 2.3
sigsqr_0 = 0.24
nu_0 = 2
model = dpmm.NormInvChi2(mu_0, kappa_0, sigsqr_0, nu_0)
samples = model.sample(size=1000)
mu_samples = np.array([s[0] for s in samples])
var_samples = np.array([s[1] for s in samples])
xlim = np.percentile(mu_samples, [2.5, 97.5])
f = plt.figure(figsize=(5, 3))
ax = f.add_subplot(111)
ax.hist(mu_samples, bins=30, range=xlim, normed=True, alpha=0.5, color='k')
ax.set_xlim(xlim)
x = np.linspace(xlim[0], xlim[1], 100)
y = model.marginal_mu(x)
ax.plot(x, y, c='k', lw=3)
ax.set_xlabel("$\mu$")
ax.set_ylabel("Pr($\mu$)")
f.tight_layout()
ax.set_title("NormInvChi2")
f.savefig("plots/NormInvChi2_mu_samples.png")
xlim = np.percentile(var_samples, [0.0, 95.0])
f = plt.figure(figsize=(5, 3))
ax = f.add_subplot(111)
ax.hist(var_samples, bins=30, range=xlim, normed=True, alpha=0.5, color='k')
ax.set_xlim(xlim)
x = np.linspace(xlim[0], xlim[1], 100)
y = model.marginal_var(x)
ax.plot(x, y, c='k', lw=3)
ax.set_xlabel("$\sigma^2$")
ax.set_ylabel("Pr($\sigma^2$)")
f.tight_layout()
ax.set_title("NormInvChi2")
f.savefig("plots/NormInvChi2_var_samples.png")
@skip
@timer
def test_NormInvGamma():
mu_0 = 1.5
V_0 = 1.2
a_0 = 1.24
b_0 = 1.1
model = dpmm.NormInvGamma(mu_0, V_0, a_0, b_0)
samples = model.sample(size=1000)
mu_samples = np.array([s[0] for s in samples])
var_samples = np.array([s[1] for s in samples])
xlim = np.percentile(mu_samples, [2.5, 97.5])
f = plt.figure(figsize=(5, 3))
ax = f.add_subplot(111)
ax.hist(mu_samples, bins=30, range=xlim, normed=True, alpha=0.5, color='k')
ax.set_xlim(xlim)
x = np.linspace(xlim[0], xlim[1], 100)
y = model.marginal_mu(x)
ax.plot(x, y, c='k', lw=3)
ax.set_xlabel("$\mu$")
ax.set_ylabel("Pr($\mu$)")
f.tight_layout()
ax.set_title("NormInvGamma")
f.savefig("plots/NormInvGamma_mu_samples.png")
xlim = np.percentile(var_samples, [0.0, 95.0])
f = plt.figure(figsize=(5, 3))
ax = f.add_subplot(111)
ax.hist(var_samples, bins=30, range=xlim, normed=True, alpha=0.5, color='k')
ax.set_xlim(xlim)
x = np.linspace(xlim[0], xlim[1], 100)
y = model.marginal_var(x)
ax.plot(x, y, c='k', lw=3)
ax.set_xlabel("$\sigma^2$")
ax.set_ylabel("Pr($\sigma^2$)")
f.tight_layout()
ax.set_title("NormInvGamma")
f.savefig("plots/NormInvGamma_var_samples.png")
@skip
@timer
def test_NormInvWish():
mu_0 = np.r_[0.3, -0.2]
d = len(mu_0)
Lam_0 = np.linalg.inv(np.array([[2, 1.1], [1.1, 1.2]]))
kappa_0 = 2.1
nu_0 = 8
model = dpmm.NormInvWish(mu_0, kappa_0, Lam_0, nu_0)
# First check some numerics
Nsample = 5000
samples = model.sample(size=Nsample)
mu_samples = [s[0] for s in samples]
cov_samples = [s[1] for s in samples]
mean = np.mean(mu_samples, axis=0)
std = np.std(mu_samples, axis=0)/np.sqrt(Nsample)
print "NormInvWish mu_0 = {}".format(mu_0)
print "NormInvWish E(mu) = {} +/- {}".format(mean, std)
mean_cov = np.mean(cov_samples, axis=0)
std_cov = np.std(cov_samples, axis=0)/np.sqrt(Nsample)
print "NormInvWish (Lam_0)^(-1)/(nu_0-d-1) = \n{}".format(np.linalg.inv(Lam_0)/(nu_0-d-1))
print "NormInvWish E(Sig) = \n{}\n +/-\n{}".format(mean_cov, std_cov)
# Now try some plots with different values of kappa_0 and nu_0
f = plt.figure(figsize=(7, 7))
for i, (kappa_0, nu_0) in enumerate(zip([0.4, 0.4, 6.5, 6.5],
[10, 4, 10, 4])):
model = dpmm.NormInvWish(mu_0, kappa_0, Lam_0, nu_0)
samples = model.sample(size=25)
ax = f.add_subplot(2, 2, i+1)
for sample in samples:
mu, Sig = sample
plot_ellipse(mu, Sig, ax=ax, facecolor='none', edgecolor='k', alpha=0.2)
plot_ellipse(mu_0, np.linalg.inv(Lam_0)/(nu_0-d-1), ax=ax, facecolor='none', edgecolor='r')
ax.set_xlim(-3, 3)
ax.set_ylim(-3, 3)
ax.axvline(mu_0[0], c='r', alpha=0.1)
ax.axhline(mu_0[1], c='r', alpha=0.1)
ax.set_title(r"$\kappa_0$={}, $\nu_0$={}".format(kappa_0, nu_0))
        print(np.mean([s[1] for s in samples], axis=0))
f.savefig("plots/NormInvWish_samples.png")
@skip
@timer
def test_random_wish():
dof = 3
S = np.array([[1.0, 0.25], [0.25, 0.5]])
Nsamples = 5000
samples = random_wish(dof, S, size=Nsamples)
mean = np.mean(samples, axis=0)
std = np.std(samples, axis=0)/np.sqrt(Nsamples)
print "E(wish) = \n{}".format(dof * S)
print "<wish> = \n{}\n +/-\n{}".format(mean, std)
@skip
@timer
def test_random_invwish():
dof = 6
d = 2
S = np.array([[1.0, 0.25], [0.25, 0.5]])
invS = np.linalg.inv(S)
Nsamples = 5000
samples = random_invwish(dof, invS, size=Nsamples)
mean = np.mean(samples, axis=0)
std = np.std(samples, axis=0)/np.sqrt(Nsamples)
print "E(invwish) = \n{}".format(S/(dof-d-1))
print "<invwish> = \n{}\n +/-\n{}".format(mean, std)
@skip
@timer
def test_ellipse_plotter():
f = plt.figure(figsize=(7, 7))
for i, Sig in enumerate([np.array([[1.0, 0.0], [0.0, 0.25]]),
np.array([[0.25, 0.0], [0.0, 1.0]]),
np.array([[1.0, 0.8], [0.8, 1.0]]),
np.array([[1.0, -0.8], [-0.8, 1.0]])]):
ax = f.add_subplot(2, 2, i+1)
ax.set_xlim(-1, 1)
ax.set_ylim(-1, 1)
plot_ellipse([0., 0.], Sig)
ax.set_title("$\Sigma$={}".format(Sig))
f.tight_layout()
f.savefig("plots/ellipse.png")
if __name__ == "__main__":
test_GaussianMeanKnownVariance()
test_InvGamma()
test_NormInvChi2()
test_NormInvGamma()
test_NormInvWish()
test_random_wish()
test_random_invwish()
test_ellipse_plotter()
```
#### File: DP_SNe/tests/test_prior.py
```python
import warnings
import numpy as np
from scipy.integrate import quad, dblquad, tplquad
import dpmm
from test_utils import timer
@timer
def test_GaussianMeanKnownVariance():
mu_0 = 0.15
sigsqr_0 = 1.2
sigsqr = 0.15
model = dpmm.GaussianMeanKnownVariance(mu_0, sigsqr_0, sigsqr)
D = np.r_[1.0, 2.2, 1.1, -1.13]
mus = np.r_[1.1, 2.0, 0.1]
# Check prior density
r = quad(model, -np.inf, np.inf)
np.testing.assert_almost_equal(r[0], 1.0, 10,
"GaussianMeanKnownVariance prior density does not integrate to 1.0")
# Check prior predictive density
r = quad(model.pred, -np.inf, np.inf)
np.testing.assert_almost_equal(
r[0], 1.0, 10,
"GaussianMeanKnownVariance prior predictive density does not integrate to 1.0")
# Check posterior density
r = quad(model.post(D), -np.inf, np.inf)
np.testing.assert_almost_equal(
r[0], 1.0, 10,
"GaussianMeanKnownVariance posterior density does not integrate to 1.0")
# Check posterior predictive density
r = quad(model.post(D).pred, -np.inf, np.inf)
np.testing.assert_almost_equal(
r[0], 1.0, 10,
"GaussianMeanKnownVariance posterior predictive density does not integrate to 1.0")
# Check that the likelihood integrates to 1.
r = quad(lambda x: model.like1(x, mu=1.1), -np.inf, np.inf)
np.testing.assert_almost_equal(r[0], 1.0, 10,
"GaussianMeanKnownVariance likelihood does not integrate to 1.0")
# # Check that evidence (of single data point) integrates to 1.
# r = quad(lambda x: model.evidence(x), -np.inf, np.inf)
# np.testing.assert_almost_equal(r[0], 1.0, 10,
# "GaussianMeanKnownVariance evidence does not integrate to 1.0")
# # Check evidence for two data points.
# r = dblquad(lambda x, y: model.evidence([x, y]),
# -np.inf, np.inf,
# lambda x: -np.inf, lambda x: np.inf)
# np.testing.assert_almost_equal(r[0], 1.0, 5,
# "GaussianMeanKnownVariance evidence does not integrate to 1.0")
# # Check that posterior = prior * likelihood / evidence
# post = model.post(D)
# post1 = [model(mu)*model.likelihood(mu, D=D) / model.evidence(D) for mu in mus]
# post2 = [post(mu) for mu in mus]
# np.testing.assert_array_almost_equal(
# post1, post2, 10,
# "GaussianMeanKnownVariance posterior != prior * likelihood / evidence")
# Check that posterior is proportional to prior * likelihood
# Add some more data points
posts = [model.post(D)(mu) for mu in mus]
posts2 = [model(mu)*model.likelihood(D, mu) for mu in mus]
np.testing.assert_array_almost_equal(
posts/posts[0], posts2/posts2[0], 5,
"GaussianMeanKnownVariance posterior not proportional to prior * likelihood.")
# Check that integrating out theta yields the prior predictive.
xs = [0.1, 0.2, 0.3, 0.4]
preds1 = np.array([quad(lambda theta: model(theta) * model.like1(x, theta), -np.inf, np.inf)[0] for x in xs])
preds2 = np.array([model.pred(x) for x in xs])
np.testing.assert_array_almost_equal(
preds1/preds1[0], preds2/preds2[0], 5,
"Prior predictive not proportional to integral of likelihood * prior")
@timer
def test_InvGamma():
alpha = 1.1
beta = 1.2
mu = 0.1
ig = dpmm.InvGamma(alpha, beta, mu)
ig.sample()
# Check prior density
r = quad(ig, 0.0, np.inf)
np.testing.assert_almost_equal(r[0], 1.0, 5, "InvGamma prior density does not integrate to 1.0")
# Check prior predictive density
r = quad(ig.pred, -np.inf, np.inf)
np.testing.assert_almost_equal(r[0], 1.0, 10,
"InvGamma prior predictive density does not integrate to 1.0")
# Check posterior density
D = [1.0, 2.0, 3.0]
r = quad(ig.post(D), 0.0, np.inf)
np.testing.assert_almost_equal(r[0], 1.0, 7,
"InvGamma posterior density does not integrate to 1.0")
# Check posterior predictive density
r = quad(ig.post(D).pred, -np.inf, np.inf)
np.testing.assert_almost_equal(
r[0], 1.0, 10, "InvGamma posterior predictive density does not integrate to 1.0")
# Check that the likelihood integrates to 1.
r = quad(lambda x: ig.like1(x, var=2.1), -np.inf, np.inf)
np.testing.assert_almost_equal(r[0], 1.0, 10,
"InvGamma likelihood does not integrate to 1.0")
# Check that posterior is proportional to prior * likelihood
# Add some more data points
D = np.array([1.0, 2.0, 3.0, 2.2, 2.3, 1.2])
vars_ = [0.7, 1.1, 1.2, 1.5]
posts = [ig.post(D)(var) for var in vars_]
posts2 = [ig(var)*ig.likelihood(D, var) for var in vars_]
np.testing.assert_array_almost_equal(
posts/posts[0], posts2/posts2[0], 5,
"InvGamma posterior not proportional to prior * likelihood.")
# Check mean and variance
mean = 1./beta/(alpha-1.0)
np.testing.assert_almost_equal(quad(lambda x: ig(x)*x, 0.0, np.inf)[0], mean, 10,
"InvGamma has wrong mean.")
var = beta**(-2)/(alpha-1)**2/(alpha-2)
with warnings.catch_warnings():
warnings.simplefilter('ignore')
np.testing.assert_almost_equal(quad(lambda x: ig(x)*(x-mean)**2, 0.0, np.inf)[0], var, 5,
"InvGamma has wrong variance.")
# Check that integrating out theta yields the prior predictive.
xs = [0.1, 0.2, 0.3, 0.4]
preds1 = np.array([quad(lambda theta: ig(theta) * ig.like1(x, theta), 0, np.inf)[0] for x in xs])
preds2 = np.array([ig.pred(x) for x in xs])
np.testing.assert_array_almost_equal(
preds1/preds1[0], preds2/preds2[0], 5,
"Prior predictive not proportional to integral of likelihood * prior")
@timer
def test_InvGamma2D(full=False):
alpha = 1.1
beta = 1.2
mu = np.r_[0.1, 0.2]
ig2d = dpmm.InvGamma2D(alpha, beta, mu)
ig2d.sample()
# Check prior density
r = quad(ig2d, 0.0, np.inf)
np.testing.assert_almost_equal(
r[0], 1.0, 5, "InvGamma2D prior density does not integrate to 1.0")
if __name__ == '__main__' and full:
# Check prior predictive density
r = dblquad(lambda x, y: ig2d.pred(np.r_[x, y]),
-np.inf, np.inf,
lambda x: -np.inf, lambda x: np.inf)
np.testing.assert_almost_equal(
r[0], 1.0, 5, "InvGamma2D prior predictive density does not integrate to 1.0")
# Check posterior density
D = np.array([[0.1, 0.2], [0.2, 0.3]])
r = quad(ig2d.post(D), 0.0, np.inf)
np.testing.assert_almost_equal(r[0], 1.0, 7,
"InvGamma2D posterior density does not integrate to 1.0")
# Check posterior predictive density
r = dblquad(lambda x, y: ig2d.post(D).pred(np.r_[x, y]),
-np.inf, np.inf,
lambda x: -np.inf, lambda x: np.inf)
np.testing.assert_almost_equal(
r[0], 1.0, 5, "InvGamma2D posterior predictive density does not integrate to 1.0")
# Check that the likelihood integrates to 1.
r = dblquad(lambda x, y: ig2d.like1(np.r_[x, y], var=2.1),
-np.inf, np.inf,
lambda x: -np.inf, lambda x: np.inf)
np.testing.assert_almost_equal(r[0], 1.0, 10,
"InvGamma2D likelihood does not integrate to 1.0")
# Check that posterior is proportional to prior * likelihood
vars_ = [0.7, 1.1, 1.2, 1.5]
posts = np.array([ig2d.post(D)(var) for var in vars_])
posts2 = np.array([ig2d(var)*ig2d.likelihood(D, var) for var in vars_])
np.testing.assert_array_almost_equal(
posts/posts[0], posts2/posts2[0], 5,
"InvGamma2D posterior not proportional to prior * likelihood.")
# Check mean and variance
mean = 1./beta/(alpha-1.0)
np.testing.assert_almost_equal(quad(lambda x: ig2d(x)*x, 0.0, np.inf)[0], mean, 10,
"InvGamma2D has wrong mean.")
var = beta**(-2)/(alpha-1)**2/(alpha-2)
with warnings.catch_warnings():
warnings.simplefilter('ignore')
np.testing.assert_almost_equal(quad(lambda x: ig2d(x)*(x-mean)**2, 0.0, np.inf)[0], var, 5,
"InvGamma2D has wrong variance.")
# Check that integrating out theta yields the prior predictive.
xs = [np.r_[0.1, 0.2], np.r_[0.2, 0.3], np.r_[0.1, 0.3]]
preds1 = np.array([quad(lambda theta: ig2d(theta) * ig2d.like1(x, theta), 0, np.inf)[0] for x in xs])
preds2 = np.array([ig2d.pred(x) for x in xs])
np.testing.assert_array_almost_equal(
preds1/preds1[0], preds2/preds2[0], 5,
"Prior predictive not proportional to integral of likelihood * prior")
@timer
def test_NormInvChi2():
mu_0 = -0.1
sigsqr_0 = 1.1
kappa_0 = 2
nu_0 = 3
nix = dpmm.NormInvChi2(mu_0, kappa_0, sigsqr_0, nu_0)
D = np.r_[1.0, 2.0, 3.0]
mus = np.r_[1.1, 1.2, 1.3]
vars_ = np.r_[1.2, 3.2, 2.3]
# Check prior density
with warnings.catch_warnings():
warnings.simplefilter('ignore')
r = dblquad(nix, 0.0, np.inf, lambda x: -np.inf, lambda x: np.inf)
np.testing.assert_almost_equal(r[0], 1.0, 5,
"NormInvChi2 prior density does not integrate to 1.0")
# Check prior predictive density
r = quad(nix.pred, -np.inf, np.inf)
np.testing.assert_almost_equal(r[0], 1.0, 10,
"NormInvChi2 prior predictive density does not integrate to 1.0")
# Check posterior density
r = dblquad(nix.post(D), 0.0, np.inf, lambda x: -np.inf, lambda x: np.inf)
np.testing.assert_almost_equal(r[0], 1.0, 7,
"NormInvChi2 posterior density does not integrate to 1.0")
# Check posterior predictive density
r = quad(nix.post(D).pred, -np.inf, np.inf)
np.testing.assert_almost_equal(
r[0], 1.0, 10,
"NormInvChi2 posterior predictive density does not integrate to 1.0")
# Check that the likelihood integrates to 1.
r = quad(lambda x: nix.like1(x, 1.1, 2.1), -np.inf, np.inf)
np.testing.assert_almost_equal(r[0], 1.0, 10,
"NormInvChi2 likelihood does not integrate to 1.0")
# Check that evidence (of single data point) integrates to 1.
r = quad(lambda x: nix.evidence(x), -np.inf, np.inf)
np.testing.assert_almost_equal(r[0], 1.0, 10,
"NormInvChi2 evidence does not integrate to 1.0")
# Check evidence for two data points.
r = dblquad(lambda x, y: nix.evidence([x, y]),
-np.inf, np.inf,
lambda x: -np.inf, lambda x: np.inf)
np.testing.assert_almost_equal(r[0], 1.0, 5,
"NormInvChi2 evidence does not integrate to 1.0")
# Check that posterior = prior * likelihood / evidence
post = nix.post(D)
post1 = [nix(mu, var)*nix.likelihood(D, mu, var) / nix.evidence(D)
for mu, var in zip(mus, vars_)]
post2 = [post(mu, var) for mu, var in zip(mus, vars_)]
np.testing.assert_array_almost_equal(post1, post2, 10,
"NormInvChi2 posterior != prior * likelihood / evidence")
# Test that marginal variance probability method matches integrated result.
Pr_var1 = [nix.marginal_var(var) for var in vars_]
Pr_var2 = [quad(lambda mu: nix(mu, var), -np.inf, np.inf)[0] for var in vars_]
np.testing.assert_array_almost_equal(
Pr_var1, Pr_var2, 10,
"Pr(var) method calculation does not match integrated result.")
# Test that marginal mean probability method matches integrated result.
Pr_mu1 = [nix.marginal_mu(mu) for mu in mus]
Pr_mu2 = [quad(lambda var: nix(mu, var), 0.0, np.inf)[0] for mu in mus]
np.testing.assert_array_almost_equal(
Pr_mu1, Pr_mu2, 10,
"Pr(mu) method calculation does not match integrated result.")
# Check that integrating out theta yields the prior predictive.
xs = [0.1, 0.2, 0.3, 0.4]
preds1 = np.array([dblquad(lambda mu, var: nix(mu, var) * nix.like1(x, mu, var),
0, np.inf,
lambda var: -np.inf, lambda var: np.inf)[0]
for x in xs])
preds2 = np.array([nix.pred(x) for x in xs])
np.testing.assert_array_almost_equal(
preds1/preds1[0], preds2/preds2[0], 5,
"Prior predictive not proportional to integral of likelihood * prior")
@timer
def test_NormInvGamma():
m_0 = -0.1
V_0 = 1.1
a_0 = 2.0
b_0 = 3.0
nig = dpmm.NormInvGamma(m_0, V_0, a_0, b_0)
D = np.r_[1.0, 2.0, 3.0]
mus = np.r_[1.1, 1.2, 1.3]
vars_ = np.r_[1.2, 3.2, 2.3]
# Check prior density
with warnings.catch_warnings():
warnings.simplefilter('ignore')
r = dblquad(nig, 0.0, np.inf, lambda x: -np.inf, lambda x: np.inf)
np.testing.assert_almost_equal(r[0], 1.0, 5,
"NormInvGamma prior density does not integrate to 1.0")
# Check prior predictive density
r = quad(nig.pred, -np.inf, np.inf)
np.testing.assert_almost_equal(
r[0], 1.0, 10,
"NormInvGamma prior predictive density does not integrate to 1.0")
# Check posterior density
r = dblquad(nig.post(D), 0.0, np.inf, lambda x: -np.inf, lambda x: np.inf)
np.testing.assert_almost_equal(r[0], 1.0, 7,
"NormInvGamma posterior density does not integrate to 1.0")
# Check posterior predictive density
r = quad(nig.post(D).pred, -np.inf, np.inf)
np.testing.assert_almost_equal(
r[0], 1.0, 10,
"NormInvGamma posterior predictive density does not integrate to 1.0")
# Check that the likelihood integrates to 1.
r = quad(lambda x: nig.like1(x, 1.1, 2.1), -np.inf, np.inf)
np.testing.assert_almost_equal(r[0], 1.0, 10,
"NormInvGamma likelihood does not integrate to 1.0")
# Check that evidence (of single data point) integrates to 1.
r = quad(lambda x: nig.evidence(x), -np.inf, np.inf)
np.testing.assert_almost_equal(r[0], 1.0, 10,
"NormInvGamma evidence does not integrate to 1.0")
# Check evidence for two data points.
r = dblquad(lambda x, y: nig.evidence([x, y]),
-np.inf, np.inf,
lambda x: -np.inf, lambda x: np.inf)
np.testing.assert_almost_equal(r[0], 1.0, 5,
"NormInvGamma evidence does not integrate to 1.0")
# Check that posterior = prior * likelihood / evidence
post = nig.post(D)
post1 = [nig(mu, var)*nig.likelihood(D, mu, var) / nig.evidence(D)
for mu, var in zip(mus, vars_)]
post2 = [post(mu, var) for mu, var in zip(mus, vars_)]
np.testing.assert_array_almost_equal(post1, post2, 10,
"NormInvGamma posterior != prior * likelihood / evidence")
# Test that marginal variance probability method matches integrated result.
Pr_var1 = [nig.marginal_var(var) for var in vars_]
Pr_var2 = [quad(lambda mu: nig(mu, var), -np.inf, np.inf)[0] for var in vars_]
np.testing.assert_array_almost_equal(
Pr_var1, Pr_var2, 10,
"Pr(var) method calculation does not match integrated result.")
# Test that marginal mean probability method matches integrated result.
Pr_mu1 = [nig.marginal_mu(mu) for mu in mus]
Pr_mu2 = [quad(lambda var: nig(mu, var), 0.0, np.inf)[0] for mu in mus]
np.testing.assert_array_almost_equal(
Pr_mu1, Pr_mu2, 10,
"Pr(mu) method calculation does not match integrated result.")
# Check that integrating out theta yields the prior predictive.
xs = [0.1, 0.2, 0.3, 0.4]
preds1 = np.array([dblquad(lambda mu, var: nig(mu, var) * nig.like1(x, mu, var),
0, np.inf,
lambda var: -np.inf, lambda var: np.inf)[0]
for x in xs])
preds2 = np.array([nig.pred(x) for x in xs])
np.testing.assert_array_almost_equal(
preds1/preds1[0], preds2/preds2[0], 5,
"Prior predictive not proportional to integral of likelihood * prior")
@timer
def test_NormInvChi2_eq_NormInvGamma():
mu_0 = 0.1
sigsqr_0 = 1.1
kappa_0 = 2
nu_0 = 3
m_0 = mu_0
V_0 = 1./kappa_0
a_0 = nu_0/2.0
b_0 = nu_0*sigsqr_0/2.0
model1 = dpmm.NormInvChi2(mu_0, kappa_0, sigsqr_0, nu_0)
model2 = dpmm.NormInvGamma(m_0, V_0, a_0, b_0)
mus = np.linspace(-2.2, 2.2, 5)
vars_ = np.linspace(1.0, 4.0, 5)
    xs = np.linspace(-1.1, 1.1, 5)
for x in xs:
np.testing.assert_equal(
model1.pred(x), model2.pred(x),
"NormInvChi2 and NormInvGamma prior predictive densities don't agree at x = ".format(x))
np.testing.assert_equal(
model1.post(x).pred(x), model2.post(x).pred(x),
"NormInvChi2 and NormInvGamma posterior " +
"predictive densities don't agree at x = {}".format(x))
for mu, var in zip(mus, vars_):
np.testing.assert_almost_equal(
model1(mu, var), model2(mu, var), 10,
"NormInvChi2 and NormInvGamma prior densities " +
"don't agree at mu, var = {}, {}".format(mu, var))
post1 = model1.post(xs)
post2 = model2.post(xs)
for mu, var in zip(mus, vars_):
np.testing.assert_almost_equal(
post1(mu, var), post2(mu, var), 10,
"NormInvChi2 and NormInvGamma posterior densities " +
"don't agree at mu, var = {}, {}".format(mu, var))
for mu, var, x in zip(mus, vars_, xs):
np.testing.assert_almost_equal(
model1.like1(x, mu, var), model2.like1(x, mu, var), 10,
"NormInvChi2 and NormInvGamma likelihoods don't " +
"agree at mu, var, x = {}, {}, {}".format(mu, var, x))
np.testing.assert_almost_equal(
model1.evidence(xs), model2.evidence(xs), 10,
"NormInvChi2 and NormInvGamma evidences don't agree")
@timer
def test_NormInvWish(full=False):
mu_0 = np.r_[0.2, 0.1]
kappa_0 = 2.0
Lam_0 = np.eye(2)+0.1
nu_0 = 3
# Create a Normal-Inverse-Wishart prior.
niw = dpmm.NormInvWish(mu_0, kappa_0, Lam_0, nu_0)
# Check that we can draw samples from NormInvWish.
niw.sample()
niw.sample(size=10)
# Check that we can evaluate a likelihood given data.
theta = np.zeros(1, dtype=niw.model_dtype)
theta['mu'] = np.r_[1.0, 1.0]
theta['Sig'] = np.eye(2)+0.12
D = np.array([[0.1, 0.2], [0.2, 0.3], [0.1, 0.2], [0.4, 0.3]])
niw.likelihood(D, theta)
# Evaluate prior
niw(theta)
if __name__ == "__main__" and full:
# Check prior predictive density
with warnings.catch_warnings():
warnings.simplefilter('ignore')
r = dblquad(lambda x, y: niw.pred(np.r_[x, y]), -np.inf, np.inf,
lambda x: -np.inf, lambda x: np.inf)
np.testing.assert_almost_equal(r[0], 1.0, 5,
"NormInvWish prior predictive density does not integrate to 1.0")
# Check posterior predictive density
r = dblquad(lambda x, y: niw.post(D).pred(np.r_[x, y]), -np.inf, np.inf,
lambda x: -np.inf, lambda x: np.inf)
np.testing.assert_almost_equal(
r[0], 1.0, 5, "NormInvWish posterior predictive density does not integrate to 1.0")
# Check that the likelihood of a single point in 2 dimensions integrates to 1.
r = dblquad(lambda x, y: niw.like1(np.r_[x, y], np.r_[1.2, 1.1], np.eye(2)+0.12),
-np.inf, np.inf, lambda x: -np.inf, lambda x: np.inf)
np.testing.assert_almost_equal(r[0], 1.0, 10,
"NormInvWish likelihood does not integrate to 1.0")
if __name__ == "__main__" and full:
# Check that likelihood of a single point in 3 dimensions integrates to 1.
niw3 = dpmm.NormInvWish(np.r_[1, 1, 1], 2.0, np.eye(3), 3)
r = tplquad(lambda x, y, z: niw3.like1(np.r_[x, y, z], np.r_[0.1, 0.2, 0.3], np.eye(3)+0.1),
-np.inf, np.inf,
lambda x: -np.inf, lambda x: np.inf,
lambda x, y: -np.inf, lambda x, y: np.inf)
np.testing.assert_almost_equal(r[0], 1.0, 8,
"NormInvWish likelihood does not integrate to 1.0")
# Check that posterior is proportional to prior * likelihood
D = np.array([[0.1, 0.2], [0.2, 0.3], [0.1, 0.2], [0.4, 0.3]])
mus = [np.r_[2.1, 1.1], np.r_[0.9, 1.2], np.r_[0.9, 1.1]]
Sigs = [np.eye(2)*1.5, np.eye(2)*0.7, np.array([[1.1, -0.1], [-0.1, 1.2]])]
posts = [niw.post(D)(mu, Sig) for mu, Sig in zip(mus, Sigs)]
posts2 = [niw(mu, Sig)*niw.likelihood(D, mu, Sig) for mu, Sig, in zip(mus, Sigs)]
np.testing.assert_array_almost_equal(
posts/posts[0], posts2/posts2[0], 5,
"NormInvWish posterior not proportional to prior * likelihood.")
# Check that posterior = prior * likelihood / evidence
mus = [np.r_[1.1, 1.1], np.r_[1.1, 1.2], np.r_[0.7, 1.3]]
Sigs = [np.eye(2)*0.2, np.eye(2)*0.1, np.array([[2.1, -0.1], [-0.1, 2.2]])]
post = niw.post(D)
post1 = [niw(mu, Sig) * niw.likelihood(D, mu, Sig) / niw.evidence(D)
for mu, Sig in zip(mus, Sigs)]
post2 = [post(mu, Sig) for mu, Sig in zip(mus, Sigs)]
np.testing.assert_array_almost_equal(post1, post2, 10,
"NormInvWish posterior != prior * likelihood / evidence")
# Would like to check that pred(x) == int prior(theta) * like1(x, theta) d(theta), but I don't
# know how to integrate over all covariance matrices. Plus, integrating over a 2D covariance
# matrix plus a 2D mean is a 5 dimensional integral, which sounds nasty to do.
if __name__ == "__main__":
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument('--full', action='store_true', help="Run full test suite (slow).")
args = parser.parse_args()
test_GaussianMeanKnownVariance()
test_InvGamma()
test_InvGamma2D()
test_NormInvChi2()
test_NormInvGamma()
test_NormInvChi2_eq_NormInvGamma()
test_NormInvWish(args.full)
```
#### File: DP_SNe/tests/test_utils.py
```python
def timer(f):
import functools
@functools.wraps(f)
def f2(*args, **kwargs):
import time
import inspect
t0 = time.time()
result = f(*args, **kwargs)
t1 = time.time()
fname = inspect.stack()[1][4][0].split('(')[0].strip()
        print('time for %s = %.2f' % (fname, t1-t0))
return result
return f2
``` |
{
"source": "jmeyers314/jtrace",
"score": 4
} |
#### File: jtrace/batoid/coordSys.py
```python
import numpy as np
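# Right-handed (counterclockwise) rotation matrices about the x, y, and z axes;
# th is the rotation angle in radians.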
def RotX(th):
sth, cth = np.sin(th), np.cos(th)
return np.array([[1,0,0],[0,cth,-sth],[0,sth,cth]])
def RotY(th):
sth, cth = np.sin(th), np.cos(th)
return np.array([[cth,0,sth],[0,1,0],[-sth,0,cth]])
def RotZ(th):
sth, cth = np.sin(th), np.cos(th)
return np.array([[cth,-sth,0],[sth,cth,0],[0,0,1]])
class CoordSys:
"""A coordinate system against which to measure surfaces or rays.
Coordinate systems consist of an origin and a rotation. The ``origin``
attribute specifies where in 3D space the current coordinate system's
origin lands in the global coordinate system. The rotation ``rot``
specifies the 3D rotation matrix to apply to the global coordinate axes to
    yield the axes of this coordinate system.
Parameters
----------
origin : ndarray of float, shape (3,)
Origin of coordinate system in global coordinates.
rot : ndarray of float, shape (3, 3)
Rotation matrix taking global axes into current system axes.
"""
def __init__(self, origin=None, rot=None):
if origin is None:
origin = np.zeros(3, dtype=float)
if rot is None:
rot = np.eye(3, dtype=float)
self.origin = np.array(origin)
self.rot = np.array(rot)
@property
def xhat(self):
"""ndarray of float, shape (3,): Orientation of local x vector in
global coordinates.
"""
return self.rot[:, 0]
@property
def yhat(self):
"""ndarray of float, shape (3,): Orientation of local y vector in
global coordinates.
"""
return self.rot[:, 1]
@property
def zhat(self):
"""ndarray of float, shape (3,): Orientation of local z vector in
global coordinates.
"""
return self.rot[:, 2]
def shiftGlobal(self, dr):
"""Return new CoordSys with origin shifted along global axes.
Parameters
----------
dr : ndarray of float, shape (3,)
Amount to shift in meters.
Returns
-------
CoordSys
"""
return CoordSys(self.origin+dr, self.rot)
def shiftLocal(self, dr):
"""Return new CoordSys with origin shifted along local axes.
Parameters
----------
dr : ndarray of float, shape (3,)
Amount to shift in meters.
Returns
-------
CoordSys
"""
# Rotate the shift into global coordinates, then do the shift globally
return self.shiftGlobal(self.rot@dr)
def rotateGlobal(self, rot, rotCenter=(0,0,0), coordSys=None):
"""Return new CoordSys rotated with respect to global axes.
Parameters
----------
rot : ndarray of float, shape (3, 3)
Rotation matrix to apply.
rotCenter : ndarray of float, shape (3,)
Point about which to rotate.
coordSys : CoordSys
Coordinate system in which rotCenter is specified.
Returns
-------
CoordSys
"""
if coordSys is None:
coordSys = CoordSys()
# Find rot center in global coordinates
globalRotCenter = coordSys.rot@rotCenter + coordSys.origin
# Then rotate about this center
return CoordSys(
rot@(self.origin-globalRotCenter)+globalRotCenter,
[email protected]
)
def rotateLocal(self, rot, rotCenter=(0,0,0), coordSys=None):
"""Return new CoordSys rotated with respect to local axes.
Parameters
----------
rot : ndarray of float, shape (3, 3)
Rotation matrix to apply.
rotCenter : ndarray of float, shape (3,)
Point about which to rotate.
coordSys : CoordSys
Coordinate system in which rotCenter is specified.
Returns
-------
CoordSys
"""
if coordSys is None:
coordSys = self
# Find rot center in global coordinates
globalRotCenter = coordSys.rot@rotCenter + coordSys.origin
# first rotate rot into global coords: (self.rot rot self.rot.T),
# then apply that: (self.rot rot self.rot.T) self.rot = self.rot rot
rTmp = self.rot@rot
return CoordSys(
[email protected]@(self.origin-globalRotCenter)+globalRotCenter,
rTmp
)
def __getstate__(self):
return self.origin, self.rot
def __setstate__(self, d):
self.origin, self.rot = d
def __eq__(self, rhs):
if not isinstance(rhs, CoordSys): return False
return (
np.array_equal(self.origin, rhs.origin) and
np.array_equal(self.rot, rhs.rot)
)
def __ne__(self, rhs):
return not (self == rhs)
def __hash__(self):
return hash((
"batoid.CoordSys",
tuple(self.origin.tolist()),
tuple(self.rot.ravel().tolist())
))
def copy(self):
return CoordSys(self.origin, self.rot)
def __repr__(self):
rotstr = np.array2string(self.rot, separator=', ').replace('\n', '')
return f"CoordSys({self.origin!r}, array({rotstr}))"
```
#### File: jtrace/batoid/coordTransform.py
```python
from . import _batoid
from .coordSys import CoordSys
import numpy as np
class CoordTransform:
"""Transformation between two coordinate systems.
Parameters
----------
fromSys : CoordSys
        Origin coordinate system.
toSys : CoordSys
        Destination coordinate system.
"""
def __init__(self, fromSys, toSys):
self.fromSys = fromSys
self.toSys = toSys
self.dr = fromSys.rot.T@(toSys.origin - fromSys.origin)
self.drot = [email protected]
def __getstate__(self):
return self.fromSys, self.toSys
def __setstate__(self, d):
self.__init__(*d)
def __eq__(self, rhs):
if not isinstance(rhs, CoordTransform): return False
return (
self.fromSys == rhs.fromSys and
self.toSys == rhs.toSys
)
def __ne__(self, rhs):
return not (self == rhs)
def applyForward(self, rv):
"""Apply forward-direction transformation to RayVector.
Parameters
----------
rv : RayVector
Rays to transform.
Returns
-------
transformed : RayVector
Reference to input RayVector transformed in place.
"""
from .trace import applyForwardTransform
return applyForwardTransform(self, rv)
def applyReverse(self, rv):
"""Apply reverse-direction transformation to RayVector.
Parameters
----------
rv : RayVector
Rays to transform.
Returns
-------
transformed : RayVector
Reference to input RayVector transformed in place.
"""
from .trace import applyReverseTransform
return applyReverseTransform(self, rv)
def applyForwardArray(self, x, y, z):
"""Apply forward-direction transformation to ndarrays.
Parameters
----------
x, y, z : ndarray
Coordinates to transform.
Returns
-------
xyz : ndarray
Transformed coordinates.
Notes
-----
Unlike applyForward, this method does not transform in-place, but
returns a newly created ndarray.
"""
r = np.array([x, y, z], dtype=float).T
r -= self.dr
return [email protected]
def applyReverseArray(self, x, y, z):
"""Apply reverse-direction transformation to ndarrays.
Parameters
----------
x, y, z : ndarray
Coordinates to transform.
Returns
-------
xyz : ndarray
Transformed coordinates.
Notes
-----
Unlike applyReverse, this method does not transform in-place, but
returns a newly created ndarray.
"""
r = np.array([x, y, z], dtype=float)
r = (self.drot@r).T
r += self.dr
return r.T
def __repr__(self):
return f"CoordTransform({self.fromSys!r}, {self.toSys!r})"
def __hash__(self):
return hash(("CoordTransform", self.fromSys, self.toSys))
```
#### File: jtrace/batoid/plotUtils.py
```python
from matplotlib.figure import Figure
from matplotlib.gridspec import GridSpec
def zernikePyramid(xs, ys, zs, figsize=(13, 8), vmin=-1, vmax=1, vdim=True,
s=5, title=None, filename=None, fig=None, **kwargs):
"""Make a multi-zernike plot in a pyramid shape.
Subplots show individual Zernikes over a range of x and y (presumably a
field of view).
Parameters
----------
xs, ys: array of float
Field angles (or other spatial coordinate over which to plot Zernikes)
zs: array of float, shape (jmax, xymax)
Zernike values. First index labels the particular Zernike coefficient,
second index labels spatial coordinate. First index implicitly starts
        at j=4 (defocus).
"""
import warnings
import galsim
jmax = zs.shape[0]+3
nmax, _ = galsim.zernike.noll_to_zern(jmax)
nrow = nmax - 1
ncol = nrow + 2
gridspec = GridSpec(nrow, ncol)
def shift(pos, amt):
return [pos.x0+amt, pos.y0, pos.width, pos.height]
def shiftAxes(axes, amt):
for ax in axes:
ax.set_position(shift(ax.get_position(), amt))
if fig is None:
fig = Figure(figsize=figsize, **kwargs)
axes = {}
shiftLeft = []
shiftRight = []
for j in range(4, jmax+1):
n, m = galsim.zernike.noll_to_zern(j)
if n%2 == 0:
row, col = n-2, m//2 + ncol//2
else:
row, col = n-2, (m-1)//2 + ncol//2
subplotspec = gridspec.new_subplotspec((row, col))
axes[j] = fig.add_subplot(subplotspec)
axes[j].set_aspect('equal')
if nrow%2==0 and n%2==0:
shiftLeft.append(axes[j])
if nrow%2==1 and n%2==1:
shiftRight.append(axes[j])
cbar = {}
for j, ax in axes.items():
n, _ = galsim.zernike.noll_to_zern(j)
ax.set_title("Z{}".format(j))
if vdim:
_vmin = vmin/n
_vmax = vmax/n
else:
_vmin = vmin
_vmax = vmax
scat = ax.scatter(
xs, ys, c=zs[j-4], s=s, linewidths=0.5, cmap='Spectral_r',
rasterized=True, vmin=_vmin, vmax=_vmax
)
cbar[j] = fig.colorbar(scat, ax=ax)
ax.set_xticks([])
ax.set_yticks([])
if title:
fig.suptitle(title, x=0.1)
# Mistakenly raises MatplotlibDeprecationWarning.
# See https://github.com/matplotlib/matplotlib/issues/19486
with warnings.catch_warnings():
warnings.simplefilter("ignore")
fig.tight_layout()
amt = 0.5*(axes[4].get_position().x0 - axes[5].get_position().x0)
shiftAxes(shiftLeft, -amt)
shiftAxes(shiftRight, amt)
shiftAxes([cbar[j].ax for j in cbar.keys() if axes[j] in shiftLeft], -amt)
shiftAxes([cbar[j].ax for j in cbar.keys() if axes[j] in shiftRight], amt)
if filename:
fig.savefig(filename)
return fig
```
#### File: jtrace/batoid/utils.py
```python
import warnings
import numpy as np
def normalized(*args):
if len(args) == 1:
args = np.array(*args)
return args/np.linalg.norm(args)
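# Least-squares fit of kx and ky as affine (bilinear) functions of ux and uy;
# returns a (3, 2) coefficient array whose rows are [constant, d/dux, d/duy].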
def bilinear_fit(ux, uy, kx, ky):
a = np.empty((len(ux), 3), dtype=float)
a[:,0] = 1
a[:,1] = ux
a[:,2] = uy
b = np.empty((len(ux), 2), dtype=float)
b[:,0] = kx
b[:,1] = ky
x, _, _, _ = np.linalg.lstsq(a, b, rcond=-1)
return x
def gnomonicToDirCos(u, v):
"""Convert gnomonic tangent plane projection u,v to direction cosines.
Parameters
----------
u, v : float
Gnomonic tangent plane coordinates in radians.
Returns
-------
alpha, beta, gamma : float
Direction cosines (unit vector projected onto x, y, z in order)
Notes
-----
The tangent plane reference is at (u,v) = (0,0), which corresponds to
(alpha, beta, gamma) = (0, 0, -1) (a ray coming directly from above). The
orientation is such that vx (vy) is positive when u (v) is positive.
"""
gamma = 1/np.sqrt(1.0 + u*u + v*v)
alpha = u*gamma
beta = v*gamma
return alpha, beta, -gamma
def dirCosToGnomonic(alpha, beta, gamma):
"""Convert direction cosines to gnomonic tangent plane projection.
Parameters
----------
alpha, beta, gamma : float
Direction cosines (unit vector projected onto x, y, z in order)
Returns
-------
u, v : float
Gnomonic tangent plane coordinates in radians.
Notes
-----
The tangent plane reference is at (u,v) = (0,0), which corresponds to
(alpha, beta, gamma) = (0, 0, -1) (a ray coming directly from above). The
orientation is such that vx (vy) is positive when u (v) is positive.
"""
u = -alpha / gamma
v = -beta / gamma
return u, v
def postelToDirCos(u, v):
"""Convert Postel azimuthal equidistant tangent plane projection u,v to
direction cosines.
Parameters
----------
u, v : float
Postel tangent plane coordinates in radians.
Returns
-------
alpha, beta, gamma : float
Direction cosines (unit vector projected onto x, y, z in order)
Notes
-----
The tangent plane reference is at (u,v) = (0,0), which corresponds to
(alpha, beta, gamma) = (0, 0, -1) (a ray coming directly from above). The
orientation is such that vx (vy) is positive when u (v) is positive.
"""
rho = np.sqrt(u*u + v*v)
wZero = (rho == 0.0)
try:
if wZero:
return 0.0, 0.0, -1.0
except ValueError:
pass
srho = np.sin(rho)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
alpha = u/rho*srho
beta = v/rho*srho
gamma = -np.cos(rho)
if np.any(wZero):
alpha[wZero] = 0.0
beta[wZero] = 0.0
gamma[wZero] = -1.0
return alpha, beta, gamma
def dirCosToPostel(alpha, beta, gamma):
"""Convert direction cosines to Postel azimuthal equidistant tangent plane
projection.
Parameters
----------
alpha, beta, gamma : float
Direction cosines (unit vector projected onto x, y, z in order)
Returns
-------
u, v : float
Postel tangent plane coordinates in radians.
Notes
-----
The tangent plane reference is at (u,v) = (0,0), which corresponds to
(alpha, beta, gamma) = (0, 0, -1) (a ray coming directly from above). The
orientation is such that vx (vy) is positive when u (v) is positive.
"""
wZero = (gamma == -1)
try:
if wZero:
return 0.0, 0.0
except ValueError:
pass
rho = np.arccos(-gamma)
srho = np.sin(rho)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
u = alpha*rho/srho
v = beta*rho/srho
if np.any(wZero):
u[wZero] = 0.0
v[wZero] = 0.0
return u, v
def zemaxToDirCos(u, v):
"""Convert Zemax field angles u,v to direction cosines.
Parameters
----------
u, v : float
Zemax field angles in radians.
Returns
-------
alpha, beta, gamma : float
Direction cosines (unit vector projected onto x, y, z in order)
Notes
-----
The tangent plane reference is at (u,v) = (0,0), which corresponds to
(alpha, beta, gamma) = (0, 0, -1) (a ray coming directly from above). The
orientation is such that vx (vy) is positive when u (v) is positive.
The Zemax field angle convention is not rotationally invariant. The
z-direction cosine for (u, v) = (0, 1) does not equal the z-direction
cosine for (u, v) = (0.6, 0.8).
"""
tanu = np.tan(u)
tanv = np.tan(v)
norm = np.sqrt(1 + tanu*tanu + tanv*tanv)
return tanu/norm, tanv/norm, -1/norm
def dirCosToZemax(alpha, beta, gamma):
"""Convert direction cosines to Postel azimuthal equidistant tangent plane
projection.
Parameters
----------
alpha, beta, gamma : float
Direction cosines (unit vector projected onto x, y, z in order)
Returns
-------
u, v : float
        Zemax field angles in radians.
Notes
-----
The tangent plane reference is at (u,v) = (0,0), which corresponds to
(alpha, beta, gamma) = (0, 0, -1) (a ray coming directly from above). The
orientation is such that vx (vy) is positive when u (v) is positive.
The Zemax field angle convention is not rotationally invariant. The
z-direction cosine for (u, v) = (0, 1) does not equal the z-direction
cosine for (u, v) = (0.6, 0.8).
"""
return np.arctan(-alpha/gamma), np.arctan(-beta/gamma)
def stereographicToDirCos(u, v):
"""Convert stereographic tangent plane projection u,v to direction cosines.
Parameters
----------
u, v : float
Stereographic tangent plane coordinates in radians.
Returns
-------
alpha, beta, gamma : float
Direction cosines (unit vector projected onto x, y, z in order)
Notes
-----
The tangent plane reference is at (u,v) = (0,0), which corresponds to
(alpha, beta, gamma) = (0, 0, -1) (a ray coming directly from above). The
orientation is such that vx (vy) is positive when u (v) is positive.
"""
rho = np.sqrt(u*u + v*v)
wZero = (rho == 0.0)
try:
if wZero:
return 0.0, 0.0, -1.0
except ValueError:
pass
theta = 2*np.arctan(rho/2)
stheta = np.sin(theta)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
alpha = u/rho*stheta
beta = v/rho*stheta
gamma = -np.cos(theta)
if np.any(wZero):
alpha[wZero] = 0.0
beta[wZero] = 0.0
gamma[wZero] = -1.0
return alpha, beta, gamma
def dirCosToStereographic(alpha, beta, gamma):
"""Convert direction cosines to stereographic tangent plane projection.
Parameters
----------
alpha, beta, gamma : float
Direction cosines (unit vector projected onto x, y, z in order)
Returns
-------
u, v : float
Stereographic tangent plane coordinates in radians.
Notes
-----
The tangent plane reference is at (u,v) = (0,0), which corresponds to
(alpha, beta, gamma) = (0, 0, -1) (a ray coming directly from above). The
orientation is such that vx (vy) is positive when u (v) is positive.
"""
wZero = (gamma == -1)
try:
if wZero:
return 0.0, 0.0
except ValueError:
pass
theta = np.arccos(-gamma)
rho = 2*np.tan(theta/2)
stheta = np.sin(theta)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
u = alpha*rho/stheta
v = beta*rho/stheta
if np.any(wZero):
u[wZero] = 0.0
v[wZero] = 0.0
return u, v
def orthographicToDirCos(u, v):
"""Convert orthographic tangent plane projection u,v to direction cosines.
Parameters
----------
u, v : float
Orthographic tangent plane coordinates in radians.
Returns
-------
alpha, beta, gamma : float
Direction cosines (unit vector projected onto x, y, z in order)
Notes
-----
The tangent plane reference is at (u,v) = (0,0), which corresponds to
(alpha, beta, gamma) = (0, 0, -1) (a ray coming directly from above). The
orientation is such that vx (vy) is positive when u (v) is positive.
"""
rho = np.sqrt(u*u + v*v)
theta = np.arcsin(rho)
gamma = np.cos(theta)
alpha = u
beta = v
return alpha, beta, -gamma
def dirCosToOrthographic(alpha, beta, gamma):
"""Convert direction cosines to orthographic tangent plane projection.
Parameters
----------
alpha, beta, gamma : float
Direction cosines (unit vector projected onto x, y, z in order)
Returns
-------
u, v : float
Orthographic tangent plane coordinates in radians.
Notes
-----
The tangent plane reference is at (u,v) = (0,0), which corresponds to
(alpha, beta, gamma) = (0, 0, -1) (a ray coming directly from above). The
orientation is such that vx (vy) is positive when u (v) is positive.
"""
u = alpha
v = beta
return u, v
def lambertToDirCos(u, v):
"""Convert Lambert azimuthal equal-area tangent plane projection u,v to
direction cosines.
Parameters
----------
u, v : float
Lambert tangent plane coordinates in radians.
Returns
-------
alpha, beta, gamma : float
Direction cosines (unit vector projected onto x, y, z in order)
Notes
-----
The tangent plane reference is at (u,v) = (0,0), which corresponds to
(alpha, beta, gamma) = (0, 0, -1) (a ray coming directly from above). The
orientation is such that vx (vy) is positive when u (v) is positive.
"""
rhosqr = u*u + v*v
wZero = rhosqr == 0.0
try:
if wZero:
return 0.0, 0.0, -1.0
except ValueError:
pass
rho = np.sqrt(rhosqr)
gamma = (2-rhosqr)/2
r = np.sqrt(1-gamma*gamma)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
alpha = u * r/rho
beta = v * r/rho
if np.any(wZero):
alpha[wZero] = 0.0
beta[wZero] = 0.0
gamma[wZero] = 1.0
return alpha, beta, -gamma
def dirCosToLambert(alpha, beta, gamma):
"""Convert direction cosines to Lambert azimuthal equal-area tangent plane
projection.
Parameters
----------
alpha, beta, gamma : float
Direction cosines (unit vector projected onto x, y, z in order)
Returns
-------
u, v : float
Lambert tangent plane coordinates in radians.
Notes
-----
The tangent plane reference is at (u,v) = (0,0), which corresponds to
(alpha, beta, gamma) = (0, 0, -1) (a ray coming directly from above). The
orientation is such that vx (vy) is positive when u (v) is positive.
"""
wZero = (gamma == -1)
try:
if wZero:
return 0.0, 0.0
except ValueError:
pass
rho = np.sqrt(2+2*gamma)
norm = np.sqrt(1-gamma*gamma)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
u = alpha*rho/norm
v = beta*rho/norm
if np.any(wZero):
u[wZero] = 0.0
v[wZero] = 0.0
return u, v
def fieldToDirCos(u, v, projection='postel'):
"""Convert field angle to direction cosines using specified projection.
Parameters
----------
u, v : float
Tangent plane coordinates in radians.
projection : {'postel', 'zemax', 'gnomonic', 'stereographic', 'lambert', 'orthographic'}
Projection used to convert field angle to direction cosines.
Returns
-------
alpha, beta, gamma : float
Direction cosines (unit vector projected onto x, y, z in order)
Notes
-----
The tangent plane reference is at (u,v) = (0,0), which corresponds to
(alpha, beta, gamma) = (0, 0, -1) (a ray coming directly from above). The
orientation is such that vx (vy) is positive when u (v) is positive.
"""
if projection == 'postel':
return postelToDirCos(u, v)
elif projection == 'zemax':
return zemaxToDirCos(u, v)
elif projection == 'gnomonic':
return gnomonicToDirCos(u, v)
elif projection == 'stereographic':
return stereographicToDirCos(u, v)
elif projection == 'lambert':
return lambertToDirCos(u, v)
elif projection == 'orthographic':
return orthographicToDirCos(u, v)
else:
raise ValueError("Bad projection: {}".format(projection))
def dirCosToField(alpha, beta, gamma, projection='postel'):
"""Convert direction cosines to field angle using specified projection.
Parameters
----------
alpha, beta, gamma : float
Direction cosines (unit vector projected onto x, y, z in order)
projection : {'postel', 'zemax', 'gnomonic', 'stereographic', 'lambert', 'orthographic'}
Projection used to convert direction cosines to field angle.
Returns
-------
u, v : float
Tangent plane coordinates in radians.
Notes
-----
The tangent plane reference is at (u,v) = (0,0), which corresponds to
(alpha, beta, gamma) = (0, 0, -1) (a ray coming directly from above). The
orientation is such that vx (vy) is positive when u (v) is positive.
"""
if projection == 'postel':
return dirCosToPostel(alpha, beta, gamma)
elif projection == 'zemax':
return dirCosToZemax(alpha, beta, gamma)
elif projection == 'gnomonic':
return dirCosToGnomonic(alpha, beta, gamma)
elif projection == 'stereographic':
return dirCosToStereographic(alpha, beta, gamma)
elif projection == 'lambert':
return dirCosToLambert(alpha, beta, gamma)
elif projection == 'orthographic':
return dirCosToOrthographic(alpha, beta, gamma)
else:
raise ValueError("Bad projection: {}".format(projection))
# http://stackoverflow.com/a/6849299
class lazy_property(object):
"""
    Meant to be used for lazy evaluation of an object attribute.
    The property should represent non-mutable data, as it replaces itself.
"""
def __init__(self, fget):
self.fget = fget
self.func_name = fget.__name__
def __get__(self, obj, cls):
if obj is None:
return self
value = self.fget(obj)
setattr(obj, self.func_name, value)
return value
```
#### File: jtrace/tests/test_draw.py
```python
import batoid
import numpy as np
from test_helpers import timer
# Use matplotlib with a non-interactive backend.
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
@timer
def initialize(ngrid=25, theta_x=1.):
telescope = batoid.Optic.fromYaml("DESI.yaml")
dirCos = batoid.utils.gnomonicToDirCos(np.deg2rad(theta_x), 0.)
rays = batoid.RayVector.asGrid(
optic=telescope,
theta_x=np.deg2rad(theta_x), theta_y=0.0,
wavelength=500e-9, nx=ngrid
)
return telescope, telescope.traceFull(rays)
@timer
def draw2dtelescope(ax, telescope):
telescope.draw2d(ax, c='k')
@timer
def draw2drays(ax, rays, start=None, stop=None):
batoid.drawTrace2d(ax, rays, start, stop, c='b', lw=1)
def test_draw2d(ngrid=25):
telescope, rays = initialize(ngrid)
fig = plt.figure(figsize=(20, 20))
ax = fig.add_subplot(111)
draw2dtelescope(ax, telescope)
draw2drays(ax, rays)
def test_draw2d_only():
telescope, rays = initialize(3)
fig = plt.figure(figsize=(20, 20))
ax = fig.add_subplot(111)
telescope.draw2d(ax, only=batoid.optic.Lens, fc='c', alpha=0.2)
telescope.draw2d(ax, only=batoid.optic.Detector, c='b', lw=1)
@timer
def draw3dtelescope(ax, telescope):
telescope.draw3d(ax, c='k')
@timer
def draw3drays(ax, rays, start=None, stop=None):
batoid.drawTrace3d(ax, rays, start, stop, c='b', lw=1)
def test_draw3d(ngrid=25):
telescope, rays = initialize(ngrid)
fig = plt.figure(figsize=(20, 20))
ax = fig.add_subplot(111, projection='3d')
draw3dtelescope(ax, telescope)
draw3drays(ax, rays)
if __name__ == '__main__':
test_draw3d()
plt.savefig('draw3d.png')
test_draw2d()
plt.savefig('draw2d.png')
test_draw2d_only()
```
#### File: jtrace/tests/test_Lattice.py
```python
import numpy as np
import batoid
from test_helpers import timer, all_obj_diff, do_pickle
@timer
def test_lattice_coords():
np.random.seed(5)
# Check 1D
for _ in np.arange(10):
N = np.random.randint(1, 2000)
arr = np.ones((N,))
primitiveVector = np.random.uniform(-1.0, 1.0)
lattice = batoid.Lattice(arr, primitiveVector)
np.testing.assert_allclose(
np.squeeze(lattice.coords),
np.arange(-(N//2), -(-N//2))*primitiveVector
)
# Check 2D
for _ in np.arange(10):
N1 = np.random.randint(1, 200)
N2 = np.random.randint(1, 200)
arr = np.ones((N1, N2))
pv1 = np.random.uniform(-1.0, 1.0, size=2)
pv2 = np.random.uniform(-1.0, 1.0, size=2)
lattice = batoid.Lattice(arr, np.vstack([pv1, pv2]))
for _ in np.arange(100):
i = np.random.randint(0, N1)
j = np.random.randint(0, N2)
np.testing.assert_allclose(
lattice.coords[i,j],
(i-N1//2)*pv1 + (j-N2//2)*pv2
)
# Check 3D
for _ in np.arange(10):
N1 = np.random.randint(1, 20)
N2 = np.random.randint(1, 20)
N3 = np.random.randint(1, 20)
arr = np.ones((N1, N2, N3))
pv1 = np.random.uniform(-1.0, 1.0, size=3)
pv2 = np.random.uniform(-1.0, 1.0, size=3)
pv3 = np.random.uniform(-1.0, 1.0, size=3)
lattice = batoid.Lattice(arr, np.vstack([pv1, pv2, pv3]))
with np.printoptions(threshold=20**3):
do_pickle(lattice)
for __ in np.arange(100):
i = np.random.randint(0, N1)
j = np.random.randint(0, N2)
k = np.random.randint(0, N3)
np.testing.assert_allclose(
lattice.coords[i,j,k],
(i-N1//2)*pv1 + (j-N2//2)*pv2 + (k-N3//2)*pv3
)
@timer
def test_ne():
rng = np.random.default_rng(57)
N1 = rng.integers(1, 5)
N2 = rng.integers(1, 5)
N3 = rng.integers(1, 5)
arr = np.ones((N1, N2, N3))
pv1 = rng.uniform(-1.0, 1.0, size=3)
pv2 = rng.uniform(-1.0, 1.0, size=3)
pv3 = rng.uniform(-1.0, 1.0, size=3)
lattice1 = batoid.Lattice(arr, np.vstack([pv1, pv2, pv3]))
lattice2 = batoid.Lattice(arr[...,0], np.vstack([pv1, pv2, pv3])[:2,:2])
lattice3 = batoid.Lattice(arr, 2*np.vstack([pv1, pv2, pv3]))
lattice4 = batoid.Lattice(2*arr, np.vstack([pv1, pv2, pv3]))
objs = [
batoid.CoordSys(),
lattice1,
lattice2,
lattice3,
lattice4
]
all_obj_diff(objs)
if __name__ == '__main__':
test_lattice_coords()
test_ne()
```
#### File: jtrace/tests/test_Obscuration.py
```python
import batoid
import numpy as np
from test_helpers import timer, do_pickle, all_obj_diff
@timer
def test_ObscCircle():
rng = np.random.default_rng(5)
size = 10_000
for i in range(100):
cx = rng.normal(0.0, 1.0)
cy = rng.normal(0.0, 1.0)
r = rng.uniform(0.5, 1.5)
obsc = batoid.ObscCircle(r, cx, cy)
for i in range(100):
x = rng.normal(0.0, 1.0)
y = rng.normal(0.0, 1.0)
assert obsc.contains(x, y) == (np.hypot(x-cx, y-cy) <= r)
x = rng.normal(0.0, 1.0, size=size)
y = rng.normal(0.0, 1.0, size=size)
np.testing.assert_array_equal(
obsc.contains(x, y),
np.hypot(x-cx, y-cy) <= r
)
do_pickle(obsc)
rv = batoid.RayVector(x, y, 0.0, 0.0, 0.0, 0.0)
batoid.obscure(obsc, rv)
np.testing.assert_array_equal(
obsc.contains(x, y),
rv.vignetted
)
# Check method syntax too
rv = batoid.RayVector(x, y, 0.0, 0.0, 0.0, 0.0)
obsc.obscure(rv)
np.testing.assert_array_equal(
obsc.contains(x, y),
rv.vignetted
)
@timer
def test_ObscAnnulus():
rng = np.random.default_rng(57)
size = 10_000
for i in range(100):
cx = rng.normal(0.0, 1.0)
cy = rng.normal(0.0, 1.0)
inner = rng.uniform(0.5, 1.5)
outer = rng.uniform(1.6, 1.9)
obsc = batoid.ObscAnnulus(inner, outer, cx, cy)
for i in range(100):
x = rng.normal(0.0, 1.0)
y = rng.normal(0.0, 1.0)
assert obsc.contains(x, y) == (
inner <= np.hypot(x-cx, y-cy) < outer
)
x = rng.normal(0.0, 1.0, size=size)
y = rng.normal(0.0, 1.0, size=size)
r = np.hypot(x-cx, y-cy)
np.testing.assert_array_equal(
obsc.contains(x, y),
(inner <= r) & (r < outer)
)
do_pickle(obsc)
rv = batoid.RayVector(x, y, 0.0, 0.0, 0.0, 0.0)
batoid.obscure(obsc, rv)
np.testing.assert_array_equal(
obsc.contains(x, y),
rv.vignetted
)
@timer
def test_ObscRectangle():
rng = np.random.default_rng(577)
size = 10_000
for i in range(100):
cx = rng.normal(0.0, 1.0)
cy = rng.normal(0.0, 1.0)
w = rng.uniform(0.5, 2.5)
h = rng.uniform(0.5, 2.5)
obsc = batoid.ObscRectangle(w, h, cx, cy)
for i in range(100):
x = rng.normal(0.0, 2.0)
y = rng.normal(0.0, 2.0)
assert obsc.contains(x, y) == (x > cx-w/2 and x < cx+w/2 and y > cy-h/2 and y < cy+h/2)
th = rng.uniform(0.0, np.pi/2)
obsc = batoid.ObscRectangle(w, h, cx, cy, th)
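        # Rotating the test point by -th about (cx, cy) maps it into the
        # rectangle's unrotated frame, where the axis-aligned bounds check
        # applies; the same transform is used for the vectorized check below.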
for i in range(100):
x = rng.normal(0.0, 2.0)
y = rng.normal(0.0, 2.0)
xp = (x-cx)*np.cos(-th) - (y-cy)*np.sin(-th) + cx
yp = (x-cx)*np.sin(-th) + (y-cy)*np.cos(-th) + cy
assert obsc.contains(x, y) == (xp > cx-w/2 and xp < cx+w/2 and yp > cy-h/2 and yp < cy+h/2)
x = rng.normal(0.0, 2.0, size=size)
y = rng.normal(0.0, 2.0, size=size)
xp = (x-cx)*np.cos(-th) - (y-cy)*np.sin(-th) + cx
yp = (x-cx)*np.sin(-th) + (y-cy)*np.cos(-th) + cy
np.testing.assert_array_equal(
obsc.contains(x, y),
(xp > cx-w/2) & (xp < cx+w/2) & (yp > cy-h/2) & (yp < cy+h/2)
)
do_pickle(obsc)
rv = batoid.RayVector(x, y, 0.0, 0.0, 0.0, 0.0)
batoid.obscure(obsc, rv)
np.testing.assert_array_equal(
obsc.contains(x, y),
rv.vignetted
)
@timer
def test_ObscRay():
rng = np.random.default_rng(5772)
size = 10_000
for i in range(100):
cx = rng.normal(0.0, 1.0)
cy = rng.normal(0.0, 1.0)
w = rng.uniform(0.5, 2.5)
th = rng.uniform(0.0, np.pi/2)
obsc = batoid.ObscRay(w, th, cx, cy)
for i in range(100):
x = rng.normal(0.0, 2.0)
y = rng.normal(0.0, 2.0)
xp = (x-cx)*np.cos(-th) - (y-cy)*np.sin(-th)
yp = (x-cx)*np.sin(-th) + (y-cy)*np.cos(-th)
assert obsc.contains(x, y) == (xp > 0.0 and yp > -w/2 and yp < w/2)
x = rng.normal(0.0, 2.0, size=size)
y = rng.normal(0.0, 2.0, size=size)
xp = (x-cx)*np.cos(-th) - (y-cy)*np.sin(-th)
yp = (x-cx)*np.sin(-th) + (y-cy)*np.cos(-th)
np.testing.assert_array_equal(
obsc.contains(x, y),
(xp > 0.0) & (yp > -w/2) & (yp < w/2)
)
do_pickle(obsc)
rv = batoid.RayVector(x, y, 0.0, 0.0, 0.0, 0.0)
batoid.obscure(obsc, rv)
np.testing.assert_array_equal(
obsc.contains(x, y),
rv.vignetted
)
@timer
def test_ObscPolygon():
rng = np.random.default_rng(57721)
size = 10_000
# Test equivalency with ObscRectangle
for i in range(100):
cx = rng.normal(0.0, 1.0)
cy = rng.normal(0.0, 1.0)
w = rng.uniform(0.5, 2.5)
h = rng.uniform(0.5, 2.5)
xs = [cx-w/2, cx+w/2, cx+w/2, cx-w/2]
ys = [cy-h/2, cy-h/2, cy+h/2, cy+h/2]
obscPoly = batoid.ObscPolygon(xs, ys)
obscRect = batoid.ObscRectangle(w, h, cx, cy, 0.0)
for i in range(100):
x = rng.normal(0.0, 2.0)
y = rng.normal(0.0, 2.0)
assert obscPoly.contains(x, y) == obscRect.contains(x, y)
x = rng.normal(0.0, 2.0, size=size)
y = rng.normal(0.0, 2.0, size=size)
np.testing.assert_array_equal(
obscPoly.contains(x, y),
obscRect.contains(x, y)
)
do_pickle(obscPoly)
rv = batoid.RayVector(x, y, 0.0, 0.0, 0.0, 0.0)
batoid.obscure(obscPoly, rv)
np.testing.assert_array_equal(
obscPoly.contains(x, y),
rv.vignetted
)
# Try Union of two Rectangles equal to Polygon.
# Center of both rectangles at (1, 2)
# One is width=4, height=2
# Other is width=2, height=4
r1 = batoid.ObscRectangle(4, 2, 1, 2)
r2 = batoid.ObscRectangle(2, 4, 1, 2)
o1 = batoid.ObscUnion([r1, r2])
xs = [-2, -1, -1, 1, 1, 2, 2, 1, 1, -1, -1, -2, -2]
ys = [ 1, 1, 2, 2, 1, 1, -1, -1, -2, -2, -1, -1, 1]
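    # The vertex lists above trace the plus-shaped outline of the two
    # overlapping rectangles about the origin; shifting by (+1, +2) centers
    # the polygon on the rectangles' common center.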
o2 = batoid.ObscPolygon(np.array(xs)+1, np.array(ys)+2)
x = rng.normal(0.0, 2.0, size=size)
y = rng.normal(0.0, 2.0, size=size)
np.testing.assert_array_equal(
o1.contains(x, y),
o2.contains(x, y)
)
# Check containsGrid
x = np.linspace(-10.0, 10.0, 25)
y = np.linspace(-10.0, 10.0, 25)
xx, yy = np.meshgrid(x, y)
np.testing.assert_array_equal(
o2.contains(xx, yy),
o2.containsGrid(x, y)
)
@timer
def test_ObscNegation():
rng = np.random.default_rng(577215)
size = 10_000
for i in range(100):
cx = rng.normal(0.0, 1.0)
cy = rng.normal(0.0, 1.0)
r = rng.uniform(0.5, 1.5)
original = batoid.ObscCircle(r, cx, cy)
obsc = batoid.ObscNegation(original)
do_pickle(obsc)
for i in range(100):
x = rng.normal(0.0, 1.0)
y = rng.normal(0.0, 1.0)
assert obsc.contains(x, y) != original.contains(x, y)
x = rng.normal(0.0, 1.0, size=size)
y = rng.normal(0.0, 1.0, size=size)
np.testing.assert_array_equal(
obsc.contains(x, y),
~original.contains(x, y)
)
do_pickle(obsc)
rv = batoid.RayVector(x, y, 0.0, 0.0, 0.0, 0.0)
batoid.obscure(obsc, rv)
np.testing.assert_array_equal(
obsc.contains(x, y),
rv.vignetted
)
# also test config parsing of "ClearCircle"
config = {'type':'ClearCircle', 'x':cx, 'y':cy, 'radius':r}
obsc = batoid.parse.parse_obscuration(config)
do_pickle(obsc)
for i in range(100):
x = rng.normal(0.0, 1.0)
y = rng.normal(0.0, 1.0)
assert obsc.contains(x, y) == (np.hypot(x-cx, y-cy) > r)
@timer
def test_ObscCompound():
rng = np.random.default_rng(577215)
size = 10_000
for i in range(100):
rx = rng.normal(0.0, 1.0)
ry = rng.normal(0.0, 1.0)
w = rng.uniform(0.5, 2.5)
h = rng.uniform(0.5, 2.5)
th = rng.uniform(0.0, np.pi)
rect = batoid.ObscRectangle(w, h, rx, ry, th)
cx = rng.normal(0.0, 1.0)
cy = rng.normal(0.0, 1.0)
r = rng.uniform(0.5, 1.5)
circ = batoid.ObscCircle(r, cx, cy)
union = batoid.ObscUnion([rect, circ])
do_pickle(union)
union2 = batoid.ObscUnion([circ, rect])
assert union == union2 # commutative!
assert hash(union) == hash(union2)
intersection = batoid.ObscIntersection([rect, circ])
do_pickle(intersection)
intersection2 = batoid.ObscIntersection([circ, rect])
assert intersection == intersection2
assert hash(intersection) == hash(intersection2)
for i in range(100):
x = rng.normal(0.0, 2.0)
y = rng.normal(0.0, 2.0)
assert (union.contains(x, y) == union2.contains(x, y)
== (rect.contains(x, y) or circ.contains(x, y)))
assert (intersection.contains(x, y) == intersection2.contains(x, y)
== (rect.contains(x, y) and circ.contains(x, y)))
x = rng.normal(0.0, 2.0, size=size)
y = rng.normal(0.0, 2.0, size=size)
np.testing.assert_array_equal(
union.contains(x, y),
union2.contains(x, y)
)
np.testing.assert_array_equal(
union.contains(x, y),
rect.contains(x, y) | circ.contains(x, y)
)
np.testing.assert_array_equal(
intersection.contains(x, y),
intersection2.contains(x, y)
)
np.testing.assert_array_equal(
intersection.contains(x, y),
rect.contains(x, y) & circ.contains(x, y)
)
rv = batoid.RayVector(x, y, 0.0, 0.0, 0.0, 0.0)
batoid.obscure(union, rv)
np.testing.assert_array_equal(
union.contains(x, y),
rv.vignetted
)
rv = batoid.RayVector(x, y, 0.0, 0.0, 0.0, 0.0)
batoid.obscure(intersection, rv)
np.testing.assert_array_equal(
intersection.contains(x, y),
rv.vignetted
)
with np.testing.assert_raises(ValueError):
batoid.ObscUnion()
with np.testing.assert_raises(ValueError):
batoid.ObscIntersection()
@timer
def test_ne():
objs = [
batoid.ObscCircle(1.0),
batoid.ObscCircle(2.0),
batoid.ObscCircle(1.0, 0.1, 0.1),
batoid.ObscAnnulus(0.0, 1.0),
batoid.ObscAnnulus(0.1, 1.0),
batoid.ObscAnnulus(0.1, 1.0, 0.1, 0.1),
batoid.ObscRectangle(1.0, 2.0),
batoid.ObscRectangle(1.0, 2.0, 0.1, 0.1),
batoid.ObscRectangle(1.0, 2.0, 0.1, 0.1, 1.0),
batoid.ObscRay(1.0, 2.0),
batoid.ObscRay(1.0, 2.0, 0.1, 0.1),
batoid.ObscNegation(batoid.ObscCircle(1.0)),
batoid.ObscPolygon([0,1,1,0],[0,0,1,1]),
batoid.ObscUnion([batoid.ObscCircle(1.0)]),
batoid.ObscUnion([
batoid.ObscCircle(1.0),
batoid.ObscCircle(2.0)
]),
batoid.ObscUnion([
batoid.ObscCircle(1.0),
batoid.ObscCircle(2.2)
]),
batoid.ObscUnion([
batoid.ObscCircle(1.0),
batoid.ObscCircle(2.2),
batoid.ObscAnnulus(1.0, 2.0)
]),
batoid.ObscIntersection([batoid.ObscCircle(1.0)]),
batoid.ObscIntersection([
batoid.ObscCircle(1.0),
batoid.ObscCircle(2.0)
]),
batoid.ObscIntersection([
batoid.ObscCircle(1.0),
batoid.ObscCircle(2.2)
]),
batoid.ObscIntersection([
batoid.ObscCircle(1.0),
batoid.ObscCircle(2.2),
batoid.ObscAnnulus(1.0, 2.0)
]),
]
all_obj_diff(objs)
if __name__ == '__main__':
test_ObscCircle()
test_ObscAnnulus()
test_ObscRectangle()
test_ObscRay()
test_ObscPolygon()
test_ObscNegation()
test_ObscCompound()
test_ne()
```
#### File: jtrace/tests/test_RayVector.py
```python
import batoid
import numpy as np
from test_helpers import timer, init_gpu, rays_allclose, checkAngle, do_pickle
@timer
def test_properties():
rng = np.random.default_rng(5)
size = 10
for i in range(100):
x = rng.normal(size=size)
y = rng.normal(size=size)
z = rng.normal(size=size)
vx = rng.normal(size=size)
vy = rng.normal(size=size)
vz = rng.normal(size=size)
t = rng.normal(size=size)
w = rng.normal(size=size)
fx = rng.normal(size=size)
vig = rng.choice([True, False], size=size)
fa = rng.choice([True, False], size=size)
cs = batoid.CoordSys(
origin=rng.normal(size=3),
rot=batoid.RotX(rng.normal())@batoid.RotY(rng.normal())
)
rv = batoid.RayVector(x, y, z, vx, vy, vz, t, w, fx, vig, fa, cs)
np.testing.assert_array_equal(rv.x, x)
np.testing.assert_array_equal(rv.y, y)
np.testing.assert_array_equal(rv.z, z)
np.testing.assert_array_equal(rv.r[:, 0], x)
np.testing.assert_array_equal(rv.r[:, 1], y)
np.testing.assert_array_equal(rv.r[:, 2], z)
np.testing.assert_array_equal(rv.vx, vx)
np.testing.assert_array_equal(rv.vy, vy)
np.testing.assert_array_equal(rv.vz, vz)
np.testing.assert_array_equal(rv.v[:, 0], vx)
np.testing.assert_array_equal(rv.v[:, 1], vy)
np.testing.assert_array_equal(rv.v[:, 2], vz)
np.testing.assert_array_equal(rv.k[:, 0], rv.kx)
np.testing.assert_array_equal(rv.k[:, 1], rv.ky)
np.testing.assert_array_equal(rv.k[:, 2], rv.kz)
np.testing.assert_array_equal(rv.t, t)
np.testing.assert_array_equal(rv.wavelength, w)
np.testing.assert_array_equal(rv.flux, fx)
np.testing.assert_array_equal(rv.vignetted, vig)
np.testing.assert_array_equal(rv.failed, fa)
assert rv.coordSys == cs
rv._syncToDevice()
do_pickle(rv)
@timer
def test_positionAtTime():
rng = np.random.default_rng(57)
size = 10_000
x = rng.uniform(-1, 1, size=size)
y = rng.uniform(-1, 1, size=size)
z = rng.uniform(-0.1, 0.1, size=size)
vx = rng.uniform(-0.05, 0.05, size=size)
vy = rng.uniform(-0.05, 0.05, size=size)
vz = np.sqrt(1.0 - vx*vx - vy*vy)
# Try with default t=0 first
rv = batoid.RayVector(x, y, z, vx, vy, vz)
np.testing.assert_equal(rv.x, x)
np.testing.assert_equal(rv.y, y)
np.testing.assert_equal(rv.z, z)
np.testing.assert_equal(rv.vx, vx)
np.testing.assert_equal(rv.vy, vy)
np.testing.assert_equal(rv.vz, vz)
np.testing.assert_equal(rv.t, 0.0)
np.testing.assert_equal(rv.wavelength, 0.0)
for t1 in [0.0, 1.0, -1.1, 2.5]:
np.testing.assert_equal(
rv.positionAtTime(t1),
rv.r + t1 * rv.v
)
# Now add some random t's
t = rng.uniform(-1.0, 1.0, size=size)
rv = batoid.RayVector(x, y, z, vx, vy, vz, t)
np.testing.assert_equal(rv.x, x)
np.testing.assert_equal(rv.y, y)
np.testing.assert_equal(rv.z, z)
np.testing.assert_equal(rv.vx, vx)
np.testing.assert_equal(rv.vy, vy)
np.testing.assert_equal(rv.vz, vz)
np.testing.assert_equal(rv.t, t)
np.testing.assert_equal(rv.wavelength, 0.0)
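    # With per-ray reference times t, positionAtTime(t1) advances each ray by
    # (t1 - t) along its own velocity, as verified below.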
for t1 in [0.0, 1.4, -1.3, 2.1]:
np.testing.assert_equal(
rv.positionAtTime(t1),
rv.r + rv.v*(t1-rv.t)[:,None]
)
@timer
def test_propagate():
rng = np.random.default_rng(577)
size = 10_000
x = rng.uniform(-1, 1, size=size)
y = rng.uniform(-1, 1, size=size)
z = rng.uniform(-0.1, 0.1, size=size)
vx = rng.uniform(-0.05, 0.05, size=size)
vy = rng.uniform(-0.05, 0.05, size=size)
vz = np.sqrt(1.0 - vx*vx - vy*vy)
# Try with default t=0 first
rv = batoid.RayVector(x, y, z, vx, vy, vz)
for t1 in [0.0, 1.0, -1.1, 2.5]:
rvcopy = rv.copy()
r1 = rv.positionAtTime(t1)
rvcopy.propagate(t1)
np.testing.assert_equal(
rvcopy.r,
r1
)
np.testing.assert_equal(
rvcopy.v,
rv.v
)
np.testing.assert_equal(
rvcopy.t,
t1
)
# Now add some random t's
t = rng.uniform(-1.0, 1.0, size=size)
rv = batoid.RayVector(x, y, z, vx, vy, vz, t)
for t1 in [0.0, 1.0, -1.1, 2.5]:
rvcopy = rv.copy()
r1 = rv.positionAtTime(t1)
rvcopy.propagate(t1)
np.testing.assert_equal(
rvcopy.r,
r1
)
np.testing.assert_equal(
rvcopy.v,
rv.v
)
np.testing.assert_equal(
rvcopy.t,
t1
)
@timer
def test_phase():
rng = np.random.default_rng(5772)
size = 10_000
for n in [1.0, 1.3]:
x = rng.uniform(-1, 1, size=size)
y = rng.uniform(-1, 1, size=size)
z = rng.uniform(-0.1, 0.1, size=size)
vx = rng.uniform(-0.05, 0.05, size=size)
vy = rng.uniform(-0.05, 0.05, size=size)
vz = np.sqrt(1.0/(n*n) - vx*vx - vy*vy)
t = rng.uniform(-1.0, 1.0, size=size)
wavelength = rng.uniform(300e-9, 1100e-9, size=size)
rv = batoid.RayVector(x, y, z, vx, vy, vz, t, wavelength)
# First explicitly check that phase is 0 at position and time of individual
# rays
for i in rng.choice(size, size=10):
np.testing.assert_equal(
rv.phase(rv.r[i], rv.t[i])[i],
0.0
)
# Now use actual formula
# phi = k.(r-r0) - (t-t0)omega
# k = 2 pi v / lambda |v|^2
# omega = 2 pi / lambda
# |v| = 1 / n
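        # In the vectorized check below, the einsum gives the per-ray dot
        # product v.(r1 - r); multiplying by n*n converts v to v/|v|^2 (since
        # |v| = 1/n), subtracting (t1 - t) adds the time term, and the common
        # factor 2*pi/lambda then yields k.(r1 - r) - omega*(t1 - t).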
for r1, t1 in [
((0, 0, 0), 0),
((0, 1, 2), 3),
((-1, 2, 4), -1),
((0, 1, -4), -2)
]:
phi = np.einsum("ij,ij->i", rv.v, r1-rv.r)
phi *= n*n
phi -= (t1-rv.t)
phi *= 2*np.pi/wavelength
np.testing.assert_allclose(
rv.phase(r1, t1),
phi,
rtol=0,
atol=1e-7
)
for i in rng.choice(size, size=10):
s = slice(i, i+1)
rvi = batoid.RayVector(
x[s], y[s], z[s],
vx[s], vy[s], vz[s],
t[s].copy(), wavelength[s].copy()
)
# Move integer number of wavelengths ahead
ti = rvi.t[0]
wi = rvi.wavelength[0]
r1 = rvi.positionAtTime(ti + 5123456789*wi)[0]
a = rvi.amplitude(r1, ti)
np.testing.assert_allclose(a.real, 1.0, rtol=0, atol=2e-5)
np.testing.assert_allclose(a.imag, 0.0, rtol=0, atol=2e-5)
# Half wavelength
r1 = rvi.positionAtTime(ti + 6987654321.5*wi)[0]
a = rvi.amplitude(r1, ti)
np.testing.assert_allclose(a.real, -1.0, rtol=0, atol=2e-5)
np.testing.assert_allclose(a.imag, 0.0, rtol=0, atol=2e-5)
# Quarter wavelength
r1 = rvi.positionAtTime(ti + 0.25*wi)[0]
a = rvi.amplitude(r1, ti)
np.testing.assert_allclose(a.real, 0.0, rtol=0, atol=2e-5)
np.testing.assert_allclose(a.imag, 1.0, rtol=0, atol=2e-5)
# Three-quarters wavelength
r1 = rvi.positionAtTime(ti + 7182738495.75*wi)[0]
a = rvi.amplitude(r1, ti)
np.testing.assert_allclose(a.real, 0.0, rtol=0, atol=2e-5)
np.testing.assert_allclose(a.imag, -1.0, rtol=0, atol=2e-5)
# We can also keep the position the same and change the time in
# half/quarter integer multiples of the period.
a = rvi.amplitude(rvi.r[0], rvi.t[0]+5e9*wi)
np.testing.assert_allclose(a.real, 1.0, rtol=0, atol=1e-5)
np.testing.assert_allclose(a.imag, 0.0, rtol=0, atol=1e-5)
a = rvi.amplitude(rvi.r[0], rvi.t[0]+(5e9+5.5)*wi)
np.testing.assert_allclose(a.real, -1.0, rtol=0, atol=1e-5)
np.testing.assert_allclose(a.imag, 0.0, rtol=0, atol=1e-5)
a = rvi.amplitude(rvi.r[0], rvi.t[0]+(5e9+2.25)*wi)
np.testing.assert_allclose(a.real, 0.0, rtol=0, atol=1e-5)
np.testing.assert_allclose(a.imag, -1.0, rtol=0, atol=1e-5)
a = rvi.amplitude(rvi.r[0], rvi.t[0]+(5e9+1.75)*wi)
np.testing.assert_allclose(a.real, 0.0, rtol=0, atol=1e-5)
np.testing.assert_allclose(a.imag, 1.0, rtol=0, atol=1e-5)
# If we pick a point anywhere along a vector originating at the ray
# position, but orthogonal to its direction of propagation, then we
# should get phase = 0 (mod 2pi).
v1 = np.array([1.0, 0.0, 0.0])
v1 = np.cross(rvi.v[0], v1)
p1 = rvi.r[0] + v1
a = rvi.amplitude(p1, rvi.t[0])
np.testing.assert_allclose(a.real, 1.0, rtol=0, atol=1e-5)
np.testing.assert_allclose(a.imag, 0.0, rtol=0, atol=1e-5)
@timer
def test_sumAmplitude():
import time
rng = np.random.default_rng(57721)
size = 10_000
for n in [1.0, 1.3]:
x = rng.uniform(-1, 1, size=size)
y = rng.uniform(-1, 1, size=size)
z = rng.uniform(-0.1, 0.1, size=size)
vx = rng.uniform(-0.05, 0.05, size=size)
vy = rng.uniform(-0.05, 0.05, size=size)
vz = np.sqrt(1.0/(n*n) - vx*vx - vy*vy)
t = rng.uniform(-1.0, 1.0, size=size)
wavelength = rng.uniform(300e-9, 1100e-9, size=size)
rv = batoid.RayVector(x, y, z, vx, vy, vz, t, wavelength)
satime = 0
atime = 0
for r1, t1 in [
((0, 0, 0), 0),
((0, 1, 2), 3),
((-1, 2, 4), -1),
((0, 1, -4), -2)
]:
at0 = time.time()
s1 = rv.sumAmplitude(r1, t1)
at1 = time.time()
s2 = np.sum(rv.amplitude(r1, t1))
at2 = time.time()
np.testing.assert_allclose(s1, s2, rtol=0, atol=1e-11)
satime += at1-at0
atime += at2-at1
# print(f"sumAplitude() time: {satime}")
# print(f"np.sum(amplitude()) time: {atime}")
@timer
def test_equals():
import time
rng = np.random.default_rng(577215)
size = 10_000
x = rng.uniform(-1, 1, size=size)
y = rng.uniform(-1, 1, size=size)
z = rng.uniform(-0.1, 0.1, size=size)
vx = rng.uniform(-0.05, 0.05, size=size)
vy = rng.uniform(-0.05, 0.05, size=size)
vz = np.sqrt(1.0 - vx*vx - vy*vy)
t = rng.uniform(-1.0, 1.0, size=size)
wavelength = rng.uniform(300e-9, 1100e-9, size=size)
flux = rng.uniform(0.9, 1.1, size=size)
vignetted = rng.choice([True, False], size=size)
failed = rng.choice([True, False], size=size)
args = x, y, z, vx, vy, vz, t, wavelength, flux, vignetted, failed
rv = batoid.RayVector(*args)
rv2 = rv.copy()
assert rv == rv2
for i in range(len(args)):
newargs = [args[i].copy() for i in range(len(args))]
ai = newargs[i]
if ai.dtype == float:
ai[0] = 1.2+ai[0]*3.45
elif ai.dtype == bool:
ai[0] = not ai[0]
# else panic!
rv2 = batoid.RayVector(*newargs)
assert rv != rv2
# Repeat, but force comparison on device
rv2 = rv.copy()
rv._rv.x.syncToDevice()
rv._rv.y.syncToDevice()
rv._rv.z.syncToDevice()
rv._rv.vx.syncToDevice()
rv._rv.vy.syncToDevice()
rv._rv.vz.syncToDevice()
rv._rv.t.syncToDevice()
rv._rv.wavelength.syncToDevice()
rv._rv.flux.syncToDevice()
rv._rv.vignetted.syncToDevice()
rv._rv.failed.syncToDevice()
assert rv == rv2
for i in range(len(args)):
newargs = [args[i].copy() for i in range(len(args))]
ai = newargs[i]
if ai.dtype == float:
ai[0] = 1.2+ai[0]*3.45
elif ai.dtype == bool:
ai[0] = not ai[0]
# else panic!
rv2 = batoid.RayVector(*newargs)
assert rv != rv2
@timer
def test_asGrid():
rng = np.random.default_rng(5772156)
for _ in range(10):
backDist = rng.uniform(9.0, 11.0)
wavelength = rng.uniform(300e-9, 1100e-9)
nx = 1
while (nx%2) == 1:
nx = rng.integers(10, 21)
lx = rng.uniform(1.0, 10.0)
dx = lx/(nx-2)
dirCos = np.array([
rng.uniform(-0.1, 0.1),
rng.uniform(-0.1, 0.1),
rng.uniform(-1.2, -0.8),
])
dirCos /= np.sqrt(np.dot(dirCos, dirCos))
# Some things that should be equivalent
grid1 = batoid.RayVector.asGrid(
backDist=backDist, wavelength=wavelength,
nx=nx, lx=lx, dirCos=dirCos
)
grid2 = batoid.RayVector.asGrid(
backDist=backDist, wavelength=wavelength,
nx=nx, dx=dx, dirCos=dirCos
)
grid3 = batoid.RayVector.asGrid(
backDist=backDist, wavelength=wavelength,
dx=dx, lx=lx, dirCos=dirCos
)
grid4 = batoid.RayVector.asGrid(
backDist=backDist, wavelength=wavelength,
nx=nx, lx=(lx, 0.0), dirCos=dirCos
)
theta_x, theta_y = batoid.utils.dirCosToField(*dirCos)
grid5 = batoid.RayVector.asGrid(
backDist=backDist, wavelength=wavelength,
nx=nx, lx=(lx, 0.0), theta_x=theta_x, theta_y=theta_y
)
rays_allclose(grid1, grid2)
rays_allclose(grid1, grid3)
rays_allclose(grid1, grid4)
rays_allclose(grid1, grid5)
# Check distance to chief ray
cridx = (nx//2)*nx+nx//2
obs_dist = np.sqrt(np.dot(grid1.r[cridx], grid1.r[cridx]))
np.testing.assert_allclose(obs_dist, backDist)
np.testing.assert_allclose(grid1.t, 0)
np.testing.assert_allclose(grid1.wavelength, wavelength)
np.testing.assert_allclose(grid1.vignetted, False)
np.testing.assert_allclose(grid1.failed, False)
np.testing.assert_allclose(grid1.vx, dirCos[0])
np.testing.assert_allclose(grid1.vy, dirCos[1])
np.testing.assert_allclose(grid1.vz, dirCos[2])
# Check distribution of points propagated to entrance pupil
pupil = batoid.Plane()
pupil.intersect(grid1)
np.testing.assert_allclose(np.diff(grid1.x)[0], dx)
np.testing.assert_allclose(np.diff(grid1.y)[0], 0, atol=1e-14)
np.testing.assert_allclose(np.diff(grid1.x)[nx-1], -dx*(nx-1))
np.testing.assert_allclose(np.diff(grid1.y)[nx-1], dx)
# Another set, but with odd nx
for _ in range(10):
backDist = rng.uniform(9.0, 11.0)
wavelength = rng.uniform(300e-9, 1100e-9)
while (nx%2) == 0:
nx = rng.integers(10, 21)
lx = rng.uniform(1.0, 10.0)
dx = lx/(nx-1)
dirCos = np.array([
rng.uniform(-0.1, 0.1),
rng.uniform(-0.1, 0.1),
rng.uniform(-1.2, -0.8),
])
dirCos /= np.sqrt(np.dot(dirCos, dirCos))
grid1 = batoid.RayVector.asGrid(
backDist=backDist, wavelength=wavelength,
nx=nx, lx=lx, dirCos=dirCos
)
grid2 = batoid.RayVector.asGrid(
backDist=backDist, wavelength=wavelength,
nx=nx, dx=dx, dirCos=dirCos
)
grid3 = batoid.RayVector.asGrid(
backDist=backDist, wavelength=wavelength,
nx=nx, lx=(lx, 0), dirCos=dirCos
)
# ... but the following is not equivalent, since default is to always
# infer an even nx and ny
# grid4 = batoid.RayVector.asGrid(
# backDist=backDist, wavelength=wavelength,
# dx=1/9, lx=1.0, dirCos=dirCos
# )
rays_allclose(grid1, grid2)
rays_allclose(grid1, grid3)
cridx = (nx*nx-1)//2
obs_dist = np.sqrt(np.dot(grid1.r[cridx], grid1.r[cridx]))
np.testing.assert_allclose(obs_dist, backDist)
np.testing.assert_allclose(grid1.t, 0)
np.testing.assert_allclose(grid1.wavelength, wavelength)
np.testing.assert_allclose(grid1.vignetted, False)
np.testing.assert_allclose(grid1.failed, False)
np.testing.assert_allclose(grid1.vx, dirCos[0])
np.testing.assert_allclose(grid1.vy, dirCos[1])
np.testing.assert_allclose(grid1.vz, dirCos[2])
# Check distribution of points propagated to entrance pupil
pupil = batoid.Plane()
pupil.intersect(grid1)
np.testing.assert_allclose(np.diff(grid1.x)[0], dx)
np.testing.assert_allclose(np.diff(grid1.y)[0], 0, atol=1e-14)
np.testing.assert_allclose(np.diff(grid1.x)[nx-1], -dx*(nx-1))
np.testing.assert_allclose(np.diff(grid1.y)[nx-1], dx)
for _ in range(10):
# Check nrandom
rays = batoid.RayVector.asGrid(
backDist=backDist, wavelength=wavelength,
lx=1.0, nx=1,
nrandom=1000, dirCos=dirCos
)
np.testing.assert_allclose(rays.t, 0)
np.testing.assert_allclose(rays.wavelength, wavelength)
np.testing.assert_allclose(rays.vignetted, False)
np.testing.assert_allclose(rays.failed, False)
np.testing.assert_allclose(rays.vx, dirCos[0])
np.testing.assert_allclose(rays.vy, dirCos[1])
np.testing.assert_allclose(rays.vz, dirCos[2])
# Check that projected points are inside region
pupil = batoid.Plane()
pupil.intersect(rays)
np.testing.assert_allclose(rays.z, 0.0)
np.testing.assert_array_less(rays.x, 0.5)
np.testing.assert_array_less(rays.y, 0.5)
np.testing.assert_array_less(-0.5, rays.x)
np.testing.assert_array_less(-0.5, rays.y)
assert len(rays) == 1000
@timer
def test_asPolar():
rng = np.random.default_rng(5772156)
for _ in range(10):
backDist = rng.uniform(9.0, 11.0)
wavelength = rng.uniform(300e-9, 1100e-9)
inner = rng.uniform(1.0, 3.0)
outer = inner + rng.uniform(1.0, 3.0)
nrad = rng.integers(1, 11)
naz = rng.integers(10, 21)
dirCos = np.array([
rng.uniform(-0.1, 0.1),
rng.uniform(-0.1, 0.1),
rng.uniform(-1.2, -0.8),
])
dirCos /= np.sqrt(np.dot(dirCos, dirCos))
rays = batoid.RayVector.asPolar(
backDist=backDist, wavelength=wavelength,
outer=outer, inner=inner,
nrad=nrad, naz=naz,
dirCos=dirCos
)
np.testing.assert_allclose(rays.t, 0)
np.testing.assert_allclose(rays.wavelength, wavelength)
np.testing.assert_allclose(rays.vignetted, False)
np.testing.assert_allclose(rays.failed, False)
np.testing.assert_allclose(rays.vx, dirCos[0])
np.testing.assert_allclose(rays.vy, dirCos[1])
np.testing.assert_allclose(rays.vz, dirCos[2])
assert len(rays)%6 == 0
# If we set inner=0, then last ray should
# intersect the center of the pupil
inner = 0.0
rays = batoid.RayVector.asPolar(
backDist=backDist, wavelength=wavelength,
outer=outer, inner=inner,
nrad=nrad, naz=naz,
dirCos=dirCos
)
assert len(rays)%6 == 1
pupil = batoid.Plane()
pupil.intersect(rays)
np.testing.assert_allclose(rays.x[-1], 0, atol=1e-14)
np.testing.assert_allclose(rays.y[-1], 0, atol=1e-14)
np.testing.assert_allclose(rays.z[-1], 0, atol=1e-14)
@timer
def test_asSpokes():
rng = np.random.default_rng(5772156)
for _ in range(10):
backDist = rng.uniform(9.0, 11.0)
wavelength = rng.uniform(300e-9, 1100e-9)
inner = rng.uniform(1.0, 3.0)
outer = inner + rng.uniform(1.0, 3.0)
rings = rng.integers(1, 11)
spokes = rng.integers(10, 21)
dirCos = np.array([
rng.uniform(-0.1, 0.1),
rng.uniform(-0.1, 0.1),
rng.uniform(-1.2, -0.8),
])
dirCos /= np.sqrt(np.dot(dirCos, dirCos))
rays = batoid.RayVector.asSpokes(
backDist=backDist, wavelength=wavelength,
outer=outer, inner=inner,
spokes=spokes, rings=rings,
dirCos=dirCos
)
np.testing.assert_allclose(rays.t, 0)
np.testing.assert_allclose(rays.wavelength, wavelength)
np.testing.assert_allclose(rays.vignetted, False)
np.testing.assert_allclose(rays.failed, False)
np.testing.assert_allclose(rays.vx, dirCos[0])
np.testing.assert_allclose(rays.vy, dirCos[1])
np.testing.assert_allclose(rays.vz, dirCos[2])
assert len(rays) == spokes*rings
pupil = batoid.Plane()
pupil.intersect(rays)
radii = np.hypot(rays.x, rays.y)
ths = np.arctan2(rays.y, rays.x)
for i in range(spokes):
np.testing.assert_allclose(
radii[rings*i:rings*(i+1)],
np.linspace(inner, outer, rings, endpoint=True)
)
for i in range(rings):
checkAngle(ths[i::rings], np.linspace(0, 2*np.pi, spokes, endpoint=False))
# Check explicit rings and spokes
rings = rng.uniform(inner, outer, rings)
spokes = rng.uniform(0, 2*np.pi, spokes)
rays = batoid.RayVector.asSpokes(
backDist=backDist, wavelength=wavelength,
outer=outer, inner=inner,
rings=rings, spokes=spokes,
dirCos=dirCos
)
pupil = batoid.Plane()
pupil.intersect(rays)
radii = np.hypot(rays.x, rays.y)
ths = np.arctan2(rays.y, rays.x)
for i in range(len(spokes)):
np.testing.assert_allclose(
radii[len(rings)*i:len(rings)*(i+1)],
rings
)
for i in range(len(rings)):
checkAngle(
ths[i::len(rings)],
spokes
)
# Check Gaussian Quadrature
rings = rng.integers(5, 11)
spokes = 2*rings+1
rays = batoid.RayVector.asSpokes(
backDist=backDist, wavelength=wavelength,
outer=outer,
rings=rings,
spacing='GQ',
dirCos=dirCos
)
assert len(rays) == spokes*rings
pupil = batoid.Plane()
pupil.intersect(rays)
radii = np.hypot(rays.x, rays.y)
ths = np.arctan2(rays.y, rays.x)
Li, w = np.polynomial.legendre.leggauss(rings)
rings = np.sqrt((1+Li)/2)*outer
flux = w*np.pi/(2*spokes)
spokes = np.linspace(0, 2*np.pi, spokes, endpoint=False)
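        # The Gauss-Legendre nodes Li on [-1, 1] are mapped to radii via
        # r = sqrt((1 + Li)/2)*outer, i.e. the nodes are uniform in r^2 (the
        # natural variable for integrating over a circular pupil), and the
        # quadrature weights are shared evenly among the 2*rings + 1 spokes.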
for i in range(len(spokes)):
np.testing.assert_allclose(
radii[len(rings)*i:len(rings)*(i+1)],
rings
)
np.testing.assert_allclose(
rays.flux[len(rings)*i:len(rings)*(i+1)],
flux
)
for i in range(len(rings)):
checkAngle(
ths[i::len(rings)],
spokes
)
# Sanity check GQ grids against literature
# Values from Forbes JOSA Vol. 5, No. 11 (1988) Table 1
rings = [1, 2, 3, 4, 5, 6]
rad = [
[0.70710678],
[0.45970084, 0.88807383],
[0.33571069, 0.70710678, 0.94196515],
[0.26349923, 0.57446451, 0.81852949, 0.96465961],
[0.21658734, 0.48038042, 0.70710678, 0.87706023, 0.97626324],
[0.18375321, 0.41157661, 0.61700114, 0.78696226, 0.91137517, 0.98297241]
]
w = [
[0.5],
[0.25, 0.25],
[0.13888889, 0.22222222, 0.13888889],
[0.08696371, 0.16303629, 0.16303629, 0.08696371],
[0.05923172, 0.11965717, 0.14222222, 0.11965717, 0.05923172],
[0.04283112, 0.09019039, 0.11697848, 0.11697848, 0.09019039, 0.04283112]
]
for rings_, rad_, w_ in zip(rings, rad, w):
rays = batoid.RayVector.asSpokes(
backDist=backDist, wavelength=wavelength,
outer=1,
rings=rings_,
spacing='GQ',
dirCos=[0,0,-1]
)
spokes = rings_*2+1
radii = np.hypot(rays.x, rays.y)
for i in range(spokes):
np.testing.assert_allclose(
radii[rings_*i:rings_*(i+1)],
rad_
)
np.testing.assert_allclose(
rays.flux[rings_*i:rings_*(i+1)]*spokes/(2*np.pi),
w_
)
@timer
def test_factory_optic():
telescope = batoid.Optic.fromYaml("LSST_r.yaml")
grid1 = batoid.RayVector.asGrid(
optic=telescope, wavelength=500e-9, theta_x=0.1, theta_y=0.1,
nx=16
)
grid2 = batoid.RayVector.asGrid(
wavelength=500e-9, theta_x=0.1, theta_y=0.1,
backDist=telescope.backDist, stopSurface=telescope.stopSurface,
medium=telescope.inMedium, lx=telescope.pupilSize,
nx=16
)
rays_allclose(grid1, grid2)
grid1 = batoid.RayVector.asPolar(
optic=telescope, wavelength=500e-9, theta_x=0.1, theta_y=0.1,
naz=100, nrad=20
)
grid2 = batoid.RayVector.asPolar(
wavelength=500e-9, theta_x=0.1, theta_y=0.1,
backDist=telescope.backDist, stopSurface=telescope.stopSurface,
medium=telescope.inMedium, outer=telescope.pupilSize/2,
inner=telescope.pupilSize/2*telescope.pupilObscuration,
naz=100, nrad=20
)
rays_allclose(grid1, grid2)
grid1 = batoid.RayVector.asSpokes(
optic=telescope, wavelength=500e-9, theta_x=0.1, theta_y=0.1,
rings=10, spokes=21
)
grid2 = batoid.RayVector.asSpokes(
wavelength=500e-9, theta_x=0.1, theta_y=0.1,
backDist=telescope.backDist, stopSurface=telescope.stopSurface,
medium=telescope.inMedium, outer=telescope.pupilSize/2,
rings=10, spokes=21
)
rays_allclose(grid1, grid2)
@timer
def test_getitem():
telescope = batoid.Optic.fromYaml("LSST_r.yaml")
rv = batoid.RayVector.asPolar(
optic=telescope, wavelength=625e-9,
theta_x=np.deg2rad(1.0), theta_y=np.deg2rad(0.2),
nrad=10, naz=60
)
telescope.trace(rv)
# Single item indexing
for i in range(-len(rv), len(rv)):
rv1 = rv[i]
np.testing.assert_equal(rv1.r[0], rv.r[i])
np.testing.assert_equal(rv1.x[0], rv.x[i])
np.testing.assert_equal(rv1.y[0], rv.y[i])
np.testing.assert_equal(rv1.z[0], rv.z[i])
np.testing.assert_equal(rv1.v[0], rv.v[i])
np.testing.assert_equal(rv1.vx[0], rv.vx[i])
np.testing.assert_equal(rv1.vy[0], rv.vy[i])
np.testing.assert_equal(rv1.vz[0], rv.vz[i])
np.testing.assert_equal(rv1.t[0], rv.t[i])
np.testing.assert_equal(rv1.wavelength[0], rv.wavelength[i])
np.testing.assert_equal(rv1.flux[0], rv.flux[i])
np.testing.assert_equal(rv1.vignetted[0], rv.vignetted[i])
np.testing.assert_equal(rv1.failed[0], rv.failed[i])
assert rv1.r.flags.f_contiguous
assert rv1.v.flags.f_contiguous
# slice indexing
for i in range(-len(rv)//10, len(rv)//10):
slc = slice(i*10, (i+1)*10, 2)
rv2 = rv[slc]
np.testing.assert_equal(rv2.r, rv.r[slc])
np.testing.assert_equal(rv2.x, rv.x[slc])
np.testing.assert_equal(rv2.y, rv.y[slc])
np.testing.assert_equal(rv2.z, rv.z[slc])
np.testing.assert_equal(rv2.v, rv.v[slc])
np.testing.assert_equal(rv2.vx, rv.vx[slc])
np.testing.assert_equal(rv2.vy, rv.vy[slc])
np.testing.assert_equal(rv2.vz, rv.vz[slc])
np.testing.assert_equal(rv2.t, rv.t[slc])
np.testing.assert_equal(rv2.wavelength, rv.wavelength[slc])
np.testing.assert_equal(rv2.flux, rv.flux[slc])
np.testing.assert_equal(rv2.vignetted, rv.vignetted[slc])
np.testing.assert_equal(rv2.failed, rv.failed[slc])
assert rv2.r.flags.f_contiguous
assert rv2.v.flags.f_contiguous
# integer array indexing
idx = [0, -1, 1, -2, 2, -3, 50]
rv3 = rv[idx]
np.testing.assert_equal(rv3.r, rv.r[idx])
np.testing.assert_equal(rv3.x, rv.x[idx])
np.testing.assert_equal(rv3.y, rv.y[idx])
np.testing.assert_equal(rv3.z, rv.z[idx])
np.testing.assert_equal(rv3.v, rv.v[idx])
np.testing.assert_equal(rv3.vx, rv.vx[idx])
np.testing.assert_equal(rv3.vy, rv.vy[idx])
np.testing.assert_equal(rv3.vz, rv.vz[idx])
np.testing.assert_equal(rv3.t, rv.t[idx])
np.testing.assert_equal(rv3.wavelength, rv.wavelength[idx])
np.testing.assert_equal(rv3.flux, rv.flux[idx])
np.testing.assert_equal(rv3.vignetted, rv.vignetted[idx])
np.testing.assert_equal(rv3.failed, rv.failed[idx])
assert rv3.r.flags.f_contiguous
assert rv3.v.flags.f_contiguous
# boolean array indexing
idx = np.zeros(len(rv), dtype=bool)
idx[[0, -1, 5]] = True
rv4 = rv[idx]
np.testing.assert_equal(rv4.r, rv.r[idx])
np.testing.assert_equal(rv4.x, rv.x[idx])
np.testing.assert_equal(rv4.y, rv.y[idx])
np.testing.assert_equal(rv4.z, rv.z[idx])
np.testing.assert_equal(rv4.v, rv.v[idx])
np.testing.assert_equal(rv4.vx, rv.vx[idx])
np.testing.assert_equal(rv4.vy, rv.vy[idx])
np.testing.assert_equal(rv4.vz, rv.vz[idx])
np.testing.assert_equal(rv4.t, rv.t[idx])
np.testing.assert_equal(rv4.wavelength, rv.wavelength[idx])
np.testing.assert_equal(rv4.flux, rv.flux[idx])
np.testing.assert_equal(rv4.vignetted, rv.vignetted[idx])
np.testing.assert_equal(rv4.failed, rv.failed[idx])
assert rv4.r.flags.f_contiguous
assert rv4.v.flags.f_contiguous
# test iteration
for i, rv5 in enumerate(rv):
np.testing.assert_equal(rv5.r[0], rv.r[i])
np.testing.assert_equal(rv5.x[0], rv.x[i])
np.testing.assert_equal(rv5.y[0], rv.y[i])
np.testing.assert_equal(rv5.z[0], rv.z[i])
np.testing.assert_equal(rv5.v[0], rv.v[i])
np.testing.assert_equal(rv5.vx[0], rv.vx[i])
np.testing.assert_equal(rv5.vy[0], rv.vy[i])
np.testing.assert_equal(rv5.vz[0], rv.vz[i])
np.testing.assert_equal(rv5.t[0], rv.t[i])
np.testing.assert_equal(rv5.wavelength[0], rv.wavelength[i])
np.testing.assert_equal(rv5.flux[0], rv.flux[i])
np.testing.assert_equal(rv5.vignetted[0], rv.vignetted[i])
np.testing.assert_equal(rv5.failed[0], rv.failed[i])
assert rv5.r.flags.f_contiguous
assert rv5.v.flags.f_contiguous
for i, rv6 in enumerate(reversed(rv)):
        np.testing.assert_equal(rv6.r[0], rv.r[-i-1])
np.testing.assert_equal(rv6.x[0], rv.x[-i-1])
np.testing.assert_equal(rv6.y[0], rv.y[-i-1])
np.testing.assert_equal(rv6.z[0], rv.z[-i-1])
np.testing.assert_equal(rv6.v[0], rv.v[-i-1])
np.testing.assert_equal(rv6.vx[0], rv.vx[-i-1])
np.testing.assert_equal(rv6.vy[0], rv.vy[-i-1])
np.testing.assert_equal(rv6.vz[0], rv.vz[-i-1])
np.testing.assert_equal(rv6.t[0], rv.t[-i-1])
np.testing.assert_equal(rv6.wavelength[0], rv.wavelength[-i-1])
np.testing.assert_equal(rv6.flux[0], rv.flux[-i-1])
np.testing.assert_equal(rv6.vignetted[0], rv.vignetted[-i-1])
np.testing.assert_equal(rv6.failed[0], rv.failed[-i-1])
assert rv6.r.flags.f_contiguous
assert rv6.v.flags.f_contiguous
with np.testing.assert_raises(IndexError):
rv[len(rv)]
with np.testing.assert_raises(IndexError):
rv[-len(rv)-1]
def test_fromStop():
telescope = batoid.Optic.fromYaml("LSST_r.yaml")
rv = batoid.RayVector.asPolar(
optic=telescope, wavelength=625e-9,
theta_x=np.deg2rad(1.0), theta_y=np.deg2rad(0.2),
nrad=4, naz=10
)
rv_traced = telescope.trace(rv.copy())
rv_stop = telescope.stopSurface.interact(rv.copy())
for rv1, rv_traced1, rv_stop1 in zip(rv, rv_traced, rv_stop):
rv_test1 = batoid.RayVector.fromStop(
rv_stop1.x[0], rv_stop1.y[0],
optic=telescope, wavelength=625e-9,
theta_x=np.deg2rad(1.0), theta_y=np.deg2rad(0.2)
)
rv_test2 = batoid.RayVector.fromStop(
rv_stop1.x[0], rv_stop1.y[0],
optic=telescope, backDist=telescope.backDist, wavelength=625e-9,
theta_x=np.deg2rad(1.0), theta_y=np.deg2rad(0.2)
)
rv_test3 = batoid.RayVector.fromStop(
rv_stop1.x[0], rv_stop1.y[0],
optic=telescope, medium=telescope.inMedium, wavelength=625e-9,
theta_x=np.deg2rad(1.0), theta_y=np.deg2rad(0.2)
)
rv_test4 = batoid.RayVector.fromStop(
rv_stop1.x[0], rv_stop1.y[0],
optic=telescope, stopSurface=telescope.stopSurface,
wavelength=625e-9,
theta_x=np.deg2rad(1.0), theta_y=np.deg2rad(0.2)
)
rv_test5 = batoid.RayVector.fromStop(
rv_stop1.x[0], rv_stop1.y[0],
optic=telescope, stopSurface=telescope.stopSurface,
wavelength=625e-9,
dirCos=batoid.utils.fieldToDirCos(np.deg2rad(1.0), np.deg2rad(0.2))
)
for rv_test in [rv_test1, rv_test2, rv_test3, rv_test4, rv_test5]:
telescope.trace(rv_test)
np.testing.assert_allclose(
rv_test.x, rv_traced1.x, rtol=0, atol=1e-14
)
np.testing.assert_allclose(
rv_test.y, rv_traced1.y, rtol=0, atol=1e-14
)
np.testing.assert_allclose(
rv_test.z, rv_traced1.z, rtol=0, atol=1e-14
)
np.testing.assert_allclose(
rv_test.vx, rv_traced1.vx, rtol=0, atol=1e-14
)
np.testing.assert_allclose(
rv_test.vy, rv_traced1.vy, rtol=0, atol=1e-14
)
np.testing.assert_allclose(
rv_test.vz, rv_traced1.vz, rtol=0, atol=1e-14
)
# A few more coverage checks
with np.testing.assert_raises(ValueError):
rv = batoid.RayVector.fromStop(
0, 0, theta_x=0.0, theta_y=0.0
)
rv = batoid.RayVector.fromStop(
0, 0, theta_x=0.0, theta_y=0.0, wavelength=625e-9
)
rv2 = batoid.RayVector.fromStop(
0, 0, theta_x=0.0, theta_y=0.0, wavelength=625e-9,
backDist=40.0,
stopSurface=batoid.Interface(batoid.Plane()),
medium=batoid.vacuum
)
assert rv == rv2
def test_fromFieldAngles():
telescope = batoid.Optic.fromYaml("LSST_r.yaml")
thx = np.linspace(-0.5, 0.5, 10)
thy = np.linspace(-0.5, 0.5, 10)
rv = batoid.RayVector.fromFieldAngles(
np.deg2rad(thx), np.deg2rad(thy),
optic=telescope, wavelength=625e-9,
)
rv_traced = telescope.trace(rv.copy())
rv_stop = telescope.stopSurface.interact(rv.copy())
for rv1, rv_traced1, rv_stop1 in zip(rv, rv_traced, rv_stop):
dc = rv_stop1.v[0]/np.sqrt(np.sum(np.square(rv_stop1.v)))
thx, thy = batoid.utils.dirCosToField(*dc)
rv_test1 = batoid.RayVector.fromStop(
0.0, 0.0,
optic=telescope, wavelength=625e-9,
theta_x=thx, theta_y=thy
)
rv_test2 = batoid.RayVector.fromStop(
0.0, 0.0,
optic=telescope, backDist=telescope.backDist, wavelength=625e-9,
theta_x=thx, theta_y=thy
)
rv_test3 = batoid.RayVector.fromStop(
0.0, 0.0,
optic=telescope, medium=telescope.inMedium, wavelength=625e-9,
theta_x=thx, theta_y=thy
)
rv_test4 = batoid.RayVector.fromStop(
0.0, 0.0,
optic=telescope, stopSurface=telescope.stopSurface,
wavelength=625e-9,
theta_x=thx, theta_y=thy
)
rv_tests = [rv_test1, rv_test2, rv_test3, rv_test4]
for rv_test in rv_tests:
telescope.trace(rv_test)
np.testing.assert_allclose(
rv_test.x, rv_traced1.x, rtol=0, atol=1e-14
)
np.testing.assert_allclose(
rv_test.y, rv_traced1.y, rtol=0, atol=1e-14
)
np.testing.assert_allclose(
rv_test.z, rv_traced1.z, rtol=0, atol=1e-14
)
np.testing.assert_allclose(
rv_test.vx, rv_traced1.vx, rtol=0, atol=1e-14
)
np.testing.assert_allclose(
rv_test.vy, rv_traced1.vy, rtol=0, atol=1e-14
)
np.testing.assert_allclose(
rv_test.vz, rv_traced1.vz, rtol=0, atol=1e-14
)
# A few more coverage checks
with np.testing.assert_raises(ValueError): # no wavelength
rv = batoid.RayVector.fromFieldAngles(
0, 0
)
rv = batoid.RayVector.fromFieldAngles(
0, 0, wavelength=625e-9
)
rv2 = batoid.RayVector.fromFieldAngles(
0, 0, wavelength=625e-9,
backDist=40.0,
stopSurface=batoid.Interface(batoid.Plane()),
medium=batoid.vacuum
)
assert rv == rv2
if __name__ == '__main__':
init_gpu()
test_properties()
test_positionAtTime()
test_propagate()
test_phase()
test_sumAmplitude()
test_equals()
test_asGrid()
test_asPolar()
test_asSpokes()
test_factory_optic()
test_getitem()
test_fromStop()
test_fromFieldAngles()
```
#### File: jtrace/tests/test_Sum.py
```python
import batoid
import numpy as np
from test_helpers import timer, do_pickle, rays_allclose, all_obj_diff, init_gpu
@timer
def test_properties():
rng = np.random.default_rng(5)
for i in range(100):
s1 = batoid.Sphere(rng.uniform(1, 3))
s2 = batoid.Paraboloid(rng.uniform(1, 3))
sum = batoid.Sum([s1, s2])
do_pickle(sum)
assert s1 is sum.surfaces[0]
assert s2 is sum.surfaces[1]
s3 = batoid.Quadric(rng.uniform(3, 5), rng.uniform(-0.1, 0.1))
sum2 = batoid.Sum([s1, s2, s3])
do_pickle(sum2)
assert s1 is sum2.surfaces[0]
assert s2 is sum2.surfaces[1]
assert s3 is sum2.surfaces[2]
@timer
def test_sag():
rng = np.random.default_rng(57)
for _ in range(100):
s1 = batoid.Sphere(rng.uniform(1, 3))
s2 = batoid.Paraboloid(rng.uniform(1, 3))
sum = batoid.Sum([s1, s2])
x = rng.normal(size=5000)
y = rng.normal(size=5000)
np.testing.assert_allclose(
sum.sag(x, y),
s1.sag(x, y) + s2.sag(x, y),
rtol=0,
atol=1e-12
)
s3 = batoid.Quadric(rng.uniform(3, 5), rng.uniform(-0.1, 0.1))
sum2 = batoid.Sum([s1, s2, s3])
np.testing.assert_allclose(
sum2.sag(x, y),
s1.sag(x, y) + s2.sag(x, y) + s3.sag(x, y),
rtol=0,
atol=1e-12
)
@timer
def test_normal():
rng = np.random.default_rng(577)
for _ in range(100):
s1 = batoid.Sphere(rng.uniform(1, 3))
s2 = batoid.Paraboloid(rng.uniform(1, 3))
sum = batoid.Sum([s1, s2])
x = rng.normal(size=5000)
y = rng.normal(size=5000)
n1 = s1.normal(x, y)
n2 = s2.normal(x, y)
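        # For z = f1(x, y) + f2(x, y) the gradients add.  Each unit normal is
        # proportional to (-df/dx, -df/dy, 1), so the x/z and y/z components
        # add directly; the result is renormalized below before comparison.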
nx = n1[:, 0]/n1[:, 2] + n2[:, 0]/n2[:, 2]
ny = n1[:, 1]/n1[:, 2] + n2[:, 1]/n2[:, 2]
nz = 1./np.sqrt(nx*nx + ny*ny + 1)
nx *= nz
ny *= nz
normal = np.array([nx, ny, nz]).T
np.testing.assert_allclose(
sum.normal(x, y),
normal,
rtol=0,
atol=1e-12
)
s3 = batoid.Quadric(rng.uniform(3, 5), rng.uniform(-0.1, 0.1))
sum2 = batoid.Sum([s1, s2, s3])
n3 = s3.normal(x, y)
nx = n1[:, 0]/n1[:, 2] + n2[:, 0]/n2[:, 2] + n3[:, 0]/n3[:, 2]
ny = n1[:, 1]/n1[:, 2] + n2[:, 1]/n2[:, 2] + n3[:, 1]/n3[:, 2]
nz = 1./np.sqrt(nx*nx + ny*ny + 1)
nx *= nz
ny *= nz
normal = np.array([nx, ny, nz]).T
np.testing.assert_allclose(
sum2.normal(x, y),
normal,
rtol=0,
atol=1e-12
)
@timer
def test_intersect():
rng = np.random.default_rng(5772)
size = 10_000
for _ in range(100):
s1 = batoid.Sphere(1./rng.normal(0., 0.2))
s2 = batoid.Paraboloid(rng.uniform(1, 3))
sum = batoid.Sum([s1, s2])
sumCoordSys = batoid.CoordSys(origin=[0, 0, -1])
x = rng.uniform(-1, 1, size=size)
y = rng.uniform(-1, 1, size=size)
z = np.full_like(x, -10.0)
# If we shoot rays straight up, then it's easy to predict the intersection
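        # (In global coordinates the summed surface sits at z = sag(x, y) - 1,
        # since the Sum's coordinate system origin is placed at z = -1.)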
vx = np.zeros_like(x)
vy = np.zeros_like(x)
vz = np.ones_like(x)
rv = batoid.RayVector(x, y, z, vx, vy, vz)
np.testing.assert_allclose(rv.z, -10.0)
rv2 = batoid.intersect(sum, rv.copy(), sumCoordSys)
assert rv2.coordSys == sumCoordSys
rv2 = rv2.toCoordSys(batoid.CoordSys())
np.testing.assert_allclose(rv2.x, x)
np.testing.assert_allclose(rv2.y, y)
np.testing.assert_allclose(
rv2.z, sum.sag(x, y)-1,
rtol=0, atol=1e-12
)
# Check default intersect coordTransform
rv2 = rv.copy().toCoordSys(sumCoordSys)
batoid.intersect(sum, rv2)
assert rv2.coordSys == sumCoordSys
rv2 = rv2.toCoordSys(batoid.CoordSys())
np.testing.assert_allclose(rv2.x, x)
np.testing.assert_allclose(rv2.y, y)
np.testing.assert_allclose(
rv2.z, sum.sag(x, y)-1,
rtol=0, atol=1e-12
)
@timer
def test_reflect():
rng = np.random.default_rng(57721)
size = 10_000
for _ in range(100):
s1 = batoid.Sphere(1./rng.normal(0., 0.2))
s2 = batoid.Paraboloid(rng.uniform(1, 3))
sum = batoid.Sum([s1, s2])
x = rng.uniform(-1, 1, size=size)
y = rng.uniform(-1, 1, size=size)
z = np.full_like(x, -10.0)
vx = rng.uniform(-1e-5, 1e-5, size=size)
vy = rng.uniform(-1e-5, 1e-5, size=size)
vz = np.full_like(x, 1)
rv = batoid.RayVector(x, y, z, vx, vy, vz)
rvr = batoid.reflect(sum, rv.copy())
rvr2 = sum.reflect(rv.copy())
rays_allclose(rvr, rvr2)
# print(f"{np.sum(rvr.failed)/len(rvr)*100:.2f}% failed")
normal = sum.normal(rvr.x, rvr.y)
# Test law of reflection
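        # Equal angles of incidence and reflection mean the projection of the
        # incoming direction onto the surface normal equals that of the
        # negated outgoing direction; the two einsum terms compute exactly
        # these projections for the rays that did not fail.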
a0 = np.einsum("ad,ad->a", normal, rv.v)[~rvr.failed]
a1 = np.einsum("ad,ad->a", normal, -rvr.v)[~rvr.failed]
np.testing.assert_allclose(
a0, a1,
rtol=0, atol=1e-12
)
# Test that rv.v, rvr.v and normal are all in the same plane
np.testing.assert_allclose(
np.einsum(
"ad,ad->a",
np.cross(normal, rv.v),
rv.v
)[~rvr.failed],
0.0,
rtol=0, atol=1e-12
)
@timer
def test_refract():
rng = np.random.default_rng(577215)
size = 10_000
for _ in range(100):
s1 = batoid.Sphere(1./rng.normal(0., 0.2))
s2 = batoid.Paraboloid(rng.uniform(1, 3))
sum = batoid.Sum([s1, s2])
m0 = batoid.ConstMedium(rng.normal(1.2, 0.01))
m1 = batoid.ConstMedium(rng.normal(1.3, 0.01))
x = rng.uniform(-1, 1, size=size)
y = rng.uniform(-1, 1, size=size)
z = np.full_like(x, -10.0)
vx = rng.uniform(-1e-5, 1e-5, size=size)
vy = rng.uniform(-1e-5, 1e-5, size=size)
vz = np.sqrt(1-vx*vx-vy*vy)/m0.n
rv = batoid.RayVector(x, y, z, vx, vy, vz)
rvr = batoid.refract(sum, rv.copy(), m0, m1)
rvr2 = sum.refract(rv.copy(), m0, m1)
rays_allclose(rvr, rvr2)
# print(f"{np.sum(rvr.failed)/len(rvr)*100:.2f}% failed")
normal = sum.normal(rvr.x, rvr.y)
# Test Snell's law
s0 = np.sum(np.cross(normal, rv.v*m0.n)[~rvr.failed], axis=-1)
s1 = np.sum(np.cross(normal, rvr.v*m1.n)[~rvr.failed], axis=-1)
np.testing.assert_allclose(
m0.n*s0, m1.n*s1,
rtol=0, atol=1e-9
)
# Test that rv.v, rvr.v and normal are all in the same plane
np.testing.assert_allclose(
np.einsum(
"ad,ad->a",
np.cross(normal, rv.v),
rv.v
)[~rvr.failed],
0.0,
rtol=0, atol=1e-12
)
@timer
def test_add_plane():
rng = np.random.default_rng(5772156)
for _ in range(100):
# Adding a plane should have zero effect on sag or normal vector
s1 = batoid.Sphere(rng.uniform(1, 3))
s2 = batoid.Plane()
sum = batoid.Sum([s1, s2])
x = rng.normal(size=5000)
y = rng.normal(size=5000)
np.testing.assert_allclose(
sum.sag(x, y),
s1.sag(x, y),
rtol=0,
atol=1e-12
)
np.testing.assert_allclose(
sum.normal(x, y),
s1.normal(x, y),
rtol=0,
atol=1e-12,
)
@timer
def test_sum_paraboloid():
    # para_sag = r^2/(2*R)
# so two paraboloids yields r^2 * (1/(2*R1) + 1/(2*R2))
# so (1/(2*R1) + 1/(2*R2)) = 1/(2*R)
# implies
# 0.5/(1/(2*R1) + 1/(2*R2)) = R
rng = np.random.default_rng(57721566)
for _ in range(100):
R1 = rng.uniform(1, 2)
R2 = rng.uniform(2, 3)
Rsum = 0.5/(1/(2*R1) + 1/(2*R2))
para1 = batoid.Paraboloid(R1)
para2 = batoid.Paraboloid(R2)
paraSum = batoid.Paraboloid(Rsum)
paraSum2 = batoid.Sum([para1, para2])
x = rng.normal(size=5000)
y = rng.normal(size=5000)
np.testing.assert_allclose(
paraSum.sag(x, y),
paraSum2.sag(x, y),
rtol=0,
atol=1e-12
)
np.testing.assert_allclose(
paraSum.normal(x, y),
paraSum2.normal(x, y),
rtol=0,
atol=1e-12,
)
@timer
def test_ne():
objs = [
batoid.Sum([batoid.Plane(), batoid.Plane()]),
batoid.Sum([batoid.Plane(), batoid.Sphere(1.0)]),
batoid.Sum([batoid.Plane(), batoid.Plane(), batoid.Plane()]),
batoid.Plane()
]
all_obj_diff(objs)
@timer
def test_fail():
sum = batoid.Sum([batoid.Plane(), batoid.Sphere(1.0)])
rv = batoid.RayVector(0, 10, 0, 0, 0, -1) # Too far to side
rv2 = batoid.intersect(sum, rv.copy())
np.testing.assert_equal(rv2.failed, np.array([True]))
# This one passes
rv = batoid.RayVector(0, 0, -1, 0, 0, +1)
rv2 = batoid.intersect(sum, rv.copy())
np.testing.assert_equal(rv2.failed, np.array([False]))
if __name__ == '__main__':
init_gpu()
test_properties()
test_sag()
test_normal()
test_intersect()
test_reflect()
test_refract()
test_add_plane()
test_sum_paraboloid()
test_ne()
test_fail()
```
#### File: jtrace/tests/test_zemax.py
```python
import os
import pytest
import galsim
import numpy as np
from scipy.optimize import least_squares
import batoid
from test_helpers import timer, init_gpu
directory = os.path.dirname(__file__)
@timer
def test_HSC_trace():
telescope = batoid.Optic.fromYaml("HSC_old.yaml")
# Zemax has a number of virtual surfaces that we don't trace in batoid.
# Also, the HSC.yaml above includes Baffle surfaces not in Zemax. The
# following lists select out the surfaces in common to both models.
HSC_surfaces = [
3, 6, 7, 8, 9, 11, 12, 13, 14, 16, 17, 18, 19, 20, 21, 24, 25, 28, 29,
31
]
surface_names = ['PM', 'G1_entrance', 'G1_exit', 'G2_entrance', 'G2_exit',
'ADC1_entrance', 'ADC1_exit', 'ADC2_entrance', 'ADC2_exit',
'G3_entrance', 'G3_exit', 'G4_entrance', 'G4_exit',
'G5_entrance', 'G5_exit', 'F_entrance', 'F_exit',
'W_entrance', 'W_exit', 'D']
for fn in [
"HSC_raytrace_1.txt", "HSC_raytrace_2.txt", "HSC_raytrace_3.txt"
]:
filename = os.path.join(directory, "testdata", fn)
with open(filename) as f:
arr = np.loadtxt(f, skiprows=22, usecols=list(range(0, 12)))
arr0 = arr[0]
rv = batoid.RayVector(
arr0[1]/1000, arr0[2]/1000, 16.0,
arr0[4], arr0[5], -arr0[6],
t=0, wavelength=750e-9
)
tf = telescope.traceFull(rv)
i = 0
for name in surface_names:
surface = tf[name]
srv = surface['out']
srv.toCoordSys(batoid.CoordSys())
bt_isec = np.array([srv.x, srv.y, srv.z-16.0]).T[0]
zx_isec = arr[HSC_surfaces[i]-1][1:4]/1000
# nanometer agreement
np.testing.assert_allclose(bt_isec, zx_isec, rtol=0, atol=1e-9)
v = srv.v/np.linalg.norm(srv.v)
bt_angle = v[0]
zx_angle = arr[HSC_surfaces[i]-1][4:7]
# direction cosines agree to 1e-9
np.testing.assert_allclose(bt_angle, zx_angle, rtol=0, atol=1e-9)
i += 1
@timer
def test_HSC_huygensPSF():
fn = os.path.join(directory, "testdata", "HSC_huygensPSF.txt")
with open(fn) as f:
Zarr = np.loadtxt(f, skiprows=21)
Zarr = Zarr[::-1] # Need to invert, probably just a Zemax convention...
telescope = batoid.Optic.fromYaml("HSC_no_obsc.yaml")
thx = np.deg2rad(0.0)
thy = np.deg2rad(0.75)
wavelength = 750e-9
nx = 128
dx = 0.25e-6
print("computing Huygens PSF")
hPSF = batoid.huygensPSF(
telescope,
thx, thy, projection='zemax',
wavelength=wavelength,
nx=nx, dx=dx, nxOut=256,
reference='mean'
)
print("Done")
# Normalize images
Zarr /= np.sum(Zarr)
hPSF.array /= np.sum(hPSF.array)
Zmax = np.max(Zarr)
Zarr /= Zmax
hPSF.array /= Zmax
# Use GalSim InterpolateImage to align and subtract
ii = galsim.InterpolatedImage(
galsim.Image(hPSF.array, scale=0.25),
normalization='sb'
)
# Now setup an optimizer to fit for x/y shift
def modelimg(params, ii=ii):
dx, dy, dlogflux = params
model = ii.shift(dx, dy)*np.exp(dlogflux)
return model.drawImage(method='sb', scale=0.25, nx=256, ny=256)
def resid(params, ii=ii, Zarr=Zarr):
img = modelimg(params, ii=ii)
r = (img.array - Zarr).ravel()
return r
kwargs = dict(ii=ii, Zarr=Zarr)
print("Aligning")
result = least_squares(resid, np.array([0.0, 0.0, 0.0]), kwargs=kwargs)
optImg = modelimg(result.x, ii=ii)
print("Done")
np.testing.assert_allclose(Zarr, optImg.array, rtol=0, atol=3e-2)
Zmom = galsim.hsm.FindAdaptiveMom(galsim.Image(Zarr, scale=0.25))
bmom = galsim.hsm.FindAdaptiveMom(optImg)
np.testing.assert_allclose(
Zmom.observed_shape.g1,
bmom.observed_shape.g1,
rtol=0, atol=0.01
)
np.testing.assert_allclose(
Zmom.observed_shape.g2,
bmom.observed_shape.g2,
rtol=0, atol=1e-7
)
np.testing.assert_allclose(
Zmom.moments_sigma,
bmom.moments_sigma,
rtol=0, atol=0.1
)
@timer
def test_HSC_wf():
fn = os.path.join(directory, "testdata", "HSC_wavefront.txt")
with open(fn) as f:
Zwf = np.loadtxt(f, skiprows=17)
Zwf = Zwf[::-1] # Need to invert, probably just a Zemax convention...
telescope = batoid.Optic.fromYaml("HSC_no_obsc.yaml")
thx = np.deg2rad(0.0)
thy = np.deg2rad(0.75)
wavelength = 750e-9
nx = 512
bwf = batoid.wavefront(telescope, thx, thy, wavelength, nx=nx)
Zwf = np.ma.MaskedArray(data=Zwf, mask=Zwf==0) # Turn Zwf into masked array
# There are unimportant differences in piston, tip, and tilt terms. So
# instead of comparing the wavefront directly, we'll compare Zernike
# coefficients for j >= 4.
x = np.linspace(-1, 1, nx, endpoint=False)
x, y = np.meshgrid(x, x)
w = ~Zwf.mask # Use the same mask for both Zemax and batoid
basis = galsim.zernike.zernikeBasis(37, x[w], y[w])
Zcoefs, _, _, _ = np.linalg.lstsq(basis.T, Zwf[w], rcond=-1)
Bcoefs, _, _, _ = np.linalg.lstsq(basis.T, bwf.array[w], rcond=-1)
for j in range(1, 38):
print("{:<4d} {:8.4f} {:8.4f}".format(j, Zcoefs[j], Bcoefs[j]))
np.testing.assert_allclose(Zcoefs[4:], Bcoefs[4:], rtol=0, atol=0.01)
# higher order Zernikes match even better
np.testing.assert_allclose(Zcoefs[11:], Bcoefs[11:], rtol=0, atol=0.01)
@timer
def test_HSC_zernike():
ZZernike = [0]
with open(os.path.join(directory, "testdata", "HSC_Zernike.txt")) as f:
for i, line in enumerate(f):
if i > 38:
ZZernike.append(float(line[9:20]))
ZZernike = np.array(ZZernike)
telescope = batoid.Optic.fromYaml("HSC_no_obsc.yaml")
thx = np.deg2rad(0.0)
thy = np.deg2rad(0.75)
wavelength = 750e-9
nx = 256
bZernike = batoid.zernike(
telescope, thx, thy, wavelength, jmax=37, nx=nx,
projection='zemax', reference='chief'
)
print()
print("j Zemax batoid diff")
print("------------------------------")
for j in range(1, 38):
print(
f"{j:<4d} {ZZernike[j]:8.4f} {bZernike[j]:8.4f} "
f"{ZZernike[j]-bZernike[j]:8.4f}"
)
# Don't care about piston, tip, or tilt.
np.testing.assert_allclose(ZZernike[4:], bZernike[4:], rtol=0, atol=1e-2)
np.testing.assert_allclose(ZZernike[11:], bZernike[11:], rtol=0, atol=3e-3)
@timer
def test_LSST_wf(plot=False):
thxs = [0.0, 0.0, 0.0, 1.176]
thys = [0.0, 1.225, 1.75, 1.176]
fns = ["LSST_wf_0.0_0.0.txt",
"LSST_wf_0.0_1.225.txt",
"LSST_wf_0.0_1.75.txt",
"LSST_wf_1.176_1.176.txt"]
for thx, thy, fn in zip(thxs, thys, fns):
fn = os.path.join(directory, "testdata", fn)
with open(fn, encoding='utf-16-le') as f:
Zwf = np.loadtxt(f, skiprows=16)
Zwf = Zwf[::-1] # Need to invert, probably just a Zemax convention...
telescope = batoid.Optic.fromYaml("LSST_g_500.yaml")
thx = np.deg2rad(thx)
thy = np.deg2rad(thy)
wavelength = 500e-9
nx = 32
bwf = batoid.wavefront(
telescope, thx, thy, wavelength, nx=nx,
reference='chief', projection='zemax'
)
# Turn Zwf into masked array
Zwf = np.ma.MaskedArray(data=Zwf, mask=Zwf==0)
if plot:
import matplotlib.pyplot as plt
fig, axes = plt.subplots(ncols=3, figsize=(10,3))
i0 = axes[0].imshow(bwf.array)
i1 = axes[1].imshow(Zwf)
i2 = axes[2].imshow(bwf.array-Zwf)
axes[0].set_title("batoid")
axes[1].set_title("Zemax")
axes[2].set_title("difference")
plt.colorbar(i0, ax=axes[0], label='waves')
plt.colorbar(i1, ax=axes[1], label='waves')
plt.colorbar(i2, ax=axes[2], label='waves')
plt.tight_layout()
plt.show()
np.testing.assert_allclose(
Zwf*wavelength,
bwf.array*wavelength,
atol=1e-11, rtol=0) # 10 picometer tolerance!
@timer
def test_LSST_fftPSF(plot=False):
thxs = [0.0, 0.0, 0.0, 1.176]
thys = [0.0, 1.225, 1.75, 1.176]
fns = ["LSST_fftpsf_0.0_0.0.txt",
"LSST_fftpsf_0.0_1.225.txt",
"LSST_fftpsf_0.0_1.75.txt",
"LSST_fftpsf_1.176_1.176.txt"]
for thx, thy, fn in zip(thxs, thys, fns):
fn = os.path.join(directory, "testdata", fn)
with open(fn, encoding='utf-16-le') as f:
Zpsf = np.loadtxt(f, skiprows=18)
Zpsf = Zpsf[::-1] # Need to invert, probably just a Zemax convention...
Zpsf /= np.max(Zpsf)
telescope = batoid.Optic.fromYaml("LSST_g_500.yaml")
thx = np.deg2rad(thx)
thy = np.deg2rad(thy)
wavelength = 500e-9
nx = 32
bpsf = batoid.fftPSF(
telescope, thx, thy, wavelength, nx=nx,
reference='chief', projection='zemax'
)
        bpsf.array = bpsf.array[::-1,::-1]  # b/c the lattice primitive vectors are negative
bpsf.array /= np.max(bpsf.array)
# Use GalSim InterpolateImage to align and subtract
ii = galsim.InterpolatedImage(
galsim.Image(bpsf.array, scale=1.0),
normalization='sb'
)
# Now setup an optimizer to fit for x/y shift
def modelimg(params, ii=ii):
dx, dy, dlogflux = params
model = ii.shift(dx, dy)*np.exp(dlogflux)
return model.drawImage(method='sb', scale=1.0, nx=64, ny=64)
def resid(params, ii=ii, Zpsf=Zpsf):
img = modelimg(params, ii=ii)
r = (img.array - Zpsf).ravel()
return r
kwargs = dict(ii=ii, Zpsf=Zpsf)
result = least_squares(resid, np.array([0.0, 0.0, 0.0]), kwargs=kwargs)
optImg = modelimg(result.x, ii=ii)
if plot:
import matplotlib.pyplot as plt
fig, axes = plt.subplots(ncols=3, figsize=(10,3))
i0 = axes[0].imshow(optImg.array)
i1 = axes[1].imshow(Zpsf)
i2 = axes[2].imshow(optImg.array-Zpsf)
plt.colorbar(i0, ax=axes[0])
plt.colorbar(i1, ax=axes[1])
plt.colorbar(i2, ax=axes[2])
plt.tight_layout()
plt.show()
@pytest.mark.slow
@timer
def test_LSST_huygensPSF(plot=False):
thxs = [0.0, 0.0, 0.0, 1.176]
thys = [0.0, 1.225, 1.75, 1.176]
fns = ["LSST_hpsf_0.0_0.0.txt",
"LSST_hpsf_0.0_1.225.txt",
"LSST_hpsf_0.0_1.75.txt",
"LSST_hpsf_1.176_1.176.txt"]
if __name__ != "__main__":
thxs = thxs[2:3]
thys = thys[2:3]
fns = fns[2:3]
for thx, thy, fn in zip(thxs, thys, fns):
fn = os.path.join(directory, "testdata", fn)
with open(fn, encoding='utf-16-le') as f:
Zpsf = np.loadtxt(f, skiprows=21)
Zpsf = Zpsf[::-1] # Need to invert, probably just a Zemax convention...
Zpsf /= np.max(Zpsf)
telescope = batoid.Optic.fromYaml("LSST_g_500.yaml")
thx = np.deg2rad(thx)
thy = np.deg2rad(thy)
wavelength = 500e-9
bpsf = batoid.huygensPSF(
telescope, thx, thy, wavelength, nx=128,
# telescope, thx, thy, wavelength, nx=1024,
reference='chief', projection='zemax',
dx=0.289e-6, nxOut=64
)
bpsf.array /= np.max(bpsf.array)
# Use GalSim InterpolateImage to align and subtract
ii = galsim.InterpolatedImage(
galsim.Image(bpsf.array, scale=1.0),
normalization='sb'
)
# Now setup an optimizer to fit for x/y shift
def modelimg(params, ii=ii):
dx, dy, dlogflux = params
model = ii.shift(dx, dy)*np.exp(dlogflux)
return model.drawImage(method='sb', scale=1.0, nx=64, ny=64)
def resid(params, ii=ii, Zpsf=Zpsf):
img = modelimg(params, ii=ii)
r = (img.array - Zpsf).ravel()
return r
kwargs = dict(ii=ii, Zpsf=Zpsf)
print("Aligning")
result = least_squares(resid, np.array([0.0, 0.0, 0.0]), kwargs=kwargs)
optImg = modelimg(result.x, ii=ii)
print("Done")
if plot:
import matplotlib.pyplot as plt
fig, axes = plt.subplots(ncols=3, figsize=(10,3))
i0 = axes[0].imshow(optImg.array)
i1 = axes[1].imshow(Zpsf)
i2 = axes[2].imshow(optImg.array-Zpsf)
plt.colorbar(i0, ax=axes[0])
plt.colorbar(i1, ax=axes[1])
plt.colorbar(i2, ax=axes[2])
plt.tight_layout()
plt.show()
if thy not in [0.0, 1.176]:
fig, ax = plt.subplots(figsize=(6, 4))
ax.plot(optImg.array[:,32], c='g')
ax.plot(Zpsf[:,32], c='b')
ax.plot((optImg.array-Zpsf)[:,32], c='r')
plt.show()
@timer
def test_LSST_trace(verbose=False):
# The g_500 file uses vacuum instead of air, which is important to match
# Zemax for this test.
telescope = batoid.Optic.fromYaml("LSST_g_500.yaml")
zSurfaces = [4, 5, 7, 9, 10, 11, 12, 13, 14, 15, 16, 17]
for fn in ["LSST_trace_0.txt", "LSST_trace_1.txt", "LSST_trace_2.txt"]:
filename = os.path.join(directory, "testdata", fn)
# Get normalized coordinates
with open(filename, encoding='utf-16-le') as f:
Hx, Hy, Px, Py = np.genfromtxt(f, skip_header=13, max_rows=4, usecols=(6,))
with open(filename, encoding='utf-16-le') as f:
arr = np.genfromtxt(f, skip_header=22, max_rows=18, usecols=list(range(1, 12)))
ray = batoid.RayVector.fromStop(
Px*4.18, Py*4.18,
optic=telescope,
wavelength=500e-9,
theta_x=np.deg2rad(Hx*1.75), theta_y=np.deg2rad(Hy*1.75),
projection='zemax'
)
tf = telescope.traceFull(ray)
for surface, iz in zip(tf.values(), zSurfaces):
r = surface['out'].toCoordSys(batoid.globalCoordSys)
n = 1./np.sqrt(np.sum(r.v**2))
# Note Zemax has different sign convention for z-coordinates and
# direction cosines. The important bits to match are x and y, which
# do match, including the signs.
if verbose:
print(surface['name'])
print('x', r.x[0], arr[iz][0]/1e3, r.x-arr[iz][0]/1e3)
print('y', r.y[0], arr[iz][1]/1e3, r.y-arr[iz][1]/1e3)
print('z', r.z[0], arr[iz][2]/1e3, r.z+arr[iz][2]/1e3)
print('vx', r.vx[0]*n, arr[iz][3], np.abs(r.vx*n) - np.abs(arr[iz][3]))
print('vy', r.vy[0]*n, arr[iz][4], np.abs(r.vy*n) - np.abs(arr[iz][4]))
print('vz', r.vz[0]*n, arr[iz][5], np.abs(r.vz*n) - np.abs(arr[iz][5]))
print()
np.testing.assert_allclose(r.x, arr[iz][0]/1e3, rtol=0, atol=1e-10)
np.testing.assert_allclose(r.y, arr[iz][1]/1e3, rtol=0, atol=1e-10)
np.testing.assert_allclose(
np.abs(r.z), np.abs(arr[iz][2]/1e3),
rtol=0, atol=1e-10
)
np.testing.assert_allclose(np.abs(r.vx*n), np.abs(arr[iz][3]), rtol=0, atol=1e-10)
np.testing.assert_allclose(np.abs(r.vy*n), np.abs(arr[iz][4]), rtol=0, atol=1e-10)
np.testing.assert_allclose(np.abs(r.vz*n), np.abs(arr[iz][5]), rtol=0, atol=1e-10)
@timer
def test_DECam_trace(verbose=False):
telescope = batoid.Optic.fromYaml("DECam.yaml")
for fn in ["DECam_raytrace_0.txt", "DECam_raytrace_1.txt", "DECam_raytrace_2.txt"]:
filename = os.path.join(directory, "testdata", fn)
# Get normalized coordinates
with open(filename, encoding='utf-16-le') as f:
Hx, Hy, Px, Py = np.genfromtxt(f, skip_header=13, max_rows=4, usecols=(6,))
with open(filename, encoding='utf-16-le') as f:
arr = np.genfromtxt(f, skip_header=22, max_rows=40, usecols=list(range(1, 12)))
ray = batoid.RayVector.fromStop(
Px*2.005, Py*2.005,
optic=telescope,
stopSurface=telescope['PM'],
wavelength=700e-9,
theta_x=np.deg2rad(Hx*1.1), theta_y=np.deg2rad(Hy*1.1),
projection='zemax'
)
tf = telescope.traceFull(ray)
zSurfaces = [
2, 3, 5, 7, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 28,
29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40
]
for surface, iiz in zip(tf.values(), zSurfaces):
iz = iiz-1
r = surface['out'].toCoordSys(batoid.globalCoordSys)
n = 1./np.sqrt(np.sum(r.v**2))
# Note Zemax has different sign convention for z-coordinates and
# direction cosines. The important bits to match are x and y, which
# do match, including the signs.
if verbose:
print(surface['name'])
print('x', r.x[0], arr[iz][0]/1e3, r.x-arr[iz][0]/1e3)
print('y', r.y[0], arr[iz][1]/1e3, r.y-arr[iz][1]/1e3)
print('z', r.z[0], -arr[iz][2]/1e3, r.z+arr[iz][2]/1e3)
print('vx', r.vx[0]*n, arr[iz][3], np.abs(r.vx*n) - np.abs(arr[iz][3]))
print('vy', r.vy[0]*n, arr[iz][4], np.abs(r.vy*n) - np.abs(arr[iz][4]))
print('vz', r.vz[0]*n, arr[iz][5], np.abs(r.vz*n) - np.abs(arr[iz][5]))
print()
np.testing.assert_allclose(r.x, arr[iz][0]/1e3, rtol=0, atol=1e-9)
np.testing.assert_allclose(r.y, arr[iz][1]/1e3, rtol=0, atol=1e-9)
np.testing.assert_allclose(
np.abs(r.z), np.abs(arr[iz][2]/1e3),
rtol=0, atol=1e-9
)
np.testing.assert_allclose(np.abs(r.vx*n), np.abs(arr[iz][3]), rtol=0, atol=1e-9)
np.testing.assert_allclose(np.abs(r.vy*n), np.abs(arr[iz][4]), rtol=0, atol=1e-9)
np.testing.assert_allclose(np.abs(r.vz*n), np.abs(arr[iz][5]), rtol=0, atol=1e-9)
def test_DECam_exit_pupil_pos():
telescope = batoid.Optic.fromYaml("DECam.yaml")
# From the Optics Prescription report, the exit pupil is
# -4275.016 mm behind the focal plane. This is with the stop surface set to
# the primary mirror though, so make sure to adjust that.
telescope.stopSurface = telescope['PM']
np.testing.assert_allclose(
batoid.exitPupilPos(telescope, wavelength=700e-9),
(0, 0, telescope['D'].coordSys.origin[2]+4275.016/1e3),
rtol=0, atol=1e-3
)
if __name__ == '__main__':
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument("--plotWF", action='store_true')
parser.add_argument("--plotFFT", action='store_true')
parser.add_argument("--plotHuygens", action='store_true')
args = parser.parse_args()
init_gpu()
test_HSC_trace()
test_HSC_huygensPSF()
test_HSC_wf()
test_HSC_zernike()
test_LSST_wf(args.plotWF)
test_LSST_fftPSF(args.plotFFT)
test_LSST_huygensPSF(args.plotHuygens)
test_LSST_trace(verbose=False)
test_DECam_trace(verbose=False)
test_DECam_exit_pupil_pos()
``` |
{
"source": "jmeyers314/treegp",
"score": 3
} |
#### File: treegp/treegp/gp_interp.py
```python
import treegp
import numpy as np
import copy
from .kernels import eval_kernel
from sklearn.gaussian_process.kernels import Kernel
from sklearn.neighbors import KNeighborsRegressor
from scipy.linalg import cholesky, cho_solve
class GPInterpolation(object):
"""
An interpolator that uses 2-point correlation function information
or maximum likelihood information to perform Gaussian process
interpolation of a single surface.
:param kernel: A string that can be `eval`ed to make a
sklearn.gaussian_process.kernels.Kernel object. The reprs of
sklearn.gaussian_process.kernels will work, as well as the repr of a
custom treegp VonKarman object. [default: 'RBF(1)']
:param optimizer: Indicates which technique to use for optimizing the kernel. Four options
are available. "none" does not optimize the hyperparameters and uses the ones
given in the kernel. "two-pcf" optimizes the kernel on the 1d 2-point
correlation function estimated by treecorr. "anisotropic" optimizes the kernel
on the 2d 2-point correlation function estimated by treecorr.
"log-likelihood" uses the classical maximum likelihood method.
:param normalize: Whether to normalize the interpolation parameters to have a mean of 0.
Normally, the parameters being interpolated are not mean 0, so you would
want this to be True, but if your parameters have an a priori mean of 0,
then subtracting off the realized mean would be invalid. [default: True]
:param white_noise: A float value that indicates the amount of white noise to use during
the gp interpolation. This is an additional uncorrelated noise
added to the error of the interpolated parameters. [default: 0.]
:param n_neighbors: Number of neighbors to use for interpolating the spatial average using
a KNeighbors interpolation. Used only if average_fits is not None. [default: 4]
:param nbins: Number of bins (if 1D correlation function) or the square root of the number
of bins (if 2D correlation function) used in TreeCorr to compute the
2-point correlation function. [default: 20]
:param min_sep: Minimum separation between pairs when computing the 2-point correlation
function. In the same units as the coordinates of the field.
Computed automatically if not given. [default: None]
:param max_sep: Maximum separation between pairs when computing the 2-point correlation
function. In the same units as the coordinates of the field.
Computed automatically if not given. [default: None]
:param average_fits: A fits file that contains the spatial average function of the interpolated
parameter, built using meanify output across different
exposures. See the meanify documentation. [default: None]
"""
def __init__(self, kernel='RBF(1)', optimizer='two-pcf',
normalize=True, p0=[3000., 0.,0.],
white_noise=0., n_neighbors=4, average_fits=None, indice_meanify=None,
nbins=20, min_sep=None, max_sep=None):
self.normalize = normalize
self.optimizer = optimizer
self.white_noise = white_noise
self.n_neighbors = n_neighbors
self.nbins = nbins
self.min_sep = min_sep
self.max_sep = max_sep
if self.optimizer == 'anisotropic':
self.robust_fit = True
else:
self.robust_fit = False
self.p0_robust_fit = p0
self.indice_meanify = indice_meanify
if isinstance(kernel,str):
self.kernel_template = eval_kernel(kernel)
else:
raise TypeError("kernel should be a string a list or a numpy.ndarray of string")
if self.optimizer not in ['anisotropic', 'two-pcf', 'log-likelihood', 'none']:
raise ValueError("Only anisotropic, two-pcf, log-likelihood and none are supported for optimizer. Current value: %s"%(self.optimizer))
if average_fits is not None:
import fitsio
average = fitsio.read(average_fits)
X0 = average['COORDS0'][0]
y0 = average['PARAMS0'][0]
else:
X0 = None
y0 = None
self._X0 = X0
self._y0 = y0
def _fit(self, kernel, X, y, y_err):
"""Update the Kernel with data.
:param kernel: sklearn.gaussian_process kernel.
:param X: Coordinates of the field. (n_samples, 1 or 2)
:param y: Values of the field. (n_samples)
:param y_err: Error of y. (n_samples)
"""
if self.optimizer != "none":
# Hyperparameters estimation using 2-point correlation
# function information.
if self.optimizer in ['two-pcf', 'anisotropic']:
anisotropic = self.optimizer == 'anisotropic'
self._optimizer = treegp.two_pcf(X, y, y_err,
self.min_sep, self.max_sep,
nbins=self.nbins,
anisotropic=anisotropic,
robust_fit=self.robust_fit,
p0=self.p0_robust_fit)
kernel = self._optimizer.optimizer(kernel)
# Hyperparameters estimation using maximum likelihood fit.
if self.optimizer == 'log-likelihood':
self._optimizer = treegp.log_likelihood(X, y, y_err)
kernel = self._optimizer.optimizer(kernel)
return kernel
def predict(self, X, return_cov=False):
""" Predict responses to given coordinates.
:param X: The coordinates at which to interpolate. (n_samples, 1 or 2).
:returns: Regressed parameters (n_samples)
"""
y_init = copy.deepcopy(self._y)
y_err = copy.deepcopy(self._y_err)
y_interp, y_cov = self.return_gp_predict(y_init-self._mean-self._spatial_average,
self._X, X, self.kernel, y_err=y_err,
return_cov=return_cov)
y_interp = y_interp.T
spatial_average = self._build_average_meanify(X)
y_interp += self._mean + spatial_average
if return_cov:
return y_interp, y_cov
else:
return y_interp
def return_gp_predict(self, y, X1, X2, kernel, y_err, return_cov=False):
"""Compute interpolation with gaussian process for a given kernel.
:param y: Values of the field. (n_samples)
:param X1: The coordinates of the field. (n_samples, 1 or 2)
:param X2: The coordinates at which to interpolate. (n_samples, 1 or 2)
:param kernel: sklearn.gaussian_process kernel.
:param y_err: Error of y. (n_samples)
"""
HT = kernel.__call__(X2, Y=X1)
K = kernel.__call__(X1) + np.eye(len(y))*y_err**2
factor = (cholesky(K, overwrite_a=True, lower=False), False)
alpha = cho_solve(factor, y, overwrite_b=False)
y_predict = np.dot(HT,alpha.reshape((len(alpha),1))).T[0]
if return_cov:
fact = cholesky(K, lower=True) # I am computing maybe twice the same things...
v = cho_solve((fact, True), HT.T)
y_cov = kernel.__call__(X2) - HT.dot(v)
return y_predict, y_cov
else:
return y_predict, None
def initialize(self, X, y, y_err=None):
"""Initialize both the interpolator to some state prefatory to any solve iterations and
initialize the field values for use with this interpolator.
:param X: The coordinates of the field. (n_samples, 1 or 2)
:param y: Values of the field. (n_samples)
:param y_err: Error of y. (n_samples)
"""
self.kernel = copy.deepcopy(self.kernel_template)
self._X = X
self._y = y
if y_err is None:
y_err = np.zeros_like(y)
self._y_err = y_err
if self._X0 is None:
self._X0 = np.zeros_like(self._X)
self._y0 = np.zeros_like(self._y)
self._spatial_average = self._build_average_meanify(X)
if self.white_noise > 0:
y_err = np.sqrt(copy.deepcopy(self._y_err)**2 + self.white_noise**2)
self._y_err = y_err
if self.normalize:
self._mean = np.mean(y - self._spatial_average)
else:
self._mean = 0.
def _build_average_meanify(self, X):
"""Compute spatial average from meanify output for a given coordinate using KN interpolation.
If no average_fits was given, return array of 0.
:param X: Coordinates of training coordinates where to interpolate. (n_samples, 1 or 2)
"""
if np.sum(np.equal(self._X0, 0)) != len(self._X0[:,0])*len(self._X0[0]):
neigh = KNeighborsRegressor(n_neighbors=self.n_neighbors)
neigh.fit(self._X0, self._y0)
average = neigh.predict(X)
if self.indice_meanify is not None:
average = average[:,self.indice_meanify]
return average
else:
return np.zeros((len(X[:,0])))
def solve(self):
"""Set up this GPInterp object.
Solve for hyperparameters if requested using 2-point correlation
function method or maximum likelihood.
"""
self._init_theta = []
kernel = copy.deepcopy(self.kernel)
self._init_theta.append(kernel.theta)
self.kernel = self._fit(self.kernel, self._X,
self._y-self._mean-self._spatial_average, self._y_err)
def return_2pcf(self):
"""
Return 2-point correlation function and its variance using Bootstrap.
"""
anisotropic = self.optimizer == "anisotropic"
pcf = treegp.two_pcf(self._X, self._y-self._mean-self._spatial_average, self._y_err,
self.min_sep, self.max_sep,
nbins=self.nbins,
anisotropic=anisotropic)
xi, xi_weight, distance, coord, mask = pcf.return_2pcf()
return xi, xi_weight, distance, coord, mask
def return_log_likelihood(self, theta=None):
"""
Return the log likelihood of the Gaussian process
for given hyperparameters.
:param theta: Array of hyperparameters. (default: None)
"""
kernel = copy.deepcopy(self.kernel)
if theta is not None:
kernel = kernel.clone_with_theta(theta)
logl = treegp.log_likelihood(self._X, self._y-self._mean-self._spatial_average, self._y_err)
log_likelihood = logl.log_likelihood(kernel)
return log_likelihood
```
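A minimal usage sketch of the `GPInterpolation` class above, not part of the original package: the import path simply mirrors the file layout shown here, the kernel string and coordinates are illustrative placeholders, and treegp's dependencies (treecorr, scikit-learn) are assumed to be installed.
```python
import numpy as np
from treegp.gp_interp import GPInterpolation

rng = np.random.default_rng(42)
X = rng.uniform(0., 1000., size=(500, 2))                  # field coordinates
y = np.sin(X[:, 0] / 200.) + 0.05 * rng.normal(size=500)   # field values
y_err = np.full(500, 0.05)                                  # measurement errors

gp = GPInterpolation(kernel="0.1 * RBF(200.)", optimizer="two-pcf",
                     normalize=True, nbins=20)
gp.initialize(X, y, y_err=y_err)
gp.solve()                        # fit hyperparameters from the 1d 2-point function

X_new = rng.uniform(0., 1000., size=(100, 2))
y_interp = gp.predict(X_new)      # interpolated values at the new coordinates
```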
#### File: treegp/treegp/kernels.py
```python
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.gaussian_process.kernels import StationaryKernelMixin, NormalizedKernelMixin, Kernel
from sklearn.gaussian_process.kernels import Hyperparameter
from sklearn.gaussian_process.kernels import _check_length_scale
def eval_kernel(kernel):
"""
Some import trickery to get all subclasses
of sklearn.gaussian_process.kernels.Kernel
into the local namespace without doing
"from sklearn.gaussian_process.kernels import *"
and without importing them all manually.
Example:
kernel = eval_kernel("RBF(1)") instead of
kernel = sklearn.gaussian_process.kernels.RBF(1)
"""
def recurse_subclasses(cls):
out = []
for c in cls.__subclasses__():
out.append(c)
out.extend(recurse_subclasses(c))
return out
clses = recurse_subclasses(Kernel)
for cls in clses:
module = __import__(cls.__module__, globals(), locals(), cls)
execstr = "{0} = module.{0}".format(cls.__name__)
exec(execstr, globals(), locals())
from numpy import array
try:
k = eval(kernel)
except (KeyboardInterrupt, SystemExit):
raise
except Exception as e: # pragma: no cover
raise RuntimeError("Failed to evaluate kernel string {0!r}. "
"Original exception: {1}".format(kernel, e))
if type(k.theta) is property:
raise TypeError("String provided was not initialized properly")
return k
class AnisotropicRBF(StationaryKernelMixin, NormalizedKernelMixin, Kernel):
""" A GaussianProcessRegressor Kernel representing a radial basis function (essentially a
squared exponential or Gaussian) but with arbitrary anisotropic covariance.
While the parameter for this kernel, an inverse covariance matrix, can be specified directly
with the `invLam` kwarg, it may be more convenient to instead specify a characteristic
scale-length for each axis using the `scale_length` kwarg. Note that a list or array is
required so that the dimensionality of the kernel can be determined from its length.
For optimization, it's necessary to reparameterize the inverse covariance matrix in such a way
as to ensure that it's always positive definite. To this end, we define `theta` (abbreviated
`th` below) such that
invLam = L * L.T
L = [[exp(th[0]) 0 0 ... 0 0 ]
[th[n] exp(th[1]) 0 ... 0 0 ]
[th[n+1] th[n+2] exp(th[2]) ... 0 0 ]
[... ... ... ... ... ... ]
[th[] th[] th[] ... exp(th[n-2]) 0 ]
[th[] th[] th[] ... th[n*(n+1)/2-1] exp(th[n-1])]]
I.e., the inverse covariance matrix is Cholesky-decomposed, exp(theta[0:n]) lie on the diagonal
of the Cholesky matrix, and theta[n:n*(n+1)/2] lie in the lower triangular part of the Cholesky
matrix. This parameterization invertibly maps all valid n x n covariance matrices to
R^(n*(n+1)/2). I.e., the range of each theta[i] is -inf...inf.
:param invLam: Inverse covariance matrix of radial basis function. Exactly one of invLam and
scale_length must be provided.
:param scale_length: Axes-aligned scale lengths of the kernel. len(scale_length) must be the
same as the dimensionality of the kernel, even if the scale length is the same
for each axis (i.e., even if the kernel is isotropic). Exactly one of invLam
and scale_length must be provided.
:param bounds: Optional keyword indicating fitting bounds on *theta*. Can either be a
2-element iterable, which will be taken to be the min and max value for every
theta element, or an [ntheta, 2] array indicating bounds on each of ntheta
elements.
"""
def __init__(self, invLam=None, scale_length=None, bounds=(-5,5)):
if scale_length is not None:
if invLam is not None:
raise TypeError("Cannot set both invLam and scale_length in AnisotropicRBF.")
invLam = np.diag(1./np.array(scale_length)**2)
self.ndim = invLam.shape[0]
self.ntheta = self.ndim*(self.ndim+1)//2
self._d = np.diag_indices(self.ndim)
self._t = np.tril_indices(self.ndim, -1)
self.set_params(invLam)
bounds = np.array(bounds)
if bounds.ndim == 1:
bounds = np.repeat(bounds[None, :], self.ntheta, axis=0)
assert bounds.shape == (self.ntheta, 2)
self._bounds = bounds
def __call__(self, X, Y=None, eval_gradient=False):
from scipy.spatial.distance import pdist, cdist, squareform
X = np.atleast_2d(X)
if Y is None:
dists = pdist(X, metric='mahalanobis', VI=self.invLam)
K = np.exp(-0.5 * dists**2)
K = squareform(K)
np.fill_diagonal(K, 1)
else:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated when Y is None.")
dists = cdist(X, Y, metric='mahalanobis', VI=self.invLam)
K = np.exp(-0.5 * dists**2)
if eval_gradient:
if self.hyperparameter_cholesky_factor.fixed:
return K, np.empty((X.shape[0], X.shape[0], 0))
else:
# dK_pq/dth_k = -0.5 * K_pq *
# ((x_p_i-x_q_i) * dInvLam_ij/dth_k * (x_q_j - x_q_j))
# dInvLam_ij/dth_k = dL_ij/dth_k * L_ij.T + L_ij * dL_ij.T/dth_k
# dL_ij/dth_k is a matrix with all zeros except for one element. That element is
# L_ij if k indicates one of the theta parameters landing on the Cholesky diagonal,
# and is 1.0 if k indicates one of the thetas in the lower triangular region.
L_grad = np.zeros((self.ntheta, self.ndim, self.ndim), dtype=float)
L_grad[(np.arange(self.ndim),)+self._d] = self._L[self._d]
L_grad[(np.arange(self.ndim, self.ntheta),)+self._t] = 1.0
half_invLam_grad = np.dot(L_grad, self._L.T)
invLam_grad = half_invLam_grad + np.transpose(half_invLam_grad, (0, 2, 1))
dX = X[:, np.newaxis, :] - X[np.newaxis, :, :]
dist_grad = np.einsum("ijk,lkm,ijm->ijl", dX, invLam_grad, dX)
K_gradient = -0.5 * K[:, :, np.newaxis] * dist_grad
return K, K_gradient
else:
return K
@property
def hyperparameter_cholesky_factor(self):
return Hyperparameter("CholeskyFactor", "numeric", (1e-5, 1e5), int(self.ntheta))
def get_params(self, deep=True):
return {"invLam":self.invLam}
def set_params(self, invLam=None):
if invLam is not None:
self.invLam = invLam
self._L = np.linalg.cholesky(self.invLam)
self._theta = np.hstack([np.log(self._L[self._d]), self._L[self._t]])
@property
def theta(self):
return self._theta
@theta.setter
def theta(self, theta):
self._theta = theta
self._L = np.zeros_like(self.invLam)
self._L[np.diag_indices(self.ndim)] = np.exp(theta[:self.ndim])
self._L[np.tril_indices(self.ndim, -1)] = theta[self.ndim:]
self.invLam = np.dot(self._L, self._L.T)
def __repr__(self):
return "{0}(invLam={1!r})".format(self.__class__.__name__, self.invLam)
@property
def bounds(self):
return self._bounds
class VonKarman(StationaryKernelMixin, NormalizedKernelMixin, Kernel):
"""VonKarman kernel.
Parameters
-----------
length_scale : float or array with shape (n_features,), default: 1.0
The length scale of the kernel. If a float, an isotropic kernel is
used. If an array, an anisotropic kernel is used where each dimension
of l defines the length-scale of the respective feature dimension.
length_scale_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on length_scale
"""
def __init__(self, length_scale=1.0, length_scale_bounds=(1e-5, 1e5)):
self.length_scale = length_scale
self.length_scale_bounds = length_scale_bounds
@property
def anisotropic(self):
return np.iterable(self.length_scale) and len(self.length_scale) > 1
@property
def hyperparameter_length_scale(self):
if self.anisotropic:
return Hyperparameter("length_scale", "numeric",
self.length_scale_bounds,
len(self.length_scale))
return Hyperparameter(
"length_scale", "numeric", self.length_scale_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
from scipy.spatial.distance import pdist, cdist, squareform
from scipy import special
X = np.atleast_2d(X)
length_scale = _check_length_scale(X, self.length_scale)
if Y is None:
dists = pdist(X, metric='euclidean')
Filter = (dists != 0.)
K = np.zeros_like(dists)
K[Filter] = ((dists[Filter]/length_scale)**(5./6.) *
special.kv(5./6.,2*np.pi*dists[Filter]/length_scale))
K = squareform(K)
lim0 = special.gamma(5./6.) / (2 * (np.pi**(5./6.)))
np.fill_diagonal(K, lim0)
K /= lim0
else:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated when Y is None.")
dists = cdist(X, Y, metric='euclidean')
Filter = (dists != 0.)
K = np.zeros_like(dists)
K[Filter] = ((dists[Filter]/length_scale)**(5./6.) *
special.kv(5./6.,2*np.pi*dists[Filter]/length_scale))
lim0 = special.gamma(5./6.) / (2 * (np.pi**(5./6.)))
if np.sum(Filter) != len(K[0])*len(K[:,0]):
K[~Filter] = lim0
K /= lim0
if eval_gradient:
if self.hyperparameter_length_scale.fixed:
# Hyperparameter l kept fixed
return K, np.empty((X.shape[0], X.shape[0], 0))
elif not self.anisotropic or length_scale.shape[0] == 1:
K_gradient = (K * squareform(dists))[:, :, np.newaxis]
return K, K_gradient
elif self.anisotropic:
raise ValueError(
"Gradient can only be evaluated with isotropic VonKarman kernel for the moment.")
else:
return K
def __repr__(self):
if self.anisotropic:
return "{0}(length_scale=[{1}])".format(
self.__class__.__name__, ", ".join(map("{0:.3g}".format,
self.length_scale)))
else: # isotropic
return "{0}(length_scale={1:.3g})".format(
self.__class__.__name__, np.ravel(self.length_scale)[0])
class AnisotropicVonKarman(StationaryKernelMixin, NormalizedKernelMixin, Kernel):
""" A GaussianProcessRegressor Kernel representing a Von-Karman correlation function
with an arbitrary anisotropic covariance. While the parameter for this kernel,
an inverse covariance matrix, can be specified directly with the `invLam` kwarg,
it may be more convenient to instead specify a characteristic scale-length for each axis
using the `scale_length` kwarg. Note that a list or array is required so that the dimensionality
of the kernel can be determined from its length. For optimization, it's necessary to reparameterize
the inverse covariance matrix in such a way as to ensure that it's always positive definite.
To this end, we define `theta` (abbreviated `th` below) such that
invLam = L * L.T
L = [[exp(th[0]) 0 0 ... 0 0 ]
[th[n] exp(th[1]) 0 ... 0 0 ]
[th[n+1] th[n+2] exp(th[2]) ... 0 0 ]
[... ... ... ... ... ... ]
[th[] th[] th[] ... exp(th[n-2]) 0 ]
[th[] th[] th[] ... th[n*(n+1)/2-1] exp(th[n-1])]]
I.e., the inverse covariance matrix is Cholesky-decomposed, exp(theta[0:n]) lie on the diagonal
of the Cholesky matrix, and theta[n:n*(n+1)/2] lie in the lower triangular part of the Cholesky
matrix. This parameterization invertibly maps all valid n x n covariance matrices to
R^(n*(n+1)/2). I.e., the range of each theta[i] is -inf...inf.
:param invLam: Inverse covariance matrix of radial basis function. Exactly one of invLam and
scale_length must be provided.
:param scale_length: Axes-aligned scale lengths of the kernel. len(scale_length) must be the
same as the dimensionality of the kernel, even if the scale length is the same
for each axis (i.e., even if the kernel is isotropic). Exactly one of invLam
and scale_length must be provided.
:param bounds: Optional keyword indicating fitting bounds on *theta*. Can either be a
2-element iterable, which will be taken to be the min and max value for every
theta element, or an [ntheta, 2] array indicating bounds on each of ntheta
elements.
"""
def __init__(self, invLam=None, scale_length=None, bounds=(-5,5)):
if scale_length is not None:
if invLam is not None:
raise TypeError("Cannot set both invLam and scale_length in AnisotropicVonKarman.")
invLam = np.diag(1./np.array(scale_length)**2)
self.ndim = invLam.shape[0]
self.ntheta = self.ndim*(self.ndim+1)//2
self._d = np.diag_indices(self.ndim)
self._t = np.tril_indices(self.ndim, -1)
self.set_params(invLam)
bounds = np.array(bounds)
if bounds.ndim == 1:
bounds = np.repeat(bounds[None, :], self.ntheta, axis=0)
assert bounds.shape == (self.ntheta, 2)
self._bounds = bounds
def __call__(self, X, Y=None, eval_gradient=False):
from scipy.spatial.distance import pdist, cdist, squareform
from scipy import special
X = np.atleast_2d(X)
if Y is None:
dists = pdist(X, metric='mahalanobis', VI=self.invLam)
Filter = (dists != 0.)
K = np.zeros_like(dists)
K[Filter] = dists[Filter] **(5./6.) * special.kv(5./6., 2*np.pi * dists[Filter])
lim0 = special.gamma(5./6.) /(2 * ((np.pi)**(5./6.)) )
K = squareform(K)
np.fill_diagonal(K, lim0)
K /= lim0
else:
if eval_gradient:
raise ValueError(
"Gradient can not be evaluated.")
dists = cdist(X, Y, metric='mahalanobis', VI=self.invLam)
Filter = (dists != 0.)
K = np.zeros_like(dists)
K[Filter] = dists[Filter] **(5./6.) * special.kv(5./6., 2*np.pi * dists[Filter])
lim0 = special.gamma(5./6.) /(2 * ((np.pi)**(5./6.)) )
if np.sum(Filter) != len(K[0])*len(K[:,0]):
K[~Filter] = lim0
K /= lim0
if eval_gradient:
raise ValueError(
"Gradient can not be evaluated.")
else:
return K
@property
def hyperparameter_cholesky_factor(self):
return Hyperparameter("CholeskyFactor", "numeric", (1e-5, 1e5), int(self.ntheta))
def get_params(self, deep=True):
return {"invLam":self.invLam}
def set_params(self, invLam=None):
if invLam is not None:
self.invLam = invLam
self._L = np.linalg.cholesky(self.invLam)
self._theta = np.hstack([np.log(self._L[self._d]), self._L[self._t]])
@property
def theta(self):
return self._theta
@theta.setter
def theta(self, theta):
self._theta = theta
self._L = np.zeros_like(self.invLam)
self._L[np.diag_indices(self.ndim)] = np.exp(theta[:self.ndim])
self._L[np.tril_indices(self.ndim, -1)] = theta[self.ndim:]
self.invLam = np.dot(self._L, self._L.T)
def __repr__(self):
return "{0}(invLam={1!r})".format(self.__class__.__name__, self.invLam)
@property
def bounds(self):
return self._bounds
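# --- Illustrative usage sketch (added; assumes numpy and scikit-learn, as above) ---
if __name__ == "__main__":
    # String -> kernel object, as in the eval_kernel docstring.
    k = eval_kernel("RBF(1)")
    # The Cholesky parameterization described in the AnisotropicRBF docstring:
    # scale lengths of 1 and 2 give invLam = diag(1, 1/4) and
    # theta = [log L[0,0], log L[1,1], L[1,0]] = [0, log(0.5), 0].
    aniso = AnisotropicRBF(scale_length=[1.0, 2.0])
    print(aniso.invLam)
    print(aniso.theta)
    X = np.random.uniform(size=(5, 2))
    print(aniso(X).shape)  # (5, 5) kernel matrix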
``` |
{
"source": "jmeyers314/wfsim",
"score": 2
} |
#### File: wfsim/wfsim/catalog.py
```python
import numpy as np
import lsst.sphgeom as sphgeom
# Add some missing functionality from sphgeom
# See LSSTDESC.Coord for definitions of these
def _dsq(pt1, pt2):
return (
(pt1.x()-pt2.x())**2
+ (pt1.y()-pt2.y())**2
+ (pt1.z()-pt2.z())**2
)
def _cross(pt1, pt2):
return (
pt1.y() * pt2.z() - pt2.y() * pt1.z(),
pt1.z() * pt2.x() - pt2.z() * pt1.x(),
pt1.x() * pt2.y() - pt2.x() * pt1.y()
)
def _triple(pt1, pt2, pt3):
return np.linalg.det(
[[ pt2.x(), pt2.y(), pt2.z() ],
[ pt1.x(), pt1.y(), pt1.z() ],
[ pt3.x(), pt3.y(), pt3.z() ]]
)
def _alt_triple(pt1, pt2, pt3):
dsq_AC = (pt1.x()-pt2.x())**2 + (pt1.y()-pt2.y())**2 + (pt1.z()-pt2.z())**2
dsq_BC = (pt1.x()-pt3.x())**2 + (pt1.y()-pt3.y())**2 + (pt1.z()-pt3.z())**2
dsq_AB = (pt3.x()-pt2.x())**2 + (pt3.y()-pt2.y())**2 + (pt3.z()-pt2.z())**2
return 0.5 * (dsq_AC + dsq_BC - dsq_AB - 0.5 * dsq_AC * dsq_BC)
def distance_to(pt1, pt2):
dsq = _dsq(pt1, pt2)
if dsq < 3.99:
return 2*np.arcsin(0.5 * np.sqrt(dsq))
else:
cx, cy, cz = _cross(pt1, pt2)
crosssq = cx**2 + cy**2 + cz**2
return np.pi - np.arcsin(np.sqrt(crosssq))
def angle_between(pt1, pt2, pt3):
sinC = _triple(pt1, pt2, pt3)
cosC = _alt_triple(pt1, pt2, pt3)
C = np.arctan2(sinC, cosC)
return C
def area(poly):
vertices = poly.getVertices()
s = 0
N = len(vertices)
for i in range(N):
s += angle_between(
vertices[(i+1)%N],
vertices[i%N],
vertices[(i+2)%N]
)
return np.abs(s) - (N-2)*np.pi
def _magfunc(m):
"""Approximate magnitude function between +4 and +25
https://spacemath.gsfc.nasa.gov/stars/6Page103.pdf
"""
return 10**(-0.0003*m**3 + 0.0019*m**2 + 0.484*m - 3.82)
def rotate(axis, angle, vec):
ndim = vec.ndim
vec = np.atleast_2d(vec)
sth, cth = np.sin(angle), np.cos(angle)
dot = np.dot(axis, vec.T)
cross = np.cross(axis, vec)
out = vec * cth
out += cross * sth
out += axis * dot[:, None] * (1-cth)
if ndim == 1:
out = out[0]
return out
class MockStarCatalog:
def __init__(self, level=7, seed=57721):
self.htm = sphgeom.HtmPixelization(level)
self.seed = seed
self.bins = np.arange(10.0, 25.1, 0.1)
self.bincounts = np.empty(len(self.bins)-1)
for i, bin in enumerate(self.bins[:-1]):
self.bincounts[i] = _magfunc(bin+0.1) - _magfunc(bin)
self.density = np.sum(self.bincounts) # per sq. degree
def get_triangle_stars(self, idx):
rng = np.random.default_rng(self.seed+idx)
triangle = self.htm.triangle(idx)
circle = triangle.getBoundingCircle()
center = circle.getCenter()
opening_angle = circle.getOpeningAngle().asRadians()
area = circle.getArea() * (180/np.pi)**2
N = rng.poisson(self.density*area)
# uniformly sample cylinder then project to sphere
zmin = np.cos(opening_angle)
z = rng.uniform(zmin, 1, size=N)
ph = rng.uniform(0, 2*np.pi, size=N)
r = np.sqrt(1-z**2)
x = r*np.cos(ph)
y = r*np.sin(ph)
# rotate to correct point on the sky
axis = sphgeom.UnitVector3d.orthogonalTo(sphgeom.UnitVector3d.Z(), center)
angle = np.pi/2 - sphgeom.LonLat.latitudeOf(center).asRadians()
xyz = np.array([x, y, z]).T
xyz = rotate(axis, angle, xyz)
# sample magnitudes
magbin = rng.choice(
len(self.bincounts),
size=N,
p=self.bincounts/self.density
)
mag = self.bins[magbin] + rng.uniform(0, 0.1, size=N)
# only keep points actually within triangle
w = triangle.contains(*xyz.T)
xyz = xyz[w]
mag = mag[w]
return xyz, mag
def get_stars(self, polygon):
ranges = self.htm.envelope(polygon)
# For each spherical triangle, seed is index + self.seed
# Uniformly populate spherical cap
xyzs = []
mags = []
for begin, end in ranges:
for idx in range(begin, end):
xyz, mag = self.get_triangle_stars(idx)
xyzs.append(xyz)
mags.append(mag)
# trim to polygon
xyz = np.vstack(xyzs)
mag = np.hstack(mags)
w = polygon.contains(*xyz.T)
xyz = xyz[w]
mag = mag[w]
return xyz, mag
```
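A hedged sketch of driving `MockStarCatalog` with an `lsst.sphgeom` region, not part of the original module: the polygon corners are arbitrary placeholders, the import path mirrors the file layout above, and `lsst.sphgeom` (from the LSST stack) must be installed.
```python
import lsst.sphgeom as sphgeom
from wfsim.catalog import MockStarCatalog

# A small convex polygon on the sky built from unit vectors (corners are arbitrary).
corners = [
    sphgeom.UnitVector3d(sphgeom.LonLat.fromDegrees(lon, lat))
    for lon, lat in [(10.0, -5.0), (11.0, -5.0), (11.0, -4.0), (10.0, -4.0)]
]
poly = sphgeom.ConvexPolygon(corners)

catalog = MockStarCatalog(level=7, seed=57721)
xyz, mag = catalog.get_stars(poly)   # unit vectors and magnitudes of stars in the polygon
print(len(mag), mag.min(), mag.max())
```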
#### File: wfsim/wfsim/sst.py
```python
from ast import Del
import galsim
import os
import numpy as np
import astropy.io.fits as fits
import batoid
import functools
from scipy.interpolate import CloughTocher2DInterpolator
from scipy.spatial import Delaunay
@functools.lru_cache
def _fitsCache(fn):
from . import datadir
return fits.getdata(
os.path.join(
datadir,
fn
)
)
def _node_to_grid(nodex, nodey, nodez, grid_coords):
interp = CloughTocher2DInterpolator(
np.array([nodex, nodey]).T,
nodez,
fill_value=0.0
)
x, y = grid_coords
nx = len(x)
ny = len(y)
out = np.zeros([4, ny, nx])
dx = np.mean(np.diff(x))*1e-1
dy = np.mean(np.diff(y))*1e-1
x, y = np.meshgrid(x, y)
out[0] = interp(x, y)
out[1] = (interp(x+dx, y) - interp(x-dx, y))/(2*dx)
out[2] = (interp(x, y+dy) - interp(x, y-dy))/(2*dy)
out[3] = (
interp(x+dx, y+dy) -
interp(x-dx, y+dy) -
interp(x+dx, y-dy) +
interp(x-dx, y-dy)
)/(4*dx*dy)
# Zero out the central hole
r = np.hypot(x, y)
rmin = np.min(np.hypot(nodex, nodey))
w = r < rmin
out[:, w] = 0.0
return out
class SSTFactory:
def __init__(self, fiducial):
self.fiducial = fiducial
@functools.cached_property
def m1m3_fea_coords(self):
data = _fitsCache("M1M3_1um_156_grid.fits.gz")
idx = data[:, 0]
bx = data[:, 1] # (5256,)
by = data[:, 2]
idx1 = (idx == 1)
idx3 = (idx == 3)
return bx, by, idx1, idx3
@functools.cached_property
def m2_fea_coords(self):
data = _fitsCache("M2_1um_grid.fits.gz") # (15984, 75)
bx = -data[:, 1] # meters
by = data[:, 2]
return bx, by
@functools.cached_property
def m1_grid_coords(self):
data = _fitsCache("M1_bend_coords.fits.gz")
return data
@functools.cached_property
def m2_grid_coords(self):
data = _fitsCache("M2_bend_coords.fits.gz")
return data
@functools.cached_property
def m3_grid_coords(self):
data = _fitsCache("M3_bend_coords.fits.gz")
return data
def _m1m3_gravity(self, zenith_angle):
zdata = _fitsCache("M1M3_dxdydz_zenith.fits.gz")
hdata = _fitsCache("M1M3_dxdydz_horizon.fits.gz")
dxyz = (
zdata * np.cos(zenith_angle) +
hdata * np.sin(zenith_angle)
)
dz = dxyz[:,2]
# Interpolate these node displacements into z-displacements at
# original node x/y positions.
bx, by, idx1, idx3 = self.m1m3_fea_coords
# M1
zRef = self.fiducial['M1'].surface.sag(bx[idx1], by[idx1])
zpRef = self.fiducial['M1'].surface.sag(
(bx+dxyz[:, 0])[idx1],
(by+dxyz[:, 1])[idx1]
)
dz[idx1] += zRef - zpRef
# M3
zRef = self.fiducial['M3'].surface.sag(bx[idx3], by[idx3])
zpRef = self.fiducial['M3'].surface.sag(
(bx+dxyz[:, 0])[idx3],
(by+dxyz[:, 1])[idx3]
)
dz[idx3] += zRef - zpRef
# Subtract PTT
# This kinda makes sense for M1, but why for combined M1M3?
zBasis = galsim.zernike.zernikeBasis(
3, bx, by, R_outer=4.18, R_inner=2.558
)
coefs, _, _, _ = np.linalg.lstsq(zBasis.T, dxyz[:, 2], rcond=None)
zern = galsim.zernike.Zernike(coefs, R_outer=4.18, R_inner=2.558)
dz -= zern(bx, by)
return dz
def _m1m3_temperature(
self, m1m3TBulk, m1m3TxGrad, m1m3TyGrad, m1m3TzGrad, m1m3TrGrad,
):
if m1m3TxGrad is None:
m1m3TxGrad = 0.0
bx, by, idx1, idx3 = self.m1m3_fea_coords
normX = bx / 4.18
normY = by / 4.18
data = _fitsCache("M1M3_thermal_FEA.fits.gz")
delaunay = Delaunay(data[:, 0:2])
tbdz = CloughTocher2DInterpolator(delaunay, data[:, 2])(normX, normY)
txdz = CloughTocher2DInterpolator(delaunay, data[:, 3])(normX, normY)
tydz = CloughTocher2DInterpolator(delaunay, data[:, 4])(normX, normY)
tzdz = CloughTocher2DInterpolator(delaunay, data[:, 5])(normX, normY)
trdz = CloughTocher2DInterpolator(delaunay, data[:, 6])(normX, normY)
out = m1m3TBulk * tbdz
out += m1m3TxGrad * txdz
out += m1m3TyGrad * tydz
out += m1m3TzGrad * tzdz
out += m1m3TrGrad * trdz
out *= 1e-6
return out
# def _m2_gravity(self, zenith_angle):
# # This reproduces ts_phosim with preCompElevInRadian=0, but what is
# # that? Also, I have questions regarding the input domain of the Rbf
# # interpolation...
# bx, by = self.m2_fea_coords
# data = _fitsCache("M2_GT_FEA.fits.gz")
# from scipy.interpolate import Rbf
# zdz = Rbf(data[:, 0], data[:, 1], data[:, 2])(bx/1.71, by/1.71)
# hdz = Rbf(data[:, 0], data[:, 1], data[:, 3])(bx/1.71, by/1.71)
# out = zdz * (np.cos(zenith_angle) - 1)
# out += hdz * np.sin(zenith_angle)
# out *= 1e-6 # micron -> meters
# return out
# def _m2_temperature(self, m2TzGrad, m2TrGrad):
# # Same domain problem here as m2_gravity...
# bx, by = self.m2_fea_coords
# data = _fitsCache("M2_GT_FEA.fits.gz")
# from scipy.interpolate import Rbf
# tzdz = Rbf(data[:, 0], data[:, 1], data[:, 4])(bx/1.71, by/1.71)
# trdz = Rbf(data[:, 0], data[:, 1], data[:, 5])(bx/1.71, by/1.71)
# out = m2TzGrad * tzdz
# out += m2TrGrad * trdz
# out *= 1e-6 # micron -> meters
# return out
# This is Josh's preferred interpolator, but fails b/c domain issues.
def _m2_gravity(self, zenith_angle):
bx, by = self.m2_fea_coords
data = _fitsCache("M2_GT_FEA.fits.gz")
# Hack to get interpolation points inside Convex Hull of input
delaunay = Delaunay(data[:, 0:2]/0.95069)
zdz = CloughTocher2DInterpolator(delaunay, data[:, 2])(bx/1.71, by/1.71)
hdz = CloughTocher2DInterpolator(delaunay, data[:, 3])(bx/1.71, by/1.71)
out = zdz * (np.cos(zenith_angle) - 1)
out += hdz * np.sin(zenith_angle)
out *= 1e-6 # micron -> meters
return out
def _m2_temperature(self, m2TzGrad, m2TrGrad):
# Same domain problem here as m2_gravity...
bx, by = self.m2_fea_coords
normX = bx / 1.71
normY = by / 1.71
data = _fitsCache("M2_GT_FEA.fits.gz")
# Hack to get interpolation points inside Convex Hull of input
delaunay = Delaunay(data[:, 0:2]/0.95069)
tzdz = CloughTocher2DInterpolator(delaunay, data[:, 4])(normX, normY)
trdz = CloughTocher2DInterpolator(delaunay, data[:, 5])(normX, normY)
out = m2TzGrad * tzdz
out += m2TrGrad * trdz
out *= 1e-6
return out
def get_telescope(
self,
zenith_angle=None, # radians
rotation_angle=None, # radians
m1m3TBulk=0.0, # 2-sigma spans +/- 0.8C
m1m3TxGrad=0.0, # 2-sigma spans +/- 0.4C
m1m3TyGrad=0.0, # 2-sigma spans +/- 0.4C
m1m3TzGrad=0.0, # 2-sigma spans +/- 0.1C
m1m3TrGrad=0.0, # 2-sigma spans +/- 0.1C
m2TzGrad=0.0,
m2TrGrad=0.0,
camTB=None,
dof=None,
doM1M3Pert=False,
doM2Pert=False,
doCamPert=False,
_omit_dof_grid=False,
_omit_dof_zk=False,
):
optic = self.fiducial
if dof is None:
dof = np.zeros(50)
# order is z, dzdx, dzdy, d2zdxdy
# These can get set either through grav/temp perturbations or through
# dof
m1_grid = np.zeros((4, 204, 204))
m3_grid = np.zeros((4, 204, 204))
m1m3_zk = np.zeros(29)
if doM1M3Pert:
# hard code for now
# indices are over FEA nodes
m1m3_fea_dz = np.zeros(5256)
if zenith_angle is not None:
m1m3_fea_dz = self._m1m3_gravity(zenith_angle)
if any([m1m3TBulk, m1m3TxGrad, m1m3TyGrad, m1m3TzGrad, m1m3TrGrad]):
m1m3_fea_dz += self._m1m3_temperature(
m1m3TBulk, m1m3TxGrad, m1m3TyGrad, m1m3TzGrad, m1m3TrGrad
)
if np.any(m1m3_fea_dz):
bx, by, idx1, idx3 = self.m1m3_fea_coords
zBasis = galsim.zernike.zernikeBasis(
28, -bx, by, R_outer=4.18
)
m1m3_zk, *_ = np.linalg.lstsq(zBasis.T, m1m3_fea_dz, rcond=None)
zern = galsim.zernike.Zernike(m1m3_zk, R_outer=4.18)
m1m3_fea_dz -= zern(-bx, by)
m1_grid = _node_to_grid(
bx[idx1], by[idx1], m1m3_fea_dz[idx1], self.m1_grid_coords
)
m3_grid = _node_to_grid(
bx[idx3], by[idx3], m1m3_fea_dz[idx3], self.m3_grid_coords
)
m1_grid *= -1
m3_grid *= -1
m1m3_zk *= -1
# M1M3 bending modes
if np.any(dof[10:30] != 0):
if not _omit_dof_grid:
m1_bend = _fitsCache("M1_bend_grid.fits.gz")
m3_bend = _fitsCache("M3_bend_grid.fits.gz")
m1_grid += np.tensordot(m1_bend, dof[10:30], axes=[[1], [0]])
m3_grid += np.tensordot(m3_bend, dof[10:30], axes=[[1], [0]])
if not _omit_dof_zk:
m1m3_zk += np.dot(dof[10:30], _fitsCache("M13_bend_zk.fits.gz"))
if np.any([m1m3_zk]) or np.any(m1_grid):
optic = optic.withSurface(
'M1',
batoid.Sum([
optic['M1'].surface,
batoid.Zernike(m1m3_zk, R_outer=4.18),
batoid.Bicubic(*self.m1_grid_coords, *m1_grid)
])
)
if np.any([m1m3_zk]) or np.any(m3_grid):
optic = optic.withSurface(
'M3',
batoid.Sum([
optic['M3'].surface,
batoid.Zernike(m1m3_zk, R_outer=4.18),
batoid.Bicubic(*self.m3_grid_coords, *m3_grid)
])
)
m2_grid = np.zeros((4, 204, 204))
m2_zk = np.zeros(29)
if doM2Pert:
# hard code for now
# indices are over FEA nodes
m2_fea_dz = np.zeros(15984)
if zenith_angle is not None:
m2_fea_dz = self._m2_gravity(zenith_angle)
if any([m2TzGrad, m2TrGrad]):
m2_fea_dz += self._m2_temperature(
m2TzGrad, m2TrGrad
)
if np.any(m2_fea_dz):
bx, by = self.m2_fea_coords
zBasis = galsim.zernike.zernikeBasis(
28, -bx, by, R_outer=1.71
)
m2_zk, *_ = np.linalg.lstsq(zBasis.T, m2_fea_dz, rcond=None)
zern = galsim.zernike.Zernike(m2_zk, R_outer=1.71)
m2_fea_dz -= zern(-bx, by)
m2_grid = _node_to_grid(
bx, by, m2_fea_dz, self.m2_grid_coords
)
m2_grid *= -1
m2_zk *= -1
if np.any(dof[30:50] != 0):
if not _omit_dof_grid:
m2_bend = _fitsCache("M2_bend_grid.fits.gz")
m2_grid += np.tensordot(m2_bend, dof[30:50], axes=[[1], [0]])
if not _omit_dof_zk:
m2_zk += np.dot(dof[30:50], _fitsCache("M2_bend_zk.fits.gz"))
if np.any([m2_zk]) or np.any(m2_grid):
optic = optic.withSurface(
'M2',
batoid.Sum([
optic['M2'].surface,
batoid.Zernike(m2_zk, R_outer=1.71),
batoid.Bicubic(*self.m2_grid_coords, *m2_grid)
])
)
if np.any(dof[0:3] != 0):
optic = optic.withGloballyShiftedOptic(
"M2",
np.array([dof[1], dof[2], -dof[0]])*1e-6
)
if np.any(dof[3:5] != 0):
rx = batoid.RotX(np.deg2rad(-dof[3]/3600))
ry = batoid.RotY(np.deg2rad(-dof[4]/3600))
optic = optic.withLocallyRotatedOptic(
"M2",
rx @ ry
)
if np.any(dof[5:8] != 0):
optic = optic.withGloballyShiftedOptic(
"LSSTCamera",
np.array([dof[6], dof[7], -dof[5]])*1e-6
)
if np.any(dof[8:10] != 0):
rx = batoid.RotX(np.deg2rad(-dof[8]/3600))
ry = batoid.RotY(np.deg2rad(-dof[9]/3600))
optic = optic.withLocallyRotatedOptic(
"LSSTCamera",
rx @ ry
)
if doCamPert:
cam_data = [
('L1S1', 'L1_entrance', 0.775),
('L1S2', 'L1_exit', 0.775),
('L2S1', 'L2_entrance', 0.551),
('L2S2', 'L2_exit', 0.551),
('L3S1', 'L3_entrance', 0.361),
('L3S2', 'L3_exit', 0.361),
]
for tname, bname, radius in cam_data:
data = _fitsCache(tname+"zer.fits.gz")
grav_zk = data[0, 3:] * (np.cos(zenith_angle) - 1)
grav_zk += (
data[1, 3:] * np.cos(rotation_angle) +
data[2, 3:] * np.sin(rotation_angle)
) * np.sin(zenith_angle)
# subtract pre-compensated grav...
TB = np.clip(camTB, data[3, 2], data[10, 2])
fidx = np.interp(camTB, data[3:, 2], np.arange(len(data[3:, 2])))+3
idx = int(np.floor(fidx))
whi = fidx - idx
wlo = 1 - whi
temp_zk = wlo * data[idx, 3:] + whi * data[idx+1, 3:]
# subtract reference temperature zk (0 deg C is idx=5)
temp_zk -= data[5, 3:]
surf_zk = grav_zk + temp_zk
# remap Andy -> Noll Zernike indices
zIdxMapping = [
1, 3, 2, 5, 4, 6, 8, 9, 7, 10, 13, 14, 12, 15, 11, 19, 18, 20,
17, 21, 16, 25, 24, 26, 23, 27, 22, 28
]
surf_zk = surf_zk[[x - 1 for x in zIdxMapping]]
surf_zk *= -1e-3 # mm -> m
# tsph -> batoid 0-index offset
surf_zk = np.concatenate([[0], surf_zk])
optic = optic.withSurface(
bname,
batoid.Sum([
optic[bname].surface,
batoid.Zernike(-surf_zk, R_outer=radius)
])
)
return optic
# TODO:
# - M1M3 force error...
# - actuator forces
```
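A hedged sketch of how `SSTFactory.get_telescope` might be called, not a canonical recipe: it assumes batoid provides an LSST fiducial yaml (here `"LSST_r.yaml"`) and that the FEA/bending-mode FITS files referenced above are present in the package `datadir`; the degree-of-freedom values are arbitrary.
```python
import numpy as np
import batoid
from wfsim.sst import SSTFactory

fiducial = batoid.Optic.fromYaml("LSST_r.yaml")   # fiducial telescope (assumed available)
factory = SSTFactory(fiducial)

dof = np.zeros(50)
dof[0] = 10.0   # M2 piston (dz) in microns, following the conventions in get_telescope
dof[3] = 2.0    # M2 tilt about x in arcsec

perturbed = factory.get_telescope(
    zenith_angle=np.deg2rad(30.0),
    rotation_angle=np.deg2rad(15.0),
    dof=dof,
    doM1M3Pert=True,   # gravity + thermal M1M3 figure errors (needs the FEA data files)
)
```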
#### File: wfsim/wfsim/utils.py
```python
import numpy as np
import galsim
def BBSED(T):
"""(unnormalized) Blackbody SED for temperature T in Kelvin.
"""
waves_nm = np.arange(330.0, 1120.0, 10.0)
def planck(t, w):
# t in K
# w in m
c = 2.99792458e8 # speed of light in m/s
kB = 1.3806488e-23 # Boltzmann's constant J per Kelvin
h = 6.62607015e-34 # Planck's constant in J s
return w**(-5) / (np.exp(h*c/(w*kB*t))-1)
flambda = planck(T, waves_nm*1e-9)
return galsim.SED(
galsim.LookupTable(waves_nm, flambda),
wave_type='nm',
flux_type='flambda'
)
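# --- Illustrative usage sketch (added; assumes galsim, as above) -----------
if __name__ == "__main__":
    sed = BBSED(5800.0)                               # roughly solar temperature, Kelvin
    sed = sed.withFluxDensity(1.0, wavelength=500.0)  # normalize at 500 nm
    print(sed(400.0), sed(500.0), sed(700.0))         # relative photon flux densities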
``` |
{
"source": "jmeyers314/willitdeblend",
"score": 2
} |
#### File: jmeyers314/willitdeblend/deblend.py
```python
import numpy as np
def deblend(image, peaks, interpolate=False, force_interpolate=False):
""" Quick and dirty deblender.
Args
----
@param image A numpy array representing an image of a blend.
@param peaks A list of tuples representing the peak positions of objects in the blend.
@param interpolate If at least one component of rot_center is not a half-integer, use GalSim
to rotate the image.
@param force_interpolate Use GalSim to rotate the image, even if rot_center components are
half-integer and rotation via numpy array operations is possible.
@returns templates, template_fractions, children
"""
work_image = image+1.e-20 # avoid zeros which lead to NaNs
# Step 1: Make symmetric templates
templates = [np.fmin(work_image, rotate(work_image, peak,
interpolate=interpolate,
force_interpolate=force_interpolate))
for peak in peaks]
# Step 2: Calculate relative contribution of each template
template_sum = np.sum(templates, axis=0)
template_fractions = [template/template_sum * (template_sum != 0) for template in templates]
# Step 3: Calculate deblended children
children = [t * image for t in template_fractions]
return templates, template_fractions, children
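# --- Illustrative usage sketch (added; the helper name below is hypothetical) ---
def example_deblend():
    """Deblend a toy two-peak blend; galsim is only used to build the test image."""
    import galsim
    gal1 = galsim.Gaussian(fwhm=5).shift(-5, 0)
    gal2 = galsim.Gaussian(fwhm=5).shift(+5, 0)
    img = (gal1 + gal2).drawImage(nx=48, ny=32, scale=1.0)
    # Peaks are given in the centered pixel coordinates used by rotate() below.
    templates, fractions, children = deblend(img.array, [(-5, 0), (5, 0)])
    return children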
def rotate(image, rot_center, interpolate=False, force_interpolate=False):
""" Rotate an image about a point. Defaults to using numpy array operations, which means that
the rotation center is rounded to the nearest half-pixel grid point. Optionally, use GalSim to
rotate about arbitrary positions, which requires interpolation since the rotated pixel grid
doesn't generally align with the original pixel grid.
@param image Image to rotate
@param rot_center Tuple indicating point to be rotated about. (0,0) indicates rotate about
the geometric center of the image (so at the corner of 4 pixels if the
image is even-sized, or at the center of a single pixel if the image is
odd-sized).
@param interpolate If at least one component of rot_center is not a half-integer, use GalSim
to rotate the image.
@param force_interpolate Use GalSim to rotate the image, even if rot_center components are
half-integer and rotation via numpy array operations is possible.
@returns Rotated image.
"""
height, width = image.shape
# Round rot_center to nearest half-integer
rrot_center = [0.5*np.rint(2*p) for p in rot_center]
if force_interpolate or (interpolate and rrot_center != rot_center):
try:
import galsim
except:
raise ImportError("can't interpolate w/o galsim")
imobj = (galsim.InterpolatedImage(galsim.ImageD(image, scale=1),
calculate_stepk=False,
calculate_maxk=False)
.shift(-rot_center[0], -rot_center[1])
.rotate(180*galsim.degrees)
.shift(rot_center[0], rot_center[1]))
return imobj.drawImage(nx=width, ny=height, scale=1, method='no_pixel').array
# image_center is 0-indexed and measured from the lower-left corner of the lower-left pixel.
image_center = (width * 0.5, height * 0.5)
rot_pix_center = (image_center[0] + rrot_center[0],
image_center[1] + rrot_center[1])
# compute boundary of rotate region
rot_width = 2.0*min(rot_pix_center[0], width-rot_pix_center[0])
rot_height = 2.0*min(rot_pix_center[1], height-rot_pix_center[1])
rot_bounds = [0,width,0,height] # xmin, xmax, ymin, ymax
# handle edges falling outside original postage stamp
if rot_pix_center[0] <= image_center[0]:
rot_bounds[1] = rot_pix_center[0] + rot_width/2
else:
rot_bounds[0] = rot_pix_center[0] - rot_width/2
if rot_pix_center[1] <= image_center[1]:
rot_bounds[3] = rot_pix_center[1] + rot_height/2
else:
rot_bounds[2] = rot_pix_center[1] - rot_height/2
xmin, xmax, ymin, ymax = [int(b) for b in rot_bounds]  # bounds are integral; cast for array slicing
# and finally, rotate!
newimage = np.ones_like(image)*1.e-20
newimage[ymin:ymax, xmin:xmax] = (image[ymin:ymax, xmin:xmax])[::-1,::-1]
return newimage
def test_rotate():
# test odd-size array
array = np.array([[0, 0, 0, 0, 0],
[0, 11, 12, 0, 0],
[0, 0, 22, 0, 0],
[0, 0, 0, 33, 0],
[0, 0, 0, 0, 44]])
rot = rotate(array, (1,1)) # rotating about the 33 value pixel
np.testing.assert_array_almost_equal(rot, np.array([[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 44, 0, 0],
[0, 0, 0, 33, 0],
[0, 0, 0, 0, 22]]),
5, err_msg="incorrect rotate")
rot = rotate(array, (-1,-1)) # rotating about the 11 value pixel
np.testing.assert_array_almost_equal(rot, np.array([[22, 0, 0, 0, 0],
[12, 11, 0, 0, 0],
[ 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0]]),
5, err_msg="incorrect rotate")
rot = rotate(array, (0.5,0.5)) # rotating about point between 22 and 33
np.testing.assert_array_almost_equal(rot, np.array([[ 0, 0, 0, 0, 0],
[ 0, 44, 0, 0, 0],
[ 0, 0, 33, 0, 0],
[ 0, 0, 0, 22, 0],
[ 0, 0, 0, 12, 11]]),
5, err_msg="incorrect rotate")
# test even-size array
array = np.array([[0, 0, 0, 0],
[0, 11, 12, 0],
[0, 0, 22, 0],
[0, 0, 0, 33]])
rot = rotate(array, (0.5,0.5)) # rotating about the 22 value pixel
np.testing.assert_array_almost_equal(rot, np.array([[ 0, 0, 0, 0],
[ 0, 33, 0, 0],
[ 0, 0, 22, 0],
[ 0, 0, 12, 11]]),
5, err_msg="incorrect rotate")
rot = rotate(array, (0.0,0.0)) # rotating about point between 11 and 22
np.testing.assert_array_almost_equal(rot, np.array([[33, 0, 0, 0],
[ 0, 22, 0, 0],
[ 0, 12, 11, 0],
[ 0, 0, 0, 0]]),
5, err_msg="incorrect rotate")
# test non-square array
array = np.array([[0, 0, 0, 0],
[0, 11, 12, 0],
[0, 0, 22, 0],
[0, 0, 0, 33],
[0, 0, 0, 43]])
rot = rotate(array, (0.0,0.0)) # rotating about point 1/2 unit left of 22
np.testing.assert_array_almost_equal(rot, np.array([[43, 0, 0, 0],
[33, 0, 0, 0],
[ 0, 22, 0, 0],
[ 0, 12, 11, 0],
[ 0, 0, 0, 0]]),
5, err_msg="incorrect rotate")
rot = rotate(array, (0.5,0.5)) # rotating about point 1/2 unit below 22
np.testing.assert_array_almost_equal(rot, np.array([[ 0, 0, 0, 0],
[ 0, 43, 0, 0],
[ 0, 33, 0, 0],
[ 0, 0, 22, 0],
[ 0, 0, 12, 11]]),
5, err_msg="incorrect rotate")
# test that GalSim rotation agrees with numpy rotation when the rotation axis is a
# half-integer.
for center in [(0.0, 0.0), (0.5, 0.5), (0.5, -0.5)]:
numpy_rot = rotate(array, center)
galsim_rot = rotate(array, center, force_interpolate=True)
np.testing.assert_array_almost_equal(
numpy_rot, galsim_rot, 5,
err_msg="numpy rotation disagrees with galsim rotation at {}".format(center))
def test_deblend():
try:
import galsim
except ImportError:
print("can't test deblend w/o galsim")
return
# check that children of symmetric image show same symmetry
gal1 = galsim.Gaussian(fwhm=5).shift(-5,0)
gal2 = galsim.Gaussian(fwhm=5).shift(+5,0)
gals = gal1 + gal2
img = gals.drawImage(nx=32, ny=24, scale=1.0)
templates, template_fractions, children = deblend(img.array, [(-5, 0), (5, 0)])
xflip = children[1][:,::-1]
symdiff = (children[0] - xflip)/img.array
np.testing.assert_array_almost_equal(children[0], xflip, 10,
"deblend symmetry failed")
# check again for non-integer shift
img = galsim.ImageD(32, 24)
gal1 = galsim.Gaussian(fwhm=5).shift(-5.2,0)
gal2 = galsim.Gaussian(fwhm=5).shift(+5.2,0)
gals = gal1 + gal2
gals.drawImage(image=img, scale=1)
templates, template_fractions, children = deblend(img.array, [(-5.2, 0), (5.2, 0)])
xflip = children[1][:,::-1]
symdiff = (children[0] - xflip)/img.array
np.testing.assert_array_almost_equal(children[0], xflip, 10,
"deblend symmetry failed")
# now check that children of transposed image are similarly transposed
# use some noise this time.
gals.drawImage(image=img, method='phot', n_photons=10000)
_, _, children = deblend(img.array, [(-3, 0), (3, 0)])
transimage = img.array.transpose()
_, _, transchildren = deblend(transimage, [(0, -3), (0, 3)])
np.testing.assert_array_almost_equal(children[0],
transchildren[0].transpose(),
10,
"transposed child of transposed image not equal to child")
np.testing.assert_array_almost_equal(children[1],
transchildren[1].transpose(),
10,
"transposed child of transposed image not equal to child")
# compare array operations rotation to Galsim.rotate
_, _, children2 = deblend(img.array, [(-3, 0), (3, 0)],
interpolate=True, force_interpolate=True)
np.testing.assert_array_almost_equal(children[0],
children2[0],
9,
"array rotate disagrees with galsim.rotate")
np.testing.assert_array_almost_equal(children[1],
children2[1],
9,
"array rotate disagrees with galsim.rotate")
if __name__ == '__main__':
test_rotate()
test_deblend()
``` |
{
"source": "jmezo/balancy",
"score": 2
} |
#### File: balancy/app/api.py
```python
from fastapi import BackgroundTasks, FastAPI, Response, status
from app import balances
from app.crud import Crud
from app.web3_client import Web3Client
w3 = Web3Client()
crud = Crud()
app = FastAPI()
@app.get("/addresses/{address}")
async def find_balances(
*,
address: str,
tasks: BackgroundTasks,
):
tasks.add_task(balances.fetch_address_token_balances, address, w3=w3, crud=crud)
return {"session_id": address}
@app.get("/sessions/{id}")
async def get_session(
*,
id: str,
response: Response,
):
res = crud.get_address_balances(id)
if not res:
response.status_code = status.HTTP_404_NOT_FOUND
return {"message": "Session not found"}
return res
```
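A hedged sketch of exercising the two endpoints above with FastAPI's `TestClient`; the address is a placeholder, and the calls assume the web3 endpoint and etcd database configured through `EnvConfig` are reachable, since the background task and the `Crud` lookup use them.
```python
from fastapi.testclient import TestClient
from app.api import app

client = TestClient(app)

# Kick off a background balance fetch for an address (placeholder value).
resp = client.get("/addresses/0x0000000000000000000000000000000000000000")
session_id = resp.json()["session_id"]

# Later, poll the session; returns 404 until balances have been written.
resp = client.get(f"/sessions/{session_id}")
print(resp.status_code, resp.json())
```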
#### File: balancy/app/cli.py
```python
import atexit
from typing import Optional
import typer
import uvicorn
from . import tokens
from .config import EnvConfig
from .crud import Crud
from .web3_client import Web3Client
app = typer.Typer()
@app.command()
def fetch_tokens(
w3url: str = typer.Option(None),
db_uri: str = typer.Option(None),
):
"""Starts searching the blockchain for ERC20 tokens,
and saves their addresses in the given database."""
EnvConfig.set_environment(w3url, db_uri)
w3 = Web3Client()
crud = Crud()
_setup_is_fetch_status(crud)
tokens.query_ERC20_tokens(w3=w3, crud=crud)
def _setup_is_fetch_status(crud: Crud):
crud.set_is_block_fetch(True)
atexit.register(crud.set_is_block_fetch, False)
@app.command()
def api(
w3url: str = typer.Option(None),
db_uri: str = typer.Option(None),
port: int = typer.Option(8000, "--port", "-p"),
auto_reload: bool = typer.Option(False, "--auto-reload", "-r"),
):
"""Starts the web api."""
EnvConfig.set_environment(w3url, db_uri)
# With this command uvicorn runs the FastAPI instance
# named `app` which is located inside app/api.py
uvicorn.run("app.api:app", port=port, reload=auto_reload)
@app.command()
def set_defaults(
w3url: str = typer.Option(None),
db_uri: str = typer.Option(None),
):
"""Sets given w3url and db-uri values as defaults
by saving them to a .env file. Creates the file if it doesn't exist."""
if w3url or db_uri:
EnvConfig.set_defaults(w3url, db_uri)
typer.echo("Done.")
else:
typer.echo("No parameters were given.")
@app.command()
def say_hello(text: Optional[str] = None):
"""Example function, this will be removed later."""
message = "General Kenobi!" if text == "Hello there" else "Hi!"
typer.echo(message)
```
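A small sketch driving the commands above in-process with typer's `CliRunner`; only the example command is exercised, so no web3 or etcd backend is needed.
```python
from typer.testing import CliRunner
from app.cli import app

runner = CliRunner()
result = runner.invoke(app, ["say-hello", "--text", "Hello there"])
print(result.output)   # -> General Kenobi!

# The other commands need live backends, e.g.:
# runner.invoke(app, ["fetch-tokens", "--w3url", "http://localhost:8545",
#                     "--db-uri", "http://localhost:2379"])
```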
#### File: balancy/app/crud.py
```python
import json
from typing import Dict, List, Optional
from urllib.parse import urlparse
import etcd3
from hexbytes import HexBytes
from app.config import EnvConfig
KEY_LAST_BLOCK = "last_block"
KEY_START_BLOCK = "start_block"
KEY_CURRENT_BLOCK = "current_block"
KEY_BLOCK_FETCH = "block_fetch"
PREFIX_TOKEN_ADDRESS = "token"
PREFIX_ADDRESS_BALANCES = "balances"
class Crud:
def __init__(self):
parsed_uri = urlparse(EnvConfig().DB_URI)
db_host = parsed_uri.hostname
db_port = str(parsed_uri.port)
self.db = etcd3.client(host=db_host, port=db_port)
def get_is_block_fetch(self) -> bool:
return self.db.get(KEY_BLOCK_FETCH)[0] == b"1"
def set_is_block_fetch(self, is_fetch: bool) -> None:
fetch_status = "1" if is_fetch else "0"
return self.db.put(KEY_BLOCK_FETCH, fetch_status)
def set_start_block(self, block_hash: Optional[HexBytes]) -> None:
self._set_block(KEY_START_BLOCK, block_hash)
def set_current_block(self, block_hash: Optional[HexBytes]) -> None:
self._set_block(KEY_CURRENT_BLOCK, block_hash)
def set_last_block(self, block_hash: Optional[HexBytes]) -> None:
self._set_block(KEY_LAST_BLOCK, block_hash)
def get_start_block_hash(self) -> Optional[HexBytes]:
return self._get_block_hash(KEY_START_BLOCK)
def get_current_block_hash(self) -> Optional[HexBytes]:
return self._get_block_hash(KEY_CURRENT_BLOCK)
def get_last_block_hash(self) -> Optional[HexBytes]:
return self._get_block_hash(KEY_LAST_BLOCK)
def save_token_address(self, token_address: str) -> None:
return self.db.put(f"{PREFIX_TOKEN_ADDRESS}::{token_address}", token_address)
def get_token_addresses(self) -> List[str]:
res = self.db.get_prefix(PREFIX_TOKEN_ADDRESS)
return [token.decode("utf-8") for (token, _) in res]
def save_address_balances(self, address: str, balances: Dict) -> None:
self.db.put(f"{PREFIX_ADDRESS_BALANCES}::{address}", json.dumps(balances))
def get_address_balances(self, address: str) -> Optional[Dict]:
res = self.db.get(f"{PREFIX_ADDRESS_BALANCES}::{address}")[0]
try:
return json.loads(res)
except (json.JSONDecodeError, TypeError):
return None
def _set_block(self, key: str, block_hash: Optional[HexBytes]) -> None:
if block_hash:
self.db.put(key, block_hash.hex())
else:
self.db.delete(key)
def _get_block_hash(self, key: str) -> Optional[HexBytes]:
address = self.db.get(key)[0]
return HexBytes(address.decode("utf-8")) if address else None
```
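A hedged sketch of the `Crud` wrapper, assuming an etcd instance is reachable at the `DB_URI` passed to `EnvConfig` (here a hypothetical local instance); the hash and addresses are placeholders.
```python
from hexbytes import HexBytes
from app.config import EnvConfig
from app.crud import Crud

EnvConfig.set_environment(None, "http://localhost:2379")   # assumed local etcd
crud = Crud()

crud.set_last_block(HexBytes("0x" + "ab" * 32))            # placeholder block hash
crud.save_token_address("0x0000000000000000000000000000000000000001")
crud.save_address_balances("0xSomeAddress", {"0xTokenAddress": "123"})

print(crud.get_last_block_hash())
print(crud.get_token_addresses())
print(crud.get_address_balances("0xSomeAddress"))
```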
#### File: balancy/tests/utils.py
```python
import json
from solcx import compile_standard
SOURCE_CODE_ERC20_CONTRACT = """
pragma solidity ^0.8.0;
contract TestERC20 {
string public greeting;
constructor() public {
greeting = 'Hello';
}
function totalSupply() view public returns (uint256) {
return 1;
}
function balanceOf(address owner)
public view returns (uint256) {
return 1;
}
function allowance(address owner, address spender)
public view returns (uint256) {
return 1;
}
}"""
SOURCE_CODE_NOT_ERC20_CONTRACT = """
pragma solidity ^0.8.0;
contract TestERC20 {
string public greeting;
constructor() public {
greeting = 'Hello';
}
function totalSupply() view public returns (uint256) {
return 1;
}
function allowance(address owner, address spender)
public view returns (uint256) {
return 1;
}
}"""
def create_contract(w3, source_code):
compiled_sol = compile_standard(
{
"language": "Solidity",
"sources": {"TestERC20.sol": {"content": source_code}},
"settings": {
"outputSelection": {
"*": {"*": ["metadata", "evm.bytecode", "evm.bytecode.sourceMap"]}
}
},
}
)
w3.eth.default_account = w3.eth.accounts[0]
bytecode = compiled_sol["contracts"]["TestERC20.sol"]["TestERC20"]["evm"][
"bytecode"
]["object"]
abi = json.loads(
compiled_sol["contracts"]["TestERC20.sol"]["TestERC20"]["metadata"]
)["output"]["abi"]
TestERC20 = w3.eth.contract(abi=abi, bytecode=bytecode)
tx_hash = TestERC20.constructor().transact()
tx_receipt = w3.eth.waitForTransactionReceipt(tx_hash)
contract_address = tx_receipt["contractAddress"]
return tx_hash, contract_address
``` |
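A minimal sketch of how `create_contract` might be driven against an in-memory chain. The `web3[tester]` extra, a solc 0.8.x binary installed through py-solc-x, and the `tests.utils` import path are all assumptions here.

```python
# Minimal sketch: deploying the test ERC20 source against an eth-tester chain.
# Assumes web3 with the tester extra and a solc 0.8.x binary via py-solc-x.
import solcx
from web3 import Web3

from tests.utils import SOURCE_CODE_ERC20_CONTRACT, create_contract  # hypothetical path

solcx.install_solc("0.8.12")  # make sure a 0.8.x compiler is available

w3 = Web3(Web3.EthereumTesterProvider())
tx_hash, contract_address = create_contract(w3, SOURCE_CODE_ERC20_CONTRACT)
print("Deployed test contract at", contract_address)
```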
{
"source": "jmf11493/MusicDownloader",
"score": 2
} |
#### File: MusicDownloader/music_dl/song_download_view.py
```python
import PyQt5
from PyQt5 import QtCore, QtGui, QtWidgets
import datetime
import pyperclip
#Fixes scaling issues on high res monitors
if hasattr(QtCore.Qt, 'AA_EnableHighDpiScaling'):
PyQt5.QtWidgets.QApplication.setAttribute(QtCore.Qt.AA_EnableHighDpiScaling, True)
if hasattr(QtCore.Qt, 'AA_UseHighDpiPixmaps'):
PyQt5.QtWidgets.QApplication.setAttribute(QtCore.Qt.AA_UseHighDpiPixmaps, True)
class SongDownloadView(object):
    '''
    Qt view for the music downloader: builds the UI, connects model signals
    to the widgets and forwards user actions to the controller.
    '''
def __init__(self, model, controller, MainWindow):
self._model = model
self._controller = controller
self.setupUi(MainWindow)
self._log = '-----Start of Log-----\n'
self._song_fail_log = ''
self._model.song_total_change.connect(self.on_song_total_change)
self._model.skipped_total_change.connect(self.on_skipped_total_change)
self._model.song_download_change.connect(self.on_song_download_change)
self._model.song_failed_change.connect(self.on_song_failed_change)
self._model.song_progress_change.connect(self.on_update_progress)
self._model.estimate_change.connect(self.on_estimate_update_change)
self._model.log_change.connect(self.on_log_update)
self._model.song_failed_name.connect(self.on_song_failed_update)
self.start_download_button.clicked.connect(self.start_download_listener)
self.normalize_audio.stateChanged.connect(self._controller.normalize_change)
self.browse_csv_button.clicked.connect(self.get_csv_path)
self.browse_output_button.clicked.connect(self.get_output_path)
self.csv_file_location.textChanged.connect(self._controller.csv_file_change)
self.output_file_location.textChanged.connect(self._controller.output_dir_change)
self.download_retry_spinbox.valueChanged.connect(self._controller.download_try_change)
self.retry_wait_time_spinbox.valueChanged.connect(self._controller.time_sleep_change)
self.stop_download_button.clicked.connect(self.stop_download_listener)
self.copy_log_button.clicked.connect(self.copy_log_listener)
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(758, 500)
MainWindow.setMinimumSize(QtCore.QSize(758, 500))
MainWindow.setMaximumSize(QtCore.QSize(758, 500))
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.gridLayoutWidget_4 = QtWidgets.QWidget(self.centralwidget)
self.gridLayoutWidget_4.setGeometry(QtCore.QRect(10, 10, 735, 421))
self.gridLayoutWidget_4.setObjectName("gridLayoutWidget_4")
self.gridLayout_4 = QtWidgets.QGridLayout(self.gridLayoutWidget_4)
self.gridLayout_4.setContentsMargins(5, 0, 5, 0)
self.gridLayout_4.setObjectName("gridLayout_4")
self.start_download_button = QtWidgets.QPushButton(self.gridLayoutWidget_4)
self.start_download_button.setMaximumSize(QtCore.QSize(100, 16777215))
self.start_download_button.setObjectName("start_download_button")
self.gridLayout_4.addWidget(self.start_download_button, 4, 0, 1, 1)
self.log_output = QtWidgets.QPlainTextEdit(self.gridLayoutWidget_4)
self.log_output.setEnabled(True)
self.log_output.setReadOnly(True)
self.log_output.setObjectName("log_output")
self.gridLayout_4.addWidget(self.log_output, 12, 0, 1, 3)
self.gridLayout = QtWidgets.QGridLayout()
self.gridLayout.setContentsMargins(5, -1, 5, -1)
self.gridLayout.setObjectName("gridLayout")
self.skipped_songs_count = QtWidgets.QLCDNumber(self.gridLayoutWidget_4)
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Light, brush)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Midlight, brush)
self.skipped_songs_count.setPalette(palette)
self.skipped_songs_count.setEnabled(True)
self.skipped_songs_count.setObjectName("skipped_songs_count")
self.gridLayout.addWidget(self.skipped_songs_count, 2, 1, 1, 1)
self.total_songs_count = QtWidgets.QLCDNumber(self.gridLayoutWidget_4)
self.total_songs_count.setPalette(palette)
self.total_songs_count.setObjectName("total_songs_count")
self.gridLayout.addWidget(self.total_songs_count, 5, 1, 1, 1)
self.total_songs_label = QtWidgets.QLabel(self.gridLayoutWidget_4)
font = QtGui.QFont()
font.setPointSize(12)
self.total_songs_label.setFont(font)
self.total_songs_label.setObjectName("total_songs_label")
self.gridLayout.addWidget(self.total_songs_label, 5, 0, 1, 1)
self.downloaded_songs_count = QtWidgets.QLCDNumber(self.gridLayoutWidget_4)
self.downloaded_songs_count.setPalette(palette)
self.downloaded_songs_count.setObjectName("downloaded_songs_count")
self.gridLayout.addWidget(self.downloaded_songs_count, 1, 1, 1, 1)
self.downloaded_songs_label = QtWidgets.QLabel(self.gridLayoutWidget_4)
font = QtGui.QFont()
font.setPointSize(12)
self.downloaded_songs_label.setFont(font)
self.downloaded_songs_label.setObjectName("downloaded_songs_label")
self.gridLayout.addWidget(self.downloaded_songs_label, 1, 0, 1, 1)
self.song_progress_bar = QtWidgets.QProgressBar(self.gridLayoutWidget_4)
self.song_progress_bar.setProperty("value", 0)
self.song_progress_bar.setObjectName("song_progress_bar")
self.gridLayout.addWidget(self.song_progress_bar, 6, 0, 1, 2)
self.skipped_songs_label = QtWidgets.QLabel(self.gridLayoutWidget_4)
font = QtGui.QFont()
font.setPointSize(12)
self.skipped_songs_label.setFont(font)
self.skipped_songs_label.setObjectName("skipped_songs_label")
self.gridLayout.addWidget(self.skipped_songs_label, 2, 0, 1, 1)
self.progress_label = QtWidgets.QLabel(self.gridLayoutWidget_4)
font = QtGui.QFont()
font.setPointSize(18)
self.progress_label.setFont(font)
self.progress_label.setAlignment(QtCore.Qt.AlignCenter)
self.progress_label.setObjectName("progress_label")
self.gridLayout.addWidget(self.progress_label, 0, 0, 1, 2)
self.failed_songs_count_label = QtWidgets.QLabel(self.gridLayoutWidget_4)
font = QtGui.QFont()
font.setPointSize(12)
self.failed_songs_count_label.setFont(font)
self.failed_songs_count_label.setObjectName("failed_songs_count_label")
self.gridLayout.addWidget(self.failed_songs_count_label, 3, 0, 1, 1)
self.failed_songs_count = QtWidgets.QLCDNumber(self.gridLayoutWidget_4)
self.failed_songs_count.setPalette(palette)
self.failed_songs_count.setEnabled(True)
self.failed_songs_count.setObjectName("failed_songs_count")
self.gridLayout.addWidget(self.failed_songs_count, 3, 1, 1, 1)
self.gridLayout_4.addLayout(self.gridLayout, 0, 4, 4, 1)
self.time_remaining_label = QtWidgets.QLabel(self.gridLayoutWidget_4)
font = QtGui.QFont()
font.setPointSize(8)
self.time_remaining_label.setFont(font)
self.time_remaining_label.setObjectName("time_remaining_label")
self.gridLayout_4.addWidget(self.time_remaining_label, 4, 4, 1, 1)
self.failed_songs_label = QtWidgets.QLabel(self.gridLayoutWidget_4)
font = QtGui.QFont()
font.setPointSize(12)
self.failed_songs_label.setFont(font)
self.failed_songs_label.setObjectName("failed_songs_label")
self.gridLayout_4.addWidget(self.failed_songs_label, 11, 3, 1, 2)
self.log_label = QtWidgets.QLabel(self.gridLayoutWidget_4)
font = QtGui.QFont()
font.setPointSize(12)
self.log_label.setFont(font)
self.log_label.setObjectName("log_label")
self.gridLayout_4.addWidget(self.log_label, 11, 0, 1, 2)
self.stop_download_button = QtWidgets.QPushButton(self.gridLayoutWidget_4)
self.stop_download_button.setEnabled(False)
self.stop_download_button.setMaximumSize(QtCore.QSize(100, 16777215))
self.stop_download_button.setAutoFillBackground(False)
self.stop_download_button.setObjectName("stop_download_button")
self.gridLayout_4.addWidget(self.stop_download_button, 4, 2, 1, 1)
self.gridLayout_2 = QtWidgets.QGridLayout()
self.gridLayout_2.setContentsMargins(5, -1, -1, -1)
self.gridLayout_2.setObjectName("gridLayout_2")
self.browse_output_button = QtWidgets.QPushButton(self.gridLayoutWidget_4)
self.browse_output_button.setMaximumSize(QtCore.QSize(150, 16777215))
self.browse_output_button.setObjectName("browse_output_button")
self.gridLayout_2.addWidget(self.browse_output_button, 1, 1, 1, 1)
self.setup_label = QtWidgets.QLabel(self.gridLayoutWidget_4)
font = QtGui.QFont()
font.setPointSize(18)
self.setup_label.setFont(font)
self.setup_label.setAlignment(QtCore.Qt.AlignCenter)
self.setup_label.setObjectName("setup_label")
self.gridLayout_2.addWidget(self.setup_label, 0, 1, 1, 2)
self.browse_csv_button = QtWidgets.QPushButton(self.gridLayoutWidget_4)
self.browse_csv_button.setMaximumSize(QtCore.QSize(150, 16777215))
self.browse_csv_button.setObjectName("browse_csv_button")
self.gridLayout_2.addWidget(self.browse_csv_button, 2, 1, 1, 1)
self.output_file_location = QtWidgets.QLineEdit(self.gridLayoutWidget_4)
self.output_file_location.setReadOnly(True)
self.output_file_location.setObjectName("output_file_location")
self.gridLayout_2.addWidget(self.output_file_location, 1, 2, 1, 1)
self.csv_file_location = QtWidgets.QLineEdit(self.gridLayoutWidget_4)
self.csv_file_location.setReadOnly(True)
self.csv_file_location.setObjectName("csv_file_location")
self.gridLayout_2.addWidget(self.csv_file_location, 2, 2, 1, 1)
self.gridLayout_3 = QtWidgets.QGridLayout()
self.gridLayout_3.setObjectName("gridLayout_3")
self.download_retry_spinbox = QtWidgets.QSpinBox(self.gridLayoutWidget_4)
self.download_retry_spinbox.setMinimum(1)
self.download_retry_spinbox.setProperty("value", 5)
self.download_retry_spinbox.setObjectName("download_retry_spinbox")
self.gridLayout_3.addWidget(self.download_retry_spinbox, 2, 4, 1, 1)
self.download_retry_label = QtWidgets.QLabel(self.gridLayoutWidget_4)
self.download_retry_label.setToolTip("")
self.download_retry_label.setObjectName("download_retry_label")
self.gridLayout_3.addWidget(self.download_retry_label, 2, 3, 1, 1)
self.configuration_label = QtWidgets.QLabel(self.gridLayoutWidget_4)
font = QtGui.QFont()
font.setPointSize(18)
self.configuration_label.setFont(font)
self.configuration_label.setAlignment(QtCore.Qt.AlignCenter)
self.configuration_label.setObjectName("configuration_label")
self.gridLayout_3.addWidget(self.configuration_label, 0, 1, 1, 4)
self.retry_wait_time_spinbox = QtWidgets.QSpinBox(self.gridLayoutWidget_4)
self.retry_wait_time_spinbox.setMinimum(2)
self.retry_wait_time_spinbox.setObjectName("retry_wait_time_spinbox")
self.gridLayout_3.addWidget(self.retry_wait_time_spinbox, 2, 2, 1, 1)
self.retry_wait_time_label = QtWidgets.QLabel(self.gridLayoutWidget_4)
self.retry_wait_time_label.setToolTip("")
self.retry_wait_time_label.setObjectName("retry_wait_time_label")
self.gridLayout_3.addWidget(self.retry_wait_time_label, 2, 1, 1, 1)
self.normalize_audio = QtWidgets.QCheckBox(self.gridLayoutWidget_4)
font = QtGui.QFont()
font.setPointSize(12)
self.normalize_audio.setFont(font)
self.normalize_audio.setChecked(True)
self.normalize_audio.setObjectName("normalize_audio")
self.gridLayout_3.addWidget(self.normalize_audio, 1, 1, 1, 1)
self.gridLayout_2.addLayout(self.gridLayout_3, 3, 1, 1, 2)
self.gridLayout_4.addLayout(self.gridLayout_2, 0, 0, 1, 4)
self.failed_songs_log = QtWidgets.QPlainTextEdit(self.gridLayoutWidget_4)
self.failed_songs_log.setEnabled(True)
self.failed_songs_log.setReadOnly(True)
self.failed_songs_log.setObjectName("failed_songs_log")
self.gridLayout_4.addWidget(self.failed_songs_log, 12, 3, 1, 2)
self.copy_log_button = QtWidgets.QPushButton(self.gridLayoutWidget_4)
self.copy_log_button.setObjectName("copy_log_button")
self.gridLayout_4.addWidget(self.copy_log_button, 11, 2, 1, 1)
self.version_label = QtWidgets.QLabel(self.centralwidget)
self.version_label.setGeometry(QtCore.QRect(330, 460, 81, 16))
self.version_label.setObjectName("version_label")
MainWindow.setCentralWidget(self.centralwidget)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "Music Downloader"))
self.start_download_button.setText(_translate("MainWindow", "Start Download"))
self.total_songs_label.setText(_translate("MainWindow", "Total Songs:"))
self.downloaded_songs_label.setText(_translate("MainWindow", "Songs Downloaded:"))
self.skipped_songs_label.setText(_translate("MainWindow", "Songs Skipped:"))
self.progress_label.setText(_translate("MainWindow", "Progress"))
self.failed_songs_count_label.setText(_translate("MainWindow", "Songs Failed:"))
self.time_remaining_label.setText(_translate("MainWindow", "Estimated Time Remaining: "))
self.failed_songs_label.setText(_translate("MainWindow", "Failed Songs:"))
self.log_label.setText(_translate("MainWindow", "Log:"))
self.stop_download_button.setText(_translate("MainWindow", "Stop Download"))
self.browse_output_button.setText(_translate("MainWindow", "Select Output Directory"))
self.setup_label.setText(_translate("MainWindow", "Setup"))
self.browse_csv_button.setText(_translate("MainWindow", "Select CSV File"))
self.download_retry_label.setText(_translate("MainWindow", "Download Attempts"))
self.configuration_label.setText(_translate("MainWindow", "Configuration"))
self.retry_wait_time_label.setText(_translate("MainWindow", "Time to Wait Between Tries"))
self.normalize_audio.setText(_translate("MainWindow", "Normalize Audio"))
self.copy_log_button.setText(_translate("MainWindow", "Copy Log to Clipboard"))
self.version_label.setText(_translate("MainWindow", "Version 1.0.0"))
def on_song_total_change(self, value):
self.total_songs_count.display(value)
def on_skipped_total_change(self, value):
self.skipped_songs_count.display(value)
def on_song_download_change(self, value):
self.downloaded_songs_count.display(value)
def on_song_failed_change(self, value):
self.failed_songs_count.display(value)
def on_update_progress(self, value):
self.song_progress_bar.setValue(value)
def on_estimate_update_change(self, value):
self.time_remaining_label.setText('Estimated Time Remaining: ' + value)
def on_log_update(self, value):
now = datetime.datetime.now()
        date = now.strftime('%Y-%m-%d %H:%M:%S')
        self._log = self._log + '[' + date + ']' + value + '\n'
self.log_output.setPlainText(self._log)
self.log_output.verticalScrollBar().setValue(self.log_output.verticalScrollBar().maximum())
def on_song_failed_update(self, value):
self._song_fail_log = self._song_fail_log + value + '\n'
self.failed_songs_log.setPlainText(self._song_fail_log)
self.failed_songs_log.verticalScrollBar().setValue(self.failed_songs_log.verticalScrollBar().maximum())
def get_csv_path(self):
file = QtWidgets.QFileDialog().getOpenFileName(None, 'Select CSV', '', '*.csv')
file_path = file[0]
file_path = file_path.replace('/', '\\' )
self.csv_file_location.setText(file_path)
def get_output_path(self):
file_path = QtWidgets.QFileDialog().getExistingDirectory()
file_path = file_path.replace('/', '\\' )
self.output_file_location.setText(file_path)
def start_download_listener(self):
self.start_download_button.setDisabled(True)
self.stop_download_button.setDisabled(False)
self._controller.start_download_click()
def stop_download_listener(self):
self.start_download_button.setDisabled(False)
self.stop_download_button.setDisabled(True)
self._controller.stop_download_click()
def copy_log_listener(self):
copy_value = self.log_output.toPlainText()
pyperclip.copy(copy_value)
``` |
{
"source": "jmfajardod/gym_gazebo_sb3",
"score": 3
} |
#### File: frobs_rl/common/ros_launch.py
```python
import rospy
import rospkg
import os
import subprocess
import time
def ros_launch_from_pkg(pkg_name, launch_file, args=None, launch_new_term=True) -> bool:
"""
Function to execute a roslaunch from package with args.
:param pkg_name: Name of the package where the launch file is located.
:type pkg_name: str
:param launch_file: Name of the launch file.
:type launch_file: str
:param args: Args to pass to the launch file.
:type args: list of str
:param launch_new_term: Launch the process in a new terminal (Xterm).
:type launch_new_term: bool
:return: True if the launch file was executed.
:rtype: bool
"""
rospack = rospkg.RosPack()
try:
pkg_path = rospack.get_path(pkg_name)
rospy.logdebug("Package FOUND...")
except rospkg.common.ResourceNotFound:
rospy.logerr("Package NOT FOUND")
return False
file_path = pkg_path + "/launch/" + launch_file
    if not os.path.exists(file_path):
        print("Launch file " + launch_file + " in " + file_path + " does not exist")
return False
term_command = "roslaunch " + pkg_name + " " + launch_file
if args is not None:
for arg in args:
term_command += " " + arg
if launch_new_term:
term_command = "xterm -e ' " + term_command + "'"
subprocess.Popen(term_command, shell=True)
time.sleep(5.0)
return True
def ros_launch_from_path(launch_file_path, args=None, launch_new_term=True) -> bool:
"""
Function to execute a roslaunch from a path with args.
:param launch_file_path: Path of the launch file.
:type launch_file_path: str
:param args: Args to pass to the launch file.
:type args: list str
:param launch_new_term: Launch the process in a new terminal (Xterm).
:type launch_new_term: bool
:return: True if the launch file was executed.
:rtype: bool
"""
    if not os.path.exists(launch_file_path):
        print("Launch file " + launch_file_path + " does not exist")
return False
term_command = "roslaunch " + launch_file_path
if args is not None:
for arg in args:
term_command += " " + arg
if launch_new_term:
term_command = "xterm -e ' " + term_command + "'"
subprocess.Popen(term_command, shell=True)
time.sleep(5.0)
return True
def ros_kill_launch_process() -> bool:
"""
Function to kill all roslaunch processes.
:return: True if the roslaunch processes were killed.
:rtype: bool
"""
term_command = "killall -9 roslaunch"
subprocess.Popen("xterm -e ' " + term_command + "'", shell=True).wait()
return True
```
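A minimal usage sketch of the helpers above; the package, launch file and argument are placeholders, and a running ROS master is assumed.

```python
# Minimal usage sketch (package, launch file and args are placeholders).
import rospy
from frobs_rl.common import ros_launch

rospy.init_node("launch_example")

ok = ros_launch.ros_launch_from_pkg(
    "gazebo_ros", "empty_world.launch",
    args=["paused:=false"], launch_new_term=False
)
if ok:
    rospy.loginfo("Launch file started")

# ... later, tear all roslaunch processes down.
ros_launch.ros_kill_launch_process()
```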
#### File: frobs_rl/models/sac.py
```python
import os
import stable_baselines3
from frobs_rl.common import ros_params
from frobs_rl.models import basic_model
# ROS packages required
import rospy
class SAC(basic_model.BasicModel):
"""
Soft Actor-Critic (SAC) algorithm.
Paper: https://arxiv.org/abs/1801.01290
:param env: The environment to be used.
:param save_model_path: The path to save the model.
:param log_path: The path to save the log.
:param load_trained: Whether to load a trained model or not.
:param config_file_pkg: The package where the config file is located. Default: frobs_rl.
:param config_filename: The name of the config file. Default: sac_config.yaml.
:param ns: The namespace of the ROS parameters. Default: "/".
"""
def __init__(self, env, save_model_path, log_path, load_trained=False,
config_file_pkg="frobs_rl", config_filename="sac_config.yaml", ns="/") -> None:
"""
SAC constructor.
"""
rospy.loginfo("Init SAC Policy")
print("Init SAC Policy")
self.env = env
self.ns = ns
self.save_model_path = save_model_path
self.save_trained_model_path = None
# Load YAML Config File
ros_params.ros_load_yaml_from_pkg(config_file_pkg, config_filename, ns=ns)
#--- Init super class
super(SAC, self).__init__(env, save_model_path, log_path, load_trained=load_trained)
if load_trained:
rospy.logwarn("Loading trained model")
self.model = stable_baselines3.SAC.load(save_model_path, env=env)
else:
#--- SDE for SAC
if rospy.get_param(ns + "/model_params/use_sde"):
model_sde = True
model_sde_sample_freq = rospy.get_param(ns + "/model_params/sde_params/sde_sample_freq")
model_use_sde_at_warmup = rospy.get_param(ns + "/model_params/sde_params/use_sde_at_warmup")
self.action_noise = None
else:
model_sde = False
model_sde_sample_freq = -1
model_use_sde_at_warmup = False
#--- SAC model parameters
model_learning_rate = rospy.get_param(ns + "/model_params/sac_params/learning_rate")
model_buffer_size = rospy.get_param(ns + "/model_params/sac_params/buffer_size")
model_learning_starts = rospy.get_param(ns + "/model_params/sac_params/learning_starts")
model_batch_size = rospy.get_param(ns + "/model_params/sac_params/batch_size")
model_tau = rospy.get_param(ns + "/model_params/sac_params/tau")
model_gamma = rospy.get_param(ns + "/model_params/sac_params/gamma")
model_gradient_steps = rospy.get_param(ns + "/model_params/sac_params/gradient_steps")
model_ent_coef = rospy.get_param(ns + "/model_params/sac_params/ent_coef")
model_target_update_interval = rospy.get_param(ns + "/model_params/sac_params/target_update_interval")
model_target_entropy = rospy.get_param(ns + "/model_params/sac_params/target_entropy")
model_train_freq_freq = rospy.get_param(ns + "/model_params/sac_params/train_freq/freq")
model_train_freq_unit = rospy.get_param(ns + "/model_params/sac_params/train_freq/unit")
#--- Create or load model
if rospy.get_param(ns + "/model_params/load_model"): # Load model
model_name = rospy.get_param(ns + "/model_params/model_name")
assert os.path.exists(save_model_path + model_name + ".zip"), "Model {} doesn't exist".format(model_name)
rospy.logwarn("Loading model: " + model_name)
self.model = stable_baselines3.SAC.load(save_model_path + model_name, env=env, verbose=1, action_noise=self.action_noise,
use_sde=model_sde, sde_sample_freq=model_sde_sample_freq, use_sde_at_warmup=model_use_sde_at_warmup,
learning_rate=model_learning_rate, buffer_size=model_buffer_size, learning_starts=model_learning_starts,
batch_size=model_batch_size, tau=model_tau, gamma=model_gamma, gradient_steps=model_gradient_steps,
ent_coef=model_ent_coef, target_update_interval=model_target_update_interval,
target_entropy=model_target_entropy, train_freq=(model_train_freq_freq, model_train_freq_unit))
if os.path.exists(save_model_path + model_name + "_replay_buffer.pkl"):
rospy.logwarn("Loading replay buffer")
self.model.load_replay_buffer(save_model_path + model_name + "_replay_buffer")
else:
rospy.logwarn("No replay buffer found")
else: # Create new model
rospy.logwarn("Creating new model")
self.model = stable_baselines3.SAC("MlpPolicy", env, verbose=1 ,action_noise=self.action_noise,
use_sde=model_sde, sde_sample_freq=model_sde_sample_freq, use_sde_at_warmup=model_use_sde_at_warmup,
learning_rate=model_learning_rate, buffer_size=model_buffer_size, learning_starts=model_learning_starts,
batch_size=model_batch_size, tau=model_tau, gamma=model_gamma, gradient_steps=model_gradient_steps,
policy_kwargs=self.policy_kwargs, ent_coef=model_ent_coef, target_update_interval=model_target_update_interval,
target_entropy=model_target_entropy, train_freq=(model_train_freq_freq, model_train_freq_unit))
#--- Logger
self.set_model_logger()
def load_trained(model_path, env=None):
"""
Load a trained model. Use only with predict function, as the logs will not be saved.
:param model_path: The path to the trained model.
:type model_path: str
:param env: The environment to be used.
:type env: gym.Env
:return: The trained model.
:rtype: frobs_rl.SAC
"""
model = SAC(env=env, save_model_path=model_path, log_path=model_path, load_trained=True)
return model
```
#### File: frobs_rl/templates/CustomTaskEnv.py
```python
from gym import spaces
from gym.envs.registration import register
from frobs_rl.templates import CustomRobotEnv # Replace with your own robot env
import rospy
#- Uncomment the library modules as needed
# from frobs_rl.common import ros_gazebo
# from frobs_rl.common import ros_controllers
# from frobs_rl.common import ros_node
# from frobs_rl.common import ros_launch
# from frobs_rl.common import ros_params
# from frobs_rl.common import ros_urdf
# from frobs_rl.common import ros_spawn
register(
id='CustomTaskEnv-v0',
entry_point='frobs_rl.templates.CustomTaskEnv:CustomTaskEnv',
max_episode_steps=10000,
)
class CustomTaskEnv(CustomRobotEnv.CustomRobotEnv):
"""
Custom Task Env, use this env to implement a task using the robot defined in the CustomRobotEnv
"""
def __init__(self):
"""
Describe the task.
"""
rospy.loginfo("Starting Task Env")
"""
Init super class.
"""
super(CustomTaskEnv, self).__init__()
"""
Define the action and observation space.
"""
# self.action_space = spaces.Discrete(n_actions)
# self.action_space = spaces.Box(low=0, high=1, shape=(1,), dtype=np.float32)
# self.observation_space = spaces.Discrete(n_observations)
# self.observation_space = spaces.Box(low=0, high=1, shape=(1,), dtype=np.float32)
"""
Define subscribers or publishers as needed.
"""
# self.pub1 = rospy.Publisher('/robot/controller_manager/command', JointState, queue_size=1)
# self.sub1 = rospy.Subscriber('/robot/joint_states', JointState, self.callback1)
"""
Finished __init__ method
"""
rospy.loginfo("Finished Init of Custom Task env")
#-------------------------------------------------------#
# Custom available methods for the CustomTaskEnv #
def _set_episode_init_params(self):
"""
Function to set some parameters, like the position of the robot, at the beginning of each episode.
"""
raise NotImplementedError()
def _send_action(self, action):
"""
Function to send an action to the robot
"""
raise NotImplementedError()
def _get_observation(self):
"""
Function to get the observation from the environment.
"""
raise NotImplementedError()
def _get_reward(self):
"""
Function to get the reward from the environment.
"""
raise NotImplementedError()
def _check_if_done(self):
"""
Function to check if the episode is done.
If the episode has a success condition then set done as:
self.info['is_success'] = 1.0
"""
raise NotImplementedError()
```
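As an illustration of how the hooks above could be filled in, here is a minimal sketch for a toy task; the spaces, reward and termination logic are placeholders and not part of the template.

```python
# Minimal sketch: one possible way to fill in the template hooks for a toy task.
# Spaces, reward and termination logic are illustrative placeholders only.
import numpy as np
import rospy
from gym import spaces

from frobs_rl.templates.CustomTaskEnv import CustomTaskEnv


class ToyTaskEnv(CustomTaskEnv):
    def __init__(self):
        super(ToyTaskEnv, self).__init__()
        # Continuous 2D action (e.g. wheel velocities) and a 4D observation vector.
        self.action_space = spaces.Box(low=-1.0, high=1.0, shape=(2,), dtype=np.float32)
        self.observation_space = spaces.Box(low=-np.inf, high=np.inf, shape=(4,), dtype=np.float32)
        self.max_episode_len = 100

    def _set_episode_init_params(self):
        self.step_count = 0

    def _send_action(self, action):
        rospy.logdebug("Commanded action: %s", action)

    def _get_observation(self):
        return np.zeros(4, dtype=np.float32)  # placeholder observation

    def _get_reward(self):
        return 0.0  # placeholder reward

    def _check_if_done(self):
        self.step_count += 1
        return self.step_count >= self.max_episode_len
```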
#### File: frobs_rl/wrappers/TimeLimitWrapper.py
```python
import gym
class TimeLimitWrapper(gym.Wrapper):
"""
Wrapper to limit the number of steps per episode.
:param env: (gym.Env) Gym environment that will be wrapped
:param max_steps: (int) Max number of steps per episode
"""
def __init__(self, env, max_steps=100):
# Call the parent constructor, so we can access self.env later
super(TimeLimitWrapper, self).__init__(env)
self.max_steps = max_steps
# Counter of steps per episode
self.current_step = 0
def reset(self):
"""
Reset the environment
"""
# Reset the counter
self.current_step = 0
return self.env.reset()
def step(self, action):
"""
:param action: Action taken by the agent
:type action: [float] or int
:return: observation, reward, is the episode over, additional informations
:rtype: (np.ndarray, float, bool, dict)
"""
self.current_step += 1
obs, reward, done, info = self.env.step(action)
        # Overwrite the done signal when the step limit has been reached
        if self.current_step >= self.max_steps:
            done = True
            # Update the info dict to signal that the limit was exceeded
            info['time_limit_reached'] = True
            info['is_success'] = 0.0
return obs, reward, done, info
``` |
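A minimal usage sketch of the wrapper, assuming a Gym environment with the classic 4-tuple `step` API such as `CartPole-v1`:

```python
# Minimal usage sketch: cap CartPole episodes at 50 steps.
import gym

from frobs_rl.wrappers.TimeLimitWrapper import TimeLimitWrapper

env = TimeLimitWrapper(gym.make("CartPole-v1"), max_steps=50)

obs = env.reset()
done = False
while not done:
    obs, reward, done, info = env.step(env.action_space.sample())

# When the cap triggers, the wrapper marks it in the info dict.
print(info.get("time_limit_reached", False))
```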
{
"source": "jmfajardod/tf2_example_rycsv",
"score": 2
} |
#### File: tf2_example_rycsv/scripts/broadcaster_class.py
```python
import rospy
import numpy as np
from rospy.numpy_msg import numpy_msg
from tf.transformations import rotation_matrix
import tf2_ros
import tf_conversions
from geometry_msgs.msg import TransformStamped
from ar_track_alvar_msgs.msg import AlvarMarkers, AlvarMarker
class Broadcaster:
def __init__(self):
        # Attributes
        # For broadcasting transforms
self.broadcts = tf2_ros.TransformBroadcaster()
self.transform = TransformStamped()
        # For listening to transforms (listener)
self.tfBuffer = tf2_ros.Buffer()
self.listener = tf2_ros.TransformListener(self.tfBuffer)
# Subscribers
rospy.Subscriber("/ar_pose_marker", numpy_msg(AlvarMarkers), self.Marker_Callback, queue_size=10 )
#--------------------------------------------------------------------------------------#
    # Callback (interrupt handler)
def Marker_Callback(self, marker_info):
#--------------------------------------------------------------------------------#
#--------------------------------------------------------------------------------#
#--------------------------------------------------------------------------------#
# MTH from base to camera
try:
trans_mobbase_cam = self.tfBuffer.lookup_transform("base_footprint", "camera_depth_frame", rospy.Time())
except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException):
rospy.logwarn("Error trying to look for transform")
return
        # Create quaternion vector
quat_odom_cam = np.array([trans_mobbase_cam.transform.rotation.x, \
trans_mobbase_cam.transform.rotation.y, \
trans_mobbase_cam.transform.rotation.z, \
trans_mobbase_cam.transform.rotation.w])
# MTH with position vector equal to zero
rt_mat_odom_cam = tf_conversions.transformations.quaternion_matrix(quat_odom_cam)
# Add position vector to MTH
MTH_odom_cam = rt_mat_odom_cam.copy()
MTH_odom_cam[0,3] = trans_mobbase_cam.transform.translation.x
MTH_odom_cam[1,3] = trans_mobbase_cam.transform.translation.y
MTH_odom_cam[2,3] = trans_mobbase_cam.transform.translation.z
#print(MTH_odom_cam)
#print("")
#--------------------------------------------------------------------------------#
#--------------------------------------------------------------------------------#
#--------------------------------------------------------------------------------#
# MTH from camera to Alvar marker
singleMarker = marker_info.markers[0]
        # Create quaternion vector
quat_cam_marker = np.array([singleMarker.pose.pose.orientation.x, \
singleMarker.pose.pose.orientation.y, \
singleMarker.pose.pose.orientation.z, \
singleMarker.pose.pose.orientation.w])
# MTH with position vector equal to zero
rt_mat_cam_marker = tf_conversions.transformations.quaternion_matrix(quat_cam_marker)
# Add position vector to MTH
MTH_cam_marker = rt_mat_cam_marker.copy()
MTH_cam_marker[0,3] = singleMarker.pose.pose.position.x
MTH_cam_marker[1,3] = singleMarker.pose.pose.position.y
MTH_cam_marker[2,3] = singleMarker.pose.pose.position.z
#print(MTH_cam_marker)
#print("")
#--------------------------------------------------------------------------------#
#--------------------------------------------------------------------------------#
#--------------------------------------------------------------------------------#
        # Comparison between the MTH obtained directly from ROS and the one we
        # created by composing the two previous MTHs
# Transform obtained from ROS
try:
trans_base_marker = self.tfBuffer.lookup_transform("base_footprint", "ar_marker_5", rospy.Time())
except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException):
rospy.logwarn("Error trying to look for transform")
return
quat_from_ROS = np.array([trans_base_marker.transform.rotation.x, \
trans_base_marker.transform.rotation.y, \
trans_base_marker.transform.rotation.z, \
trans_base_marker.transform.rotation.w])
rt_mat_from_ROS = tf_conversions.transformations.quaternion_matrix(quat_from_ROS)
MTH_from_ROS = rt_mat_from_ROS.copy()
MTH_from_ROS[0,3] = trans_base_marker.transform.translation.x
MTH_from_ROS[1,3] = trans_base_marker.transform.translation.y
MTH_from_ROS[2,3] = trans_base_marker.transform.translation.z
print("MTH obtained directly from ROS")
print(MTH_from_ROS)
print("")
MTH_from_twoSteps = np.matmul( MTH_odom_cam, MTH_cam_marker)
print("MTH obtained by the multiplication of MTHs")
print(MTH_from_twoSteps)
print("")
#Error
print("Error between the two")
print( MTH_from_ROS - MTH_from_twoSteps )
print("")
#--------------------------------------------------------------------------------#
#--------------------------------------------------------------------------------#
#--------------------------------------------------------------------------------#
# Broadcasting of a MTH from the marker to a task frame 0.5m to the right and
# 0.2m up of it, and with a rotation of 112.5 degrees around the Z-axis
# It is needed to stamp (associate a time) to the MTH
self.transform.header.stamp = rospy.Time.now()
# It is needed to determine which task frame is the parent (base)
# and the name of the new TF
self.transform.header.frame_id = "ar_marker_5" # Parent or base frame
self.transform.child_frame_id = "custom_tf" # Name of new TF, created by this MTH
# Translation part of MTH
self.transform.transform.translation.x = 0.5
self.transform.transform.translation.y = 0.2
self.transform.transform.translation.z = 0.0
# Rotation part of MTH
        # Although only the rotation matrix is needed, the transformations
        # library works with full MTHs, so we build one whose translational
        # part is zero
        des_rotation = np.identity(4)
angle2rotate = 5*np.math.pi/8
des_rotation[0,0] = np.math.cos(angle2rotate)
des_rotation[0,1] = -np.math.sin(angle2rotate)
des_rotation[1,0] = np.math.sin(angle2rotate)
des_rotation[1,1] = np.math.cos(angle2rotate)
quat_desired = tf_conversions.transformations.quaternion_from_matrix(des_rotation)
self.transform.transform.rotation.x = quat_desired[0]
self.transform.transform.rotation.y = quat_desired[1]
self.transform.transform.rotation.z = quat_desired[2]
self.transform.transform.rotation.w = quat_desired[3]
self.broadcts.sendTransform(self.transform )
``` |
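A minimal sketch of the node entry point that would drive this class; the module name matches the script file and the node name is a placeholder.

```python
# Minimal sketch: running the Broadcaster as a ROS node (node name is a placeholder).
import rospy

from broadcaster_class import Broadcaster

if __name__ == "__main__":
    rospy.init_node("tf2_example_broadcaster")
    Broadcaster()  # all work happens inside the subscriber callback
    rospy.spin()
```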
{
"source": "JmfanBU/AdvIBP",
"score": 2
} |
#### File: AdvIBP/IBP_Adv_Training/train.py
```python
import sys
import copy
import numpy as np
import torch.optim as optim
from IBP_Adv_Training.models.bound_layers import BoundSequential, \
BoundDataParallel
from IBP_Adv_Training.torch.training import Train
from IBP_Adv_Training.torch.warm_up_training import Train_with_warmup
from IBP_Adv_Training.utils.scheduler import Scheduler
from IBP_Adv_Training.utils.config import load_config, get_path, update_dict, \
config_modelloader, config_dataloader, device
from IBP_Adv_Training.utils.argparser import argparser
class Logger(object):
def __init__(self, log_file=None):
self.log_file = log_file
def log(self, *args, **kwargs):
print(*args, **kwargs)
if self.log_file:
print(*args, **kwargs, file=self.log_file)
self.log_file.flush()
class optimizer_config(object):
def __init__(
self, opt_method, model, lr, weight_decay,
lr_decay_factor, lr_decay_step=None, lr_decay_milestones=None,
lr_decay_milestones_post_layer=None
):
self.opt_method = opt_method
self.model = model
self.lr = lr
self.weight_decay = weight_decay
self.lr_decay_factor = lr_decay_factor
self.lr_decay_step = lr_decay_step
self.lr_decay_milestones = lr_decay_milestones
if lr_decay_milestones_post_layer is not None:
self.lr_decay_milestones_post_layer = (
lr_decay_milestones_post_layer
)
else:
self.lr_decay_milestones_post_layer = self.lr_decay_milestones
def get_opt(self, idxLayer):
if self.opt_method == 'adam':
opt = optim.Adam(
self.model.parameters(),
lr=self.lr if idxLayer == 0 else self.lr*self.lr_decay_factor,
weight_decay=self.weight_decay
)
elif self.opt_method == 'sgd':
opt = optim.SGD(
self.model.parameters(), lr=self.lr, momentum=0.9,
nesterov=True, weight_decay=self.weight_decay
)
else:
raise ValueError("Unknown optimizer")
if self.lr_decay_step:
# Use StepLR. Decay by lr_decay_factor every lr_decay_step.
lr_scheduler = optim.lr_scheduler.StepLR(
opt, step_size=self.lr_decay_step,
gamma=self.lr_decay_factor
)
elif self.lr_decay_milestones and idxLayer == 0:
# Decay learning rate by lr_decay_factor at a few milestones
lr_scheduler = optim.lr_scheduler.MultiStepLR(
opt, milestones=self.lr_decay_milestones,
gamma=self.lr_decay_factor
)
elif self.lr_decay_milestones_post_layer and idxLayer != 0:
# Decay learning rate by lr_decay_factor at a few milestones
lr_scheduler = optim.lr_scheduler.MultiStepLR(
opt, milestones=self.lr_decay_milestones_post_layer,
gamma=self.lr_decay_factor
)
else:
raise ValueError(
"one of lr_decay_step and"
"lr_decay_milestones must be not empty."
)
return opt, lr_scheduler
def model_train(config, train_config, model, model_id, model_config):
if "traininig_params" in model_config:
train_config = update_dict(train_config,
model_config["training_params"])
model = BoundSequential.convert(
model, train_config["method_params"]["bound_opts"]
)
# read traininig parameters from config file
load_pretrain = config.get("load_pretrain", False)
epochs = train_config["epochs"]
lr = train_config["lr"]
weight_decay = train_config["weight_decay"]
starting_epsilon = train_config["starting_epsilon"]
end_epsilon = train_config["epsilon"]
schedule_start = train_config["schedule_start"]
schedule_length = train_config["schedule_length"]
optimizer = train_config["optimizer"]
method = train_config["method"]
verbose = train_config["verbose"]
lr_decay_step = train_config["lr_decay_step"]
lr_decay_milestones = train_config["lr_decay_milestones"]
if "lr_decay_milestones_post_layer" in train_config:
lr_decay_milestones_post_layer = train_config[
"lr_decay_milestones_post_layer"
]
else:
lr_decay_milestones_post_layer = None
lr_decay_factor = train_config["lr_decay_factor"]
multi_gpu = train_config["multi_gpu"]
# parameters for the training method
method_params = train_config["method_params"]
# adv training warm up
if "warm_up" in train_config:
warm_up_param = train_config["warm_up"]
else:
warm_up_param = False
# inner max evaluation
if "inner_max_eval" in train_config:
inner_max_eval = train_config["inner_max_eval"]
else:
inner_max_eval = False
# paramters for attack params
attack_params = config["attack_params"]
# parameters for evaluation
evaluation_params = config["eval_params"]
norm = float(train_config["norm"])
train_data, test_data = config_dataloader(
config, **train_config["loader_params"]
)
opt = optimizer_config(optimizer, model, lr, weight_decay,
lr_decay_factor, lr_decay_step,
lr_decay_milestones, lr_decay_milestones_post_layer)
batch_multiplier = train_config["method_params"].get(
"batch_multiplier", 1
)
batch_size = train_data.batch_size * batch_multiplier
num_steps_per_epoch = int(
np.ceil(1.0 * len(train_data.dataset) / batch_size)
)
    # The epsilon schedule is the same with or without inner-max evaluation.
    epsilon_scheduler = Scheduler(
        train_config.get("schedule_type", "linear"),
        schedule_start * num_steps_per_epoch,
        ((schedule_start + schedule_length) - 1) * num_steps_per_epoch,
        starting_epsilon if not load_pretrain else end_epsilon,
        end_epsilon,
        num_steps_per_epoch
    )
    if inner_max_eval:
        inner_max_scheduler = Scheduler(
            inner_max_eval.get("schedule_type", "linear"),
            ((schedule_start + schedule_length) - 1 + inner_max_eval.get(
                "schedule_start", 0
            )) * num_steps_per_epoch,
            ((schedule_start + schedule_length + inner_max_eval.get(
                "schedule_start", 0
            ) - 1 + inner_max_eval.get(
                "schedule_length", schedule_length
            )) - 1) * num_steps_per_epoch,
            inner_max_eval.get("c_max", 1) if not load_pretrain else inner_max_eval.get("c_min", 1e-5),
            inner_max_eval.get("c_min", 1e-5),
            num_steps_per_epoch
        )
if warm_up_param:
warm_up_start = (
(schedule_start + schedule_length) +
warm_up_param.get("schedule_start", 0)
)
warm_up_end = (warm_up_start + warm_up_param.get(
"schedule_length", schedule_length
) - 1)
post_warm_up_scheduler = Scheduler(
warm_up_param.get("schedule_type", "linear"),
warm_up_start * num_steps_per_epoch,
warm_up_end * num_steps_per_epoch,
starting_epsilon if not load_pretrain else end_epsilon,
end_epsilon,
num_steps_per_epoch
)
if inner_max_eval:
inner_max_scheduler = Scheduler(
inner_max_eval.get("schedule_type", "linear"),
(warm_up_end + inner_max_eval.get(
"schedule_start", 0
)) * num_steps_per_epoch,
((warm_up_end + inner_max_eval.get("schedule_start", 0) +
inner_max_eval.get(
"schedule_length",
schedule_length)) - 1) * num_steps_per_epoch,
inner_max_eval.get("c_max", 1) if not load_pretrain else inner_max_eval.get("c_min", 1e-5),
inner_max_eval.get("c_min", 1e-5),
num_steps_per_epoch
)
max_eps = end_epsilon
model_name = get_path(config, model_id, "model", load=False)
best_model_name = get_path(config, model_id, "best_model", load=False)
model_log = get_path(config, model_id, "train_log")
logger = Logger(open(model_log, "w"))
logger.log(model_name)
logger.log("Command line: ", " ".join(sys.argv[:]))
logger.log("training configurations: ", train_config)
logger.log("Model structure: ")
logger.log(str(model))
logger.log("data std: ", train_data.std)
if multi_gpu:
logger.log("\nUsing multiple GPUs for computing IBP bounds\n")
model = BoundDataParallel(model,
device_ids=train_config.get(
"device_id", [0, 1, 2, 3]
))
model = model.cuda(device)
if not inner_max_eval and not warm_up_param:
Train(
model, model_id, model_name, best_model_name,
epochs, train_data, test_data, multi_gpu,
schedule_start, schedule_length,
lr_decay_step, lr_decay_milestones,
epsilon_scheduler, max_eps, norm, logger, verbose,
opt, method, method_params, attack_params, evaluation_params
)
elif inner_max_eval and not warm_up_param:
Train(
model, model_id, model_name, best_model_name,
epochs, train_data, test_data, multi_gpu,
schedule_start, schedule_length,
lr_decay_step, lr_decay_milestones,
epsilon_scheduler, max_eps, norm, logger, verbose,
opt, method, method_params, attack_params, evaluation_params,
inner_max_scheduler=inner_max_scheduler
)
    elif inner_max_eval and warm_up_param:
Train_with_warmup(
model, model_id, model_name, best_model_name,
epochs, train_data, test_data, multi_gpu,
schedule_start, schedule_length,
lr_decay_step, lr_decay_milestones,
epsilon_scheduler, max_eps, norm, logger, verbose,
opt, method, method_params, attack_params, evaluation_params,
inner_max_scheduler=inner_max_scheduler,
post_warm_up_scheduler=post_warm_up_scheduler
)
def main(args):
config = load_config(args)
global_train_config = config["training_params"]
models, model_names = config_modelloader(
config, load_pretrain=config.get('load_pretrain', False)
)
for model, model_id, model_config in zip(models, model_names,
config["models"]):
# make a copy of global training config, and update per-model config
train_config = copy.deepcopy(global_train_config)
model_train(config, train_config, model, model_id, model_config)
if __name__ == "__main__":
args = argparser()
main(args)
``` |
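For reference, a minimal sketch of how the `optimizer_config` helper behaves in isolation, assuming the package is importable as `IBP_Adv_Training`; the toy model and hyperparameters are placeholders.

```python
# Minimal sketch: optimizer_config returns a fresh optimizer and LR scheduler
# per layer index. The toy model and hyperparameters are placeholders.
import torch.nn as nn

from IBP_Adv_Training.train import optimizer_config

model = nn.Sequential(nn.Linear(10, 10), nn.ReLU(), nn.Linear(10, 2))
opt_cfg = optimizer_config(
    "adam", model, lr=1e-3, weight_decay=0.0,
    lr_decay_factor=0.1, lr_decay_milestones=[50, 60]
)

# Layer index 0 uses the base learning rate and the first milestone schedule.
opt, lr_scheduler = opt_cfg.get_opt(idxLayer=0)
print(opt, lr_scheduler)
```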
{
"source": "JmfanBU/colt",
"score": 2
} |
#### File: colt/code/milp.py
```python
import argparse
import numpy as np
import os
import pickle
import time
import torch
import torch.optim as optim
from loaders import get_loaders
from utils import get_inputs, get_network
from gurobipy import GRB, Model, LinExpr
from layers import Linear, ReLU, AveragePooling, Flatten, Conv2d
from main import test, get_adv_loss, compute_bounds, compute_bounds_approx
from tqdm import tqdm
torch.set_printoptions(precision=10)
np.random.seed(100)
BINARY_THRESHOLD = 0.0
dtype = torch.float32
device = 'cuda'
def add_relu_constraints(model, in_lb, in_ub, in_neuron, out_neuron, is_binary):
if in_ub <= 0:
out_neuron.lb = 0
out_neuron.ub = 0
elif in_lb >= 0:
model.addConstr(in_neuron, GRB.EQUAL, out_neuron)
else:
model.addConstr(out_neuron >= 0)
model.addConstr(out_neuron >= in_neuron)
if is_binary:
relu_ind = model.addVar(vtype=GRB.BINARY)
model.addConstr(out_neuron <= in_ub * relu_ind)
model.addConstr(out_neuron <= in_neuron - in_lb * (1 - relu_ind))
model.addGenConstrIndicator(relu_ind, True, in_neuron, GRB.GREATER_EQUAL, 0.0)
model.addGenConstrIndicator(relu_ind, False, in_neuron, GRB.LESS_EQUAL, 0.0)
else:
model.addConstr(-in_ub * in_neuron + (in_ub - in_lb) * out_neuron, GRB.LESS_EQUAL, -in_lb * in_ub)
def handle_relu(max_binary, lidx, relu_rnk, relu_priority, model, neurons, relu_inds, n_outs, pr_lb, pr_ub, lp=False):
unstable, n_binary = 0, 0
neurons[lidx] = []
relu_inds[lidx] = {}
binary_left = n_outs if max_binary is None else max_binary
if relu_rnk is not None and max_binary is not None:
to_bin = {i: False for i in range(n_outs)}
for i in range(min(max_binary, n_outs)):
to_bin[relu_rnk[lidx][i]] = True
for out_idx in range(n_outs):
if pr_ub[0, out_idx] <= 0:
neurons[lidx] += [model.addVar(0, 0, vtype=GRB.CONTINUOUS, name='n_{}_{}'.format(lidx, out_idx))]
elif pr_lb[0, out_idx] >= 0:
neurons[lidx] += [neurons[lidx-1][out_idx]]
else:
neurons[lidx] += [model.addVar(0, pr_ub[0, out_idx], vtype=GRB.CONTINUOUS, name='n_{}_{}'.format(lidx, out_idx))]
model.addConstr(neurons[lidx][out_idx] >= 0)
model.addConstr(neurons[lidx][out_idx] >= neurons[lidx-1][out_idx])
if lp:
is_binary = False
elif max_binary is None:
is_binary = True
else:
is_binary = to_bin[out_idx] if relu_rnk is not None else binary_left > 0
if is_binary:
binary_left -= 1
n_binary += 1
relu_inds[lidx][out_idx] = model.addVar(vtype=GRB.BINARY, name='ind_relu_{}_{}'.format(lidx, out_idx))
relu_inds[lidx][out_idx].BranchPriority = relu_priority[lidx][out_idx]
model.addConstr(neurons[lidx][out_idx] <= pr_ub[0, out_idx] * relu_inds[lidx][out_idx])
model.addConstr(neurons[lidx][out_idx] <= neurons[lidx-1][out_idx] - pr_lb[0, out_idx] * (1 - relu_inds[lidx][out_idx]))
model.addGenConstrIndicator(relu_inds[lidx][out_idx], True, neurons[lidx-1][out_idx], GRB.GREATER_EQUAL, 0.0)
model.addGenConstrIndicator(relu_inds[lidx][out_idx], False, neurons[lidx-1][out_idx], GRB.LESS_EQUAL, 0.0)
else:
model.addConstr(
-pr_ub[0, out_idx] * neurons[lidx-1][out_idx] + (pr_ub[0, out_idx] - pr_lb[0, out_idx]) * neurons[lidx][out_idx],
GRB.LESS_EQUAL,
-pr_lb[0, out_idx] * pr_ub[0, out_idx])
unstable += 1
return unstable, n_binary
def report(ver_logdir, tot_verified_corr, tot_nat_ok, tot_attack_ok, tot_pgd_ok, test_idx, tot_tests, test_data):
if tot_tests % 10 == 0:
print('tot_tests: %d, verified: %.5lf [%d/%d], nat_ok: %.5lf [%d/%d], attack_ok: %.5lf [%d/%d], pgd_ok: %.5lf [%d/%d]' % (
tot_tests,
tot_verified_corr/tot_tests, tot_verified_corr, tot_tests,
tot_nat_ok/tot_tests, tot_nat_ok, tot_tests,
tot_attack_ok/tot_tests, tot_attack_ok, tot_tests,
tot_pgd_ok/tot_tests, tot_pgd_ok, tot_tests,
))
print('=====================================')
out_file = os.path.join(ver_logdir, '{}.p'.format(test_idx))
pickle.dump(test_data, open(out_file, 'wb'))
def get_diff(outs, targets, i, j, reduction):
assert reduction == 'sum'
    return -(outs[:, i] - outs[:, j]).sum()
def learn_slopes(relu_params, bounds, args, n_layers, net, inputs, targets, abs_inputs, i, j):
for param in relu_params:
param.data = torch.ones(param.size()).to(param.device)
relu_opt = optim.Adam(relu_params, lr=0.03, weight_decay=0)
ret_verified = False
for it in range(args.num_iters):
relu_opt.zero_grad()
if i is None and j is None:
abs_loss, abs_ok = get_adv_loss(device, args.test_eps, args.layer_idx, net, bounds, inputs, targets, args, detach=False)
else:
abs_out = net(abs_inputs)
abs_loss = -abs_out.get_min_diff(i, j)
relu_opt.zero_grad()
abs_loss.backward()
relu_opt.step()
for param in relu_params:
if param.grad is not None:
param.data = torch.clamp(param.data, 0, 1)
if ret_verified:
break
with torch.no_grad():
abs_out = net(abs_inputs)
if i is None and j is None:
_, verified_corr = abs_out.verify(targets)
if verified_corr:
ret_verified = True
else:
abs_loss = -abs_out.get_min_diff(i, j)
if abs_loss < 0:
ret_verified = True
relu_rnk, relu_priority = {}, {}
for lidx, layer in enumerate(net.blocks):
if isinstance(layer, ReLU):
for param in layer.parameters():
relu_priority[lidx] = []
if param.grad is None:
for i in range(param.size()[0]):
relu_priority[lidx].append(0)
_, sorted_ids = torch.sort(param.abs().view(-1), descending=True)
else:
g_abs = param.grad.abs().view(-1)
for i in range(g_abs.size()[0]):
relu_priority[lidx].append(int(g_abs[i].item()*1000))
_, sorted_ids = torch.sort(param.grad.abs().view(-1), descending=True)
sorted_ids = sorted_ids.cpu().numpy()
relu_rnk[lidx] = sorted_ids
net.zero_grad()
return relu_rnk, ret_verified, relu_priority
def callback(model, where):
if where == GRB.Callback.MIP:
obj_best = model.cbGet(GRB.Callback.MIP_OBJBST)
obj_bound = model.cbGet(GRB.Callback.MIP_OBJBND)
if obj_bound > 0 or obj_best < 0:
model.terminate()
def reset_params(args, net, dtype):
relu_params = []
for param_name, param_value in net.named_parameters():
if 'deepz' in param_name:
relu_params.append(param_value)
if args.test_domain == 'zono_iter':
param_value.data = torch.ones(param_value.size()).to(param_value.device, dtype=dtype)
else:
param_value.data = torch.ones(param_value.size()).to(param_value.device, dtype=dtype)
param_value.requires_grad_(True)
else:
param_value.requires_grad_(False)
return relu_params
def get_flat_idx(img_dim, ch_idx, i, j):
return ch_idx * (img_dim) ** 2 + i * img_dim + j
def refine(args, bounds, net, refine_i, refine_j, abs_inputs, input_size):
dep = {lidx: {} for lidx in range(-1, args.refine_lidx+1)}
neurons = {lidx: {} for lidx in range(-1, args.refine_lidx+1)}
dep[args.refine_lidx][(refine_i, refine_j)] = True
model = Model("refinezono")
model.setParam('OutputFlag', 0)
model.setParam('TimeLimit', 10)
to_refine = []
refine_channels = bounds[args.refine_lidx+1][0].shape[1]
for ch_idx in range(refine_channels):
out_lb = bounds[args.refine_lidx+1][0][0, ch_idx, refine_i, refine_j]
out_ub = bounds[args.refine_lidx+1][1][0, ch_idx, refine_i, refine_j]
neurons[args.refine_lidx][(ch_idx, refine_i, refine_j)] = model.addVar(out_lb, out_ub, vtype=GRB.CONTINUOUS)
if out_lb < 0 and out_ub > 0:
to_refine.append(ch_idx)
binary_left = args.refine_milp
for lidx in range(args.refine_lidx, 0, -1):
lb, ub = bounds[lidx]
block = net.blocks[lidx]
if isinstance(block, Conv2d):
weight, bias = block.conv.weight.cpu().numpy(), block.conv.bias.cpu().numpy()
kernel_size, stride = block.kernel_size, block.stride
dim = bounds[lidx+1][0].shape[2]
out_channels, in_channels = weight.shape[0], weight.shape[1]
if kernel_size % 2 == 0:
min_kdelta, max_kdelta = -(kernel_size//2-1), kernel_size//2
else:
min_kdelta, max_kdelta = -(kernel_size//2), kernel_size//2
for x in range(0, dim):
for y in range(0, dim):
if (x, y) not in dep[lidx]:
continue
for out_ch in range(out_channels):
expr = LinExpr()
expr += bias[out_ch]
for kx in range(min_kdelta, max_kdelta+1):
for ky in range(min_kdelta, max_kdelta+1):
new_x = x*stride + kx
new_y = y*stride + ky
if new_x < 0 or new_y < 0 or new_x >= dim*stride or new_y >= dim*stride:
continue
dep[lidx-1][(new_x, new_y)] = True
for in_ch in range(in_channels):
if (in_ch, new_x, new_y) not in neurons[lidx-1]:
in_lb = bounds[lidx][0][0, in_ch, new_x, new_y].item()
in_ub = bounds[lidx][1][0, in_ch, new_x, new_y].item()
neurons[lidx-1][(in_ch, new_x, new_y)] = model.addVar(in_lb, in_ub, vtype=GRB.CONTINUOUS)
expr += neurons[lidx-1][(in_ch, new_x, new_y)] * weight[out_ch, in_ch, kx - min_kdelta, ky - min_kdelta]
model.addConstr(expr, GRB.EQUAL, neurons[lidx][(out_ch, x, y)])
elif isinstance(block, ReLU):
n_channels, dim = lb.shape[1], lb.shape[2]
for x in range(0, dim):
for y in range(0, dim):
if (x, y) not in dep[lidx]:
continue
dep[lidx-1][(x, y)] = True
for out_ch in range(n_channels):
in_lb, in_ub = lb[0, out_ch, x, y].item(), ub[0, out_ch, x, y].item()
neurons[lidx-1][(out_ch, x, y)] = model.addVar(in_lb, in_ub, vtype=GRB.CONTINUOUS)
if binary_left > 0 and in_lb < 0 and in_ub > 0:
is_binary = True
binary_left -= 1
else:
is_binary = False
add_relu_constraints(model, in_lb, in_ub, neurons[lidx-1][(out_ch, x, y)], neurons[lidx][(out_ch, x, y)], is_binary)
else:
assert False
for ch_idx in to_refine:
old_lb = bounds[args.refine_lidx+1][0][0, ch_idx, refine_i, refine_j]
old_ub = bounds[args.refine_lidx+1][1][0, ch_idx, refine_i, refine_j]
model.setObjective(neurons[args.refine_lidx][(ch_idx, refine_i, refine_j)], GRB.MINIMIZE)
model.update()
model.optimize()
new_lb = model.objBound
model.setObjective(neurons[args.refine_lidx][(ch_idx, refine_i, refine_j)], GRB.MAXIMIZE)
model.update()
model.optimize()
new_ub = model.objBound
if new_lb != -GRB.INFINITY and new_lb >= old_lb:
net.blocks[args.refine_lidx+1].bounds[0][0, ch_idx, refine_i, refine_j] = new_lb
if new_ub != GRB.INFINITY and new_ub <= old_ub:
net.blocks[args.refine_lidx+1].bounds[1][0, ch_idx, refine_i, refine_j] = new_ub
def verify_test(args, net, num_relu, inputs, targets, abs_inputs, bounds, refined_triples, test_data, grb_modelsdir, test_idx):
ok = True
model = None
n_layers = len(net.blocks)
for adv_idx in range(10):
if targets[0] == adv_idx:
continue
if ('verified', adv_idx) in test_data and test_data[('verified', adv_idx)]:
print('label already verified: ', adv_idx)
continue
relu_params = reset_params(args, net, dtype)
if adv_idx in test_data:
print(test_data[adv_idx])
if args.obj_threshold is not None:
if adv_idx not in test_data:
ok = False
continue
if test_data[adv_idx]['obj_bound'] < args.obj_threshold:
print('too far, not considering adv_idx = %d, obj_bound = %.5lf' % (adv_idx, test_data[adv_idx]['obj_bound']))
ok = False
continue
relu_rnk, relu_priority = None, None
if args.test_domain == 'zono_iter':
with torch.enable_grad():
relu_rnk, verified, relu_priority = learn_slopes(
relu_params, bounds, args, n_layers, net, inputs, targets, abs_inputs, targets[0].item(), adv_idx)
if verified:
print('adv_idx=%d verified without MILP' % adv_idx)
test_data[('verified', adv_idx)] = True
continue
max_binary = args.max_binary
milp_timeout = args.milp_timeout
if model is None or (args.test_domain == 'zono_iter'):
model = Model("milp")
model.setParam('OutputFlag', args.debug)
model.setParam('TimeLimit', milp_timeout)
abs_curr = net.forward_until(args.layer_idx, abs_inputs)
abs_flat = abs_curr
if len(abs_curr.head.size()) == 4:
n_channels, img_dim = abs_curr.head.size()[1], abs_curr.head.size()[2]
flat_dim = n_channels * img_dim * img_dim
abs_flat = abs_curr.view((1, flat_dim))
n_inputs = abs_flat.head.size()[1]
betas = [model.addVar(-1.0, 1.0, vtype=GRB.CONTINUOUS, name='beta_{}'.format(j)) for j in range(n_inputs)]
if abs_flat.errors is not None:
n_errors = abs_flat.errors.size()[0]
errors = [model.addVar(-1.0, 1.0, vtype=GRB.CONTINUOUS, name='error_{}'.format(j)) for j in range(n_errors)]
if net.blocks[args.layer_idx+1].bounds is not None:
lb_refine, ub_refine = net.blocks[args.layer_idx+1].bounds
lb_refine, ub_refine = lb_refine.view((1, -1)).cpu().numpy(), ub_refine.view((1, -1)).cpu().numpy()
lb, ub = abs_flat.concretize()
lb, ub = lb.detach().cpu().numpy(), ub.detach().cpu().numpy()
neurons, relu_inds = {}, {}
neurons[args.layer_idx] = []
for j in range(n_inputs):
neuron_lb, neuron_ub = lb[0, j], ub[0, j]
if net.blocks[args.layer_idx+1].bounds is not None:
neuron_lb = lb_refine[0, j]
neuron_ub = ub_refine[0, j]
lb[0, j] = neuron_lb
ub[0, j] = neuron_ub
neurons[args.layer_idx].append(model.addVar(vtype=GRB.CONTINUOUS, lb=neuron_lb, ub=neuron_ub, name='input_{}'.format(j)))
expr = LinExpr()
expr += abs_flat.head[0, j].item()
if abs_flat.beta is not None:
expr += abs_flat.beta[0, j].item() * betas[j]
if abs_flat.errors is not None:
coeffs = abs_flat.errors[:, 0, j].detach().cpu().numpy().tolist()
expr += LinExpr(coeffs, errors)
model.addConstr(expr, GRB.EQUAL, neurons[args.layer_idx][j])
n_outs = n_inputs
relu_done = False
for lidx in range(args.layer_idx+1, n_layers):
pr_lb, pr_ub = lb, ub
abs_curr = net.blocks[lidx](abs_curr)
if len(abs_curr.head.size()) == 4:
n_channels, img_dim = abs_curr.head.size()[1], abs_curr.head.size()[2]
flat_dim = n_channels * img_dim * img_dim
abs_flat = abs_curr.view((1, flat_dim))
else:
abs_flat = abs_curr
lb, ub = abs_flat.concretize()
lb, ub = lb.detach().cpu().numpy(), ub.detach().cpu().numpy()
if isinstance(net.blocks[lidx], Linear):
weight, bias = net.blocks[lidx].linear.weight, net.blocks[lidx].linear.bias
neurons[lidx] = []
n_outs = weight.size()[0]
for out_idx in range(n_outs):
nvar = model.addVar(vtype=GRB.CONTINUOUS, lb=lb[0, out_idx], ub=ub[0, out_idx], name='n_{}_{}'.format(lidx, out_idx))
neurons[lidx].append(nvar)
tmp = LinExpr()
tmp += -neurons[lidx][out_idx]
tmp += bias[out_idx].item()
tmp += LinExpr(weight[out_idx].detach().cpu().numpy(), neurons[lidx-1])
model.addConstr(tmp, GRB.EQUAL, 0)
elif isinstance(net.blocks[lidx], ReLU):
lp = False
unstable, n_binary = handle_relu(
max_binary, lidx, relu_rnk, relu_priority, model, neurons, relu_inds, n_outs, pr_lb, pr_ub, lp)
relu_done = True
print('Unstable ReLU: ', unstable, ' binary: ', n_binary)
elif isinstance(net.blocks[lidx], AveragePooling):
kernel_size = net.blocks[lidx].kernel_size
assert img_dim % kernel_size == 0
neurons[lidx] = []
for ch_idx in range(n_channels):
for i in range(0, img_dim, kernel_size):
for j in range(0, img_dim, kernel_size):
new_idx = get_flat_idx(img_dim//kernel_size, ch_idx, i//kernel_size, j//kernel_size)
nvar = model.addVar(vtype=GRB.CONTINUOUS, lb=lb[0, new_idx], ub=ub[0, new_idx], name='n_{}_{}'.format(lidx, new_idx))
neurons[lidx].append(nvar)
tmp = LinExpr()
tmp -= (kernel_size * kernel_size) * nvar
for di in range(0, kernel_size):
for dj in range(0, kernel_size):
old_idx = get_flat_idx(img_dim, ch_idx, i+di, j+dj)
tmp += neurons[lidx-1][old_idx]
model.addConstr(tmp, GRB.EQUAL, 0)
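# The equality above encodes average pooling linearly: kernel_size^2 times the pooled
# neuron equals the sum of the previous layer's neurons in its kernel window.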
elif isinstance(net.blocks[lidx], Flatten):
neurons[lidx] = neurons[lidx-1]
elif isinstance(net.blocks[lidx], Conv2d):
weight, bias = net.blocks[lidx].conv.weight.cpu().numpy(), net.blocks[lidx].conv.bias.cpu().numpy()
kernel_size, stride = net.blocks[lidx].kernel_size, net.blocks[lidx].stride
out_channels, in_channels = weight.shape[0], weight.shape[1]
if kernel_size % 2 == 0:
min_kdelta, max_kdelta = -(kernel_size//2-1), kernel_size//2
else:
min_kdelta, max_kdelta = -(kernel_size//2), kernel_size//2
neurons[lidx] = []
for out_ch in range(out_channels):
for x in range(0, img_dim):
for y in range(0, img_dim):
new_idx = get_flat_idx(img_dim, out_ch, x, y)
nvar = model.addVar(vtype=GRB.CONTINUOUS, lb=lb[0, new_idx], ub=ub[0, new_idx], name='n_{}_{}'.format(lidx, new_idx))
neurons[lidx].append(nvar)
expr = LinExpr()
expr += bias[out_ch]
for kx in range(min_kdelta, max_kdelta+1):
for ky in range(min_kdelta, max_kdelta+1):
new_x = x*stride + kx
new_y = y*stride + ky
if new_x < 0 or new_y < 0 or new_x >= img_dim*stride or new_y >= img_dim*stride:
continue
for in_ch in range(in_channels):
old_idx = get_flat_idx(img_dim*stride, in_ch, new_x, new_y)
expr += neurons[lidx-1][old_idx] * weight[out_ch, in_ch, kx - min_kdelta, ky - min_kdelta]
model.addConstr(expr, GRB.EQUAL, nvar)
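# Each convolution output neuron is thus constrained to an affine expression (bias plus
# weighted sum) over the previous layer's neurons, restricted to kernel positions that
# fall inside the input feature map.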
else:
print('unknown layer type: ', net.blocks[lidx])
assert False
model.setObjective(neurons[n_layers-1][targets[0].item()] - neurons[n_layers-1][adv_idx], GRB.MINIMIZE)
model.update()
if args.save_models:
model.write('%s/model_%d_%d.mps' % (grb_modelsdir, test_idx, adv_idx))
ok = False
continue
model.optimize(callback)
print('MILP: ', targets[0].item(), adv_idx, model.objVal, model.objBound, model.RunTime)
test_data[adv_idx] = {
'milp_timeout': milp_timeout,
'max_binary': max_binary,
'obj_val': model.objVal,
'obj_bound': model.objBound,
'runtime': model.RunTime,
}
if model.objBound < 0:
ok = False
break
else:
test_data[('verified', adv_idx)] = True
return ok
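# Summary of the MILP check above: the objective minimises logit[target] - logit[adv_idx].
# If the objective bound never drops below zero, the adversarial class cannot overtake the
# true class, so this (test_idx, adv_idx) pair is recorded as verified; otherwise the
# function returns False and the sample is left unverified.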
def main():
parser = argparse.ArgumentParser(description='Perform greedy layerwise training.')
parser.add_argument('--prune_p', default=None, type=float, help='percentage of weights to prune in each layer')
parser.add_argument('--dataset', default='cifar10', help='dataset to use')
parser.add_argument('--net', required=True, type=str, help='network to use')
parser.add_argument('--load_model', type=str, help='model to load')
parser.add_argument('--layer_idx', default=1, type=int, help='layer index of flattened vector')
parser.add_argument('--n_valid', default=1000, type=int, help='number of test samples')
parser.add_argument('--n_train', default=None, type=int, help='number of training samples to use')
parser.add_argument('--train_batch', default=1, type=int, help='batch size for training')
parser.add_argument('--test_batch', default=128, type=int, help='batch size for testing')
parser.add_argument('--test_domain', default='zono', type=str, help='domain to test with')
parser.add_argument('--test_eps', default=None, type=float, help='epsilon to verify')
parser.add_argument('--debug', action='store_true', help='debug mode')
parser.add_argument('--no_milp', action='store_true', help='no MILP mode')
parser.add_argument('--no_load', action='store_true', help='verify from scratch')
parser.add_argument('--no_smart', action='store_true', help='bla')
parser.add_argument('--milp_timeout', default=10, type=int, help='timeout for MILP')
parser.add_argument('--eval_train', action='store_true', help='evaluate on training set')
parser.add_argument('--test_idx', default=None, type=int, help='specific index to test')
parser.add_argument('--start_idx', default=0, type=int, help='specific index to start')
parser.add_argument('--end_idx', default=1000, type=int, help='specific index to end')
parser.add_argument('--max_binary', default=None, type=int, help='number of neurons to encode as binary variable in MILP (per layer)')
parser.add_argument('--num_iters', default=50, type=int, help='number of iterations to find slopes')
parser.add_argument('--max_refine_triples', default=0, type=int, help='number of triples to refine')
parser.add_argument('--refine_lidx', default=None, type=int, help='layer to refine')
parser.add_argument('--save_models', action='store_true', help='whether to only store models')
parser.add_argument('--refine_milp', default=0, type=int, help='number of neurons to refine using MILP')
parser.add_argument('--obj_threshold', default=None, type=float, help='threshold to consider for MILP verification')
parser.add_argument('--attack_type', default='pgd', type=str, help='attack')
parser.add_argument('--attack_n_steps', default=10, type=int, help='number of steps for the attack')
parser.add_argument('--attack_step_size', default=0.25, type=float, help='step size for the attack (relative to epsilon)')
parser.add_argument('--layers', required=False, default=None, type=int, nargs='+', help='layer indices for training')
args = parser.parse_args()
ver_logdir = args.load_model[:-3] + '_ver'
if not os.path.exists(ver_logdir):
os.makedirs(ver_logdir)
grb_modelsdir = args.load_model[:-3] + '_grb'
if not os.path.exists(grb_modelsdir):
os.makedirs(grb_modelsdir)
num_train, _, test_loader, input_size, input_channel = get_loaders(args)
net = get_network(device, args, input_size, input_channel)
n_layers = len(net.blocks)
# net.to_double()
args.test_domains = ['box']
with torch.no_grad():
test(device, 0, args, net, test_loader)
args.test_batch = 1
num_train, _, test_loader, input_size, input_channel = get_loaders(args)
num_relu = 0
for lidx in range(args.layer_idx+1, n_layers):
print(net.blocks[lidx])
if isinstance(net.blocks[lidx], ReLU):
num_relu += 1
with torch.no_grad():
tot_verified_corr, tot_nat_ok, tot_attack_ok, tot_pgd_ok, tot_tests = 0, 0, 0, 0, 0
for test_idx, (inputs, targets) in enumerate(test_loader):
if test_idx < args.start_idx or test_idx >= args.end_idx or test_idx >= args.n_valid:
continue
if args.test_idx is not None and test_idx != args.test_idx:
continue
tot_tests += 1
test_file = os.path.join(ver_logdir, '{}.p'.format(test_idx))
test_data = pickle.load(open(test_file, 'rb')) if (not args.no_load) and os.path.isfile(test_file) else {}
print('Verify test_idx =', test_idx)
for lidx in range(n_layers):
net.blocks[lidx].bounds = None
inputs, targets = inputs.to(device), targets.to(device)
abs_inputs = get_inputs(args.test_domain, inputs, args.test_eps, device, dtype=dtype)
nat_out = net(inputs)
nat_ok = targets.eq(nat_out.max(dim=1)[1]).item()
tot_nat_ok += float(nat_ok)
test_data['ok'] = nat_ok
if not nat_ok:
report(ver_logdir, tot_verified_corr, tot_nat_ok, tot_attack_ok, tot_pgd_ok, test_idx, tot_tests, test_data)
continue
with torch.enable_grad():
pgd_loss, pgd_ok = get_adv_loss(device, args.test_eps, -1, net, None, inputs, targets, args)
if pgd_ok:
test_data['pgd_ok'] = 1
tot_pgd_ok += 1
else:
test_data['pgd_ok'] = 0
report(ver_logdir, tot_verified_corr, tot_nat_ok, tot_attack_ok, tot_pgd_ok, test_idx, tot_tests, test_data)
continue
if 'verified' in test_data and test_data['verified']:
tot_verified_corr += 1
tot_attack_ok += 1
report(ver_logdir, tot_verified_corr, tot_nat_ok, tot_attack_ok, tot_pgd_ok, test_idx, tot_tests, test_data)
continue
relu_params = reset_params(args, net, dtype)
bounds = compute_bounds(net, device, args.layer_idx, args, abs_inputs)
if args.test_domain == 'zono_iter':
with torch.enable_grad():
learn_slopes(relu_params, bounds, args, n_layers, net, inputs, targets, abs_inputs, None, None)
with torch.enable_grad():
abs_loss, abs_ok = get_adv_loss(device, args.test_eps, args.layer_idx, net, bounds, inputs, targets, args)
refined_triples = []
if args.refine_lidx is not None:
bounds = compute_bounds(net, device, args.layer_idx+1, args, abs_inputs)
for lidx in range(0, args.layer_idx+2):
net.blocks[lidx].bounds = bounds[lidx]
print('loss before refine: ', abs_loss)
refine_dim = bounds[args.refine_lidx+1][0].shape[2]
pbar = tqdm(total=refine_dim*refine_dim, dynamic_ncols=True)
for refine_i in range(refine_dim):
for refine_j in range(refine_dim):
# refine(args, bounds, net, 0, 15, abs_inputs, input_size)
refine(args, bounds, net, refine_i, refine_j, abs_inputs, input_size)
pbar.update(1)
pbar.close()
with torch.enable_grad():
abs_loss, abs_ok = get_adv_loss(device, args.test_eps, args.layer_idx, net, bounds, inputs, targets, args)
print('loss after refine: ', abs_loss)
if abs_ok:
tot_attack_ok += 1
abs_out = net(abs_inputs)
verified, verified_corr = abs_out.verify(targets)
test_data['verified'] = int(verified_corr.item())
print('abs_loss: ', abs_loss.item(), '\tabs_ok: ', abs_ok.item(), '\tverified_corr: ', verified_corr.item())
if verified_corr:
tot_verified_corr += 1
report(ver_logdir, tot_verified_corr, tot_nat_ok, tot_attack_ok, tot_pgd_ok, test_idx, tot_tests, test_data)
continue
if args.no_milp or (not abs_ok):
report(ver_logdir, tot_verified_corr, tot_nat_ok, tot_attack_ok, tot_pgd_ok, test_idx, tot_tests, test_data)
continue
if verify_test(args, net, num_relu, inputs, targets, abs_inputs, bounds, refined_triples, test_data, grb_modelsdir, test_idx):
tot_verified_corr += 1
test_data['verified'] = True
report(ver_logdir, tot_verified_corr, tot_nat_ok, tot_attack_ok, tot_pgd_ok, test_idx, tot_tests, test_data)
if __name__ == '__main__':
main()
```
#### File: colt/code/networks.py
```python
import torch.nn as nn
import torch
from loaders import get_mean_sigma
from layers import Conv2d, Normalization, ReLU, Flatten, Linear, Sequential
class SeqNet(nn.Module):
def __init__(self):
super(SeqNet, self).__init__()
self.is_double = False
self.skip_norm = False
def forward(self, x, init_lambda=False):
if isinstance(x, torch.Tensor) and self.is_double:
x = x.to(dtype=torch.float64)
x = self.blocks(x, init_lambda, skip_norm=self.skip_norm)
return x
def reset_bounds(self):
for block in self.blocks:
block.bounds = None
def to_double(self):
self.is_double = True
for param_name, param_value in self.named_parameters():
param_value.data = param_value.data.to(dtype=torch.float64)
def forward_until(self, i, x):
""" Forward until layer i (inclusive) """
x = self.blocks.forward_until(i, x)
return x
def forward_from(self, i, x):
""" Forward from layer i (exclusive) """
x = self.blocks.forward_from(i, x)
return x
class FFNN(SeqNet):
def __init__(self, device, dataset, sizes, n_class=10, input_size=32, input_channel=3):
super(FFNN, self).__init__()
mean, sigma = get_mean_sigma(device, dataset)
self.normalizer = Normalization(mean, sigma)
layers = [Flatten(), Linear(input_size*input_size*input_channel, sizes[0]), ReLU(sizes[0])]
for i in range(1, len(sizes)):
layers += [
Linear(sizes[i-1], sizes[i]),
ReLU(sizes[i]),
]
layers += [Linear(sizes[-1], n_class)]
self.blocks = Sequential(*layers)
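# Illustrative example (not part of the original code): with the default input shape,
# FFNN(device, 'cifar10', sizes=[100, 100]) builds
# Flatten -> Linear(3*32*32, 100) -> ReLU -> Linear(100, 100) -> ReLU -> Linear(100, 10).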
class ConvMed(SeqNet):
def __init__(self, device, dataset, n_class=10, input_size=32, input_channel=3, width1=1, width2=1, linear_size=100):
super(ConvMed, self).__init__()
mean, sigma = get_mean_sigma(device, dataset)
layers = [
Normalization(mean, sigma),
Conv2d(input_channel, 16*width1, 5, stride=2, padding=2, dim=input_size),
ReLU((16*width1, input_size//2, input_size//2)),
Conv2d(16*width1, 32*width2, 4, stride=2, padding=1, dim=input_size//2),
ReLU((32*width2, input_size//4, input_size//4)),
Flatten(),
Linear(32*width2*(input_size // 4)*(input_size // 4), linear_size),
ReLU(linear_size),
Linear(linear_size, n_class),
]
self.blocks = Sequential(*layers)
class ConvMedBig(SeqNet):
def __init__(self, device, dataset, n_class=10, input_size=32, input_channel=3, width1=1, width2=1, width3=1, linear_size=100):
super(ConvMedBig, self).__init__()
mean, sigma = get_mean_sigma(device, dataset)
self.normalizer = Normalization(mean, sigma)
layers = [
Normalization(mean, sigma),
Conv2d(input_channel, 16*width1, 3, stride=1, padding=1, dim=input_size),
ReLU((16*width1, input_size, input_size)),
Conv2d(16*width1, 16*width2, 4, stride=2, padding=1, dim=input_size//2),
ReLU((16*width2, input_size//2, input_size//2)),
Conv2d(16*width2, 32*width3, 4, stride=2, padding=1, dim=input_size//2),
ReLU((32*width3, input_size//4, input_size//4)),
Flatten(),
Linear(32*width3*(input_size // 4)*(input_size // 4), linear_size),
ReLU(linear_size),
Linear(linear_size, n_class),
]
self.blocks = Sequential(*layers)
class cnn_2layer(SeqNet):
def __init__(
self, device, dataset, input_channel, input_size, width, linear_size
):
super(cnn_2layer, self).__init__()
mean, sigma = get_mean_sigma(device, dataset, IBP=True)
self.normalizer = Normalization(mean, sigma)
self.layers = [
Normalization(mean, sigma),
Conv2d(
input_channel, 4 * width, 4,
stride=2, padding=1, dim=input_size
),
ReLU((4 * width, input_size//2, input_size//2)),
Conv2d(
4 * width, 8 * width, 4,
stride=2, padding=1, dim=input_size//2
),
ReLU((8 * width, input_size//4, input_size//4)),
Flatten(),
Linear(
8 * width * (input_size // 4) * (input_size // 4), linear_size
),
ReLU(linear_size),
Linear(linear_size, 10),
]
def converter(self, net):
if isinstance(net, nn.Sequential):
seq_model = net
else:
seq_model = net.module
for idx, l in enumerate(seq_model):
if isinstance(l, nn.Linear):
self.layers[idx + 1].linear.weight.data.copy_(l.weight.data)
self.layers[idx + 1].linear.bias.data.copy_(l.bias.data)
if isinstance(l, nn.Conv2d):
self.layers[idx + 1].conv.weight.data.copy_(l.weight.data)
self.layers[idx + 1].conv.bias.data.copy_(l.bias.data)
self.blocks = Sequential(*self.layers)
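# Note: the `idx + 1` offset in converter() accounts for the leading Normalization layer
# in self.layers, which a plain nn.Sequential model such as the one built by
# model_cnn_2layer below does not contain, so source layer i maps to self.layers[i + 1].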
def model_cnn_2layer(in_ch, in_dim, width, linear_size=128):
"""
CNN, small 2-layer (default kernel size is 4 by 4)
Parameter:
in_ch: input image channel, 1 for MNIST and 3 for CIFAR
in_dim: input dimension, 28 for MNIST and 32 for CIFAR
width: width multiplier
"""
model = nn.Sequential(
nn.Conv2d(in_ch, 4 * width, 4, stride=2, padding=1),
nn.ReLU(),
nn.Conv2d(4 * width, 8 * width, 4, stride=2, padding=1),
nn.ReLU(),
Flatten(),
nn.Linear(8 * width * (in_dim // 4) * (in_dim // 4), linear_size),
nn.ReLU(),
nn.Linear(linear_size, 10)
)
return model
class cnn_4layer(SeqNet):
def __init__(
self, device, dataset, input_channel, input_size, width, linear_size
):
super(cnn_4layer, self).__init__()
mean, sigma = get_mean_sigma(device, dataset, IBP=True)
self.normalizer = Normalization(mean, sigma)
self.layers = [
Normalization(mean, sigma),
Conv2d(
input_channel, 4 * width, 3,
stride=1, padding=1, dim=input_size
),
ReLU((4 * width, input_size, input_size)),
Conv2d(
4 * width, 4 * width, 4,
stride=2, padding=1, dim=input_size//2
),
ReLU((4 * width, input_size//2, input_size//2)),
Conv2d(
4 * width, 8 * width, 3,
stride=1, padding=1, dim=input_size//2
),
ReLU((8 * width, input_size//2, input_size//2)),
Conv2d(
8 * width, 8 * width, 4,
stride=2, padding=1, dim=input_size//4
),
ReLU((8 * width, input_size//4, input_size//4)),
Flatten(),
Linear(
8 * width * (input_size // 4) * (input_size // 4), linear_size
),
ReLU(linear_size),
Linear(linear_size, linear_size),
ReLU(linear_size),
Linear(linear_size, 10),
]
def converter(self, net):
if isinstance(net, nn.Sequential):
seq_model = net
else:
seq_model = net.module
for idx, l in enumerate(seq_model):
if isinstance(l, nn.Linear):
self.layers[idx + 1].linear.weight.data.copy_(l.weight.data)
self.layers[idx + 1].linear.bias.data.copy_(l.bias.data)
if isinstance(l, nn.Conv2d):
self.layers[idx + 1].conv.weight.data.copy_(l.weight.data)
self.layers[idx + 1].conv.bias.data.copy_(l.bias.data)
self.blocks = Sequential(*self.layers)
def model_cnn_4layer(in_ch, in_dim, width, linear_size):
"""
CNN, relatively large 4-layer
Parameter:
in_ch: input image channel, 1 for MNIST and 3 for CIFAR
in_dim: input dimension, 28 for MNIST and 32 for CIFAR
width: width multiplier
"""
model = nn.Sequential(
nn.Conv2d(in_ch, 4 * width, 3, stride=1, padding=1),
nn.ReLU(),
nn.Conv2d(4 * width, 4 * width, 4, stride=2, padding=1),
nn.ReLU(),
nn.Conv2d(4 * width, 8 * width, 3, stride=1, padding=1),
nn.ReLU(),
nn.Conv2d(8 * width, 8 * width, 4, stride=2, padding=1),
nn.ReLU(),
Flatten(),
nn.Linear(8 * width * (in_dim // 4) * (in_dim // 4), linear_size),
nn.ReLU(),
nn.Linear(linear_size, linear_size),
nn.ReLU(),
nn.Linear(linear_size, 10)
)
return model
class cnn_IBP_large(SeqNet):
def __init__(
self, device, dataset, input_channel, input_size, linear_size
):
super(cnn_IBP_large, self).__init__()
mean, sigma = get_mean_sigma(device, dataset, IBP=True)
self.normalizer = Normalization(mean, sigma)
self.layers = [
Normalization(mean, sigma),
Conv2d(
input_channel, 64, 3,
stride=1, padding=1, dim=input_size
),
ReLU((64, input_size, input_size)),
Conv2d(
64, 64, 3,
stride=1, padding=1, dim=input_size
),
ReLU((64, input_size, input_size)),
Conv2d(
64, 128, 3,
stride=2, padding=1, dim=input_size//2
),
ReLU((128, input_size//2, input_size//2)),
Conv2d(
128, 128, 3,
stride=1, padding=1, dim=input_size//2
),
ReLU((128, input_size//2, input_size//2)),
Conv2d(
128, 128, 3,
stride=1, padding=1, dim=input_size//2
),
ReLU((128, input_size//2, input_size//2)),
Flatten(),
Linear(
128 * (input_size // 2) * (input_size // 2), linear_size
),
ReLU(linear_size),
Linear(linear_size, 10),
]
def converter(self, net):
if isinstance(net, nn.Sequential):
seq_model = net
else:
seq_model = net.module
for idx, l in enumerate(seq_model):
if isinstance(l, nn.Linear):
self.layers[idx + 1].linear.weight.data.copy_(l.weight.data)
self.layers[idx + 1].linear.bias.data.copy_(l.bias.data)
if isinstance(l, nn.Conv2d):
self.layers[idx + 1].conv.weight.data.copy_(l.weight.data)
self.layers[idx + 1].conv.bias.data.copy_(l.bias.data)
self.blocks = Sequential(*self.layers)
def IBP_large(in_ch, in_dim, linear_size=512):
model = nn.Sequential(
nn.Conv2d(in_ch, 64, 3, stride=1, padding=1),
nn.ReLU(),
nn.Conv2d(64, 64, 3, stride=1, padding=1),
nn.ReLU(),
nn.Conv2d(64, 128, 3, stride=2, padding=1),
nn.ReLU(),
nn.Conv2d(128, 128, 3, stride=1, padding=1),
nn.ReLU(),
nn.Conv2d(128, 128, 3, stride=1, padding=1),
nn.ReLU(),
Flatten(),
nn.Linear((in_dim // 2) * (in_dim // 2) * 128, linear_size),
nn.ReLU(),
nn.Linear(linear_size, 10)
)
return model
``` |
{
"source": "jmfederico/cookiecutter-php-docker",
"score": 3
} |
#### File: cookiecutter-php-docker/hooks/post_gen_project.py
```python
from urllib.request import urlopen, Request
from pathlib import Path
def run():
"""Actions to be run."""
base_image = "{{ cookiecutter.php_image }}"
if base_image == "alpine":
Path("php-fpm/default.Dockerfile").unlink()
Path("php-fpm/alpine.Dockerfile").rename(Path("php-fpm/Dockerfile"))
if base_image == "default":
Path("php-fpm/alpine.Dockerfile").unlink()
Path("php-fpm/default.Dockerfile").rename(Path("php-fpm/Dockerfile"))
req = Request(
url="https://www.gitignore.io/api/composer", headers={"User-Agent": "Python"}
)
Path("../.gitignore").write_bytes(urlopen(req).read())
# Move root files.
_root = Path("_root")
project_root = Path("..")
for child in _root.iterdir():
child.rename(project_root / child.name)
_root.rmdir()
# Create example index.php file for tests.
Path("../public").mkdir(exist_ok=True)
index = Path("../public/index.php")
if not index.exists():
index.write_text("<?php\nphpinfo();")
run()
``` |
{
"source": "jmfederico/cookiecutter-silverstripe",
"score": 3
} |
#### File: cookiecutter-silverstripe/hooks/post_gen_project.py
```python
import glob
from pathlib import Path
from urllib.request import Request, urlopen
def run():
"""Actions to be run."""
req = Request(
url="https://www.gitignore.io/api/node,composer",
headers={"User-Agent": "Python"},
)
Path("../.gitignore").write_bytes(urlopen(req).read())
# Move root files.
_root = Path("_root")
project_root = Path("..")
for child in _root.iterdir():
child.rename(project_root / child.name)
_root.rmdir()
Path("../.gitignore").open("a").write(
"""
### Silverstripe ###
public/assets/
public/resources/
"""
)
for path in glob.glob("../**/.cookiecutter-keep", recursive=True):
Path(path).unlink()
run()
``` |
{
"source": "jmfederico/demo-app-django",
"score": 3
} |
#### File: demo-app-django/pd_django_demo/helpers.py
```python
import os
import boto3
def get_aws_secret(secret_arn):
"""Return the secret value from an AWS secret."""
secrets_client = boto3.client("secretsmanager")
secret = secrets_client.get_secret_value(SecretId=secret_arn)
return secret["SecretString"]
def get_environ_or_aws_secret(env_var):
"""
Return the value of an environment variable or AWS secret.
It receives the name of an environment variable and, if its value
points to an AWS secret, retrieves the secret and returns it instead.
"""
env_var_value = os.environ.get(env_var)
if env_var_value and env_var_value[:23] == "arn:aws:secretsmanager:":
# Use `get_aws_secret()` from previous example.
return get_aws_secret(env_var_value)
return env_var_value
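# Illustrative usage (hypothetical variable name): if DATABASE_PASSWORD holds an ARN such
# as "arn:aws:secretsmanager:...", the secret value is fetched from AWS Secrets Manager;
# otherwise the plain environment value is returned unchanged.
#   password = get_environ_or_aws_secret("DATABASE_PASSWORD")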
```
#### File: demo-app-django/tasks/views.py
```python
from uuid import uuid4
import boto3
from django.conf import settings
from django.http import HttpRequest, HttpResponse
from django.shortcuts import redirect
from django.template.loader import render_to_string
from django.views.decorators.csrf import csrf_protect
from .models import Task
client = boto3.client("sqs")
@csrf_protect
def tasks_view(request: HttpRequest):
if request.method == "POST":
if "clear" in request.POST:
Task.objects.all().delete()
return redirect(request.path_info)
task = Task.objects.create(uuid=uuid4())
if settings.SQS_URL:
client.send_message(
QueueUrl=settings.SQS_URL,
DelaySeconds=10,
MessageBody=str(task.uuid),
)
return redirect(request.path_info)
return HttpResponse(
render_to_string(
"tasks/tasks.html",
{"tasks": Task.objects.all()},
request,
)
)
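# Request flow note: a POST without "clear" creates a Task and, when SQS_URL is
# configured, enqueues its UUID with a 10-second delay before redirecting
# (Post/Redirect/Get); a GET simply renders the current task list.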
``` |
{
"source": "jmfederico/dotenver",
"score": 3
} |
#### File: dotenver/dotenver/dotenver.py
```python
import io
import re
import sys
from pathlib import Path
import colorama
from faker import Faker
from jinja2 import Environment
VARIABLES = {}
VARIABLE_REGEX = r"""
^\s*
(
(?:export\s)? # Optional export command
\s*
([^\s=#]+) # Variable name
)
(?:
\s*=\s*? # Assignment
(.*?) # Verbatim value, no parsing done
{0} # Capture placeholder
)?
\s*$
"""
VALUES_REGEX = re.compile(VARIABLE_REGEX.format(""), re.VERBOSE)
TEMPLATE_REGEX = re.compile(
VARIABLE_REGEX.format(
r"""
(?:
\#\#\ +dotenver: # Start of the dotenver comment
(?:
([^\(\s#:]+) # Faker generator to use
(?::
([^\(\s#]+) # Value name
)?
)
(?:\((
.* # Arguments to pass to the generator
)\))?
)? # dotenver comment is optional
"""
),
re.VERBOSE,
)
FAKE = Faker()
def get_value_key(generator, name):
"""
Return a key for the given generator and name pair.
If name None, no key is generated.
"""
if name is not None:
return f"{generator}+{name}"
return None
def dotenver(generator, name=None, quotes=None, escape_with="\\", **kwargs):
r"""
Generate fake data from the given `generator`.
If a `name` is given, the value from the given `generator` and `name`
will be saved and used for subsequent calls.
In that case, only the `quotes` argument is honored on each subsequent call.
The returned value will be optionally surrounded with single or
double quotes as specified by `quotes`, and escaped with `escape_with`,
which is a backslash `\` by default.
"""
if quotes not in [None, "'", '"']:
raise ValueError("quotes must be a single `'` or double `\"` quote")
key = get_value_key(generator, name)
value = str(VARIABLES.get(key, getattr(FAKE, generator)(**kwargs)))
if key and key not in VARIABLES:
VARIABLES[key] = value
if quotes:
value = value.replace(quotes, f"{escape_with}{quotes}")
value = f"{quotes}{value}{quotes}"
return value
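# Illustrative template usage (hypothetical variable names): a template line such as
# "ADMIN_EMAIL= ## dotenver:email:admin" is rewritten by parse_stream below into
# "ADMIN_EMAIL={{ dotenver('email', 'admin') }}", so rendering calls this function and
# reuses the same generated value wherever the ('email', 'admin') pair recurs.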
def parse_stream(template_stream, current_dotenv):
"""Parse a dotenver template."""
jinja2_template = io.StringIO()
env = Environment(keep_trailing_newline=True)
env.globals["dotenver"] = dotenver
extra_variables = current_dotenv.copy()
for line in template_stream:
match = TEMPLATE_REGEX.match(line)
if match:
left_side, variable, value, generator, name, arguments = match.groups()
if variable in current_dotenv:
current_value = current_dotenv[variable][1]
try:
del extra_variables[variable]
except KeyError:
pass
# Keep track of existing named values.
key = get_value_key(generator, name)
if key:
try:
VARIABLES[key]
except KeyError:
VARIABLES[key] = current_value
line = (
f"{left_side}={current_value}"
if current_value is not None
else left_side
)
elif generator:
dotenver_args = f"'{generator}'"
if name:
dotenver_args = f"{dotenver_args}, '{name}'"
if arguments:
dotenver_args = f"{dotenver_args}, {arguments}"
line = f"{left_side}={{{{ dotenver({dotenver_args}) }}}}"
elif value:
line = f"{left_side}={value}"
else:
line = left_side
jinja2_template.write(f"{line.strip()}\n")
if extra_variables:
jinja2_template.write(
"""
######################################
# Variables not in Dotenver template #
######################################
"""
)
for left_side, value in extra_variables.values():
template_string = f"{left_side}={value}" if value is not None else left_side
jinja2_template.write(f"{template_string}\n")
template = env.from_string(jinja2_template.getvalue())
return template
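# In short: lines whose variable already exists in the current .env keep their value,
# "## dotenver:" comments become {{ dotenver(...) }} calls, plain assignments pass
# through verbatim, and variables present only in the .env are appended under the
# "Variables not in Dotenver template" banner.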
def get_dotenv_path(template_path):
"""Return the .env path for the given template path."""
if template_path.suffix == ".example":
return template_path.with_suffix("")
return template_path.with_name(".env")
def get_dotenv_dict(dotenv_path):
"""
Read a .env file and return a dictionary of the parsed data.
Each item has the VARIABLE as the key, and the value is a tuple:
(assignment, value)
If the file does not exist, return an empty dict.
"""
values = dict()
try:
with open(dotenv_path, "r") as dotenv_file:
for line in dotenv_file:
match = VALUES_REGEX.match(line)
if match:
assignment, variable, value = match.groups()
values[variable] = (assignment, value)
except FileNotFoundError:
pass
except Exception:
print(
colorama.Fore.RED,
f"The following exception ocurred while reading '{dotenv_path}'",
colorama.Fore.YELLOW,
sep="",
file=sys.stderr,
)
raise
return values
def parse_files(templates_paths, override=False):
"""Parse multiple dotenver templates and generate or update a .env for each."""
colorama.init()
jinja2_templates = {}
rendered_templates = {}
# First pass will:
# - capture all variables form templates and .env files
# - capture existing values from .env files
# - generate Jinja2 template
for _template_path in templates_paths:
template_path = Path(_template_path)
current_env = (
get_dotenv_dict(get_dotenv_path(template_path)) if not override else {}
)
try:
with open(template_path, "r") as template_file:
jinja2_templates[template_path] = parse_stream(
template_file, current_env
)
except Exception:
print(
colorama.Fore.RED,
f"The following exception ocurred while processing template"
f" '{template_path}'",
colorama.Fore.YELLOW,
sep="",
file=sys.stderr,
)
raise
# Second pass renders the templates.
# Rendering on a second pass ensures all named values from .env files
# were captured, and can be assigned to named dotenvers in templates.
for template_path, jinja2_template in jinja2_templates.items():
try:
rendered_templates[template_path] = jinja2_template.render()
except Exception:
print(
colorama.Fore.RED,
f"The following exception ocurred while processing template"
f" '{template_path}'",
colorama.Fore.YELLOW,
sep="",
file=sys.stderr,
)
raise
for template_path, rendered_template in rendered_templates.items():
dotenv_path = get_dotenv_path(template_path)
try:
with open(dotenv_path, "w") as dotenv_file:
dotenv_file.write(rendered_template)
except Exception:
print(
colorama.Fore.RED,
f"The following exception ocurred while writing to '{dotenv_path}'",
colorama.Fore.YELLOW,
sep="",
file=sys.stderr,
)
raise
print(
colorama.Fore.GREEN,
f"'{template_path}' rendered to '{dotenv_path}'",
sep="",
file=sys.stderr,
)
``` |
{
"source": "jmfederico/jinja2-python-version",
"score": 3
} |
#### File: jinja2-python-version/jinja2_python_version/python_version.py
```python
import platform
from jinja2.ext import Extension
class PythonVersion():
"""An object that contains python version information."""
_version = platform.python_version_tuple()
major = '{}'.format(_version[0])
minor = '{}.{}'.format(_version[0], _version[1])
micro = '{}.{}.{}'.format(_version[0], _version[1], _version[2])
def __str__(self):
"""Return Python version up to minor."""
return self.minor
class PythonVersionExtension(Extension):
"""Jinja extension that adds Python versions globals."""
def __init__(self, environment):
"""Extend environment by adding globals."""
super(PythonVersionExtension, self).__init__(environment)
environment.globals['python_version'] = PythonVersion()
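# Illustrative usage: registering the extension exposes the global in templates, e.g.
# Environment(extensions=[PythonVersionExtension]) lets a template render
# "{{ python_version }}" (major.minor) or "{{ python_version.micro }}".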
``` |
{
"source": "jmfer1/flagsmith-api",
"score": 3
} |
#### File: src/app/utils.py
```python
import shortuuid
def create_hash():
"""Helper function to create a short hash"""
return shortuuid.uuid()
```
#### File: src/environments/authentication.py
```python
from django.conf import settings
from django.core.cache import caches
from rest_framework.authentication import BaseAuthentication
from rest_framework.exceptions import AuthenticationFailed
from environments.models import Environment
environment_cache = caches[settings.ENVIRONMENT_CACHE_LOCATION]
class EnvironmentKeyAuthentication(BaseAuthentication):
"""
Custom authentication class to add the environment to the request for endpoints used by the clients.
"""
def authenticate(self, request):
try:
api_key = request.META.get("HTTP_X_ENVIRONMENT_KEY")
environment = Environment.get_from_cache(api_key)
except Environment.DoesNotExist:
raise AuthenticationFailed("Invalid or missing Environment Key")
if not self._can_serve_flags(environment):
raise AuthenticationFailed("Organisation is disabled from serving flags.")
request.environment = environment
# DRF authentication expects a two tuple to be returned containing User, auth
return None, None
def _can_serve_flags(self, environment):
return not environment.project.organisation.stop_serving_flags
```
#### File: integrations/datadog/datadog.py
```python
import json
import requests
from util.logging import get_logger
from util.util import postpone
logger = get_logger(__name__)
EVENTS_API_URI = "api/v1/events"
class DataDogWrapper:
def __init__(self, base_url: str, api_key: str):
self.base_url = base_url
self.api_key = api_key
self.url = f"{self.base_url}{EVENTS_API_URI}?api_key={self.api_key}"
def _track_event(self, event: dict) -> None:
response = requests.post(self.url, data=json.dumps(event))
logger.debug(
"Sent event to DataDog. Response code was %s" % response.status_code
)
@postpone
def track_event_async(self, event: dict) -> None:
self._track_event(event)
@staticmethod
def generate_event_data(log: str, email: str, environment_name: str):
event_data = {
"text": f"{log} by user {email}",
"title": "Bullet Train Feature Flag Event",
"tags": [f"env:{environment_name}"],
}
return event_data
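# Note: track_event_async wraps _track_event with the @postpone decorator (imported from
# util.util), which presumably defers the HTTP POST so event tracking does not block the
# caller; generate_event_data builds the payload sent to DataDog's events API endpoint.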
```
#### File: src/segments/models.py
```python
import hashlib
import re
import typing
from django.core.exceptions import ValidationError
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from environments.identities.models import Identity
from environments.identities.traits.models import Trait
from environments.models import BOOLEAN, FLOAT, INTEGER
from projects.models import Project
# Condition Types
EQUAL = "EQUAL"
GREATER_THAN = "GREATER_THAN"
LESS_THAN = "LESS_THAN"
LESS_THAN_INCLUSIVE = "LESS_THAN_INCLUSIVE"
CONTAINS = "CONTAINS"
GREATER_THAN_INCLUSIVE = "GREATER_THAN_INCLUSIVE"
NOT_CONTAINS = "NOT_CONTAINS"
NOT_EQUAL = "NOT_EQUAL"
REGEX = "REGEX"
PERCENTAGE_SPLIT = "PERCENTAGE_SPLIT"
@python_2_unicode_compatible
class Segment(models.Model):
name = models.CharField(max_length=2000)
description = models.TextField(null=True, blank=True)
project = models.ForeignKey(
Project, on_delete=models.CASCADE, related_name="segments"
)
def __str__(self):
return "Segment - %s" % self.name
def does_identity_match(
self, identity: Identity, traits: typing.List[Trait] = None
) -> bool:
rules = self.rules.all()
return rules.count() > 0 and all(
rule.does_identity_match(identity, traits) for rule in rules
)
def get_identity_percentage_value(self, identity: Identity) -> float:
"""
Given a segment and an identity, generate a number between 0 and 1 to determine whether the identity falls
within a given percentile when using percentage split rules.
"""
to_hash = f"{self.id},{identity.id}"
hashed_value = hashlib.md5(to_hash.encode("utf-8"))
hashed_value_as_int = int(hashed_value.hexdigest(), base=16)
return (hashed_value_as_int % 9999) / 9998
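# Worked example (illustrative IDs): for segment id 3 and identity id 7 the hash of "3,7"
# is fixed, so the same identity always lands in the same bucket in [0, 1]; a
# PERCENTAGE_SPLIT condition with value "30" then matches identities whose bucket value
# is at most 0.30.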
@python_2_unicode_compatible
class SegmentRule(models.Model):
ALL_RULE = "ALL"
ANY_RULE = "ANY"
NONE_RULE = "NONE"
RULE_TYPES = ((ALL_RULE, "all"), (ANY_RULE, "any"), (NONE_RULE, "none"))
segment = models.ForeignKey(
Segment, on_delete=models.CASCADE, related_name="rules", null=True, blank=True
)
rule = models.ForeignKey(
"self", on_delete=models.CASCADE, related_name="rules", null=True, blank=True
)
type = models.CharField(max_length=50, choices=RULE_TYPES)
def clean(self):
super().clean()
parents = [self.segment, self.rule]
num_parents = sum(parent is not None for parent in parents)
if num_parents != 1:
raise ValidationError(
"Segment rule must have exactly one parent, %d found", num_parents
)
def __str__(self):
return "%s rule for %s" % (
self.type,
str(self.segment) if self.segment else str(self.rule),
)
def does_identity_match(
self, identity: Identity, traits: typing.List[Trait] = None
) -> bool:
matches_conditions = False
conditions = self.conditions.all()
if conditions.count() == 0:
matches_conditions = True
elif self.type == self.ALL_RULE:
matches_conditions = all(
condition.does_identity_match(identity, traits)
for condition in conditions
)
elif self.type == self.ANY_RULE:
matches_conditions = any(
condition.does_identity_match(identity, traits)
for condition in conditions
)
elif self.type == self.NONE_RULE:
matches_conditions = not any(
condition.does_identity_match(identity, traits)
for condition in conditions
)
return matches_conditions and all(
rule.does_identity_match(identity, traits) for rule in self.rules.all()
)
def get_segment(self):
"""
Rules can be children of a parent rule instead of a segment; this method iterates back up the tree to find
the segment.
TODO: denormalise the segment information so that we don't have to make multiple queries here in complex cases
"""
rule = self
while not rule.segment:
rule = rule.rule
return rule.segment
@python_2_unicode_compatible
class Condition(models.Model):
CONDITION_TYPES = (
(EQUAL, "Exactly Matches"),
(GREATER_THAN, "Greater than"),
(LESS_THAN, "Less than"),
(CONTAINS, "Contains"),
(GREATER_THAN_INCLUSIVE, "Greater than or equal to"),
(LESS_THAN_INCLUSIVE, "Less than or equal to"),
(NOT_CONTAINS, "Does not contain"),
(NOT_EQUAL, "Does not match"),
(REGEX, "Matches regex"),
(PERCENTAGE_SPLIT, "Percentage split"),
)
operator = models.CharField(choices=CONDITION_TYPES, max_length=500)
property = models.CharField(blank=True, null=True, max_length=1000)
value = models.CharField(max_length=1000)
rule = models.ForeignKey(
SegmentRule, on_delete=models.CASCADE, related_name="conditions"
)
def __str__(self):
return "Condition for %s: %s %s %s" % (
str(self.rule),
self.property,
self.operator,
self.value,
)
def does_identity_match(
self, identity: Identity, traits: typing.List[Trait] = None
) -> bool:
if self.operator == PERCENTAGE_SPLIT:
return self._check_percentage_split_operator(identity)
# we allow passing in traits to handle when they aren't
# persisted for certain organisations
traits = identity.identity_traits.all() if traits is None else traits
for trait in traits:
if trait.trait_key == self.property:
if trait.value_type == INTEGER:
return self.check_integer_value(trait.integer_value)
if trait.value_type == FLOAT:
return self.check_float_value(trait.float_value)
elif trait.value_type == BOOLEAN:
return self.check_boolean_value(trait.boolean_value)
else:
return self.check_string_value(trait.string_value)
def _check_percentage_split_operator(self, identity):
try:
float_value = float(self.value) / 100.0
except ValueError:
return False
segment = self.rule.get_segment()
return segment.get_identity_percentage_value(identity) <= float_value
def check_integer_value(self, value: int) -> bool:
try:
int_value = int(str(self.value))
except ValueError:
return False
if self.operator == EQUAL:
return value == int_value
elif self.operator == GREATER_THAN:
return value > int_value
elif self.operator == GREATER_THAN_INCLUSIVE:
return value >= int_value
elif self.operator == LESS_THAN:
return value < int_value
elif self.operator == LESS_THAN_INCLUSIVE:
return value <= int_value
elif self.operator == NOT_EQUAL:
return value != int_value
return False
def check_float_value(self, value: float) -> bool:
try:
float_value = float(str(self.value))
except ValueError:
return False
if self.operator == EQUAL:
return value == float_value
elif self.operator == GREATER_THAN:
return value > float_value
elif self.operator == GREATER_THAN_INCLUSIVE:
return value >= float_value
elif self.operator == LESS_THAN:
return value < float_value
elif self.operator == LESS_THAN_INCLUSIVE:
return value <= float_value
elif self.operator == NOT_EQUAL:
return value != float_value
return False
def check_boolean_value(self, value: bool) -> bool:
if self.value in ("False", "false", "0"):
bool_value = False
elif self.value in ("True", "true", "1"):
bool_value = True
else:
return False
if self.operator == EQUAL:
return value == bool_value
elif self.operator == NOT_EQUAL:
return value != bool_value
return False
def check_string_value(self, value: str) -> bool:
try:
str_value = str(self.value)
except ValueError:
return False
if self.operator == EQUAL:
return value == str_value
elif self.operator == NOT_EQUAL:
return value != str_value
elif self.operator == CONTAINS:
return str_value in value
elif self.operator == NOT_CONTAINS:
return str_value not in value
elif self.operator == REGEX:
return re.compile(str(self.value)).match(value) is not None
``` |
{
"source": "jmfernandez/cwltool",
"score": 2
} |
#### File: cwltool/cwltool/load_tool.py
```python
import hashlib
import logging
import os
import re
import urllib
import uuid
from typing import (
Any,
Dict,
List,
MutableMapping,
MutableSequence,
Optional,
Tuple,
Union,
cast,
)
from ruamel.yaml.comments import CommentedMap, CommentedSeq
from schema_salad.exceptions import ValidationException
from schema_salad.ref_resolver import Loader, file_uri
from schema_salad.schema import validate_doc
from schema_salad.sourceline import SourceLine, cmap
from schema_salad.utils import (
ContextType,
FetcherCallableType,
IdxResultType,
ResolveType,
json_dumps,
)
from . import CWL_CONTENT_TYPES, process, update
from .context import LoadingContext
from .errors import WorkflowException
from .loghandler import _logger
from .process import Process, get_schema, shortname
from .update import ALLUPDATES
from .utils import CWLObjectType, ResolverType, visit_class
jobloaderctx = {
"cwl": "https://w3id.org/cwl/cwl#",
"cwltool": "http://commonwl.org/cwltool#",
"path": {"@type": "@id"},
"location": {"@type": "@id"},
"id": "@id",
} # type: ContextType
overrides_ctx = {
"overrideTarget": {"@type": "@id"},
"cwltool": "http://commonwl.org/cwltool#",
"http://commonwl.org/cwltool#overrides": {
"@id": "cwltool:overrides",
"mapSubject": "overrideTarget",
},
"requirements": {
"@id": "https://w3id.org/cwl/cwl#requirements",
"mapSubject": "class",
},
} # type: ContextType
def default_loader(
fetcher_constructor: Optional[FetcherCallableType] = None,
enable_dev: bool = False,
doc_cache: bool = True,
) -> Loader:
return Loader(
jobloaderctx,
fetcher_constructor=fetcher_constructor,
allow_attachments=lambda r: enable_dev,
doc_cache=doc_cache,
)
def resolve_tool_uri(
argsworkflow: str,
resolver: Optional[ResolverType] = None,
fetcher_constructor: Optional[FetcherCallableType] = None,
document_loader: Optional[Loader] = None,
) -> Tuple[str, str]:
uri = None # type: Optional[str]
split = urllib.parse.urlsplit(argsworkflow)
# In the case of a Windows path, urlsplit misjudges the drive letter as a scheme, so we skip that here
if split.scheme and split.scheme in ["http", "https", "file"]:
uri = argsworkflow
elif os.path.exists(os.path.abspath(argsworkflow)):
uri = file_uri(str(os.path.abspath(argsworkflow)))
elif resolver is not None:
uri = resolver(
document_loader or default_loader(fetcher_constructor), argsworkflow
)
if uri is None:
raise ValidationException("Not found: '%s'" % argsworkflow)
if argsworkflow != uri:
_logger.info("Resolved '%s' to '%s'", argsworkflow, uri)
fileuri = urllib.parse.urldefrag(uri)[0]
return uri, fileuri
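# Resolution order above: an explicit http/https/file URI is used as-is, an existing
# local path is converted to a file:// URI, and otherwise the optional resolver callback
# is consulted; failure to resolve raises ValidationException.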
def fetch_document(
argsworkflow: Union[str, CWLObjectType],
loadingContext: Optional[LoadingContext] = None,
) -> Tuple[LoadingContext, CommentedMap, str]:
"""Retrieve a CWL document."""
if loadingContext is None:
loadingContext = LoadingContext()
loadingContext.loader = default_loader()
else:
loadingContext = loadingContext.copy()
if loadingContext.loader is None:
loadingContext.loader = default_loader(
loadingContext.fetcher_constructor,
enable_dev=loadingContext.enable_dev,
doc_cache=loadingContext.doc_cache,
)
if isinstance(argsworkflow, str):
uri, fileuri = resolve_tool_uri(
argsworkflow,
resolver=loadingContext.resolver,
document_loader=loadingContext.loader,
)
workflowobj = cast(
CommentedMap,
loadingContext.loader.fetch(fileuri, content_types=CWL_CONTENT_TYPES),
)
return loadingContext, workflowobj, uri
if isinstance(argsworkflow, MutableMapping):
uri = (
cast(str, argsworkflow["id"])
if argsworkflow.get("id")
else "_:" + str(uuid.uuid4())
)
workflowobj = cast(
CommentedMap, cmap(cast(Dict[str, Any], argsworkflow), fn=uri)
)
loadingContext.loader.idx[uri] = workflowobj
return loadingContext, workflowobj, uri
raise ValidationException("Must be URI or object: '%s'" % argsworkflow)
def _convert_stdstreams_to_files(
workflowobj: Union[
CWLObjectType, MutableSequence[Union[CWLObjectType, str, int]], str
]
) -> None:
if isinstance(workflowobj, MutableMapping):
if workflowobj.get("class") == "CommandLineTool":
with SourceLine(
workflowobj,
"outputs",
ValidationException,
_logger.isEnabledFor(logging.DEBUG),
):
outputs = workflowobj.get("outputs", [])
if not isinstance(outputs, CommentedSeq):
raise ValidationException('"outputs" section is not ' "valid.")
for out in cast(
MutableSequence[CWLObjectType], workflowobj.get("outputs", [])
):
if not isinstance(out, CommentedMap):
raise ValidationException(
f"Output '{out}' is not a valid OutputParameter."
)
for streamtype in ["stdout", "stderr"]:
if out.get("type") == streamtype:
if "outputBinding" in out:
raise ValidationException(
"Not allowed to specify outputBinding when"
" using %s shortcut." % streamtype
)
if streamtype in workflowobj:
filename = workflowobj[streamtype]
else:
filename = str(
hashlib.sha1( # nosec
json_dumps(workflowobj, sort_keys=True).encode(
"utf-8"
)
).hexdigest()
)
workflowobj[streamtype] = filename
out["type"] = "File"
out["outputBinding"] = cmap({"glob": filename})
for inp in cast(
MutableSequence[CWLObjectType], workflowobj.get("inputs", [])
):
if inp.get("type") == "stdin":
if "inputBinding" in inp:
raise ValidationException(
"Not allowed to specify inputBinding when"
" using stdin shortcut."
)
if "stdin" in workflowobj:
raise ValidationException(
"Not allowed to specify stdin path when"
" using stdin type shortcut."
)
else:
workflowobj["stdin"] = (
"$(inputs.%s.path)"
% cast(str, inp["id"]).rpartition("#")[2]
)
inp["type"] = "File"
else:
for entry in workflowobj.values():
_convert_stdstreams_to_files(
cast(
Union[
CWLObjectType,
MutableSequence[Union[CWLObjectType, str, int]],
str,
],
entry,
)
)
if isinstance(workflowobj, MutableSequence):
for entry in workflowobj:
_convert_stdstreams_to_files(
cast(
Union[
CWLObjectType,
MutableSequence[Union[CWLObjectType, str, int]],
str,
],
entry,
)
)
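# Illustrative effect: an output declared with "type: stdout" is rewritten to
# "type: File" with an outputBinding glob on either the tool's declared stdout filename
# or, if none is given, a name derived from a SHA-1 of the tool document; the "stdin"
# type shortcut is handled analogously through the tool's inputs.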
def _add_blank_ids(
workflowobj: Union[CWLObjectType, MutableSequence[Union[CWLObjectType, str]]]
) -> None:
if isinstance(workflowobj, MutableMapping):
if (
"run" in workflowobj
and isinstance(workflowobj["run"], MutableMapping)
and "id" not in workflowobj["run"]
and "$import" not in workflowobj["run"]
):
workflowobj["run"]["id"] = str(uuid.uuid4())
for entry in workflowobj.values():
_add_blank_ids(
cast(
Union[CWLObjectType, MutableSequence[Union[CWLObjectType, str]]],
entry,
)
)
if isinstance(workflowobj, MutableSequence):
for entry in workflowobj:
_add_blank_ids(
cast(
Union[CWLObjectType, MutableSequence[Union[CWLObjectType, str]]],
entry,
)
)
def resolve_and_validate_document(
loadingContext: LoadingContext,
workflowobj: Union[CommentedMap, CommentedSeq],
uri: str,
preprocess_only: bool = False,
skip_schemas: Optional[bool] = None,
) -> Tuple[LoadingContext, str]:
"""Validate a CWL document."""
if not loadingContext.loader:
raise ValueError("loadingContext must have a loader.")
else:
loader = loadingContext.loader
loadingContext = loadingContext.copy()
if not isinstance(workflowobj, MutableMapping):
raise ValueError(
"workflowjobj must be a dict, got '{}': {}".format(
type(workflowobj), workflowobj
)
)
jobobj = None
if "cwl:tool" in workflowobj:
jobobj, _ = loader.resolve_all(workflowobj, uri)
uri = urllib.parse.urljoin(uri, workflowobj["https://w3id.org/cwl/cwl#tool"])
del cast(Dict[str, Any], jobobj)["https://w3id.org/cwl/cwl#tool"]
workflowobj = fetch_document(uri, loadingContext)[1]
fileuri = urllib.parse.urldefrag(uri)[0]
cwlVersion = loadingContext.metadata.get("cwlVersion")
if not cwlVersion:
cwlVersion = workflowobj.get("cwlVersion")
if not cwlVersion and fileuri != uri:
# The tool we're loading is a fragment of a bigger file. Get
# the document root element and look for cwlVersion there.
metadata = cast(CWLObjectType, fetch_document(fileuri, loadingContext)[1])
cwlVersion = cast(str, metadata.get("cwlVersion"))
if not cwlVersion:
raise ValidationException(
"No cwlVersion found. "
"Use the following syntax in your CWL document to declare "
"the version: cwlVersion: <version>.\n"
"Note: if this is a CWL draft-3 (pre v1.0) document then it "
"will need to be upgraded first using https://pypi.org/project/cwl-upgrader/ . "
"'sbg:draft-2' documents can be upgraded using "
"https://pypi.org/project/sevenbridges-cwl-draft2-upgrader/ ."
)
if not isinstance(cwlVersion, str):
with SourceLine(
workflowobj, "cwlVersion", ValidationException, loadingContext.debug
):
raise ValidationException(
f"'cwlVersion' must be a string, got {type(cwlVersion)}"
)
# strip out version
cwlVersion = re.sub(r"^(?:cwl:|https://w3id.org/cwl/cwl#)", "", cwlVersion)
if cwlVersion not in list(ALLUPDATES):
# print out all the Supported Versions of cwlVersion
versions = []
for version in list(ALLUPDATES):
if "dev" in version:
version += " (with --enable-dev flag only)"
versions.append(version)
versions.sort()
raise ValidationException(
"The CWL reference runner no longer supports pre CWL v1.0 "
"documents. Supported versions are: "
"\n{}".format("\n".join(versions))
)
if (
isinstance(jobobj, CommentedMap)
and "http://commonwl.org/cwltool#overrides" in jobobj
):
loadingContext.overrides_list.extend(resolve_overrides(jobobj, uri, uri))
del jobobj["http://commonwl.org/cwltool#overrides"]
if (
isinstance(jobobj, CommentedMap)
and "https://w3id.org/cwl/cwl#requirements" in jobobj
):
if cwlVersion not in ("v1.1.0-dev1", "v1.1"):
raise ValidationException(
"`cwl:requirements` in the input object is not part of CWL "
"v1.0. You can adjust to use `cwltool:overrides` instead; or you "
"can set the cwlVersion to v1.1 or greater."
)
loadingContext.overrides_list.append(
{
"overrideTarget": uri,
"requirements": jobobj["https://w3id.org/cwl/cwl#requirements"],
}
)
del jobobj["https://w3id.org/cwl/cwl#requirements"]
(sch_document_loader, avsc_names) = process.get_schema(cwlVersion)[:2]
if isinstance(avsc_names, Exception):
raise avsc_names
processobj = None # type: Optional[ResolveType]
document_loader = Loader(
sch_document_loader.ctx,
schemagraph=sch_document_loader.graph,
idx=loader.idx,
cache=sch_document_loader.cache,
fetcher_constructor=loadingContext.fetcher_constructor,
skip_schemas=skip_schemas,
doc_cache=loadingContext.doc_cache,
)
if cwlVersion == "v1.0":
_add_blank_ids(workflowobj)
document_loader.resolve_all(workflowobj, fileuri)
processobj, metadata = document_loader.resolve_ref(uri)
if not isinstance(processobj, (CommentedMap, CommentedSeq)):
raise ValidationException("Workflow must be a CommentedMap or CommentedSeq.")
if not hasattr(processobj.lc, "filename"):
processobj.lc.filename = fileuri
if loadingContext.metadata:
metadata = loadingContext.metadata
if not isinstance(metadata, CommentedMap):
raise ValidationException(
"metadata must be a CommentedMap, was %s" % type(metadata)
)
if isinstance(processobj, CommentedMap):
uri = processobj["id"]
_convert_stdstreams_to_files(workflowobj)
if isinstance(jobobj, CommentedMap):
loadingContext.jobdefaults = jobobj
loadingContext.loader = document_loader
loadingContext.avsc_names = avsc_names
loadingContext.metadata = metadata
if preprocess_only:
return loadingContext, uri
if loadingContext.do_validate:
validate_doc(avsc_names, processobj, document_loader, loadingContext.strict)
# None means default behavior (do update)
if loadingContext.do_update in (True, None):
if "cwlVersion" not in metadata:
metadata["cwlVersion"] = cwlVersion
processobj = update.update(
processobj, document_loader, fileuri, loadingContext.enable_dev, metadata
)
document_loader.idx[processobj["id"]] = processobj
def update_index(pr: CommentedMap) -> None:
if "id" in pr:
document_loader.idx[pr["id"]] = pr
visit_class(
processobj, ("CommandLineTool", "Workflow", "ExpressionTool"), update_index
)
return loadingContext, uri
def make_tool(
uri: Union[str, CommentedMap, CommentedSeq], loadingContext: LoadingContext
) -> Process:
"""Make a Python CWL object."""
if loadingContext.loader is None:
raise ValueError("loadingContext must have a loader")
resolveduri, metadata = loadingContext.loader.resolve_ref(uri)
processobj = None
if isinstance(resolveduri, MutableSequence):
for obj in resolveduri:
if obj["id"].endswith("#main"):
processobj = obj
break
if not processobj:
raise WorkflowException(
"Tool file contains graph of multiple objects, must specify "
"one of #%s"
% ", #".join(
urllib.parse.urldefrag(i["id"])[1] for i in resolveduri if "id" in i
)
)
elif isinstance(resolveduri, MutableMapping):
processobj = resolveduri
else:
raise Exception("Must resolve to list or dict")
tool = loadingContext.construct_tool_object(processobj, loadingContext)
if loadingContext.jobdefaults:
jobobj = loadingContext.jobdefaults
for inp in tool.tool["inputs"]:
if shortname(inp["id"]) in jobobj:
inp["default"] = jobobj[shortname(inp["id"])]
return tool
def load_tool(
argsworkflow: Union[str, CWLObjectType],
loadingContext: Optional[LoadingContext] = None,
) -> Process:
loadingContext, workflowobj, uri = fetch_document(argsworkflow, loadingContext)
loadingContext, uri = resolve_and_validate_document(
loadingContext, workflowobj, uri
)
return make_tool(uri, loadingContext)
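# load_tool is the convenience entry point: fetch_document retrieves and indexes the
# source, resolve_and_validate_document checks cwlVersion, applies overrides and schema
# validation (updating the document when updates are enabled), and make_tool constructs
# the Process object, applying any job defaults supplied via cwl:tool.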
def resolve_overrides(
ov: IdxResultType,
ov_uri: str,
baseurl: str,
) -> List[CWLObjectType]:
ovloader = Loader(overrides_ctx)
ret, _ = ovloader.resolve_all(ov, baseurl)
if not isinstance(ret, CommentedMap):
raise Exception("Expected CommentedMap, got %s" % type(ret))
cwl_docloader = get_schema("v1.0")[0]
cwl_docloader.resolve_all(ret, ov_uri)
return cast(List[CWLObjectType], ret["http://commonwl.org/cwltool#overrides"])
def load_overrides(ov: str, base_url: str) -> List[CWLObjectType]:
ovloader = Loader(overrides_ctx)
return resolve_overrides(ovloader.fetch(ov), ov, base_url)
def recursive_resolve_and_validate_document(
loadingContext: LoadingContext,
workflowobj: Union[CommentedMap, CommentedSeq],
uri: str,
preprocess_only: bool = False,
skip_schemas: Optional[bool] = None,
) -> Tuple[LoadingContext, str, Process]:
"""Validate a CWL document, checking that a tool object can be built."""
loadingContext, uri = resolve_and_validate_document(
loadingContext,
workflowobj,
uri,
preprocess_only=preprocess_only,
skip_schemas=skip_schemas,
)
tool = make_tool(uri, loadingContext)
return loadingContext, uri, tool
``` |
{
"source": "jmfernandez/logging2telegram-ng",
"score": 2
} |
#### File: jmfernandez/logging2telegram-ng/setup.py
```python
import os.path
from setuptools import setup
import sys
# In this way, we make sure we get the installer's version
# of the library, not the system's one
setupDir = os.path.dirname(__file__)
sys.path.insert(0, setupDir)
from log2tg_ng import __version__ as log2tg_ng_version
def long_description():
this_dir = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(this_dir, 'README.md'), encoding='utf-8') as f:
return f.read()
def requirements():
requirements_list = list()
with open('requirements.txt') as pc_requirements:
for install in pc_requirements:
requirements_list.append(install.strip())
return requirements_list
setup(
name='logging2telegram-ng',
version=log2tg_ng_version,
packages=['log2tg_ng'],
url='https://github.com/jmfernandez/loging2telegram-ng',
author='jmfernandez',
license='Apache License, Version 2.0, see LICENSE file',
description='Telegram logging handler (next generation)',
long_description=long_description(),
long_description_content_type='text/markdown',
install_requires=requirements(),
classifiers=[
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Environment :: Console',
'Development Status :: 3 - Alpha',
'Programming Language :: Python',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: Implementation :: PyPy',
]
)
``` |
{
"source": "jmfernandez/py_gridfs_fuse",
"score": 2
} |
#### File: py_gridfs_fuse/gridfs_fuse/main.py
```python
import logging
import argparse
import pyfuse3
import trio
import os
import sys
from pymongo.uri_parser import parse_uri
from .operations import operations_factory
FUSE_OPTIONS_HELP='''
FUSE options for mount (comma-separated) [default: %(default)s].
debug - turn on detailed debugging.
workers=N - number of workers [default: 1].
single - equivalent to workers=1 for pyfuse3 compatibility.
log_level=LEVEL - specifies the logging level.
log_file=FILE - specifies path for loging to file.
foreground - run process in foreground rather than as daemon process.
Note: Generic options can be found at: http://man7.org/linux/man-pages/man8/mount.fuse.8.html
'''
class HelpFormatter(argparse.HelpFormatter):
'''A custom formatter to rearrange order of positionals
and hide actions starting with _'''
# use defined argument order to display usage
def _format_usage(self, usage, actions, groups, prefix):
if prefix is None:
prefix = 'usage: '
# if usage is specified, use that
if usage is not None:
usage = usage % dict(prog=self._prog)
# if no optionals or positionals are available, usage is just prog
elif usage is None and not actions:
usage = '%(prog)s' % dict(prog=self._prog)
elif usage is None:
prog = '%(prog)s' % dict(prog=self._prog)
# build full usage string
actions_list = []
for a in actions:
if len(a.option_strings) > 0:
actions_list.append(a)
elif a.dest == 'help':
actions_list.insert(0, a)
elif a.dest.startswith('_'):
pass # hide these
else:
actions_list.insert(1, a) if len(actions_list) else actions_list.append(a)
action_usage = self._format_actions_usage(actions_list, groups) # NEW
usage = ' '.join([s for s in [prog, action_usage] if s])
# omit the long line wrapping code
# prefix with 'usage:'
return '%s%s\n\n' % (prefix, usage)
def _format_action(self, action):
if not action.dest.startswith('_'):
return super(self.__class__, self)._format_action(action)
class OrderedNamespace(argparse.Namespace):
'''Allows argument order to be retained'''
def __init__(self, **kwargs):
self.__dict__["_arg_order"] = []
self.__dict__["_arg_order_first_time_through"] = True
argparse.Namespace.__init__(self, **kwargs)
def __setattr__(self, name, value):
self.__dict__[name] = value
if name in self._arg_order and hasattr(self, "_arg_order_first_time_through"):
self.__dict__["_arg_order"] = []
delattr(self, "_arg_order_first_time_through")
self.__dict__["_arg_order"].append(name)
def _finalize(self):
if hasattr(self, "_arg_order_first_time_through"):
self.__dict__["_arg_order"] = []
delattr(self, "_arg_order_first_time_through")
def _latest_of(self, k1, k2):
try:
if self._arg_order.index(k1) > self._arg_order.index(k2):
return k1
except ValueError:
if k1 in self._arg_order:
return k1
return k2
def configure_parser(parser):
'''Configures CLI options'''
parser.add_argument(
'-m', '--mount-point',
dest='mount_point',
help="Path where to mount fuse/gridfs wrapper")
parser.add_argument(
'-u', '--mongodb-uri',
dest='mongodb_uri',
default="mongodb://127.0.0.1:27017/gridfs_fuse.fs",
help="""Connection string for MongoClient. http://goo.gl/abqY9 "
"[default: %(default)s]""")
parser.add_argument(
'-d', '--database',
dest='database',
default='gridfs_fuse',
help="Name of the database where the filesystem goes [default: %(default)s]")
parser.add_argument(
'-c', '--collection', dest='collection', default='fs',
help='Database collection for GridFS [default: %(default)s]')
parser.add_argument(
'-o', '--options', dest='mount_opts', action='append',
default=['default_permissions'],
help=FUSE_OPTIONS_HELP)
parser.add_argument(
'-l', '--log', dest='logfile', default=os.devnull,
const='gridfs_fuse.log', nargs='?',
help='Log actions to file [default: %(default)s]')
return parser
def fuse_configurator(parser):
'''Configure parser for mount CLI style of form: <srv> <mnt_pt> [-o <options>]'''
parser.add_argument('_script_path') # hack to fix ordering
parser.add_argument('mongodb_uri',
help="MongoDB connection URI in form "
"'mongodb://[user:password@]hostname[:port]/db.collection'")
parser.add_argument('mount_point',
help="Path to mount fuse gridfs filesystem")
parser.add_argument(
'-o', dest='mount_opts', action='append',
default=['default_permissions'], help=FUSE_OPTIONS_HELP)
return parser
def validate_options(options):
'''Validates parser arguments'''
uri = parse_uri(options.mongodb_uri)
# These are handled separately because either value may be None
database = uri.get('database')
if database is not None:
options.database = database
collection = uri.get('collection')
if collection is not None:
options.collection = collection
if not options.mount_point:
raise Exception("mount_point is mandatory")
def fuse_validator(options):
'''Validates parser arguments using mount interface'''
options.database = 'gridfs_fuse'
options.collection = 'fs'
validate_options(options)
opts = dict([opt.split('=', 1) if '=' in opt else (opt, None)
for opt in options.mount_opts])
options.logfile = opts.get('log_file', None)
# shamelessly *adapted* from the the borg collective (see - borgbackup project)
def daemonize():
"""Detach process from controlling terminal and run in background
Returns: a tuple of (old_pid, new_pid)
"""
old_id = os.getpid()
pid = os.fork()
if pid:
os._exit(0)
os.setsid()
pid = os.fork()
if pid:
os._exit(0)
new_id = os.getpid()
return old_id, new_id
def run_fuse_mount(ops, options, mount_opts):
'''Performs FUSE mount'''
mount_opts = ['fsname=gridfs'] + mount_opts
opts = dict((opt.split('=', 1) if '=' in opt else (opt, None) for opt in mount_opts))
# strip invalid keys
ignored_keys = ['debug', 'foreground', 'log_level', 'log_file', 'workers', 'single']
valid_keys = [k for k in opts if k not in ignored_keys]
mount_opts = set(pyfuse3.default_options)
for k in valid_keys:
if opts[k] is not None:
mount_opts.add('='.join([k, opts[k]]))
else:
mount_opts.add(k)
# handle some key options here
if 'log_level' in opts:
try:
log_level = opts['log_level'].upper()
try:
log_level = int(log_level)
except ValueError:
pass
logging.getLogger().setLevel(getattr(logging, log_level))
except (TypeError, ValueError) as error:
logging.warning('Unable to set log_level to {}: {}'.format(opts['log_level'], error))
# start gridfs bindings and run fuse process
pyfuse3.init(ops, options.mount_point, mount_opts)
# ensure that if 'single' is given without a value it evaluates to True
if 'single' in opts and opts['single'] is None:
opts['single'] = True
# debug clobbers other log settings such as log_level
if 'debug' in opts:
logging.basicConfig(
format='[%(asctime)s] pid=%(process)s {%(module)s:%(funcName)s():%(lineno)d} %(levelname)s - %(message)s',
level=logging.DEBUG)
# TODO: Find way of capturing CTRL+C and calling pyfuse3.close() when in foreground
# Note: This may be a bug in pyfuse3
workers = opts.get('workers', opts.get('single', 1)) # fudge for backwards compatibility
try:
trio.run(pyfuse3.main) # maintain compatibility with single/workers kwarg
except KeyboardInterrupt:
pass
finally:
pyfuse3.close()
def init(args, configure=configure_parser, validate=validate_options):
'''Initialise using specified parser config and validation'''
logging.basicConfig(
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
#format='[%(asctime)s] pid=%(process)s {%(module)s:%(funcName)s():%(lineno)d} %(levelname)s - %(message)s',
level=logging.INFO)
parser = argparse.ArgumentParser(formatter_class=HelpFormatter)
configure(parser)
options, _ = parser.parse_known_args(args, namespace=OrderedNamespace())
# flatten options list
flatten = lambda l: [item for sublist in l for item in sublist.split(',')]
options.mount_opts = flatten(options.mount_opts)
validate(options)
# have to fork the process before creating the MongoClient object, otherwise pymongo emits fork-safety warnings
if 'foreground' not in options.mount_opts:
pids = daemonize() # make the program run as non-blocking process
logging.debug('Daemonized parent process {} with child process {}'.format(*pids))
ops = operations_factory(options)
# TODO: Still not sure which options to use
# 'allow_other' Regardless who mounts it, all other users can access it
# 'default_permissions' Let the kernel do the permission checks
# 'nonempty' Allow mount on non empty directory
mount_opts = options.mount_opts
run_fuse_mount(ops, options, mount_opts)
def main(args=sys.argv):
'''Default interface'''
init(args, configure=configure_parser, validate=validate_options) # defaults
def _mount_fuse_main(args=sys.argv):
'''Interface for mount.fuse'''
init(args, configure=fuse_configurator, validate=fuse_validator)
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
pass
``` |
{
"source": "jmfife/vedirect-jmfife",
"score": 2
} |
#### File: vedirect-jmfife/examples/vedirect_mqtt.py
```python
import argparse, os
import paho.mqtt.client as mqtt
from vedirect import VEDirect
import logging
log = logging.getLogger(__name__)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Process VE.Direct protocol')
parser.add_argument('--port', help='Serial port')
parser.add_argument('--timeout', help='Serial port read timeout', type=int, default='60')
parser.add_argument('--mqttbroker', help='MQTT broker address', type=str, default='test.mosquitto.org')
parser.add_argument('--mqttbrokerport', help='MQTT broker port', type=int, default='1883')
parser.add_argument('--topicprefix', help='MQTT topic prefix', type=str, default='vedirect_device/')
parser.add_argument('--emulate', help='emulate one of [ALL, BMV_600, BMV_700, MPPT, PHX_INVERTER]',
default='', type=str)
parser.add_argument('--loglevel', help='logging level - one of [DEBUG, INFO, WARNING, ERROR, CRITICAL]',
default='ERROR')
args = parser.parse_args()
logging.basicConfig(level=args.loglevel.upper())
ve = VEDirect(args.port, args.timeout, args.emulate)
client = mqtt.Client()
client.connect(args.mqttbroker, args.mqttbrokerport, 60)
client.loop_start()
def mqtt_send_callback(packet):
for key, value in packet.items():
if key != 'SER#': # topic cannot contain MQTT wildcards
log.info(f"{args.topicprefix + key}: {value}")
client.publish(args.topicprefix + key, value)
ve.read_data_callback(mqtt_send_callback)
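# Example invocation (device path and broker are illustrative, not prescribed
# by this repository):
#   python vedirect_mqtt.py --port /dev/ttyUSB0 --mqttbroker test.mosquitto.org \
#       --topicprefix vedirect_device/ --loglevel INFO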
```
#### File: vedirect-jmfife/examples/vedirect_print.py
```python
import argparse, os
from vedirect import VEDirect
def print_data_callback(packet):
print(packet)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Process VE.Direct protocol')
parser.add_argument('--port', help='Serial port')
parser.add_argument('--timeout', help='Serial port read timeout', type=int, default='60')
parser.add_argument('--emulate', help='emulate one of [ALL, BMV_600, BMV_700, MPPT, PHX_INVERTER]',
default='', type=str)
args = parser.parse_args()
ve = VEDirect(args.port, args.timeout, args.emulate)
print(ve.read_data_callback(print_data_callback))
``` |
{
"source": "jmfiz/jpardobl-trastobrain",
"score": 2
} |
#### File: infrastructure/awsmultiprocess/test_comando_repository.py
```python
from trasto.infrastructure.awsmultiprocess.comando_repository import (
COMANDOS_QUEUE_NAME, ComandoRepository)
from trasto.infrastructure.memory.repositories import Idefier
from trasto.model.commands import (Comando, ComandoNuevaAccion,
ComandoNuevaTarea)
from trasto.model.entities import Accion, Tarea
from trasto.model.value_entities import Idd, TipoAccion
def test_comando_nueva_accion():
comando_repo = ComandoRepository()
cna = ComandoNuevaAccion(
idd=Idd(Idefier()),
accion=Accion(
idd=Idd(Idefier()),
nombre="nombreaccion",
script_url="url",
tipo=TipoAccion(nombre="buenhumor")
)
)
comando_repo.send_comando(cna)
count = 0
for ccna in comando_repo.next_comando():
assert not ccna is None
assert isinstance(ccna, ComandoNuevaAccion)
assert ccna.accion.nombre == "nombreaccion"
count = count + 1
break
assert count == 1
def test_comando_nueva_tarea():
comando_repo = ComandoRepository()
cnt_alta = ComandoNuevaTarea(
idd=Idd(Idefier()),
tarea=Tarea(
idd=Idd(Idefier()),
nombre="tareaalta",
parametros="parametros",
prioridad=1,
accionid="accion"
)
)
cnt_baja = ComandoNuevaTarea(
idd=Idd(Idefier()),
tarea=Tarea(
idd=Idd(Idefier()),
nombre="tareabaja",
parametros="parametros",
prioridad=0,
accionid="accion"
)
)
comando_repo.send_comando(cnt_alta)
comando_repo.send_comando(cnt_baja)
count = 0
for ccnt in comando_repo.next_comando():
assert isinstance(ccnt, ComandoNuevaTarea)
assert ccnt.tarea.nombre in ("tareabaja", "tareaalta")
print(f"vamos por contador: {count}")
count = count + 1
if count == 2:
break
assert count == 2
``` |
{
"source": "JMFlin/auto-preference-finder",
"score": 2
} |
#### File: frontend/app/app.py
```python
import hashlib
import tarfile
import streamlit as st
import logging
import os
import pandas as pd
import tensorflow as tf
import random
import time
from google.cloud import storage
from run import *
version = 'v1'
# tar -zcvf archive.tar.gz female/ && split -b 250M archive.tar.gz "archive.part" && rm archive.tar.gz
# find . -type f | awk -v N=10 -F / 'match($0, /.*\//, m) && a[m[0]]++ < N' | xargs -r -d '\n' tar -rvf backup.tar
logging.basicConfig(level=logging.INFO)
LOGGER = logging.getLogger(__name__)
@st.cache
def download_blobs():
"""Downloads a blob from the bucket."""
LOGGER.info(f'tf version: {tf.version.VERSION}')
if not os.path.exists("./images/"):
os.mkdir("./images/")
if not os.path.exists("./images/unclassified/"):
os.mkdir("./images/unclassified/")
if not os.path.exists('./images/unclassified/female'):
num = random.randint(1,29)
_, bucket = initialize_gcloud()
LOGGER.info(f'Beginning to download images from data_partitions/{version}/archive_partition_{num}.tar.gz')
blob = bucket.blob(f'data_partitions/{version}/archive_partition_{num}.tar.gz')
blob.download_to_filename('./images/unclassified/archive.tar.gz')
LOGGER.info(f'Extracting files from archive')
my_tar = tarfile.open('./images/unclassified/archive.tar.gz')
my_tar.extractall('./images/unclassified/')
my_tar.close()
time.sleep(1)
i = 0
while not os.path.exists('./images/unclassified/female'):
try:
os.rename(f'./images/unclassified/tmp{num}', './images/unclassified/female')
except Exception as e:
LOGGER.info(e)
pass
if i > 5:
break
i = i + 1
time.sleep(1)
LOGGER.info(f'Blobs downloaded\n')
def initialize_gcloud():
bucket_name = os.getenv('GCS_BUCKET')
storage_client = storage.Client()
bucket = storage_client.bucket(bucket_name)
return(storage_client, bucket)
def download_model(userid):
"""Downloads a blob from the bucket."""
if not os.path.exists(f'./saved_model/saved_model/model'):
_, bucket = initialize_gcloud()
try:
blob = bucket.blob(f'users/{userid}/saved_model/model.tar.gz')
LOGGER.info(f'Beginning to download model from bucket with url users/{userid}/saved_model/model.tar.gz')
blob.download_to_filename(f'./saved_model/model.tar.gz')
LOGGER.info(f'Blob downloaded')
time.sleep(2)
tar = tarfile.open('./saved_model/model.tar.gz')
tar.extractall(path="./saved_model/.")
tar.close()
LOGGER.info(f'Model extracted\n')
except:
pass
def initialize():
if not os.path.exists("./files"):
os.mkdir("./files")
if not os.path.exists("./saved_model"):
os.mkdir("./saved_model")
if not os.path.exists("./files/image_state_file.txt"):
with open("./files/image_state_file.txt", "w") as text_file:
print(0, file=text_file)
if not os.path.exists("./files/like_temporary_db_file.txt"):
open("./files/like_temporary_db_file.txt", "w+")
if not os.path.exists("./files/dislike_temporary_db_file.txt"):
open("./files/dislike_temporary_db_file.txt", "w+")
if not os.path.exists("./files/pass_temporary_file.txt"):
open("./files/pass_temporary_file.txt", "w+")
source_image_folder = f'./images/unclassified/female'
images = [f for f in os.listdir(f'{source_image_folder}') if os.path.isfile(os.path.join(f'{source_image_folder}', f))]
images, _, _, _ = sync_likes_dislikes(images)
return(images)
def like_image(image):
LOGGER.info(f'User liked image: {image}')
with open("./files/like_temporary_db_file.txt", "a+") as file_object:
file_object.write(str(image) + os.linesep)
def dislike_image(image):
LOGGER.info(f'User disliked image: {image}')
with open("./files/dislike_temporary_db_file.txt", "a+") as file_object:
file_object.write(str(image) + os.linesep)
def pass_image(image):
LOGGER.info(f'User passed image: {image}')
with open("./files/pass_temporary_file.txt", "a+") as file_object:
file_object.write(str(image) + os.linesep)
def show_image(result):
st.image(image = f'./images/unclassified/female/{result}', width = 400, use_column_width=False)
def image_state() -> int:
if os.path.exists("./files/image_state_file.txt"):
with open('./files/image_state_file.txt') as f:
samples_count = int(f.readline())
samples_count = samples_count + 1
with open("./files/image_state_file.txt", "w") as text_file:
print(samples_count, file=text_file)
else:
samples_count = 0
return(samples_count)
def restart():
LOGGER.info(f'\nUser restarted image labeling\n')
if os.path.exists("./files/image_state_file.txt"):
os.remove("./files/image_state_file.txt")
if os.path.exists("./files/like_temporary_db_file.txt"):
os.remove("./files/like_temporary_db_file.txt")
if os.path.exists("./files/dislike_temporary_db_file.txt"):
os.remove("./files/dislike_temporary_db_file.txt")
if os.path.exists("./files/pass_temporary_file.txt"):
os.remove("./files/pass_temporary_file.txt")
if os.path.exists("./files/model_load_state.txt"):
os.remove("./files/model_load_state.txt")
if os.path.exists("./files/preferences.csv"):
os.remove("./files/preferences.csv")
source_image_folder = "./images/unclassified/female"
images = [f for f in os.listdir(f'{source_image_folder}') if os.path.isfile(os.path.join(f'{source_image_folder}', f))]
return(images)
def prepend_line(file_name, line):
""" Insert given string as a new line at the beginning of a file """
# define name of temporary dummy file
dummy_file = file_name + '.bak'
# open original file in read mode and dummy file in write mode
with open(file_name, 'r') as read_obj, open(dummy_file, 'w') as write_obj:
# Write given line to the dummy file
write_obj.write(line + '\n')
# Read lines from original file one by one and append them to the dummy file
for line in read_obj:
write_obj.write(line)
# remove original file
os.remove(file_name)
# Rename dummy file as the original file
os.rename(dummy_file, file_name)
def merge_files(userid):
prepend_line("./files/like_temporary_db_file.txt", "likes")
prepend_line("./files/dislike_temporary_db_file.txt", "dislikes")
likes = pd.read_csv('./files/like_temporary_db_file.txt')
dislikes = pd.read_csv('./files/dislike_temporary_db_file.txt')
merged = pd.concat([likes.reset_index(drop=True), dislikes], axis=1)
merged['userid'] = userid
merged.to_csv('./files/preferences.csv', sep = ",", index = False)
def upload_blob(userid):
client = storage.Client()
bucket_name = os.getenv('GCS_BUCKET')
LOGGER.info(f'\nUser uploading files to {bucket_name}\n')
bucket = client.get_bucket(bucket_name)
blob = bucket.blob(f'users/{userid}/preferences.csv')
blob.upload_from_filename('./files/preferences.csv')
LOGGER.info(f'File preferences.csv uploaded to {bucket_name}/users/{userid}/preferences.csv')
def hash_identifier(identifier) -> str:
identifier = identifier.lower()
return hashlib.sha256(identifier.encode()).hexdigest()
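# e.g. hash_identifier('Someone@Example.com') == hash_identifier('someone@example.com'),
# since the identifier is lower-cased before hashing; the result is a 64-character
# hex digest suitable for use as an anonymised user key.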
def sync_likes_dislikes(images):
if os.path.exists("./files/dislike_temporary_db_file.txt"):
with open('./files/dislike_temporary_db_file.txt') as f:
dislikes = [line.rstrip() for line in f]
else:
dislikes = []
if os.path.exists("./files/like_temporary_db_file.txt"):
with open('./files/like_temporary_db_file.txt') as f:
likes = [line.rstrip() for line in f]
else:
likes = []
if os.path.exists("./files/pass_temporary_file.txt"):
with open('./files/pass_temporary_file.txt') as f:
passes = [line.rstrip() for line in f]
else:
passes = []
images = [x for x in images if x not in dislikes]
images = [x for x in images if x not in likes]
images = [x for x in images if x not in passes]
return(images, likes, dislikes, passes)
def main():
mainHeader = st.empty()
st.sidebar.markdown(body = """
## Information about the app
This web app is for creating a training set of your preferences for Tinder. These samples will then be used to train either ResNet50 + FixMatch or a VGG16 transfer-learning model. This is a hobby application and in no way related to Match Group's official Tinder app.
""")
userid = st.text_input("Please enter your x-auth-token", "", type='password')
#if my_profile.email is not None:
# userid = my_profile.email
# userid = hash_identifier(userid)
#else:
# logger.error("Failed to get an identifier")
# st.write("Unable to connect to your tinder account")
if userid:
options = ['Do nothing!', 'Set preferences', 'Watch your model play tinder']
selected_option = st.sidebar.selectbox("Select value:", options)
if selected_option == options[1]:
mainHeader.title(body = 'Preference creator')
download_blobs()
images = initialize()
if os.path.exists("./files/image_state_file.txt"):
with open('./files/image_state_file.txt') as f:
samples_count = int(f.readline())
else:
samples_count = 0
if st.sidebar.button(label='Like'):
like_image(images[samples_count])
samples_count = image_state()
if st.sidebar.button(label='Dislike'):
dislike_image(images[samples_count])
samples_count = image_state()
if st.sidebar.button(label='Pass'):
pass_image(images[samples_count])
if st.sidebar.button(label='Done'):
if len(userid) > 0:
merge_files(userid)
upload_blob(userid)
images = restart()
samples_count = 0
st.write("The sample data has been recorded and the training process will begin shortly!")
else:
st.write("Please enter a user id")
if st.sidebar.button(label = 'Restart'):
images = restart()
samples_count = 0
images, likes, dislikes, passes = sync_likes_dislikes(images)
st.write(f'Likes: {len(likes)} Dislikes: {len(dislikes)} Passes: {len(passes)}')
LOGGER.info(f'Images: {len(images)} Likes: {len(likes)} Dislikes: {len(dislikes)} Passes: {len(passes)}')
st.write(f'{round(samples_count / 250 * 100, 1)}% complete to minimum suggested amount')
show_image(images[samples_count])
if selected_option == options[2]:
mainHeader.title(body = 'Auto Tinder')
token = userid
play_num = st.number_input ("Please enter how many profiles to score", min_value=0, max_value=100, value=0, step=1)
if play_num >= 1:
selected_option = play(token, play_num)
def play(token, play_num):
api = tinderAPI(token)
session_matches = []
total_session_likes = play_num
likes = 0
totalSessionMatches = st.empty()
sessionMatches = st.empty()
end_time = time.time() + 60*60*2
model = tf.keras.models.load_model('./saved_model/saved_model/model')
if not os.path.exists("./images/"):
os.mkdir("./images/")
if not os.path.exists("./images/tmp/"):
os.mkdir("./images/tmp/")
while time.time() < end_time:
persons = api.nearby_persons()
#pos_schools = ["Universität Zürich", "University of Zurich", "UZH"]
LOGGER.info(f'\nFound {len(persons)} persons nearby')
for person in persons:
LOGGER.info("#---------------------------------#")
LOGGER.info(f'Analyzing {person}')
score = person.predict_likeliness(model) #model
LOGGER.info(f'Profile has a total score of {score}')
#for school in pos_schools:
# if school in person.schools:
# print()
# score *= 1.2
if score > 0.5:
res = person.like()
st.write('LIKE')
LOGGER.info('LIKE')
LOGGER.info(f'Is match: {res["is_match"]}')
if res["is_match"]:
session_matches.append(person.name)
else:
res = person.dislike()
st.write('DISLIKE')
LOGGER.info('DISLIKE')
likes = likes + 1
LOGGER.info(f'Session likes + dislikes is {likes} / {total_session_likes}')
if likes >= total_session_likes:
break
time.sleep(2)
if likes >= total_session_likes:
break
LOGGER.info(f'Total matches this session was {len(session_matches)}')
totalSessionMatches.text(f'Total matches this session was {len(session_matches)}')
if len(session_matches) > 0:
LOGGER.info(f'These are {json.dumps(session_matches)}')
sessionMatches.text(f'These are {json.dumps(session_matches)}')
return('Do nothing!')
main()
``` |
{
"source": "jmfloreszazo/HandsOnIoTAzure",
"score": 3
} |
#### File: HandsOnIoTAzure/pyAzureServiceBusTest/main.py
```python
from azure.servicebus import ServiceBusClient, ServiceBusMessage
CONNECTION_STR = "Endpoint=sb://pythontest.servicebus.windows.net/;SharedAccessKeyName=RootManageSharedAccessKey;SharedAccessKey=<KEY>"
QUEUE_NAME = "testQueue"
def send_single_message(sender):
message = ServiceBusMessage("Single Message")
sender.send_messages(message)
print("Sent a single message")
def send_a_list_of_messages(sender):
messages = [ServiceBusMessage("Message in list") for _ in range(5)]
sender.send_messages(messages)
print("Sent a list of 5 messages")
def send_batch_message(sender):
batch_message = sender.create_message_batch()
for _ in range(10):
try:
batch_message.add_message(ServiceBusMessage(
"Message inside a ServiceBusMessageBatch"))
except ValueError:
break
sender.send_messages(batch_message)
print("Sent a batch of 10 messages")
servicebus_client = ServiceBusClient.from_connection_string(
conn_str=CONNECTION_STR, logging_enable=True)
with servicebus_client:
sender = servicebus_client.get_queue_sender(queue_name=QUEUE_NAME)
with sender:
send_single_message(sender)
send_a_list_of_messages(sender)
send_batch_message(sender)
print("Done sending messages. Now you can review in Azure Portal.")
print("----------------------------------------------------------")
input("Press Enter to continue...")
with servicebus_client:
receiver = servicebus_client.get_queue_receiver(
queue_name=QUEUE_NAME, max_wait_time=5)
with receiver:
for msg in receiver:
print("Received: " + str(msg))
receiver.complete_message(msg)
```
#### File: HandsOnIoTAzure/pyHelloIoTDevice/iotHelloWorldDevice.py
```python
from azure.iot.device import IoTHubDeviceClient, Message
from time import sleep
CONNECTION_STRING = "[Your Connection String]"
RECEIVED_MESSAGES = 0
message_text = "{'Hello World!'}"
def connection_client():
try:
client = IoTHubDeviceClient.create_from_connection_string(
CONNECTION_STRING)
return client
except KeyboardInterrupt:
print("Stopped!")
def run_simulation(client):
client = client
client.on_message_received = message_handler
while True:
message = Message(message_text)
print("Sending message: {}".format(message))
client.send_message(message)
print("Message successfully sent")
sleep(10)
def message_handler(message):
global RECEIVED_MESSAGES
RECEIVED_MESSAGES += 1
print("")
print("Message received:")
for property in vars(message).items():
print(" {}".format(property))
print("Total calls received: {}".format(RECEIVED_MESSAGES))
if __name__ == '__main__':
print("Started simulated device")
print("Press Ctrl-C to exit")
run_simulation(connection_client())
```
#### File: HandsOnIoTAzure/pyHelloWorld/sampleobject.py
```python
class SampleObject:
@property
def TestProperty(self):
test = 0
test = 1
return test
def __init__(self):
# ...
print("Need some code for linter.")
def __str__(self):
return "I'm a sample object."
``` |
{
"source": "jmflorez/pymatgen",
"score": 3
} |
#### File: pymatgen/alchemy/materials.py
```python
from __future__ import division
__author__ = "<NAME>, <NAME>"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "1.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__date__ = "Mar 2, 2012"
import os
import re
import json
import datetime
from copy import deepcopy
from pymatgen.core.structure import Structure
from pymatgen.io.cifio import CifParser
from pymatgen.io.vaspio.vasp_input import Poscar
from pymatgen.serializers.json_coders import MSONable, PMGJSONDecoder
from pymatgen.matproj.snl import StructureNL
from warnings import warn
dec = PMGJSONDecoder()
class TransformedStructure(MSONable):
"""
Container object for new structures that include history of
transformations.
Each transformed structure is made up of a sequence of structures with
associated transformation history.
"""
def __init__(self, structure, transformations=None, history=None,
other_parameters=None):
self.final_structure = structure
self.history = history or []
self.other_parameters = other_parameters or {}
self._undone = []
transformations = transformations or []
for t in transformations:
self.append_transformation(t)
def undo_last_change(self):
"""
Undo the last change in the TransformedStructure.
Raises:
IndexError if already at the oldest change.
"""
if len(self.history) == 0:
raise IndexError("Can't undo. Already at oldest change.")
if 'input_structure' not in self.history[-1]:
raise IndexError("Can't undo. Latest history has no "
"input_structure")
h = self.history.pop()
self._undone.append((h, self.final_structure))
s = h["input_structure"]
if isinstance(s, dict):
s = Structure.from_dict(s)
self.final_structure = s
def redo_next_change(self):
"""
Redo the last undone change in the TransformedStructure.
Raises:
IndexError if already at the latest change.
"""
if len(self._undone) == 0:
raise IndexError("Can't redo. Already at latest change.")
h, s = self._undone.pop()
self.history.append(h)
self.final_structure = s
def __getattr__(self, name):
s = object.__getattribute__(self, 'final_structure')
return getattr(s, name)
def __len__(self):
return len(self.history)
def append_transformation(self, transformation, return_alternatives=False,
clear_redo=True):
"""
Appends a transformation to the TransformedStructure.
Args:
transformation:
Transformation to append
return_alternatives:
Whether to return alternative TransformedStructures for
one-to-many transformations. return_alternatives can be a
number, which stipulates the total number of structures to
return.
clear_redo:
Boolean indicating whether to clear the redo list. By default,
this is True, meaning any appends clears the history of
undoing. However, when using append_transformation to do a
redo, the redo list should not be cleared to allow multiple
redos.
"""
if clear_redo:
self._undone = []
if return_alternatives and transformation.is_one_to_many:
ranked_list = transformation.apply_transformation(
self.final_structure, return_ranked_list=return_alternatives)
input_structure = self.final_structure.to_dict
alts = []
for x in ranked_list[1:]:
s = x.pop("structure")
actual_transformation = x.pop("transformation", transformation)
hdict = actual_transformation.to_dict
hdict["input_structure"] = input_structure
hdict["output_parameters"] = x
self.final_structure = s
d = self.to_dict
d['history'].append(hdict)
d['final_structure'] = s.to_dict
alts.append(TransformedStructure.from_dict(d))
x = ranked_list[0]
s = x.pop("structure")
actual_transformation = x.pop("transformation", transformation)
hdict = actual_transformation.to_dict
hdict["input_structure"] = self.final_structure.to_dict
hdict["output_parameters"] = x
self.history.append(hdict)
self.final_structure = s
return alts
else:
s = transformation.apply_transformation(self.final_structure)
hdict = transformation.to_dict
hdict["input_structure"] = self.final_structure.to_dict
hdict["output_parameters"] = {}
self.history.append(hdict)
self.final_structure = s
def append_filter(self, structure_filter):
"""
Adds a transformation parameter to the last transformation.
"""
hdict = structure_filter.to_dict
hdict["input_structure"] = self.final_structure.to_dict
self.history.append(hdict)
def extend_transformations(self, transformations):
"""
Extends a sequence of transformations to the TransformedStructure.
Args:
transformations:
Sequence of Transformations
"""
for t in transformations:
self.append_transformation(t)
def get_vasp_input(self, vasp_input_set, generate_potcar=True):
"""
Returns VASP input as a dict of vaspio objects.
Args:
vasp_input_set:
pymatgen.io.vaspio_set.VaspInputSet like object that creates
vasp input files from structures
generate_potcar:
Set to False to generate a POTCAR.spec file instead of a
POTCAR, which contains the POTCAR labels but not the actual
POTCAR. Defaults to True.
"""
d = vasp_input_set.get_all_vasp_input(self.final_structure,
generate_potcar)
d["transformations.json"] = json.dumps(self.to_dict)
return d
def write_vasp_input(self, vasp_input_set, output_dir,
create_directory=True):
"""
Writes VASP input to an output_dir.
Args:
vasp_input_set:
pymatgen.io.vaspio_set.VaspInputSet like object that creates
vasp input files from structures
output_dir:
Directory to output files
create_directory:
Create the directory if not present. Defaults to True.
"""
vasp_input_set.write_input(self.final_structure, output_dir,
make_dir_if_not_present=create_directory)
with open(os.path.join(output_dir, "transformations.json"), "w") as fp:
json.dump(self.to_dict, fp)
def __str__(self):
output = ["Current structure", "------------",
str(self.final_structure),
"\nHistory",
"------------"]
for h in self.history:
h.pop('input_structure', None)
output.append(str(h))
output.append("\nOther parameters")
output.append("------------")
output.append(str(self.other_parameters))
return "\n".join(output)
def set_parameter(self, key, value):
self.other_parameters[key] = value
@property
def was_modified(self):
"""
Boolean describing whether the last transformation on the structure
made any alterations to it. One example of when this would return False
is performing a substitution transformation on the structure when the
specie to replace isn't in the structure.
"""
return not self.final_structure == self.structures[-2]
@property
def structures(self):
"""
Returns a copy of all structures in the TransformedStructure. A
structure is stored after every single transformation.
"""
hstructs = [Structure.from_dict(s['input_structure'])
for s in self.history if 'input_structure' in s]
return hstructs + [self.final_structure]
@staticmethod
def from_cif_string(cif_string, transformations=None, primitive=True,
occupancy_tolerance=1.):
"""
Generates TransformedStructure from a cif string.
Args:
cif_string:
Input cif string. Should contain only one structure. For cifs
containing multiple structures, please use CifTransmuter.
transformations:
Sequence of transformations to be applied to the input
structure.
primitive:
Option to set if the primitive cell should be extracted.
Defaults to True. However, there are certain instances where
you might want to use a non-primitive cell, e.g., if you are
trying to generate all possible orderings of partial removals
or order a disordered structure.
occupancy_tolerance:
If total occupancy of a site is between 1 and
occupancy_tolerance, the occupancies will be scaled down to 1.
"""
parser = CifParser.from_string(cif_string, occupancy_tolerance)
raw_string = re.sub("'", "\"", cif_string)
cif_dict = parser.to_dict
cif_keys = cif_dict.keys()
s = parser.get_structures(primitive)[0]
partial_cif = cif_dict[cif_keys[0]]
if "_database_code_ICSD" in partial_cif:
source = partial_cif["_database_code_ICSD"] + "-ICSD"
else:
source = "uploaded cif"
source_info = {"source": source,
"datetime": str(datetime.datetime.now()),
"original_file": raw_string,
"cif_data": cif_dict[cif_keys[0]]}
return TransformedStructure(s, transformations, history=[source_info])
@staticmethod
def from_poscar_string(poscar_string, transformations=None):
"""
Generates TransformedStructure from a poscar string.
Args:
poscar_string:
Input POSCAR string.
transformations:
Sequence of transformations to be applied to the input
structure.
"""
p = Poscar.from_string(poscar_string)
if not p.true_names:
raise ValueError("Transformation can be craeted only from POSCAR "
"strings with proper VASP5 element symbols.")
raw_string = re.sub("'", "\"", poscar_string)
s = p.structure
source_info = {"source": "POSCAR",
"datetime": str(datetime.datetime.now()),
"original_file": raw_string}
return TransformedStructure(s, transformations, history=[source_info])
@property
def to_dict(self):
"""
Returns a dict representation of the TransformedStructure.
"""
d = self.final_structure.to_dict
d["@module"] = self.__class__.__module__
d["@class"] = self.__class__.__name__
d["history"] = deepcopy(self.history)
d["version"] = __version__
d["last_modified"] = str(datetime.datetime.utcnow())
d["other_parameters"] = deepcopy(self.other_parameters)
return d
@classmethod
def from_dict(cls, d):
"""
Creates a TransformedStructure from a dict.
"""
s = Structure.from_dict(d)
return cls(s, history=d["history"],
other_parameters=d.get("other_parameters", None))
def to_snl(self, authors, projects=None, references='', remarks=None,
data=None, created_at=None):
if self.other_parameters:
warn('Data in TransformedStructure.other_parameters discarded '
'during type conversion to SNL')
hist = []
for h in self.history:
snl_metadata = h.pop('_snl', {})
hist.append({'name' : snl_metadata.pop('name', 'pymatgen'),
'url' : snl_metadata.pop('url',
'http://pypi.python.org/pypi/pymatgen'),
'description' : h})
return StructureNL(self.final_structure, authors, projects, references,
remarks, data, hist, created_at)
@classmethod
def from_snl(cls, snl):
"""
Create TransformedStructure from SNL.
Args:
snl:
Starting StructureNL object.
Returns:
TransformedStructure.
"""
hist = []
for h in snl.history:
d = h.description
d['_snl'] = {'url' : h.url, 'name' : h.name}
hist.append(d)
return cls(snl.structure, history=hist)
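# Rough usage sketch (SubstitutionTransformation assumed to come from
# pymatgen.transformations.standard_transformations; poscar_str and
# vasp_input_set are placeholders):
#   ts = TransformedStructure.from_poscar_string(
#       poscar_str, transformations=[SubstitutionTransformation({"Li": "Na"})])
#   ts.write_vasp_input(vasp_input_set, "transformed_run")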
```
#### File: analysis/pourbaix/analyzer.py
```python
__author__ = "<NAME>"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
__date__ = "Nov 7, 2012"
import numpy as np
from pyhull.simplex import Simplex
from functools import cmp_to_key
from pyhull.halfspace import Halfspace, HalfspaceIntersection
from pyhull.convex_hull import ConvexHull
class PourbaixAnalyzer(object):
"""
Class for performing analysis on Pourbaix Diagrams
"""
numerical_tol = 1e-8
def __init__(self, pd):
"""
Args:
pd:
Pourbaix Diagram to analyze.
"""
self._pd = pd
self._keys = ['H+', 'V', '1']
self.chempot_limits = None
def get_facet_chempots(self, facet):
"""
Calculates the chemical potentials for each element within a facet.
Args:
facet:
Facet of the phase diagram.
Returns:
{ element: chempot } for all elements in the phase diagram.
"""
entrylist = [self._pd.qhull_entries[i] for i in facet]
energylist = [self._pd.qhull_entries[i].g0 for i in facet]
m = self._make_comp_matrix(entrylist)
chempots = np.dot(np.linalg.inv(m), energylist)
return dict(zip(self._keys, chempots))
def _make_comp_matrix(self, entrylist):
"""
Helper function to generates a normalized composition matrix from a
list of Pourbaix Entries
"""
return np.array([[entry.npH, entry.nPhi, 1] for entry in entrylist])
def get_chempot_range_map(self, limits=[[-2,16], [-4,4]]):
"""
Returns a chemical potential range map for each stable entry.
Args:
limits:
2D list of [pH_min, pH_max] and [V_min, V_max] bounding the pH and
applied-potential ranges of the diagram.
Defaults to [[-2, 16], [-4, 4]].
Returns:
Returns a dict of the form {entry: [simplices]}. The list of
simplices are the sides of the N-1 dim polytope bounding the
allowable chemical potential range of each entry.
"""
tol = PourbaixAnalyzer.numerical_tol
all_chempots = []
facets = self._pd.facets
entries = self._pd.qhull_entries
for facet in facets:
chempots = self.get_facet_chempots(facet)
chempots["H+"] /= -0.0591
chempots["V"] = -chempots["V"]
chempots["1"] = chempots["1"]
all_chempots.append([chempots[el] for el in self._keys])
basis_vecs = []
on_plane_points = []
# Create basis vectors
for row in self._pd._qhull_data:
on_plane_points.append([0, 0, row[2]])
this_basis_vecs = []
norm_vec = [-0.0591 * row[0], -1 * row[1], 1]
if abs(norm_vec[0]) > tol:
this_basis_vecs.append([-norm_vec[2]/norm_vec[0], 0, 1])
if abs(norm_vec[1]) > tol:
this_basis_vecs.append([0, -norm_vec[2]/norm_vec[1], 1])
if len(this_basis_vecs) == 0:
basis_vecs.append([[1, 0, 0], [0, 1, 0]])
elif len(this_basis_vecs) == 1:
if abs(this_basis_vecs[0][0]) < tol:
this_basis_vecs.append([1, 0, 0])
else:
this_basis_vecs.append([0, 1, 0])
basis_vecs.append(this_basis_vecs)
else:
basis_vecs.append(this_basis_vecs)
# Find point in half-space in which optimization is desired
ph_max_contrib = -1 * max([abs(0.0591 * row[0])
for row in self._pd._qhull_data]) * limits[0][1]
V_max_contrib = -1 * max([abs(row[1]) for row in self._pd._qhull_data]) * limits[1][1]
g_max = (-1 * max([abs(pt[2]) for pt in on_plane_points])
+ ph_max_contrib + V_max_contrib) - 10
point_in_region = [7, 0, g_max]
# Append border hyperplanes along limits
for i in xrange(len(limits)):
for j in xrange(len(limits[i])):
basis_vec_1 = [0.0] * 3
basis_vec_2 = [0.0] * 3
point = [0.0] * 3
basis_vec_1[2] = 1.0
basis_vec_2[2] = 0.0
for axis in xrange(len(limits)):
if axis != i:
basis_vec_1[axis] = 0.0
basis_vec_2[axis] = 1.0
basis_vecs.append([basis_vec_1, basis_vec_2])
point[i] = limits[i][j]
on_plane_points.append(point)
# Hyperplane enclosing the very bottom
basis_vecs.append([[1, 0, 0], [0, 1, 0]])
on_plane_points.append([0, 0, 2 * g_max])
hyperplane_list = [Halfspace.from_hyperplane(basis_vecs[i], on_plane_points[i], point_in_region)
for i in xrange(len(basis_vecs))]
hs_int = HalfspaceIntersection(hyperplane_list, point_in_region)
int_points = hs_int.vertices
pourbaix_domains = {}
self.pourbaix_domain_vertices = {}
for i in xrange(len(self._pd._qhull_data)):
vertices = [[int_points[vert][0], int_points[vert][1]] for vert in
hs_int.facets_by_halfspace[i]]
if len(vertices) < 1:
continue
pourbaix_domains[self._pd._qhull_entries[i]] = ConvexHull(vertices).simplices
# Need to order vertices for highcharts area plot
cx = sum([vert[0] for vert in vertices]) / len(vertices)
cy = sum([vert[1] for vert in vertices]) / len(vertices)
point_comp = lambda x, y: x[0]*y[1] - x[1]*y[0]
vert_center = [[v[0] - cx, v[1] - cy] for v in vertices]
vert_center.sort(key=cmp_to_key(point_comp))
self.pourbaix_domain_vertices[self._pd._qhull_entries[i]] =\
[[v[0] + cx, v[1] + cy] for v in vert_center]
self.pourbaix_domains = pourbaix_domains
return pourbaix_domains
def _in_facet(self, facet, entry):
"""
Checks if a Pourbaix Entry is in a facet.
Args:
facet:
facet to test.
entry:
Pourbaix Entry to test.
"""
dim = len(self._keys)
if dim > 1:
coords = [np.array(self._pd.qhull_data[facet[i]][0:dim - 1])
for i in xrange(len(facet))]
simplex = Simplex(coords)
comp_point = [entry.npH, entry.nPhi]
return simplex.in_simplex(comp_point,
PourbaixAnalyzer.numerical_tol)
else:
return True
def _get_facets(self, entry):
"""
Get the facets that an entry falls into.
"""
memberfacets = list()
for facet in self._pd.facets:
if self._in_facet(facet, entry):
memberfacets.append(facet)
return memberfacets
def _get_facet(self, entry):
"""
Get any facet that a composition falls into.
"""
for facet in self._pd.facets:
if self._in_facet(facet, entry):
return facet
raise RuntimeError("No facet found for comp = {}".format(entry.name))
def _get_facet_entries(self, facet):
"""
Get the entries corresponding to a facet
"""
entries = []
for vertex in facet:
entries.append(self._pd.qhull_entries[vertex])
return entries
def g(self, entry, pH, V):
"""
Get free energy for a given pH, and V.
"""
g0 = entry.g0
npH = -entry.npH * 0.0591
nPhi = -entry.nPhi
return g0 - npH * pH - nPhi * V
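# Mechanical example with illustrative numbers: for an entry with g0 = -2.0,
# npH = 2 and nPhi = 1, g(entry, pH=7, V=0.5)
#   = -2.0 - (-2 * 0.0591) * 7 - (-1) * 0.5 = -2.0 + 0.8274 + 0.5 = -0.6726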
def get_decomposition(self, entry):
"""
Provides the decomposition at a particular composition
Args:
comp:
A composition
Returns:
Decomposition as a dict of {PourbaixEntry: amount}
"""
facet = self._get_facet(entry)
entrylist = [self._pd.qhull_entries[i] for i in facet]
m = self._make_comp_matrix(entrylist)
compm = self._make_comp_matrix([entry])
decompamts = np.dot(np.linalg.inv(m.transpose()), compm.transpose())
decomp = dict()
#Scrub away zero amounts
for i in xrange(len(decompamts)):
if abs(decompamts[i][0]) > PourbaixAnalyzer.numerical_tol:
decomp[self._pd.qhull_entries[facet[i]]] = decompamts[i][0]
return decomp
def get_decomp_and_e_above_hull(self, entry):
"""
Provides the decomposition and energy above convex hull for an entry
Args:
entry:
A PourbaixEntry
Returns:
(decomp, energy above convex hull) Stable entries should have
energy above hull of 0.
"""
g0 = entry.g0
decomp = self.get_decomposition(entry)
hullenergy = sum([entry.g0 * amt
for entry, amt in decomp.items()])
return decomp, g0 - hullenergy
def get_e_above_hull(self, entry):
"""
Provides the energy above convex hull for an entry
Args:
entry - A PourbaixEntry object
Returns:
Energy above convex hull of entry. Stable entries should have
energy above hull of 0.
"""
return self.get_decomp_and_e_above_hull(entry)[1]
```
#### File: analysis/pourbaix/entry.py
```python
from __future__ import division
__author__ = "<NAME>"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
__date__ = "December 10, 2012"
import re
import math
from pymatgen.core.periodic_table import Element
from pymatgen.core.structure import Composition
from pymatgen.serializers.json_coders import MSONable
from pymatgen.core.ion import Ion
from pymatgen.phasediagram.entries import PDEntry
PREFAC = 0.0591
class PourbaixEntry(MSONable):
"""
An object encompassing all data relevant to an ion in a pourbaix diagram.
Each bulk solid/ion has a free energy g of the form:
g = g0_ref + 0.0591 log10(conc) - nO mu_H2O + (nH - 2nO) pH
+ phi (-nH + 2nO + q)
"""
def __init__(self, entry, correction=0.0, entry_id=None):
"""
Args:
entry:
An entry object
(ComputedEntry/ComputedStructureEntry/PDEntry/IonEntry)
correction:
A correction to be added to the entry's energy. Defaults to 0.0.
entry_id:
Optional identifier for the entry. Defaults to None.
"""
if isinstance(entry, IonEntry):
self._entry = entry
self._conc = 1.0e-6
self._phase_type = "Ion"
self._charge = entry.composition.charge
else:
self._entry = entry
self._conc = 1.0
self._phase_type = "Solid"
self._charge = 0.0
self._npH = None
self._nPhi = None
self._nH2O = None
self._nM = None
self.uncorrected_energy = entry.energy
self.correction = correction
self._calc_coeff_terms()
self._name = self._entry.composition.reduced_formula
if self._phase_type == "Solid":
self._name += "(s)"
try:
self.entry_id = entry.entry_id
except AttributeError:
self.entry_id = entry_id
@property
def _g0(self):
return self.energy
@property
def energy(self):
return self.uncorrected_energy + self.correction
@property
def name(self):
"""
Returns the entry's name
"""
return self._name
def set_name(self, string):
"""
Set name of entry
Args:
string: Input string
"""
self._name = string
@property
def npH(self):
"""
Returns value of npH, the coefficient of pH
"""
return self._npH
@property
def nH2O(self):
"""
Returns coefficient of Mu_H2O
"""
return self._nH2O
@property
def nPhi(self):
"""
Returns nPhi, the coefficient of Phi
"""
return self._nPhi
@property
def g0(self):
"""
Return g0 for the entry. Legacy function.
"""
return self._g0
@property
def conc(self):
"""
Return concentration of the entry. Returns 1 if solid.
"""
return self._conc
@property
def conc_term(self):
"""
Returns the concentration contribution to the free energy.
"""
return self.normalization_factor * PREFAC * math.log10(self._conc)
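# e.g. at the default ion concentration of 1.0e-6, PREFAC * log10(conc)
# = 0.0591 * (-6) ≈ -0.355, which is then scaled by normalization_factor (1 / nM).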
@property
def phase_type(self):
"""
Returns whether the entry is a solid/ion.
"""
return self._phase_type
def g0_add(self, term):
"""
Add a correction term to g0.
Args:
term:
Correction term to add to g0
"""
self.correction += term
def g0_replace(self, term):
"""
Replace g0 by a different value.
Args:
term:
New value for g0
"""
self.uncorrected_energy = term
self.correction = 0.0
@property
def to_dict(self):
"""
Returns dict which contains Pourbaix Entry data.
Note that the pH, voltage, H2O factors are always calculated when
constructing a PourbaixEntry object.
"""
d = {"@module": self.__class__.__module__,
"@class": self.__class__.__name__}
if isinstance(self._entry, IonEntry):
d["entry type"] = "Ion"
else:
d["entry type"] = "Solid"
d["entry"] = self._entry.to_dict
d["pH factor"] = self._npH
d["voltage factor"] = self._nPhi
d["concentration"] = self._conc
d["H2O factor"] = self._nH2O
d["energy"] = self.energy
d["correction"] = self.correction
d["entry_id"] = self.entry_id
return d
@classmethod
def from_dict(cls, d):
"""
Returns a PourbaixEntry by reading in an Ion
"""
entry_type = d["entry type"]
if entry_type == "Ion":
entry = IonEntry.from_dict(d["entry"])
else:
entry = PDEntry.from_dict(d["entry"])
correction = d["correction"]
entry_id = d["entry_id"]
return PourbaixEntry(entry, correction, entry_id)
def _calc_coeff_terms(self):
"""
Calculates coefficients of pH, V, H2O
"""
nH = 0
nO = 0
nM = 0
for elt in self._entry.composition.elements:
if elt == (Element("H")):
nH = self.entry.composition[elt]
elif elt == (Element("O")):
nO = self.entry.composition[elt]
else:
nM += self.entry.composition[elt]
self._nM = nM
self._npH = (nH - 2 * nO)
self._nH2O = nO
self._nPhi = (nH - 2 * nO - self._charge)
@property
def normalization_factor(self):
"""
Normalize each entry by nM
"""
fact = 1.0 / self._nM
return fact
def scale(self, factor):
"""
Normalize all entries by normalization factor.
Args:
factor:
Normalization factor
"""
self._npH *= factor
self._nPhi *= factor
self._nH2O *= factor
self.uncorrected_energy *= factor
self.correction *= factor
# self._g0 *= factor
def normalize(self, factor):
self.scale(factor)
@property
def charge(self):
"""
Returns charge of entry
"""
return self._charge
@property
def composition(self):
"""
Returns composition
"""
return self.entry.composition
@property
def entry(self):
"""
Returns IonEntry/PDEntry object
"""
return self._entry
def reduced_entry(self):
"""
Calculate reduction factor for composition, and reduce parameters by
this factor.
"""
reduction_factor = self.entry.composition.\
get_reduced_composition_and_factor()[1]
self._nM /= reduction_factor
self.scale(1.0 / reduction_factor)
@property
def num_atoms(self):
"""
Return number of atoms in current formula. Useful for normalization
"""
return self.entry.composition.num_atoms\
/ self.entry.composition.get_reduced_composition_and_factor()[1]
def set_conc(self, conc):
"""
Set concentration manually.
Args:
conc:
Input concentration
"""
self._conc = conc
def __repr__(self):
return "Pourbaix Entry : {} with energy = {:.4f}, npH = {}, nPhi = {},\
nH2O = {}".format(self._entry.composition, self.g0, self.npH,
self.nPhi, self.nH2O)
def __str__(self):
return self.__repr__()
class MultiEntry(PourbaixEntry):
"""
PourbaixEntry-like object for constructing multi-elemental Pourbaix
diagrams.
"""
def __init__(self, entry_list, weights=None):
"""
Args:
entry_list:
List of component PourbaixEntries
weights:
Weights associated with each entry. Default is None
"""
if weights is None:
self._weights = [1.0] * len(entry_list)
else:
self._weights = weights
self._entrylist = entry_list
self.correction = 0.0
self.uncorrected_energy = 0.0
self._npH = 0.0
self._nPhi = 0.0
self._nH2O = 0.0
self._nM = 0.0
self._name = ""
self.entry_id = list()
for i in xrange(len(entry_list)):
entry = entry_list[i]
self.uncorrected_energy += self._weights[i] * \
entry.uncorrected_energy
self.correction += self._weights[i] * entry.correction
self._npH += self._weights[i] * entry.npH
self._nPhi += self._weights[i] * entry.nPhi
self._nH2O += self._weights[i] * entry.nH2O
self._nM += self._weights[i] * entry._nM
self._name += entry.name + " + "
self.entry_id.append(entry.entry_id)
self._name = self._name[:-3]
@property
def normalization_factor(self):
"""
Normalize each entry by nM
"""
norm_fac = 0.0
for i in xrange(len(self._entrylist)):
entry = self._entrylist[i]
for el in entry.composition.elements:
if (el == Element("O")) | (el == Element("H")):
continue
if entry._phase_type == 'Solid':
red_fac = entry.composition.\
get_reduced_composition_and_factor()[1]
else:
red_fac = 1.0
norm_fac += self._weights[i] * entry.composition[el] / red_fac
fact = 1.0 / norm_fac
return fact
def __repr__(self):
str = "Multiple Pourbaix Entry : with energy = {:.4f}, npH = {}, "\
"nPhi = {}, nH2O = {}".format(
self.g0, self.npH, self.nPhi, self.nH2O)
str += ", species: "
for entry in self._entrylist:
str += entry.name + " + "
return str[:-3]
def __str__(self):
return self.__repr__()
@property
def conc_term(self):
sum_conc = 0.0
for i in xrange(len(self._entrylist)):
entry = self._entrylist[i]
sum_conc += self._weights[i] * PREFAC * math.log10(entry.conc)
return sum_conc * self.normalization_factor
@property
def entrylist(self):
return self._entrylist
class IonEntry(PDEntry):
"""
Object similar to PDEntry, but contains an Ion object instead of a
Composition object.
.. attribute:: name
A name for the entry. This is the string shown in the phase diagrams.
By default, this is the reduced formula for the composition, but can be
set to some other string for display purposes.
"""
def __init__(self, ion, energy, name=None):
"""
Args:
ion:
Ion object
energy:
Energy for composition.
name:
Optional parameter to name the entry. Defaults to the
chemical formula.
"""
self._energy = energy
self._composition = ion
self.name = name if name else self._composition.reduced_formula
@classmethod
def from_dict(cls, d):
"""
Returns an IonEntry object from a dict.
"""
return IonEntry(Ion.from_dict(d["composition"]), d["energy"])
@property
def to_dict(self):
"""
Creates a dict of composition, energy, and ion name
"""
d = {"composition": self._composition.to_dict, "energy": self._energy}
return d
@property
def energy(self):
"""
Return final energy
"""
return self._energy
@property
def energy_per_atom(self):
"""
Return final energy per atom
"""
return self._energy / self.composition.num_atoms
@property
def composition(self):
"""
Returns the composition
"""
return self._composition
def __repr__(self):
return "IonEntry : {} with energy = {:.4f}".format(self.composition,
self.energy)
def __str__(self):
return self.__repr__()
class PourbaixEntryIO(object):
"""
Class to import and export Pourbaix entries from a csv file
"""
@staticmethod
def to_csv(filename, entries, latexify_names=False):
"""
Exports Pourbaix entries to a csv
Args:
filename:
Filename to write to.
entries:
Entries to export.
latexify_names:
Format entry names to be LaTex compatible, e.g., Li_{2}O
"""
import csv
elements = set()
map(elements.update, [entry.entry.composition.elements
for entry in entries])
elements = sorted(list(elements), key=lambda a: a.X)
writer = csv.writer(open(filename, "wb"), delimiter=",",
quotechar="\"", quoting=csv.QUOTE_MINIMAL)
writer.writerow(["Name"] + elements + ["Energy"] + ["Entry Type"]
+ ["Charge"] + ["Concentration"])
for entry in entries:
row = [entry.name if not latexify_names
else re.sub(r"([0-9]+)", r"_{\1}", entry.name)]
if entry.phase_type == "Solid":
reduction_fac = entry.entry.composition.\
get_reduced_composition_and_factor()[1]
else:
reduction_fac = 1.0
row.extend([entry.entry.composition[el] / reduction_fac
for el in elements])
if entry.phase_type == "Solid":
reduction_fac = 1.0
row.append(entry.g0 / reduction_fac)
row.append(entry.phase_type)
row.append(entry.charge / reduction_fac)
row.append(entry.conc)
writer.writerow(row)
@staticmethod
def from_csv(filename):
"""
Imports PourbaixEntries from a csv.
Args:
filename - Filename to import from.
Returns:
List of Entries
"""
import csv
reader = csv.reader(open(filename, "rb"), delimiter=",",
quotechar="\"", quoting=csv.QUOTE_MINIMAL)
entries = list()
header_read = False
for row in reader:
if not header_read:
elements = row[1:(len(row) - 4)]
header_read = True
else:
name = row[0]
energy = float(row[-4])
conc = float(row[-1])
comp = dict()
for ind in range(1, len(row) - 4):
if float(row[ind]) > 0:
comp[Element(elements[ind - 1])] = float(row[ind])
phase_type = row[-3]
if phase_type == "Ion":
PoE = PourbaixEntry(IonEntry(Ion.from_formula(name),
energy))
PoE.set_conc(conc)
PoE.set_name(name)
entries.append(PoE)
else:
entries.append(PourbaixEntry(PDEntry(Composition(comp),
energy)))
elements = [Element(el) for el in elements]
return elements, entries
```
#### File: pymatgen/core/structure_modifier.py
```python
from __future__ import division
__author__ = "<NAME>"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "1.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
__date__ = "Sep 23, 2011"
import abc
import itertools
import warnings
import collections
import numpy as np
from pymatgen.util.decorators import deprecated
from pymatgen.core.periodic_table import Specie, Element
from pymatgen.core.lattice import Lattice
from pymatgen.core.sites import PeriodicSite, Site
from pymatgen.core.structure import Structure, Molecule
from pymatgen.util.coord_utils import get_points_in_sphere_pbc
class StructureModifier(object):
"""
Abstract class definition for all classes that modify structures.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractproperty
def modified_structure(self):
"""
Returns the modified structure.
"""
return
@abc.abstractproperty
def original_structure(self):
"""
Returns the original structure.
"""
return
@deprecated(replacement=Structure)
class StructureEditor(StructureModifier):
"""
Editor for adding, removing and changing sites from a structure
"""
DISTANCE_TOLERANCE = 0.01
def __init__(self, structure):
"""
Args:
structure:
pymatgen.core.structure Structure object.
"""
self._original_structure = structure
self._lattice = structure.lattice
self._sites = list(structure.sites)
def add_site_property(self, property_name, values):
"""
Adds a property to a site.
Args:
property_name:
The name of the property to add.
values:
A sequence of values. Must be same length as number of sites.
"""
if len(values) != len(self._sites):
raise ValueError("Values must be same length as sites.")
for i in xrange(len(self._sites)):
site = self._sites[i]
props = site.properties
if not props:
props = {}
props[property_name] = values[i]
self._sites[i] = PeriodicSite(site.species_and_occu,
site.frac_coords, self._lattice,
properties=props)
def replace_species(self, species_mapping):
"""
Swap species in a structure.
Args:
species_mapping:
dict of species to swap. Species can be elements too.
e.g., {Element("Li"): Element("Na")} performs a Li for Na
substitution. The second species can be a sp_and_occu dict.
For example, a site with 0.5 Si that is passed the mapping
{Element('Si): {Element('Ge'):0.75, Element('C'):0.25} } will
have .375 Ge and .125 C.
"""
def mod_site(site):
new_atom_occu = collections.defaultdict(int)
for sp, amt in site.species_and_occu.items():
if sp in species_mapping:
if isinstance(species_mapping[sp], (Element, Specie)):
new_atom_occu[species_mapping[sp]] += amt
elif isinstance(species_mapping[sp], dict):
for new_sp, new_amt in species_mapping[sp].items():
new_atom_occu[new_sp] += amt * new_amt
else:
new_atom_occu[sp] += amt
return PeriodicSite(new_atom_occu, site.frac_coords, self._lattice,
properties=site.properties)
self._sites = map(mod_site, self._sites)
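# Illustrative call (behaviour as documented above): substitute all Li by Na:
#   editor = StructureEditor(structure)
#   editor.replace_species({Element("Li"): Element("Na")})
# A partial substitution instead maps a species to an occupancy dict, e.g.
#   {Element("Si"): {Element("Ge"): 0.75, Element("C"): 0.25}}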
def replace_site(self, index, species_n_occu):
"""
Replace a single site. Takes either a species or a dict of species and
occupations.
Args:
index:
The index of the site in the _sites list.
species:
A species object.
"""
self._sites[index] = PeriodicSite(species_n_occu,
self._sites[index].frac_coords,
self._lattice,
properties=self._sites[index].
properties)
def remove_species(self, species):
"""
Remove all occurrences of a species from a structure.
Args:
species:
species to remove.
"""
new_sites = []
for site in self._sites:
new_sp_occu = {sp: amt for sp, amt in site.species_and_occu.items()
if sp not in species}
if len(new_sp_occu) > 0:
new_sites.append(PeriodicSite(new_sp_occu, site.frac_coords,
self._lattice,
properties=site.properties))
self._sites = new_sites
def append_site(self, species, coords, coords_are_cartesian=False,
validate_proximity=True):
"""
Append a site to the structure at the end.
Args:
species:
species of inserted site
coords:
coordinates of inserted site
coords_are_cartesian:
Whether coordinates are cartesian. Defaults to False.
validate_proximity:
Whether to check if inserted site is too close to an existing
site. Defaults to True.
"""
self.insert_site(len(self._sites), species, coords,
coords_are_cartesian, validate_proximity)
def insert_site(self, i, species, coords, coords_are_cartesian=False,
validate_proximity=True, properties=None):
"""
Insert a site to the structure.
Args:
i:
index to insert site
species:
species of inserted site
coords:
coordinates of inserted site
coords_are_cartesian:
Whether coordinates are cartesian. Defaults to False.
validate_proximity:
Whether to check if inserted site is too close to an existing
site. Defaults to True.
"""
if not coords_are_cartesian:
new_site = PeriodicSite(species, coords, self._lattice,
properties=properties)
else:
frac_coords = self._lattice.get_fractional_coords(coords)
new_site = PeriodicSite(species, frac_coords, self._lattice,
properties=properties)
if validate_proximity:
for site in self._sites:
if site.distance(new_site) < self.DISTANCE_TOLERANCE:
raise ValueError("New site is too close to an existing "
"site!")
self._sites.insert(i, new_site)
def delete_site(self, i):
"""
Delete site at index i.
Args:
i:
index of site to delete.
"""
del(self._sites[i])
def delete_sites(self, indices):
"""
Delete sites at the given indices.
Args:
indices:
sequence of indices of sites to delete.
"""
self._sites = [self._sites[i] for i in range(len(self._sites))
if i not in indices]
def apply_operation(self, symmop):
"""
Apply a symmetry operation to the structure and return the new
structure. The lattice is operated by the rotation matrix only.
Coords are operated in full and then transformed to the new lattice.
Args:
symmop:
Symmetry operation to apply.
"""
self._lattice = Lattice([symmop.apply_rotation_only(row)
for row in self._lattice.matrix])
def operate_site(site):
new_cart = symmop.operate(site.coords)
new_frac = self._lattice.get_fractional_coords(new_cart)
return PeriodicSite(site.species_and_occu, new_frac, self._lattice,
properties=site.properties)
self._sites = map(operate_site, self._sites)
def modify_lattice(self, new_lattice):
"""
Modify the lattice of the structure. Mainly used for changing the
basis.
Args:
new_lattice:
New lattice
"""
self._lattice = new_lattice
new_sites = []
for site in self._sites:
new_sites.append(PeriodicSite(site.species_and_occu,
site.frac_coords,
self._lattice,
properties=site.properties))
self._sites = new_sites
def apply_strain(self, strain):
"""
Apply an isotropic strain to the lattice.
Args:
strain:
Amount of strain to apply. E.g., 0.01 means all lattice
vectors are increased by 1%. This is equivalent to
calling modify_lattice with a lattice with lattice parameters
that are 1% larger.
"""
self.modify_lattice(Lattice(self._lattice.matrix * (1 + strain)))
def translate_sites(self, indices, vector, frac_coords=True):
"""
Translate specific sites by some vector, keeping the sites within the
unit cell.
Args:
sites:
List of site indices on which to perform the translation.
vector:
Translation vector for sites.
frac_coords:
Boolean stating whether the vector corresponds to fractional or
cartesian coordinates.
"""
for i in indices:
site = self._sites[i]
if frac_coords:
fcoords = site.frac_coords + vector
else:
fcoords = self._lattice.get_fractional_coords(site.coords
+ vector)
new_site = PeriodicSite(site.species_and_occu, fcoords,
self._lattice, to_unit_cell=True,
coords_are_cartesian=False,
properties=site.properties)
self._sites[i] = new_site
def perturb_structure(self, distance=0.1):
"""
Performs a random perturbation of the sites in a structure to break
symmetries.
Args:
distance:
distance in angstroms by which to perturb each site.
"""
def get_rand_vec():
#deals with zero vectors.
vector = np.random.randn(3)
vnorm = np.linalg.norm(vector)
return vector / vnorm * distance if vnorm != 0 else get_rand_vec()
for i in range(len(self._sites)):
self.translate_sites([i], get_rand_vec(), frac_coords=False)
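# Illustrative sketch (not part of the original module): on a hypothetical
# editor, this displaces every site by `distance` angstroms along a random
# direction, which is useful for breaking symmetry before a relaxation:
#   editor.perturb_structure(distance=0.05)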
def add_oxidation_state_by_element(self, oxidation_states):
"""
Add oxidation states to a structure.
Args:
structure:
pymatgen.core.structure Structure object.
oxidation_states:
dict of oxidation states.
E.g., {"Li":1, "Fe":2, "P":5, "O":-2}
"""
try:
for i, site in enumerate(self._sites):
new_sp = {}
for el, occu in site.species_and_occu.items():
sym = el.symbol
new_sp[Specie(sym, oxidation_states[sym])] = occu
new_site = PeriodicSite(new_sp, site.frac_coords,
self._lattice,
coords_are_cartesian=False,
properties=site.properties)
self._sites[i] = new_site
except KeyError:
raise ValueError("Oxidation state of all elements must be "
"specified in the dictionary.")
def add_oxidation_state_by_site(self, oxidation_states):
"""
Add oxidation states to a structure by site.
Args:
oxidation_states:
List of oxidation states.
E.g., [1, 1, 1, 1, 2, 2, 2, 2, 5, 5, 5, 5, -2, -2, -2, -2]
"""
try:
for i, site in enumerate(self._sites):
new_sp = {}
for el, occu in site.species_and_occu.items():
sym = el.symbol
new_sp[Specie(sym, oxidation_states[i])] = occu
new_site = PeriodicSite(new_sp, site.frac_coords,
self._lattice,
coords_are_cartesian=False,
properties=site.properties)
self._sites[i] = new_site
except IndexError:
raise ValueError("Oxidation state of all sites must be "
"specified in the dictionary.")
def remove_oxidation_states(self):
"""
Removes oxidation states from a structure.
"""
for i, site in enumerate(self._sites):
new_sp = collections.defaultdict(float)
for el, occu in site.species_and_occu.items():
sym = el.symbol
new_sp[Element(sym)] += occu
new_site = PeriodicSite(new_sp, site.frac_coords,
self._lattice,
coords_are_cartesian=False,
properties=site.properties)
self._sites[i] = new_site
def to_unit_cell(self, tolerance=0.1):
"""
Returns all the sites to their position inside the unit cell.
If there is a site within the tolerance already there, the site is
deleted instead of moved.
"""
new_sites = []
for site in self._sites:
if not new_sites:
new_sites.append(site)
frac_coords = np.array([site.frac_coords])
continue
if len(get_points_in_sphere_pbc(self._lattice, frac_coords,
site.coords, tolerance)):
continue
frac_coords = np.append(frac_coords, [site.frac_coords % 1],
axis=0)
new_sites.append(site.to_unit_cell)
self._sites = new_sites
@property
def original_structure(self):
"""
The original structure.
"""
return self._original_structure
@property
def modified_structure(self):
coords = [site.frac_coords for site in self._sites]
species = [site.species_and_occu for site in self._sites]
props = {}
if self._sites[0].properties:
for k in self._sites[0].properties.keys():
props[k] = [site.properties[k] for site in self._sites]
return Structure(self._lattice, species, coords, False,
site_properties=props)
@deprecated(replacement=Structure)
class SupercellMaker(StructureModifier):
"""
Makes a supercell.
"""
def __init__(self, structure, scaling_matrix=((1, 0, 0),
(0, 1, 0),
(0, 0, 1))):
"""
Create a supercell.
Args:
structure:
pymatgen.core.structure Structure object.
scaling_matrix:
a matrix transforming the lattice vectors. Defaults to the
identity matrix. Has to be all integers. e.g.,
[[2,1,0],[0,3,0],[0,0,1]] generates a new structure with
lattice vectors a' = 2a + b, b' = 3b, c' = c where a, b, and c
are the lattice vectors of the original structure.
"""
self._original_structure = structure
old_lattice = structure.lattice
scale_matrix = np.array(scaling_matrix)
new_lattice = Lattice(np.dot(scale_matrix, old_lattice.matrix))
new_sites = []
def range_vec(i):
return range(max(scale_matrix[:][:, i])
- min(scale_matrix[:][:, i]) + 1)
for site in structure.sites:
for (i, j, k) in itertools.product(range_vec(0), range_vec(1),
range_vec(2)):
fcoords = site.frac_coords + np.array([i, j, k])
coords = old_lattice.get_cartesian_coords(fcoords)
new_coords = new_lattice.get_fractional_coords(coords)
new_site = PeriodicSite(site.species_and_occu, new_coords,
new_lattice,
properties=site.properties)
contains_site = False
for s in new_sites:
if s.is_periodic_image(new_site):
contains_site = True
break
if not contains_site:
new_sites.append(new_site)
self._modified_structure = Structure.from_sites(new_sites)
@property
def original_structure(self):
return self._original_structure
@property
def modified_structure(self):
return self._modified_structure
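# Illustrative sketch (not part of the original module): assuming `structure`
# is a conventional cell, a diagonal scaling matrix doubles each lattice
# vector and yields eight times as many sites:
#   maker = SupercellMaker(structure, scaling_matrix=[[2, 0, 0],
#                                                     [0, 2, 0],
#                                                     [0, 0, 2]])
#   supercell = maker.modified_structure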
@deprecated(replacement=Structure)
class OxidationStateDecorator(StructureModifier):
"""
.. deprecated:: v2.1.3
Use StructureEditor's add_oxidation_state_by... instead.
Given a dictionary of oxidation states, decorate a structure by replacing
each Element at a site with a Specie with an oxidation state. Useful for
higher level functions.
"""
def __init__(self, structure, oxidation_states):
"""
Decorates a structure with oxidation states.
Args:
structure:
pymatgen.core.structure Structure object.
oxidation_states:
dict of oxidation states.
E.g., {"Li":1, "Fe":2, "P":5, "O": -2}
"""
warnings.warn("OxidationStateDecorator has been deprecated. Use "
"StructureEditor.remove_oxidation_states instead.")
self._original_structure = structure
editor = StructureEditor(structure)
editor.add_oxidation_state_by_element(oxidation_states)
self._modified_structure = editor.modified_structure
@property
def original_structure(self):
return self._original_structure
@property
def modified_structure(self):
return self._modified_structure
@deprecated(replacement=Structure)
class OxidationStateRemover(StructureModifier):
"""
.. deprecated:: v2.1.3
Use StructureEditor's remove_oxidation_states instead.
Replace each Specie at a site with an element. Useful for doing structure
comparisons after applying higher level functions.
"""
def __init__(self, structure):
"""
Removes oxidation states from a structure
Args:
structure:
pymatgen.core.structure Structure object.
"""
warnings.warn("OxidationStateRemover has been deprecated. Use "
"StructureEditor.remove_oxidation_states instead.")
self._original_structure = structure
new_species = [{Element(el.symbol): occu
for el, occu in site.species_and_occu.items()}
for site in structure]
self._modified_structure = Structure(structure.lattice, new_species,
structure.frac_coords, False)
@property
def original_structure(self):
return self._original_structure
@property
def modified_structure(self):
return self._modified_structure
@deprecated(replacement=Structure)
class BasisChange(StructureModifier):
"""
Given a new basis, we express the structure in this new basis.
"""
def __init__(self, structure, new_lattice):
"""
Express a given structure in a new basis.
Args:
structure:
pymatgen.core.structure Structure object.
new_lattice:
a pymatgen.core.Lattice object
"""
self._original_structure = structure
sp = [site.species_and_occu for site in structure._sites]
coords = [site.coords for site in structure._sites]
self._modified_structure = Structure(new_lattice, sp, coords,
validate_proximity=False,
to_unit_cell=True,
coords_are_cartesian=True)
@property
def original_structure(self):
return self._original_structure
@property
def modified_structure(self):
return self._modified_structure
@deprecated(replacement=Molecule)
class MoleculeEditor(StructureModifier):
"""
Editor for adding, removing and changing sites from a molecule.
"""
DISTANCE_TOLERANCE = 0.01
def __init__(self, molecule):
"""
Args:
molecule:
pymatgen.core.structure Molecule object.
"""
self._original_structure = molecule
self._sites = list(molecule.sites)
def add_site_property(self, property_name, values):
"""
Adds a property to a site.
Args:
property_name:
The name of the property to add.
values:
A sequence of values. Must be same length as number of sites.
"""
if len(values) != len(self._sites):
raise ValueError("Values must be same length as sites.")
for i in xrange(len(self._sites)):
site = self._sites[i]
props = site.properties
if not props:
props = {}
props[property_name] = values[i]
self._sites[i] = Site(site.species_and_occu, site.coords,
properties=props)
def replace_species(self, species_mapping):
"""
Swap species in a molecule.
Args:
species_mapping:
dict of species to swap. Species can be elements too.
e.g., {Element("Li"): Element("Na")} performs a Li for Na
substitution. The second species can be a sp_and_occu dict.
For example, a site with 0.5 Si that is passed the mapping
{Element('Si'): {Element('Ge'): 0.75, Element('C'): 0.25}} will
have .375 Ge and .125 C.
"""
def mod_site(site):
new_atom_occu = dict()
for sp, amt in site.species_and_occu.items():
if sp in species_mapping:
if isinstance(species_mapping[sp], (Element, Specie)):
if species_mapping[sp] in new_atom_occu:
new_atom_occu[species_mapping[sp]] += amt
else:
new_atom_occu[species_mapping[sp]] = amt
elif isinstance(species_mapping[sp], dict):
for new_sp, new_amt in species_mapping[sp].items():
if new_sp in new_atom_occu:
new_atom_occu[new_sp] += amt * new_amt
else:
new_atom_occu[new_sp] = amt * new_amt
else:
if sp in new_atom_occu:
new_atom_occu[sp] += amt
else:
new_atom_occu[sp] = amt
return Site(new_atom_occu, site.coords, properties=site.properties)
self._sites = map(mod_site, self._sites)
def replace_site(self, index, species_n_occu):
"""
Replace a single site. Takes either a species or a dict of species and occupancies.
Args:
index:
The index of the site in the _sites list
species_n_occu:
A species object or a dict of species and occupancies.
"""
self._sites[index] = Site(species_n_occu, self._sites[index].coords,
properties=self._sites[index].properties)
def remove_species(self, species):
"""
Remove all occurrences of a species from a molecule.
Args:
species:
Species to remove.
"""
new_sites = []
for site in self._sites:
new_sp_occu = {sp: amt for sp, amt in site.species_and_occu.items()
if sp not in species}
if len(new_sp_occu) > 0:
new_sites.append(Site(new_sp_occu, site.coords,
properties=site.properties))
self._sites = new_sites
def append_site(self, species, coords, validate_proximity=True):
"""
Append a site to the structure at the end.
Args:
species:
species of inserted site.
coords:
coordinates of inserted site.
validate_proximity:
Whether to check if inserted site is too close to an existing
site. Defaults to True.
"""
self.insert_site(len(self._sites), species, coords, validate_proximity)
def insert_site(self, i, species, coords, validate_proximity=True,
properties=None):
"""
Insert a site to the structure.
Args:
i:
Index to insert site.
species:
Species of inserted site.
coords:
Coordinates of inserted site.
validate_proximity:
Whether to check if inserted site is too close to an existing
site. Defaults to True.
"""
new_site = Site(species, coords, properties=properties)
if validate_proximity:
for site in self._sites:
if site.distance(new_site) < self.DISTANCE_TOLERANCE:
raise ValueError("New site is too close to an existing "
"site!")
self._sites.insert(i, new_site)
def delete_site(self, i):
"""
Delete site at index i.
Args:
i:
index of site to delete.
"""
del(self._sites[i])
def delete_sites(self, indices):
"""
Delete sites at the given indices.
Args:
indices:
sequence of indices of sites to delete.
"""
self._sites = [self._sites[i] for i in range(len(self._sites))
if i not in indices]
def translate_sites(self, indices, vector):
"""
Translate specific sites by some vector, keeping the sites within the
unit cell.
Args:
sites:
List of site indices on which to perform the translation.
vector:
Translation vector for sites.
"""
for i in indices:
site = self._sites[i]
new_site = Site(site.species_and_occu, site.coords + vector,
properties=site.properties)
self._sites[i] = new_site
def perturb_structure(self, distance=0.1):
"""
Performs a random perturbation of the sites in a structure to break
symmetries.
Args:
distance:
distance in angstroms by which to perturb each site.
"""
for i in range(len(self._sites)):
# draw from a normal distribution so the perturbation direction is unbiased
vector = np.random.randn(3)
vector /= np.linalg.norm(vector) / distance
self.translate_sites([i], vector)
@property
def original_structure(self):
return self._original_structure
@property
def modified_structure(self):
coords = [site.coords for site in self._sites]
species = [site.species_and_occu for site in self._sites]
props = {}
if self._sites[0].properties:
for k in self._sites[0].properties.keys():
props[k] = [site.properties[k] for site in self._sites]
return Molecule(species, coords, False, site_properties=props)
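# Illustrative sketch (not part of the original module): MoleculeEditor
# mirrors StructureEditor but works on cartesian Site objects. Assuming a
# hypothetical water Molecule `mol`:
#   editor = MoleculeEditor(mol)
#   editor.append_site("H", [0.0, 0.0, 3.0])   # proximity-checked insert
#   new_mol = editor.modified_structure        # returns a new Molecule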
if __name__ == "__main__":
import doctest
doctest.testmod()
```
#### File: pymatgen/symmetry/pointgroup.py
```python
from __future__ import division
__author__ = "<NAME>"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__date__ = "5/8/13"
import logging
import itertools
from collections import defaultdict
import numpy as np
try:
import scipy.cluster as spcluster
except ImportError:
spcluster = None
from pymatgen.core.operations import SymmOp
from pymatgen.util.coord_utils import find_in_coord_list
from pymatgen.util.decorators import requires
logger = logging.getLogger(__name__)
class PointGroup(list):
"""
Defines a point group, which is essentially a sequence of symmetry
operations.
.. attribute:: sch_symbol
Schoenflies symbol of the point group.
"""
def __init__(self, sch_symbol, operations, tol=0.1):
"""
Args:
sch_symbol:
The schoenflies symbol of the point group.
operations:
An initial set of symmetry operations. It is sufficient to
provide just enough operations to generate the full set
of symmetries.
tol:
Tolerance to generate the full set of symmetry operations.
"""
self.sch_symbol = sch_symbol
super(PointGroup, self).__init__(
generate_full_symmops(operations, tol))
def __str__(self):
return self.sch_symbol
def __repr__(self):
return self.__str__()
@requires(spcluster is not None, "Cannot import scipy. PointGroupAnalyzer "
"requires scipy.cluster")
class PointGroupAnalyzer(object):
"""
A class to analyze the point group of a molecule. The general outline of
the algorithm is as follows:
1. Center the molecule around its center of mass.
2. Compute the inertia tensor and the eigenvalues and eigenvectors.
3. Handle the symmetry detection based on eigenvalues.
a. Linear molecules have one zero eigenvalue. Possible symmetry
operations are C*v or D*h.
b. Asymmetric top molecules have all different eigenvalues. The
maximum rotational symmetry in such molecules is 2
c. Symmetric top molecules have 1 unique eigenvalue, which gives a
unique rotation axis. All axial point groups are possible
except the cubic groups (T & O) and I.
d. Spherical top molecules have all three eigenvalues equal. They
have the rare T, O or I point groups.
.. attribute:: sch_symbol
Schoenflies symbol of the detected point group.
"""
inversion_op = SymmOp.inversion()
def __init__(self, mol, tolerance=0.3, eigen_tolerance=0.01,
matrix_tol=0.1):
"""
The default settings are usually sufficient.
Args:
mol:
Molecule
tolerance:
Distance tolerance to consider sites as symmetrically
equivalent. Defaults to 0.3 Angstrom.
eigen_tolerance:
Tolerance to compare eigen values of the inertia tensor.
Defaults to 0.01.
matrix_tol:
Tolerance used to generate the full set of symmetry
operations of the point group.
"""
self.mol = mol
self.centered_mol = mol.get_centered_molecule()
self.tol = tolerance
self.eig_tol = eigen_tolerance
self.mat_tol = matrix_tol
self._analyze()
def _analyze(self):
if len(self.centered_mol) == 1:
self.sch_symbol = "Kh"
else:
inertia_tensor = np.zeros((3, 3))
total_inertia = 0
for site in self.mol:
c = site.coords
wt = site.species_and_occu.weight
for i in xrange(3):
inertia_tensor[i, i] += wt * (c[(i + 1) % 3] ** 2
+ c[(i + 2) % 3] ** 2)
for i, j in itertools.combinations(xrange(3), 2):
inertia_tensor[i, j] += -wt * c[i] * c[j]
inertia_tensor[j, i] += -wt * c[j] * c[i]
total_inertia += wt * np.dot(c, c)
# Normalize the inertia tensor so that it does not scale with size
# of the system. This mitigates the problem of choosing a proper
# comparison tolerance for the eigenvalues.
inertia_tensor /= total_inertia
eigvals, eigvecs = np.linalg.eig(inertia_tensor)
self.principal_axes = eigvecs.T
self.eigvals = eigvals
v1, v2, v3 = eigvals
eig_zero = abs(v1 * v2 * v3) < self.eig_tol ** 3
eig_all_same = abs(v1 - v2) < self.eig_tol and abs(
v1 - v3) < self.eig_tol
eig_all_diff = abs(v1 - v2) > self.eig_tol and abs(
v1 - v3) > self.eig_tol and abs(v2 - v3) > self.eig_tol
self.rot_sym = []
self.symmops = [SymmOp(np.eye(4))]
if eig_zero:
logger.debug("Linear molecule detected")
self._proc_linear()
elif eig_all_same:
logger.debug("Spherical top molecule detected")
self._proc_sph_top()
elif eig_all_diff:
logger.debug("Asymmetric top molecule detected")
self._proc_asym_top()
else:
logger.debug("Symmetric top molecule detected")
self._proc_sym_top()
def _proc_linear(self):
if self.is_valid_op(PointGroupAnalyzer.inversion_op):
self.sch_symbol = "D*h"
self.symmops.append(PointGroupAnalyzer.inversion_op)
else:
self.sch_symbol = "C*v"
def _proc_asym_top(self):
"""
Handles asymmetric top molecules, which cannot contain rotational
symmetry larger than 2.
"""
self._check_R2_axes_asym()
if len(self.rot_sym) == 0:
logger.debug("No rotation symmetries detected.")
self._proc_no_rot_sym()
elif len(self.rot_sym) == 3:
logger.debug("Dihedral group detected.")
self._proc_dihedral()
else:
logger.debug("Cyclic group detected.")
self._proc_cyclic()
def _proc_sym_top(self):
"""
Handles symmetric top molecules which have one unique eigenvalue whose
corresponding principal axis is a unique rotational axis. More complex
handling required to look for R2 axes perpendicular to this unique
axis.
"""
if abs(self.eigvals[0] - self.eigvals[1]) < self.eig_tol:
ind = 2
elif abs(self.eigvals[1] - self.eigvals[2]) < self.eig_tol:
ind = 0
else:
ind = 1
unique_axis = self.principal_axes[ind]
self._check_rot_sym(unique_axis)
if len(self.rot_sym) > 0:
self._check_perpendicular_r2_axis(unique_axis)
if len(self.rot_sym) >= 2:
self._proc_dihedral()
elif len(self.rot_sym) == 1:
self._proc_cyclic()
else:
self._proc_no_rot_sym()
def _proc_no_rot_sym(self):
"""
Handles molecules with no rotational symmetry. Only possible point
groups are C1, Cs and Ci.
"""
self.sch_symbol = "C1"
if self.is_valid_op(PointGroupAnalyzer.inversion_op):
self.sch_symbol = "Ci"
self.symmops.append(PointGroupAnalyzer.inversion_op)
else:
for v in self.principal_axes:
mirror_type = self._find_mirror(v)
if not mirror_type == "":
self.sch_symbol = "Cs"
break
def _proc_cyclic(self):
"""
Handles cyclic group molecules.
"""
main_axis, rot = max(self.rot_sym, key=lambda v: v[1])
self.sch_symbol = "C{}".format(rot)
mirror_type = self._find_mirror(main_axis)
if mirror_type == "h":
self.sch_symbol += "h"
elif mirror_type == "v":
self.sch_symbol += "v"
elif mirror_type == "":
if self.is_valid_op(SymmOp.rotoreflection(main_axis,
angle=180 / rot)):
self.sch_symbol = "S{}".format(2 * rot)
def _proc_dihedral(self):
"""
Handles dihedral group molecules, i.e those with intersecting R2 axes
and a main axis.
"""
main_axis, rot = max(self.rot_sym, key=lambda v: v[1])
self.sch_symbol = "D{}".format(rot)
mirror_type = self._find_mirror(main_axis)
if mirror_type == "h":
self.sch_symbol += "h"
elif not mirror_type == "":
self.sch_symbol += "d"
def _check_R2_axes_asym(self):
"""
Test for 2-fold rotation along the principal axes. Used to handle
asymmetric top molecules.
"""
for v in self.principal_axes:
op = SymmOp.from_axis_angle_and_translation(v, 180)
if self.is_valid_op(op):
self.symmops.append(op)
self.rot_sym.append((v, 2))
def _find_mirror(self, axis):
"""
Looks for mirror symmetry of specified type about axis. Possible
types are "h" or "vd". Horizontal (h) mirrors are perpendicular to
the axis while vertical (v) or diagonal (d) mirrors are parallel. v
mirrors have atoms lying on the mirror plane while d mirrors do
not.
"""
mirror_type = ""
#First test whether the axis itself is the normal to a mirror plane.
if self.is_valid_op(SymmOp.reflection(axis)):
self.symmops.append(SymmOp.reflection(axis))
mirror_type = "h"
else:
# Iterate through all pairs of atoms to find mirror
for s1, s2 in itertools.combinations(self.centered_mol, 2):
if s1.species_and_occu == s2.species_and_occu:
normal = s1.coords - s2.coords
if np.dot(normal, axis) < self.tol:
op = SymmOp.reflection(normal)
if self.is_valid_op(op):
self.symmops.append(op)
if len(self.rot_sym) > 1:
mirror_type = "d"
for v, r in self.rot_sym:
if not np.linalg.norm(v - axis) < self.tol:
if np.dot(v, normal) < self.tol:
mirror_type = "v"
break
else:
mirror_type = "v"
break
return mirror_type
def _get_smallest_set_not_on_axis(self, axis):
"""
Returns the smallest list of atoms with the same species and
distance from origin AND does not lie on the specified axis. This
set limits the possible rotational symmetry operations,
since atoms lying on a test axis are irrelevant in testing rotational
symmetry operations.
"""
def not_on_axis(site):
v = np.cross(site.coords, axis)
return np.linalg.norm(v) > self.tol
valid_sets = []
origin_site, dist_el_sites = cluster_sites(self.centered_mol, self.tol)
for test_set in dist_el_sites.values():
valid_set = filter(not_on_axis, test_set)
if len(valid_set) > 0:
valid_sets.append(valid_set)
return min(valid_sets, key=lambda s: len(s))
def _check_rot_sym(self, axis):
"""
Determines the rotational symmetry about the supplied axis. Used only for
symmetric top molecules which have possible rotational symmetry
operations > 2.
"""
min_set = self._get_smallest_set_not_on_axis(axis)
max_sym = len(min_set)
for i in xrange(max_sym, 0, -1):
if max_sym % i != 0:
continue
op = SymmOp.from_axis_angle_and_translation(axis, 360 / i)
rotvalid = self.is_valid_op(op)
if rotvalid:
self.symmops.append(op)
self.rot_sym.append((axis, i))
return i
return 1
def _check_perpendicular_r2_axis(self, axis):
"""
Checks for R2 axes perpendicular to unique axis. For handling
symmetric top molecules.
"""
min_set = self._get_smallest_set_not_on_axis(axis)
for s1, s2 in itertools.combinations(min_set, 2):
test_axis = np.cross(s1.coords - s2.coords, axis)
if np.linalg.norm(test_axis) > self.tol:
op = SymmOp.from_axis_angle_and_translation(test_axis, 180)
r2present = self.is_valid_op(op)
if r2present:
self.symmops.append(op)
self.rot_sym.append((test_axis, 2))
return True
def _proc_sph_top(self):
"""
Handles spherical top molecules, which belong to the T, O or I point
groups.
"""
self._find_spherical_axes()
# handle an empty rot_sym before calling max() on it
if len(self.rot_sym) == 0:
logger.debug("Accidental spherical top!")
return self._proc_sym_top()
main_axis, rot = max(self.rot_sym, key=lambda v: v[1])
if rot < 3:
logger.debug("Accidental spherical top!")
self._proc_sym_top()
elif rot == 3:
mirror_type = self._find_mirror(main_axis)
if mirror_type != "":
if self.is_valid_op(PointGroupAnalyzer.inversion_op):
self.symmops.append(PointGroupAnalyzer.inversion_op)
self.sch_symbol = "Th"
else:
self.sch_symbol = "Td"
else:
self.sch_symbol = "T"
elif rot == 4:
if self.is_valid_op(PointGroupAnalyzer.inversion_op):
self.symmops.append(PointGroupAnalyzer.inversion_op)
self.sch_symbol = "Oh"
else:
self.sch_symbol = "O"
elif rot == 5:
if self.is_valid_op(PointGroupAnalyzer.inversion_op):
self.symmops.append(PointGroupAnalyzer.inversion_op)
self.sch_symbol = "Ih"
else:
self.sch_symbol = "I"
def _find_spherical_axes(self):
"""
Looks for R5, R4, R3 and R2 axes in spherical top molecules. Point
group T molecules have only one unique 3-fold and one unique 2-fold
axis. O molecules have unique 4-, 3- and 2-fold axes. I molecules
have a unique 5-fold axis.
"""
rot_present = defaultdict(bool)
origin_site, dist_el_sites = cluster_sites(self.centered_mol, self.tol)
test_set = min(dist_el_sites.values(), key=lambda s: len(s))
coords = [s.coords for s in test_set]
for c1, c2, c3 in itertools.combinations(coords, 3):
for cc1, cc2 in itertools.combinations([c1, c2, c3], 2):
if not rot_present[2]:
test_axis = cc1 + cc2
if np.linalg.norm(test_axis) > self.tol:
op = SymmOp.from_axis_angle_and_translation(test_axis,
180)
rot_present[2] = self.is_valid_op(op)
if rot_present[2]:
self.symmops.append(op)
self.rot_sym.append((test_axis, 2))
test_axis = np.cross(c2 - c1, c3 - c1)
if np.linalg.norm(test_axis) > self.tol:
for r in (3, 4, 5):
if not rot_present[r]:
op = SymmOp.from_axis_angle_and_translation(
test_axis, 360/r)
rot_present[r] = self.is_valid_op(op)
if rot_present[r]:
self.symmops.append(op)
self.rot_sym.append((test_axis, r))
break
if rot_present[2] and rot_present[3] and (
rot_present[4] or rot_present[5]):
break
def get_pointgroup(self):
"""
Returns a PointGroup object for the molecule.
"""
return PointGroup(self.sch_symbol, self.symmops, self.mat_tol)
def is_valid_op(self, symmop):
"""
Check if a particular symmetry operation is a valid symmetry operation
for a molecule, i.e., the operation maps all atoms to another
equivalent atom.
Args:
symmop:
Symmetry op to test.
"""
coords = self.centered_mol.cart_coords
for site in self.centered_mol:
coord = symmop.operate(site.coords)
ind = find_in_coord_list(coords, coord, self.tol)
if not (len(ind) == 1 and
self.centered_mol[ind[0]].species_and_occu
== site.species_and_occu):
return False
return True
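# Illustrative sketch (not part of the original module): analyzing a
# hypothetical Molecule `mol` and reading off its Schoenflies symbol:
#   pga = PointGroupAnalyzer(mol, tolerance=0.3)
#   pg = pga.get_pointgroup()
#   print(pg.sch_symbol)   # e.g. "Td" for a methane-like molecule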
@requires(spcluster is not None, "Cannot import scipy. cluster_sites require "
"scipy.cluster.")
def cluster_sites(mol, tol):
"""
Cluster sites based on distance and species type.
Args:
mol:
Molecule (should be centered at center of mass).
tol:
Tolerance to use.
Returns:
(origin_site, clustered_sites). origin_site is a site at the center
of mass (None if there are no origin atoms). clustered_sites is a
dict of {(avg_dist, species_and_occu): [list of sites]}
"""
# fclusterdata expects 2-D observations, so add a dummy 0 as a second
# coordinate.
dists = [[np.linalg.norm(site.coords), 0] for site in mol]
f = spcluster.hierarchy.fclusterdata(dists, tol, criterion='distance')
clustered_dists = defaultdict(list)
for i, site in enumerate(mol):
clustered_dists[f[i]].append(dists[i])
avg_dist = {label: np.mean(val) for label, val in clustered_dists.items()}
clustered_sites = defaultdict(list)
origin_site = None
for i, site in enumerate(mol):
if avg_dist[f[i]] < tol:
origin_site = site
else:
clustered_sites[(avg_dist[f[i]],
site.species_and_occu)].append(site)
return origin_site, clustered_sites
def generate_full_symmops(symmops, tol):
"""
Recursive algorithm to permute through all possible combinations of the
initially supplied symmetry operations to arrive at a complete set of
operations mapping a single atom to all other equivalent atoms in the
point group. This assumes that the initial number already uniquely
identifies all operations.
Args:
symmops:
Initial set of symmetry operations.
Returns:
Full set of symmetry operations.
"""
a = [o.affine_matrix for o in symmops]
if len(symmops) > 300:
logger.debug("Generation of symmetry operations in infinite loop. " +
"Possible error in initial operations or tolerance too "
"low.")
else:
for op1, op2 in itertools.product(symmops, symmops):
m = np.dot(op1.affine_matrix, op2.affine_matrix)
d = np.abs(a - m) < tol
if not np.any(np.all(np.all(d, axis=2), axis=1)):
return generate_full_symmops(symmops + [SymmOp(m)], tol)
return symmops
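# Illustrative sketch (not part of the original module): seeding the
# generator with a single 90-degree rotation is expected to close the group
# to the four powers of C4 (E, C4, C2, C4^3), assuming a tolerance of 0.1:
#   c4 = SymmOp.from_axis_angle_and_translation([0, 0, 1], 90)
#   full = generate_full_symmops([SymmOp(np.eye(4)), c4], tol=0.1)
#   len(full)  # expected to be 4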
```
#### File: pymatgen/transformations/advanced_transformations.py
```python
from __future__ import division
__author__ = "<NAME>, <NAME>"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "1.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__date__ = "Jul 24, 2012"
import numpy as np
from fractions import gcd, Fraction
from pymatgen.core.structure import Specie, Composition
from pymatgen.core.periodic_table import smart_element_or_specie
from pymatgen.transformations.transformation_abc import AbstractTransformation
from pymatgen.transformations.standard_transformations import \
SubstitutionTransformation, OrderDisorderedStructureTransformation
from pymatgen.command_line.enumlib_caller import EnumlibAdaptor
from pymatgen.analysis.ewald import EwaldSummation
from pymatgen.core.structure import Structure
from pymatgen.symmetry.finder import SymmetryFinder
from pymatgen.structure_prediction.substitution_probability import \
SubstitutionPredictor
from pymatgen.analysis.structure_matcher import StructureMatcher, \
SpinComparator
from pymatgen.analysis.energy_models import SymmetryModel, \
EwaldElectrostaticModel, NsitesModel
from pymatgen.serializers.json_coders import PMGJSONDecoder
class ChargeBalanceTransformation(AbstractTransformation):
"""
This is a transformation that disorders a structure to make it charge
balanced, given an oxidation state-decorated structure.
"""
def __init__(self, charge_balance_sp):
"""
Args:
charge_balance_sp:
specie to add or remove. Currently only removal is supported.
"""
self._charge_balance_sp = str(charge_balance_sp)
def apply_transformation(self, structure):
charge = structure.charge
specie = smart_element_or_specie(self._charge_balance_sp)
num_to_remove = charge / specie.oxi_state
num_in_structure = structure.composition[specie]
removal_fraction = num_to_remove / num_in_structure
if removal_fraction < 0:
raise ValueError("addition of specie not yet supported by "
"ChargeBalanceTransformation")
trans = SubstitutionTransformation({self._charge_balance_sp:
{self._charge_balance_sp:
1 - removal_fraction}})
return trans.apply_transformation(structure)
def __str__(self):
return "Charge Balance Transformation : " + \
"Species to remove = {}".format(str(self._charge_balance_sp))
def __repr__(self):
return self.__str__()
@property
def inverse(self):
return None
@property
def is_one_to_many(self):
return False
@property
def to_dict(self):
return {"name": self.__class__.__name__, "version": __version__,
"init_args": {"charge_balance_sp": self._charge_balance_sp},
"@module": self.__class__.__module__,
"@class": self.__class__.__name__}
class SuperTransformation(AbstractTransformation):
"""
This is a transformation that is inherently one-to-many. It is constructed
from a list of transformations and returns one structure for each
transformation. The primary use for this class is extending a transmuter
object.
"""
def __init__(self, transformations):
"""
Args:
transformations:
list of transformations to apply to a structure. One
transformation is applied to each output structure.
"""
self._transformations = transformations
def apply_transformation(self, structure, return_ranked_list=False):
if not return_ranked_list:
raise ValueError("SuperTransformation has no single best structure"
" output. Must use return_ranked_list")
structures = []
for t in self._transformations:
structures.append({"transformation": t,
"structure": t.apply_transformation(structure)})
return structures
def __str__(self):
return "Super Transformation : Transformations = " + \
"{}".format(" ".join([str(t) for t in self._transformations]))
def __repr__(self):
return self.__str__()
@property
def inverse(self):
return None
@property
def is_one_to_many(self):
return True
@property
def to_dict(self):
return {"name": self.__class__.__name__, "version": __version__,
"init_args": {"transformations": self._transformations},
"@module": self.__class__.__module__,
"@class": self.__class__.__name__}
class MultipleSubstitutionTransformation(object):
"""
Performs multiple substitutions on a structure. For example, can do a
fractional replacement of Ge in LiGePS with a list of species, creating one
structure for each substitution. Ordering is done using a dummy element so
only one ordering must be done per substitution oxidation state. Charge
balancing of the structure is optionally performed.
.. note::
There are no checks to make sure that removal fractions are possible
and rounding may occur. Currently charge balancing only works for
removal of species.
"""
def __init__(self, sp_to_replace, r_fraction, substitution_dict,
charge_balance_species=None, order=True):
"""
Performs multiple fractional substitutions on a transmuter.
Args:
sp_to_replace:
species to be replaced
r_fraction:
fraction of that specie to replace
substitution_dict:
dictionary of the format
{2: ["Mg", "Ti", "V", "As", "Cr", "Ta", "N", "Nb"],
3: ["Ru", "Fe", "Co", "Ce", "As", "Cr", "Ta", "N", "Nb"],
4: ["Ru", "V", "Cr", "Ta", "N", "Nb"],
5: ["Ru", "W", "Mn"]
}
The number is the charge used for each of the list of elements
(an element can be present in multiple lists)
charge_balance_species:
If specified, will balance the charge on the structure using
that specie.
"""
self._sp_to_replace = sp_to_replace
self._r_fraction = r_fraction
self._substitution_dict = substitution_dict
self._charge_balance_species = charge_balance_species
self._order = order
def apply_transformation(self, structure, return_ranked_list=False):
if not return_ranked_list:
raise ValueError("MultipleSubstitutionTransformation has no single"
" best structure output. Must use"
" return_ranked_list.")
outputs = []
for charge, el_list in self._substitution_dict.items():
mapping = {}
if charge > 0:
sign = "+"
else:
sign = "-"
dummy_sp = "X{}{}".format(str(charge), sign)
mapping[self._sp_to_replace] = {self._sp_to_replace:
1 - self._r_fraction,
dummy_sp: self._r_fraction}
trans = SubstitutionTransformation(mapping)
dummy_structure = trans.apply_transformation(structure)
if self._charge_balance_species is not None:
cbt = ChargeBalanceTransformation(self._charge_balance_species)
dummy_structure = cbt.apply_transformation(dummy_structure)
if self._order:
trans = OrderDisorderedStructureTransformation()
dummy_structure = trans.apply_transformation(dummy_structure)
for el in el_list:
if charge > 0:
sign = "+"
else:
sign = "-"
st = SubstitutionTransformation({"X{}+".format(str(charge)):
"{}{}{}".format(el, charge,
sign)})
new_structure = st.apply_transformation(dummy_structure)
outputs.append({"structure": new_structure})
return outputs
def __str__(self):
return "Multiple Substitution Transformation : Substitution on " + \
"{}".format(self._sp_to_replace)
def __repr__(self):
return self.__str__()
@property
def inverse(self):
return None
@property
def is_one_to_many(self):
return True
@property
def to_dict(self):
return {"name": self.__class__.__name__, "version": __version__,
"init_args": {"sp_to_replace": self._sp_to_replace,
"r_fraction": self._r_fraction,
"substitution_dict": self._substitution_dict,
"charge_balance_species":
self._charge_balance_species},
"@module": self.__class__.__module__,
"@class": self.__class__.__name__}
class EnumerateStructureTransformation(AbstractTransformation):
"""
Order a disordered structure using enumlib. For complete orderings, this
generally produces fewer structures than the OrderDisorderedStructure
transformation, and at a much faster speed.
"""
def __init__(self, min_cell_size=1, max_cell_size=1, symm_prec=0.1,
refine_structure=False):
"""
Args:
min_cell_size:
The minimum cell size wanted. Must be an int. Defaults to 1.
max_cell_size:
The maximum cell size wanted. Must be an int. Defaults to 1.
symm_prec:
Tolerance to use for symmetry.
refine_structure:
This parameter has the same meaning as in enumlib_caller.
If you are starting from a structure that has been relaxed via
some electronic structure code, it is usually much better to
start with symmetry determination and then obtain a refined
structure. The refined structure has cell parameters and
atomic positions shifted to the expected symmetry positions,
which makes it much less sensitive to precision issues in enumlib.
If you are already starting from an experimental cif, refinement
should have already been done and it is not necessary. Defaults
to False.
"""
self.symm_prec = symm_prec
self.min_cell_size = min_cell_size
self.max_cell_size = max_cell_size
self.refine_structure = refine_structure
def apply_transformation(self, structure, return_ranked_list=False):
"""
Return either a single ordered structure or a sequence of all ordered
structures.
Args:
structure:
Structure to order.
return_ranked_list:
Boolean stating whether or not multiple structures are
returned. If return_ranked_list is a number, that number of
structures is returned.
Returns:
Depending on return_ranked_list, either a transformed structure
or a list of dictionaries, where each dictionary is of the form
{"structure": ..., "other_arguments": ...}
The list of ordered structures is ranked by ewald energy / atom, if
the input structure is an oxidation state decorated structure.
Otherwise, it is ranked by number of sites, with smallest number of
sites first.
"""
try:
num_to_return = int(return_ranked_list)
except ValueError:
num_to_return = 1
if structure.is_ordered:
raise ValueError("Enumeration can be carried out only on "
"disordered structures!")
if self.refine_structure:
finder = SymmetryFinder(structure, self.symm_prec)
structure = finder.get_refined_structure()
contains_oxidation_state = False
for sp in structure.composition.elements:
if hasattr(sp, "oxi_state") and sp._oxi_state != 0:
contains_oxidation_state = True
break
adaptor = EnumlibAdaptor(structure, min_cell_size=self.min_cell_size,
max_cell_size=self.max_cell_size,
symm_prec=self.symm_prec,
refine_structure=False)
adaptor.run()
structures = adaptor.structures
original_latt = structure.lattice
inv_latt = np.linalg.inv(original_latt.matrix)
ewald_matrices = {}
all_structures = []
for s in structures:
new_latt = s.lattice
transformation = np.dot(new_latt.matrix, inv_latt)
transformation = tuple([tuple([int(round(cell)) for cell in row])
for row in transformation])
if contains_oxidation_state:
if transformation not in ewald_matrices:
s_supercell = Structure.from_sites(structure.sites)
s_supercell.make_supercell(transformation)
ewald = EwaldSummation(s_supercell)
ewald_matrices[transformation] = ewald
else:
ewald = ewald_matrices[transformation]
energy = ewald.compute_sub_structure(s)
all_structures.append({"num_sites": len(s), "energy": energy,
"structure": s})
else:
all_structures.append({"num_sites": len(s), "structure": s})
def sort_func(s):
return s["energy"] / s["num_sites"] if contains_oxidation_state \
else s["num_sites"]
self._all_structures = sorted(all_structures, key=sort_func)
if return_ranked_list:
return self._all_structures[0:num_to_return]
else:
return self._all_structures[0]["structure"]
def __str__(self):
return "EnumerateStructureTransformation"
def __repr__(self):
return self.__str__()
@property
def inverse(self):
return None
@property
def is_one_to_many(self):
return True
@property
def to_dict(self):
return {"name": self.__class__.__name__, "version": __version__,
"init_args": {"symm_prec": self.symm_prec,
"min_cell_size": self.min_cell_size,
"max_cell_size": self.max_cell_size,
"refine_structure": self.refine_structure},
"@module": self.__class__.__module__,
"@class": self.__class__.__name__}
class SubstitutionPredictorTransformation(AbstractTransformation):
"""
This transformation takes a structure and uses the structure
prediction module to find likely site substitutions.
"""
def __init__(self, threshold=1e-2, **kwargs):
"""
Args:
kwargs:
args for SubstitutionProbability class
lambda_table, alpha
"""
self._kwargs = kwargs
self._threshold = threshold
self._substitutor = SubstitutionPredictor(threshold=threshold,
**kwargs)
def apply_transformation(self, structure, return_ranked_list=False):
if not return_ranked_list:
raise ValueError("SubstitutionPredictorTransformation doesn't"
" support returning 1 structure")
preds = self._substitutor.composition_prediction(
structure.composition, to_this_composition=False)
preds.sort(key=lambda x: x['probability'], reverse=True)
outputs = []
for pred in preds:
st = SubstitutionTransformation(pred['substitutions'])
output = {'structure': st.apply_transformation(structure),
'probability': pred['probability'],
'threshold': self._threshold, 'substitutions': {}}
#dictionary keys have to be converted to strings for JSON
for key, value in pred['substitutions'].items():
output['substitutions'][str(key)] = str(value)
outputs.append(output)
return outputs
def __str__(self):
return "SubstitutionPredictorTransformation"
def __repr__(self):
return self.__str__()
@property
def inverse(self):
return None
@property
def is_one_to_many(self):
return True
@property
def to_dict(self):
d = {"name": self.__class__.__name__, "version": __version__,
"init_args": self._kwargs, "@module": self.__class__.__module__,
"@class": self.__class__.__name__}
d["init_args"]["threshold"] = self._threshold
return d
class MagOrderingTransformation(AbstractTransformation):
"""
This transformation takes a structure and returns a list of magnetic
orderings. Currently only works for ordered structures.
"""
def __init__(self, mag_species_spin, order_parameter=0.5,
energy_model=SymmetryModel(), **kwargs):
"""
Args:
mag_species_spin:
A mapping of elements/species to magnetically order to spin
magnitudes. E.g., {"Fe3+": 5, "Mn3+": 4}
order_parameter:
degree of magnetization. 0.5 corresponds to
antiferromagnetic order
energy_model:
Energy model used to rank the structures. Some models are
provided in :mod:`pymatgen.analysis.energy_models`.
**kwargs:
Same keyword args as :class:`EnumerateStructureTransformation`,
i.e., min_cell_size, etc.
"""
self.mag_species_spin = mag_species_spin
if order_parameter > 1 or order_parameter < 0:
raise ValueError('Order Parameter must lie between 0 and 1')
else:
self.order_parameter = order_parameter
self.emodel = energy_model
self.enum_kwargs = kwargs
@classmethod
def determine_min_cell(cls, structure, mag_species_spin, order_parameter):
"""
Determine the smallest supercell that is able to enumerate
the provided structure with the given order parameter
"""
def lcm(n1, n2):
"""
Find least common multiple of two numbers
"""
return n1 * n2 // gcd(n1, n2)
denom = Fraction(order_parameter).limit_denominator(100).denominator
atom_per_specie = [structure.composition.get(m)
for m in mag_species_spin.keys()]
n_gcd = reduce(gcd, atom_per_specie)
if not n_gcd:
raise ValueError('The specified species do not exist in the structure'
' to be enumerated')
return lcm(n_gcd, denom) / n_gcd
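# Illustrative worked example (not part of the original module): with
# order_parameter = 0.5 the denominator is 2; for a hypothetical cell with
# 4 Fe sites, n_gcd = 4, lcm(4, 2) = 4, so the minimum cell multiple is
# 4 / 4 = 1. With 3 Fe sites instead, lcm(3, 2) = 6 and the multiple is 2.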
def apply_transformation(self, structure, return_ranked_list=False):
#Make a mutable structure first
mods = Structure.from_sites(structure)
for sp, spin in self.mag_species_spin.items():
sp = smart_element_or_specie(sp)
oxi_state = getattr(sp, "oxi_state", 0)
up = Specie(sp.symbol, oxi_state, {"spin": abs(spin)})
down = Specie(sp.symbol, oxi_state, {"spin": -abs(spin)})
mods.replace_species(
{sp: Composition({up: self.order_parameter,
down: 1 - self.order_parameter})})
enum_args = self.enum_kwargs
enum_args["min_cell_size"] = max(int(
MagOrderingTransformation.determine_min_cell(
structure, self.mag_species_spin,
self.order_parameter)),
enum_args.get("min_cell_size"))
max_cell = self.enum_kwargs.get('max_cell_size')
if max_cell:
if enum_args["min_cell_size"] > max_cell:
raise ValueError('Specified max cell size is smaller'
' than the minimum enumerable cell size')
else:
enum_args["max_cell_size"] = enum_args["min_cell_size"]
t = EnumerateStructureTransformation(**enum_args)
alls = t.apply_transformation(mods,
return_ranked_list=return_ranked_list)
try:
num_to_return = int(return_ranked_list)
except ValueError:
num_to_return = 1
if num_to_return == 1:
return alls[0]["structure"]
m = StructureMatcher(comparator=SpinComparator())
grouped = m.group_structures([d["structure"] for d in alls])
alls = [{"structure": g[0], "energy": self.emodel.get_energy(g[0])}
for g in grouped]
self._all_structures = sorted(alls, key=lambda d: d["energy"])
return self._all_structures[0:num_to_return]
def __str__(self):
return "MagOrderingTransformation"
def __repr__(self):
return self.__str__()
@property
def inverse(self):
return None
@property
def is_one_to_many(self):
return True
@property
def to_dict(self):
return {
"name": self.__class__.__name__, "version": __version__,
"init_args": {"mag_species_spin": self.mag_species_spin,
"order_parameter": self.order_parameter,
"energy_model": self.emodel.to_dict,
"enum_kwargs": self.enum_kwargs},
"@module": self.__class__.__module__,
"@class": self.__class__.__name__}
@classmethod
def from_dict(cls, d):
init = d["init_args"]
return MagOrderingTransformation(
init["mag_species_spin"], init["order_parameter"],
energy_model=PMGJSONDecoder().process_decoded(
init["energy_model"]),
**init["enum_kwargs"])
``` |
{
"source": "JmFoces/OrderFiles",
"score": 2
} |
#### File: JmFoces/OrderFiles/main.py
```python
import sys
import os
from os import listdir
from queue import Queue
from utils.log import log
from mtypes.creators.file_factory import FileFactory
import multiprocessing
from multiprocessing import Process
from config import EXCLUDE_ROOT
from utils.order.organizer import Organizer
from hachoir.core import config
config.quiet = True
def work(path):
ffactory = FileFactory()
organizer = Organizer()
mfile = ffactory.create_file(path)
organizer.organize(mfile)
work_queue = Queue()
def worker_loop():
path = work_queue.get()
log.info("Ordering {}".format(path))
work(path)
log.info("Finished {}".format(path))
if __name__ == "__main__":
for param in sys.argv[1:]:
try:
p = Process(target=work, args=(param,))
p.start()
p.join()
except Exception as e:
log.exception(e)
log.info("Finished all tasks")
```
#### File: JmFoces/OrderFiles/main_sync.py
```python
import sys
import os
from os import listdir
from utils.log import log
from mtypes.creators.file_factory import FileFactory
from multiprocessing import Process
from config import EXCLUDE_ROOT
from utils.order.organizer import Organizer
from hachoir.core import config
config.quiet = True
def work(path):
ffactory = FileFactory()
organizer = Organizer()
mfile = ffactory.create_file(path)
organizer.organize(mfile)
if __name__ == "__main__":
procs = []
for param in sys.argv[1:]:
try:
if param not in EXCLUDE_ROOT :
work(param)
except Exception as e:
log.exception(e)
```
#### File: utils/order/index.py
```python
import sh
import os
import re
import config
import constants
from utils.exceptions import *
from utils.command import *
class Index:
INDEX_PATH = config.INDEX_PATH
DEPTH = 2
def __init__(self):
self.tmp = os.path.join(self.INDEX_PATH, "tmp")
sh.mkdir("-p", self.tmp)
self.step = 2
def put_file(self, path):
temp_file = str(sh.mktemp("-p", self.tmp).stdout,'utf8').strip()
path = path.strip()
if "'" in path:
returncode, stdout, stderr = launch_command(
"dd if=\"{0}\" iflag=nofollow bs=4k | tee {1} | sha1sum".format(
path,
temp_file
)
)
else:
returncode, stdout, stderr = launch_command(
"dd if='{0}' iflag=nofollow bs=4k | tee {1} | sha1sum".format(
path,
temp_file
)
)
if returncode != 0:
print(stdout)
print(stderr)
raise UnableToHashFile("File : {0}".format(path))
hash_str = re.search("^[0-9a-f]*", str(stdout,'utf8')).group(0)
destination_folder = self.create_destination_folder(hash_str)
destination_path = os.path.join(destination_folder, hash_str)
if not self.is_stored(hash_str):
sh.mv(temp_file, destination_path)
sh.chmod("444", destination_path)
else:
sh.rm(temp_file)
return destination_path
def create_destination_folder(self, hash_str):
count = 0
path = self.INDEX_PATH
while count < self.DEPTH:
path = os.path.join(path, hash_str[count:count+self.step])
sh.mkdir("-p", path)
count += self.step
return path
def is_stored(self, hash_str):
count = 0
path = self.INDEX_PATH
while count < self.DEPTH:
path = os.path.join(path, hash_str[count:count + self.step])
count += self.step
path = os.path.join(path, hash_str)
return os.path.exists(path)
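# Illustrative sketch (not part of the original module): put_file streams a
# file through dd | tee | sha1sum, so the content lands at a path derived
# from its own hash (content-addressed storage). For a hypothetical hash
# starting "ab12...", DEPTH=2 with step=2 gives one 2-character level:
#   index = Index()
#   stored = index.put_file("/data/photo.jpg")
#   # stored == os.path.join(config.INDEX_PATH, "ab", "ab12...")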
``` |
{
"source": "jmframe/mclstm_2021_extrapolate",
"score": 2
} |
#### File: neuralhydrology/utils/nh_results_ensemble.py
```python
import argparse
import pickle
import sys
import glob
from collections import defaultdict
from pathlib import Path
from typing import List
import numpy as np
import pandas as pd
import xarray as xr
from tqdm import tqdm
sys.path.append(str(Path(__file__).parent.parent.parent))
from neuralhydrology.datautils.utils import get_frequency_factor, sort_frequencies
from neuralhydrology.evaluation.metrics import calculate_metrics, get_available_metrics
from neuralhydrology.utils.config import Config
from neuralhydrology.utils.errors import AllNaNError
def create_results_ensemble(run_dirs: List[Path],
best_k: int = None,
metrics: List[str] = None,
period: str = 'test',
epoch: int = None) -> dict:
"""Average the predictions of several runs for the specified period and calculate new metrics.
If `best_k` is provided, only the k runs with the best validation NSE will be used in the generated ensemble.
Parameters
----------
run_dirs : List[Path]
List of directories of the runs to be merged
best_k : int, optional
If provided, will only merge the k best runs based on validation NSE.
metrics : List[str], optional
Use this parameter to override the metrics from the config files in the run directories.
period : {'test', 'validation', 'train'}, optional
One of 'train', 'validation' or 'test'. If best_k is used, only 'test' is allowed.
The run_directories must contain results files for the specified period.
epoch : int, optional
If provided, will ensemble the model predictions of this epoch otherwise of the last epoch
Returns
-------
dict
Dictionary of ensemble predictions and metrics per basin and frequency.
"""
if len(run_dirs) < 2:
raise ValueError('Need to provide at least two run directories to be merged.')
if period not in ['train', 'validation', 'test']:
raise ValueError(f'Unknown period {period}.')
if best_k is not None:
if period != 'test':
raise ValueError('If best_k is specified, the period must be test.')
print('Searching for best validation runs.')
best_val_runs = _get_best_validation_runs(run_dirs, best_k, epoch)
best_runs = [_get_results_file(run_dir, period, epoch) for run_dir in best_val_runs]
else:
best_runs = [_get_results_file(run_dir, period, epoch) for run_dir in run_dirs]
config = Config(run_dirs[0] / 'config.yml')
if metrics is not None:
# override metrics from config
config.metrics = metrics
# get frequencies from a results file.
# (they might not be stored in the config if the native data frequency was used)
run_results = pickle.load(open(best_runs[0], 'rb'))
frequencies = list(run_results[list(run_results.keys())[0]].keys())
return _create_ensemble(best_runs, frequencies, config)
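# Illustrative sketch (not part of the original module): averaging a
# hypothetical set of run directories, keeping only the 5 runs with the
# best validation NSE, and re-computing NSE/KGE on the ensemble mean:
#   run_dirs = [Path(p) for p in glob.glob('runs/lstm_seed*')]
#   ensemble = create_results_ensemble(run_dirs, best_k=5,
#                                      metrics=['NSE', 'KGE'])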
def _create_ensemble(results_files: List[Path], frequencies: List[str], config: Config) -> dict:
"""Averages the predictions of the passed runs and re-calculates metrics. """
lowest_freq = sort_frequencies(frequencies)[0]
ensemble_sum = defaultdict(dict)
target_vars = config.target_variables
print('Loading results for each run.')
for run in tqdm(results_files):
run_results = pickle.load(open(run, 'rb'))
for basin, basin_results in run_results.items():
for freq in frequencies:
freq_results = basin_results[freq]['xr']
# sum up the predictions of all runs for this basin and frequency
if freq not in ensemble_sum[basin]:
ensemble_sum[basin][freq] = freq_results
else:
for target_var in target_vars:
ensemble_sum[basin][freq][f'{target_var}_sim'] += freq_results[f'{target_var}_sim']
# divide the prediction sum by number of runs to get the mean prediction for each basin and frequency
print('Combining results and calculating metrics.')
ensemble = defaultdict(lambda: defaultdict(dict))
for basin in tqdm(ensemble_sum.keys()):
for freq in frequencies:
ensemble_xr = ensemble_sum[basin][freq]
# combine date and time to a single index to calculate metrics
# create datetime range at the current frequency, removing time steps that are not being predicted
frequency_factor = int(get_frequency_factor(lowest_freq, freq))
# make sure the last day is fully contained in the range
freq_date_range = pd.date_range(start=ensemble_xr.coords['date'].values[0],
end=ensemble_xr.coords['date'].values[-1] \
+ pd.Timedelta(days=1, seconds=-1),
freq=freq)
mask = np.ones(frequency_factor).astype(bool)
mask[:-len(ensemble_xr.coords['time_step'])] = False
freq_date_range = freq_date_range[np.tile(mask, len(ensemble_xr.coords['date']))]
ensemble_xr = ensemble_xr.isel(time_step=slice(-frequency_factor, None)).stack(
datetime=['date', 'time_step'])
ensemble_xr['datetime'] = freq_date_range
for target_var in target_vars:
# average predictions
ensemble_xr[f'{target_var}_sim'] = ensemble_xr[f'{target_var}_sim'] / len(results_files)
# clip predictions to zero
sim = ensemble_xr[f'{target_var}_sim']
if target_var in config.clip_targets_to_zero:
sim = xr.where(sim < 0, 0, sim)
# calculate metrics
metrics = config.metrics if isinstance(config.metrics, list) else config.metrics[target_var]
if 'all' in metrics:
metrics = get_available_metrics()
try:
ensemble_metrics = calculate_metrics(ensemble_xr[f'{target_var}_obs'],
sim,
metrics=metrics,
resolution=freq)
except AllNaNError as err:
msg = f'Basin {basin} ' \
+ (f'{target_var} ' if len(target_vars) > 1 else '') \
+ (f'{freq} ' if len(frequencies) > 1 else '') \
+ str(err)
print(msg)
ensemble_metrics = {metric: np.nan for metric in metrics}
# add variable identifier to metrics if needed
if len(target_vars) > 1:
ensemble_metrics = {f'{target_var}_{key}': val for key, val in ensemble_metrics.items()}
for metric, val in ensemble_metrics.items():
ensemble[basin][freq][f'{metric}_{freq}'] = val
ensemble[basin][freq]['xr'] = ensemble_xr
return dict(ensemble)
def _get_medians(results: dict, metric='NSE') -> dict:
"""Calculates median metric across all basins. """
medians = {}
key = metric
frequencies = list(results[list(results.keys())[0]].keys())
for freq in frequencies:
# if the one freq was resampled, there still is a freq suffix
if len(frequencies) > 1 or (len(frequencies) == 1 and
f'{metric}_{freq}' in results[list(results.keys())[0]][freq]):
key = f'{metric}_{freq}'
metric_values = [v[freq][key] for v in results.values() if freq in v.keys() and key in v[freq].keys()]
medians[freq] = np.nanmedian(metric_values)
return medians
def _get_best_validation_runs(run_dirs: List[Path], k: int, epoch: int = None) -> List[Path]:
"""Returns the k run directories with the best median validation metrics. """
val_files = list(zip(run_dirs, [_get_results_file(run_dir, 'validation', epoch) for run_dir in run_dirs]))
# get validation medians
median_sums = {}
for run_dir, val_file in val_files:
val_results = pickle.load(open(val_file, 'rb'))
val_medians = _get_medians(val_results)
print('validation', val_file, val_medians)
median_sums[run_dir] = sum(val_medians.values())
if k > len(run_dirs):
        raise ValueError(f'best_k is larger than the number of runs ({len(val_files)}).')
return sorted(median_sums, key=median_sums.get, reverse=True)[:k]
def _get_results_file(run_dir: Path, period: str = 'test', epoch: int = None) -> Path:
"""Returns the path of the results file in the given run directory. """
if epoch is not None:
dir_results_files = list(Path(run_dir).glob(f'{period}/model_epoch{str(epoch).zfill(3)}/{period}_results.p'))
else:
dir_results_files = list(Path(run_dir).glob(f'{period}/model_epoch*/{period}_results.p'))
if len(dir_results_files) == 0:
raise ValueError(f'{run_dir} is missing {period} results.')
return sorted(dir_results_files)[-1]
def _main():
parser = argparse.ArgumentParser()
parser.add_argument('--run-dir', type=str, help='Parent directory of the runs to be averaged.')
parser.add_argument('--period', type=str, choices=['train', 'validation', 'test'], default='test')
parser.add_argument('--save-file', type=str, help='Path to target pickle file for averaged results.')
parser.add_argument('--metrics',
type=str,
nargs='+',
required=False,
help='Option to override the metrics from the config.')
parser.add_argument('--best-k',
type=int,
required=False,
help='If provided, will only use the k results files with the best median validation NSEs.')
parser.add_argument('--epoch',
type=int,
required=False,
                        help='If provided, will return results of this specific epoch, otherwise of the last epoch.')
args = vars(parser.parse_args())
run_dirs = [Path(dir) for dir in glob.glob(args['run_dir'] + '/**')]
ensemble_results = create_results_ensemble(run_dirs,
args['best_k'],
metrics=args['metrics'],
period=args['period'],
epoch=args['epoch'])
pickle.dump(ensemble_results, open(args['save_file'], 'wb'))
print(f'Successfully written results to {args["save_file"]}.')
if __name__ == '__main__':
_main()
``` |
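The trickiest part of `_create_ensemble` above is mapping the 2-D (date, time_step) layout back onto a flat datetime index: a boolean mask of length `frequency_factor` keeps only the predicted steps of each day and is tiled across all dates. Below is a minimal, self-contained numpy/pandas sketch of that masking idea; the numbers (daily lowest frequency, hourly target frequency, four predicted hours per day) are made up for illustration.

```python
import numpy as np
import pandas as pd

frequency_factor = 24   # hours per day (1D -> 1H)
n_predicted_steps = 4   # length of the time_step dimension: last 4 hours of each day
dates = pd.date_range('2000-01-01', periods=3, freq='D')

# full hourly range that covers every day completely
hourly = pd.date_range(start=dates[0],
                       end=dates[-1] + pd.Timedelta(days=1, seconds=-1),
                       freq='h')

# keep only the last `n_predicted_steps` slots of every day
mask = np.ones(frequency_factor).astype(bool)
mask[:-n_predicted_steps] = False
predicted_times = hourly[np.tile(mask, len(dates))]

print(predicted_times)  # 3 days x 4 hours = 12 timestamps (20:00-23:00 each day)
```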
{
"source": "jmfrancklab/proc_scripts",
"score": 2
} |
#### File: proc_scripts/examples/Hermitian_Phasing_Actual_var_tau.py
```python
from pyspecdata import *
from pyspecProcScripts import *
from pylab import *
import sympy as s
from collections import OrderedDict
init_logging(level='debug')
rcParams["image.aspect"] = "auto" # needed for sphinx gallery
# sphinx_gallery_thumbnail_number = 1
t2, td, vd, power, ph1, ph2 = s.symbols("t2 td vd power ph1 ph2")
f_range = (-400, 400)
filename = '201113_TEMPOL_capillary_probe_var_tau_1'
signal_pathway = {'ph1':1,'ph2':0}
with figlist_var() as fl:
for nodename,file_location,postproc,label in [
('var_tau','ODNP_NMR_comp/var_tau','spincore_var_tau_v1',
'tau is 1 ms'),
]:
data = find_file(filename,exp_type=file_location,expno=nodename,
postproc=postproc,lookup=lookup_table)
data = data['tau',:-7]
tau_list = list(data.getaxis('tau'))
data.reorder(['ph1','ph2','tau','t2'])
data = data['t2':f_range]
mytable = []
mytable.append(['programmed tau / ms','estimated tau / ms','difference / ms'])
for j in range(len(tau_list)):
tablerow = []
alias_slop=3
programmed_tau = tau_list[j]
tablerow.append(programmed_tau/1e-3)
logger.info(strm("programmed tau:",programmed_tau))
this_data = data['tau',j]
this_data.ift("t2")
fl.basename = '%0.1f ms'%(programmed_tau/1e-3)
best_shift = hermitian_function_test(
select_pathway(this_data, signal_pathway),
aliasing_slop=alias_slop,
fl=fl)
logger.info(strm("best shift is:",best_shift))
tablerow.append(best_shift/1e-3)
diff = abs(best_shift - programmed_tau)
tablerow.append(diff/1e-3)
mytable.append(tablerow)
def tabulate(mytable):
print(' '.join(mytable[0]))
strlens = [len(j) for j in mytable[0]]
print(' '.join('-'*j for j in strlens))
formatstr = ' '.join(f'%{str(j)}.2f' for j in strlens)
for j in mytable[1:]:
print(formatstr%tuple(j))
tabulate(mytable)
```
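The inline `tabulate` helper above prints a fixed-width plain-text table whose column widths are taken from the header strings. A stand-alone sketch of the same pattern, with made-up numbers and no pyspecdata dependency:

```python
headers = ['programmed tau / ms', 'estimated tau / ms', 'difference / ms']
rows = [[1.0, 1.02, 0.02], [3.5, 3.47, 0.03]]

print(' '.join(headers))
widths = [len(h) for h in headers]
print(' '.join('-' * w for w in widths))
fmt = ' '.join(f'%{w}.2f' for w in widths)   # right-align each number under its header
for row in rows:
    print(fmt % tuple(row))
```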
#### File: pyspecProcScripts/first_level/fl_dummy.py
```python
class fl_dummy_class(object):
def plot(*args):
pass
def next(*args):
pass
def push_marker(*args):
pass
def pop_marker(*args):
pass
fl_dummy = fl_dummy_class
```
#### File: proc_scripts/pyspecProcScripts/integral_w_error.py
```python
from pyspecdata import *
from .integrate_limits import integrate_limits
from .simple_functions import select_pathway
import numpy as np
from pylab import *
def integral_w_errors(s,sig_path,error_path, convolve_method=None, indirect='vd', direct='t2',fl=None,return_frq_slice=False):
"""Calculates the propagation of error for the given signal and returns
signal with the error associated.
Parameters
==========
sig_path: dict
Dictionary of the path of the desired signal.
error_path: dict
Dictionary of all coherence pathways that are
not the signal pathway.
    convolve_method: str
        Method of convolution used when determining the integration limits.
indirect: str
Indirect axis.
direct: str
Direct axis.
Returns
=======
s: nddata
Data with error associated with coherence pathways
not included in the signal pathway.
"""
assert s.get_ft_prop(direct), "need to be in frequency domain!"
if convolve_method is not None:
kwargs = {'convolve_method':convolve_method}
else:
kwargs = {}
frq_slice = integrate_limits(select_pathway(s,sig_path),
**kwargs)
logging.debug(strm('frq_slice is',frq_slice))
s = s[direct:frq_slice]
f = s.getaxis(direct)
df = f[1]-f[0]
errors = []
all_labels = set(s.dimlabels)
all_labels -= set([indirect,direct])
extra_dims = [j for j in all_labels if not j.startswith('ph')]
if len(extra_dims) > 0:
raise ValueError("You have extra (non-phase cycling, non-indirect) dimensions: "
+str(extra_dims))
collected_variance = ndshape(
[ndshape(s)[indirect],len(error_path)],[indirect,'pathways']).alloc()
avg_error = []
for j in range(len(error_path)):
# calculate N₂ Δf² σ², which is the variance of the integral (by error propagation)
# where N₂ is the number of points in the indirect dimension
s_forerror = select_pathway(s,error_path[j])
# previous line wipes everything out and starts over -- why not use
# collected_variance above, as I had originally set up --> part of
# issue #44
if j==0: N2 = ndshape(s_forerror)[direct]
# mean divides by N₁ (indirect), integrate multiplies by Δf, and the
# mean sums all elements (there are N₁N₂ elements)
s_forerror -= s_forerror.C.mean_all_but([indirect, direct]).mean(direct)
s_forerror.run(lambda x: abs(x)**2/2).mean_all_but([direct,indirect]).mean(direct)
        s_forerror *= df**2  # Δf²
s_forerror *= N2
avg_error.append(s_forerror)
avg_error = sum(avg_error)/len(avg_error)
# {{{ variance calculation for debug
#print("(inside automatic routine) the stdev seems to be",sqrt(collected_variance/(df*N2)))
#print("automatically calculated integral error:",sqrt(collected_variance.data))
# }}}
s = select_pathway(s,sig_path)
retval = s.integrate(direct).set_error(sqrt(s_forerror.data))
if not return_frq_slice:
return retval
elif return_frq_slice:
return retval, frq_slice
def active_propagation(s, signal_path, indirect='vd', direct='t2',fl=None,offset=500.0):
"""propagate error from the region `offset` to the right of the peak (where
we assume there is only noise), in the signal pathway `signal_path`, which
we assume is the active coherence pathway.
Include only the real part of the signal.
Parameters
==========
signal_path: dict
        Dictionary giving the active CT pathway.
indirect: str
Name of the indirect dimension -- used to check that you don't have
directions that are not direct, indirect, or phase cycling.
direct: str
Name of the direct dimension
offset: float
Distance (in Hz) between the auto-chosen integration bounds from
:func:`integrate_limits` and the start of the "noise region."
Returns
=======
retval: nddata
just a data object with the error that this method predicts
"""
assert s.get_ft_prop(direct), "need to be in frequency domain!"
frq_slice = integrate_limits(select_pathway(s,signal_path),fl=fl)
logging.debug(strm('frq_slice is',frq_slice))
s = s[direct:((frq_slice[-1]+offset),None)] # grab all data more than
# offset to the right of the
# peak
df = s.get_ft_prop(direct,'df')
all_labels = set(s.dimlabels)
all_labels -= set([indirect,direct])
extra_dims = [j for j in all_labels if not j.startswith('ph')]
if len(extra_dims) > 0:
raise ValueError("You have extra (non-phase cycling, non-indirect) dimensions: "
+str(extra_dims))
s_forerror = select_pathway(s, signal_path)
N = ndshape(s_forerror)[direct]
s_forerror.run(real).run(lambda x: abs(x)**2).mean_all_but([direct,indirect]).mean(direct)
s_forerror *= df**2
s_forerror *= N
return s_forerror.run(sqrt)
```
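The comments inside `integral_w_errors` rely on the error-propagation rule that the variance of a Riemann-sum integral over N independent noise points of standard deviation σ and spacing Δf is N·Δf²·σ². A quick Monte-Carlo sanity check of that rule on synthetic white noise (numpy only, all numbers hypothetical):

```python
import numpy as np

N = 256          # number of points integrated over
df = 0.5         # frequency spacing Δf
sigma = 0.1      # per-point noise standard deviation
n_trials = 20000

rng = np.random.default_rng(0)
noise = rng.normal(scale=sigma, size=(n_trials, N))
integrals = noise.sum(axis=1) * df        # Riemann-sum integral of each noise trace

print("empirical variance:     ", integrals.var())
print("predicted N*df^2*sigma^2:", N * df**2 * sigma**2)
```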
#### File: proc_scripts/pyspecProcScripts/simple_functions.py
```python
"First order functions for very simple (a few lines) data manipulation"
import numpy as np
def select_pathway(*args,**kwargs):
r"""select a particular CT pathway from the signal `s`
    Arguments are *either* ``pathway`` -- a dict of key/value pairs indicating
    the pathway -- **or** the same set of key/value pairs passed directly as
    keyword arguments.
Parameters
==========
s: nddata
the data whose coherence pathway you would like to select
pathway: dict
keys are the names of the coherence transfer dimensions (conj. of phase
cycling dimensions) and values are the pathway you want to select
"""
if len(args) == 2 and len(kwargs) == 0:
s,pathway = args
    elif len(args) == 1 and len(kwargs) > 0:
s = args[0]
pathway = kwargs
else:
raise ValueError("your arguments don't make any sense!!")
retval = s
for k, v in pathway.items():
retval = retval[k,v]
return retval
def determine_sign(s, direct="t2", fl=None):
"""Given that the signal resides in `pathway`, determine the sign of the signal.
The sign can be used, e.g. so that all data in an inversion-recover or
enhancement curve can be aligned together.
Parameters
==========
s: nddata
data with a single (dominant) peak, where you want to return the sign
of the integral over all the data.
This should only contain **a single coherence pathway**.
direct: str (default "t2")
Name of the direct dimension, along which the sum/integral is taken
Returns
=======
data_sgn: nddata
A dataset with all +1 or -1 (giving the sign of the original signal).
Does *not* include the `direct` dimension
"""
assert s.get_ft_prop(direct), "this only works on data that has been FT'd along the direct dimension"
if fl is not None:
fl.push_marker()
fl.next('selected pathway')
fl.image(s.C.setaxis(
'vd','#').set_units('vd','scan #'))
data_sgn = s.C.sum(direct)
data_sgn /= data_sgn.max().item()
data_sgn.run(np.real).run(lambda x: np.sign(x))
if fl is not None:
fl.next('check sign')
fl.image((s.C.setaxis(
'vd','#').set_units('vd','scan #'))*data_sgn)
fl.pop_marker()
return data_sgn
```
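The logic of `determine_sign` reduces to: integrate each scan along the direct dimension, normalize by the largest value, and take the sign of the real part. A minimal numpy sketch of that idea on a hypothetical (scan × direct) array, without pyspecdata:

```python
import numpy as np

rng = np.random.default_rng(1)
n_scans, n_direct = 6, 128
# hypothetical data: alternating-sign peaks plus a little complex noise
signs_true = np.array([-1, -1, -1, 1, 1, 1])
data = signs_true[:, None] * np.exp(-((np.arange(n_direct) - 64) / 8.0) ** 2)
data = data + 0.01 * (rng.normal(size=data.shape) + 1j * rng.normal(size=data.shape))

summed = data.sum(axis=1)                 # integrate each scan along the direct axis
summed = summed / np.abs(summed).max()    # normalize by the largest magnitude
data_sgn = np.sign(summed.real)           # +1 or -1 per scan

print(data_sgn)   # recovers [-1, -1, -1, 1, 1, 1]
```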
#### File: proc_scripts/pyspecProcScripts/slice_FID_from_echo.py
```python
from pyspecdata import *
from sympy import symbols
from .simple_functions import select_pathway
def slice_FID_from_echo(s, max_t=None, signal_pathway={'ph1':1, 'ph2':-2}, fl=None):
"""Automatically slices the FID out of an echo.
    Determines the proper position for :math:`t=0` with the hermitian function test,
    applies a zeroth-order phase correction, and slices from the center of the
    echo (:math:`t=0`) onward to `max_t`.
    Parameters
    ==========
    max_t: int
        where the slice ends in the time domain.
    signal_pathway: dict
        keys are the phase-cycling dimensions (e.g. 'ph1' and 'ph2') and the
        values select the desired coherence pathway
Returns
=======
s: phase corrected and sliced data
"""
best_shift = hermitian_function_test(select_pathway(s,signal_pathway))
s.setaxis('t2',lambda x: x-best_shift)
s.register_axis({'t2':0}, nearest=False)
coh_slice = select_pathway(s['t2':0],signal_pathway)
print(coh_slice.dimlabels)
if len(coh_slice.dimlabels) > 0:
        assert len(coh_slice.dimlabels) == 1, repr(ndshape(coh_slice))+" has too many dimensions"
ph0 = zeroth_order_ph(coh_slice, fl=fl)
logger.info(strm('phasing dimension as one'))
else:
logger.info(strm("there is only one dimension left -- standard 1D zeroth order phasing"))
ph0 = coh_slice/abs(coh_slice)
s /= ph0
if fl is not None:
fl.side_by_side('time domain (after filtering and phasing)\n$\\rightarrow$ use to adjust time range', s, (0,max_t))
s = s['t2':(0,max_t)]
s['t2':0] *= 0.5
if 'power' in s.dimlabels:
if select_pathway(s['t2':0]['power',0],signal_pathway).real < 0:
s *= -1
elif 'vd' in s.dimlabels:
if select_pathway(s['t2':0]['vd',-1],signal_pathway).real < 0:
s *= -1
return s
```
#### File: pyspecProcScripts/third_level/process_IR.py
```python
import pylab as plb
from pyspecdata import *
from scipy.optimize import minimize, leastsq
from sympy import exp as s_exp
import numpy as np
import matplotlib.pyplot as plt
from sympy import symbols, latex, Symbol
from proc_scripts import *
from .simple_functions import select_pathway
t2 = symbols('t2')
def as_scan_nbr(d):
    '''Since we need to relabel vd frequently, we make a helper method.'''
return d.C.setaxis('vd','#').set_units('vd','scan #')
def process_IR(s, label='', fl=None,
this_l = 0.032,
l = sqrt(np.logspace(-8.0,0.5,35)),
signal_pathway = {'ph1':0,'ph2':1},
excluded_pathways = [(0,0)],
clock_correction = True,
W=6.2,
f_range = (None,None),
t_range = (None,83e-3),
IR = True,
flip=False,
sign = None,
ILT=False):
s *= sign
s['ph2',0]['ph1',0]['t2':0] = 0 # kill the axial noise
s.ift('t2')
s.reorder(['ph1','ph2','vd','t2'])
#{{{ Applying DC offset
s.ift(['ph1','ph2'])
t_start = t_range[-1] / 4
t_start *= 3
rx_offset_corr = s['t2':(t_start,None)]
rx_offset_corr = rx_offset_corr.data.mean()
s -= rx_offset_corr
s.ft('t2')
s.ft(['ph1','ph2'])
#}}}
    if 'indirect' in s.dimlabels:
        s.rename('indirect','vd')
    zero_crossing=abs(select_pathway(s,signal_pathway)).sum('t2').argmin('vd',raw_index=True).item()
# no rough centering anymore -- if anything, we should preproc based on τ,
# etc, but otherwise let the hermitian test handle it
#{{{ phasing the aligned data
s = s['t2':f_range]
s.ift('t2')
if clock_correction:
#{{{ clock correction
clock_corr = nddata(np.linspace(-3,3,2500),'clock_corr')
s.ft('t2')
if fl is not None:
fl.next('before clock correction')
fl.image(as_scan_nbr(s))
s_clock=s['ph1',1]['ph2',0].sum('t2')
s.ift(['ph1','ph2'])
min_index = abs(s_clock).argmin('vd',raw_index=True).item()
s_clock *= np.exp(-1j*clock_corr*s.fromaxis('vd'))
s_clock['vd',:min_index+1] *=-1
s_clock.sum('vd').run(abs)
if fl is not None:
fl.next('clock correction')
fl.plot(s_clock,'.',alpha=0.7)
clock_corr = s_clock.argmax('clock_corr').item()
plt.axvline(x=clock_corr, alpha=0.5, color='r')
s *= np.exp(-1j*clock_corr*s.fromaxis('vd'))
s.ft(['ph1','ph2'])
if fl is not None:
fl.next('after auto-clock correction')
fl.image(s.C.setaxis('vd','#'))
s.ift('t2')
#{{{Applying phase corrections
best_shift,max_shift = hermitian_function_test(select_pathway(s.C.mean('vd'),signal_pathway))
logger.info(strm("best shift is", best_shift))
s.setaxis('t2', lambda x: x-best_shift).register_axis({'t2':0})
if fl is not None:
fl.next('time domain after hermitian test')
fl.image(as_scan_nbr(s))
s.ft('t2')
if fl is not None:
fl.next('frequency domain after hermitian test')
fl.image(as_scan_nbr(s))
#}}}
s.ift('t2')
s.ift(['ph1','ph2'])
phasing = s['t2',0].C
phasing.data *= 0
phasing.ft(['ph1','ph2'])
phasing['ph1',0]['ph2',1] = 1
phasing.ift(['ph1','ph2'])
ph0 = s['t2':0]/phasing
ph0 /= abs(ph0)
s /= ph0
s.ft(['ph1','ph2'])
if fl is not None:
fl.next('zeroth order corrected')
fl.image(as_scan_nbr(s))
s.ft('t2')
if fl is not None:
fl.next('phased data -- frequency domain')
fl.image(as_scan_nbr(s))
#}}}
#}}}
if 'ph2' in s.dimlabels:
s.reorder(['ph1','ph2','vd','t2'])
else:
s.reorder(['ph1','vd','t2'])
#{{{Correlation Alignment
s.ift(['ph1','ph2'])
fl.basename='correlation subroutine:'
#for the following, should be modified so we can pass a mask, rather than specifying ph1 and ph2, as here
opt_shift,sigma = correl_align(s,indirect_dim='vd',
ph1_selection=signal_pathway['ph1'],ph2_selection=signal_pathway['ph2'],
sigma=10)
s.ift('t2')
s *= np.exp(-1j*2*pi*opt_shift*s.fromaxis('t2'))
s.ft('t2')
fl.basename = None
if fl is not None:
fl.next(r'after correlation, $\varphi$ domain')
fl.image(as_scan_nbr(s))
s.ift('t2')
s.ft(['ph1','ph2'])
if fl is not None:
fl.next(r'after correlation')
fl.image(as_scan_nbr(s))
if 'ph2' in s.dimlabels:
s.reorder(['ph1','ph2','vd','t2'])
else:
s.reorder(['ph1','vd','t2'])
#}}}
#{{{FID slice
s = s['t2':(0,t_range[-1])]
s['t2',0] *= 0.5
s.ft('t2')
if fl is not None:
fl.next('FID sliced -- frequency domain')
fl.image(as_scan_nbr(s))
#}}}
#s *= sign
data = s.C
zero_crossing=abs(select_pathway(s,signal_pathway)).sum('t2').argmin('vd',raw_index=True).item()
if flip:
s['vd',:zero_crossing] *= -1
    # {{{ this is the general way to do it for 2 pulses; I don't offhand know a compact method for N pulses
error_path = (set(((j,k) for j in range(ndshape(s)['ph1']) for k in range(ndshape(s)['ph2'])))
- set(excluded_pathways)
- set([(signal_pathway['ph1'],signal_pathway['ph2'])]))
error_path = [{'ph1':j,'ph2':k} for j,k in error_path]
# }}}
#{{{Integrating with associated error from excluded pathways
s_int,frq_slice,mystd = integral_w_errors(s,signal_pathway,error_path,
fl=fl,return_frq_slice=True)
x = s_int.get_error()
x[:] /= sqrt(2)
logger.info(strm("here is what the error looks like",s_int.get_error()))
if fl is not None:
fl.next('Integrated data - recovery curve')
fl.plot(s_int,'o',capsize=6, label='real')
fl.plot(s_int.imag,'o',capsize=6,label='imaginary')
#}}}
#{{{Fitting Routine
x = s_int.fromaxis('vd')
f = fitdata(s_int)
M0,Mi,R1,vd = symbols("M_0 M_inf R_1 vd")
if IR:
f.functional_form = Mi - 2*Mi*s_exp(-vd*R1)
else:
f.functional_form = Mi*(1-(2-s_exp(-W*R1))*s_exp(-vd*R1))
f.fit()
logger.info(strm("output:",f.output()))
logger.info(strm("latex:",f.latex()))
T1 = 1./f.output('R_1')
if fl is not None:
fl.next('fit',legend=True)
fl.plot(s_int,'o', capsize=6, label='actual data')
fl.plot(s_int.imag,'o',capsize=6,label='actual imaginary')
fl.plot(f.eval(100),label='fit')
plt.text(0.75, 0.25, f.latex(), transform=plt.gca().transAxes,size='medium',
horizontalalignment='center',verticalalignment='center',color='k',
position=(0.33,0.95),fontweight='bold')
plt.legend(bbox_to_anchor=(1,1.01),loc='upper left')
print("YOUR T1 IS:",T1)
return T1
#}}}
if ILT:
T1 = nddata(np.logspace(-3,3,150),'T1')
plot_Lcurve = False
if plot_Lcurve:
def vec_lcurve(l):
return s.C.nnls('vd',T1,lambda x,y: 1.0-2*np.exp(-x/y), l=l)
x=vec_lcurve(l)
x_norm = x.get_prop('nnls_residual').data
r_norm = x.C.run(np.linalg.norm,'T1').data
with figlist_var() as fl:
fl.next('L-Curve')
plt.figure(figsize=(15,10))
fl.plot(np.log10(r_norm[:,0]),np.log10(x_norm[:,0]),'.')
annotate_plot = True
show_lambda = True
if annotate_plot:
if show_lambda:
for j,this_l in enumerate(l):
plt.annotate('%0.3f'%this_l, (np.log10(r_norm[j,0]),np.log10(x_norm[j,0])),
ha='left',va='bottom',rotation=45)
else:
for j,this_l in enumerate(l):
plt.annotate('%d'%j, (np.log10(r_norm[j,0]),np.log10(x_norm[j,0])),
ha='left',va='bottom',rotation=45)
d_2d = s*nddata(r_[1,1,1],r'\Omega')
offset = s.get_prop('proc')['OFFSET']
o1 = s.get_prop('acq')['O1']
sfo1 = s.get_prop('acq')['BF1']
s.setaxis('t2',lambda x:
x+o1)
s.setaxis('t2',lambda x:
x/(sfo1)).set_units('t2','ppm')
s.set_prop('x_inverted',True)
soln = s.real.C.nnls('vd',T1, lambda x,y: 1.0-2.*np.exp(-x/y),l=this_l)
soln.reorder('t2',first=False)
soln.rename('T1','log(T1)')
soln.setaxis('log(T1)',np.log10(T1.data))
fl.next('w=3')
fl.image(soln)
logger.info(strm("SAVING FILE"))
if save_npz:
np.savez(thisfile+'_'+str(nodename)+'_ILT_inv',
data=soln.data,
logT1=soln.getaxis('log(T1)'),
t2=soln.getaxis('t2'))
logger.info(strm("FILE SAVED"))
T1_values[i] = T1
``` |
{
"source": "jmfranck/pyDiffTools",
"score": 3
} |
#### File: pyDiffTools/pydifftools/match_spaces.py
```python
from difflib import SequenceMatcher
def run(arguments):
with open(arguments[0], encoding='utf-8') as fp:
text1 = fp.read()
#text1 = text1.decode('utf-8')
fp = open(arguments[1], encoding='utf-8')
text2 = fp.read()
fp.close()
#text2 = text2.decode('utf-8')
utf_char = '\u00a0' # unicode no break space
text2 = text2.replace(utf_char,' ')# replace it
utf_char = '\u2004' # three-per-em space
text2 = text2.replace(utf_char,' ')# replace it
def parse_whitespace(s):
retval = []
white_or_not = []
current_string = ''
is_whitespace = True
for j in s:
if j in [' ','\t','\r','\n']:
if not is_whitespace:
retval.append(current_string)
white_or_not.append(False)# I have switched to whitespace, I was not whitespace
current_string = j
else:
current_string += j
is_whitespace = True
else:
if is_whitespace and len(retval) > 0:
retval.append(current_string)
if current_string.count('\n')>1:
white_or_not.append(False)# double newline is not "whitespace"
else:
white_or_not.append(True)
current_string = j
else:
current_string += j
is_whitespace = False
retval.append(current_string)
white_or_not.append(is_whitespace)
if is_whitespace and current_string.count('\n')>1:
white_or_not.append(False)# double newline is not "whitespace"
else:
white_or_not.append(is_whitespace)
return retval,white_or_not
#print zip(*tuple(parse_whitespace(text1)))
#print zip(*tuple(parse_whitespace(text2)))
tokens,iswhitespace = parse_whitespace(text1)
def generate_word_lists(input_tokens,input_iswhitespace):
retval_words = []
retval_whitespace = []
retval_isdoublenewline = []
j = 0
# go through and add whitespace and words, always in pairs
while j < len(input_tokens):
if input_iswhitespace[j]:
# make it so the whitespace always comes "after" the word
retval_words.append('')
retval_whitespace.append(input_tokens[j])
j += 1
elif j == len(input_tokens) - 1:
# this is the last one, so just add it
retval_words.append(input_tokens[j])
retval_whitespace.append('')
retval_isdoublenewline.append(False)
else:# it's a word
retval_words.append(input_tokens[j])
if input_iswhitespace[j+1]:
retval_whitespace.append(input_tokens[j+1])
j += 2
else:
# this can happen if it's a newline combo or followed by a newline combo
#print repr(input_tokens[j]),'is not followed by whitespace but by',repr(input_tokens[j+1])
retval_whitespace.append('')
j += 1
if retval_words[-1].count('\n') > 1:# double newline
retval_isdoublenewline.append(True)
else:
retval_isdoublenewline.append(False)
return retval_words,retval_whitespace,retval_isdoublenewline
text1_words,text1_whitespace,text1_isdoublenewline = generate_word_lists(tokens,iswhitespace)
#print "-------------------"
#print "align words only with words and whitespace"
#print zip(text1_words, text1_words_and_whitespace)
#print "-------------------"
tokens,iswhitespace = parse_whitespace(text2)
text2_words,text2_whitespace,text2_isdoublenewline = generate_word_lists(tokens,iswhitespace)
s = SequenceMatcher(None,text1_words,text2_words)
diffs = s.get_opcodes()
#print diffs
final_text = ''
newline_debt = 0
last_indent = ''
for j in diffs:
if j[0] == 'equal':
temp_addition = text1_words[j[1]:j[2]]
whitespace = text1_whitespace[j[1]:j[2]]
for k in range(len(temp_addition)):
final_text += temp_addition[k] + whitespace[k]
idx = whitespace[k].find('\n')
if idx > -1:
last_indent = whitespace[k][idx+1:]
if j[2] - j[1] > 4:# if five or more words have matched, forgive my newline debt
newline_debt = 0
elif j[0] == 'delete':
if sum([thisstr.count('\n') for thisstr in text1_whitespace[j[1]:j[2]]]) > 0:
newline_debt += 1
#print "delete -- newline debt is now",newline_debt
elif j[0] == 'replace':
print("newline debt",newline_debt)
newline_debt += sum([thisstr.count('\n') for thisstr in text1_whitespace[j[1]:j[2]]])
#print "replace -- newline debt is now",newline_debt
print("about to replace",repr(text1_words[j[1]:j[2]]).encode('unicode-escape'))
print(" with",repr(text2_words[j[3]:j[4]]).encode('unicode-escape'))
print(" whitepace from ",repr(text1_whitespace[j[1]:j[2]]).encode('unicode-escape'))
oldver_whitespace = text1_whitespace[j[1]:j[2]]
print(" whitepace to ",repr(text2_whitespace[j[3]:j[4]]).encode('unicode-escape'))
print(" newline debt",newline_debt)
temp_addition = text2_words[j[3]:j[4]]
#{{{ check to see if I am adding any double newlines -- if I am use the original version
temp_isdoublenewline = text2_isdoublenewline[j[3]:j[4]]
tstdbl_i = 0
tstdbl_j = 0
while tstdbl_i < len(temp_isdoublenewline):
if temp_isdoublenewline[tstdbl_i]:
matched = False
while tstdbl_j < len(text1_isdoublenewline[j[1]:j[2]]) and not matched:
if text1_isdoublenewline[j[1]:j[2]][tstdbl_j]:
temp_addition[tstdbl_i] = text1_words[j[1]:j[2]][tstdbl_j]
matched = True
tstdbl_j += 1
tstdbl_i += 1
#}}}
newver_whitespace = text2_whitespace[j[3]:j[4]]
whitespace = [' ' if len(x) > 0 else '' for x in newver_whitespace]# sometimes, the "whitespace" can be nothing
if newline_debt > 0:
for k in range(len(temp_addition)):
if newver_whitespace[k].count('\n') > 0:
whitespace[k] = '\n'+last_indent
newline_debt -= whitespace[k].count('\n')# shouldn't be more than one but doesn't hurt
if newline_debt < 1:
break
            #if I can't make up for the whitespace with the new text, put it where it went in the old text
for k in range(min(len(oldver_whitespace),len(whitespace))):
if oldver_whitespace[k].count('\n') > 0:
whitespace[k] = oldver_whitespace[k]
newline_debt -= whitespace[k].count('\n')# shouldn't be more than one but doesn't hurt
if newline_debt < 1:
break
print(" whitepace became",repr(whitespace))
for k in range(len(temp_addition)):
final_text += temp_addition[k] + whitespace[k]
idx = whitespace[k].find('\n')
if idx > -1:
last_indent = whitespace[k][idx+1:]
elif j[0] == 'insert':
temp_addition = text2_words[j[3]:j[4]]
newver_whitespace = text2_whitespace[j[3]:j[4]]
whitespace = [' ' if len(x) > 0 else '' for x in newver_whitespace]# sometimes, the "whitespace" can be nothing
if newline_debt > 0:
for k in range(len(temp_addition)):
if newver_whitespace[k].count('\n') > 0:
whitespace[k] = '\n'+last_indent
newline_debt -= whitespace[k].count('\n')# shouldn't be more than one but doesn't hurt
if newline_debt < 1:
break
for k in range(len(temp_addition)):
final_text += temp_addition[k] + whitespace[k]
idx = whitespace[k].find('\n')
if idx > -1:
last_indent = whitespace[k][idx+1:]
else:
raise ValueError("unknown opcode"+j[0])
fp = open(arguments[1],'w',encoding='utf-8')
fp.write(final_text)
fp.close()
```
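The whole of `match_spaces` hinges on `difflib.SequenceMatcher.get_opcodes()`, which labels each aligned span of the two word lists as equal, replace, delete, or insert. A tiny self-contained example (made-up sentences) of what those opcodes look like:

```python
from difflib import SequenceMatcher

old_words = "the quick brown fox jumps".split()
new_words = "the slow brown fox leaps high".split()

s = SequenceMatcher(None, old_words, new_words)
for tag, i1, i2, j1, j2 in s.get_opcodes():
    # tags come out as: equal, replace, equal, replace
    print(tag, old_words[i1:i2], '->', new_words[j1:j2])
```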
#### File: pyDiffTools/pydifftools/unseparate_comments.py
```python
import re
import sys
import codecs
sys.stdout = codecs.getwriter("utf-8")(sys.stdout.detach())
from .comment_functions import generate_alphabetnumber,matchingbrackets
def tex_unsepcomments(texfile):
if texfile[-12:] == '_sepcomm.tex':
base_filename = texfile[:-12]
print("yes, a _sepcomm.tex file called",base_filename,'.tex')
elif texfile[-4:] == '.tex':
base_filename = texfile[:-4]
print("yes, a .tex file called",base_filename,'.tex')
else:
raise RuntimeError("not a tex file??")
with open(base_filename+'_comments.tex','r',encoding='utf-8') as fp:
content = fp.read()
#comment_def_re = re.compile(r"\\newcommand\{\%s[A-Z]+")
names = ["pdfcommentAG", "pdfcommentAB", "pdfcommentJF", "pdfcommentG"]
list_of_names = []
list_of_commands = []
list_of_content = []
for j in range(0,len(names)):
comment_def_re = re.compile(r"\\newcommand\{\\(%s[a-z]+)\}\{"%(names[j]))
for m in comment_def_re.finditer(content):
print("found %d:%d"%(m.start(),m.end()),m.groups()[0])
print("text:",content[m.start():m.end()])
a,b = matchingbrackets(content,m.end()-1,'{')
print("found from %d to %d"%(a,b))
print("-----content------")
print(content[a:b+1])
print("------------------")
list_of_names.append(names[j])
list_of_commands.append(m.groups()[0])
list_of_content.append(content[a+1:b])
with open(texfile,'r', encoding='utf-8') as fp:
content = fp.read()
for j in range(0,len(list_of_names)):
a = content.find("\\%s"%list_of_commands[j])
if a<0:
raise RuntimeError("couldn't find command \\%s"%list_of_commands[j])
else:
starthighlight,b = matchingbrackets(content,a,'{')
highlight = content[starthighlight+1:b]
print("found command \\%s with highlight {%s} and going to add content {%s}"%(list_of_commands[j],highlight,list_of_content[j]))
if len(highlight) > 0:
content = content[:a] + '\\%s[%s]{%s}'%(list_of_names[j],highlight,list_of_content[j]) + content[b+1:]
else:
content = content[:a] + '\\%s{%s}'%(list_of_names[j],list_of_content[j]) + content[b+1:]
content = re.sub('\\\\include{%s_comments}\n'%base_filename,'',content)
content = re.sub('%%NUMBER OF COMMENTS [0-9]+ *\n','',content)
with open(base_filename+'.tex','w',encoding='utf-8') as fp:
fp.write(content)
print("wrote output to",base_filename+'.tex')
``` |
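The core of `tex_unsepcomments` is the regular expression that pulls the generated comment-macro names out of the `_comments.tex` file and leaves the match positioned on the opening brace that is then handed to `matchingbrackets`. A small stand-alone illustration of that pattern on a made-up line (the macro name and comment text are hypothetical):

```python
import re

content = r"\newcommand{\pdfcommentJFab}{Please rephrase this sentence.}"
name = "pdfcommentJF"

comment_def_re = re.compile(r"\\newcommand\{\\(%s[a-z]+)\}\{" % name)
m = comment_def_re.search(content)
print(m.groups()[0])          # -> pdfcommentJFab
print(content[m.end() - 1])   # -> '{', the bracket passed on to matchingbrackets()
```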
{
"source": "jmftrindade/6.830-project",
"score": 2
} |
#### File: 6.830-project/FD_CFD_extraction-master/tane.py
```python
from pandas import *
from collections import defaultdict
import numpy as NP
import sys
def list_duplicates(seq):
tally = defaultdict(list)
for i,item in enumerate(seq):
tally[item].append(i)
return ((key,locs) for key,locs in tally.items()
if len(locs)>0)
def findCplus(x): # this computes the Cplus of x as an intersection of smaller Cplus sets
global dictCplus
thesets=[]
for a in x:
if x.replace(a,'') in dictCplus.keys():
temp = dictCplus[x.replace(a,'')]
else:
temp=findCplus(x.replace(a,'')) # compute C+(X\{A}) for each A at a time
#dictCplus[x.replace(a,'')] = temp
thesets.insert(0, set(temp))
if list(set.intersection(*thesets)) == []:
cplus = []
else:
cplus = list(set.intersection(*thesets)) # compute the intersection in line 2 of pseudocode
return cplus
def compute_dependencies(level, listofcols):
global dictCplus
global finallistofFDs
global listofcolumns
for x in level:
thesets=[]
for a in x:
if x.replace(a,'') in dictCplus.keys():
temp = dictCplus[x.replace(a,'')]
else:
temp=computeCplus(x.replace(a,'')) # compute C+(X\{A}) for each A at a time
dictCplus[x.replace(a,'')] = temp
thesets.insert(0, set(temp))
if list(set.intersection(*thesets)) == []:
dictCplus[x] = []
else:
dictCplus[x] = list(set.intersection(*thesets)) # compute the intersection in line 2 of pseudocode
for x in level:
for a in x:
if a in dictCplus[x]:
#if x=='BCJ': print "dictCplus['BCJ'] = ", dictCplus[x]
if validfd(x.replace(a,''), a): # line 5
finallistofFDs.append([x.replace(a,''), a]) # line 6
dictCplus[x].remove(a) # line 7
listofcols=listofcolumns[:]
for j in x: # this loop computes R\X
if j in listofcols: listofcols.remove(j)
for b in listofcols: # this loop removes each b in R\X from C+(X)
if b in dictCplus[x]: dictCplus[x].remove(b)
def computeCplus(x): # this computes the Cplus from the first definition in section 3.2.2 of TANE paper. output should be a list of single attributes
global listofcolumns
listofcols = listofcolumns[:]
if x=='': return listofcols # because C+{phi} = R
cplus = []
for a in listofcols:
for b in x:
temp = x.replace(a,'')
temp = temp.replace(b,'')
if not validfd(temp, b):
cplus.append(a)
return cplus
def validfd(y,z):
if y=='' or z=='': return False
ey = computeE(y)
eyz = computeE(y+z)
if ey == eyz :
return True
else:
return False
def computeE(x):
global totaltuples
global dictpartitions
doublenorm = 0
for i in dictpartitions[''.join(sorted(x))]:
doublenorm = doublenorm + len(i)
e = (doublenorm-len(dictpartitions[''.join(sorted(x))]))/float(totaltuples)
return e
def check_superkey(x):
global dictpartitions
if ((dictpartitions[x] == [[]]) or (dictpartitions[x] == [])):
return True
else:
return False
def prune(level):
global dictCplus
global finallistofFDs
stufftobedeletedfromlevel = []
for x in level: # line 1
if dictCplus[x]==[]: # line 2
level.remove(x) # line 3
if check_superkey(x): # line 4 ### should this check for a key, instead of super key??? Not sure.
temp = dictCplus[x][:]
for i in x: # this loop computes C+(X) \ X
if i in temp: temp.remove(i)
for a in temp: # line 5
thesets=[]
for b in x:
if not( ''.join(sorted((x+a).replace(b,''))) in dictCplus.keys()):
dictCplus[''.join(sorted((x+a).replace(b,'')))] = findCplus(''.join(sorted((x+a).replace(b,''))))
thesets.insert(0,set(dictCplus[''.join(sorted((x+a).replace(b,'')))]))
if a in list(set.intersection(*thesets)): # line 6
finallistofFDs.append([x, a]) # line 7
#print "adding key FD: ", [x,a]
if x in level: stufftobedeletedfromlevel.append(x) # line 8
for item in stufftobedeletedfromlevel:
level.remove(item)
def generate_next_level(level):
nextlevel=[]
for i in range(0,len(level)): # pick an element
for j in range(i+1, len(level)): # compare it to every element that comes after it.
if ((not level[i]==level[j]) and level[i][0:-1]==level[j][0:-1]): # i.e. line 2 and 3
x = level[i]+level[j][-1] #line 4
flag = True
for a in x: # this entire for loop is for the 'for all' check in line 5
if not(x.replace(a, '') in level):
flag=False
if flag==True:
nextlevel.append(x)
stripped_product(x, level[i] , level[j] ) # compute partition of x as pi_y * pi_z (where y is level[i] and z is level[j])
return nextlevel
def stripped_product(x,y,z):
global dictpartitions
global tableT
tableS = ['']*len(tableT)
partitionY = dictpartitions[''.join(sorted(y))] # partitionY is a list of lists, each list is an equivalence class
partitionZ = dictpartitions[''.join(sorted(z))]
partitionofx = [] # line 1
for i in range(len(partitionY)): # line 2
for t in partitionY[i]: # line 3
tableT[t] = i
tableS[i]='' #line 4
for i in range(len(partitionZ)): # line 5
for t in partitionZ[i]: # line 6
if ( not (tableT[t] == 'NULL')): # line 7
tableS[tableT[t]] = sorted(list(set(tableS[tableT[t]]) | set([t])))
for t in partitionZ[i]: # line 8
if (not (tableT[t] == 'NULL')) and len(tableS[tableT[t]])>= 2 : # line 9
partitionofx.append(tableS[tableT[t]])
if not (tableT[t] == 'NULL'): tableS[tableT[t]]='' # line 10
for i in range(len(partitionY)): # line 11
for t in partitionY[i]: # line 12
tableT[t]='NULL'
dictpartitions[''.join(sorted(x))] = partitionofx
def computeSingletonPartitions(listofcols):
global data2D
global dictpartitions
for a in listofcols:
dictpartitions[a]=[]
for element in list_duplicates(data2D[a].tolist()): # list_duplicates returns 2-tuples, where 1st is a value, and 2nd is a list of indices where that value occurs
if len(element[1])>1: # ignore singleton equivalence classes
dictpartitions[a].append(element[1])
#------------------------------------------------------- START ---------------------------------------------------
if len(sys.argv) > 1:
infile=str(sys.argv[1]) # this would be e.g. "testdata.csv"
data2D = read_csv(infile)
totaltuples = len(data2D.index)
listofcolumns = list(data2D.columns.values) # returns ['A', 'B', 'C', 'D', .....]
tableT = ['NULL']*totaltuples # this is for the table T used in the function stripped_product
L0 = []
dictCplus = {'NULL': listofcolumns}
dictpartitions = {} # maps 'stringslikethis' to a list of lists, each of which contains indices
computeSingletonPartitions(listofcolumns)
finallistofFDs=[]
#print dictCplus['NULL']
L1=listofcolumns[:] # L1 is a copy of listofcolumns
l=1
L = [L0,L1]
while (not (L[l] == [])):
compute_dependencies(L[l],listofcolumns[:])
prune(L[l])
temp = generate_next_level(L[l])
L.append(temp)
l=l+1
print "List of all FDs: " , finallistofFDs
print "Total number of FDs found: ", len(finallistofFDs)
```
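TANE decides whether a functional dependency X→A holds by comparing the error measure e of the stripped partitions of X and X∪A: if adding A does not change e, the partition is not refined and the FD holds (this is what `validfd` checks). A compact, self-contained sketch of that test on a toy relation (column names and rows are made up):

```python
from collections import defaultdict

rows = [
    {'A': 1, 'B': 'x', 'C': 10},
    {'A': 1, 'B': 'x', 'C': 20},
    {'A': 2, 'B': 'y', 'C': 10},
    {'A': 2, 'B': 'y', 'C': 30},
]

def stripped_partition(attrs):
    """Equivalence classes of row indices sharing the same values on attrs,
    keeping only classes with more than one row (the 'stripped' partition)."""
    groups = defaultdict(list)
    for i, row in enumerate(rows):
        groups[tuple(row[a] for a in attrs)].append(i)
    return [idxs for idxs in groups.values() if len(idxs) > 1]

def e(attrs):
    part = stripped_partition(attrs)
    return (sum(len(c) for c in part) - len(part)) / float(len(rows))

# A -> B holds: adding B does not split A's classes, so e is unchanged
print(e(['A']), e(['A', 'B']))   # 0.5 0.5
# A -> C fails: C splits the classes, so e drops
print(e(['A']), e(['A', 'C']))   # 0.5 0.0
```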
#### File: jmftrindade/6.830-project/statistics_plots.py
```python
import argparse
import pandas as pd
import missingno as msno
import matplotlib.pyplot as plt
import seaborn as sns # For prettier plots.
from scipy.cluster import hierarchy as hc
import numpy as np
def plot_histogram_and_correlation_dendrogram(csv_filename):
df = pd.read_csv(csv_filename)
# Per column stats (numerical columns only).
print df.describe(include='all').transpose()
# Histograms for all columns.
df.hist()
# Correlation dendrogram.
corr = 1 - df.corr()
corr_condensed = hc.distance.squareform(corr)
z = hc.linkage(corr_condensed, method='average')
fig = plt.figure(figsize=(20,12))
dendrogram = hc.dendrogram(z, labels=corr.columns,
link_color_func=lambda c: 'black')
plt.show()
# Only use this for datasets with missing values.
if df.isnull().values.any():
msno.matrix(df)
msno.dendrogram(df)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Plots histograms and correlation dendrogram for a given dataset.")
parser.add_argument('-i', '--input_csv',
help='Relative path of input CSV filename.',
required=True)
args = parser.parse_args()
plot_histogram_and_correlation_dendrogram(args.input_csv)
```
#### File: 6.830-project/willies_keras_example/nn.py
```python
import os
import time
import numpy as np
from keras.models import Model
from keras.utils.np_utils import to_categorical
from keras.layers import Dense, Input
label_names = ['positive', 'negative', 'neutral']
label2ind = { label:ind for ind,label in enumerate(label_names) }
def main():
# Get data from notes
train_tweets = []
train_labels = []
train_file = os.path.join(os.path.dirname(os.path.abspath(__file__)),'twitter-train-gold-B.tsv')
with open(train_file, 'r') as f:
for line in f.readlines():
tid,uid,label,text = line.strip().split('\t')
if text == 'Not Available':
continue
#print 'tid: [%s]' % tid
#print 'uid: [%s]' % uid
#print 'label: [%s]' % label
#print 'text: [%s]' % text
#print
train_tweets.append(text)
train_labels.append(label)
# vocabulary of all words in training set
vocab = list(set(' '.join(train_tweets).split()))
# Data -> features
train_X = extract_features(train_tweets, vocab)
num_samples,input_dim = train_X.shape
# e.g. 'positive' -> [1 0 0]
num_classes = len(label_names)
train_Y = np.array( [label2ind[label] for label in train_labels] )
Y_onehots = to_categorical(train_Y, nb_classes=num_classes)
# Fit model (AKA learn model parameters)
classifier = create_model(input_dim, 300, 200, num_classes)
classifier.fit(train_X,Y_onehots,batch_size=128,nb_epoch=10,verbose=1)
#classifier.save_weights('tmp_keras_weights')
#classifier.load_weights('tmp_keras_weights')
# Predict on test data
test_file = os.path.join(os.path.dirname(os.path.abspath(__file__)),'twitter-test-gold-B.tsv')
test_tweets = []
test_labels = []
with open(test_file, 'r') as f:
for line in f.readlines():
tid,uid,label,text = line.strip().split('\t')
if text == 'Not Available':
continue
test_tweets.append(text)
test_labels.append(label)
test_X = extract_features(test_tweets, vocab)
test_Y = np.array( [label2ind[label] for label in test_labels] )
test_Y_onehots = to_categorical(test_Y, nb_classes=num_classes)
pred_prob = classifier.predict(test_X, batch_size=128, verbose=1)
test_predictions = pred_prob.argmax(axis=1)
# display a couple results
print
print 'references: ', test_Y[:5]
print 'predictions: ', test_predictions[:5]
print
# compute confusion matrix (rows=predictions, columns=reference)
confusion = np.zeros((3,3))
for pred,ref in zip(test_predictions,test_Y):
confusion[pred][ref] += 1
print ' '.join(label_names)
print confusion
print
# compute P, R, and F1 of each class
for label in label_names:
ind = label2ind[label]
tp = confusion[ind,ind]
tp_plus_fn = confusion[:,ind].sum()
tp_plus_fp = confusion[ind,:].sum()
precision = float(tp)/tp_plus_fp
recall = float(tp)/tp_plus_fn
f1 = (2*precision*recall) / (precision+recall+1e-9)
print label
print '\tprecision: ', precision
print '\trecall: ', recall
print '\tf1: ', f1
print
def extract_features(tweets, vocab):
word2ind = { w:i for i,w in enumerate(vocab) }
V = len(vocab)
X = np.zeros((len(tweets),V))
for i,tweet in enumerate(tweets):
for word in tweet.split():
if word not in word2ind:
continue
dim = word2ind[word]
featureval = 1 # indicate this feature is "on"
X[i,dim] = featureval
return X
def create_model(input_dim, embedding_dim, hidden_dim, output_dim):
bow = Input(shape=(input_dim,))
embeddings = Dense(output_dim=embedding_dim, activation='sigmoid')(bow)
hidden = Dense(output_dim=hidden_dim, activation='sigmoid')(embeddings)
prob = Dense(output_dim=output_dim, activation='softmax')(hidden)
model = Model(input=bow, output=prob)
print
print 'compiling model'
start = time.clock()
model.compile(loss='categorical_crossentropy', optimizer='adam')
#print '\tWARNING: skipping compilation'
end = time.clock()
print 'finished compiling: ', (end-start)
print
return model
if __name__ == '__main__':
main()
``` |
{
"source": "jmftrindade/file_access_monitor",
"score": 3
} |
#### File: file_access_monitor/scripts/twitter.py
```python
import os
import sys
import time
import requests
from py2neo import Graph, Node, Relationship
graph = Graph()
graph.run("CREATE CONSTRAINT ON (u:User) ASSERT u.username IS UNIQUE")
graph.run("CREATE CONSTRAINT ON (t:Tweet) ASSERT t.id IS UNIQUE")
graph.run("CREATE CONSTRAINT ON (h:Hashtag) ASSERT h.name IS UNIQUE")
TWITTER_BEARER = os.environ["TWITTER_BEARER"]
headers = dict(accept="application/json", Authorization="Bearer " + TWITTER_BEARER)
payload = dict(
count=100,
result_type="recent",
lang="en",
q=sys.argv[1]
)
base_url = "https://api.twitter.com/1.1/search/tweets.json?"
def find_tweets(since_id):
payload["since_id"] = since_id
url = base_url + "q={q}&count={count}&result_type={result_type}&lang={lang}&since_id={since_id}".format(**payload)
r = requests.get(url, headers=headers)
tweets = r.json()["statuses"]
return tweets
def upload_tweets(tweets):
for t in tweets:
u = t["user"]
e = t["entities"]
tweet = Node("Tweet", id=t["id"])
graph.merge(tweet)
tweet["text"] = t["text"]
tweet.push()
user = Node("User", username=u["screen_name"])
graph.merge(user)
graph.merge(Relationship(user, "POSTS", tweet))
for h in e.get("hashtags", []):
hashtag = Node("Hashtag", name=h["text"].lower())
graph.merge(hashtag)
graph.merge(Relationship(hashtag, "TAGS", tweet))
for m in e.get('user_mentions', []):
mention = Node("User", username=m["screen_name"])
graph.merge(mention)
graph.merge(Relationship(tweet, "MENTIONS", mention))
reply = t.get("in_reply_to_status_id")
if reply:
reply_tweet = Node("Tweet", id=reply)
graph.merge(reply_tweet)
graph.merge(Relationship(tweet, "REPLY_TO", reply_tweet))
ret = t.get("retweeted_status", {}).get("id")
if ret:
retweet = Node("Tweet", id=ret)
graph.merge(retweet)
graph.merge(Relationship(tweet, "RETWEETS", retweet))
since_id = -1
while True:
try:
tweets = find_tweets(since_id=since_id)
if not tweets:
print("No tweets found.")
time.sleep(60)
continue
since_id = tweets[0].get("id")
upload_tweets(tweets)
print("{} tweets uploaded!".format(len(tweets)))
time.sleep(60)
except Exception as e:
print(e)
time.sleep(60)
continue
``` |
{
"source": "jmftrindade/miniplaces_challenge",
"score": 3
} |
#### File: slim/datasets/miniplaces.py
```python
import tensorflow as tf
import os
slim = tf.contrib.slim
def get_split(split_name, dataset_dir, file_pattern=None, reader=None):
reader = tf.TFRecordReader
file_pattern = os.path.join(dataset_dir, 'miniplaces_%s_*.tfrecord' % split_name)
keys_to_features = {
'image/encoded': tf.FixedLenFeature((), tf.string, default_value=''),
'image/format': tf.FixedLenFeature((), tf.string, default_value='jpg'),
'image/class/label': tf.FixedLenFeature(
[], tf.int64, default_value=tf.zeros([], dtype=tf.int64)),
}
items_to_handlers = {
'image': slim.tfexample_decoder.Image(),
'label': slim.tfexample_decoder.Tensor('image/class/label'),
}
decoder = slim.tfexample_decoder.TFExampleDecoder(
keys_to_features, items_to_handlers)
num_samples = 100000 if split_name == 'train' else 10000
print num_samples
    return slim.dataset.Dataset(
        data_sources=file_pattern,
        reader=reader,
        decoder=decoder,
        num_samples=num_samples,
        items_to_descriptions={
            'image': 'A color image of varying size.',
            'label': 'A single integer between 0 and 99.',
        },
        num_classes=100,
        labels_to_names=None)
```
#### File: miniplaces_challenge/weighted_majority/weighted_majority.py
```python
import argparse
import glob
import itertools
import math
import os
from pprint import pprint
import sys
def get_network_name(filename):
# XXX: Network name assumed to be encoded as net_<file_suffix>.txt
basename = os.path.splitext(os.path.basename(filename))[0]
return basename.split('_')[0]
def process_scores_file(filename, class_scores):
net = get_network_name(filename)
with open(filename) as f:
for line in f:
# <class> <#t1_guesses> <t1_score> <#t5_guesses> <t5_score>
tokens = line.rstrip().split(' ')
if len(tokens) < 4:
print 'Exiting: not enough class scores in "%s"' % (filename)
sys.exit(-1)
class_name = int(tokens[0])
top1_score = float(tokens[2])
top5_score = float(tokens[4])
if net not in class_scores:
class_scores[net] = {}
if class_name not in class_scores[net]:
class_scores[net][class_name] = {}
class_scores[net][class_name]['top1'] = top1_score
class_scores[net][class_name]['top5'] = top5_score
return class_scores
def process_class_accuracies_file(filename, class_accuracies):
net = get_network_name(filename)
with open(filename) as f:
for line in f:
# <class> <t1_acc> <t5_acc>
tokens = line.rstrip().split(' ')
if len(tokens) < 3:
print 'Exiting: not enough class accuracies in "%s"' % (filename)
sys.exit(-1)
class_name = int(tokens[0])
top1_acc = float(tokens[1])
top5_acc = float(tokens[2])
if net not in class_accuracies:
class_accuracies[net] = {}
if class_name not in class_accuracies[net]:
class_accuracies[net][class_name] = {}
class_accuracies[net][class_name]['top1'] = top1_acc
class_accuracies[net][class_name]['top5'] = top5_acc
return class_accuracies
def process_predictions_file(model_weights,
predictions_filename,
votes,
class_scores=None,
use_top1_class_scores=None,
use_top5_class_scores=None,
class_accuracies=None,
use_top1_class_accuracies=None,
use_top5_class_accuracies=None,
decay_type=None):
net = get_network_name(predictions_filename)
if net not in model_weights:
print 'Warning: no majority weight specified for "%s"; returning' % net
return votes
with open(predictions_filename) as f:
for line in f:
tokens = line.rstrip().split(' ')
# Need at least the image and 1 prediction.
if len(tokens) <= 1:
print 'Exiting: not enough predictions in "%s".' % (
predictions_filename)
sys.exit(-1)
image = tokens[0]
num_preds = len(tokens) - 1 # first entry is the image name
for i in xrange(1, num_preds + 1):
prediction = int(tokens[i])
if image not in votes:
votes[image] = {}
if prediction not in votes[image]:
votes[image][prediction] = 0.0
# Default decay is exponential.
decay_factor = 1.0
# Only first 5 predictions count.
if decay_type == 'constant_first_5':
decay_factor = 1.0 if i <= 5 else 0.0
# Only first 10 predictions count.
                elif decay_type == 'constant_first_10':
decay_factor = 1.0 if i <= 10 else 0.0
# Linear decay for prediction ranks.
elif decay_type == 'linear':
decay_factor = num_preds - i
# Exponential decay for prediction ranks (default setting).
else:
decay_factor = math.exp(float(-i) / float(5))
class_score = 1.0
if class_scores and net in class_scores:
if use_top1_class_scores:
class_score *= class_scores[net][prediction]['top1']
if use_top5_class_scores:
class_score *= class_scores[net][prediction]['top5']
class_accuracy = 1.0
if class_accuracies and net in class_accuracies:
if use_top1_class_accuracies:
class_accuracy *= class_accuracies[
net][prediction]['top1']
if use_top5_class_accuracies:
class_accuracy *= class_accuracies[
net][prediction]['top5']
votes[image][prediction] += round(float(model_weights[net] *
class_score *
class_accuracy *
decay_factor), 4)
return votes
def main(**kwargs):
# models = ['alexnet']
models = ['alexnet', 'alexnet02', 'alexnet05', 'resnet', 'inception']
weights = [1.0]
output_predictions = False
# Only output predictions if we're not dealing with validation data set.
if kwargs.get('labels_input_filename') is None:
output_predictions = True
if kwargs.get('grid_search'):
weights = [0.0, 0.7, 1.0]
# Remove grid_search from kwargs before passing them along.
kw = {k: v for k, v in kwargs.items() if k is not 'grid_search'}
grid_search(models, weights, output_predictions, **kw)
def has_equal_values(d):
s = set(k for k, v in d.items() if d.values().count(v) == len(d.items()))
return len(d) == len(s)
def grid_search(models, weights, output_predictions, **kwargs):
products = []
for m in models:
products.append(list(itertools.product([m], weights)))
for i in itertools.product(*products):
model_weights = dict(i)
# Skip configurations with equal weights, except when a weight is ~1.0.
w = model_weights[model_weights.keys()[0]]
if has_equal_values(model_weights) and w < 0.9:
continue
compute_majority_predictions(model_weights,
output_predictions=output_predictions,
**kwargs)
def compute_majority_predictions(model_weights,
input_dir,
labels_input_filename,
use_top1_class_scores,
use_top5_class_scores,
use_top1_class_accuracies,
use_top5_class_accuracies,
decay_type,
output_predictions):
config = dict(locals().items())
pprint(config)
correct_answers = {}
should_compute_acc = False
if labels_input_filename is not None:
should_compute_acc = True
with open(labels_input_filename) as labels:
for line in labels:
line = line.rstrip().split()
correct_answers[line[0]] = int(line[1])
# Only retrieve stuff for models that we care about.
models_prefix = (input_dir + '/[' +
'|'.join([m for m in model_weights.iterkeys()]) + ']' +
'*')
# Retrieve class scores if available.
class_scores = {}
if use_top1_class_scores or use_top5_class_scores:
pattern = models_prefix + '_scores.txt'
for scores_filename in glob.glob(pattern):
class_scores = process_scores_file(scores_filename, class_scores)
if len(class_scores) == 0:
print 'Exiting: no %s files found.' % pattern
sys.exit(-1)
# Retrieve class accuracies if available.
class_accuracies = {}
if use_top1_class_accuracies or use_top5_class_accuracies:
pattern = models_prefix + '_class_accuracies.txt'
for accuracies_filename in glob.glob(pattern):
class_accuracies = process_class_accuracies_file(accuracies_filename,
class_accuracies)
if len(class_accuracies) == 0:
print 'Exiting: no %s files found.' % pattern
sys.exit(-1)
# XXX: Filename actually matters, yuck >.<
votes = {}
pattern = models_prefix + '_predictions.txt'
for predictions_filename in glob.glob(pattern):
votes = process_predictions_file(model_weights,
predictions_filename,
votes,
class_scores,
use_top1_class_scores,
use_top5_class_scores,
class_accuracies,
use_top1_class_accuracies,
use_top5_class_accuracies,
decay_type)
if len(votes) == 0:
print 'Exiting: no %s files found.' % pattern
sys.exit(-1)
top_1_acc = 0
top_5_acc = 0
# Iterate over weighted majority votes, printing top 5 results to file.
for image in sorted(votes.iterkeys()):
image_votes = votes[image]
preds = sorted(image_votes.items(), key=lambda x: (-x[1], x[0]))
# Output top 5 predictions.
top_preds = []
for i in xrange(5):
top_preds.append(int(preds[i][0]))
# Compute accuracy if class labels available.
if should_compute_acc:
if correct_answers[image] == top_preds[0]:
top_1_acc += 1
if correct_answers[image] in top_preds[:5]:
top_5_acc += 1
if output_predictions:
print '%s %s' % (image, ' '.join(map(str, top_preds)))
if should_compute_acc:
print 'top-1 acc: %s' % (float(top_1_acc) / len(votes))
print 'top-5 acc: %s' % (float(top_5_acc) / len(votes))
print 'done.\n'
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Combine predictions using'
' weighted majority voting (single pass).')
parser.add_argument('-i', '--input_dir',
help='Relative path of directory with input files.',
required=True)
parser.add_argument('-lf', '--labels_input_filename',
help='Relative path of input file with class labels.',
required=False)
parser.add_argument('--use-top1-class-scores',
dest='use_top1_class_scores',
action='store_true')
parser.set_defaults(use_top1_class_scores=False)
parser.add_argument('--use-top5-class-scores',
dest='use_top5_class_scores',
action='store_true')
parser.set_defaults(use_top5_class_scores=False)
parser.add_argument('--use-top1-class-accuracies',
dest='use_top1_class_accuracies',
action='store_true')
parser.set_defaults(use_top1_class_accuracies=False)
parser.add_argument('--use-top5-class-accuracies',
dest='use_top5_class_accuracies',
action='store_true')
parser.set_defaults(use_top5_class_accuracies=False)
parser.add_argument('--decay-type',
choices=['constant_first_5', 'constant_first_10',
'linear', 'exponential'],
dest='decay_type',
action='store',
help='Type of decay function for prediction ranks.')
parser.add_argument('--no-grid-search',
dest='grid_search',
action='store_false')
parser.set_defaults(grid_search=True)
args = parser.parse_args()
main(**vars(args))
``` |
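The voting rule in `process_predictions_file` boils down to: each model adds weight * decay(rank), optionally scaled by per-class scores and accuracies, to every class it predicts, and the five largest totals win. A stripped-down sketch of just the weight-times-exponential-decay part, with made-up model names, weights, and predictions:

```python
import math
from collections import defaultdict

model_weights = {'alexnet': 0.7, 'resnet': 1.0}
# ranked class predictions per model for a single image (best first)
model_predictions = {
    'alexnet': [12, 7, 33, 5, 81],
    'resnet':  [7, 12, 90, 33, 2],
}

votes = defaultdict(float)
for net, preds in model_predictions.items():
    for rank, cls in enumerate(preds, start=1):
        votes[cls] += model_weights[net] * math.exp(-rank / 5.0)  # exponential rank decay

top5 = sorted(votes.items(), key=lambda kv: (-kv[1], kv[0]))[:5]
print([cls for cls, _ in top5])   # e.g. [7, 12, 33, ...]
```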
{
"source": "jmfuchs/aws-remediation-workflow",
"score": 2
} |
#### File: jmfuchs/aws-remediation-workflow/remediation.py
```python
from __future__ import print_function
from botocore.exceptions import ClientError
import boto3
import os
import json
import datetime
import uuid
import time
import detect
# Global Variables
channel = '#%s' % os.environ['SLACK_CHANNEL']
token_bot = '%s' % os.environ['SLACK_TOKEN_NAME']
def EC2MaliciousIPCaller(event, context):
# Log Event
print("log -- Event: %s " % json.dumps(event))
# Set Event Variables
gd_vpc_id = event["detail"]["resource"]["instanceDetails"]["networkInterfaces"][0]["vpcId"]
gd_instance_id = event["detail"]["resource"]["instanceDetails"]["instanceId"]
# Set Initial Remediation Metadata
event['remediation'] = {}
event['remediation']['success'] = False
event['remediation']['title'] = "GuardDog was unable to remediate the Instance"
event['remediation']['description'] = "Auto remediation was unsuccessful. Please review the finding and remediate manaully."
# Create Forensics Security Group
ec2 = boto3.client('ec2')
gd_sg_name = 'GuardDuty-Remediation-Workflow-Isolation'
try:
try:
# Create Isolation Security Group
gd_sg = ec2.create_security_group(
GroupName=gd_sg_name,
Description='This Security Group is used to isolate potentially compromised instances.',
VpcId=gd_vpc_id
)
gd_sg_id = gd_sg['GroupId']
# Remove Default Egress Rule
gd_sg = ec2.describe_security_groups(
GroupIds=[
gd_sg_id,
]
)
ec2.revoke_security_group_egress(
GroupId=gd_sg_id,
IpPermissions=gd_sg['SecurityGroups'][0]['IpPermissionsEgress']
)
ec2.authorize_security_group_ingress(
FromPort=22,
CidrIp='10.0.0.0/24',
GroupId=gd_sg_id,
IpProtocol='tcp',
ToPort=22,
)
except ClientError as e:
print(e)
print("log -- Isolation Security Group already exists.")
# Get Security Group ID
gd_sg = ec2.describe_security_groups(
Filters=[
{
'Name': 'vpc-id',
'Values': [
gd_vpc_id,
]
},
{
'Name': 'group-name',
'Values': [
gd_sg_name,
]
}
]
)
gd_sg_id = gd_sg['SecurityGroups'][0]['GroupId']
# Remove existing Security Groups and Attach the Isolation Security Group
ec2 = boto3.resource('ec2')
gd_instance = ec2.Instance(gd_instance_id)
print("log -- %s, %s" % (gd_instance.id, gd_instance.instance_type))
# Get all Security Groups attached to the Instance
all_sg_ids = [sg['GroupId'] for sg in gd_instance.security_groups]
# Isolate Instance
gd_instance.modify_attribute(Groups=[gd_sg_id])
# Set Remediation Metadata
event['remediation']['success'] = True
event['remediation']['title'] = "GuardDog Successfully Isolated Instance ID: %s" % gd_instance.id
event['remediation']['description'] = "Please follow your necessary forensic procedures."
except ClientError as e:
print(e)
print("log -- Error Auto-Remediating Finding")
return event
def EC2BruteForce(event, context):
# Log Event
print("log -- Event: %s " % json.dumps(event))
prefix = os.environ['RESOURCE_PREFIX']
gd_instance_id = event["detail"]["resource"]["instanceDetails"]["instanceId"]
scan_id = str(uuid.uuid4())
scan_name = '%s-inspector-scan' % prefix
target_name = '%s-target-%s' % (prefix, event["id"])
template_name = '%s-template-%s' % (prefix, event["id"])
assess_name = '%s-assessment-%s' % (prefix, event["id"])
# Set Initial Remediation Metadata
event['remediation'] = {}
event['remediation']['success'] = False
event['remediation']['title'] = "GuardDog was unable to remediate the Instance"
    event['remediation']['description'] = "Auto remediation was unsuccessful. Please review the finding and remediate manually."
# Kick off Inspector Scan
try:
gd_sev = event['detail']['severity']
# Set Severity Color
gd_color = detect.getSevColor(gd_sev)
# Set Generic GD Finding Message
message = [
{
"title": 'Compromised Resource Details',
"fields": [
{
"title": "Instance ID",
"value": gd_instance_id,
"short": 'true'
},
{
"title": "Public IP",
"value": event["detail"]["resource"]["instanceDetails"]["networkInterfaces"][0]["publicIp"],
"short": 'true'
},
{
"title": 'Image Description',
"value": event["detail"]["resource"]["instanceDetails"]["imageDescription"],
"short": 'false'
},
{
"title": "VPC ID",
"value": event["detail"]["resource"]["instanceDetails"]["networkInterfaces"][0]['vpcId'],
"short": 'true'
},
{
"title": "Subnet ID",
"value": event["detail"]["resource"]["instanceDetails"]["networkInterfaces"][0]['subnetId'],
"short": 'true'
}
],
"fallback": "Required plain-text summary of the attachment.",
"color": gd_color,
"text": 'Below are some additional details related to the GuardDuty finding.',
}]
# Post Slack Message
post = detect.PostMessage(channel, token_bot, message, event["ts"])
ec2 = boto3.client('ec2')
inspector = boto3.client('inspector')
scan_in_progress = False
tags = ec2.describe_tags(
Filters=[
{
'Name': 'resource-id',
'Values': [
gd_instance_id,
],
},
],
MaxResults=100
)
print(tags)
for i in tags['Tags']:
if i['Key'] == scan_name:
print(i['Key'])
scan_in_progress = True
if scan_in_progress == False:
print("log -- Event: Running Scan")
ec2.create_tags(
Resources=[
gd_instance_id,
],
Tags=[
{
'Key': scan_name,
'Value': scan_id
}
]
)
packages = inspector.list_rules_packages(
maxResults=100
)
group = inspector.create_resource_group(
resourceGroupTags=[
{
'key': scan_name,
'value': scan_id
},
]
)
target = inspector.create_assessment_target(
assessmentTargetName=target_name,
resourceGroupArn=group['resourceGroupArn']
)
template = inspector.create_assessment_template(
assessmentTargetArn=target['assessmentTargetArn'],
assessmentTemplateName=template_name,
durationInSeconds=900,
rulesPackageArns=packages['rulesPackageArns'],
userAttributesForFindings=[
{
'key': 'instance-id',
'value': gd_instance_id
},
{
'key': 'scan-name',
'value': scan_name
},
{
'key': 'scan-id',
'value': scan_id
},
{
'key': 'gd-slack-thread',
'value': event["ts"]
}
]
)
inspector.subscribe_to_event(
resourceArn=template['assessmentTemplateArn'],
event='ASSESSMENT_RUN_COMPLETED',
topicArn=os.environ['SNS_TOPIC_ARN']
)
assessment = inspector.start_assessment_run(
assessmentTemplateArn=template['assessmentTemplateArn'],
assessmentRunName=assess_name
)
# Set Remediation Metadata
event['remediation']['title'] = "GuardDog initiated an AWS Inspector assessment on this instance: %s" % gd_instance_id
else:
print("log -- Event: Scan Already Running")
event['remediation']['title'] = "GuardDog has already initiated an AWS Inspector scan on this instance: %s" % gd_instance_id
# Set Remediation Metadata
event['remediation']['success'] = True
event['remediation']['description'] = "Please view the console if you'd like to track the progress of the assessment (%s). A new message will be posted after the assessment has been completed." % assess_name
except ClientError as e:
print(e)
print("log -- Error Starting an AWS Inspector Assessment")
return event
def EC2CleanupBruteForce(event, context):
# Log Event
print("log -- Event: %s " % json.dumps(event))
message = json.loads(event['Records'][0]['Sns']['Message'])
cleanup = 'Failed'
ec2 = boto3.client('ec2')
inspector = boto3.client('inspector')
try:
if message['event'] == 'ASSESSMENT_RUN_COMPLETED':
run = inspector.describe_assessment_runs(
assessmentRunArns=[
message['run'],
]
)
for i in run['assessmentRuns'][0]['userAttributesForFindings']:
if i['key'] == 'instance-id':
instance_id = i['value']
elif i['key'] == 'scan-name':
scan_name = i['value']
elif i['key'] == 'scan-id':
scan_id = i['value']
elif i['key'] == 'gd-slack-thread':
thread_ts = i['value']
ec2.delete_tags(
Resources=[
instance_id,
],
Tags=[
{
'Key': scan_name,
'Value': scan_id
},
]
)
#inspector.delete_assessment_template(
# assessmentTemplateArn=message['template']
#)
#inspector.delete_assessment_target(
# assessmentTargetArn=message['target'],
#)
# Set Generic GD Finding Message
message = [
{
"title": 'Inspector Assessment Complete',
"text": 'The assessment has completed and you can view the report in the console.',
}]
# Post Slack Message
post = detect.PostMessage(channel, token_bot, message, thread_ts)
else:
print("log -- Not a Scan Completion Event")
except ClientError as e:
print(e)
print("log -- Error Cleaning up")
return cleanup
def InstanceCredentialExfiltration(event, context):
# Log Event
print("log -- Event: %s " % json.dumps(event))
# Set Initial Remediation Metadata
event['remediation'] = {}
event['remediation']['success'] = False
event['remediation']['title'] = "GuardDog was unable to remediate the Instance"
    event['remediation']['description'] = "Auto remediation was unsuccessful. Please review the finding and remediate manually."
try:
# Set Clients
iam = boto3.client('iam')
ec2 = boto3.client('ec2')
# Set Role Variable
role = event['detail']['resource']['accessKeyDetails']['userName']
# Current Time
time = datetime.datetime.utcnow().isoformat()
# Set Revoke Policy
policy = """
{
"Version": "2012-10-17",
"Statement": {
"Effect": "Deny",
"Action": "*",
"Resource": "*",
"Condition": {"DateLessThan": {"aws:TokenIssueTime": "%s"}}
}
}
""" % time
# Add policy to Role to Revoke all Current Sessions
iam.put_role_policy(
RoleName=role,
PolicyName='RevokeOldSessions',
PolicyDocument=policy.replace('\n', '').replace(' ', '')
)
# Set Remediation Metadata
event['remediation']['success'] = True
event['remediation']['title'] = "GuardDog Successfully Removed all Active Sessions for Role: %s" % role
event['remediation']['description'] = "Please follow your necessary forensic procedures."
except ClientError as e:
print(e)
print("log -- Error Auto-Remediating Finding")
return event
``` |
{
"source": "JMFU/QuantEcon.py",
"score": 4
} |
#### File: QuantEcon.py/quantecon/compute_fp.py
```python
import time
import numpy as np
def _print_after_skip(skip, it=None, dist=None, etime=None):
if it is None:
# print initial header
msg = "{i:<13}{d:<15}{t:<17}".format(i="Iteration",
d="Distance",
t="Elapsed (seconds)")
print(msg)
print("-" * len(msg))
return
if it % skip == 0:
if etime is None:
print("After {it} iterations dist is {d}".format(it=it, d=dist))
else:
# leave 4 spaces between columns if we have %3.3e in d, t
msg = "{i:<13}{d:<15.3e}{t:<18.3e}"
print(msg.format(i=it, d=dist, t=etime))
return
def compute_fixed_point(T, v, error_tol=1e-3, max_iter=50, verbose=1,
print_skip=5, *args, **kwargs):
"""
Computes and returns :math:`T^k v`, an approximate fixed point.
Here T is an operator, v is an initial condition and k is the number
of iterates. Provided that T is a contraction mapping or similar,
:math:`T^k v` will be an approximation to the fixed point.
Parameters
----------
T : callable
A callable object (e.g., function) that acts on v
v : object
An object such that T(v) is defined
error_tol : scalar(float), optional(default=1e-3)
Error tolerance
max_iter : scalar(int), optional(default=50)
Maximum number of iterations
    verbose : scalar(int), optional(default=1)
        If non-zero then print the current error every `print_skip` iterations.
    print_skip : scalar(int), optional(default=5)
        Number of iterations between progress messages (only used when
        verbose is non-zero).
args, kwargs :
Other arguments and keyword arguments that are passed directly
to the function T each time it is called
Returns
-------
v : object
The approximate fixed point
"""
iterate = 0
error = error_tol + 1
if verbose:
start_time = time.time()
_print_after_skip(print_skip, it=None)
while iterate < max_iter and error > error_tol:
new_v = T(v, *args, **kwargs)
iterate += 1
error = np.max(np.abs(new_v - v))
if verbose:
etime = time.time() - start_time
_print_after_skip(print_skip, iterate, error, etime)
try:
v[:] = new_v
except TypeError:
v = new_v
return v
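if __name__ == '__main__':
    # Illustrative usage sketch (added for clarity; not part of the original
    # QuantEcon module). The lambda below is a hypothetical affine contraction
    # with fixed point 2.0, so the iteration should converge towards it.
    fp = compute_fixed_point(lambda x: 0.5 * x + 1.0, v=0.0,
                             error_tol=1e-6, max_iter=200, verbose=0)
    assert abs(fp - 2.0) < 1e-5
    print("approximate fixed point:", fp)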
``` |
{
"source": "jmfveneroso/name_extractor",
"score": 3
} |
#### File: name_extractor/name_extractor/nsnb_extractor.py
```python
import os
import re
import sys
from math import log
import tokenizer
import requests
class NsnbExtractor():
"""
The Not-so-Naive Bayesian extractor class extracts names from a list
of tokens based on the most likely sequence of labels on a sliding window
given the prior and conditional probabilities.
"""
tokenizer = tokenizer.Tokenizer()
def __init__(self):
# The conditional name P(t|N) and word probabilities P(t|W) were estimated
# offline through maximum likelihood over two corpora containing only names
# and only words, respectively.
self.cond_name_probs = {}
self.cond_word_probs = {}
# Prior probabilities for each one of these sequences will be estimated
# from the test data. This is the only possible source of overfitting.
        # The possible label sequences for a window of size 5 are (W=word, N=name):
        # WWWWW, WWWWN, WWWNW, ..., NNNNW, NNNNN (32 sequences in total).
        # Consider that the corresponding index for a given sequence is the binary
        # number obtained when W=0 and N=1. For example: WWWNN == 00011, so its
        # index is 3.
self.prior_probs = [0] * 32
# Feature probabilities will be calculated on a second run over the
# token list. Only structural features are being currently used.
self.feature_probs = [{}, {}, {}, {}, {}]
# Load the conditional probabilities from a file.
self.load_conditional_probs()
def laplace_smoothing(self, count, total_count, possibilities):
"""
Returns a smoothed probability through the Laplace or Additive Smoothing
method. More information here:
https://en.wikipedia.org/wiki/Additive_smoothing
"""
return float(count + 1) / (total_count + possibilities)
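    # Example (added for clarity): laplace_smoothing(0, 10, 2) returns
    # (0 + 1) / (10 + 2) = 1/12, so unseen events keep a small non-zero mass.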
def get_sequence_index(self, seq):
"""
Get the sequence index in the prior probabilities array. The index of a
given sequence of labels is the binary number obtained when word=0 and name=1.
        For example: WWWNN == 00011, so the index is 3.
"""
return sum([2**j if seq[::-1][j].is_name else 0 for j in range(0, 5)])
def fit(self, docs):
"""
Estimates the prior probabilities from a list of test documents.
"""
if len(docs) == 0:
self.load_prior_probs()
return
self.prior_probs = [0] * 32
num_seqs = 0
for doc in docs:
i = 0
tkns = NsnbExtractor.tokenizer.tokenize(doc[1])
NsnbExtractor.tokenizer.assign_correct_labels(tkns, doc[2])
while i <= len(tkns) - 5:
index = self.get_sequence_index(tkns[i:i+5])
self.prior_probs[index] += 1
num_seqs += 1
i += 1
arr = [
'WWWWW', 'WWWWN', 'WWWNW', 'WWWNN', 'WWNWW', 'WWNWN', 'WWNNW', 'WWNNN',
'WNWWW', 'WNWWN', 'WNWNW', 'WNWNN', 'WNNWW', 'WNNWN', 'WNNNW', 'WNNNN',
'NWWWW', 'NWWWN', 'NWWNW', 'NWWNN', 'NWNWW', 'NWNWN', 'NWNNW', 'NWNNN',
'NNWWW', 'NNWWN', 'NNWNW', 'NNWNN', 'NNNWW', 'NNNWN', 'NNNNW', 'NNNNN',
]
# Compute prior probabilities.
for i in range(0, 32):
self.prior_probs[i] = log(self.laplace_smoothing(
self.prior_probs[i], num_seqs, 32
))
def load_conditional_probs(self, directory = "../probabilities"):
"""
Loads conditional probabilities P(t|W) and P(t|N) from a file. The
P(t|N) probabilities used in this code sample were estimated from a
list of author names obtained from the DBLP database. The P(t|W)
probabilities were estimated from text extracted from random websites
downloaded with a crawler that started from university homepages. All
capitalized words were removed as a heuristic to remove all names.
"""
# Conditional name probabilities.
with open(os.path.join(directory, "cond_name_probs.txt")) as f:
for line in f:
name, prob = line.split(" ")
self.cond_name_probs[name] = float(prob.strip())
# Conditional word probabilities.
with open(os.path.join(directory, "cond_word_probs.txt")) as f:
for line in f:
word, prob = line.split(" ")
self.cond_word_probs[word] = float(prob.strip())
def load_prior_probs(self, directory = "../probabilities"):
"""
Loads prior probabilities P(yyyy) from a file.
"""
with open(os.path.join(directory, "prior_probs.txt")) as f:
for i in range(0, 32):
prob = f.readline().split(" ")[1]
self.prior_probs[i] = float(prob.strip())
def get_tkn_probs(self, tkn, last_el, use_structural_features):
"""
Returns the conditional probabilities P(t|n)P(f1|n)... and P(t|w)P(f1|w)...
for a single token.
"""
if tkn.tkn == 'linebreak' or re.search('[0-9]', tkn.tkn):
return (0, -100)
# We are currently only considering names inside the same HTML element.
# We obtained better results with a more complex approach, that considered
# tokens accross elements, but this implementation is easier to grasp for
# the moment.
# TODO: this information can be incorporated in a feature.
if last_el != None:
if tkn.element != last_el:
return (0, -100)
# The total number of words and names in the corpora used to calculate the
# conditional probabilities loaded in load_conditional_probs(). This number
# can vary depending on the corpora characteristics.
num_words = 84599521
num_names = 3831851
tkn_value = tkn.tkn.strip().lower()
prob_word = log(float(1) / (num_words + len(self.cond_word_probs)))
if tkn_value in self.cond_word_probs:
prob_word = self.cond_word_probs[tkn_value]
prob_name = log(float(1) / (num_names + len(self.cond_name_probs)))
if tkn_value in self.cond_name_probs:
prob_name = self.cond_name_probs[tkn_value]
# Structural features are features derived from the HTML structure.
if use_structural_features:
for i in range(0, len(self.feature_probs)):
key = tkn.structural_features[i]
if key in self.feature_probs[i]:
prob_word += self.feature_probs[i][key][0]
prob_name += self.feature_probs[i][key][1]
return prob_word, prob_name
def get_sequence_probs(self, tkns, use_structural_features):
"""
        Returns the probabilities of the 32 possible label sequences
        for a sequence of tokens. That is: P(yyyyy)P(t1|y)P(t2|y)P(t3|y)P(t4|y)P(t5|y), where
y is a label (word or name).
"""
last_el = None
tkn_probs = []
for tkn in tkns:
tkn_probs.append(self.get_tkn_probs(tkn, last_el, use_structural_features))
last_el = tkn.element
sequence_probs = [0] * 32
for i in range(0, 32):
selector_array = [1 if ((i & 2**j) == 2**j) else 0 for j in reversed(range(0, 5))]
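            # (Added note) e.g. i == 19 is binary 10011, i.e. the sequence
            # 'NWWNN', so selector_array == [1, 0, 0, 1, 1].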
for j in range(0, len(tkns)):
sequence_probs[i] += tkn_probs[j][selector_array[j]]
sequence_probs[i] += self.prior_probs[i]
return sequence_probs
def estimate_structural_features(self, tkns):
"""
Estimates the probabilities associated with a structural feature
by a maximum likelihood estimation obtained after the extractor has
already assigned provisional labels to the list of tokens.
"""
# We store the feature counts here.
self.feature_probs = [{}, {}, {}, {}, {}]
name_count, word_count = 0, 0
for tkn in tkns:
if tkn.is_name:
name_count += 1
else:
word_count += 1
for i in range(0, len(tkn.structural_features)):
feature_val = tkn.structural_features[i]
if not feature_val in self.feature_probs[i]:
self.feature_probs[i][feature_val] = [0, 0]
self.feature_probs[i][feature_val][1 if tkn.is_name else 0] += 1
for i in range(0, len(tkn.structural_features)):
for key in self.feature_probs[i]:
self.feature_probs[i][key][0] = log(self.laplace_smoothing(
self.feature_probs[i][key][0], word_count, len(self.feature_probs[i])
))
self.feature_probs[i][key][1] = log(self.laplace_smoothing(
self.feature_probs[i][key][1], name_count, len(self.feature_probs[i])
))
def assign_labels(self, tkns, use_structural_features):
"""
Assigns the most probable labels for a sliding window of tokens and
returns the extracted names.
"""
names = []
i = 0
while i <= len(tkns) - 5:
cur_tkns = tkns[i:i+5]
# Get the sequence of labels with maximum probability.
seq_probs = self.get_sequence_probs(cur_tkns, use_structural_features)
index = seq_probs.index(max(seq_probs))
            # For sequences with index < 24 (i.e. those not starting with NN),
            # either the first token is a word, or the first token is a name but
            # the second token is a word. Since we don't want single-token names
            # we discard the first token as a word and slide the window by one
            # token.
if index < 24:
tkns[i].is_name = False
i += 1
            # At least the first two tokens are names. These are the sequences:
            # NNWWW, NNWWN, NNWNW, NNWNN, NNNWW, NNNWN, NNNNW, NNNNN.
            else:
if index >= 24 and index <= 27:
name_length = 2
if index >= 28 and index <= 29:
name_length = 3
if index == 30:
name_length = 4
if index == 31:
name_length = 5
if name_length > len(cur_tkns):
name_length = len(cur_tkns)
name_start = int(i)
name_end = int(i) + int(name_length)
name_tkns = tkns[name_start:name_end]
for tkn in name_tkns:
tkn.is_name = True
# Slide window.
i += name_length
if len(name_tkns) <= 1:
continue
name_tkns = [t.tkn for t in name_tkns]
if len([t for t in name_tkns if len(t) > 1]) == 0:
continue
# Only add unique names.
name = " ".join(name_tkns).encode('utf-8')
if not name in names:
names.append(name)
return names
def extract(self, html):
"""
Extracts names from a list of tokens.
"""
tkns = NsnbExtractor.tokenizer.tokenize(html)
self.feature_probs = [{}, {}, {}, {}, {}]
names = self.assign_labels(tkns, False)
for i in range(0, 2):
self.estimate_structural_features(tkns)
names = self.assign_labels(tkns, True)
return names
if __name__ == "__main__":
os.chdir(os.path.dirname(__file__))
extractor = NsnbExtractor()
# Will load probabilities from file.
extractor.fit([])
if len(sys.argv) < 2:
print('nsnb_extractor <url>')
quit()
r = requests.get(sys.argv[1])
if r.status_code != 200:
print('HTTP response not successful')
quit()
names = extractor.extract(r.text)
for n in names:
print(n)
``` |
{
"source": "jmfveneroso/nosce",
"score": 2
} |
#### File: _drafts/notebooks/unsupervised_hmm.py
```python
import sys
import numpy as np
states = np.array(['H', 'C'])
features = np.array(['1', '2', '3'])
start = [.5, .5]
end = [1, 1]
transition_mat = np.array([
[.6, .4],
[.4, .6]
])
emission_mat = np.array([
[.2, .5],
[.4, .4],
[.4, .1]
])
observations = np.array([1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 1, 1, 1])
def forward_algorithm(observations):
    global transition_mat, emission_mat
alphas = np.zeros((len(observations), len(states)))
alpha = None
for i, o in enumerate(observations):
idx = o - 1
if alpha is None:
alpha = start * emission_mat[idx]
alphas[i] = alpha
continue
alpha = np.matmul(alpha, transition_mat)
alpha *= emission_mat[idx]
alphas[i] = alpha
return alphas, np.matmul(alpha, np.transpose(end))
def backward_algorithm(observations):
    global transition_mat, emission_mat
betas = np.zeros((len(observations), len(states)))
beta = end
betas[len(observations) - 1] = beta
for i, o in enumerate(reversed(observations)):
idx = o - 1
beta *= emission_mat[idx]
if i < len(observations) - 1:
beta = np.matmul(beta, np.transpose(transition_mat))
betas[len(observations) - i - 2] = beta
return betas, np.matmul(beta, np.transpose(start))
def viterbi(observations):
global states
backprobs = np.zeros((len(observations), len(states)))
backpointers = np.zeros((len(observations), len(states)))
alpha = None
for i, o in enumerate(observations):
idx = o - 1
if alpha is None:
alpha = start * emission_mat[idx]
backprobs[i] = alpha
continue
alpha_mat = transition_mat * emission_mat[idx]
alpha_mat = np.transpose(alpha_mat) * alpha
alpha = np.amax(alpha_mat, axis=1)
pointers = np.argmax(alpha_mat, axis=1)
backprobs[i] = np.amax(alpha_mat, axis=1)
backpointers[i] = np.argmax(alpha_mat, axis=1)
last_state = np.argmax(backprobs[len(observations) - 1])
res = [states[last_state]]
for i in range(0, len(observations) - 1):
last_state = int(backpointers[len(observations) - i - 1][last_state])
# print last_state
res.append(states[last_state])
# print backprobs
# print backpointers
res.reverse()
return res
# print forward_algorithm(observations)
def forward_backward_algorithm(observations):
global emission_mat, transition_mat
for i in range(0, 100):
alphas, n = forward_algorithm(observations)
betas, n = backward_algorithm(observations)
# Transition probs.
numerator = np.matmul(np.transpose(alphas), betas) * transition_mat
denominator = np.sum(numerator, axis=1)
new_transition_probs = (numerator.T / denominator).T
# Emission probs.
unary = np.zeros((len(observations), len(features)))
for i, o in enumerate(observations):
idx = o - 1
unary[i][idx] = 1
numerator = alphas.T * betas.T
denominator = np.sum(numerator, axis=1)
numerator = np.matmul(numerator, unary)
new_emission_probs = numerator.T / denominator
# print np.round(transition_mat, 4)
# print np.round(new_transition_probs, 4)
# print np.round(emission_mat, 4)
# print np.round(new_emission_probs, 4)
transition_mat = new_transition_probs
emission_mat = new_emission_probs
print( np.round(transition_mat, 4))
print( np.round(emission_mat, 4))
# forward_backward_algorithm(observations)
# print [str(x) for x in observations]
# print viterbi(observations)
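# Added sanity check (illustrative; not part of the original notebook): the
# forward and backward passes should assign the same total probability to the
# observation sequence, which is a cheap consistency test of both functions.
_, _p_fwd = forward_algorithm(observations)
_, _p_bwd = backward_algorithm(observations)
assert np.isclose(_p_fwd, _p_bwd), (_p_fwd, _p_bwd)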
import numpy as np
import re
from pathlib import Path
def load_raw_dataset(f):
with open(f, 'r') as f:
data = f.read().strip()
sentences = data.split('\n\n')
sentences = [s for s in sentences if not s.startswith('-DOCSTART-')]
X = [[t.split(' ') for t in s.split('\n') if len(s) > 0] for s in sentences]
Y = []
T = []
for i, s in enumerate(X):
tkns, labels = [], []
for j, t in enumerate(s):
l = ['O', 'I-PER'].index(t[1])
labels.append(l)
tkns.append(t[0])
X[i][j] = [X[i][j][0]] + X[i][j][2:]
Y.append(labels)
T.append(tkns)
return X, Y, T
class HiddenMarkov:
def __init__(self):
self.time_steps = 1
self.num_labels = 2
self.num_features = 11
self.num_states = self.num_labels ** self.time_steps
self.transition_mat = np.ones((self.num_states, self.num_labels))
self.start = np.zeros((self.num_states, 1))
self.start[0,:] = 1 # All previous states are label O ("other").
self.end = np.ones((self.num_states, 1)) # All ending states are equally probable.
self.feature_counts = []
for i in range(self.num_features):
self.feature_counts.append([])
for j in range(self.num_labels):
self.feature_counts[i].append({'$UNK': 1})
def idx_to_states(self, idx):
states = []
multiplier = self.num_labels ** (self.time_steps - 1)
for i in range(self.time_steps):
states.append(int(idx) // int(multiplier))
idx %= multiplier
multiplier /= self.num_labels
return states
def states_to_idx(self, states):
if len(states) < self.time_steps:
raise Exception('Wrong states length.')
acc = 0
multiplier = 1
for s in reversed(states):
acc += int(multiplier) * int(s)
multiplier *= self.num_labels
return acc
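    # Example (added for clarity, assuming num_labels=2 and time_steps=2):
    # idx_to_states(3) == [1, 1] and states_to_idx([1, 1]) == 3, i.e. the state
    # index is the label sequence read as a base-`num_labels` number.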
def train_features(self, X, Y, which_features=[]):
if len(which_features) != self.num_features:
which_features = [0] * self.num_features
label_count = np.ones((self.num_labels))
for i in range(len(Y)):
for j in range(len(Y[i])):
label_count += Y[i][j]
y = Y[i][j]
f = X[i][j][1:1+self.num_features]
for k in range(self.num_features):
if which_features[k] == 0:
continue
key = ''
if k < len(f):
key = f[k]
if not key in self.feature_counts[k][y]:
self.feature_counts[k][y][key] = 0
self.feature_counts[k][y][key] += 1
# Consolidate vocabularies.
feature_maps = []
for i in range(self.num_features):
feature_maps.append({})
for j in range(self.num_labels):
for k in self.feature_counts[i][j]:
feature_maps[i][k] = True
for i in range(self.num_features):
if which_features[i] == 0:
continue
for j in range(self.num_labels):
for k in feature_maps[i]:
if not k in self.feature_counts[i][j]:
self.feature_counts[i][j][k] = 1
for i in range(self.num_features):
if which_features[i] == 0:
continue
for j in range(self.num_labels):
total_count = sum([self.feature_counts[i][j][k] for k in self.feature_counts[i][j]])
for k in self.feature_counts[i][j]:
self.feature_counts[i][j][k] /= float(total_count)
def train_transitions(self, X, Y):
for i in range(len(Y)):
states = [0] * self.time_steps
for j in range(len(Y[i])):
y = Y[i][j]
idx = self.states_to_idx(states)
self.transition_mat[idx,y] += 1
states.pop(0)
states.append(y)
self.transition_mat /= np.expand_dims(np.sum(self.transition_mat, axis=1), axis=1)
self.transition_mat = np.nan_to_num(self.transition_mat)
def fit(self, X, Y):
which_features = [1] * self.num_features
self.train_features(X, Y, which_features)
self.train_transitions(X, Y)
def viterbi(self, X):
pointers = np.zeros((len(X), self.num_states), dtype=int)
state_probs = self.start
for i in range(len(X)):
emission = np.ones(self.num_labels)
f = X[i][1:1+self.num_features]
for k in range(self.num_features):
for y in range(self.num_labels):
key = ''
if k < len(f):
key = f[k]
if key in self.feature_counts[k][y]:
emission[y] *= self.feature_counts[k][y][key]
else:
emission[y] *= self.feature_counts[k][y]['$UNK']
emission[emission == 1] = 0
p = state_probs * self.transition_mat * emission
state_probs = np.zeros((self.num_states, 1))
for s in range(self.num_states):
for l in range(self.num_labels):
states = self.idx_to_states(s)
states.pop(0)
states.append(l)
idx = self.states_to_idx(states)
if p[s,l] > state_probs[idx,0]:
pointers[i,idx] = s
state_probs[idx,0] = p[s,l]
idx = np.argmax(state_probs)
labels = []
for i in reversed(range(len(X))):
states = self.idx_to_states(idx)
labels.append(states[-1])
idx = pointers[i,idx]
labels = list(reversed(labels))
return labels
def predict(self, X):
y = []
for i in range(len(X)):
labels = self.viterbi(X[i])
y.append(labels)
return y
if __name__ == '__main__':
print('Fitting...')
X, Y, _ = load_raw_dataset('data/ner_on_html/train')
hmm = HiddenMarkov()
hmm.fit(X, Y)
for name in ['train', 'valid', 'test']:
print('Predicting ' + name)
x, t, w = load_raw_dataset('data/ner_on_html/' + name)
p = hmm.predict(x)
t = [[['O', 'I-PER'][t__] for t__ in t_] for t_ in t]
p = [[['O', 'I-PER'][p__] for p__ in p_] for p_ in p]
with Path('results/score/{}.preds.txt'.format(name)).open('wb') as f:
for words, preds, tags in zip(w, p, t):
f.write(b'\n')
for word, pred, tag in zip(words, preds, tags):
f.write(' '.join([word, tag, pred]).encode() + b'\n')
``` |
{
"source": "jmg292/django-smart-filter",
"score": 2
} |
#### File: django-smart-filter/smart_filter/middleware.py
```python
from django.http import HttpResponseForbidden
from django.utils.deprecation import MiddlewareMixin
from smart_filter.smart_filter import SmartFilter
class SmartFilterMiddleware(MiddlewareMixin):
def process_request(self, request):
if not SmartFilter.request_can_pass(request):
session_keys = list(request.session.keys())
for key in session_keys:
del request.session[key]
return HttpResponseForbidden(''.join([
"Results from probabilistic analysis and machine learning have determined that ",
"requests from your IP address may be malicious. As such, your IP address has ",
"been temporarily blacklisted.\n\n",
"If you are using a VPN or other anonymization service, please disable the service ",
"and retry your request."
]))
```
#### File: django-smart-filter/smart_filter/models.py
```python
import ipaddress
from django.db import models
from django.utils import timezone
from django.core.exceptions import ValidationError
class RateLimitState(models.Model):
current_count = models.IntegerField()
last_reset_time = models.DateTimeField()
class IPCheckResult(models.Model):
ip_address = models.GenericIPAddressField(db_index=True)
query_result = models.DecimalField(max_digits=8, decimal_places=5)
is_authorized = models.BooleanField(default=False)
entry_time = models.DateTimeField(db_index=True)
def save(self, *args, **kwargs):
if not self.id:
self.entry_time = timezone.now()
super(IPCheckResult, self).save(*args, **kwargs)
class WhitelistEntry(models.Model):
name = models.CharField(max_length=64)
ip_address = models.GenericIPAddressField()
subnet = models.IntegerField(default=32)
def clean(self):
if self.subnet < 1:
raise ValidationError("Subnet mask must be greater than or equal to 1.")
address = ipaddress.ip_address(self.ip_address)
if type(address) is ipaddress.IPv6Address:
if self.subnet > 128:
raise ValidationError("Invalid IPv6 subnet mask supplied.")
elif self.subnet > 32:
raise ValidationError("Invalid IPv4 subnet mask supplied.")
return super(WhitelistEntry, self).clean()
def save(self, *args, **kwargs):
super(WhitelistEntry, self).save(*args, **kwargs)
```
#### File: django-smart-filter/smart_filter/whitelist.py
```python
import ipaddress
import logging
import socket
import struct
import threading
import typing
from smart_filter.models import WhitelistEntry
logger = logging.getLogger(__name__)
class Whitelist(object):
_instance = None
class _Whitelist(object):
def __init__(self):
self._network_list_lock = threading.Lock()
self._whitelisted_networks = {}
def append(self, whitelist_entry: WhitelistEntry):
network = ipaddress.ip_network("{0}/{1}".format(
whitelist_entry.ip_address,
whitelist_entry.subnet
))
netmask = int(network.netmask)
address = int(network.network_address)
self._network_list_lock.acquire()
if not netmask in self._whitelisted_networks:
self._whitelisted_networks[netmask] = [address]
else:
self._whitelisted_networks[netmask].append(address)
self._network_list_lock.release()
def load_whitelist(self):
logger.debug("(SmartFilter) Loading whitelist from database.")
for entry in WhitelistEntry.objects.all():
self.append(entry)
logger.info("(SmartFilter) Loaded {0} whitelisted networks from database.".format(
len(self._whitelisted_networks)
))
def is_whitelisted(self, address):
is_whitelisted = False
try:
ip_address = ipaddress.ip_address(address)
except ValueError:
logger.error("(SmartFilter) Invalid IP address supplied for whitelist check: {0}".format(
address
))
return is_whitelisted
is_whitelisted = not ip_address.is_global
if not is_whitelisted:
self._network_list_lock.acquire()
whitelisted_networks = dict(self._whitelisted_networks)
self._network_list_lock.release()
address_dec = struct.unpack("!I", ip_address.packed)[0]
for netmask in whitelisted_networks:
network_address = address_dec & netmask
for network in whitelisted_networks[netmask]:
is_whitelisted = network == network_address
if is_whitelisted:
break
return is_whitelisted
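        # Worked example (added for clarity): a whitelist entry for
        # 192.168.1.0/24 stores netmask 0xFFFFFF00, so a request from
        # 192.168.1.57 (0xC0A80139) is matched because
        # 0xC0A80139 & 0xFFFFFF00 == 0xC0A80100 == int(192.168.1.0).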
def __new__(self, *args, **kwargs):
if not Whitelist._instance:
Whitelist._instance = Whitelist._Whitelist()
Whitelist._instance.load_whitelist()
return Whitelist._instance
``` |
{
"source": "jmg292/pyarmoryctl",
"score": 3
} |
#### File: hayes_command/argument_validation/enum_argument.py
```python
from enum import Enum
class EnumTypeValidator(object):
def __init__(self, enum_type: Enum):
self.enum_type = enum_type
self.value = None
``` |
{
"source": "jmg292/r2remote",
"score": 3
} |
#### File: r2remote/plugins/_base_command_handler.py
```python
class BaseCommandHandler(object):
def __init__(self, config):
self.config = config
def handle(self, *args):
raise NotImplementedError(f"{self.__class__.__name__} does not implement the handle method.")
```
#### File: r2remote/plugins/r2_info.py
```python
import json
from ._info_classes import FileInfo, StringInfo
from ._pipe_holder import PipeHolder
from ._base_command_handler import BaseCommandHandler
class GetBinaryInfo(BaseCommandHandler):
def handle(self, *args):
file_info_json = None
try:
pipe = PipeHolder.get_pipe()
file_info_json = pipe.cmd("ij")
except ValueError:
return_value = "Pipe must be opened before analyzing."
if file_info_json:
try:
return_value = str(FileInfo.from_string(file_info_json))
except (ValueError, json.JSONDecodeError):
return_value = "Unable to decode JSON"
return return_value
class GetEntrypoints(BaseCommandHandler):
def handle(self, *args):
try:
pipe = PipeHolder.get_pipe()
return_value = pipe.cmd("iej")
except ValueError:
return_value = "Pipe must be opened before analyzing."
return return_value
class GetMainAddress(BaseCommandHandler):
def handle(self, *args):
try:
pipe = PipeHolder.get_pipe()
return_value = pipe.cmd("iMj")
except ValueError:
return_value = "Pipe must be opened before analyzing."
return return_value
class GetStrings(BaseCommandHandler):
def handle(self, *args):
string_data = None
try:
pipe = PipeHolder.get_pipe()
string_data = pipe.cmd("izzj")
except ValueError:
return_value = "Pipe must be opened before analyzing."
if string_data is not None:
return_value = json.dumps([x.__dict__ for x in StringInfo.load_all_strings(string_data)])
return return_value
class GetExports(BaseCommandHandler):
def handle(self, *args):
try:
pipe = PipeHolder.get_pipe()
return_value = pipe.cmd("iEj")
except ValueError:
return_value = "Pipe must be opened before analyzing."
return return_value
class GetSymbols(BaseCommandHandler):
def handle(self, *args):
try:
pipe = PipeHolder.get_pipe()
return_value = pipe.cmd("isj")
except ValueError:
return_value = "Pipe must be opened before analyzing."
return return_value
class GetImports(BaseCommandHandler):
def handle(self, *args):
try:
pipe = PipeHolder.get_pipe()
return_value = pipe.cmd("iij")
except ValueError:
return_value = "Pipe must be opened before analyzing."
return return_value
class GetRelocations(BaseCommandHandler):
def handle(self, *args):
try:
pipe = PipeHolder.get_pipe()
return_value = pipe.cmd("irj").strip()
if return_value:
unpacked_json = json.loads(return_value)
return_value = []
for value in unpacked_json:
if value["name"] is None:
value["name"] = ""
return_value.append(value)
return_value = json.dumps(return_value)
except ValueError:
return_value = "Pipe must be opened before analyzing."
return return_value
```
#### File: jmg292/r2remote/r2remote_shell.py
```python
import argparse
import base64
import io
import json
import socket
from message_wrapper import MessageWrapper
from protocol import Message, MessageHandlerMixin
class ShellHandler(MessageHandlerMixin):
def __init__(self, identity_key, authorized_hosts, secure = True):
self.message_wrapper = MessageWrapper(identity_key, authorized_hosts)
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.rfile = io.BytesIO() # Here, hold this for a moment
self.secure = secure
def _do_handshake(self):
print(f"[+] Authenticating . . .")
challenge = base64.b64decode(self.rfile.readline().strip())
print(f"[+] Got challenge: {challenge}")
self._write_line(self.message_wrapper.get_challenge_response(challenge))
server_response = base64.b64decode(self.rfile.readline().strip())
print(f"[+] Got response: {server_response}")
self.message_wrapper.finalize_handshake(server_response)
print("[+] Authentication successful!")
def connect(self, remote_addr, remote_port):
self.socket.connect((remote_addr, remote_port))
self.rfile = self.socket.makefile()
if self.secure:
self._do_handshake()
else:
print("[+] Insecure flag is set, skipping authentication")
def run(self):
command = ""
while command != "exit":
command = input("> ").lower().strip()
if command:
for packed_message in Message.packed_from_string(self.message_wrapper, command, secure=self.secure):
self._write_line(packed_message)
if command != "exit":
response = self._get_message(self.message_wrapper, secure=self.secure)
try:
# Try to pretty print JSON responses
response = json.loads(response)
response = json.dumps(response, indent=4)
except (ValueError, json.JSONDecodeError):
pass
print(response)
if __name__ == "__main__":
argument_parser = argparse.ArgumentParser(description="Connect a terminal to an r2remote session.")
argument_parser.add_argument("--identity-key", "-i", required=True, help="Path to identity file.")
argument_parser.add_argument("--authorized-hosts", "-a", required=True, help="Path to authorized hosts directory.")
argument_parser.add_argument("--host", required=True, help="IP address to connect to.")
argument_parser.add_argument("--port", "-p", required=True, type=int, help="Port to connect to.")
argument_parser.add_argument("--insecure", help="Disable authentication and encryption (Probably shouldn't ever use this.)", required=False, default=False, action="store_true")
arguments = argument_parser.parse_args()
print("[+] Initializing . . . ", end='')
shell = ShellHandler(arguments.identity_key, arguments.authorized_hosts, not arguments.insecure)
print("Done.\n[+] Connecting . . . ")
shell.connect(arguments.host, arguments.port)
print("[+] Starting shell.")
shell.run()
``` |
{
"source": "jmg7173/boiler-plates-and-examples",
"score": 2
} |
#### File: api-flask/api/app.py
```python
import os
from flask import Flask
from flask_jwt_extended import JWTManager
from config import Config, get_config
jwt = JWTManager()
def create_app(config: Config) -> Flask:
app = Flask(config.APP_NAME)
app.config.from_object(config)
from v1 import api as v1_api
app.register_blueprint(v1_api)
from models import db, migrate
db.init_app(app)
migrate.init_app(app, db)
jwt.init_app(app)
return app
if __name__ == '__main__':
config: Config = get_config(os.environ.get('APP_MODE'))
app: Flask = create_app(config)
app.run(host='0.0.0.0', port=8000)
```
#### File: api-flask/api/models.py
```python
import base64
import os
import urllib.request
import uuid
from hashlib import md5
from pathlib import Path
from typing import Dict
from flask import url_for
from flask_migrate import Migrate
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.sql import func
from werkzeug.security import check_password_hash, generate_password_hash
from config import Config, get_config
config: Config = get_config(os.environ.get('APP_MODE'))
db = SQLAlchemy()
migrate = Migrate()
class PaginatedAPIMixin(object):
@staticmethod
def to_collection_dict(query, page, page_size, endpoint, **kwargs) -> Dict:
resources = query.paginate(page, page_size, error_out=False)
return {
'items': [item.to_dict() for item in resources.items],
'pagination': {
'page': page,
'page_size': page_size,
'total_pages': resources.pages,
'total_items': resources.total,
},
'_links': {
'self': url_for(endpoint, page=page, page_size=page_size, **kwargs),
'next': (
url_for(endpoint, page=page + 1, page_size=page_size, **kwargs)
if resources.has_next
else None
),
'prev': (
url_for(endpoint, page=page - 1, page_size=page_size, **kwargs)
if resources.has_prev
else None
),
},
}
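# Illustrative use of the mixin (added for clarity; the `v1.users.get_users`
# endpoint name below is hypothetical):
#   User.to_collection_dict(User.query, page=1, page_size=20,
#                           endpoint='v1.users.get_users')
# returns the serialized items plus pagination metadata and prev/next links.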
class User(PaginatedAPIMixin, db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(100))
email = db.Column(db.String(100))
password = db.Column(db.String(150))
created_at = db.Column(db.DateTime, server_default=func.now())
profile_img_path = db.Column(db.String(150))
def set_profile_img(self, username, encoded_img=None, is_default_img=False):
profile_img_base_dir = Path('images/profile')
profile_img_dir = config.VOLUME_PATH / profile_img_base_dir
profile_img_filename = f'{username}_{uuid.uuid4().hex}.png'
profile_img_fullpath = profile_img_dir / profile_img_filename
profile_img_request_path = profile_img_base_dir / profile_img_filename
os.makedirs(profile_img_dir, exist_ok=True)
if is_default_img:
digest = md5(username.encode('utf-8')).hexdigest()
urllib.request.urlretrieve(
f'https://www.gravatar.com/avatar/{digest}?d=identicon&s=200',
profile_img_fullpath,
)
else:
img = base64.b64decode(encoded_img)
with open(profile_img_fullpath, 'wb') as f:
f.write(img)
if self.profile_img_path:
os.remove(config.VOLUME_PATH / self.profile_img_path)
self.profile_img_path = str(profile_img_request_path)
def __init__(self, username, email):
self.username = username
self.email = email
self.set_profile_img(self.username, is_default_img=True)
def set_password(self, password):
self.password = generate_password_hash(password)
def check_password(self, password) -> bool:
return check_password_hash(self.password, password)
def to_dict(self) -> Dict:
return {
'id': self.id,
'username': self.username,
'email': self.email,
'created_at': self.created_at.strftime('%Y-%m-%dT%H:%M:%S'),
'profile_img_path': self.profile_img_path,
'_links': {
'self': url_for('v1.users.get_user', id=self.id),
}
}
```
#### File: api/tests/conftest.py
```python
import os
import shutil
import pytest
if os.environ.get('IS_LOCAL'):
from dotenv import load_dotenv
load_dotenv('.env.local.test')
from app import create_app
from config import Config, get_config
from models import db, User
config: Config = get_config(os.environ.get('APP_MODE'))
@pytest.fixture
def app():
app = create_app(config)
return app
@pytest.fixture(scope='function')
def database(app):
with app.app_context():
db.drop_all()
shutil.rmtree(config.VOLUME_PATH / 'images' / 'profile', ignore_errors=True)
db.create_all()
yield db
@pytest.fixture
def client(app):
return app.test_client()
@pytest.fixture(scope='function')
def profile_image(request):
shutil.rmtree(config.VOLUME_PATH / 'images' / 'profile', ignore_errors=True)
def teardown():
shutil.rmtree(config.VOLUME_PATH / 'images' / 'profile', ignore_errors=True)
request.addfinalizer(teardown)
@pytest.fixture(scope='function')
def user(database):
email = '<EMAIL>'
username = 'test'
password = '<PASSWORD>'
user = User(
username=username,
email=email,
)
user.set_password(password)
database.session.add(user)
database.session.commit()
return user
```
#### File: tests/models/test_model.py
```python
import base64
import os
from models import User
def test_create_user(database):
email = '<EMAIL>'
username = 'test'
user = User(
username=username,
email=email,
)
database.session.add(user)
database.session.commit()
user = User.query.first()
assert user.email == email
def test_user_dict(database):
email = '<EMAIL>'
username = 'test'
user = User(
username=username,
email=email,
)
database.session.add(user)
database.session.commit()
assert user.to_dict() == {
'id': user.id,
'username': username,
'email': email,
'created_at': user.created_at.strftime('%Y-%m-%dT%H:%M:%S'),
'profile_img_path': user.profile_img_path,
'_links': {
'self': f'/v1/api/users/{user.id}'
}
}
def test_user_password():
email = '<EMAIL>'
username = 'test'
password = '<PASSWORD>!'
wrong_password = '<PASSWORD>!'
user = User(
username=username,
email=email,
)
user.set_password(password)
assert user.check_password(password)
assert not user.check_password(wrong_password)
def test_user_profile_image(app, profile_image):
email = '<EMAIL>'
username = 'test'
user = User(
username=username,
email=email,
)
with open(app.config['VOLUME_PATH'] / user.profile_img_path, 'rb') as f:
original_profile_img_binary = f.read()
with open('tests/profile_images/default.png', 'rb') as f:
default_profile_img_binary = f.read()
assert default_profile_img_binary == original_profile_img_binary
# Update profile image
with open('tests/profile_images/new.png', 'rb') as f:
new_profile_img_binary = f.read()
encoded_new_img = base64.b64encode(new_profile_img_binary)
user.set_profile_img(username, encoded_img=encoded_new_img)
with open(app.config['VOLUME_PATH'] / user.profile_img_path, 'rb') as f:
updated_profile_img_binary = f.read()
assert updated_profile_img_binary == new_profile_img_binary
assert len(os.listdir(app.config['VOLUME_PATH'] / 'images/profile')) == 1
```
#### File: api/v1/api.py
```python
from typing import Tuple
from flask import Blueprint, jsonify, Response
from v1.views import (
auth_api,
users_api,
)
api = Blueprint('v1', __name__, url_prefix='/v1/api')
api.register_blueprint(auth_api)
api.register_blueprint(users_api)
@api.get('/')
def api_v1() -> Tuple[Response, int]:
return jsonify({'message': 'api v1'}), 200
```
#### File: boiler-plates-and-examples/api-sanic/app.py
```python
from sanic import Sanic
from sanic.response import text
app = Sanic('App')
@app.get("/")
async def hello_world(request):
return text('Hello, world.')
if __name__ == '__main__':
app.go_fast(host='0.0.0.0')
``` |
{
"source": "jmg-74/exam",
"score": 2
} |
#### File: exam/flowers/stats.py
```python
import os
import pickle
import argparse
import datetime
import numpy as np
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.optim as optim
from train import test, train, get_data, hybrid_model
def main():
# Pre-trained model
VALID_ARCH_CHOICES = ("vgg16", "vgg13", "densenet121")
# Parse command line arguments
ap = argparse.ArgumentParser()
ap.add_argument("data_dir",
help="Directory containing the dataset (default: data)",
default="data",
nargs="?")
ap.add_argument("--arch",
help="Model architecture from 'torchvision.models' (default: vgg16)",
choices=VALID_ARCH_CHOICES, default=VALID_ARCH_CHOICES[0])
# ap.add_argument("--hidden-units",
# help="Number of units the hidden layer should consist of (default: 512)",
# default=512,
# type=int)
ap.add_argument("--cpu",
help="Use CPU (else GPU) for training (default if not set: GPU)",
action="store_true")
args = ap.parse_args()
device = "cpu" if args.cpu else "cuda"
args.device = device
args.noise = 0.25
args.clip = 1.0
args.batch_size = 64
args.hidden_units = 256
args.delta = 1e-4
# Build model: chose loss function, optimizer, processor support
# # Done later to reset the model
# model = hybrid_model(arch=args.arch, hidden_units=args.hidden_units)
criterion = nn.NLLLoss()
device = "cpu" if args.cpu else "cuda"
# ===== TUNING ===========================================================
# Hyperparameters to test
lr_range = [1e-4] ##### <== choice (enumeration)
batch_size_range = [32, 16, 8, 2] #, 32, 128, 8, 4, 1] ##### <== choice (enumeration)
epochs = 30 ##### <== choice (1 value=max)
# Number of iteration for each parameter
iter = 1 ##### <== choice (single value)
# DP or not DP, that is the question
args.disable_dp = False ##### <== choice (boolean)
# ========================================================================
# File to export results
dp_or_not = "noDP_" if args.disable_dp else "DP_"
file = "experiment_stats/accuracy_data_" + dp_or_not
file += str(datetime.datetime.today()).replace(' ','_') + ".csv"
steps = len(lr_range) * len(batch_size_range) * iter
step = 0
# Write column titles
with open(file, 'w') as f:
f.write('learning_rate, batch_size, n_epochs, accuracy, n_times_for_avg\n')
# Experiment loops
for lr in lr_range:
args.learning_rate = lr
for bs in batch_size_range:
args.batch_size = bs
# Load the dataset into a dataloader ### default test batch size ###
trainloader, testloader, mapping = get_data(data_folder=args.data_dir,
batch_size=bs)
args.sample_size = len(trainloader.dataset)
#for epochs in epochs_range:
accuracy_sum = []
for _ in range(iter):
# Reset the model
model, optimizer = hybrid_model(arch=args.arch,
hidden_units=args.hidden_units,
args=args)
step += 1
_, acc = train(model=model,
trainloader=trainloader,
testloader=testloader,
epochs=epochs,
print_every=None,
criterion=criterion,
optimizer=optimizer,
device=device,
arch=args.arch,
model_dir='',
serialize=False,
detail=False,
args=args,
)
acc = np.multiply(acc, 100)
accuracy_sum.append(acc)
print(f' {step}/{steps}\tlr={lr}, bs={bs},')
for n_epoch, accur in enumerate(acc, start=1):
line = f'{lr}, {bs}, {n_epoch}, {accur:.2f}, 1\n'
with open(file, 'a') as f:
f.write(line)
print(f'\t. ×{n_epoch} epoch{"s" if n_epoch > 1 else " "}'
f' => accuracy = {accur:.2f}%')
# Sum up for identical settings, repeted `iter` times
if iter > 1:
acc_avg = np.average(accuracy_sum, axis=0)
for n_epoch, accur in enumerate(acc_avg, start=1):
line = f'{lr}, {bs}, {n_epoch}, {accur:.2f}, {iter}\n'
with open(file, 'a') as f:
f.write(line)
print(f'\t\t>>> Average on {iter} iterations >>>\tlr={lr}, bs={bs},'
f' ×{n_epoch} epoch{"s" if n_epoch > 1 else " "}'
f' => accuracy = {accur:.2f}%')
if __name__ == "__main__":
main()
```
#### File: exam/flowers/train.py
```python
import argparse
import os
from shutil import copyfile
from tqdm import tqdm
import torch
from torch import nn, optim
from torchvision import datasets, models, transforms
# To be able to fetch next packages from parent folder
import sys
sys.path.append("..")
from torch.utils.data import DataLoader
from torch.cuda import memory_allocated
from collections import OrderedDict
from torchdp import PrivacyEngine, utils
def test(model, testloader, criterion, device):
"""
Returns (loss, accuracy) of model w.r.t. the given testloader.
"""
# Switch to evaluation mode, and CUDA if possible
## _mem_monitor("TEST 0", device)
model.eval()
model.to(device)
## _mem_monitor("TEST 1 (model loaded)", device)
losses = 0
correct = 0
with torch.no_grad():
for images, labels in testloader:
images, labels = images.to(device), labels.to(device)
## _mem_monitor("TEST 2 (images loaded)", device) # ===== monitoring =====
# Forward step and loss computation
output = model(images) # Is Torch([b, 102])
losses += criterion(output, labels).item()
_, predicted = torch.max(output.data, 1) # (values, indices)
correct += (predicted == labels).sum().item()
# Switch back to training mode
model.train()
testloader_size = len(testloader.dataset)
accuracy = correct / testloader_size
loss = losses / len(testloader) # consistent with training loss
## _mem_monitor("TEST 3 (END)", device)
return loss, accuracy
def train(model, trainloader, testloader, epochs, print_every, criterion, optimizer, device,
arch="vgg16", model_dir="models", serialize=False, detail=False, args=None):
"""
Trains the model with given parameters then saves model state (parameters).
. These files are serialized (like pickle) if `serialize` is True:
checkpoint.pth represents current epoch, best_model.pth is the best one.
. Details are printed if boolean parameter `detail` is True.
"""
# Change to train mode, load on GPU if possible
model.train()
model.to(device) # In fact, already done
best_accuracy = 0
steps_nb = len(trainloader)
accuracy_list = [0 for _ in range(epochs)]
loss_list = [0 for _ in range(epochs)]
# Epoch loop
for epoch in range(1, epochs+1):
running_loss, running_step = 0, 0
# Batch loop
for step, (images, labels) in enumerate(tqdm(trainloader), start=1):
images, labels = images.to(device), labels.to(device)
## _mem_monitor("2. TRAIN : images loaded", device) # ===== Monitoring =====
# Forward and backward passes
optimizer.zero_grad()
outputs = model(images)
loss = criterion(outputs, labels)
## _mem_monitor("2.1. TRAIN : forward pass done", device) # ===== Monitoring =====
loss.backward()
## _mem_monitor("3. TRAIN : loss gradient backpropagated", device) # ===== Monitoring =====
optimizer.step()
## _mem_monitor("4. TRAIN :gradient computed", device) # ===== Monitoring =====
running_loss += loss.item()
running_step += 1
# Print perf. each `print_every` step, or last one
if (detail and (step % print_every == 0 or step == steps_nb)
# Cancel printing if it is near the end
and not(0.94 * steps_nb < step < steps_nb)):
testloss, accuracy = test(model, testloader, criterion, device)
# ===== DP ===================================================
if not args.disable_dp:
epsilon, best_alpha = optimizer.privacy_engine.get_privacy_spent(args.delta)
print(f' [DP : ε = {epsilon:.2f}, δ = {args.delta} for α = {best_alpha}]')
# ============================================================
print(f'>>> {step}/{steps_nb}, epoch {epoch}/{epochs} >>>\t'
f'Training loss: {running_loss/running_step:.3f} -- '
f'Test loss: {testloss:.3f} -- '
f'Test accuracy: {accuracy*100:.1f}%')
running_loss, running_step = 0, 0
# End of an epoch ;-)
if detail:
print()
else:
testloss, accuracy = test(model, testloader, criterion, device)
# One last print if `not detail`
if not args.disable_dp:
epsilon, best_alpha = optimizer.privacy_engine.get_privacy_spent(args.delta)
print(f' [DP : ε = {epsilon:.2f}, δ = {args.delta} for α = {best_alpha}]')
# ============================================================
print(f'>>> {step}/{steps_nb}, epoch {epoch}/{epochs} >>>\t'
f'Training loss: {running_loss/running_step:.3f} -- '
f'Test loss: {testloss:.3f} -- '
f'Test accuracy: {accuracy*100:.1f}%')
accuracy_list[epoch-1] = accuracy
loss_list[epoch-1] = testloss
# Serialize model state
if serialize:
torch.save({'epoch': epochs,
'classifier': model.classifier,
'state_dict': model.state_dict(),
'optimizer' : optimizer.state_dict(),
'class_idx_mapping': model.class_idx_mapping,
'arch': arch,
'best_accuracy': best_accuracy*100},
os.path.join(model_dir, 'checkpoint.pth'))
if accuracy > best_accuracy:
copyfile(os.path.join(model_dir,'checkpoint.pth'),
os.path.join(model_dir,'best_model.pth'))
best_accuracy = max(accuracy, best_accuracy)
return testloss, accuracy_list
def get_data(data_folder, batch_size, test_batch_size=1):
"""
Returns the dataset as a dataloader.
Arguments:
data_folder: Path to the folder where data resides.
Should have two subdirectories named "train" and "valid".
batch_size: size of batch for Stochastic Gradient Descent.
Returns tuple of:
train_dataloader: Train dataloader iterator.
test_dataloader: Validation dataloader iterator.
train_dataset.class_to_id: dict to map classes to indexes.
"""
### TODO: download dataset and unzip if folder is empty ###
train_dir = os.path.join(data_folder, "train")
valid_dir = os.path.join(data_folder, "valid")
# Define transforms for the training and validation sets
# Divide side of images by FACT
FACT = 2
train_transforms = transforms.Compose([
transforms.RandomRotation(30),
transforms.RandomResizedCrop(size=224 // FACT),
transforms.RandomHorizontalFlip(),
transforms.RandomVerticalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
validation_transforms = transforms.Compose([
transforms.Resize(256 // FACT),
transforms.CenterCrop(224 // FACT),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
# Load the datasets with ImageFolder and previous transforms
# (ImageFolder is a generic data loader, for a specific organisation
# of images in folders)
train_dataset = datasets.ImageFolder(train_dir,
transform=train_transforms)
test_dataset = datasets.ImageFolder(valid_dir,
transform=validation_transforms)
# Using the image datasets, define the dataloaders
# (DataLoader provides an iterator over a sampled dataset, see
# https://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader)
train_dataloader = DataLoader(train_dataset,
shuffle=True,
batch_size=batch_size,
num_workers=4)
test_dataloader = DataLoader(test_dataset,
shuffle=True,
batch_size=test_batch_size,
num_workers=4)
# (class_to_idx is an attribute of any ImageFolder)
return train_dataloader, test_dataloader, train_dataset.class_to_idx
def hybrid_model(arch="vgg16", hidden_units=4096, class_idx_mapping=None, args=None):
"""
Return a model based on `arch` pre-trained one and 2 new fully connected layers.
"""
# Model adapted to chosen architecture, thanks to dynamic execution
my_local = dict()
exec(f'model = models.{arch}(pretrained=True)', globals(), my_local)
model = my_local['model']
## model = utils.convert_batchnorm_modules(model) # ===== Monitoring =====
# Freeze existing model parameters for training
for param in model.parameters():
param.requires_grad = False
# Get last child module of imported model
last_child = list(model.children())[-1]
if type(last_child) == torch.nn.modules.linear.Linear:
input_features = last_child.in_features
elif type(last_child) == torch.nn.modules.container.Sequential:
input_features = last_child[0].in_features
    # Add some new layers to train
classifier = nn.Sequential(OrderedDict([ ### vgg16 : input_features = 25088
('fc1', nn.Linear(input_features, hidden_units)),
('relu', nn.ReLU()),
###('dropout', nn.Dropout(p=0.5)),
('fc2', nn.Linear(hidden_units, 102)),
###('relu2', nn.ReLU()), ## Traces of
###('fc3', nn.Linear(256, 102)), ## experiments.
('output', nn.LogSoftmax(dim=1))
]))
model.classifier = classifier
model.class_idx_mapping = class_idx_mapping
model = model.to(args.device)
## _mem_monitor("1. HYBRID_MODEL : model loaded ", args.device) # ===== Monitoring =====
#optimizer = optim.Adam(model.classifier.parameters(), lr=args.learning_rate)
optimizer = optim.SGD(model.classifier.parameters(), lr=args.learning_rate)
if not args.disable_dp:
privacy_engine = PrivacyEngine(
classifier, ### = model, idem with classifier
batch_size=args.batch_size,
sample_size=args.sample_size,
alphas=[1 + x / 10.0 for x in range(1, 100)] + list(range(12, 64)),
noise_multiplier=args.noise,
max_grad_norm=args.clip,
)
privacy_engine.attach(optimizer)
## _mem_monitor("HYBRID_MODEL after DP tranfo. ", args.device) # ===== Monitoring =====
return model, optimizer
def _mem_monitor(msg="", device=None, silent=False): ### ===== MONITORING ===== ###
"""
Print a message showing memory allocated on device.
"""
if silent: return
print("\t>>>", msg, ">>>",
f'{memory_allocated(device)/1024/1024:.3f} MB')
def main():
# Pre-trained model
VALID_ARCH_CHOICES = ("vgg16", "vgg13", "densenet121")
# Print perf. PRINT_PER_EPOCH or (PRINT_PER_EPOCH + 1) times per epoch
PRINT_PER_EPOCH = 4
# Parse command line arguments
ap = argparse.ArgumentParser()
ap.add_argument("data_dir",
help="Directory containing the dataset (default: data)",
default="data",
nargs="?")
ap.add_argument("--arch",
help="Model architecture from 'torchvision.models' (default: vgg16)",
choices=VALID_ARCH_CHOICES, default=VALID_ARCH_CHOICES[0])
ap.add_argument("--hidden-units",
help="Number of units the hidden layer should consist of (default: 512)",
default=512,
type=int)
ap.add_argument("--batch-size",
help="Batch size during training (default: 64)",
default=64,
type=int)
ap.add_argument("--test-batch-size",
help="Batch size for test, validation (default: 64)",
default=64,
type=int)
ap.add_argument("--learning-rate",
help="Learning rate for optimizer (default: 0.001)",
default=0.001,
type=float)
ap.add_argument("--epochs",
help="Number of iterations over the whole dataset (default: 3)",
default=3,
type=int)
ap.add_argument("--cpu",
help="Use CPU (else GPU) for training (default if not set: GPU)",
action="store_true")
ap.add_argument("--model-dir",
help="Directory which will contain the model checkpoints (default: models)",
default="models")
ap.add_argument("--serialize",
help="Serialize, save the trained model if set (default: not set)",
default=False,
action="store_true")
ap.add_argument("--no-detail",
help="Print details during training if not set (default: not set - slows down training)",
default=False,
action="store_true")
# DP specific
ap.add_argument("--disable-dp",
help="Disable 'Diffential Privacy' mode if set (default: not set)",
default=False,
action="store_true")
ap.add_argument("--noise",
help="Noise multiplier for Gaussian noise added (default: 0.25)",
default=0.25,
type=float)
ap.add_argument("--clip",
help="Clip per-sample gradients to this l2-norm (default: 1.0)",
default=1.0,
type=float)
ap.add_argument("--delta",
help="Target delta for DP (default: 1e-4)",
default=1e-4,
type=float)
args = ap.parse_args()
args.device = "cpu" if args.cpu else "cuda"
## _mem_monitor("INIT", args.device) # ===== Monitoring =====
# Create directory for model files: checkpoint.pth and best_model.pth
os.system("mkdir -p " + args.model_dir)
# Load the dataset into a dataloader (train / test = 6552 / 818 img)
trainloader, testloader, mapping = get_data(data_folder=args.data_dir,
batch_size=args.batch_size,
test_batch_size=args.test_batch_size)
args.sample_size = len(trainloader.dataset)
    # Build model: choose loss function, optimizer, processor support
model , optimizer = hybrid_model(arch=args.arch,
hidden_units=args.hidden_units,
class_idx_mapping=mapping,
args=args)
criterion = nn.NLLLoss()
## _mem_monitor("1.1 BEFORE TRAINING", args.device) # ===== Monitoring =====
# Launch training
train(model=model,
trainloader=trainloader,
testloader=testloader,
epochs=args.epochs,
print_every=int(len(trainloader)/PRINT_PER_EPOCH),
criterion=criterion,
optimizer=optimizer,
device=args.device,
arch=args.arch,
model_dir=args.model_dir,
serialize=args.serialize,
detail=not(args.no_detail),
args=args,
)
if __name__ == '__main__':
main()
``` |
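The `hybrid_model` helper above replaces the pretrained network's classifier with a small trainable head and, unless `--disable-dp` is given, wraps the optimizer with Opacus' `PrivacyEngine`. A minimal sketch of that last step, mirroring the legacy (pre-1.0) Opacus constructor used in the script; newer Opacus releases expose a different `make_private` API, so treat this as illustrative:
```python
import torch.nn as nn
import torch.optim as optim
from opacus import PrivacyEngine  # legacy (pre-1.0) Opacus API, matching the call above

# Toy stand-in for `model.classifier` (25088 inputs, as noted for vgg16 above).
classifier = nn.Sequential(nn.Linear(25088, 512), nn.ReLU(),
                           nn.Linear(512, 102), nn.LogSoftmax(dim=1))
optimizer = optim.SGD(classifier.parameters(), lr=0.001)

privacy_engine = PrivacyEngine(
    classifier,
    batch_size=64,                 # --batch-size
    sample_size=6552,              # number of training images noted above
    alphas=[1 + x / 10.0 for x in range(1, 100)] + list(range(12, 64)),
    noise_multiplier=0.25,         # --noise
    max_grad_norm=1.0,             # --clip
)
privacy_engine.attach(optimizer)   # step() now clips per-sample gradients and adds Gaussian noise
```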
{
"source": "JMGaljaard/fltk-testbed",
"score": 2
} |
#### File: fltk/core/client.py
```python
from typing import Tuple, Any
import numpy as np
import sklearn
import time
import torch
from fltk.core.node import Node
from fltk.schedulers import MinCapableStepLR
from fltk.strategy import get_optimizer
from fltk.util.config import FedLearningConfig
class Client(Node):
"""
Federated experiment client.
"""
running = False
def __init__(self, identifier: str, rank: int, world_size: int, config: FedLearningConfig):
super().__init__(identifier, rank, world_size, config)
self.loss_function = self.config.get_loss_function()()
self.optimizer = get_optimizer(self.config.optimizer)(self.net.parameters(),
**self.config.optimizer_args)
self.scheduler = MinCapableStepLR(self.optimizer,
self.config.scheduler_step_size,
self.config.scheduler_gamma,
self.config.min_lr)
def remote_registration(self):
"""
Function to perform registration to the remote. Currently, this will connect to the Federator Client. Future
        versions may provide functionality to register to an arbitrary Node, including other Clients.
@return: None.
@rtype: None
"""
self.logger.info('Sending registration')
self.message('federator', 'ping', 'new_sender')
self.message('federator', 'register_client', self.id, self.rank)
self.running = True
self._event_loop()
def stop_client(self):
"""
Function to stop client after training. This allows remote clients to stop the client within a specific
timeframe.
@return: None
@rtype: None
"""
self.logger.info('Got call to stop event loop')
self.running = False
def _event_loop(self):
self.logger.info('Starting event loop')
while self.running:
time.sleep(0.1)
self.logger.info('Exiting node')
def train(self, num_epochs: int, round_id: int):
"""
Function implementing federated learning training loop, allowing to run for a configurable number of epochs
on a local dataset. Note that only the last statistics of a run are sent to the caller (i.e. Federator).
@param num_epochs: Number of epochs to run during a communication round's training loop.
@type num_epochs: int
@param round_id: Global communication round ID to be used during training.
@type round_id: int
@return: Final running loss statistic and acquired parameters of the locally trained network. NOTE that
intermediate information is only logged to the STD-out.
@rtype: Tuple[float, Dict[str, torch.Tensor]]
"""
start_time = time.time()
running_loss = 0.0
final_running_loss = 0.0
for local_epoch in range(num_epochs):
effective_epoch = round_id * num_epochs + local_epoch
progress = f'[RD-{round_id}][LE-{local_epoch}][EE-{effective_epoch}]'
if self.distributed:
                # Update the sampler's epoch in case a client participates in multiple (num_epochs)
                # communication rounds, as the same ordering of data would otherwise re-occur during training.
self.dataset.train_sampler.set_epoch(effective_epoch)
training_cardinality = len(self.dataset.get_train_loader())
self.logger.info(f'{progress}{self.id}: Number of training samples: {training_cardinality}')
for i, (inputs, labels) in enumerate(self.dataset.get_train_loader(), 0):
inputs, labels = inputs.to(self.device), labels.to(self.device)
# zero the parameter gradients
self.optimizer.zero_grad()
outputs = self.net(inputs)
loss = self.loss_function(outputs, labels)
loss.backward()
self.optimizer.step()
running_loss += loss.item()
# Mark logging update step
if i % self.config.log_interval == 0:
self.logger.info(
f'[{self.id}] [{local_epoch}/{num_epochs:d}, {i:5d}] loss: {running_loss / self.config.log_interval:.3f}')
final_running_loss = running_loss / self.config.log_interval
running_loss = 0.0
end_time = time.time()
duration = end_time - start_time
self.logger.info(f'{progress} Train duration is {duration} seconds')
return final_running_loss, self.get_nn_parameters(),
def set_tau_eff(self, total):
client_weight = self.get_client_datasize() / total
n = self.get_client_datasize() # pylint: disable=invalid-name
E = self.config.epochs # pylint: disable=invalid-name
B = 16 # nicely hardcoded :) # pylint: disable=invalid-name
tau_eff = int(E * n / B) * client_weight
if hasattr(self.optimizer, 'set_tau_eff'):
self.optimizer.set_tau_eff(tau_eff)
def test(self) -> Tuple[float, float, np.array]:
"""
Function implementing federated learning test loop.
@return: Statistics on test-set given a (partially) trained model; accuracy, loss, and confusion matrix.
@rtype: Tuple[float, float, np.array]
"""
start_time = time.time()
correct = 0
total = 0
targets_ = []
pred_ = []
loss = 0.0
with torch.no_grad():
for (images, labels) in self.dataset.get_test_loader():
images, labels = images.to(self.device), labels.to(self.device)
outputs = self.net(images)
_, predicted = torch.max(outputs.data, 1) # pylint: disable=no-member
total += labels.size(0)
correct += (predicted == labels).sum().item()
targets_.extend(labels.cpu().view_as(predicted).numpy())
pred_.extend(predicted.cpu().numpy())
loss += self.loss_function(outputs, labels).item()
# Calculate learning statistics
loss /= len(self.dataset.get_test_loader().dataset)
accuracy = 100.0 * correct / total
confusion_mat = sklearn.metrics.confusion_matrix(targets_, pred_)
end_time = time.time()
duration = end_time - start_time
self.logger.info(f'Test duration is {duration} seconds')
return accuracy, loss, confusion_mat
def get_client_datasize(self): # pylint: disable=missing-function-docstring
return len(self.dataset.get_train_sampler())
def exec_round(self, num_epochs: int, round_id: int) -> Tuple[Any, Any, Any, Any, float, float, float, np.array]:
"""
Function as access point for the Federator Node to kick off a remote learning round on a client.
@param num_epochs: Number of epochs to run
@type num_epochs: int
@return: Tuple containing the statistics of the training round; loss, weights, accuracy, test_loss, make-span,
training make-span, testing make-span, and confusion matrix.
@rtype: Tuple[Any, Any, Any, Any, float, float, float, np.array]
"""
self.logger.info(f"[EXEC] running {num_epochs} locally...")
start = time.time()
loss, weights = self.train(num_epochs, round_id)
time_mark_between = time.time()
accuracy, test_loss, test_conf_matrix = self.test()
end = time.time()
round_duration = end - start
train_duration = time_mark_between - start
test_duration = end - time_mark_between
# self.logger.info(f'Round duration is {duration} seconds')
if hasattr(self.optimizer, 'pre_communicate'): # aka fednova or fedprox
self.optimizer.pre_communicate()
for k, value in weights.items():
weights[k] = value.cpu()
return loss, weights, accuracy, test_loss, round_duration, train_duration, test_duration, test_conf_matrix
def __del__(self):
self.logger.info(f'Client {self.id} is stopping')
```
#### File: fltk/datasets/dataset.py
```python
from abc import abstractmethod
import torch
from torch.utils.data import DataLoader
from torch.utils.data import TensorDataset
from fltk.util.config import DistLearningConfig
class Dataset:
def __init__(self, config, learning_params: DistLearningConfig, rank: int, world_size: int):
self.config = config
self.learning_params = learning_params
self.rank = rank
self.world_size = world_size
self.train_loader = self.load_train_dataset()
self.test_loader = self.load_test_dataset()
def get_train_dataset(self):
"""
Returns the train dataset.
:return: tuple
"""
return self.train_loader
def get_test_dataset(self):
"""
Returns the test dataset.
:return: tuple
"""
return self.test_loader
@abstractmethod
def load_train_dataset(self):
"""
Loads & returns the training dataset.
:return: tuple
"""
raise NotImplementedError("load_train_dataset() isn't implemented")
@abstractmethod
def load_test_dataset(self):
"""
Loads & returns the test dataset.
:return: tuple
"""
raise NotImplementedError("load_test_dataset() isn't implemented")
def get_train_loader(self, **kwargs):
"""
Return the data loader for the train dataset.
:param batch_size: batch size of data loader.
:type batch_size: int
:return: torch.utils.data.DataLoader.
"""
return self.train_loader
def get_test_loader(self, **kwargs):
"""
Return the data loader for the test dataset.
:param batch_size: batch size of data loader.
:type batch_size: int
:return: torch.utils.data.DataLoader
"""
return self.test_loader
@staticmethod
def get_data_loader_from_data(batch_size, X, Y, **kwargs):
"""
Get a data loader created from a given set of data.
:param batch_size: batch size of data loader.
:type batch_size: int
:param X: data features,
:type X: numpy.Array()
:param Y: data labels.
:type Y: numpy.Array()
:return: torch.utils.data.DataLoader
"""
X_torch = torch.from_numpy(X).float() # pylint: disable=no-member
if "classification_problem" in kwargs and kwargs["classification_problem"] == False:
Y_torch = torch.from_numpy(Y).float() # pylint: disable=no-member
else:
Y_torch = torch.from_numpy(Y).long() # pylint: disable=no-member
dataset = TensorDataset(X_torch, Y_torch)
kwargs.pop("classification_problem", None)
return DataLoader(dataset, batch_size=batch_size, **kwargs)
```
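As a small usage sketch of the static helper above (the import path is taken from the file header; the arrays are made up), numpy data can be wrapped into a `DataLoader` directly:
```python
import numpy as np
from fltk.datasets.dataset import Dataset

X = np.random.rand(100, 3).astype(np.float32)   # features
Y = np.random.randint(0, 2, size=100)           # integer class labels
loader = Dataset.get_data_loader_from_data(16, X, Y, shuffle=True)

for features, labels in loader:
    # features: FloatTensor of shape [<=16, 3]; labels: LongTensor (classification by default)
    break
```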
#### File: fltk/nets/__init__.py
```python
import logging
from typing import Type, Dict
import torch
from fltk.util.config.definitions.net import Nets
from .cifar_100_resnet import Cifar100ResNet
from .cifar_100_vgg import Cifar100VGG, vgg11_bn, vgg13_bn, vgg16_bn, vgg19_bn
from .cifar_10_cnn import Cifar10CNN
from .cifar_10_resnet import Cifar10ResNet, ResNet18, ResNet34, ResNet50, ResNet101, ResNet152
from .fashion_mnist_cnn import FashionMNISTCNN
from .fashion_mnist_resnet import FashionMNISTResNet
from .mnist_cnn import MNIST_CNN
from .reddit_lstm import RNNModel
from .simple import SimpleMnist, SimpleNet
def _available_nets() -> Dict[Nets, Type[torch.nn.Module]]:
"""
Function to acquire networks provided by the nets package.
@return: Dictionary containing mapping from `Nets` definitions to Typed implementation.
@rtype: Dict[Nets, Type[torch.nn.Module]]
"""
return {
Nets.cifar100_resnet: Cifar100ResNet,
Nets.cifar100_vgg: Cifar100VGG,
Nets.cifar10_cnn: Cifar10CNN,
Nets.cifar10_resnet: Cifar10ResNet,
Nets.fashion_mnist_cnn: FashionMNISTCNN,
Nets.fashion_mnist_resnet: FashionMNISTResNet,
Nets.mnist_cnn: MNIST_CNN,
}
def get_net(name: Nets) -> Type[torch.nn.Module]:
"""
Helper function to get specific Net implementation.
@param name: Network definition to obtain.
@type name: Nets
@return: Class reference to required Network.
@rtype: Type[torch.nn.Module]
"""
logging.info(f"Getting net: {name}")
return _available_nets()[name]
def get_net_split_point(name: Nets) -> int:
"""
@deprecated Function to get split points in a network.
@param name: Network definition to get split position/module index.
@type name: Nets
@return: Index of network split position.
@rtype: int
"""
nets_split_point = {
Nets.cifar100_resnet: 48,
Nets.cifar100_vgg: 28,
Nets.cifar10_cnn: 15,
Nets.cifar10_resnet: 39,
Nets.fashion_mnist_cnn: 7,
Nets.fashion_mnist_resnet: 7,
Nets.mnist_cnn: 2,
}
return nets_split_point[name]
```
#### File: strategy/optimization/__init__.py
```python
from typing import Type
import torch
from fltk.util.config.definitions.optim import Optimizations
from .fed_prox import FedProx
from .fed_nova import FedNova
def get_optimizer(name: Optimizations, federated: bool = True) -> Type[torch.optim.Optimizer]:
"""
Helper function to get specific Optimization class references.
@param name: Optimizer class reference.
@type name: Optimizations
@return: Class reference corresponding to the requested Optimizations definition. Requires instantiation with
pre-defined args and kwargs, depending on the Type of Optimizer.
@rtype: Type[torch.optim.Optimizer]
"""
optimizers = {
Optimizations.adam: torch.optim.Adam,
Optimizations.adam_w: torch.optim.AdamW,
Optimizations.sgd: torch.optim.SGD,
}
if federated:
optimizers.update({
Optimizations.fedprox: FedProx,
Optimizations.fednova: FedNova
})
return optimizers[name]
```
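A brief sketch of resolving and instantiating an optimizer through this lookup (the `fltk.strategy` import path follows the one used in `fltk/core/client.py` above; the model is a toy placeholder):
```python
import torch.nn as nn
from fltk.strategy import get_optimizer
from fltk.util.config.definitions.optim import Optimizations

model = nn.Linear(4, 2)                                  # placeholder network
optimizer_cls = get_optimizer(Optimizations.sgd)         # -> torch.optim.SGD
optimizer = optimizer_cls(model.parameters(), lr=0.01)   # instantiate like any torch optimizer
```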
#### File: util/cluster/worker.py
```python
import os
from torch import distributed as dist
def should_distribute() -> bool:
"""
Function to check whether distributed execution is needed.
Note: the WORLD_SIZE environmental variable needs to be set for this to work (larger than 1).
PytorchJobs launched from KubeFlow automatically set this property.
@return: Indicator for distributed execution.
@rtype: bool
"""
world_size = int(os.environ.get('WORLD_SIZE', 1))
return dist.is_available() and world_size > 1
```
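A sketch of how a worker entrypoint might use this check before joining a process group (the import path is assumed from the file header, and the `gloo` backend is just an example; with the default `env://` rendezvous, `MASTER_ADDR` and `MASTER_PORT` must also be set):
```python
import os
from torch import distributed as dist
from fltk.util.cluster.worker import should_distribute

if should_distribute():
    dist.init_process_group(
        backend='gloo',                            # CPU-friendly backend, illustrative choice
        rank=int(os.environ.get('RANK', 0)),
        world_size=int(os.environ['WORLD_SIZE']),
    )
```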
#### File: fltk/util/data_container.py
```python
import csv
import numpy as np
import time
from dataclasses import dataclass
from pathlib import Path
from typing import List, Type, TextIO
@dataclass
class DataRecord:
pass
@dataclass
class FederatorRecord(DataRecord):
num_selected_clients: int
round_id: int
round_duration: int
test_loss: float
test_accuracy: float
# Accuracy per class?
timestamp: float = time.time()
node_name: str = ''
confusion_matrix: np.array = None
@dataclass
class ClientRecord(DataRecord):
round_id: int
train_duration: float
test_duration: float
round_duration: float
num_epochs: int
trained_items: int
accuracy: float
train_loss: float
test_loss: float
# Accuracy per class?
timestamp: float = time.time()
node_name: str = ''
confusion_matrix: np.array = None
class DataContainer:
"""
Datacontainer class for collecting experiment data. By default, an 'Excel' compatible format is used by numpy and
the csv library. As such, it is advised to use a library such as `pandas` to load data for analysis purposes.
"""
records: List[DataRecord]
file_name: str
file_handle: TextIO
file_path: Path
append_mode: bool
record_type: Type[DataRecord]
delimiter = ','
name: str
def __init__(self, name: str, output_location: Path, record_type: Type[DataRecord], append_mode: bool = False):
# print(f'Creating new Data container for client {name}')
self.records = []
self.file_name = f'{name}.csv'
self.name = name
output_location = Path(output_location)
output_location.mkdir(parents=True, exist_ok=True)
self.file_path = output_location / self.file_name
self.append_mode = append_mode
file_flag = 'a' if append_mode else 'w'
self.file_handle = open(self.file_path, file_flag)
print(f'[<=========>] Creating data container at {self.file_path}')
self.record_type = record_type
if self.append_mode:
open(self.file_path, 'w').close()
dw = csv.DictWriter(self.file_handle, self.record_type.__annotations__)
dw.writeheader()
self.file_handle.flush()
def append(self, record: DataRecord):
record.node_name = self.name
self.records.append(record)
if self.append_mode:
dw = csv.DictWriter(self.file_handle, self.record_type.__annotations__)
dw.writerow(record.__dict__)
self.file_handle.flush()
def save(self):
"""
Function to save the encapsulated data to the experiment file. The format is 'excel' compatible,
resulting in the capability of loading complex objects such as ndarrays as a field.
@return: None
@rtype: None
"""
if self.append_mode:
return
import numpy as np
np.set_printoptions(linewidth=10**6)
dw = csv.DictWriter(self.file_handle, self.record_type.__annotations__)
dw.writeheader()
# print(f'Saving {len(self.records)} for node {self.name}')
for record in self.records:
record.node_name = self.name
dw.writerow(record.__dict__)
self.file_handle.flush()
``` |
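For illustration, a single round of (made-up) client statistics could be recorded with the container above like this (import path assumed from the file header):
```python
from pathlib import Path
from fltk.util.data_container import ClientRecord, DataContainer

container = DataContainer('client_0', Path('./logging'), ClientRecord)
container.append(ClientRecord(
    round_id=1, train_duration=12.3, test_duration=1.4, round_duration=13.7,
    num_epochs=1, trained_items=512, accuracy=87.5, train_loss=0.42, test_loss=0.51,
))
container.save()  # writes ./logging/client_0.csv: one header row plus one record
```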
{
"source": "JMGama/simple-maths",
"score": 3
} |
#### File: simple-maths/Python/louville_distance.py
```python
from factorial import factorial
def louville_distance(precision):
"""
    Finds the distances, in zeroes, between consecutive '1's in the
    Liouville number.
    Returns the distances, so summing the earlier entries together
    gives the index at which there would have been a '1'.
"""
precision += 1 # To get far enough
distances = []
for index in range(1, precision):
try:
distances.append(factorial(index) - factorial(index - 1))
except IndexError:
break
distances[0] = 1 # Because of the way the list is structured
return distances
``` |
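A quick worked example (assuming the sibling `factorial` helper returns the usual values, with `factorial(0) == 1`): the '1' digits of Liouville's constant 0.110001000000000000000001... sit at positions 1!, 2!, 3!, ..., so the returned gaps are differences of factorials.
```python
from louville_distance import louville_distance

print(louville_distance(5))   # -> [1, 1, 4, 18, 96]
# Cumulative sums 1, 2, 6, 24, 120 are the factorials, i.e. the positions of the '1's.
```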
{
"source": "JMGama/Teachers-Evaluation",
"score": 2
} |
#### File: Teachers-Evaluation/evaluations/admin.py
```python
# from django.contrib import admin
# from .models import *
# # Formulary specifications for the Django admin page.
# class EvaluationsDetailExamQuestionAdmin(admin.ModelAdmin):
# list_display = ['id', 'idquestion', 'idexam']
# exclude = ['updatedon', 'createdon']
# class EvaluationsQuestionsAdmin(admin.ModelAdmin):
# list_display = ['id', 'description', 'type', 'status', 'optional',]
# search_fields = ['id', 'description', 'type', 'status', 'optional',]
# exclude = ['updatedon', 'createdon']
# class EvaluationsExamsAdmin(admin.ModelAdmin):
# list_display = ['id', 'description', 'idcareer', 'status', ]
# search_fields = ['id', 'description', 'status', ]
# exclude = ['updatedon', 'createdon']
# class EvaluationsTeachersAdmin(admin.ModelAdmin):
# list_display = ['idperson', 'enrollment', 'name', 'lastname',
# 'lastname2', 'instemail', 'status', ]
# search_fields = ['idperson', 'enrollment', 'name', 'lastname',
# 'lastname2', 'instemail', 'status', ]
# exclude = ['updatedon', 'createdon']
# class EvaluationsStudentsAdmin(admin.ModelAdmin):
# def get_career_name(self, obj):
# description = ParkingCareer.objects.get(
# idcareergissa__exact=obj.idcareer).description
# return description
# list_display = ['idperson', 'enrollment', 'name', 'lastname',
# 'lastname2', 'instemail', 'status', 'get_career_name']
# search_fields = ['idperson', 'enrollment', 'name', 'lastname',
# 'lastname2', 'instemail', 'status', ]
# exclude = ['updatedon', 'createdon']
# class EvaluationsDetailGroupPeriodSignatureAdmin(admin.ModelAdmin):
# list_display = ['id', 'idsignature', 'idteacher', 'idperiod', 'status', ]
# search_fields = ['id', 'idsignature__name', 'idteacher__enrollment',
# 'idteacher__name', 'idperiod__period', 'status']
# raw_id_fields = ('idsignature', 'idteacher',)
# exclude = ['updatedon', 'createdon']
# class EvaluationsDetailStudentGroupAdmin(admin.ModelAdmin):
# def get_student_id(self, obj):
# return obj.idstudent.enrollment
# def get_student_name(self, obj):
# return obj.idstudent.name + " " + obj.idstudent.lastname + " " + obj.idstudent.lastname2
# def get_materia(self, obj):
# return obj.idgroup.idsignature.name
# get_student_id.short_description = 'Matricula'
# get_student_name.short_description = 'Alumno'
# get_materia.short_description = 'Materia'
# list_display = ['id', 'idgroup', 'get_student_id',
# 'get_student_name', 'get_materia', 'status',]
# search_fields = ['idgroup', 'idstudent__enrollment', 'idstudent__name',
# 'idstudent__lastname', 'idstudent__lastname2', 'idgroup__idsignature__name', 'status',]
# raw_id_fields = ('idstudent',)
# exclude = ['updatedon', 'createdon',]
# # Register your models here.
# admin.site.register(EvaluationsStudents, EvaluationsStudentsAdmin)
# admin.site.register(EvaluationsDetailGroupPeriodSignature,
# EvaluationsDetailGroupPeriodSignatureAdmin)
# admin.site.register(EvaluationsDetailStudentGroup,
# EvaluationsDetailStudentGroupAdmin)
# admin.site.register(EvaluationsTeachers, EvaluationsTeachersAdmin)
# admin.site.register(EvaluationsExams, EvaluationsExamsAdmin)
# admin.site.register(EvaluationsDetailExamQuestion,
# EvaluationsDetailExamQuestionAdmin)
# admin.site.register(EvaluationsQuestions, EvaluationsQuestionsAdmin)
# admin.site.register(EvaluationsSignatures)
# # Admin site changes
# admin.site.site_url = '/evaluations/'
# admin.site.site_header = 'Evaluations Administration'
```
#### File: evaluations/views/logout.py
```python
from django.db.models import Q
from django.http import HttpResponse
from django.shortcuts import get_object_or_404, redirect, render
from django.views import View
class LogoutView(View):
template_login = 'evaluations/login.html'
def get(self, request):
"""Flush the session and redirects to the login page."""
try:
request.session.flush()
except KeyError:
pass
return render(request, self.template_login)
```
#### File: evaluations/views/monitoring.py
```python
import csv
from django.db import connection
from django.db.models import Q
from django.http import HttpResponse
from django.shortcuts import get_object_or_404, redirect, render
from django.views import View
from evaluations.models import (EvaluationsCareer, EvaluationsCoordinator,
EvaluationsDtlCoordinatorCareer)
class MonitoringView(View):
template_monitoring = 'evaluations/monitoring.html'
template_login = 'evaluations/login.html'
def get(self, request):
"""Get the information to show the monitoring page, with the general results"""
# Verify if the coordinator is correctly logged in.
if not request.session.get('session', False) or not request.session['type'] == 'coordinator':
return render(request, self.template_login)
# Values for the view and the monitoring navigation bar.
coordinator = EvaluationsCoordinator.objects.get(
pk__exact=request.session['id_coordinator'])
careers_id = EvaluationsDtlCoordinatorCareer.objects.filter(
fk_coordinator__exact=coordinator.id).values('fk_career')
careers = EvaluationsCareer.objects.filter(pk__in=careers_id)
# Get the general result for all the evaluations (total evaluated and average).
general_data = self.get_general_results()
        # Render the home view for the coordinators.
context = {
'coordinator': coordinator,
'careers': careers,
'general_data': general_data,
}
# If the coordinator is admin, set it true in the context to show admin things (reports, actions, etc).
if coordinator.type == 'ADMIN':
context['admin_user'] = True
return render(request, self.template_monitoring, context)
def get_general_results(self):
"""Get the general results for all the evaluations"""
data = {}
with connection.cursor() as cursor:
# Get the total of students in the database.
cursor.execute(
'SELECT COUNT(id) FROM evaluations_student WHERE status = "ACTIVE"')
data['total_students'] = cursor.fetchone()[0]
# Get the total of students evaluated.
cursor.execute(
'SELECT COUNT( DISTINCT ( D.fk_student ) ) FROM evaluations_signature_evaluated A JOIN evaluations_student_signature D ON A.fk_student_signature = D.id')
data['students_evaluated'] = cursor.fetchone()[0]
            # Get the total of YES answers in non-optional questions for the general average.
cursor.execute(
'SELECT COUNT(id) FROM evaluations_answer WHERE answer = "yes"')
data['yes_answers'] = cursor.fetchone()[0]
            # Get the total of NO answers in non-optional questions for the general average.
cursor.execute(
'SELECT COUNT(id) FROM evaluations_answer WHERE answer = "no"')
data['no_answers'] = cursor.fetchone()[0]
        # Calculate the total of answers in all the evaluations.
data['total_answers'] = data['no_answers'] + data['yes_answers']
return data
```
#### File: evaluations/views/teacher_home.py
```python
from django.http import HttpResponse
from django.shortcuts import render
from django.views import View
from evaluations.models import (EvaluationsExam, EvaluationsTeacher,
EvaluationsTeacherSignature,
EvaluationsTeacherSignatureEvaluated)
class TeacherHomeView(View):
template_teacher_evaluation = 'evaluations/teacher_home.html'
template_login = 'evaluations/login.html'
def get(self, request):
        # Verify that the logged-in user is a teacher; if not, return them to the login page.
if not request.session.get('session', False) or not request.session['type'] == 'teacher':
return render(request, self.template_login)
# Get the information for the teacher that is going to make the evaluation.
teacher = EvaluationsTeacher.objects.get(
pk__exact=request.session['id_teacher'], status='ACTIVE')
teacher_exams = EvaluationsExam.objects.filter(
type__exact='DOCENTE', status='ACTIVE')
# Get the signatures information to be evaluated for each exam.
signatures_detail = []
for exam in teacher_exams:
signatures_dtl = EvaluationsTeacherSignature.objects.filter(
fk_teacher__exact=teacher.id, fk_period__exact=exam.fk_period, status='ACTIVE')
signatures_detail.append(
{'exam': exam, 'signatures_dtl': signatures_dtl})
# Get the evaluations already made.
evaluated_signatures = self.get_teacher_signatures_evaluated(
teacher, teacher_exams)
# Return the next evaluation to be made for the teacher in the exams.
next_evaluation = self.get_teacher_next_eval_signature(
signatures_detail, evaluated_signatures)
if not next_evaluation:
try:
request.session.flush()
except KeyError:
pass
context = {
'student': teacher,
'complete': 'YES',
}
return render(request, self.template_login, context)
context = {
'teacher': teacher,
'exams_signatures': signatures_detail,
'next_evaluation': next_evaluation,
'evaluated_signatures': evaluated_signatures,
}
return render(request, self.template_teacher_evaluation, context)
def get_teacher_signatures_evaluated(self, teacher, exams):
"""Return the exam an the signatures already evaluated for that exam"""
data = []
# Get the signatures evaluated for each exam.
for exam in exams:
teacher_signatures = []
signatures_evaluated = EvaluationsTeacherSignatureEvaluated.objects.filter(
fk_exam__exact=exam.id, fk_teacher_signature__fk_teacher=teacher.id, status='ACTIVE')
for teacher_signature in signatures_evaluated:
teacher_signatures.append(teacher_signature.fk_teacher_signature)
data.append({'exam': exam,
'signatures_evaluated': teacher_signatures})
return data
def get_teacher_next_eval_signature(self, signatures, evaluated_signatures):
"""return the exam and group that is the next to evaluate (havent evaluated) for the teacher"""
next_evaluation = {}
for exam_signatures in signatures:
for exam_signatures_eval in evaluated_signatures:
if exam_signatures['exam'] == exam_signatures_eval['exam']:
for signature in exam_signatures['signatures_dtl']:
if not signature in exam_signatures_eval['signatures_evaluated']:
return {'exam': exam_signatures['exam'], 'signature_dtl': signature}
return next_evaluation
```
#### File: evaluations/views/teacher_results.py
```python
import csv
import xlsxwriter
from django.core.exceptions import ObjectDoesNotExist
from django.db.models import Q
from django.http import HttpResponse
from django.shortcuts import get_object_or_404, redirect, render
from django.views import View
from evaluations.models import (EvaluationsCareer, EvaluationsCoordinator,
EvaluationsDtlCoordinatorCareer,
EvaluationsDtlQuestionExam, EvaluationsExam,
EvaluationsSignatureQuestionResult,
EvaluationsSignatureResult, EvaluationsTeacher,
EvaluationsTeacherSignature)
class TeacherResultsView(View):
template_teacher_results = 'evaluations/teacher_results.html'
template_monitoring = 'evaluations/career_monitoring.html'
template_login = 'evaluations/login.html'
def get(self, request, career_id, teacher_id):
# Verify if the coordinator is correctly logged in.
if not request.session.get('session', False) or not request.session['type'] == 'coordinator':
return render(request, self.template_login)
# Values for the view and the monitoring navigation bar.
coordinator = EvaluationsCoordinator.objects.get(
pk__exact=request.session['id_coordinator'])
careers_id = EvaluationsDtlCoordinatorCareer.objects.filter(
fk_coordinator__exact=coordinator.id).values('fk_career')
careers = EvaluationsCareer.objects.filter(pk__in=careers_id)
career = EvaluationsCareer.objects.get(
pk__exact=career_id, status="ACTIVE")
# Get the teacher.
teacher = EvaluationsTeacher.objects.get(pk__exact=teacher_id)
# Get the teacher signatures in the career.
signatures = [signature_dtl.fk_signature for signature_dtl in EvaluationsTeacherSignature.objects.filter(
fk_teacher=teacher.id,
fk_signature__fk_career__exact=career.id,
status="ACTIVE"
).select_related('fk_signature')]
# Get the teacher results average of each exam.
teacher_results = self.get_teacher_average_results(career, teacher)
# Calculate the general average of the teacher from all the exams.
general_average = []
for exam in teacher_results:
general_average.append(exam['average'])
general_average = round(
sum(general_average)/len(general_average) if len(general_average) > 0 else 0)
# Render the teacher results view with all the things to show.
context = {
'results': teacher_results,
'general_average': general_average,
'signatures': signatures,
'teacher': teacher,
'coordinator': coordinator,
'careers': careers,
'career': career
}
return render(request, self.template_teacher_results, context)
def get_teacher_average_results(self, career, teacher):
"""Returns a list with the average result of the signature, the questions results and comments in each exam of the career"""
data = []
# Get all the exams for the career.
exams = EvaluationsExam.objects.filter(
type__exact=career.type, status__exact='ACTIVE')
# Get a list with all the teacher-signature detail of the teacher in that career.
signatures_dtl = EvaluationsTeacherSignature.objects.filter(
fk_teacher=teacher.id,
fk_signature__fk_career__exact=career.id,
status="ACTIVE"
).select_related('fk_signature')
# Results for each exam of the career.
for exam in exams:
# Get the average of all the signatures.
average = self.get_signatures_average(signatures_dtl, exam)
            # If the average is False (there are no evaluations yet), stop processing the remaining exams.
if not average:
break
# Get the averages of all the questions in the exam, for all the signatures.
questions = self.get_signatures_questions_averages(
signatures_dtl, exam)
# Add the result of the exam to the return data.
exam_results = {
'exam': exam,
'average': average,
'questions': questions
}
data.append(exam_results)
return data
def get_signatures_average(self, signatures_dtl, exam):
"""Return the final average of the result signatures in the exam"""
averages = []
for dtl_signature in signatures_dtl:
try:
# Get the average of the signature.
signature_average = EvaluationsSignatureResult.objects.get(
group__exact=dtl_signature.group,
fk_signature__exact=dtl_signature.fk_signature.id,
fk_exam__exact=exam.id,
status="ACTIVE"
).average
averages.append(float(signature_average))
except ObjectDoesNotExist:
pass
# Calculate the general average for the teacher.
average = False
if len(averages) > 0:
average = round((sum(averages)/len(averages)))
return average
def get_signatures_questions_averages(self, signatures_dtl, exam):
"""Return a dictionary with the questions as the key and the average or comments as value. This will return a empty dict if there isn't any result for the exam"""
# Get all the questions for the exam.
questions = [question.fk_question for question in EvaluationsDtlQuestionExam.objects.filter(
fk_exam=exam.id, status="ACTIVE").select_related('fk_question')]
# Get the average for each question on each signature.
data = {}
for question in questions:
# Get the average or comments of the question for each signature.
question_result = []
for dtl_signature in signatures_dtl:
try:
result = EvaluationsSignatureQuestionResult.objects.get(
group__exact=dtl_signature.group,
fk_question__exact=question.id,
fk_signature__exact=dtl_signature.fk_signature.id,
fk_exam__exact=exam.id,
).result
question_result.append(result)
except ObjectDoesNotExist:
pass
            # If the question is not optional, calculate the final average; otherwise collect all the comments.
if len(question_result) > 0:
if question.optional != "YES":
                    # Calculate the final average for the question with all the signature-question results and assign it to the result dict.
question_result = list(map(float, question_result))
average = round(
(sum(question_result)/len(question_result)))
data[question] = average
else:
                    # Add all the comments of the signatures to the result data: split the comments and keep only the non-empty ones.
data['comments'] = list(filter(None, question_result[0].split('|')))
return data
``` |
{
"source": "jmgao/bfg9000",
"score": 3
} |
#### File: bfg9000/arguments/windows.py
```python
class ArgumentParser(object):
_argument_info = {}
def __init__(self, prefix_chars='/-', value_delim=':'):
self.prefix_chars = prefix_chars
self.value_delim = value_delim
self._options = []
self._short_names = {}
self._long_names = {}
self._unnamed_dest = None
@classmethod
def handler(cls, type):
def wrapper(thing):
cls._argument_info[type] = thing
return thing
return wrapper
def add(self, *args, **kwargs):
dest = kwargs.pop('dest', args[0][1:])
type = kwargs.pop('type', bool)
info = self._argument_info[type](dest, **kwargs)
for i in args:
if i[0] not in self.prefix_chars:
raise ValueError('names must begin with a prefix char')
if len(i) == 2:
self._short_names[i] = info
else:
self._long_names[i] = info
self._options.append(info)
return info
def add_unnamed(self, dest):
self._unnamed_dest = dest
def parse_known(self, args):
result = {i.name: i.default() for i in self._options}
if self._unnamed_dest:
result[self._unnamed_dest] = []
extra = []
args = iter(args)
while True:
i = next(args, None)
if i is None:
break
info = None
if i[0] in self.prefix_chars:
key, value = i[:2], i[2:]
if key in self._short_names:
info = self._short_names[key]
if info.takes_value:
if not value:
value = next(args)
elif value:
raise ValueError('no value expected for option')
else:
key, colon, value = i.partition(self.value_delim)
if key in self._long_names:
info = self._long_names[key]
if not info.takes_value and colon:
raise ValueError('no value expected for option')
elif self._unnamed_dest:
result[self._unnamed_dest].append(i)
continue
if info:
info.fill_value(result, key, value)
continue
extra.append(i)
return result, extra
class ArgumentInfo(object):
def __init__(self, name):
self.name = name
def default(self):
return None
@property
def takes_value(self):
return True
@ArgumentParser.handler('key')
class KeyArgumentInfo(ArgumentInfo):
def fill_value(self, results, key, value):
results[self.name] = key
@ArgumentParser.handler(bool)
class BoolArgumentInfo(ArgumentInfo):
def __init__(self, name, value=True):
ArgumentInfo.__init__(self, name)
self.value = value
def fill_value(self, results, key, value):
results[self.name] = self.value
@property
def takes_value(self):
return False
@ArgumentParser.handler(str)
class StrArgumentInfo(ArgumentInfo):
def __init__(self, name):
ArgumentInfo.__init__(self, name)
def fill_value(self, results, key, value):
if not value:
raise ValueError('expected value for option')
results[self.name] = value
@ArgumentParser.handler(list)
class ListArgumentInfo(ArgumentInfo):
def __init__(self, name):
ArgumentInfo.__init__(self, name)
def default(self):
return []
def fill_value(self, results, key, value):
if not value:
raise ValueError('expected value for option')
results[self.name].append(value)
@ArgumentParser.handler(dict)
class DictArgumentInfo(ArgumentInfo):
def __init__(self, name):
ArgumentInfo.__init__(self, name)
self._short_names = {}
self._long_names = {}
self._options = []
def add(self, *args, **kwargs):
dest = kwargs.pop('dest', args[0])
type = kwargs.pop('type', 'key')
info = ArgumentParser._argument_info[type](dest, **kwargs)
if type in (bool, 'key'):
for i in args:
self._long_names[i] = info
else:
for i in args:
if len(i) != 1:
raise ValueError('short names should be one character')
self._short_names[i] = info
self._options.append(info)
return info
def default(self):
return {i.name: i.default() for i in self._options}
def fill_value(self, results, key, value):
if not value:
raise ValueError('expected value for option')
subkey, subvalue = value[:1], value[1:]
if subkey in self._short_names:
info = self._short_names[subkey]
info.fill_value(results[self.name], subkey, subvalue)
elif value in self._long_names:
info = self._long_names[value]
info.fill_value(results[self.name], value, None)
```
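To make the parsing rules above concrete, here is a small illustrative run with made-up MSVC-style flags:
```python
from bfg9000.arguments.windows import ArgumentParser

parser = ArgumentParser()
parser.add('/nologo')                           # boolean switch
parser.add('/I', type=list, dest='includes')    # short option, repeatable, takes a value
parser.add('/out', type=str, dest='output')     # long option, value after ':'
parser.add_unnamed('inputs')

args, extra = parser.parse_known(
    ['/nologo', '/Ifoo', '/I', 'bar', '/out:app.exe', 'a.cpp', 'b.cpp', '/unknown']
)
# args  == {'nologo': True, 'includes': ['foo', 'bar'],
#           'output': 'app.exe', 'inputs': ['a.cpp', 'b.cpp']}
# extra == ['/unknown']
```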
#### File: bfg9000/bfg9000/build.py
```python
import errno
import os
from .arguments.parser import ArgumentParser
from .builtins import builtin, init as builtin_init, user_arguments
from .build_inputs import BuildInputs
from .path import exists, Path, pushd, Root
from .iterutils import listify
from .tools import init as tools_init
bfgfile = 'build.bfg'
optsfile = 'options.bfg'
user_description = """
These arguments are defined by the options.bfg file in the project's source
directory. To disambiguate them from built-in arguments, you may prefix the
argument name with `-x`. For example, `--foo` may also be written as `--x-foo`.
"""
class Toolchain(object):
__slots__ = ('target_platform')
def __init__(self):
self.target_platform = None
def is_srcdir(path):
return exists(path.append(bfgfile))
def _execute_file(f, filename, builtin_dict):
code = compile(f.read(), filename, 'exec')
try:
exec(code, builtin_dict)
except SystemExit:
pass
def load_toolchain(filename):
builtin_init()
tools_init()
toolchain = Toolchain()
builtin_dict = builtin.toolchain.bind(toolchain=toolchain)
with open(filename.string(), 'r') as f:
_execute_file(f, f.name, builtin_dict)
return toolchain
def _fill_parser(env, parent=None, filename=optsfile, usage='parse'):
builtin_init()
optspath = Path(filename, Root.srcdir)
prog = parent.prog if parent else filename
parser = ArgumentParser(prog=prog, parents=listify(parent),
add_help=False)
try:
with open(optspath.string(env.base_dirs), 'r') as f, \
pushd(env.srcdir.string()): # noqa
group = parser.add_argument_group('project-defined arguments',
description=user_description)
group.usage = usage
builtin_dict = builtin.options.bind(env=env, parser=group)
_execute_file(f, filename, builtin_dict)
builtin.options.run_post(builtin_dict, env=env, parser=group)
except IOError as e:
if e.errno != errno.ENOENT:
raise
return parser
def print_user_help(env, parent, filename=optsfile, out=None):
parser = _fill_parser(env, parent, filename, usage='help')
parser.print_help(out)
def parse_user_args(env, filename=optsfile):
parser = _fill_parser(env, None, filename)
return parser.parse_args(env.extra_args)
def execute_script(env, argv, filename=bfgfile):
builtin_init()
bfgpath = Path(filename, Root.srcdir)
build = BuildInputs(env, bfgpath)
builtin_dict = builtin.build.bind(build_inputs=build, argv=argv, env=env)
with open(bfgpath.string(env.base_dirs), 'r') as f, \
pushd(env.srcdir.string()): # noqa
_execute_file(f, filename, builtin_dict)
builtin.build.run_post(builtin_dict, build_inputs=build, argv=argv,
env=env)
return build
```
#### File: bfg9000/builtins/file_types.py
```python
from contextlib import contextmanager
from six import string_types
from . import builtin
from .find import exclude_globs, filter_by_platform
from ..file_types import *
from ..iterutils import iterate, uniques
from ..path import Path, Root, makedirs as _makedirs
def local_file(build, file_type, name, params, kwargs):
extra_args = []
for key, default in params:
extra_args.append(kwargs.pop(key, default))
if kwargs:
raise TypeError("unexpected keyword argument '{}'".format(
next(iter(kwargs))
))
return build.add_source(file_type(Path(name, Root.srcdir), *extra_args))
@contextmanager
def generated_file(build, env, file, mode='w', makedirs=True):
if makedirs:
_makedirs(file.path.parent().string(env.base_dirs), exist_ok=True)
yield open(file.path.string(env.base_dirs), mode)
build['regenerate'].outputs.append(file)
@builtin.function('build_inputs')
@builtin.type(File)
def generic_file(build, name):
return build.add_source(File(Path(name, Root.srcdir)))
@builtin.function('build_inputs')
@builtin.type(SourceFile)
def source_file(build, name, lang=None):
return build.add_source(SourceFile(Path(name, Root.srcdir), lang))
@builtin.function('build_inputs')
@builtin.type(HeaderFile)
def header_file(build, name, lang=None):
return build.add_source(HeaderFile(Path(name, Root.srcdir), lang))
# These builtins will find all the files in a directory so that they can be
# added to the distribution. XXX: Perhaps these could be reworked so that
# adding/removing files in directories doesn't force bfg to regenerate build
# files.
def _find(builtins, name, include, type, exclude, filter, as_object=True):
if not include:
return None
return builtins['find_files'](name, include, type, None, exclude, filter,
as_object=as_object)
@builtin.function('builtins', 'build_inputs')
@builtin.type(Directory, in_type=string_types + (File,))
def directory(builtins, build, name, include=None, exclude=exclude_globs,
filter=filter_by_platform):
if isinstance(name, File):
path = name.path.parent()
else:
path = Path(name, Root.srcdir)
files = _find(builtins, name, include, '*', exclude, filter)
return Directory(path, files)
@builtin.function('builtins', 'build_inputs')
@builtin.type(HeaderDirectory, in_type=string_types + (HeaderFile,))
def header_directory(builtins, build, name, include=None,
exclude=exclude_globs, filter=filter_by_platform,
system=False, lang=None):
if isinstance(name, HeaderFile):
path = name.path.parent()
lang = name.lang
else:
path = Path(name, Root.srcdir)
files = _find(builtins, name, include, 'f', exclude, filter,
lambda p: HeaderFile(p, lang))
langs = (uniques(i.lang for i in files if i.lang)
if files else iterate(lang))
return HeaderDirectory(path, files, system, langs)
```
#### File: bfg9000/builtins/toolchain.py
```python
import os
from six import iteritems
from . import builtin
from .. import shell
from .. import tools
from ..iterutils import first, isiterable
from ..languages import known_langs
from ..shell import posix as pshell
_unsafe_builtins = ['file', '__import__', 'input', 'open', 'raw_input',
'reload']
@builtin.getter(name='__builtins__', context='toolchain')
def builtins():
return {k: v for k, v in iteritems(__builtins__)
if k not in _unsafe_builtins}
@builtin.getter(context='toolchain')
def environ():
return os.environ
@builtin.function('toolchain', context='toolchain')
def target_platform(toolchain, platform):
toolchain.target_platform = platform
@builtin.function(context='toolchain')
def which(names, resolve=False, strict=True, kind='executable'):
try:
return ' '.join(shell.which(names, resolve=resolve, kind=kind))
except IOError:
if strict:
raise
result = first(names)
return pshell.join(result) if isiterable(result) else result
@builtin.function(context='toolchain')
def compiler(names, lang, strict=False):
var = known_langs[lang].var('compiler')
os.environ[var] = which(names, strict=strict, kind='compiler')
@builtin.function(context='toolchain')
def compile_options(options, lang):
# This only supports strings (and lists of strings) for options, *not*
# semantic options. It would be nice if we could support semantic options,
# but we'd either need to know the flavor of compiler at this point (we
# don't) or we'd have to store the options in some way other than as an
# environment variable.
if isiterable(options):
options = pshell.join(options)
os.environ[known_langs[lang].var('cflags')] = options
@builtin.function(context='toolchain')
def runner(names, lang, strict=False):
var = known_langs[lang].var('runner')
os.environ[var] = which(names, strict=strict, kind='runner')
```
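These builtins are what a user-supplied toolchain file is executed against (see `load_toolchain` in `build.py` above). A minimal sketch of such a file, with illustrative compiler names and flags rather than recommended defaults:
```python
# my_toolchain.bfg (hypothetical file name) -- evaluated with the builtins above in scope
target_platform('linux')
compiler(['clang++', 'g++'], 'c++')              # first name found wins, via which()
compile_options(['-std=c++14', '-Wall'], 'c++')  # stored in the language's cflags variable
```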
#### File: bfg9000/bfg9000/languages.py
```python
from functools import partial
from six import iteritems
from .iterutils import listify, iterate
class _LanguageInfo(object):
def __init__(self, name, vars, exts):
self.name = name
self._vars = vars
self._exts = {k: listify(v) for k, v in iteritems(exts)}
def _get(self, attr, desc, key):
try:
return getattr(self, attr)[key]
except KeyError:
raise ValueError('language {!r} does not support {} {!r}'
.format(self.name, desc, key))
def var(self, name):
return self._get('_vars', 'var', name)
def exts(self, name):
return self._get('_exts', 'file type', name)
class _LanguageDefiner(object):
def __init__(self, langs, name):
self._langs = langs
self._name = name
self._vars = {}
self._exts = {}
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self._langs._add(_LanguageInfo(
self._name, self._vars, self._exts
))
def vars(self, **kwargs):
self._vars = kwargs
def exts(self, **kwargs):
self._exts = kwargs
class Languages(object):
def __init__(self):
self._langs = {}
self._ext2lang = {}
def __getitem__(self, name):
try:
return self._langs[name]
except KeyError:
raise ValueError('unrecognized language {!r}'.format(name))
def _add(self, info):
self._langs[info.name] = info
for kind, exts in iteritems(info._exts):
for ext in exts:
tolang = self._ext2lang.setdefault(ext, {})
if kind in tolang:
raise ValueError('{ext!r} already used by {lang!r}'.format(
ext=ext, lang=tolang[kind]
))
tolang[kind] = info.name
def fromext(self, ext, kind):
return self._ext2lang.get(ext, {}).get(kind)
def make(self, name):
return _LanguageDefiner(self, name)
known_langs = Languages()
```
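A sketch of registering a language through the definer above, in a fresh interpreter (the variable and extension names are illustrative):
```python
with known_langs.make('c') as lang:
    lang.vars(compiler='CC', cflags='CFLAGS')
    lang.exts(source=['.c'], header=['.h'])

known_langs['c'].var('compiler')      # -> 'CC'
known_langs['c'].exts('source')       # -> ['.c']
known_langs.fromext('.h', 'header')   # -> 'c'
```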
#### File: bfg9000/bfg9000/options.py
```python
from six import add_metaclass
from six.moves import zip
from . import path, safe_str
from .iterutils import isiterable, iterate
from .file_types import *
from .platforms.framework import Framework
class option_list(object):
def __init__(self, *args):
self._options = []
self.collect(*args)
def append(self, option):
if ( isinstance(option, safe_str.stringy_types) or
not any(option.matches(i) for i in self._options) ):
self._options.append(option)
def extend(self, options):
for i in options:
self.append(i)
def collect(self, *args):
for i in args:
if isiterable(i):
for j in i:
self.collect(j)
elif i is not None:
self.append(i)
def copy(self):
return option_list(self._options)
def __iter__(self):
return iter(self._options)
def __len__(self):
return len(self._options)
def __eq__(self, rhs):
return type(self) == type(rhs) and self._options == rhs._options
def __ne__(self, rhs):
return not (self == rhs)
def __repr__(self):
return '<option_list({})>'.format(repr(self._options))
def __add__(self, rhs):
x = self.copy()
x += rhs
return x
def __iadd__(self, rhs):
if not isinstance(rhs, option_list):
raise TypeError('expected an option_list, got a {!r}'
.format(type(rhs)))
self.extend(rhs)
return self
# XXX: This is a separate function to make Python 2.7.8 and earlier happy. For
# details, see <https://bugs.python.org/issue21591>.
def _make_init(slots, attrs):
exec('def __init__(self, {0}):\n self._init({0})'
.format(', '.join(slots)), globals(), attrs)
class OptionMeta(type):
def __new__(cls, name, bases, attrs):
fields = attrs.pop('_fields', [])
slots = tuple(i[0] if isiterable(i) else i for i in fields)
types = tuple(i[1] if isiterable(i) else None for i in fields)
attrs.update({'__slots__': slots, '_types': types})
if '__init__' not in attrs:
_make_init(slots, attrs)
return type.__new__(cls, name, bases, attrs)
def __init__(cls, name, bases, attrs):
is_root = not any(type(i) == OptionMeta for i in bases)
if is_root:
cls.registry = {}
else:
cls.registry[name] = cls
type.__init__(cls, name, bases, attrs)
@add_metaclass(OptionMeta)
class Option(object):
def _init(self, *args):
assert len(args) == len(self.__slots__)
for k, t, v in zip(self.__slots__, self._types, args):
if t and not isinstance(v, t):
raise TypeError('expected {}; but got {}'.format(
', '.join(i.__name__ for i in iterate(t)), type(v).__name__
))
setattr(self, k, v)
def matches(self, rhs):
return self == rhs
def __eq__(self, rhs):
return type(self) == type(rhs) and all(
getattr(self, i) == getattr(rhs, i) for i in self.__slots__
)
def __ne__(self, rhs):
return not (self == rhs)
def __repr__(self):
return '<{}({})>'.format(self.__class__.__name__, ', '.join(
repr(getattr(self, i)) for i in self.__slots__
))
def option(name, fields=()):
return type(name, (Option,), {'_fields': fields})
# Compilation options
include_dir = option('include_dir', [('directory', HeaderDirectory)])
std = option('std', [('value', str)])
pic = option('pic')
pch = option('pch', [('header', PrecompiledHeader)])
class define(Option):
_fields = [ ('name', str),
('value', (str, type(None))) ]
def __init__(self, name, value=None):
Option._init(self, name, value)
# Link options
lib_dir = option('lib_dir', [('directory', Directory)])
lib = option('lib', [('library', (Library, Framework, str))])
rpath_dir = option('rpath_dir', [('path', path.BasePath)])
rpath_link_dir = option('rpath_link_dir', [('path', path.BasePath)])
lib_literal = option('lib_literal', [('value', safe_str.stringy_types)])
entry_point = option('entry_point', [('value', str)])
# General options
pthread = option('pthread')
```
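For illustration, here is how these option types behave when collected into an `option_list` (the values are arbitrary):
```python
from bfg9000.options import define, option_list, pthread, std

flags = option_list(define('NDEBUG'), define('VERSION', '1.0'), std('c++14'))
flags.append(define('NDEBUG'))     # an equal option is already present, so matches() drops it
assert len(flags) == 3
flags += option_list(pthread())    # merging via __iadd__ / extend
assert len(flags) == 4
```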
#### File: bfg9000/tools/jvm.py
```python
import os
import re
from itertools import chain
from .common import BuildCommand, check_which
from .. import options as opts, safe_str, shell
from ..builtins.file_types import generated_file
from ..exceptions import PackageResolutionError
from ..file_types import *
from ..iterutils import flatten, iterate, uniques
from ..packages import Package
from ..path import Path, Root
from ..versioning import detect_version
class JvmBuilder(object):
def __init__(self, env, langinfo, command, version_output):
name = langinfo.var('compiler').lower()
self.lang = langinfo.name
self.object_format = 'jvm'
flags_name = langinfo.var('cflags').lower()
flags = shell.split(env.getvar(langinfo.var('cflags'), ''))
jar_command = check_which(env.getvar('JAR', 'jar'), kind='jar builder')
# The default command name to run JVM programs is (usually) the same as
# the name of the language, so we'll just use that here as the default.
run_name = langinfo.var('runner').lower()
run_command = check_which(
env.getvar(langinfo.var('runner'), self.lang),
kind='{} runner'.format(self.lang)
)
self.brand = 'unknown'
self.version = None
if self.lang == 'java':
try:
# Get the brand from the run command (rather than the compile
# command).
output = env.execute(
run_command + ['-version'], stdout=shell.Mode.pipe,
stderr=shell.Mode.stdout
)
if re.search(r'Java\(TM\) (\w+ )?Runtime Environment', output):
self.brand = 'oracle'
elif 'OpenJDK Runtime Environment' in output:
self.brand = 'openjdk'
except (OSError, shell.CalledProcessError):
pass
self.version = detect_version(version_output)
elif self.lang == 'scala':
if 'EPFL' in version_output:
self.brand = 'epfl'
self.version = detect_version(version_output)
self.compiler = JvmCompiler(self, env, name, command, flags_name,
flags)
self._linker = JarMaker(self, env, jar_command)
self.packages = JvmPackageResolver(self, env, run_command)
self.runner = JvmRunner(self, env, run_name, run_command)
@staticmethod
def check_command(env, command):
return env.execute(command + ['-version'], stdout=shell.Mode.pipe,
stderr=shell.Mode.stdout)
@property
def flavor(self):
return 'jvm'
@property
def family(self):
return 'jvm'
@property
def can_dual_link(self):
return False
def linker(self, mode):
if mode == 'static_library':
raise ValueError('static linking not supported with {}'.format(
self.brand
))
if mode not in ('executable', 'shared_library'):
raise KeyError(mode)
return self._linker
class JvmCompiler(BuildCommand):
def __init__(self, builder, env, name, command, flags_name, flags):
BuildCommand.__init__(self, builder, env, name, name, command,
flags=(flags_name, flags))
@property
def brand(self):
return self.builder.brand
@property
def version(self):
return self.builder.version
@property
def flavor(self):
return 'jvm'
@property
def deps_flavor(self):
return None
@property
def needs_libs(self):
return True
@property
def accepts_pch(self):
return False
def _call(self, cmd, input, output, flags=None):
jvmoutput = self.env.tool('jvmoutput')
result = list(chain(
cmd, self._always_flags, iterate(flags), [input]
))
return jvmoutput(output, result)
@property
def _always_flags(self):
return ['-verbose', '-d', '.']
def flags(self, options, output=None, mode='normal'):
flags, class_path = [], []
for i in options:
if isinstance(i, opts.lib):
class_path.append(i.library.path)
elif isinstance(i, safe_str.stringy_types):
flags.append(i)
else:
raise TypeError('unknown option type {!r}'.format(type(i)))
if class_path:
flags.extend(['-cp', safe_str.join(uniques(class_path),
os.pathsep)])
return flags
def output_file(self, name, context):
return ObjectFileList(Path(name + '.classlist'), Path(name + '.class'),
self.builder.object_format, self.lang)
class JarMaker(BuildCommand):
flags_var = 'jarflags'
def __init__(self, builder, env, command):
global_flags = shell.split(env.getvar('JARFLAGS', 'cfm'))
BuildCommand.__init__(self, builder, env, 'jar', 'jar', command,
flags=('jarflags', global_flags))
@property
def brand(self):
return self.builder.brand
@property
def version(self):
return self.builder.version
@property
def flavor(self):
return 'jar'
def can_link(self, format, langs):
return format == 'jvm'
@property
def needs_libs(self):
return False
@property
def has_link_macros(self):
return False
def pre_build(self, build, name, context):
# Fix up paths for the Class-Path field: escape spaces, use forward
# slashes on Windows, and prefix Windows drive letters with '/' to
# disambiguate them from URLs.
def fix_path(p):
if self.env.target_platform.name == 'windows':
if p[1] == ':':
p = '/' + p
p = p.replace('\\', '/')
return p.replace(' ', '%20')
libs = getattr(context, 'libs', []) + flatten(
i.libs for i in getattr(context, 'packages', [])
)
dirs = uniques(i.path for i in libs)
base = Path(name).parent()
context.manifest = File(Path(name + '-manifest.txt'))
with generated_file(build, self.env, context.manifest) as out:
classpath = ' '.join(fix_path(i.relpath(base)) for i in dirs)
if classpath:
out.write('Class-Path: {}\n'.format(classpath))
if getattr(context, 'entry_point', None):
out.write('Main-Class: {}\n'.format(context.entry_point))
return opts.option_list()
def _call(self, cmd, input, output, manifest, libs=None, flags=None):
return list(chain(
cmd, iterate(flags), [output, manifest], iterate(input)
))
def transform_input(self, input):
return ['@' + safe_str.safe_str(i) if isinstance(i, ObjectFileList)
else i for i in input]
def flags(self, options, output=None, mode='normal'):
flags = []
for i in options:
if isinstance(i, safe_str.stringy_types):
flags.append(i)
else:
raise TypeError('unknown option type {!r}'.format(type(i)))
return flags
def output_file(self, name, context):
if getattr(context, 'entry_point', None):
filetype = ExecutableLibrary
else:
filetype = Library
return filetype(Path(name + '.jar'), self.builder.object_format,
self.lang)
class JvmPackage(Package):
def __init__(self, name, format, libs=None):
Package.__init__(self, name, format)
self.libs = libs or []
def compile_options(self, compiler, output):
return opts.option_list(opts.lib(i) for i in self.libs)
def link_options(self, linker, output):
return opts.option_list()
class JvmPackageResolver(object):
def __init__(self, builder, env, command):
self.builder = builder
if self.lang == 'scala':
extra_env = {'JAVA_OPTS': '-XshowSettings:properties'}
args = ['-version']
returncode = 1
else:
extra_env = None
args = ['-XshowSettings:properties', '-version']
returncode = 0
try:
output = env.execute(
command + args, env=extra_env, stdout=shell.Mode.devnull,
stderr=shell.Mode.pipe, returncode=returncode
)
self.ext_dirs = self._get_dirs('java.ext.dirs', output)
self.classpath = self._get_dirs('java.class.path', output)
except (OSError, shell.CalledProcessError):
self.ext_dirs = []
self.classpath = []
@property
def lang(self):
return self.builder.lang
def _get_dirs(self, key, output):
ex = r'^(\s*){} = (.*(?:\n\1\s+.*)*)'.format(re.escape(key))
m = re.search(ex, output, re.MULTILINE)
if not m:
return []
return [i.strip() for i in m.group(2).split('\n')]
def _library(self, name):
jarname = name + '.jar'
for base in self.ext_dirs:
fullpath = os.path.join(base, jarname)
if os.path.exists(fullpath):
return Library(Path(fullpath, Root.absolute),
self.builder.object_format,
external=True)
for path in self.classpath:
if os.path.basename(path) == jarname and os.path.exists(path):
return Library(Path(path, Root.absolute),
self.builder.object_format,
external=True)
raise PackageResolutionError("unable to find library '{}'"
.format(name))
def resolve(self, name, version, kind, headers, libs):
return JvmPackage(name, self.builder.object_format,
libs=[self._library(name)])
class JvmRunner(BuildCommand):
def __init__(self, builder, env, name, command):
BuildCommand.__init__(self, builder, env, name, name, command)
def _call(self, cmd, file, cp=None, jar=False):
result = list(cmd)
if jar and self.lang != 'scala':
result.append('-jar')
if cp:
result.extend(['-cp', cp])
result.append(file)
return result
def run_arguments(self, file):
if isinstance(file, Executable):
return self(file, jar=True)
elif isinstance(file, ObjectFileList):
return self.run_arguments(file.object_file)
elif isinstance(file, ObjectFile):
return self(file.path.stripext().basename(), cp=file.path.parent())
raise TypeError('expected an executable or object file for {} to run'
.format(self.lang))
```
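The manifest logic above joins library directories into a jar `Class-Path` entry after URL-escaping them. Below is a minimal standalone sketch of that step, using plain `os.path` strings instead of bfg9000's `Path` objects (the paths are invented for illustration):
```python
import os.path


def manifest_classpath(lib_dirs, base):
    """Join library directories into a jar manifest Class-Path value."""
    def fix_path(p):
        # Manifest entries are URLs: forward slashes only, spaces escaped.
        p = p.replace('\\', '/')
        return p.replace(' ', '%20')

    rel = (os.path.relpath(i, base) for i in lib_dirs)
    return ' '.join(fix_path(i) for i in rel)


# Prints 'libs/a%20b ../vendor' on a POSIX system.
print(manifest_classpath(['/proj/out/libs/a b', '/proj/vendor'], '/proj/out'))
```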
#### File: bfg9000/tools/setenv.py
```python
from six import iteritems
from . import tool
from .common import SimpleCommand
from ..platforms import platform_name
from ..safe_str import jbos, safe_str, shell_literal
from ..shell import escape_line
if platform_name() == 'windows':
@tool('setenv')
class SetEnv(SimpleCommand):
def __init__(self, env):
SimpleCommand.__init__(self, env, name='setenv', env_var='SETENV',
default=env.bfgdir.append('pysetenv'))
def _call(self, cmd, env, line):
if env:
eq = shell_literal('=')
env_vars = cmd + [jbos(safe_str(name), eq, safe_str(value))
for name, value in iteritems(env)] + ['--']
else:
env_vars = []
return env_vars + escape_line(line, listify=True)
```
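A rough sketch of how `_call` above assembles the final command line, using plain strings in place of `jbos`/`safe_str` and skipping shell escaping (the command and values are made up for illustration):
```python
def setenv_call(cmd, env, line):
    # NAME=value assignments first, then '--' to separate them from the
    # command being wrapped; with no environment, pass the line through.
    if env:
        env_vars = list(cmd) + ['{}={}'.format(k, v)
                                for k, v in sorted(env.items())] + ['--']
    else:
        env_vars = []
    return env_vars + list(line)


# ['pysetenv', 'FOO=bar', '--', 'echo', 'hi']
print(setenv_call(['pysetenv'], {'FOO': 'bar'}, ['echo', 'hi']))
```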
#### File: test/integration/test_command.py
```python
import os.path
import re
from six import assertRegex
from . import *
class TestCommand(IntegrationTest):
def __init__(self, *args, **kwargs):
IntegrationTest.__init__(
self, os.path.join(examples_dir, '07_commands'), *args, **kwargs
)
def test_hello(self):
assertRegex(self, self.build('hello'),
re.compile(r'^\s*hello$', re.MULTILINE))
def test_world(self):
assertRegex(self, self.build('world'),
re.compile(r'^\s*world$', re.MULTILINE))
def test_script(self):
assertRegex(self, self.build('script'),
re.compile(r'^\s*hello, world!$', re.MULTILINE))
self.assertExists(output_file('file'))
def test_alias(self):
output = self.build('hello-world')
assertRegex(self, output, re.compile(r'^\s*hello$', re.MULTILINE))
assertRegex(self, output, re.compile(r'^\s*world$', re.MULTILINE))
@skip_if_backend('msbuild')
class TestRunExecutable(IntegrationTest):
def __init__(self, *args, **kwargs):
IntegrationTest.__init__(self, 'run_executable', *args, **kwargs)
def test_env_run(self):
self.assertExists(output_file('file.txt'))
def test_cxx(self):
assertRegex(self, self.build('cxx'),
re.compile(r'^\s*hello from c\+\+!$', re.MULTILINE))
def test_java(self):
assertRegex(self, self.build('java'),
re.compile(r'^\s*hello from java!$', re.MULTILINE))
def test_java_classlist(self):
assertRegex(self, self.build('java-classlist'),
re.compile(r'^\s*hello from java!$', re.MULTILINE))
def test_python(self):
assertRegex(self, self.build('python'),
re.compile(r'^\s*hello from python!$', re.MULTILINE))
```
#### File: test/integration/test_depfile.py
```python
import os.path
import shutil
from . import *
class TestDepfile(IntegrationTest):
def __init__(self, *args, **kwargs):
IntegrationTest.__init__(self, 'depfile', stage_src=True,
*args, **kwargs)
@skip_pred(lambda x: x.backend == 'make' and
env.host_platform.name == 'windows', 'xfail on windows + make')
def test_build(self):
self.build(executable('program'))
self.assertOutput([executable('program')], 'hello\n')
self.wait()
shutil.copy(os.path.join(self.srcdir, 'header_replaced.hpp'),
os.path.join(self.srcdir, 'header.hpp'))
self.build(executable('program'))
self.assertOutput([executable('program')], 'goodbye\n')
```
#### File: test/integration/test_env_vars.py
```python
import os.path
import re
from six import assertRegex
from . import *
class TestEnvVars(IntegrationTest):
def __init__(self, *args, **kwargs):
IntegrationTest.__init__(self, 'env_vars', *args, **kwargs)
@skip_if_backend('msbuild')
def test_test(self):
self.build('test')
def test_command(self):
assertRegex(self, self.build('script'),
re.compile(r'^\s*hello script$', re.MULTILINE))
self.assertExists(output_file('file'))
```
#### File: test/integration/test_extra_deps.py
```python
from . import *
class TestExtraDeps(IntegrationTest):
def __init__(self, *args, **kwargs):
IntegrationTest.__init__(self, 'extra_deps', *args, **kwargs)
def test_build(self):
self.build(executable('program'))
self.assertOutput([executable('program')], 'hello, world!\n')
self.assertExists(output_file('1'))
self.assertExists(output_file('2'))
self.assertExists(output_file('3'))
def test_touch_3(self):
self.build('touch3')
self.assertNotExists(output_file('1'))
self.assertExists(output_file('2'))
self.assertExists(output_file('3'))
```
#### File: test/integration/test_subdirs.py
```python
import os
import tarfile
from . import *
pjoin = os.path.join
class TestSubdirs(IntegrationTest):
def __init__(self, *args, **kwargs):
IntegrationTest.__init__(self, pjoin(examples_dir, '05_subdirs'),
install=True, *args, **kwargs)
def setUp(self):
IntegrationTest.setUp(self)
cleandir(self.installdir)
def test_build(self):
self.build()
self.assertOutput([executable('sub/program')], 'hello, library!\n')
def _check_installed(self):
extra = []
if env.target_platform.has_import_library:
extra = [pjoin(self.libdir, import_library('sub/library').path)]
self.assertDirectory(self.installdir, [
pjoin(self.includedir, 'library.hpp'),
pjoin(self.includedir, 'detail', 'export.hpp'),
pjoin(self.bindir, executable('sub/program').path),
pjoin(self.libdir, shared_library('sub/library').path),
] + extra)
@skip_if_backend('msbuild')
def test_dist(self):
dist = output_file('05_subdirs.tar.gz')
self.build('dist')
self.assertExists(dist)
with tarfile.open(self.target_path(dist)) as t:
self.assertEqual(set(t.getnames()), {
'05_subdirs/build.bfg',
'05_subdirs/include/library.hpp',
'05_subdirs/include/detail/export.hpp',
'05_subdirs/src/library.cpp',
'05_subdirs/src/program.cpp',
})
@only_if_backend('make', hide=True)
def test_dir_sentinels(self):
self.build()
self.assertTrue(os.path.isfile('sub/.dir'))
@skip_if_backend('msbuild')
def test_install(self):
self.build('install')
self._check_installed()
os.chdir(self.srcdir)
cleandir(self.builddir)
self.assertOutput([pjoin(self.bindir, executable('sub/program').path)],
'hello, library!\n')
@skip_if_backend('msbuild')
def test_install_existing_paths(self):
makedirs(self.includedir, exist_ok=True)
makedirs(self.bindir, exist_ok=True)
makedirs(self.libdir, exist_ok=True)
self.build('install')
self._check_installed()
os.chdir(self.srcdir)
cleandir(self.builddir)
self.assertOutput([pjoin(self.bindir, executable('sub/program').path)],
'hello, library!\n')
@skip_if_backend('msbuild')
def test_uninstall(self):
self.build('install')
self._check_installed()
self.build('uninstall')
self.assertDirectory(self.installdir, [])
```
#### File: unit/backends/test_ninja.py
```python
import ntpath
import os.path
import posixpath
import unittest
from six.moves import cStringIO as StringIO
from bfg9000 import path
from bfg9000 import safe_str
from bfg9000.backends.ninja.syntax import *
from bfg9000.platforms import platform_name
from bfg9000.platforms.posix import PosixPath
from bfg9000.platforms.windows import WindowsPath
quote_char = '"' if platform_name() == 'windows' else "'"
def quoted(s):
return quote_char + s + quote_char
class TestVariable(unittest.TestCase):
def test_equality(self):
self.assertTrue(Variable('foo') == Variable('foo'))
self.assertFalse(Variable('foo') != Variable('foo'))
self.assertFalse(Variable('foo') == Variable('bar'))
self.assertTrue(Variable('foo') != Variable('bar'))
class TestWriteString(unittest.TestCase):
def test_output(self):
out = Writer(StringIO())
out.write('foo: $bar', Syntax.output)
self.assertEqual(out.stream.getvalue(), 'foo$:$ $$bar')
def test_input(self):
out = Writer(StringIO())
out.write('foo: $bar', Syntax.input)
self.assertEqual(out.stream.getvalue(), 'foo:$ $$bar')
def test_shell(self):
out = Writer(StringIO())
out.write('foo: $bar', Syntax.shell)
self.assertEqual(out.stream.getvalue(), quoted('foo: $$bar'))
def test_clean(self):
out = Writer(StringIO())
out.write('foo: $bar', Syntax.clean)
self.assertEqual(out.stream.getvalue(), 'foo: $$bar')
class TestWriteLiteral(unittest.TestCase):
def test_output(self):
out = Writer(StringIO())
out.write(safe_str.literal('foo: $bar'), Syntax.output)
self.assertEqual(out.stream.getvalue(), 'foo: $bar')
def test_input(self):
out = Writer(StringIO())
out.write(safe_str.literal('foo: $bar'), Syntax.input)
self.assertEqual(out.stream.getvalue(), 'foo: $bar')
def test_shell(self):
out = Writer(StringIO())
out.write(safe_str.literal('foo: $bar'), Syntax.shell)
self.assertEqual(out.stream.getvalue(), 'foo: $bar')
def test_clean(self):
out = Writer(StringIO())
out.write(safe_str.literal('foo: $bar'), Syntax.clean)
self.assertEqual(out.stream.getvalue(), 'foo: $bar')
class TestWriteShellLiteral(unittest.TestCase):
def test_output(self):
out = Writer(StringIO())
out.write(safe_str.shell_literal('foo: $bar'), Syntax.output)
self.assertEqual(out.stream.getvalue(), 'foo$:$ $$bar')
def test_input(self):
out = Writer(StringIO())
out.write(safe_str.shell_literal('foo: $bar'), Syntax.input)
self.assertEqual(out.stream.getvalue(), 'foo:$ $$bar')
def test_shell(self):
out = Writer(StringIO())
out.write(safe_str.shell_literal('foo: $bar'), Syntax.shell)
self.assertEqual(out.stream.getvalue(), 'foo: $$bar')
def test_clean(self):
out = Writer(StringIO())
out.write(safe_str.shell_literal('foo: $bar'), Syntax.clean)
self.assertEqual(out.stream.getvalue(), 'foo: $$bar')
class TestWriteJbos(unittest.TestCase):
def test_output(self):
out = Writer(StringIO())
s = safe_str.jbos('$foo', safe_str.literal('$bar'))
out.write(s, Syntax.output)
self.assertEqual(out.stream.getvalue(), '$$foo$bar')
def test_input(self):
out = Writer(StringIO())
s = safe_str.jbos('$foo', safe_str.literal('$bar'))
out.write(s, Syntax.input)
self.assertEqual(out.stream.getvalue(), '$$foo$bar')
def test_shell(self):
out = Writer(StringIO())
s = safe_str.jbos('$foo', safe_str.literal('$bar'))
out.write(s, Syntax.shell)
if platform_name() == 'windows':
expected = '$$foo$bar'
else:
expected = quoted('$$foo') + '$bar'
self.assertEqual(out.stream.getvalue(), expected)
def test_clean(self):
out = Writer(StringIO())
s = safe_str.jbos('$foo', safe_str.literal('$bar'))
out.write(s, Syntax.clean)
self.assertEqual(out.stream.getvalue(), '$$foo$bar')
class TestWritePath(unittest.TestCase):
Path = path.Path
ospath = os.path
def test_output(self):
out = Writer(StringIO())
out.write(self.Path('foo', path.Root.srcdir), Syntax.output)
self.assertEqual(out.stream.getvalue(),
self.ospath.join('${srcdir}', 'foo'))
def test_input(self):
out = Writer(StringIO())
out.write(self.Path('foo', path.Root.srcdir), Syntax.input)
self.assertEqual(out.stream.getvalue(),
self.ospath.join('${srcdir}', 'foo'))
def test_shell(self):
out = Writer(StringIO())
out.write(self.Path('foo', path.Root.srcdir), Syntax.shell)
self.assertEqual(out.stream.getvalue(),
quoted(self.ospath.join('${srcdir}', 'foo')))
def test_clean(self):
out = Writer(StringIO())
out.write(self.Path('foo', path.Root.srcdir), Syntax.clean)
self.assertEqual(out.stream.getvalue(),
self.ospath.join('${srcdir}', 'foo'))
class TestWritePosixPath(TestWritePath):
Path = PosixPath
ospath = posixpath
class TestWriteWindowsPath(TestWritePath):
Path = WindowsPath
ospath = ntpath
```
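The string cases above boil down to a few escaping rules; here is a simplified standalone sketch (it ignores literals, paths, and the platform-dependent shell quoting):
```python
def ninja_escape(s, syntax):
    # '$' is always escaped; spaces matter in output/input position; ':'
    # only needs escaping in output position.
    s = s.replace('$', '$$')
    if syntax in ('output', 'input'):
        s = s.replace(' ', '$ ')
    if syntax == 'output':
        s = s.replace(':', '$:')
    return s


print(ninja_escape('foo: $bar', 'output'))  # foo$:$ $$bar
print(ninja_escape('foo: $bar', 'input'))   # foo:$ $$bar
print(ninja_escape('foo: $bar', 'clean'))   # foo: $$bar
```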
#### File: unit/builtins/test_packages.py
```python
import mock
import ntpath
import os
import re
import sys
import unittest
from collections import namedtuple
from .common import BuiltinTest
from ... import make_env
from bfg9000 import file_types, options as opts
from bfg9000.builtins import packages
from bfg9000.exceptions import PackageResolutionError, PackageVersionError
from bfg9000.file_types import Directory, HeaderDirectory
from bfg9000.packages import CommonPackage, Framework
from bfg9000.path import abspath
from bfg9000.platforms import platform_name
from bfg9000.versioning import SpecifierSet, Version
if sys.version_info >= (3,):
open_name = 'builtins.open'
else:
open_name = '__builtin__.open'
# Fix the mock package's mock_open function to work with iter(); note: this is
# already fixed in Python 3.7.1's unittest.mock.
def mock_open(*args, **kwargs):
mo = mock.mock_open(*args, **kwargs)
handle = mo.return_value
handle.__iter__.side_effect = lambda: iter(handle.readlines.side_effect())
return mo
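# Illustrative use of the wrapper above (hypothetical data): the mocked handle
# can now be iterated line by line, e.g.
#   with mock.patch(open_name, mock_open(read_data='a\nb\n')):
#       with open('f') as f:
#           assert list(f) == ['a\n', 'b\n']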
def mock_which(*args, **kwargs):
return [os.path.abspath('/command')]
def mock_execute(args, **kwargs):
if args[-1] == '--version':
return ('gcc (Ubuntu 5.4.0-6ubuntu1~16.04.9) 5.4.0 20160609\n' +
'Copyright (C) 2015 Free Software Foundation, Inc.\n')
elif args[-1] == '-Wl,--version':
return '', '/usr/bin/ld --version\n'
elif args[-1] == '-print-search-dirs':
return 'libraries: =/usr/lib\n'
elif args[-1] == '-print-sysroot':
return '/\n'
elif args[-1] == '--verbose':
return 'SEARCH_DIR("/usr")\n'
elif args[-1] == '/?':
return ('Microsoft (R) C/C++ Optimizing Compiler Version ' +
'19.12.25831 for x86')
elif args[-1] == '--modversion':
return '1.2.3\n'
class TestFramework(unittest.TestCase):
def test_framework(self):
env = make_env('darwin')
self.assertEqual(
packages.framework(env, 'name'),
CommonPackage('name', env.target_platform.object_format,
link_options=opts.option_list(opts.lib(
Framework('name')
)))
)
def test_framework_suffix(self):
env = make_env('darwin')
self.assertEqual(
packages.framework(env, 'name', 'suffix'),
CommonPackage('name,suffix',
env.target_platform.object_format,
link_options=opts.option_list(opts.lib(
Framework('name', 'suffix')
)))
)
def test_frameworks_unsupported(self):
env = make_env('linux')
with self.assertRaises(PackageResolutionError):
packages.framework(env, 'name')
with self.assertRaises(PackageResolutionError):
packages.framework(env, 'name', 'suffix')
class TestPackage(BuiltinTest):
def test_name(self):
with mock.patch('bfg9000.shell.execute', mock_execute), \
mock.patch('bfg9000.shell.which', mock_which): # noqa
pkg = packages.package(self.env, 'name')
self.assertEqual(pkg.name, 'name')
self.assertEqual(pkg.version, Version('1.2.3'))
self.assertEqual(pkg.specifier, SpecifierSet())
self.assertEqual(pkg.static, False)
def test_version(self):
with mock.patch('bfg9000.shell.execute', mock_execute), \
mock.patch('bfg9000.shell.which', mock_which): # noqa
pkg = packages.package(self.env, 'name', version='>1.0')
self.assertEqual(pkg.name, 'name')
self.assertEqual(pkg.version, Version('1.2.3'))
self.assertEqual(pkg.specifier, SpecifierSet('>1.0'))
self.assertEqual(pkg.static, False)
def test_lang(self):
with mock.patch('bfg9000.shell.execute', mock_execute), \
mock.patch('bfg9000.shell.which', mock_which): # noqa
pkg = packages.package(self.env, 'name', lang='c++')
self.assertEqual(pkg.name, 'name')
self.assertEqual(pkg.version, Version('1.2.3'))
self.assertEqual(pkg.specifier, SpecifierSet())
self.assertEqual(pkg.static, False)
def test_kind(self):
with mock.patch('bfg9000.shell.execute', mock_execute), \
mock.patch('bfg9000.shell.which', mock_which): # noqa
pkg = packages.package(self.env, 'name', kind='static')
self.assertEqual(pkg.name, 'name')
self.assertEqual(pkg.version, Version('1.2.3'))
self.assertEqual(pkg.specifier, SpecifierSet())
self.assertEqual(pkg.static, True)
def test_invalid_kind(self):
with self.assertRaises(ValueError):
packages.package(self.env, 'name', kind='bad')
class TestBoostPackage(unittest.TestCase):
def test_boost_version(self):
data = '#define BOOST_LIB_VERSION "1_23_4"\n'
with mock.patch(open_name, mock_open(read_data=data)):
hdr = HeaderDirectory(abspath('path'))
self.assertEqual(packages._boost_version(hdr, SpecifierSet('')),
Version('1.23.4'))
def test_boost_version_too_old(self):
data = '#define BOOST_LIB_VERSION "1_23_4"\n'
with mock.patch(open_name, mock_open(read_data=data)):
hdr = HeaderDirectory(abspath('path'))
with self.assertRaises(PackageVersionError):
packages._boost_version(hdr, SpecifierSet('>=1.30'))
def test_boost_version_cant_parse(self):
data = 'foobar\n'
with mock.patch(open_name, mock_open(read_data=data)):
hdr = HeaderDirectory(abspath('path'))
with self.assertRaises(PackageVersionError):
packages._boost_version(hdr, SpecifierSet(''))
def test_posix(self):
env = make_env('linux', clear_variables=True)
def mock_exists(x):
if ( re.search(r'[/\\]boost[/\\]version.hpp$', x) or
re.search(r'[/\\]libboost_thread', x) or
x in ['/usr/include', '/usr/lib']):
return True
return False
with mock.patch('bfg9000.builtins.packages._boost_version',
return_value=Version('1.23')), \
mock.patch('bfg9000.shell.which', return_value=['command']), \
mock.patch('bfg9000.shell.execute', mock_execute), \
mock.patch('os.path.exists', mock_exists): # noqa
pkg = packages.boost_package(env, 'thread')
self.assertEqual(pkg.name, 'boost(thread)')
self.assertEqual(pkg.version, Version('1.23'))
def test_windows_default_location(self):
env = make_env('windows', clear_variables=True)
boost_incdir = r'C:\Boost\include\boost-1.23'
def mock_walk(top):
yield (top,) + (
[('boost-1.23', ntpath.join(top, 'boost-1.23'))],
[]
)
def mock_execute(*args, **kwargs):
if args[0][1] == '/?':
return 'cl.exe'
raise ValueError()
def mock_exists(x):
if re.search(r'[/\\]boost[/\\]version.hpp$', x):
return True
return False
with mock.patch('bfg9000.builtins.find._walk_flat', mock_walk), \
mock.patch('bfg9000.builtins.packages._boost_version',
return_value=Version('1.23')), \
mock.patch('bfg9000.shell.which', return_value=['command']), \
mock.patch('bfg9000.shell.execute', mock_execute), \
mock.patch('os.path.exists', mock_exists): # noqa
pkg = packages.boost_package(env, 'thread')
self.assertEqual(pkg.name, 'boost(thread)')
self.assertEqual(pkg.version, Version('1.23'))
self.assertEqual(pkg._compile_options, opts.option_list(
opts.include_dir(HeaderDirectory(abspath(boost_incdir)))
))
self.assertEqual(pkg._link_options, opts.option_list(
opts.lib_dir(Directory(abspath(r'C:\Boost\lib')))
))
class TestSystemExecutable(BuiltinTest):
def test_name(self):
with mock.patch('bfg9000.builtins.packages.which', mock_which):
self.assertEqual(
packages.system_executable(self.env, 'name'),
file_types.Executable(abspath('/command'),
self.env.target_platform.object_format)
)
def test_format(self):
with mock.patch('bfg9000.builtins.packages.which', mock_which):
self.assertEqual(
packages.system_executable(self.env, 'name', 'format'),
file_types.Executable(abspath('/command'), 'format')
)
```
#### File: unit/builtins/test_pkg_config.py
```python
import unittest
from bfg9000.builtins.pkg_config import *
from bfg9000.safe_str import safe_str, shell_literal
class TestPkgConfigRequirement(unittest.TestCase):
def test_merge_requirements(self):
a = Requirement('foo', '>=1.0')
b = Requirement('foo', '<=2.0')
c = Requirement('bar', '<=2.0')
self.assertEqual(a & b, Requirement('foo', '>=1.0,<=2.0'))
a &= b
self.assertEqual(a, Requirement('foo', '>=1.0,<=2.0'))
self.assertRaises(ValueError, lambda: b & c)
def test_split_requirement(self):
a = Requirement('foo')
self.assertEqual(set(a.split()), {SimpleRequirement('foo')})
a = Requirement('foo', '>=1.0')
self.assertEqual(set(a.split()), {SimpleRequirement('foo', '>=1.0')})
a = Requirement('foo', '>=1.0,<=2.0')
self.assertEqual(set(a.split()), {SimpleRequirement('foo', '>=1.0'),
SimpleRequirement('foo', '<=2.0')})
def test_split_requirement_single(self):
a = Requirement('foo')
self.assertEqual(set(a.split(True)), {SimpleRequirement('foo')})
a = Requirement('foo', '>=1.0')
self.assertEqual(set(a.split(True)),
{SimpleRequirement('foo', '>=1.0')})
a = Requirement('foo', '>=1.0,<=2.0')
self.assertRaises(ValueError, lambda: a.split(True))
def test_equality(self):
R, S = Requirement, SimpleRequirement
self.assertTrue(R('foo', '>=1.0') == R('foo', '>=1.0'))
self.assertFalse(R('foo', '>=1.0') != R('foo', '>=1.0'))
self.assertTrue(S('foo', '>=1.0') == S('foo', '>=1.0'))
self.assertFalse(S('foo', '>=1.0') != S('foo', '>=1.0'))
self.assertFalse(R('foo', '>=1.0') == R('bar', '>=1.0'))
self.assertTrue(R('foo', '>=1.0') != R('bar', '>=1.0'))
self.assertFalse(R('foo', '>=1.0') == R('foo', '>=2.0'))
self.assertTrue(R('foo', '>=1.0') != R('foo', '>=2.0'))
self.assertFalse(S('foo', '>=1.0') == S('bar', '>=1.0'))
self.assertTrue(S('foo', '>=1.0') != S('bar', '>=1.0'))
self.assertFalse(S('foo', '>=1.0') == S('foo', '>=2.0'))
self.assertTrue(S('foo', '>=1.0') != S('foo', '>=2.0'))
self.assertFalse(R('foo', '>=1.0') == S('foo', '>=1.0'))
self.assertTrue(R('foo', '>=1.0') != S('foo', '>=1.0'))
self.assertFalse(S('foo', '>=1.0') == R('foo', '>=1.0'))
self.assertTrue(S('foo', '>=1.0') != R('foo', '>=1.0'))
class TestPkgConfigSimpleRequirement(unittest.TestCase):
def test_stringify(self):
r = SimpleRequirement('foo', '>=1.0')
self.assertEqual(safe_str(r), shell_literal('foo >= 1.0'))
def test_stringify_equal(self):
r = SimpleRequirement('foo', '==1.0')
self.assertEqual(safe_str(r), shell_literal('foo = 1.0'))
def test_stringify_no_version(self):
r = SimpleRequirement('foo')
self.assertEqual(safe_str(r), shell_literal('foo'))
class TestPkgConfigRequirementSet(unittest.TestCase):
def test_init(self):
s = RequirementSet([Requirement('foo', '>=1.0'),
Requirement('foo', '<=2.0'),
Requirement('bar', '>=3.0')])
self.assertEqual(set(s), {Requirement('foo', '>=1.0,<=2.0'),
Requirement('bar', '>=3.0')})
def test_add(self):
s = RequirementSet()
s.add(Requirement('foo', '>=1.0'))
s.add(Requirement('foo', '<=2.0'))
s.add(Requirement('bar', '>=3.0'))
self.assertEqual(set(s), {Requirement('foo', '>=1.0,<=2.0'),
Requirement('bar', '>=3.0')})
def test_remove(self):
s = RequirementSet([Requirement('foo', '>=1.0'),
Requirement('foo', '<=2.0'),
Requirement('bar', '>=3.0')])
s.remove('foo')
self.assertEqual(set(s), {Requirement('bar', '>=3.0')})
def test_update(self):
a = RequirementSet([Requirement('foo', '>=1.0'),
Requirement('bar', '>=3.0')])
b = RequirementSet([Requirement('foo', '<=2.0'),
Requirement('baz', '>=4.0')])
a.update(b)
self.assertEqual(set(a), {Requirement('foo', '>=1.0,<=2.0'),
Requirement('bar', '>=3.0'),
Requirement('baz', '>=4.0')})
def test_merge_from(self):
a = RequirementSet([Requirement('foo', '>=1.0'),
Requirement('bar', '>=3.0')])
b = RequirementSet([Requirement('foo', '<=2.0'),
Requirement('baz', '>=4.0')])
a.merge_from(b)
self.assertEqual(set(a), {Requirement('foo', '>=1.0,<=2.0'),
Requirement('bar', '>=3.0')})
self.assertEqual(set(b), {Requirement('baz', '>=4.0')})
def test_split(self):
s = RequirementSet([Requirement('foo', '>=1.0'),
Requirement('foo', '<=2.0'),
Requirement('bar', '>=3.0')])
self.assertEqual(set(s.split()), {SimpleRequirement('bar', '>=3.0'),
SimpleRequirement('foo', '>=1.0'),
SimpleRequirement('foo', '<=2.0')})
```
#### File: unit/builtins/test_version.py
```python
import unittest
from bfg9000.builtins.version import bfg9000_required_version, bfg9000_version
from bfg9000.versioning import bfg_version, VersionError
class TestRequiredVersion(unittest.TestCase):
def test_bfg_version(self):
bfg9000_required_version('>=0.1.0')
self.assertRaises(VersionError, bfg9000_required_version, '<=0.1.0')
def test_python_version(self):
bfg9000_required_version(python_version='>=2.7.0')
self.assertRaises(VersionError, bfg9000_required_version, None,
'<=2.0.0')
def test_both_versions(self):
bfg9000_required_version('>=0.1.0', '>=2.7.0')
self.assertRaises(VersionError, bfg9000_required_version, '<=0.1.0',
'<=2.0.0')
class TestVersion(unittest.TestCase):
def test_version(self):
self.assertEqual(bfg9000_version(), bfg_version)
```
#### File: unit/shell/test_windows.py
```python
import unittest
from bfg9000.shell.windows import *
class TestSplit(unittest.TestCase):
def test_single(self):
self.assertEqual(split('foo'), ['foo'])
def test_multiple(self):
self.assertEqual(split('foo bar baz'), ['foo', 'bar', 'baz'])
def test_backslash(self):
self.assertEqual(split(r'C:\path\to\file'), [r'C:\path\to\file'])
def test_quote(self):
self.assertEqual(split('foo "bar baz"'), ['foo', 'bar baz'])
self.assertEqual(split('foo"bar baz"'), ['foobar baz'])
self.assertEqual(split(r'foo "c:\path\\"'), ['foo', 'c:\\path\\'])
self.assertEqual(split('foo "it\'s \\"good\\""'),
['foo', 'it\'s "good"'])
def test_type(self):
self.assertEqual(split('foo bar baz', type=tuple),
('foo', 'bar', 'baz'))
def test_invalid(self):
self.assertRaises(TypeError, split, 1)
class TestListify(unittest.TestCase):
def test_string(self):
self.assertEqual(listify('foo bar baz'), ['foo', 'bar', 'baz'])
def test_list(self):
self.assertEqual(listify(['foo bar', 'baz']), ['foo bar', 'baz'])
def test_type(self):
self.assertEqual(listify('foo bar baz', type=tuple),
('foo', 'bar', 'baz'))
self.assertEqual(listify(['foo bar', 'baz'], type=tuple),
('foo bar', 'baz'))
class TestQuote(unittest.TestCase):
def test_simple(self):
self.assertEqual(quote('foo'), 'foo')
def test_space(self):
self.assertEqual(quote('foo bar'), '"foo bar"')
def test_quote(self):
self.assertEqual(quote('"foo"'), '"\\"foo\\""')
def test_backslash(self):
self.assertEqual(quote(r'foo\bar'), r'foo\bar')
self.assertEqual(quote('foo\\bar\\'), r'"foo\bar\\"')
def test_escaped_quote(self):
self.assertEqual(quote(r'foo\"bar'), r'"foo\\\"bar"')
```
#### File: test/unit/test_depfixer.py
```python
import unittest
from six.moves import cStringIO as StringIO
from bfg9000.depfixer import *
class TestEmitDeps(unittest.TestCase):
def test_empty_deps(self):
instream = StringIO('foo:\n')
outstream = StringIO()
emit_deps(instream, outstream)
self.assertEqual(outstream.getvalue(), '')
def test_single_dep(self):
instream = StringIO('foo: bar\n')
outstream = StringIO()
emit_deps(instream, outstream)
self.assertEqual(outstream.getvalue(), 'bar:\n')
def test_multiple_deps(self):
instream = StringIO('foo: bar baz\n')
outstream = StringIO()
emit_deps(instream, outstream)
self.assertEqual(outstream.getvalue(), 'bar:\nbaz:\n')
def test_multiline_deps(self):
instream = StringIO('foo: bar\nbaz: quux\n')
outstream = StringIO()
emit_deps(instream, outstream)
self.assertEqual(outstream.getvalue(), 'bar:\nquux:\n')
def test_multiple_targets(self):
instream = StringIO('foo bar: baz quux\n')
outstream = StringIO()
emit_deps(instream, outstream)
self.assertEqual(outstream.getvalue(), 'baz:\nquux:\n')
def test_windows_paths(self):
instream = StringIO('c:\\foo c:\\bar: c:\\baz c:\\quux\n')
outstream = StringIO()
emit_deps(instream, outstream)
self.assertEqual(outstream.getvalue(), 'c:\\baz:\nc:\\quux:\n')
def test_trailing_spaces(self):
instream = StringIO('foo : bar \n')
outstream = StringIO()
emit_deps(instream, outstream)
self.assertEqual(outstream.getvalue(), 'bar:\n')
def test_unexpected_newline(self):
instream = StringIO('foo\n')
outstream = StringIO()
self.assertRaises(UnexpectedTokenError, emit_deps, instream, outstream)
instream = StringIO('foo \n')
outstream = StringIO()
self.assertRaises(UnexpectedTokenError, emit_deps, instream, outstream)
def test_unexpected_colon(self):
instream = StringIO('foo: :\n')
outstream = StringIO()
self.assertRaises(UnexpectedTokenError, emit_deps, instream, outstream)
instream = StringIO('foo: bar :\n')
outstream = StringIO()
self.assertRaises(UnexpectedTokenError, emit_deps, instream, outstream)
def test_unexpected_eof(self):
instream = StringIO('foo: bar')
outstream = StringIO()
self.assertRaises(ParseError, emit_deps, instream, outstream)
```
#### File: test/unit/test_languages.py
```python
import unittest
from six import assertRaisesRegex
from bfg9000.languages import Languages
class TestLanguages(unittest.TestCase):
def setUp(self):
self.known_langs = Languages()
with self.known_langs.make('c') as x:
x.vars(compiler='CC')
x.exts(source='.c')
def test_make(self):
with self.known_langs.make('c++') as x:
x.vars(compiler='CXX')
x.exts(source=['.cxx', '.cpp'])
self.assertEqual(self.known_langs['c'].name, 'c')
self.assertEqual(self.known_langs['c'].var('compiler'), 'CC')
self.assertEqual(self.known_langs['c'].exts('source'), ['.c'])
self.assertEqual(self.known_langs['c++'].name, 'c++')
self.assertEqual(self.known_langs['c++'].var('compiler'), 'CXX')
self.assertEqual(self.known_langs['c++'].exts('source'),
['.cxx', '.cpp'])
def test_make_duplicate_ext(self):
msg = r"^'\.c' already used by 'c'$"
with assertRaisesRegex(self, ValueError, msg):
with self.known_langs.make('c++') as x:
x.exts(source=['.c', '.cpp'])
def test_get_unrecognized_lang(self):
msg = r"^unrecognized language 'c\+\+'$"
with assertRaisesRegex(self, ValueError, msg):
self.known_langs['c++']
def test_get_unrecognized_var(self):
msg = r"^language 'c' does not support var 'goofy'$"
with assertRaisesRegex(self, ValueError, msg):
self.known_langs['c'].var('goofy')
def test_get_unrecognized_exts(self):
msg = r"^language 'c' does not support file type 'goofy'$"
with assertRaisesRegex(self, ValueError, msg):
self.known_langs['c'].exts('goofy')
def test_fromext(self):
self.assertEqual(self.known_langs.fromext('.c', 'source'), 'c')
self.assertEqual(self.known_langs.fromext('.c', 'header'), None)
self.assertEqual(self.known_langs.fromext('.c', 'goofy'), None)
self.assertEqual(self.known_langs.fromext('.foo', 'source'), None)
```
#### File: unit/tools/test_ld.py
```python
import mock
import unittest
from ... import make_env
from bfg9000.tools.ld import LdLinker
from bfg9000.versioning import Version
def mock_execute(args, **kwargs):
return 'SEARCH_DIR("/dir1")\nSEARCH_DIR("=/dir2")\n'
class TestLdLinker(unittest.TestCase):
def setUp(self):
self.env = make_env()
def test_flavor(self):
ld = LdLinker(None, self.env, ['ld'], 'version')
self.assertEqual(ld.flavor, 'ld')
def test_lang(self):
class MockBuilder(object):
def __init__(self):
self.lang = 'c++'
ld = LdLinker(MockBuilder(), self.env, ['ld'], 'version')
self.assertEqual(ld.lang, 'c++')
def test_gnu_ld(self):
version = 'GNU ld (GNU Binutils for Ubuntu) 2.26.1'
ld = LdLinker(None, self.env, ['ld'], version)
self.assertEqual(ld.brand, 'bfd')
self.assertEqual(ld.version, Version('2.26.1'))
def test_gnu_gold(self):
version = 'GNU gold (GNU Binutils for Ubuntu 2.26.1) 1.11'
ld = LdLinker(None, self.env, ['ld'], version)
self.assertEqual(ld.brand, 'gold')
self.assertEqual(ld.version, Version('1.11'))
def test_unknown_brand(self):
version = 'unknown'
ld = LdLinker(None, self.env, ['ld'], version)
self.assertEqual(ld.brand, 'unknown')
self.assertEqual(ld.version, None)
def test_search_dirs(self):
with mock.patch('bfg9000.shell.execute', mock_execute):
ld = LdLinker(None, self.env, ['ld'], 'version')
self.assertEqual(ld.search_dirs(), ['/dir1', '/dir2'])
def test_search_dirs_sysroot(self):
with mock.patch('bfg9000.shell.execute', mock_execute):
ld = LdLinker(None, self.env, ['ld'], 'version')
self.assertEqual(ld.search_dirs(sysroot='/sysroot'),
['/dir1', '/sysroot/dir2'])
def test_search_dirs_fail(self):
def mock_bad_execute(*args, **kwargs):
raise OSError()
with mock.patch('bfg9000.shell.execute', mock_bad_execute):
ld = LdLinker(None, self.env, ['ld'], 'version')
self.assertEqual(ld.search_dirs(), [])
self.assertRaises(OSError, lambda: ld.search_dirs(strict=True))
```
#### File: unit/tools/test_msvc.py
```python
import mock
import unittest
from ... import make_env
from bfg9000 import file_types, options as opts
from bfg9000.languages import Languages
from bfg9000.packages import Framework
from bfg9000.path import Path
from bfg9000.tools.msvc import MsvcBuilder
from bfg9000.versioning import Version
known_langs = Languages()
with known_langs.make('c++') as x:
x.vars(compiler='CXX', cflags='CXXFLAGS')
def mock_which(*args, **kwargs):
return ['command']
class TestMsvcBuilder(unittest.TestCase):
def setUp(self):
self.env = make_env()
def test_properties(self):
with mock.patch('bfg9000.shell.which', mock_which):
cc = MsvcBuilder(self.env, known_langs['c++'], ['cl'], 'version')
self.assertEqual(cc.flavor, 'msvc')
self.assertEqual(cc.compiler.flavor, 'msvc')
self.assertEqual(cc.pch_compiler.flavor, 'msvc')
self.assertEqual(cc.linker('executable').flavor, 'msvc')
self.assertEqual(cc.linker('shared_library').flavor, 'msvc')
self.assertEqual(cc.linker('static_library').flavor, 'msvc')
self.assertEqual(cc.family, 'native')
self.assertEqual(cc.auto_link, True)
self.assertEqual(cc.can_dual_link, False)
self.assertEqual(cc.compiler.num_outputs, 1)
self.assertEqual(cc.pch_compiler.num_outputs, 2)
self.assertEqual(cc.linker('executable').num_outputs, 1)
self.assertEqual(cc.linker('shared_library').num_outputs, 2)
self.assertEqual(cc.compiler.deps_flavor, 'msvc')
self.assertEqual(cc.pch_compiler.deps_flavor, 'msvc')
self.assertEqual(cc.compiler.needs_libs, False)
self.assertEqual(cc.pch_compiler.needs_libs, False)
self.assertEqual(cc.compiler.accepts_pch, True)
self.assertEqual(cc.pch_compiler.accepts_pch, False)
self.assertRaises(KeyError, lambda: cc.linker('unknown'))
def test_msvc(self):
version = ('Microsoft (R) C/C++ Optimizing Compiler Version ' +
'19.12.25831 for x86')
with mock.patch('bfg9000.shell.which', mock_which):
cc = MsvcBuilder(self.env, known_langs['c++'], ['cl'], version)
self.assertEqual(cc.brand, 'msvc')
self.assertEqual(cc.compiler.brand, 'msvc')
self.assertEqual(cc.pch_compiler.brand, 'msvc')
self.assertEqual(cc.linker('executable').brand, 'msvc')
self.assertEqual(cc.linker('shared_library').brand, 'msvc')
self.assertEqual(cc.version, Version('19.12.25831'))
self.assertEqual(cc.compiler.version, Version('19.12.25831'))
self.assertEqual(cc.pch_compiler.version, Version('19.12.25831'))
self.assertEqual(cc.linker('executable').version,
Version('19.12.25831'))
self.assertEqual(cc.linker('shared_library').version,
Version('19.12.25831'))
def test_unknown_brand(self):
version = 'unknown'
with mock.patch('bfg9000.shell.which', mock_which):
cc = MsvcBuilder(self.env, known_langs['c++'], ['c++'], version)
self.assertEqual(cc.brand, 'unknown')
self.assertEqual(cc.compiler.brand, 'unknown')
self.assertEqual(cc.pch_compiler.brand, 'unknown')
self.assertEqual(cc.linker('executable').brand, 'unknown')
self.assertEqual(cc.linker('shared_library').brand, 'unknown')
self.assertEqual(cc.version, None)
self.assertEqual(cc.compiler.version, None)
self.assertEqual(cc.pch_compiler.version, None)
self.assertEqual(cc.linker('executable').version, None)
self.assertEqual(cc.linker('shared_library').version, None)
class TestMsvcCompiler(unittest.TestCase):
def setUp(self):
self.env = make_env()
with mock.patch('bfg9000.shell.which', mock_which):
self.compiler = MsvcBuilder(self.env, known_langs['c++'], ['cl'],
'version').compiler
def test_flags_empty(self):
self.assertEqual(self.compiler.flags(opts.option_list()), [])
def test_flags_include_dir(self):
p = Path('/path/to/include')
self.assertEqual(self.compiler.flags(opts.option_list(
opts.include_dir(file_types.HeaderDirectory(p))
)), ['/I' + p])
self.assertEqual(self.compiler.flags(opts.option_list(
opts.include_dir(file_types.HeaderDirectory(p))
), mode='pkg-config'), ['-I' + p])
def test_flags_define(self):
self.assertEqual(self.compiler.flags(opts.option_list(
opts.define('NAME')
)), ['/DNAME'])
self.assertEqual(self.compiler.flags(opts.option_list(
opts.define('NAME')
), mode='pkg-config'), ['-DNAME'])
self.assertEqual(self.compiler.flags(opts.option_list(
opts.define('NAME', 'value')
)), ['/DNAME=value'])
self.assertEqual(self.compiler.flags(opts.option_list(
opts.define('NAME', 'value')
), mode='pkg-config'), ['-DNAME=value'])
def test_flags_std(self):
self.assertEqual(self.compiler.flags(opts.option_list(
opts.std('c++14')
)), ['/std:c++14'])
def test_flags_include_pch(self):
p = Path('/path/to/header.hpp')
self.assertEqual(self.compiler.flags(opts.option_list(
opts.pch(file_types.MsvcPrecompiledHeader(p, None, 'header',
'native'))
)), ['/Yuheader'])
def test_flags_string(self):
self.assertEqual(self.compiler.flags(opts.option_list('-v')), ['-v'])
def test_flags_invalid(self):
with self.assertRaises(TypeError):
self.compiler.flags(opts.option_list(123))
class TestMsvcLinker(unittest.TestCase):
def setUp(self):
self.env = make_env()
version = ('Microsoft (R) C/C++ Optimizing Compiler Version ' +
'19.12.25831 for x86')
with mock.patch('bfg9000.shell.which', mock_which):
self.linker = MsvcBuilder(self.env, known_langs['c++'], ['cl'],
version).linker('executable')
def test_flags_empty(self):
self.assertEqual(self.linker.flags(opts.option_list()), [])
def test_flags_lib_dir(self):
libdir = Path('/path/to/lib')
lib = Path('/path/to/lib/foo.so')
# Lib dir
self.assertEqual(self.linker.flags(opts.option_list(
opts.lib_dir(file_types.Directory(libdir))
)), ['/LIBPATH:' + libdir])
self.assertEqual(self.linker.flags(opts.option_list(
opts.lib_dir(file_types.Directory(libdir))
), mode='pkg-config'), ['-L' + libdir])
# Shared library
self.assertEqual(self.linker.flags(opts.option_list(
opts.lib(file_types.SharedLibrary(lib, 'native'))
)), ['/LIBPATH:' + libdir])
# Static library
self.assertEqual(self.linker.flags(opts.option_list(
opts.lib(file_types.StaticLibrary(lib, 'native'))
)), ['/LIBPATH:' + libdir])
# Mixed
self.assertEqual(self.linker.flags(opts.option_list(
opts.lib_dir(file_types.Directory(libdir)),
opts.lib(file_types.SharedLibrary(lib, 'native'))
)), ['/LIBPATH:' + libdir])
def test_flags_string(self):
self.assertEqual(self.linker.flags(opts.option_list('-v')), ['-v'])
def test_flags_lib_literal(self):
self.assertEqual(self.linker.flags(opts.option_list(
opts.lib_literal('-lfoo')
)), [])
def test_flags_invalid(self):
with self.assertRaises(TypeError):
self.linker.flags(opts.option_list(123))
def test_lib_flags_empty(self):
self.assertEqual(self.linker.lib_flags(opts.option_list()), [])
def test_lib_flags_lib(self):
lib = Path('/path/to/lib/foo.lib')
self.assertEqual(self.linker.lib_flags(opts.option_list(
opts.lib(file_types.SharedLibrary(lib, 'native'))
)), [lib.basename()])
self.assertEqual(self.linker.lib_flags(opts.option_list(
opts.lib(file_types.SharedLibrary(lib, 'native'))
), mode='pkg-config'), ['-lfoo'])
self.assertEqual(self.linker.lib_flags(opts.option_list(
opts.lib(file_types.WholeArchive(
file_types.SharedLibrary(lib, 'native'))
)
)), ['/WHOLEARCHIVE:' + lib.basename()])
version = ('Microsoft (R) C/C++ Optimizing Compiler Version ' +
'18.00.25831 for x86')
with mock.patch('bfg9000.shell.which', mock_which):
linker = MsvcBuilder(self.env, known_langs['c++'], ['cl'],
version).linker('executable')
with self.assertRaises(TypeError):
linker.lib_flags(opts.option_list(
opts.lib(file_types.WholeArchive(
file_types.StaticLibrary(lib, 'native')
))
))
with self.assertRaises(TypeError):
self.linker.lib_flags(opts.option_list(
opts.lib(Framework('cocoa'))
))
def test_lib_flags_lib_literal(self):
self.assertEqual(self.linker.lib_flags(opts.option_list(
opts.lib_literal('/?')
)), ['/?'])
def test_lib_flags_ignored(self):
self.assertEqual(self.linker.lib_flags(opts.option_list('-Lfoo')), [])
class TestMsvcStaticLinker(unittest.TestCase):
def setUp(self):
self.env = make_env()
with mock.patch('bfg9000.shell.which', mock_which):
self.linker = MsvcBuilder(self.env, known_langs['c++'], ['cl'],
'version').linker('static_library')
def test_flags_empty(self):
self.assertEqual(self.linker.flags(opts.option_list()), [])
def test_flags_string(self):
self.assertEqual(self.linker.flags(opts.option_list('-v')), ['-v'])
def test_flags_invalid(self):
with self.assertRaises(TypeError):
self.linker.flags(opts.option_list(123))
``` |